@grainulation/wheat 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +136 -0
- package/bin/wheat.js +193 -0
- package/compiler/detect-sprints.js +319 -0
- package/compiler/generate-manifest.js +280 -0
- package/compiler/wheat-compiler.js +1229 -0
- package/lib/compiler.js +35 -0
- package/lib/connect.js +418 -0
- package/lib/disconnect.js +188 -0
- package/lib/guard.js +151 -0
- package/lib/index.js +14 -0
- package/lib/init.js +457 -0
- package/lib/install-prompt.js +186 -0
- package/lib/quickstart.js +276 -0
- package/lib/serve-mcp.js +509 -0
- package/lib/server.js +391 -0
- package/lib/stats.js +184 -0
- package/lib/status.js +135 -0
- package/lib/update.js +71 -0
- package/package.json +53 -0
- package/public/index.html +1798 -0
- package/templates/claude.md +122 -0
- package/templates/commands/blind-spot.md +47 -0
- package/templates/commands/brief.md +73 -0
- package/templates/commands/calibrate.md +39 -0
- package/templates/commands/challenge.md +72 -0
- package/templates/commands/connect.md +104 -0
- package/templates/commands/evaluate.md +80 -0
- package/templates/commands/feedback.md +60 -0
- package/templates/commands/handoff.md +53 -0
- package/templates/commands/init.md +68 -0
- package/templates/commands/merge.md +51 -0
- package/templates/commands/present.md +52 -0
- package/templates/commands/prototype.md +68 -0
- package/templates/commands/replay.md +61 -0
- package/templates/commands/research.md +73 -0
- package/templates/commands/resolve.md +42 -0
- package/templates/commands/status.md +56 -0
- package/templates/commands/witness.md +79 -0
- package/templates/explainer.html +343 -0
|
@@ -0,0 +1,1229 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Wheat Compiler — Bran-based compilation passes for research claims
|
|
4
|
+
*
|
|
5
|
+
* Reads claims.json, runs validation/conflict/resolution passes,
|
|
6
|
+
* outputs compilation.json that all output artifacts consume.
|
|
7
|
+
*
|
|
8
|
+
* Usage:
|
|
9
|
+
* node wheat-compiler.js # compile and write compilation.json
|
|
10
|
+
* node wheat-compiler.js --check # compile and exit with error code if blocked
|
|
11
|
+
* node wheat-compiler.js --summary # print human-readable summary to stdout
|
|
12
|
+
* node wheat-compiler.js --gate # staleness check + readiness gate
|
|
13
|
+
* node wheat-compiler.js --input X --output Y # compile arbitrary claims file
|
|
14
|
+
* node wheat-compiler.js --diff A B # diff two compilation.json files
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import fs from 'fs';
|
|
18
|
+
import crypto from 'crypto';
|
|
19
|
+
import path from 'path';
|
|
20
|
+
import { execFileSync } from 'child_process';
|
|
21
|
+
import { fileURLToPath } from 'url';
|
|
22
|
+
|
|
23
|
+
// Sprint detection — git-based, no config pointer needed (p013/f001)
|
|
24
|
+
import { detectSprints } from './detect-sprints.js';
|
|
25
|
+
|
|
26
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
27
|
+
const __dirname = path.dirname(__filename);
|
|
28
|
+
|
|
29
|
+
// ─── --dir: target directory (defaults to script location for backwards compat) ─
// Parse an optional `--dir <path>` CLI flag. When the flag is present AND is
// followed by a value, TARGET_DIR is that value resolved to an absolute path;
// otherwise it falls back to the directory containing this script (__dirname).
const _dirIdx = process.argv.indexOf('--dir');
const TARGET_DIR = _dirIdx !== -1 && process.argv[_dirIdx + 1]
  ? path.resolve(process.argv[_dirIdx + 1])
  : __dirname;
|
|
34
|
+
|
|
35
|
+
// ─── Configuration ──────────────────────────────────────────────────────────
/**
 * Load wheat.config.json from `dir` and merge it over built-in defaults.
 * Any read or parse failure silently yields the defaults.
 * @param {string} dir - Directory expected to contain wheat.config.json
 * @returns {{ dirs: Object<string, string>, compiler: Object<string, string> }} Merged config
 */
function loadConfig(dir) {
  const defaults = {
    dirs: { output: 'output', research: 'research', prototypes: 'prototypes', evidence: 'evidence', templates: 'templates' },
    compiler: { claims: 'claims.json', compilation: 'compilation.json' },
  };

  let userConfig;
  try {
    const raw = fs.readFileSync(path.join(dir, 'wheat.config.json'), 'utf8');
    userConfig = JSON.parse(raw);
  } catch {
    // Missing or malformed config file — fall back to defaults.
    return defaults;
  }

  return {
    dirs: { ...defaults.dirs, ...(userConfig.dirs || {}) },
    compiler: { ...defaults.compiler, ...(userConfig.compiler || {}) },
  };
}
|
|
54
|
+
|
|
55
|
+
// Module-wide config, loaded once at startup from wheat.config.json in the
// target directory (falls back to built-in defaults when absent/invalid).
const config = loadConfig(TARGET_DIR);
|
|
56
|
+
|
|
57
|
+
// ─── Evidence tier hierarchy (higher = stronger) ─────────────────────────────
/**
 * Maps evidence tier names to numeric strength (1–5). Higher ranks sort first
 * in sortByEvidenceTier and win ties in autoResolve's conflict resolution.
 * @type {Object<string, number>}
 */
const EVIDENCE_TIERS = {
  stated: 1,
  web: 2,
  documented: 3,
  tested: 4,
  production: 5,
};

/** @type {string[]} Allowed claim type values (checked in Pass 2: validateTypes) */
const VALID_TYPES = ['constraint', 'factual', 'estimate', 'risk', 'recommendation', 'feedback'];
// Allowed claim lifecycle statuses (checked in Pass 2: validateTypes).
const VALID_STATUSES = ['active', 'superseded', 'conflicted', 'resolved'];
// Phases a claim may be tagged with via `phase_added` (see summarizePhases).
const VALID_PHASES = ['define', 'research', 'prototype', 'evaluate', 'feedback'];
// Overall sprint phase ordering, from initialization through compilation.
const PHASE_ORDER = ['init', 'define', 'research', 'prototype', 'evaluate', 'compile'];

// Burn-residue ID prefix — synthetic claims from /control-burn must never persist
// (validateSchema rejects any claim whose id starts with this prefix).
const BURN_PREFIX = 'burn-';

// ─── Schema Migration Framework [r237] ──────────────────────────────────────
// Highest claims.json schema version this compiler understands.
const CURRENT_SCHEMA = '1.0';

/**
 * Ordered list of migration functions. Each entry migrates from one version to the next.
 * Add new entries here as schema evolves: { from: '1.0', to: '1.1', migrate: fn }
 * The migrate function receives the full claimsData object and returns it mutated.
 * Applied in order by checkAndMigrateSchema until the file reaches CURRENT_SCHEMA.
 */
const SCHEMA_MIGRATIONS = [
  // Example for future use:
  // { from: '1.0', to: '1.1', migrate(data) { /* transform data */ return data; } },
];
|
|
88
|
+
|
|
89
|
+
/**
 * Compare two dotted version strings (e.g. '1.0', '2.1').
 * Missing segments are treated as 0, so '1.0' equals '1.0.0'.
 * @param {string} a
 * @param {string} b
 * @returns {number} -1 if a < b, 0 if equal, 1 if a > b
 */
function compareVersions(a, b) {
  const left = a.split('.').map(Number);
  const right = b.split('.').map(Number);
  const segments = Math.max(left.length, right.length);

  for (let i = 0; i < segments; i++) {
    const diff = (left[i] || 0) - (right[i] || 0);
    if (diff < 0) return -1;
    if (diff > 0) return 1;
  }

  return 0;
}
|
|
104
|
+
|
|
105
|
+
/**
 * Validate and migrate schema version. Returns { data, errors }.
 * - Missing schema_version is treated as '1.0' (backwards compat).
 * - If schema_version > CURRENT_SCHEMA, returns a fatal error.
 * - If schema_version < CURRENT_SCHEMA, runs migrations in order.
 * @param {object} claimsData - Parsed claims.json contents
 * @returns {{data: object, errors: Array<{code: string, message: string}>}}
 */
function checkAndMigrateSchema(claimsData) {
  const fileVersion = (claimsData.meta || {}).schema_version || '1.0';

  // A file written by a newer compiler cannot be handled — fatal error.
  if (compareVersions(fileVersion, CURRENT_SCHEMA) > 0) {
    return {
      data: claimsData,
      errors: [{
        code: 'E_SCHEMA_VERSION',
        message: `claims.json uses schema v${fileVersion} but this compiler only supports up to v${CURRENT_SCHEMA}. Run: npx @grainulation/wheat@latest compile`,
      }],
    };
  }

  // Walk the ordered migration chain while the file version lags behind.
  let version = fileVersion;
  for (const step of SCHEMA_MIGRATIONS) {
    const matchesStep = compareVersions(version, step.from) === 0;
    const behindCurrent = compareVersions(version, CURRENT_SCHEMA) < 0;
    if (matchesStep && behindCurrent) {
      claimsData = step.migrate(claimsData);
      version = step.to;
      // Stamp the migrated version back onto the data.
      if (!claimsData.meta) claimsData.meta = {};
      claimsData.meta.schema_version = version;
    }
  }

  return { data: claimsData, errors: [] };
}
|
|
140
|
+
|
|
141
|
+
export { CURRENT_SCHEMA, SCHEMA_MIGRATIONS, checkAndMigrateSchema, compareVersions };
|
|
142
|
+
|
|
143
|
+
// ─── Pass 1: Schema Validation (+ burn-residue safety check) ────────────────
/**
 * Validate the basic shape of every claim: burn-residue IDs, required
 * fields, and duplicate IDs.
 * @param {Array<object>} claims - Raw claims from claims.json
 * @returns {Array<{code: string, message: string, claims: string[]}>} Errors (empty = pass)
 */
function validateSchema(claims) {
  const errors = [];
  const requiredFields = ['id', 'type', 'topic', 'content', 'source', 'evidence', 'status'];

  // Count each ID once up front. This makes the duplicate check O(n) instead
  // of the previous per-claim O(n²) filter, and lets us report each duplicated
  // ID exactly once. The old guard `claims.indexOf(claim) === i` compared
  // object references — always true for the element at index i — so every
  // occurrence of a duplicate produced its own E_DUPLICATE_ID error.
  const idCounts = new Map();
  for (const c of claims) {
    if (c.id) idCounts.set(c.id, (idCounts.get(c.id) || 0) + 1);
  }
  const reportedDupes = new Set();

  claims.forEach((claim, i) => {
    // Burn-residue safety check: reject claims with burn- prefix.
    // (typeof guard: a non-string id would previously throw on .startsWith)
    if (typeof claim.id === 'string' && claim.id.startsWith(BURN_PREFIX)) {
      errors.push({
        code: 'E_BURN_RESIDUE',
        message: `Claim ${claim.id} has burn- prefix — synthetic claims from /control-burn must not persist in claims.json. Remove it before compiling.`,
        claims: [claim.id],
      });
    }

    // Every claim must carry all required fields, non-empty.
    requiredFields.forEach(field => {
      if (claim[field] === undefined || claim[field] === null || claim[field] === '') {
        errors.push({
          code: 'E_SCHEMA',
          message: `Claim ${claim.id || `[index ${i}]`} missing required field: ${field}`,
          claims: [claim.id || `index:${i}`],
        });
      }
    });

    // Duplicate IDs: report once per duplicated ID, at its first occurrence.
    // Claims with no id at all are covered by the E_SCHEMA check above.
    if (claim.id && idCounts.get(claim.id) > 1 && !reportedDupes.has(claim.id)) {
      reportedDupes.add(claim.id);
      errors.push({
        code: 'E_DUPLICATE_ID',
        message: `Duplicate claim ID: ${claim.id}`,
        claims: [claim.id],
      });
    }
  });

  return errors;
}
|
|
181
|
+
|
|
182
|
+
// ─── Pass 2: Type Checking ───────────────────────────────────────────────────
/**
 * Check each claim's enumerated fields (type, evidence tier, status) against
 * the allowed value lists.
 * @param {Array<object>} claims
 * @returns {Array<{code: string, message: string, claims: string[]}>} Errors (empty = pass)
 */
function validateTypes(claims) {
  const errors = [];

  for (const claim of claims) {
    // One rule per enumerated field; checked in declaration order so error
    // ordering matches the original type → evidence → status sequence.
    const rules = [
      { field: 'type', allowed: VALID_TYPES, code: 'E_TYPE', label: 'type' },
      { field: 'evidence', allowed: Object.keys(EVIDENCE_TIERS), code: 'E_EVIDENCE_TIER', label: 'evidence tier' },
      { field: 'status', allowed: VALID_STATUSES, code: 'E_STATUS', label: 'status' },
    ];

    for (const { field, allowed, code, label } of rules) {
      if (!allowed.includes(claim[field])) {
        errors.push({
          code,
          message: `Claim ${claim.id}: invalid ${label} "${claim[field]}". Must be one of: ${allowed.join(', ')}`,
          claims: [claim.id],
        });
      }
    }
  }

  return errors;
}
|
|
214
|
+
|
|
215
|
+
// ─── Pass 3: Evidence Tier Sorting (deterministic: tier → id) ────────────────
/**
 * Return a new array of claims ordered strongest-evidence-first, with a
 * lexicographic claim-id tiebreak for run-to-run determinism.
 * @param {Array<object>} claims
 * @returns {Array<object>} Sorted copy (input is not mutated)
 */
function sortByEvidenceTier(claims) {
  const rankOf = (claim) => EVIDENCE_TIERS[claim.evidence] || 0;

  return claims.slice().sort((first, second) => {
    const byTier = rankOf(second) - rankOf(first);
    if (byTier !== 0) return byTier;
    // Deterministic tiebreak: lexicographic by claim ID (stable across runs)
    return (first.id || '').localeCompare(second.id || '');
  });
}
|
|
224
|
+
|
|
225
|
+
// ─── Pass 4: Conflict Detection ──────────────────────────────────────────────
/**
 * Find pairs of live (active/conflicted) claims explicitly marked as
 * conflicting via `conflicts_with` in either direction.
 * @param {Array<object>} claims
 * @returns {Array<{claimA: string, claimB: string, topic: string}>} One entry per conflicting pair
 */
function detectConflicts(claims) {
  const live = claims.filter(c => c.status === 'active' || c.status === 'conflicted');
  const conflicts = [];

  live.forEach((first, idx) => {
    for (const second of live.slice(idx + 1)) {
      // A pair conflicts when either side names the other; a mutual marking
      // still yields a single entry.
      const marked =
        (first.conflicts_with || []).includes(second.id) ||
        (second.conflicts_with || []).includes(first.id);
      if (marked) {
        conflicts.push({ claimA: first.id, claimB: second.id, topic: first.topic });
      }
    }
  });

  return conflicts;
}
|
|
246
|
+
|
|
247
|
+
// ─── Pass 5: Auto-Resolution ─────────────────────────────────────────────────
/**
 * Resolve detected conflicts by evidence tier. The stronger claim wins and the
 * loser is mutated to status 'superseded' (with `resolved_by` set); equal-tier
 * pairs are mutated to status 'conflicted' and reported as unresolved.
 * @param {Array<object>} claims - Mutated in place for losing/tied claims
 * @param {Array<{claimA: string, claimB: string, topic: string}>} conflicts
 * @returns {{resolved: Array<object>, unresolved: Array<object>}}
 */
function autoResolve(claims, conflicts) {
  const resolved = [];
  const unresolved = [];

  for (const conflict of conflicts) {
    const claimA = claims.find(c => c.id === conflict.claimA);
    const claimB = claims.find(c => c.id === conflict.claimB);

    // Dangling reference — record and move on.
    if (!claimA || !claimB) {
      unresolved.push({ ...conflict, reason: 'claim_not_found' });
      continue;
    }

    const tierA = EVIDENCE_TIERS[claimA.evidence] || 0;
    const tierB = EVIDENCE_TIERS[claimB.evidence] || 0;

    if (tierA === tierB) {
      // Same evidence tier — cannot auto-resolve; both sides stay contested.
      unresolved.push({
        claimA: claimA.id,
        claimB: claimB.id,
        topic: conflict.topic,
        reason: `same_evidence_tier: both ${claimA.evidence}`,
      });
      claimA.status = 'conflicted';
      claimB.status = 'conflicted';
      continue;
    }

    // Strictly stronger evidence wins; the loser is superseded in place.
    const aWins = tierA > tierB;
    const winner = aWins ? claimA : claimB;
    const loser = aWins ? claimB : claimA;
    const winTier = aWins ? tierA : tierB;
    const loseTier = aWins ? tierB : tierA;

    resolved.push({
      winner: winner.id,
      loser: loser.id,
      reason: `evidence_tier: ${winner.evidence} (${winTier}) > ${loser.evidence} (${loseTier})`,
    });
    loser.status = 'superseded';
    loser.resolved_by = winner.id;
  }

  return { resolved, unresolved };
}
|
|
295
|
+
|
|
296
|
+
// ─── Pass 6: Coverage Analysis (enhanced with source/type diversity + corroboration) ─
/**
 * Summarize per-topic coverage for active/resolved claims and compute a
 * per-claim corroboration count from witness relationships.
 * @param {Array<object>} claims - All claims; superseded ones are excluded per-section below
 * @returns {{coverage: Object<string, object>, corroboration: Object<string, number>}}
 *   coverage: topic → { claims, max_evidence, status, types, claim_ids,
 *   constraint_count, source_origins, source_count, type_diversity, missing_types };
 *   corroboration: claim id → count of supporting witness claims
 */
function analyzeCoverage(claims) {
  const coverage = {};
  // Only active/resolved claims count toward topic coverage.
  const activeClaims = claims.filter(c => c.status === 'active' || c.status === 'resolved');

  activeClaims.forEach(claim => {
    if (!claim.topic) return; // claims without a topic cannot be bucketed

    // Lazily create the per-topic accumulator on first sighting.
    if (!coverage[claim.topic]) {
      coverage[claim.topic] = {
        claims: 0,
        max_evidence: 'stated', // weakest tier as the starting floor
        max_evidence_rank: 0,
        types: new Set(),
        claim_ids: [],
        constraint_count: 0,
        source_origins: new Set(),
      };
    }

    const entry = coverage[claim.topic];
    entry.claims++;
    entry.types.add(claim.type);
    entry.claim_ids.push(claim.id);
    // constraint/feedback claims are tallied separately — checkReadiness uses
    // this to soften weak-evidence warnings for constraint-dominated topics.
    if (claim.type === 'constraint' || claim.type === 'feedback') {
      entry.constraint_count++;
    }

    // Track source diversity
    if (claim.source && claim.source.origin) {
      entry.source_origins.add(claim.source.origin);
    }

    // Track the strongest evidence tier seen on this topic so far.
    const tier = EVIDENCE_TIERS[claim.evidence] || 0;
    if (tier > entry.max_evidence_rank) {
      entry.max_evidence = claim.evidence;
      entry.max_evidence_rank = tier;
    }
  });

  // Compute corroboration: how many other claims reference/support each claim
  const corroboration = {};
  const allClaims = claims.filter(c => c.status !== 'superseded');
  allClaims.forEach(claim => {
    corroboration[claim.id] = 0;
  });
  // A claim corroborates another if it has source.witnessed_claim or source.challenged_claim
  // or shares the same topic and type with supporting relationship
  // (only witnessed_claim with a full/partial-support relationship is counted here).
  allClaims.forEach(claim => {
    if (claim.source) {
      if (claim.source.witnessed_claim && corroboration[claim.source.witnessed_claim] !== undefined) {
        if (claim.source.relationship === 'full_support' || claim.source.relationship === 'partial_support') {
          corroboration[claim.source.witnessed_claim]++;
        }
      }
    }
  });

  // Convert sets to arrays and compute status (deterministic key ordering)
  const result = {};
  Object.entries(coverage).sort(([a], [b]) => a.localeCompare(b)).forEach(([topic, entry]) => {
    // Status thresholds: tested-or-better → strong, documented → moderate, else weak.
    let status = 'weak';
    if (entry.max_evidence_rank >= EVIDENCE_TIERS.tested) status = 'strong';
    else if (entry.max_evidence_rank >= EVIDENCE_TIERS.documented) status = 'moderate';

    // Type diversity: how many of the 6 possible types are present
    const allTypes = [...entry.types].sort();
    const missingTypes = VALID_TYPES.filter(t => !allTypes.includes(t));

    // Source origins (sorted for determinism)
    const sourceOrigins = [...entry.source_origins].sort();

    result[topic] = {
      claims: entry.claims,
      max_evidence: entry.max_evidence,
      status,
      types: allTypes,
      claim_ids: entry.claim_ids,
      constraint_count: entry.constraint_count,
      // New: source diversity
      source_origins: sourceOrigins,
      source_count: sourceOrigins.length,
      // New: type diversity
      type_diversity: allTypes.length,
      missing_types: missingTypes,
    };
  });

  return { coverage: result, corroboration };
}
|
|
386
|
+
|
|
387
|
+
// ─── Pass 7: Readiness Check ─────────────────────────────────────────────────
/**
 * Turn validation errors, unresolved conflicts, and coverage analysis into a
 * blockers/warnings verdict. Blockers prevent compilation readiness; warnings
 * (weak evidence, type monoculture, echo chamber) are advisory.
 * @param {Array<object>} errors - Validation errors (each becomes a blocker)
 * @param {Array<object>} unresolvedConflicts - From autoResolve
 * @param {Object<string, object>} coverage - From analyzeCoverage
 * @returns {{blockers: Array<object>, warnings: Array<object>}}
 */
function checkReadiness(errors, unresolvedConflicts, coverage) {
  const blockers = errors.slice();

  // Unresolved conflicts are blockers
  for (const conflict of unresolvedConflicts) {
    blockers.push({
      code: 'E_CONFLICT',
      message: `Unresolved conflict between ${conflict.claimA} and ${conflict.claimB} (topic: ${conflict.topic}) — ${conflict.reason}`,
      claims: [conflict.claimA, conflict.claimB],
    });
  }

  // Weak coverage is a warning, not a blocker (sorted for determinism)
  const warnings = [];
  const topicsSorted = Object.entries(coverage).sort(([a], [b]) => a.localeCompare(b));

  for (const [topic, entry] of topicsSorted) {
    if (entry.status === 'weak') {
      // Constraint-dominated topics (>50% constraint/feedback) get a softer warning
      const constraintRatio = (entry.constraint_count || 0) / entry.claims;
      const weakWarning = constraintRatio > 0.5
        ? {
            code: 'W_CONSTRAINT_ONLY',
            message: `Topic "${topic}" is constraint-dominated (${entry.constraint_count}/${entry.claims} claims are constraints/feedback) — stated-level evidence is expected`,
            claims: entry.claim_ids,
          }
        : {
            code: 'W_WEAK_EVIDENCE',
            message: `Topic "${topic}" has only ${entry.max_evidence}-level evidence (${entry.claims} claims)`,
            claims: entry.claim_ids,
          };
      warnings.push(weakWarning);
    }

    // Type monoculture warning
    if (entry.type_diversity < 2 && entry.claims >= 1) {
      warnings.push({
        code: 'W_TYPE_MONOCULTURE',
        message: `Topic "${topic}" has only ${entry.type_diversity} claim type(s): ${entry.types.join(', ')}. Missing: ${entry.missing_types.join(', ')}`,
        claims: entry.claim_ids,
      });
    }

    // Echo chamber warning: all claims from single source origin
    if (entry.source_count === 1 && entry.claims >= 3) {
      warnings.push({
        code: 'W_ECHO_CHAMBER',
        message: `Topic "${topic}" has ${entry.claims} claims but all from a single source origin: ${entry.source_origins[0]}`,
        claims: entry.claim_ids,
      });
    }
  }

  return { blockers, warnings };
}
|
|
442
|
+
|
|
443
|
+
// ─── Phase Summary ───────────────────────────────────────────────────────────
/**
 * Tally claims per sprint phase using each claim's `phase_added` tag.
 * @param {Array<object>} claims
 * @returns {Object<string, {claims: number, complete: boolean}>} phase → tally;
 *   `complete` means at least one claim was recorded in that phase.
 */
function summarizePhases(claims) {
  const entries = VALID_PHASES.map(phase => {
    const count = claims.filter(c => c.phase_added === phase).length;
    return [phase, { claims: count, complete: count > 0 }];
  });
  return Object.fromEntries(entries);
}
|
|
455
|
+
|
|
456
|
+
// ─── Canonical JSON — key-order-independent serialization ────────────────────
/**
 * Serialize a value to JSON with object keys emitted in sorted order, so the
 * output is stable regardless of property insertion order (used for hashing).
 * @param {*} obj
 * @returns {string} Canonical JSON text
 */
function canonicalJSON(obj) {
  if (obj === null || typeof obj !== 'object') {
    return JSON.stringify(obj);
  }
  if (Array.isArray(obj)) {
    return `[${obj.map(canonicalJSON).join(',')}]`;
  }
  const body = Object.keys(obj)
    .sort()
    .map(key => `${JSON.stringify(key)}:${canonicalJSON(obj[key])}`)
    .join(',');
  return `{${body}}`;
}
|
|
463
|
+
|
|
464
|
+
// ─── Compilation Certificate ─────────────────────────────────────────────────
/**
 * Produce a provenance certificate for a compilation: a SHA-256 hash of the
 * canonicalized input plus the compiler version.
 * @param {object} claimsData - Full parsed claims.json contents
 * @param {string} compilerVersion
 * @returns {{input_hash: string, compiler_version: string, deterministic: boolean}}
 */
function generateCertificate(claimsData, compilerVersion) {
  // Canonical serialization makes the hash independent of key insertion order.
  const digest = crypto
    .createHash('sha256')
    .update(canonicalJSON(claimsData))
    .digest('hex');

  return {
    input_hash: `sha256:${digest}`,
    compiler_version: compilerVersion,
    deterministic: true,
  };
}
|
|
476
|
+
|
|
477
|
+
// ─── diffCompilations — compare two compilation objects ─────────────────────
/**
 * Compare two compilation objects and return a structured delta.
 * Pure function: neither input is mutated; all comparisons key off claim ids,
 * topic names, and winner/loser or claimA/claimB pair strings.
 * @param {object} before - Earlier compilation.json contents
 * @param {object} after - Later compilation.json contents
 * @returns {object} Delta with new/removed claims, coverage changes, conflict changes
 */
function diffCompilations(before, after) {
  const delta = {
    new_claims: [],
    removed_claims: [],
    status_changes: [],
    coverage_changes: [],
    conflict_changes: {
      new_resolved: [],
      new_unresolved: [],
      removed_resolved: [],
      removed_unresolved: [],
    },
    meta_changes: {},
  };

  // Claim IDs
  const beforeIds = new Set((before.resolved_claims || []).map(c => c.id));
  const afterIds = new Set((after.resolved_claims || []).map(c => c.id));

  // Set difference in both directions: added vs removed claim ids.
  afterIds.forEach(id => {
    if (!beforeIds.has(id)) delta.new_claims.push(id);
  });
  beforeIds.forEach(id => {
    if (!afterIds.has(id)) delta.removed_claims.push(id);
  });

  // Status changes on claims that exist in both
  const beforeClaimsMap = {};
  (before.resolved_claims || []).forEach(c => { beforeClaimsMap[c.id] = c; });
  const afterClaimsMap = {};
  (after.resolved_claims || []).forEach(c => { afterClaimsMap[c.id] = c; });

  for (const id of beforeIds) {
    if (afterIds.has(id)) {
      const bc = beforeClaimsMap[id];
      const ac = afterClaimsMap[id];
      if (bc.status !== ac.status) {
        delta.status_changes.push({ id, from: bc.status, to: ac.status });
      }
    }
  }

  // Coverage changes: classify each topic as added / removed / changed.
  const beforeCov = before.coverage || {};
  const afterCov = after.coverage || {};
  const allTopics = new Set([...Object.keys(beforeCov), ...Object.keys(afterCov)]);
  allTopics.forEach(topic => {
    const bc = beforeCov[topic];
    const ac = afterCov[topic];
    if (!bc && ac) {
      delta.coverage_changes.push({ topic, type: 'added', after: ac });
    } else if (bc && !ac) {
      delta.coverage_changes.push({ topic, type: 'removed', before: bc });
    } else if (bc && ac) {
      // Only three coverage fields are compared: max_evidence, status, claims.
      const changes = {};
      if (bc.max_evidence !== ac.max_evidence) changes.max_evidence = { from: bc.max_evidence, to: ac.max_evidence };
      if (bc.status !== ac.status) changes.status = { from: bc.status, to: ac.status };
      if (bc.claims !== ac.claims) changes.claims = { from: bc.claims, to: ac.claims };
      if (Object.keys(changes).length > 0) {
        delta.coverage_changes.push({ topic, type: 'changed', changes });
      }
    }
  });

  // Conflict graph changes — pairs are compared as joined strings:
  // resolved entries as "winner>loser", unresolved as "claimA|claimB".
  const beforeResolved = new Set((before.conflict_graph?.resolved || []).map(r => `${r.winner}>${r.loser}`));
  const afterResolved = new Set((after.conflict_graph?.resolved || []).map(r => `${r.winner}>${r.loser}`));
  const beforeUnresolved = new Set((before.conflict_graph?.unresolved || []).map(u => `${u.claimA}|${u.claimB}`));
  const afterUnresolved = new Set((after.conflict_graph?.unresolved || []).map(u => `${u.claimA}|${u.claimB}`));

  afterResolved.forEach(r => { if (!beforeResolved.has(r)) delta.conflict_changes.new_resolved.push(r); });
  beforeResolved.forEach(r => { if (!afterResolved.has(r)) delta.conflict_changes.removed_resolved.push(r); });
  afterUnresolved.forEach(u => { if (!beforeUnresolved.has(u)) delta.conflict_changes.new_unresolved.push(u); });
  beforeUnresolved.forEach(u => { if (!afterUnresolved.has(u)) delta.conflict_changes.removed_unresolved.push(u); });

  // Meta changes: top-level status plus sprint_meta phase / total_claims.
  if (before.status !== after.status) delta.meta_changes.status = { from: before.status, to: after.status };
  if ((before.sprint_meta?.phase) !== (after.sprint_meta?.phase)) {
    delta.meta_changes.phase = { from: before.sprint_meta?.phase, to: after.sprint_meta?.phase };
  }
  if ((before.sprint_meta?.total_claims) !== (after.sprint_meta?.total_claims)) {
    delta.meta_changes.total_claims = { from: before.sprint_meta?.total_claims, to: after.sprint_meta?.total_claims };
  }

  return delta;
}
|
|
570
|
+
|
|
571
|
+
// ─── Manifest Generation (topic map) ─────────────────────────────────────────
/**
 * Run generate-manifest.js to produce wheat-manifest.json.
 * Called automatically after each compilation. Failures are non-fatal
 * (manifest is an optimization, not a correctness requirement).
 * @param {object} compilation - The compiled output (unused, but available for future use)
 * @param {string} [dir] - Directory containing generate-manifest.js (defaults to TARGET_DIR)
 */
function generateManifest(compilation, dir) {
  const baseDir = dir || TARGET_DIR;
  const manifestScript = path.join(baseDir, 'generate-manifest.js');

  // Manifest generator not present — skip silently.
  if (!fs.existsSync(manifestScript)) {
    return;
  }

  try {
    const stdout = execFileSync(process.execPath, [manifestScript], {
      cwd: baseDir,
      timeout: 10000,
      stdio: ['ignore', 'pipe', 'pipe'],
    });
    // Print manifest summary on --summary runs (stdout captured above)
    const summary = stdout.toString().trim();
    if (summary && process.argv.includes('--summary')) {
      console.log(`\nManifest: ${summary}`);
    }
  } catch (err) {
    // Non-fatal: warn but don't block compilation
    const detail = err.stderr ? err.stderr.toString().trim() : err.message;
    console.error(`Warning: manifest generation failed — ${detail}`);
  }
}
|
|
602
|
+
|
|
603
|
+
// ─── Main Compilation Pipeline ───────────────────────────────────────────────
|
|
604
|
+
/**
|
|
605
|
+
* Run the full compilation pipeline: validate, sort, detect conflicts, resolve, compute coverage.
|
|
606
|
+
* @param {string|null} inputPath - Path to claims.json (null = default from config)
|
|
607
|
+
* @param {string|null} outputPath - Path to write compilation.json (null = default from config)
|
|
608
|
+
* @returns {object} The compiled output object
|
|
609
|
+
*/
|
|
610
|
+
function compile(inputPath, outputPath, dir) {
|
|
611
|
+
const compilerVersion = '0.2.0';
|
|
612
|
+
const baseDir = dir || TARGET_DIR;
|
|
613
|
+
const claimsPath = inputPath || path.join(baseDir, config.compiler.claims);
|
|
614
|
+
const compilationOutputPath = outputPath || path.join(baseDir, config.compiler.compilation);
|
|
615
|
+
|
|
616
|
+
// Read claims
|
|
617
|
+
if (!fs.existsSync(claimsPath)) {
|
|
618
|
+
console.error(`Error: ${path.basename(claimsPath)} not found. Run "wheat init" to start a sprint.`);
|
|
619
|
+
process.exit(1);
|
|
620
|
+
}
|
|
621
|
+
|
|
622
|
+
const raw = fs.readFileSync(claimsPath, 'utf8');
|
|
623
|
+
let claimsData;
|
|
624
|
+
try {
|
|
625
|
+
claimsData = JSON.parse(raw);
|
|
626
|
+
} catch (e) {
|
|
627
|
+
console.error(`Error: ${path.basename(claimsPath)} is not valid JSON — ${e.message}`);
|
|
628
|
+
process.exit(1);
|
|
629
|
+
}
|
|
630
|
+
// ── Schema version check + migration [r237] ──────────────────────────────
|
|
631
|
+
const migrationResult = checkAndMigrateSchema(claimsData);
|
|
632
|
+
if (migrationResult.errors.length > 0) {
|
|
633
|
+
for (const err of migrationResult.errors) {
|
|
634
|
+
console.error(`Error: ${err.message}`);
|
|
635
|
+
}
|
|
636
|
+
process.exit(1);
|
|
637
|
+
}
|
|
638
|
+
claimsData = migrationResult.data;
|
|
639
|
+
|
|
640
|
+
const claims = claimsData.claims || [];
|
|
641
|
+
const meta = claimsData.meta || {};
|
|
642
|
+
|
|
643
|
+
// Run passes
|
|
644
|
+
const schemaErrors = validateSchema(claims);
|
|
645
|
+
const typeErrors = validateTypes(claims);
|
|
646
|
+
const allValidationErrors = [...schemaErrors, ...typeErrors];
|
|
647
|
+
|
|
648
|
+
// Only run conflict/resolution if validation passes
|
|
649
|
+
let conflictGraph = { resolved: [], unresolved: [] };
|
|
650
|
+
let coverageResult = { coverage: {}, corroboration: {} };
|
|
651
|
+
let readiness = { blockers: allValidationErrors, warnings: [] };
|
|
652
|
+
let resolvedClaims = claims.filter(c => c.status === 'active' || c.status === 'resolved');
|
|
653
|
+
|
|
654
|
+
if (allValidationErrors.length === 0) {
|
|
655
|
+
const sortedClaims = sortByEvidenceTier(claims);
|
|
656
|
+
const conflicts = detectConflicts(sortedClaims);
|
|
657
|
+
conflictGraph = autoResolve(claims, conflicts);
|
|
658
|
+
coverageResult = analyzeCoverage(claims);
|
|
659
|
+
readiness = checkReadiness([], conflictGraph.unresolved, coverageResult.coverage);
|
|
660
|
+
resolvedClaims = claims.filter(c => c.status === 'active' || c.status === 'resolved');
|
|
661
|
+
}
|
|
662
|
+
|
|
663
|
+
const phaseSummary = summarizePhases(claims);
|
|
664
|
+
const certificate = generateCertificate(claimsData, compilerVersion);
|
|
665
|
+
|
|
666
|
+
// Determine overall status
|
|
667
|
+
const status = readiness.blockers.length > 0 ? 'blocked' : 'ready';
|
|
668
|
+
|
|
669
|
+
// Determine current phase from meta or infer from claims
|
|
670
|
+
const currentPhase = meta.phase || inferPhase(phaseSummary);
|
|
671
|
+
|
|
672
|
+
// ── Sprint detection (git-based, non-fatal) ──────────────────────────────
|
|
673
|
+
let sprintsInfo = { active: null, sprints: [] };
|
|
674
|
+
try {
|
|
675
|
+
sprintsInfo = detectSprints(baseDir);
|
|
676
|
+
} catch (err) {
|
|
677
|
+
// Non-fatal: sprint detection failure should not block compilation
|
|
678
|
+
console.error(`Warning: sprint detection failed — ${err.message}`);
|
|
679
|
+
}
|
|
680
|
+
|
|
681
|
+
// Build sprint summaries: active sprint gets full compilation, others get summary entries
|
|
682
|
+
const sprintSummaries = sprintsInfo.sprints.map(s => ({
|
|
683
|
+
name: s.name,
|
|
684
|
+
path: s.path,
|
|
685
|
+
status: s.status,
|
|
686
|
+
phase: s.phase,
|
|
687
|
+
question: s.question,
|
|
688
|
+
claims_count: s.claims_count,
|
|
689
|
+
active_claims: s.active_claims,
|
|
690
|
+
last_git_activity: s.last_git_activity,
|
|
691
|
+
git_commit_count: s.git_commit_count,
|
|
692
|
+
}));
|
|
693
|
+
|
|
694
|
+
const compilation = {
|
|
695
|
+
compiled_at: new Date().toISOString(), // Non-deterministic metadata (excluded from certificate)
|
|
696
|
+
claims_hash: certificate.input_hash.slice(7, 14),
|
|
697
|
+
compiler_version: compilerVersion,
|
|
698
|
+
status,
|
|
699
|
+
errors: readiness.blockers,
|
|
700
|
+
warnings: readiness.warnings,
|
|
701
|
+
resolved_claims: resolvedClaims,
|
|
702
|
+
conflict_graph: conflictGraph,
|
|
703
|
+
coverage: coverageResult.coverage,
|
|
704
|
+
corroboration: coverageResult.corroboration,
|
|
705
|
+
phase_summary: phaseSummary,
|
|
706
|
+
sprints: sprintSummaries,
|
|
707
|
+
sprint_meta: {
|
|
708
|
+
question: meta.question || '',
|
|
709
|
+
audience: meta.audience || [],
|
|
710
|
+
initiated: meta.initiated || '',
|
|
711
|
+
phase: currentPhase,
|
|
712
|
+
total_claims: claims.length,
|
|
713
|
+
active_claims: claims.filter(c => c.status === 'active').length,
|
|
714
|
+
conflicted_claims: claims.filter(c => c.status === 'conflicted').length,
|
|
715
|
+
superseded_claims: claims.filter(c => c.status === 'superseded').length,
|
|
716
|
+
connectors: meta.connectors || [],
|
|
717
|
+
},
|
|
718
|
+
compilation_certificate: certificate,
|
|
719
|
+
};
|
|
720
|
+
|
|
721
|
+
// Write compilation output
|
|
722
|
+
fs.writeFileSync(compilationOutputPath, JSON.stringify(compilation, null, 2));
|
|
723
|
+
|
|
724
|
+
// Generate topic-map manifest (wheat-manifest.json)
|
|
725
|
+
generateManifest(compilation, baseDir);
|
|
726
|
+
|
|
727
|
+
return compilation;
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
/**
 * Infer the sprint's current phase from its phase summary.
 * Phases are checked latest-first, so the most advanced completed
 * phase wins; with nothing complete the sprint is still at 'init'.
 */
function inferPhase(phaseSummary) {
  // Ordered from most- to least-advanced so the first hit is the latest.
  const latestFirst = ['evaluate', 'prototype', 'research', 'define'];
  const completed = latestFirst.find((p) => Boolean(phaseSummary[p]?.complete));
  return completed ?? 'init';
}
|
|
740
|
+
|
|
741
|
+
// ─── Self-Containment Scanner ────────────────────────────────────────────────

/**
 * Scan directories for HTML files that reference external http(s) resources
 * (script src, link href, CSS @import url, img src — data: URIs excluded).
 * @param {string[]} dirs - Directories to scan (missing ones are skipped).
 * @returns {Array<{file: string, external: string[]}>} One entry per .html file found.
 */
function scanSelfContainment(dirs) {
  const externalRefPattern = /(?:<script[^>]+src=["'](?!data:)|<link[^>]+href=["'](?!#|data:)|@import\s+url\(["']?(?!data:)|<img[^>]+src=["'](?!data:))(https?:\/\/[^"'\s)]+)/gi;
  const findings = [];
  for (const dir of dirs) {
    if (!fs.existsSync(dir)) continue;
    const htmlFiles = fs.readdirSync(dir).filter((name) => name.endsWith('.html'));
    for (const name of htmlFiles) {
      const filePath = path.join(dir, name);
      const raw = fs.readFileSync(filePath, 'utf8');
      // Blank out inline <script>/<style> bodies so URLs that merely appear in
      // JS/CSS text are not reported. The opening tags survive intact, so an
      // external <script src="..."> is still detected.
      const stripped = raw
        .replace(/(<script(?:\s[^>]*)?)>([\s\S]*?)<\/script>/gi, (_, openTag) => `${openTag}></script>`)
        .replace(/(<style(?:\s[^>]*)?)>([\s\S]*?)<\/style>/gi, (_, openTag) => `${openTag}></style>`);
      const external = [...stripped.matchAll(externalRefPattern)].map((match) => match[1]);
      findings.push({ file: filePath, external });
    }
  }
  return findings;
}
|
|
767
|
+
|
|
768
|
+
// ─── CLI ─────────────────────────────────────────────────────────────────────

// True only when this file is executed directly (`node wheat-compiler.js`),
// not when imported as a library — guards every CLI-only side effect below.
const isMain = process.argv[1] && fileURLToPath(import.meta.url) === path.resolve(process.argv[1]);

if (isMain) {

  const args = process.argv.slice(2);

  // --help / -h
  if (args.includes('--help') || args.includes('-h')) {
    console.log(`Wheat Compiler v0.2.0 — Bran-based compilation for research claims

Usage:
  node wheat-compiler.js                       Compile claims.json → compilation.json
  node wheat-compiler.js --summary             Compile and print human-readable summary
  node wheat-compiler.js --check               Compile and exit with error if blocked
  node wheat-compiler.js --gate                Staleness check + readiness gate
  node wheat-compiler.js --scan                Check HTML artifacts for external dependencies
  node wheat-compiler.js --next [N]            Recommend next N actions by priority
  node wheat-compiler.js --diff A B            Diff two compilation.json files
  node wheat-compiler.js --input X --output Y  Compile arbitrary claims file

Options:
  --dir <path>   Resolve all paths relative to <path> instead of script location
  --help, -h     Show this help message
  --json         Output as JSON (works with --summary, --check, --gate, --scan, --next)`);
    process.exit(0);
  }

  // --scan mode: check HTML artifacts for external dependencies.
  // Exits 1 when any scanned HTML file references an external resource.
  if (args.includes('--scan')) {
    const scanDirs = ['output', 'research', 'evidence', 'prototypes'].map(d => path.join(TARGET_DIR, d));
    // Also scan nested dirs one level deep (e.g. prototypes/live-dashboard/)
    const allDirs = [...scanDirs];
    for (const d of scanDirs) {
      if (fs.existsSync(d)) {
        fs.readdirSync(d, { withFileTypes: true })
          .filter(e => e.isDirectory())
          .forEach(e => allDirs.push(path.join(d, e.name)));
      }
    }
    const results = scanSelfContainment(allDirs);
    const clean = results.filter(r => r.external.length === 0);
    const dirty = results.filter(r => r.external.length > 0);

    const scanJsonFlag = args.includes('--json');
    if (scanJsonFlag) {
      console.log(JSON.stringify({ scanned: results.length, clean: clean.length, dirty: dirty.length, files: dirty }, null, 2));
      process.exit(dirty.length > 0 ? 1 : 0);
    }

    console.log(`Self-Containment Scan`);
    console.log('='.repeat(50));
    console.log(`Scanned: ${results.length} HTML files`);
    console.log(`Clean: ${clean.length}`);
    console.log(`Dirty: ${dirty.length}`);
    if (dirty.length > 0) {
      console.log('\nExternal dependencies found:');
      dirty.forEach(r => {
        console.log(` ${r.file}:`);
        r.external.forEach(url => console.log(` → ${url}`));
      });
      process.exit(1);
    } else {
      console.log('\n✓ All HTML artifacts are self-contained.');
    }
    process.exit(0);
  }

  // --diff mode: compare two compilation files
  if (args.includes('--diff')) {
    const diffIdx = args.indexOf('--diff');
    const fileA = args[diffIdx + 1];
    const fileB = args[diffIdx + 2];
    if (!fileA || !fileB) {
      console.error('Usage: node wheat-compiler.js --diff <before.json> <after.json>');
      process.exit(1);
    }
    let before, after;
    try { before = JSON.parse(fs.readFileSync(fileA, 'utf8')); }
    catch (e) { console.error(`Error: ${fileA} is not valid JSON — ${e.message}`); process.exit(1); }
    try { after = JSON.parse(fs.readFileSync(fileB, 'utf8')); }
    catch (e) { console.error(`Error: ${fileB} is not valid JSON — ${e.message}`); process.exit(1); }
    const delta = diffCompilations(before, after);
    console.log(JSON.stringify(delta, null, 2));
    process.exit(0);
  }

  // Parse --input and --output flags
  let inputPath = null;
  let outputPath = null;
  const inputIdx = args.indexOf('--input');
  if (inputIdx !== -1 && args[inputIdx + 1]) {
    inputPath = path.resolve(args[inputIdx + 1]);
  }
  const outputIdx = args.indexOf('--output');
  if (outputIdx !== -1 && args[outputIdx + 1]) {
    outputPath = path.resolve(args[outputIdx + 1]);
  }

  // Every remaining mode compiles first, then inspects the result.
  const compilation = compile(inputPath, outputPath);
  const jsonFlag = args.includes('--json');

  if (args.includes('--summary')) {
    const c = compilation;
    const statusIcon = c.status === 'ready' ? '\u2713' : '\u2717'; // ✓ / ✗
    console.log(`\nWheat Compiler v${c.compiler_version}`);
    console.log(`${'='.repeat(50)}`);
    console.log(`Sprint: ${c.sprint_meta.question || '(not initialized)'}`);
    console.log(`Phase: ${c.sprint_meta.phase}`);
    console.log(`Status: ${statusIcon} ${c.status.toUpperCase()}`);
    console.log(`Claims: ${c.sprint_meta.total_claims} total, ${c.sprint_meta.active_claims} active, ${c.sprint_meta.conflicted_claims} conflicted`);

    if (c.sprints && c.sprints.length > 0) {
      console.log(`Sprints: ${c.sprints.length} detected`);
      c.sprints.forEach(s => {
        const icon = s.status === 'active' ? '>>' : ' ';
        console.log(` ${icon} [${s.status.toUpperCase().padEnd(8)}] ${s.name} (${s.phase}, ${s.claims_count} claims)`);
      });
    }
    console.log();

    if (Object.keys(c.coverage).length > 0) {
      console.log('Coverage:');
      Object.entries(c.coverage).forEach(([topic, entry]) => {
        // Fixed-width 10-cell bar: filled cells = claim count (capped at 10).
        const bar = '\u2588'.repeat(Math.min(entry.claims, 10)) + '\u2591'.repeat(Math.max(0, 10 - entry.claims));
        const constraintDominated = (entry.constraint_count || 0) / entry.claims > 0.5;
        // ✓ strong / ~ moderate / ℹ constraint-dominated / ⚠ otherwise weak
        const icon = entry.status === 'strong' ? '\u2713' : entry.status === 'moderate' ? '~' : constraintDominated ? '\u2139' : '\u26A0';
        const srcInfo = entry.source_count !== undefined ? ` [${entry.source_count} src]` : '';
        const typeInfo = entry.type_diversity !== undefined ? ` [${entry.type_diversity}/${VALID_TYPES.length} types]` : '';
        console.log(` ${icon} ${topic.padEnd(20)} ${bar} ${entry.max_evidence} (${entry.claims} claims)${srcInfo}${typeInfo}`);
      });
      console.log();
    }

    if (c.corroboration && Object.keys(c.corroboration).length > 0) {
      const corroborated = Object.entries(c.corroboration).filter(([, v]) => v > 0);
      if (corroborated.length > 0) {
        console.log('Corroborated claims:');
        corroborated.forEach(([id, count]) => {
          console.log(` ${id}: ${count} supporting witness(es)`);
        });
        console.log();
      }
    }

    if (c.errors.length > 0) {
      console.log('Errors:');
      c.errors.forEach(e => console.log(` ${e.code}: ${e.message}`));
      console.log();
    }

    if (c.warnings.length > 0) {
      console.log('Warnings:');
      c.warnings.forEach(w => console.log(` ${w.code}: ${w.message}`));
      console.log();
    }

    console.log(`Certificate: ${c.compilation_certificate.input_hash.slice(0, 20)}...`);

    // NOTE(review): with --summary --json the human-readable summary above is
    // still printed before the JSON dump — confirm this double output is intended.
    if (jsonFlag) {
      console.log(JSON.stringify(c, null, 2));
    }
  }

  if (args.includes('--check')) {
    if (compilation.status === 'blocked') {
      if (jsonFlag) {
        console.log(JSON.stringify({ status: 'blocked', errors: compilation.errors }, null, 2));
      } else {
        console.error(`Compilation blocked: ${compilation.errors.length} error(s)`);
        compilation.errors.forEach(e => console.error(` ${e.code}: ${e.message}`));
      }
      process.exit(1);
    } else {
      if (jsonFlag) {
        console.log(JSON.stringify({ status: 'ready' }, null, 2));
      } else {
        console.log('Compilation ready.');
      }
      process.exit(0);
    }
  }

  if (args.includes('--gate')) {
    // Staleness check: is compilation.json older than claims.json?
    const compilationPath = path.join(TARGET_DIR, config.compiler.compilation);
    const claimsPath = path.join(TARGET_DIR, config.compiler.claims);

    if (fs.existsSync(compilationPath) && fs.existsSync(claimsPath)) {
      const compilationMtime = fs.statSync(compilationPath).mtimeMs;
      const claimsMtime = fs.statSync(claimsPath).mtimeMs;

      if (claimsMtime > compilationMtime) {
        console.error('Gate FAILED: compilation.json is stale. Recompiling now...');
        // The compile() call above already refreshed it, so this is informational
      }
    }

    if (compilation.status === 'blocked') {
      if (jsonFlag) {
        console.log(JSON.stringify({ gate: 'failed', errors: compilation.errors }, null, 2));
      } else {
        console.error(`Gate FAILED: ${compilation.errors.length} blocker(s)`);
        compilation.errors.forEach(e => console.error(` ${e.code}: ${e.message}`));
      }
      process.exit(1);
    }

    if (jsonFlag) {
      console.log(JSON.stringify({ gate: 'passed', active_claims: compilation.sprint_meta.active_claims, topics: Object.keys(compilation.coverage).length, hash: compilation.claims_hash }, null, 2));
    } else {
      // Print a one-line gate pass for audit
      console.log(`Gate PASSED: ${compilation.sprint_meta.active_claims} claims, ${Object.keys(compilation.coverage).length} topics, hash ${compilation.claims_hash}`);
    }
    process.exit(0);
  }

  // ─── --next: Data-driven next action recommendation ──────────────────────────
  if (args.includes('--next')) {
    // NOTE(review): parseInt is called without an explicit radix; a missing or
    // non-numeric argument yields NaN, which falls back to 1 via || — confirm intended.
    const n = parseInt(args[args.indexOf('--next') + 1]) || 1;
    const actions = computeNextActions(compilation);
    const top = actions.slice(0, n);

    if (top.length === 0) {
      console.log('\nNo actions recommended — sprint looks complete.');
      console.log('Consider: /brief to compile, /present to share, /calibrate after shipping.');
    } else {
      console.log(`\nNext ${top.length === 1 ? 'action' : top.length + ' actions'} (by Bran priority):`);
      console.log('='.repeat(50));
      top.forEach((a, i) => {
        console.log(`\n${i + 1}. [${a.priority}] ${a.command}`);
        console.log(` ${a.reason}`);
        console.log(` Impact: ${a.impact}`);
      });
      console.log();
    }
    // Also output as JSON for programmatic use
    if (args.includes('--json')) {
      console.log(JSON.stringify(top, null, 2));
    }
    process.exit(0);
  }

} // end if (isMain)
|
|
1012
|
+
|
|
1013
|
+
/**
 * Suggest next actions based on compilation state (gaps, conflicts, weak evidence).
 * Actions are scored, sorted descending by score, and deduplicated by the first
 * two tokens of the command.
 * @param {object} comp - A compilation.json object
 * @returns {Array<{priority: string, score: number, command: string, reason: string, impact: string}>} Ordered action suggestions
 */
function computeNextActions(comp) {
  const actions = [];
  const coverage = comp.coverage || {};
  const conflicts = comp.conflict_graph || { resolved: [], unresolved: [] };
  const phase = comp.sprint_meta?.phase || 'init';
  const phases = comp.phase_summary || {};
  const warnings = comp.warnings || []; // NOTE(review): unused below — confirm before removing
  const corroboration = comp.corroboration || {};

  // ── Priority 1: Unresolved conflicts (blocks compilation) ──────────────
  if (conflicts.unresolved.length > 0) {
    conflicts.unresolved.forEach(c => {
      actions.push({
        priority: 'P0-BLOCKER',
        score: 1000,
        command: `/resolve ${c.claim_a} ${c.claim_b}`,
        reason: `Unresolved conflict between ${c.claim_a} and ${c.claim_b} — blocks compilation.`,
        impact: 'Unblocks compilation. Status changes from BLOCKED to READY.',
      });
    });
  }

  // ── Priority 2: Phase progression ──────────────────────────────────────
  const phaseFlow = ['init', 'define', 'research', 'prototype', 'evaluate'];
  const phaseIdx = phaseFlow.indexOf(phase);

  if (phase === 'init') {
    actions.push({
      priority: 'P1-PHASE',
      score: 900,
      command: '/init',
      reason: 'Sprint not initialized. No question, constraints, or audience defined.',
      impact: 'Establishes sprint question and seeds constraint claims.',
    });
  }

  // If in define, push toward research
  if (phase === 'define' && (!phases.research || phases.research.claims === 0)) {
    // Find topics with only constraint claims
    const constraintTopics = Object.entries(coverage)
      .filter(([, e]) => e.constraint_count === e.claims && e.claims > 0)
      .map(([t]) => t);
    // Pick the thinnest non-constraint topic as the research target.
    const researchTarget = Object.entries(coverage)
      .sort((a, b) => a[1].claims - b[1].claims)
      .filter(([t]) => !constraintTopics.includes(t) || coverage[t].claims <= 1)
      .map(([t]) => t)[0];

    actions.push({
      priority: 'P1-PHASE',
      score: 850,
      command: `/research "${researchTarget || 'core topic'}"`,
      reason: `Phase is define with no research claims yet. Need to advance to research.`,
      impact: 'Adds web-level evidence. Moves sprint into research phase.',
    });
  }

  // If lots of research but no prototypes
  if (phaseIdx >= 2 && (!phases.prototype || phases.prototype.claims === 0)) {
    // Find topic with most web claims — best candidate to upgrade
    const webHeavy = Object.entries(coverage)
      .filter(([, e]) => e.max_evidence === 'web' && e.claims >= 2)
      .sort((a, b) => b[1].claims - a[1].claims);

    if (webHeavy.length > 0) {
      actions.push({
        priority: 'P1-PHASE',
        score: 800,
        command: `/prototype "${webHeavy[0][0]}"`,
        reason: `Topic "${webHeavy[0][0]}" has ${webHeavy[0][1].claims} claims at web-level. Prototyping upgrades to tested.`,
        impact: `Evidence upgrade: web → tested for ${webHeavy[0][0]}. Enters prototype phase.`,
      });
    }
  }

  // ── Priority 3: Weak evidence topics ───────────────────────────────────
  // Numeric evidence ladder; `expected` is the minimum rank the current
  // phase should have reached.
  const evidenceRank = { stated: 1, web: 2, documented: 3, tested: 4, production: 5 };
  const phaseExpectation = { define: 1, research: 2, prototype: 4, evaluate: 4 };
  const expected = phaseExpectation[phase] || 2;

  Object.entries(coverage).forEach(([topic, entry]) => {
    const rank = evidenceRank[entry.max_evidence] || 1;
    const constraintRatio = (entry.constraint_count || 0) / entry.claims;

    // Skip constraint-dominated topics
    if (constraintRatio > 0.5) return;

    if (rank < expected) {
      const gap = expected - rank;
      // Larger gaps and more claims score higher (more value in closing them).
      const score = 600 + (gap * 50) + entry.claims * 5;

      if (rank <= 2 && expected >= 4) {
        actions.push({
          priority: 'P2-EVIDENCE',
          score,
          command: `/prototype "${topic}"`,
          reason: `Topic "${topic}" is at ${entry.max_evidence} (${entry.claims} claims) but phase is ${phase}. Needs tested-level evidence.`,
          impact: `Evidence upgrade: ${entry.max_evidence} → tested. Closes coverage gap.`,
        });
      } else if (rank <= 1) {
        actions.push({
          priority: 'P2-EVIDENCE',
          score,
          command: `/research "${topic}"`,
          reason: `Topic "${topic}" is at ${entry.max_evidence} (${entry.claims} claims). Needs deeper research.`,
          impact: `Evidence upgrade: ${entry.max_evidence} → web/documented.`,
        });
      }
    }
  });

  // ── Priority 4: Type monoculture ───────────────────────────────────────
  Object.entries(coverage).forEach(([topic, entry]) => {
    const constraintRatio = (entry.constraint_count || 0) / entry.claims;
    if (constraintRatio > 0.5) return;

    if ((entry.type_diversity || 0) < 2 && entry.claims >= 2) {
      const missing = (entry.missing_types || []).slice(0, 3).join(', ');
      actions.push({
        priority: 'P3-DIVERSITY',
        score: 400 + entry.claims * 3,
        command: `/challenge ${entry.claim_ids?.[0] || topic}`,
        reason: `Topic "${topic}" has ${entry.claims} claims but only ${entry.type_diversity} type(s). Missing: ${missing}.`,
        impact: 'Adds risk/recommendation claims. Improves type diversity.',
      });
    }

    // Missing risk claims specifically
    if (entry.claims >= 3 && !(entry.types || []).includes('risk')) {
      actions.push({
        priority: 'P3-DIVERSITY',
        score: 380,
        command: `/challenge ${entry.claim_ids?.[0] || topic}`,
        reason: `Topic "${topic}" has ${entry.claims} claims but zero risks. What could go wrong?`,
        impact: 'Adds adversarial risk claims. Stress-tests assumptions.',
      });
    }
  });

  // ── Priority 5: Echo chambers ──────────────────────────────────────────
  // Topics with several claims but a single origin need outside sources.
  Object.entries(coverage).forEach(([topic, entry]) => {
    if (entry.claims >= 3 && (entry.source_count || 1) === 1) {
      actions.push({
        priority: 'P4-CORROBORATION',
        score: 300 + entry.claims * 2,
        command: `/witness ${entry.claim_ids?.[0] || ''} <url>`,
        reason: `Topic "${topic}" has ${entry.claims} claims all from "${(entry.source_origins || ['unknown'])[0]}". Single source.`,
        impact: 'Adds external corroboration. Breaks echo chamber.',
      });
    }
  });

  // ── Priority 6: Zero corroboration on high-value claims ────────────────
  const uncorroborated = Object.entries(corroboration)
    .filter(([, count]) => count === 0)
    .map(([id]) => id);

  // Find tested claims with zero corroboration — highest value to witness
  if (uncorroborated.length > 0) {
    const testedUncorroborated = (comp.resolved_claims || [])
      .filter(c => c.evidence === 'tested' && uncorroborated.includes(c.id))
      .slice(0, 1);

    if (testedUncorroborated.length > 0) {
      actions.push({
        priority: 'P4-CORROBORATION',
        score: 250,
        command: `/witness ${testedUncorroborated[0].id} <url>`,
        reason: `Tested claim "${testedUncorroborated[0].id}" has zero external corroboration.`,
        impact: 'Adds external validation to highest-evidence claim.',
      });
    }
  }

  // ── Priority 7: Sprint completion suggestions ──────────────────────────
  const hasEvaluate = phases.evaluate && phases.evaluate.claims > 0;
  // True when every non-constraint-dominated topic reached tested (rank 4) or better.
  const allTopicsTested = Object.entries(coverage)
    .filter(([, e]) => (e.constraint_count || 0) / e.claims <= 0.5)
    .every(([, e]) => evidenceRank[e.max_evidence] >= 4);

  if (hasEvaluate && allTopicsTested && conflicts.unresolved.length === 0) {
    actions.push({
      priority: 'P5-SHIP',
      score: 100,
      command: '/brief',
      reason: 'All non-constraint topics at tested evidence, evaluate phase complete, 0 conflicts.',
      impact: 'Compiles the decision document. Sprint ready to ship.',
    });
  } else if (!hasEvaluate && phaseIdx >= 3) {
    actions.push({
      priority: 'P1-PHASE',
      score: 750,
      command: '/evaluate',
      reason: `Phase is ${phase} but no evaluation claims exist. Time to test claims against reality.`,
      impact: 'Validates claims, resolves conflicts, produces comparison dashboard.',
    });
  }

  // Sort by score descending
  actions.sort((a, b) => b.score - a.score);

  // Deduplicate by command
  // Key is the command's first two tokens, e.g. `/research "topic"`.
  const seen = new Set();
  return actions.filter(a => {
    const key = a.command.split(' ').slice(0, 2).join(' ');
    if (seen.has(key)) return false;
    seen.add(key);
    return true;
  });
}
|
|
1227
|
+
|
|
1228
|
+
// Export for use as a library
|
|
1229
|
+
export { compile, diffCompilations, computeNextActions, generateManifest, loadConfig, detectSprints, EVIDENCE_TIERS, VALID_TYPES };
|