@grainulation/harvest 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +102 -0
- package/bin/harvest.js +284 -0
- package/lib/analyzer.js +88 -0
- package/lib/calibration.js +153 -0
- package/lib/dashboard.js +126 -0
- package/lib/decay.js +124 -0
- package/lib/farmer.js +107 -0
- package/lib/patterns.js +199 -0
- package/lib/report.js +125 -0
- package/lib/server.js +494 -0
- package/lib/templates.js +80 -0
- package/lib/velocity.js +177 -0
- package/package.json +51 -0
- package/public/index.html +982 -0
- package/templates/dashboard.html +1230 -0
- package/templates/retrospective.html +315 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 grainulation contributors
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
# @grainulation/harvest
|
|
2
|
+
|
|
3
|
+
**Are your decisions getting better?**
|
|
4
|
+
|
|
5
|
+
Harvest is the analytics and retrospective layer for research sprints. It looks across sprints to find patterns, score predictions, and surface knowledge that's gone stale.
|
|
6
|
+
|
|
7
|
+
Learn from every decision you've made.
|
|
8
|
+
|
|
9
|
+
## What it does
|
|
10
|
+
|
|
11
|
+
- **Cross-sprint analysis** -- claim type distributions, evidence quality, recurring themes
|
|
12
|
+
- **Prediction calibration** -- score past estimates against actual outcomes
|
|
13
|
+
- **Decision patterns** -- what research approaches lead to better results?
|
|
14
|
+
- **Knowledge decay** -- which old claims need refreshing before they mislead you?
|
|
15
|
+
- **Sprint velocity** -- how long do sprints take, where do they stall?
|
|
16
|
+
- **Retrospective reports** -- dark-themed HTML reports for the team
|
|
17
|
+
|
|
18
|
+
## Install
|
|
19
|
+
|
|
20
|
+
```sh
|
|
21
|
+
npm install @grainulation/harvest
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
Or run directly:
|
|
25
|
+
|
|
26
|
+
```sh
|
|
27
|
+
npx @grainulation/harvest analyze ./sprints/
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Usage
|
|
31
|
+
|
|
32
|
+
```sh
|
|
33
|
+
# Cross-sprint claim analysis
|
|
34
|
+
harvest analyze ./sprints/
|
|
35
|
+
|
|
36
|
+
# Score predictions against outcomes
|
|
37
|
+
harvest calibrate ./sprints/
|
|
38
|
+
|
|
39
|
+
# Detect decision patterns and anti-patterns
|
|
40
|
+
harvest patterns ./sprints/
|
|
41
|
+
|
|
42
|
+
# Find stale claims that need refreshing
|
|
43
|
+
harvest decay ./sprints/ --days 60
|
|
44
|
+
|
|
45
|
+
# Sprint timing and phase analysis
|
|
46
|
+
harvest velocity ./sprints/
|
|
47
|
+
|
|
48
|
+
# Generate a full retrospective HTML report
|
|
49
|
+
harvest report ./sprints/ -o retrospective.html
|
|
50
|
+
|
|
51
|
+
# All analyses in one pass
|
|
52
|
+
harvest trends ./sprints/ --json
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Data format
|
|
56
|
+
|
|
57
|
+
Harvest reads standard wheat sprint data:
|
|
58
|
+
|
|
59
|
+
- `claims.json` -- array of typed claims with `id`, `type`, `evidence`, `status`, `text`, `created`, etc.
|
|
60
|
+
- `compilation.json` -- compiled sprint state (optional, enriches analysis)
|
|
61
|
+
- Git history on `claims.json` -- used for velocity and timing analysis
|
|
62
|
+
|
|
63
|
+
Point harvest at a directory containing sprint subdirectories, or at a single sprint directory:
|
|
64
|
+
|
|
65
|
+
```
|
|
66
|
+
sprints/
|
|
67
|
+
sprint-alpha/
|
|
68
|
+
claims.json
|
|
69
|
+
compilation.json
|
|
70
|
+
sprint-beta/
|
|
71
|
+
claims.json
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
## Design
|
|
75
|
+
|
|
76
|
+
- **Zero dependencies** -- Node built-in modules only (fs, path, child_process)
|
|
77
|
+
- **Reads, never writes** -- harvest is a pure analysis tool; it won't modify your sprint data
|
|
78
|
+
- **Git-aware** -- uses git log timestamps for velocity analysis when available
|
|
79
|
+
- **Composable** -- each module (analyzer, calibration, patterns, decay, velocity) works independently
|
|
80
|
+
|
|
81
|
+
## Claim types it understands
|
|
82
|
+
|
|
83
|
+
| Type | What it means |
|
|
84
|
+
|---|---|
|
|
85
|
+
| `constraint` | Hard requirements, non-negotiable |
|
|
86
|
+
| `factual` | Verifiable statements |
|
|
87
|
+
| `estimate` | Predictions, projections, ranges |
|
|
88
|
+
| `risk` | Potential failure modes |
|
|
89
|
+
| `recommendation` | Proposed courses of action |
|
|
90
|
+
| `feedback` | Stakeholder input |
|
|
91
|
+
|
|
92
|
+
## Evidence tiers (lowest to highest)
|
|
93
|
+
|
|
94
|
+
1. `stated` -- someone said it
|
|
95
|
+
2. `web` -- found online
|
|
96
|
+
3. `documented` -- in source code or official docs
|
|
97
|
+
4. `tested` -- verified via prototype or benchmark
|
|
98
|
+
5. `production` -- measured from live systems
|
|
99
|
+
|
|
100
|
+
## License
|
|
101
|
+
|
|
102
|
+
MIT
|
package/bin/harvest.js
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
'use strict';
|
|
4
|
+
|
|
5
|
+
const path = require('node:path');
|
|
6
|
+
const fs = require('node:fs');
|
|
7
|
+
|
|
8
|
+
const { analyze } = require('../lib/analyzer.js');
|
|
9
|
+
const { calibrate } = require('../lib/calibration.js');
|
|
10
|
+
const { detectPatterns } = require('../lib/patterns.js');
|
|
11
|
+
const { checkDecay } = require('../lib/decay.js');
|
|
12
|
+
const { measureVelocity } = require('../lib/velocity.js');
|
|
13
|
+
const { generateReport } = require('../lib/report.js');
|
|
14
|
+
const { connect: farmerConnect } = require('../lib/farmer.js');
|
|
15
|
+
|
|
16
|
+
// Verbose diagnostics are enabled once, globally, via --verbose / -v.
const verbose = process.argv.includes('--verbose') || process.argv.includes('-v');

/**
 * Emit a timestamped diagnostic line on stderr, but only when verbose
 * mode was requested on the command line. No-op (and no Date work)
 * otherwise.
 */
function vlog(...parts) {
  if (verbose) {
    process.stderr.write(`[${new Date().toISOString()}] harvest: ${parts.join(' ')}\n`);
  }
}
|
|
22
|
+
|
|
23
|
+
const USAGE = `
|
|
24
|
+
harvest -- learn from every decision you've made
|
|
25
|
+
|
|
26
|
+
Usage:
|
|
27
|
+
harvest analyze <sprints-dir> Cross-sprint claim analysis
|
|
28
|
+
harvest calibrate <sprints-dir> Score predictions against outcomes
|
|
29
|
+
harvest patterns <sprints-dir> Detect decision patterns
|
|
30
|
+
harvest decay <sprints-dir> Find claims that need refreshing
|
|
31
|
+
harvest velocity <sprints-dir> Sprint timing and phase analysis
|
|
32
|
+
harvest report <sprints-dir> [-o <output>] Generate retrospective HTML
|
|
33
|
+
harvest trends <sprints-dir> All analyses in one pass
|
|
34
|
+
harvest serve [--port 9096] [--root <sprints-dir>] Start the dashboard UI
|
|
35
|
+
harvest connect farmer [--url <url>] Configure farmer integration
|
|
36
|
+
|
|
37
|
+
Options:
|
|
38
|
+
-o, --output <path> Output file path (default: stdout or ./retrospective.html)
|
|
39
|
+
-h, --help Show this help
|
|
40
|
+
--json Output as JSON instead of text
|
|
41
|
+
--days <n> Decay threshold in days (default: 90)
|
|
42
|
+
`.trim();
|
|
43
|
+
|
|
44
|
+
/**
 * Parse process.argv into the normalized options object used by main().
 *
 * Returned shape: { command, dir, output, json, days }
 * - command: first positional argument (e.g. "analyze"), or null
 * - dir:     second positional argument resolved to an absolute path, or
 *            null when absent or flag-like
 * - output:  -o/--output value resolved to an absolute path, or null
 * - json:    true when --json is present
 * - days:    --days <n> as an integer (default 90)
 *
 * Side effect: prints USAGE and exits(0) when no args or -h/--help is given.
 */
function parseArgs(argv) {
  const args = argv.slice(2);
  const parsed = { command: null, dir: null, output: null, json: false, days: 90 };

  if (args.length === 0 || args.includes('-h') || args.includes('--help')) {
    console.log(USAGE);
    process.exit(0);
  }

  parsed.command = args[0];
  // Second positional is the sprints directory unless it looks like a flag.
  parsed.dir = (args[1] && !args[1].startsWith('-')) ? path.resolve(args[1]) : null;

  for (let i = 2; i < args.length; i++) {
    if ((args[i] === '-o' || args[i] === '--output') && args[i + 1]) {
      parsed.output = path.resolve(args[++i]);
    } else if (args[i] === '--json') {
      parsed.json = true;
    } else if (args[i] === '--days' && args[i + 1]) {
      // FIX: guard against non-numeric input. Previously "--days abc"
      // produced NaN, which silently broke every decay-threshold
      // comparison downstream; keep the default instead and warn.
      const days = Number.parseInt(args[++i], 10);
      if (Number.isNaN(days)) {
        console.error(`harvest: invalid --days value: ${args[i]}; using default of ${parsed.days}`);
      } else {
        parsed.days = days;
      }
    }
  }

  return parsed;
}
|
|
68
|
+
|
|
69
|
+
/**
 * Discover and load every sprint under `dir`.
 *
 * A "sprint" is any directory containing a claims.json. The search covers
 * the root itself plus up to two levels of subdirectories
 * (sprints/<name>/ and sprints/<group>/<name>/), skipping dot-directories.
 * Exits the process with an error when the directory is missing or when
 * no sprint data is found at all.
 */
function loadSprintData(dir) {
  if (!dir || !fs.existsSync(dir)) {
    console.error(`harvest: directory not found: ${dir}`);
    process.exit(1);
  }

  const found = [];

  // The root itself may be a single sprint.
  if (fs.existsSync(path.join(dir, 'claims.json'))) {
    found.push(loadSingleSprint(dir));
  }

  try {
    for (const child of fs.readdirSync(dir, { withFileTypes: true })) {
      if (!child.isDirectory() || child.name.startsWith('.')) continue;
      const childDir = path.join(dir, child.name);
      if (fs.existsSync(path.join(childDir, 'claims.json'))) {
        found.push(loadSingleSprint(childDir));
      }
      // One level deeper, to catch sprints/<group>/<name>/claims.json.
      try {
        for (const grandchild of fs.readdirSync(childDir, { withFileTypes: true })) {
          if (!grandchild.isDirectory() || grandchild.name.startsWith('.')) continue;
          const grandchildDir = path.join(childDir, grandchild.name);
          if (fs.existsSync(path.join(grandchildDir, 'claims.json'))) {
            found.push(loadSingleSprint(grandchildDir));
          }
        }
      } catch { /* unreadable subdirectory -- skip it */ }
    }
  } catch { /* unreadable root -- fall through to the emptiness check */ }

  if (found.length === 0) {
    console.error(`harvest: no sprint data found in ${dir}`);
    console.error('Expected claims.json in the directory or its subdirectories.');
    process.exit(1);
  }

  return found;
}
|
|
118
|
+
|
|
119
|
+
/**
 * Load one sprint directory into a plain record.
 *
 * Returns { name, dir, claims, compilation, gitLog } where:
 * - claims: parsed claims.json array ([] on parse failure, with an error
 *   printed to stderr);
 * - compilation: parsed compilation.json or null (missing/unparseable is
 *   silently tolerated -- it only enriches analysis);
 * - gitLog: [{ hash, date, message }] from `git log` over claims.json,
 *   or [] when git is unavailable / the dir is not a repository.
 */
function loadSingleSprint(dir) {
  const sprint = {
    name: path.basename(dir),
    dir,
    claims: [],
    compilation: null,
    gitLog: null,
  };

  const claimsPath = path.join(dir, 'claims.json');
  try {
    sprint.claims = JSON.parse(fs.readFileSync(claimsPath, 'utf8'));
    if (!Array.isArray(sprint.claims)) {
      // Handle { claims: [...] } wrapper
      // (a null/odd JSON value throws here and is caught below).
      sprint.claims = sprint.claims.claims || [];
    }
  } catch (e) {
    // Leave claims as [] so callers can still aggregate the other sprints.
    console.error(`harvest: could not parse ${claimsPath}: ${e.message}`);
  }

  const compilationPath = path.join(dir, 'compilation.json');
  if (fs.existsSync(compilationPath)) {
    try {
      sprint.compilation = JSON.parse(fs.readFileSync(compilationPath, 'utf8'));
    } catch (e) {
      // skip -- compilation.json is optional enrichment only
    }
  }

  // Try to read git log for the sprint directory
  // (used downstream for velocity/timing analysis; best-effort only).
  try {
    const { execSync } = require('node:child_process');
    sprint.gitLog = execSync(
      `git log --oneline --format="%H|%ai|%s" -- claims.json`,
      // timeout guards against git hanging; stderr is piped so git noise
      // does not leak into harvest's own output.
      { cwd: dir, encoding: 'utf8', timeout: 5000, stdio: ['pipe', 'pipe', 'pipe'] }
    ).trim().split('\n').filter(Boolean).map(line => {
      // Commit subjects may themselves contain '|', hence the re-join.
      const [hash, date, ...msg] = line.split('|');
      return { hash, date, message: msg.join('|') };
    });
  } catch (e) {
    // Not a git repo, git missing, or timeout: velocity analysis degrades.
    sprint.gitLog = [];
  }

  return sprint;
}
|
|
164
|
+
|
|
165
|
+
/**
 * Print an analysis result to stdout. Plain strings are echoed verbatim
 * unless --json was requested; everything else (and any value under
 * --json) is pretty-printed as 2-space-indented JSON.
 */
function output(result, opts) {
  const wantsJson = opts.json || typeof result !== 'string';
  console.log(wantsJson ? JSON.stringify(result, null, 2) : result);
}
|
|
174
|
+
|
|
175
|
+
/**
 * CLI entry point. Parses argv, then dispatches:
 * - analysis commands (analyze/calibrate/patterns/decay/velocity/report/
 *   trends) load sprint data and print or write their results;
 * - `help` prints usage;
 * - `connect` is forwarded to the farmer integration;
 * - `serve` launches lib/server.js in a child process.
 */
async function main() {
  const opts = parseArgs(process.argv);
  vlog('startup', `command=${opts.command || '(none)'}`, `dir=${opts.dir || 'none'}`);

  // Each handler loads sprint data itself so the load error path
  // (missing dir / no claims.json) runs only for commands that need it.
  const commands = {
    analyze() {
      output(analyze(loadSprintData(opts.dir)), opts);
    },
    calibrate() {
      output(calibrate(loadSprintData(opts.dir)), opts);
    },
    patterns() {
      output(detectPatterns(loadSprintData(opts.dir)), opts);
    },
    decay() {
      output(checkDecay(loadSprintData(opts.dir), { thresholdDays: opts.days }), opts);
    },
    velocity() {
      output(measureVelocity(loadSprintData(opts.dir)), opts);
    },
    report() {
      const sprints = loadSprintData(opts.dir);
      // The report module receives the analysis functions rather than
      // importing them, keeping lib/report.js decoupled.
      const html = generateReport(sprints, {
        analyzeFn: analyze,
        calibrateFn: calibrate,
        patternsFn: detectPatterns,
        decayFn: checkDecay,
        velocityFn: measureVelocity,
      });
      const outPath = opts.output || path.join(process.cwd(), 'retrospective.html');
      fs.writeFileSync(outPath, html, 'utf8');
      console.log(`Retrospective written to ${outPath}`);
    },
    trends() {
      const sprints = loadSprintData(opts.dir);
      output({
        analysis: analyze(sprints),
        calibration: calibrate(sprints),
        patterns: detectPatterns(sprints),
        decay: checkDecay(sprints, { thresholdDays: opts.days }),
        velocity: measureVelocity(sprints),
      }, opts);
    },
  };

  if (opts.command === 'help') {
    console.log(USAGE);
    process.exit(0);
  }

  if (opts.command === 'connect') {
    // Forward everything after "connect" to the farmer handler.
    const connectArgs = process.argv.slice(process.argv.indexOf('connect') + 1);
    await farmerConnect(opts.dir || process.cwd(), connectArgs);
    return;
  }

  if (opts.command === 'serve') {
    // FIX: use spawn with stdio inheritance instead of execFile. execFile
    // ignores the `stdio` option and buffers the child's output up to
    // `maxBuffer` (1 MiB by default), so the long-running dashboard server
    // was killed as soon as it logged enough output.
    const { spawn } = require('node:child_process');
    const serverPath = path.join(__dirname, '..', 'lib', 'server.js');
    const serverArgs = [];
    // Forward --port and --root; fall back to the positional dir for --root.
    const portIdx = process.argv.indexOf('--port');
    if (portIdx !== -1 && process.argv[portIdx + 1]) {
      serverArgs.push('--port', process.argv[portIdx + 1]);
    }
    const rootIdx = process.argv.indexOf('--root');
    if (rootIdx !== -1 && process.argv[rootIdx + 1]) {
      serverArgs.push('--root', process.argv[rootIdx + 1]);
    } else if (opts.dir) {
      serverArgs.push('--root', opts.dir);
    }
    const child = spawn('node', [serverPath, ...serverArgs], {
      // Child shares our stdout/stderr directly -- no piping, no buffering.
      stdio: 'inherit',
      env: process.env,
    });
    child.on('error', (err) => {
      console.error(`harvest: error starting server: ${err.message}`);
      process.exit(1);
    });
    child.on('exit', (code) => process.exit(code || 0));
    process.on('SIGTERM', () => child.kill('SIGTERM'));
    process.on('SIGINT', () => child.kill('SIGINT'));
    return;
  }

  if (!commands[opts.command]) {
    console.error(`harvest: unknown command: ${opts.command}`);
    console.error(`Run "harvest --help" for usage.`);
    process.exit(1);
  }

  commands[opts.command]();
}

// FIX: main() is async; a bare `main();` left a floating promise, so a
// rejection inside `connect` surfaced as an unhandled promise rejection.
main().catch((err) => {
  console.error(`harvest: ${err.message}`);
  process.exit(1);
});
|
package/lib/analyzer.js
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Cross-sprint claim analysis.
|
|
5
|
+
*
|
|
6
|
+
* Looks across multiple sprints to find:
|
|
7
|
+
* - Claim type distribution (what kinds of findings dominate?)
|
|
8
|
+
* - Evidence tier distribution (how well-supported are claims?)
|
|
9
|
+
* - Cross-sprint themes (recurring topics or concerns)
|
|
10
|
+
* - Claim density per sprint (productivity signal)
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
/**
 * Cross-sprint claim analysis.
 *
 * Aggregates claims from every sprint and reports:
 * - type / evidence / status distributions (overall and per sprint);
 * - tag frequency across sprints (recurring themes);
 * - weakly-evidenced claims (evidence tier 'stated' or 'web');
 * - "monocultures": sprints with more than 3 claims where a single claim
 *   type accounts for over 70% of them.
 */
function analyze(sprints) {
  const allClaims = [];
  for (const sprint of sprints) {
    for (const claim of sprint.claims) {
      allClaims.push({ ...claim, _sprint: sprint.name });
    }
  }

  const typeDistribution = countBy(allClaims, 'type');
  const evidenceDistribution = countBy(allClaims, 'evidence');
  const statusDistribution = countBy(allClaims, 'status');

  // Per-sprint breakdown (productivity / composition signal).
  const perSprint = sprints.map((sprint) => ({
    name: sprint.name,
    claimCount: sprint.claims.length,
    types: countBy(sprint.claims, 'type'),
    evidence: countBy(sprint.claims, 'evidence'),
    statuses: countBy(sprint.claims, 'status'),
  }));

  // Recurring themes, approximated by tag frequency.
  const tagFrequency = {};
  for (const claim of allClaims) {
    for (const tag of claim.tags || []) {
      tagFrequency[tag] = (tagFrequency[tag] || 0) + 1;
    }
  }

  // Weak spots: claims resting on the two lowest evidence tiers.
  const LOW_EVIDENCE = new Set(['stated', 'web']);
  const weakClaims = allClaims.filter((claim) => LOW_EVIDENCE.has(claim.evidence));

  // Monoculture detection: a single type dominating a non-trivial sprint.
  const monocultures = [];
  for (const sprint of perSprint) {
    const typeEntries = Object.entries(sprint.types);
    if (typeEntries.length === 0) continue;
    // Ties resolve to the first-inserted type, matching a stable
    // descending sort over insertion order.
    const [topType, topCount] = typeEntries.reduce(
      (best, cur) => (cur[1] > best[1] ? cur : best)
    );
    if (topCount / sprint.claimCount > 0.7 && sprint.claimCount > 3) {
      monocultures.push({
        sprint: sprint.name,
        dominantType: topType,
        ratio: Math.round((topCount / sprint.claimCount) * 100),
      });
    }
  }

  return {
    summary: {
      totalSprints: sprints.length,
      totalClaims: allClaims.length,
      averageClaimsPerSprint: sprints.length === 0
        ? 0
        : Math.round((allClaims.length / sprints.length) * 10) / 10,
    },
    typeDistribution,
    evidenceDistribution,
    statusDistribution,
    tagFrequency,
    weakClaims: weakClaims.map((claim) => ({
      id: claim.id,
      sprint: claim._sprint,
      type: claim.type,
      evidence: claim.evidence,
      text: claim.text || claim.claim || claim.description,
    })),
    monocultures,
    perSprint,
  };
}

/**
 * Tally how many items carry each value of `key`. Missing or falsy
 * values are grouped under 'unknown'.
 */
function countBy(items, key) {
  return items.reduce((acc, item) => {
    const bucket = item[key] || 'unknown';
    acc[bucket] = (acc[bucket] || 0) + 1;
    return acc;
  }, {});
}
|
|
87
|
+
|
|
88
|
+
// Public API: cross-sprint claim analysis.
module.exports = { analyze };
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Prediction vs outcome scoring.
|
|
5
|
+
*
|
|
6
|
+
* Compares estimate claims against calibrate claims (from wheat's /calibrate).
|
|
7
|
+
* Answers: "How often were our estimates right?"
|
|
8
|
+
*
|
|
9
|
+
* Scoring:
|
|
10
|
+
* - Each estimate claim gets matched to calibration claims by ID reference or tag overlap
|
|
11
|
+
* - Calibration claims contain actual outcomes and a confidence delta
|
|
12
|
+
* - We compute accuracy, overconfidence, and underconfidence rates
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
// Evidence tiers ordered from weakest (1) to strongest (5).
// NOTE(review): nothing in this module reads EVIDENCE_RANK -- presumably
// kept for a sibling module or future scoring; confirm before removing.
const EVIDENCE_RANK = {
  stated: 1,
  web: 2,
  documented: 3,
  tested: 4,
  production: 5,
};
|
|
22
|
+
|
|
23
|
+
/**
 * Score estimate claims against calibration claims across sprints.
 *
 * Calibration claims are recognized by an id beginning with "cal" or an
 * explicit type of "calibration". Each one is paired with every estimate
 * it names in `references`/`refs` or shares at least one tag with (so a
 * single estimate may be scored more than once). Produces an overall
 * accuracy rate, per-confidence-bucket accuracy, the scored pairs, the
 * estimates that never got a follow-up, and a human-readable insight.
 */
function calibrate(sprints) {
  const allClaims = [];
  for (const sprint of sprints) {
    for (const claim of sprint.claims) {
      allClaims.push({ ...claim, _sprint: sprint.name });
    }
  }

  const estimates = allClaims.filter((c) => c.type === 'estimate');
  const calibrations = allClaims.filter(
    (c) => c.id && (c.id.startsWith('cal') || c.type === 'calibration')
  );

  // Pair calibrations with the estimates they cover.
  const scored = [];
  for (const cal of calibrations) {
    const refs = cal.references || cal.refs || [];
    for (const est of estimates) {
      const matchedByRef = refs.includes(est.id);
      const matchedByTag = Boolean(
        cal.tags && est.tags && cal.tags.some((t) => est.tags.includes(t))
      );
      if (!matchedByRef && !matchedByTag) continue;
      scored.push({
        estimateId: est.id,
        calibrationId: cal.id,
        sprint: est._sprint,
        estimateText: est.text || est.claim || est.description,
        outcomeText: cal.text || cal.claim || cal.description,
        estimateConfidence: est.confidence || null,
        actualOutcome: cal.outcome || cal.actual || null,
        accurate: cal.accurate ?? null,
        delta: cal.delta ?? null,
      });
    }
  }

  // Estimates that never received any calibration follow-up.
  const followedUp = new Set(scored.map((s) => s.estimateId));
  const unmatched = estimates.filter((e) => !followedUp.has(e.id));

  // Aggregate accuracy over the explicitly-judged pairs only.
  let accurateCount = 0;
  let inaccurateCount = 0;
  for (const s of scored) {
    if (s.accurate === true) accurateCount += 1;
    else if (s.accurate === false) inaccurateCount += 1;
  }
  const totalScored = accurateCount + inaccurateCount;
  const accuracyRate = totalScored === 0
    ? null
    : Math.round((accurateCount / totalScored) * 100);

  // Confidence calibration: numeric confidence maps to high/medium/low
  // at the 0.7 / 0.4 cut points; string confidence is used verbatim
  // (lowercased); anything else counts as medium.
  const buckets = {
    high: { total: 0, accurate: 0 },
    medium: { total: 0, accurate: 0 },
    low: { total: 0, accurate: 0 },
  };
  for (const s of scored) {
    const conf = s.estimateConfidence;
    let bucketName = 'medium';
    if (typeof conf === 'number') {
      if (conf >= 0.7) bucketName = 'high';
      else if (conf >= 0.4) bucketName = 'medium';
      else bucketName = 'low';
    } else if (typeof conf === 'string') {
      bucketName = conf.toLowerCase();
    }
    const bucket = buckets[bucketName];
    if (bucket) {
      bucket.total += 1;
      if (s.accurate === true) bucket.accurate += 1;
    }
  }

  const calibrationScore = {};
  for (const [name, { total, accurate }] of Object.entries(buckets)) {
    if (total > 0) {
      calibrationScore[name] = Math.round((accurate / total) * 100);
    }
  }

  return {
    summary: {
      totalEstimates: estimates.length,
      totalCalibrations: calibrations.length,
      matched: scored.length,
      unmatched: unmatched.length,
      accuracyRate,
    },
    calibrationByConfidence: calibrationScore,
    scored: scored.map((s) => ({
      estimateId: s.estimateId,
      calibrationId: s.calibrationId,
      sprint: s.sprint,
      accurate: s.accurate,
      delta: s.delta,
    })),
    unmatchedEstimates: unmatched.map((e) => ({
      id: e.id,
      sprint: e._sprint,
      text: e.text || e.claim || e.description,
      age: e.created ? daysSince(e.created) : null,
    })),
    insight: generateInsight(accuracyRate, calibrationScore, unmatched.length, estimates.length),
  };
}
|
|
116
|
+
|
|
117
|
+
/**
 * Build a short human-readable summary of calibration health from the
 * aggregate numbers, or a fallback sentence when nothing can be said.
 */
function generateInsight(accuracy, byConfidence, unmatchedCount, totalEstimates) {
  const sentences = [];

  if (accuracy !== null) {
    if (accuracy >= 80) {
      sentences.push(`Strong calibration: ${accuracy}% of scored predictions were accurate.`);
    } else if (accuracy >= 50) {
      sentences.push(`Moderate calibration: ${accuracy}% accuracy. Room for improvement.`);
    } else {
      sentences.push(`Weak calibration: only ${accuracy}% accuracy. Estimates may need more evidence before committing.`);
    }
  }

  const { high, low } = byConfidence;
  if (high !== undefined && low !== undefined && high < low) {
    sentences.push('Overconfidence detected: high-confidence predictions are less accurate than low-confidence ones.');
  }

  if (totalEstimates > 0 && unmatchedCount / totalEstimates > 0.5) {
    sentences.push(`${unmatchedCount} of ${totalEstimates} estimates have no calibration follow-up. Run /calibrate to close the loop.`);
  }

  return sentences.length === 0
    ? 'Not enough data to generate calibration insights.'
    : sentences.join(' ');
}
|
|
142
|
+
|
|
143
|
+
/**
 * Whole days elapsed since `dateStr` (any string Date can parse).
 *
 * Returns null when the string cannot be parsed. FIX: the original
 * try/catch never fired because `new Date('garbage')` does not throw --
 * it yields an Invalid Date whose arithmetic produces NaN, so callers
 * received NaN instead of the intended null. Detect Invalid Date via
 * getTime() instead.
 */
function daysSince(dateStr) {
  const then = new Date(dateStr);
  if (Number.isNaN(then.getTime())) {
    return null;
  }
  return Math.floor((Date.now() - then.getTime()) / (1000 * 60 * 60 * 24));
}
|
|
152
|
+
|
|
153
|
+
// Public API: prediction-vs-outcome scoring.
module.exports = { calibrate };
|