@machinespirits/eval 0.2.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +91 -9
- package/config/eval-settings.yaml +3 -3
- package/config/paper-manifest.json +486 -0
- package/config/providers.yaml +9 -6
- package/config/tutor-agents.yaml +2261 -0
- package/content/README.md +23 -0
- package/content/courses/479/course.md +53 -0
- package/content/courses/479/lecture-1.md +361 -0
- package/content/courses/479/lecture-2.md +360 -0
- package/content/courses/479/lecture-3.md +655 -0
- package/content/courses/479/lecture-4.md +530 -0
- package/content/courses/479/lecture-5.md +326 -0
- package/content/courses/479/lecture-6.md +346 -0
- package/content/courses/479/lecture-7.md +326 -0
- package/content/courses/479/lecture-8.md +273 -0
- package/content/courses/479/roadmap-slides.md +656 -0
- package/content/manifest.yaml +8 -0
- package/docs/research/apa.csl +2133 -0
- package/docs/research/build.sh +98 -0
- package/docs/research/figures/figure1.png +0 -0
- package/docs/research/figures/figure10.png +0 -0
- package/docs/research/figures/figure11.png +0 -0
- package/docs/research/figures/figure2.png +0 -0
- package/docs/research/figures/figure3.png +0 -0
- package/docs/research/figures/figure4.png +0 -0
- package/docs/research/figures/figure5.png +0 -0
- package/docs/research/figures/figure6.png +0 -0
- package/docs/research/figures/figure7.png +0 -0
- package/docs/research/figures/figure8.png +0 -0
- package/docs/research/figures/figure9.png +0 -0
- package/docs/research/header.tex +25 -0
- package/docs/research/paper-full.md +2565 -0
- package/docs/research/paper-short.md +436 -0
- package/docs/research/references.bib +1143 -0
- package/docs/research/slides-header.tex +188 -0
- package/docs/research/slides-pptx.md +363 -0
- package/docs/research/slides.md +531 -0
- package/docs/research/style-reference-pptx.py +199 -0
- package/package.json +5 -5
- package/scripts/analyze-eval-results.js +69 -17
- package/scripts/analyze-mechanism-traces.js +763 -0
- package/scripts/analyze-modulation-learning.js +498 -0
- package/scripts/analyze-prosthesis.js +144 -0
- package/scripts/analyze-run.js +264 -79
- package/scripts/assess-transcripts.js +853 -0
- package/scripts/browse-transcripts.js +854 -0
- package/scripts/check-parse-failures.js +73 -0
- package/scripts/code-dialectical-modulation.js +1320 -0
- package/scripts/download-data.sh +55 -0
- package/scripts/eval-cli.js +106 -18
- package/scripts/generate-paper-figures.js +663 -0
- package/scripts/generate-paper-figures.py +577 -76
- package/scripts/generate-paper-tables.js +299 -0
- package/scripts/qualitative-analysis-ai.js +3 -3
- package/scripts/render-sequence-diagram.js +694 -0
- package/scripts/test-latency.js +210 -0
- package/scripts/test-rate-limit.js +95 -0
- package/scripts/test-token-budget.js +332 -0
- package/scripts/validate-paper-manifest.js +670 -0
- package/services/__tests__/evalConfigLoader.test.js +2 -2
- package/services/__tests__/learnerRubricEvaluator.test.js +361 -0
- package/services/__tests__/learnerTutorInteractionEngine.test.js +326 -0
- package/services/evaluationRunner.js +975 -98
- package/services/evaluationStore.js +12 -4
- package/services/learnerTutorInteractionEngine.js +27 -2
- package/services/mockProvider.js +133 -0
- package/services/promptRewriter.js +1471 -5
- package/services/rubricEvaluator.js +55 -2
- package/services/transcriptFormatter.js +675 -0
- package/config/machinespirits-eval.code-workspace +0 -11
- package/docs/EVALUATION-VARIABLES.md +0 -589
- package/docs/REPLICATION-PLAN.md +0 -577
- package/scripts/analyze-run.mjs +0 -282
- package/scripts/compare-runs.js +0 -44
- package/scripts/compare-suggestions.js +0 -80
- package/scripts/dig-into-run.js +0 -158
- package/scripts/show-failed-suggestions.js +0 -64
- /package/scripts/{check-run.mjs → check-run.js} +0 -0
package/scripts/analyze-run.js
CHANGED
|
@@ -1,97 +1,282 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
1
|
+
/**
 * Detailed statistical analysis of an evaluation run.
 * Usage: node scripts/analyze-run.js [run_id]
 *
 * With no run_id argument, the most recently created run in the
 * local SQLite store is analyzed.
 */
import Database from 'better-sqlite3';
const db = new Database('data/evaluations.db');

// Run to analyze: explicit CLI argument, else the newest run on record.
const RUN_ID = process.argv[2] || db.prepare(
  'SELECT run_id FROM evaluation_results ORDER BY created_at DESC LIMIT 1'
).get()?.run_id;

if (!RUN_ID) { console.error('No run found'); process.exit(1); }
console.log(`Analyzing run: ${RUN_ID}\n`);
|
|
14
|
+
|
|
15
|
+
// ============================================================
// Helper functions
// ============================================================

/**
 * Sample standard deviation (n − 1 denominator).
 * Returns 0 when fewer than two observations are available.
 */
function std(values) {
  const n = values.length;
  if (n < 2) return 0;
  let total = 0;
  for (const v of values) total += v;
  const avg = total / n;
  let sumSq = 0;
  for (const v of values) sumSq += (v - avg) ** 2;
  return Math.sqrt(sumSq / (n - 1));
}
|
|
24
|
+
|
|
25
|
+
/**
 * Cohen's d effect size between two groups, using the pooled
 * sample standard deviation. Returns 0 when the pooled SD is 0.
 */
function cohensD(group1, group2) {
  const n1 = group1.length;
  const n2 = group2.length;
  const mean1 = group1.reduce((acc, v) => acc + v, 0) / n1;
  const mean2 = group2.reduce((acc, v) => acc + v, 0) / n2;
  const var1 = std(group1) ** 2;
  const var2 = std(group2) ** 2;
  const pooled = Math.sqrt(((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2));
  if (pooled === 0) return 0;
  return (mean1 - mean2) / pooled;
}
|
|
33
|
+
|
|
34
|
+
/**
 * Linear-interpolated percentile (inclusive method).
 * @param {number[]} values - sample (need not be sorted; not mutated)
 * @param {number} p - percentile in [0, 100]
 * @returns {number} interpolated value; NaN for an empty sample
 */
function percentile(values, p) {
  // Guard: an empty sample has no percentiles. Previously this fell
  // through to out-of-range indexing and produced NaN implicitly.
  if (values.length === 0) return NaN;
  const sorted = [...values].sort((a, b) => a - b);
  const idx = (p / 100) * (sorted.length - 1);
  const lo = Math.floor(idx);
  const hi = Math.ceil(idx);
  return lo === hi ? sorted[lo] : sorted[lo] + (sorted[hi] - sorted[lo]) * (idx - lo);
}
|
|
41
|
+
|
|
42
|
+
// ============================================================
// 1. Summary statistics
// ============================================================
const allScores = db.prepare(`
  SELECT overall_score FROM evaluation_results
  WHERE run_id = ? AND overall_score IS NOT NULL
`).all(RUN_ID).map(r => r.overall_score);

// Guard: a run with no scored results would otherwise produce NaN
// statistics here and in every later section.
if (allScores.length === 0) {
  console.error(`No scored results for run ${RUN_ID}`);
  process.exit(1);
}

const mean = allScores.reduce((a, b) => a + b, 0) / allScores.length;
const sd = std(allScores);
const median = percentile(allScores, 50);
const q1 = percentile(allScores, 25);
const q3 = percentile(allScores, 75);

console.log('=== DESCRIPTIVE STATISTICS ===');
console.log(`N = ${allScores.length}`);
console.log(`Mean: ${mean.toFixed(1)} (SD: ${sd.toFixed(1)})`);
console.log(`Median: ${median.toFixed(1)} (IQR: ${q1.toFixed(1)} – ${q3.toFixed(1)})`);
console.log(`Range: ${Math.min(...allScores).toFixed(1)} – ${Math.max(...allScores).toFixed(1)}`);
|
|
61
|
+
|
|
62
|
+
// ============================================================
// 2. Per-model statistics
// ============================================================
console.log('\n=== PER-MODEL STATISTICS ===');
const models = db.prepare(`
  SELECT DISTINCT model FROM evaluation_results
  WHERE run_id = ? AND overall_score IS NOT NULL
`).all(RUN_ID).map(r => r.model);

// Per-model score arrays, keyed by model name; reused by later sections.
const modelData = {};
const perModelStmt = db.prepare(`
    SELECT overall_score FROM evaluation_results
    WHERE run_id = ? AND model = ? AND overall_score IS NOT NULL
  `);
for (const m of models) {
  const scores = perModelStmt.all(RUN_ID, m).map(r => r.overall_score);
  modelData[m] = scores;
  const avg = scores.reduce((a, b) => a + b, 0) / scores.length;
  const spread = std(scores);
  console.log(`${m}: M=${avg.toFixed(1)}, SD=${spread.toFixed(1)}, N=${scores.length}, Range=[${Math.min(...scores).toFixed(1)}, ${Math.max(...scores).toFixed(1)}]`);
}
|
|
82
|
+
|
|
83
|
+
// ============================================================
// 3. Pairwise effect sizes between models
// ============================================================
console.log('\n=== PAIRWISE EFFECT SIZES (Cohen\'s d) ===');
// Only compare models with enough observations for a stable SD.
const modelNames = Object.keys(modelData).filter(m => modelData[m].length >= 3);
for (let i = 0; i < modelNames.length; i++) {
  for (let j = i + 1; j < modelNames.length; j++) {
    const d = cohensD(modelData[modelNames[i]], modelData[modelNames[j]]);
    // Fix: classify by magnitude — a large *negative* d was previously
    // labeled "negligible" because the sign was not stripped before
    // comparison against the Cohen benchmarks.
    const mag = Math.abs(d);
    const label = mag > 0.8 ? 'large' : mag > 0.5 ? 'medium' : mag > 0.2 ? 'small' : 'negligible';
    console.log(`${modelNames[i]} vs ${modelNames[j]}: d=${d.toFixed(2)} (${label})`);
  }
}
|
|
11
95
|
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
const
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
const
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
96
|
+
// ============================================================
// 4. Per-dimension statistics
// ============================================================
console.log('\n=== DIMENSION STATISTICS ===');
// Rubric dimensions; each maps to a score_<dim> column. Also used by
// the dimension × model breakdown below.
// (Removed the unused `dimCols` local — nothing referenced it.)
const dims = ['relevance', 'specificity', 'pedagogical', 'personalization', 'actionability', 'tone'];

for (const dim of dims) {
  // Column name comes from the hard-coded list above, so interpolating
  // it into the SQL text is safe.
  const col = `score_${dim}`;
  const vals = db.prepare(`
    SELECT ${col} as v FROM evaluation_results
    WHERE run_id = ? AND ${col} IS NOT NULL
  `).all(RUN_ID).map(r => r.v);
  if (vals.length === 0) continue;
  const mn = vals.reduce((a, b) => a + b, 0) / vals.length;
  const s = std(vals);
  console.log(`${dim.padEnd(20)} M=${mn.toFixed(2)}, SD=${s.toFixed(2)}, N=${vals.length}`);
}
|
|
114
|
+
|
|
115
|
+
// ============================================================
// 5. Per-dimension per-model
// ============================================================
console.log('\n=== DIMENSION × MODEL BREAKDOWN ===');
console.log('Model'.padEnd(25) + dims.map(d => d.substring(0, 8).padStart(9)).join(''));
for (const modelName of modelNames) {
  // One 9-wide cell per dimension: the model's mean score or 'N/A'.
  const cells = dims.map((dim) => {
    const col = `score_${dim}`;
    const row = db.prepare(`
      SELECT AVG(${col}) as v FROM evaluation_results
      WHERE run_id = ? AND model = ? AND ${col} IS NOT NULL
    `).get(RUN_ID, modelName);
    return (row?.v?.toFixed(2) || 'N/A').padStart(9);
  });
  console.log(modelName.padEnd(25) + cells.join(''));
}
|
|
33
133
|
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
134
|
+
// ============================================================
// 6. Scenario difficulty ranking
// ============================================================
console.log('\n=== SCENARIO DIFFICULTY RANKING (hardest → easiest) ===');
// Aggregated per-scenario means, ascending (hardest first).
// Reused by the variance decomposition section below.
const scenarioStats = db.prepare(`
  SELECT scenario_id,
         AVG(overall_score) as mean,
         COUNT(*) as n
  FROM evaluation_results
  WHERE run_id = ? AND overall_score IS NOT NULL
  GROUP BY scenario_id
  ORDER BY mean ASC
`).all(RUN_ID);

const scenarioScoresStmt = db.prepare(`
    SELECT overall_score FROM evaluation_results
    WHERE run_id = ? AND scenario_id = ? AND overall_score IS NOT NULL
  `);
for (const row of scenarioStats) {
  const rowScores = scenarioScoresStmt.all(RUN_ID, row.scenario_id).map(r => r.overall_score);
  const rowSd = std(rowScores);
  // One bar glyph per 5 points of mean score.
  const bar = '█'.repeat(Math.round(row.mean / 5));
  console.log(`${row.scenario_id.padEnd(40)} ${row.mean.toFixed(1).padStart(5)} (SD=${rowSd.toFixed(1).padStart(5)}) ${bar}`);
}
|
|
157
|
+
|
|
158
|
+
// ============================================================
// 7. Inter-model agreement (scenario-level correlation)
// ============================================================
console.log('\n=== INTER-MODEL AGREEMENT ===');
const scenarios = db.prepare(`
  SELECT DISTINCT scenario_id FROM evaluation_results
  WHERE run_id = ? AND overall_score IS NOT NULL
`).all(RUN_ID).map(r => r.scenario_id);

// Average-rank assignment: tied values share the mean of the 1-based
// rank positions they occupy (standard tie handling for Spearman).
function avgRanks(values) {
  const order = values.map((v, idx) => ({ v, idx })).sort((x, y) => x.v - y.v);
  const ranks = new Array(values.length);
  let i = 0;
  while (i < order.length) {
    let j = i;
    while (j + 1 < order.length && order[j + 1].v === order[i].v) j++;
    const rank = (i + j + 2) / 2; // mean of positions i+1 .. j+1
    for (let k = i; k <= j; k++) ranks[order[k].idx] = rank;
    i = j + 1;
  }
  return ranks;
}

// Pearson correlation; applied to rank vectors this yields a tie-safe
// Spearman's ρ. Returns 0 when either vector is constant.
function pearson(xs, ys) {
  const n = xs.length;
  const mx = xs.reduce((a, b) => a + b, 0) / n;
  const my = ys.reduce((a, b) => a + b, 0) / n;
  let num = 0;
  let dx = 0;
  let dy = 0;
  for (let k = 0; k < n; k++) {
    num += (xs[k] - mx) * (ys[k] - my);
    dx += (xs[k] - mx) ** 2;
    dy += (ys[k] - my) ** 2;
  }
  return dx === 0 || dy === 0 ? 0 : num / Math.sqrt(dx * dy);
}

// Check if models rank scenarios similarly
for (let i = 0; i < modelNames.length; i++) {
  for (let j = i + 1; j < modelNames.length; j++) {
    const pairs = [];
    for (const s of scenarios) {
      const s1 = db.prepare(`SELECT overall_score FROM evaluation_results WHERE run_id = ? AND model = ? AND scenario_id = ? AND overall_score IS NOT NULL`).get(RUN_ID, modelNames[i], s);
      const s2 = db.prepare(`SELECT overall_score FROM evaluation_results WHERE run_id = ? AND model = ? AND scenario_id = ? AND overall_score IS NOT NULL`).get(RUN_ID, modelNames[j], s);
      if (s1 && s2) pairs.push([s1.overall_score, s2.overall_score]);
    }
    if (pairs.length >= 3) {
      // Spearman rank correlation — fixed: ties now receive average
      // ranks and ρ is computed as the Pearson correlation of the
      // ranks. The previous 1 − 6Σd²/(n(n²−1)) shortcut with
      // arbitrary sequential ranks is only valid when no scores tie.
      const rho = pearson(avgRanks(pairs.map(p => p[0])), avgRanks(pairs.map(p => p[1])));
      const agreement = rho > 0.7 ? 'strong' : rho > 0.4 ? 'moderate' : rho > 0 ? 'weak' : 'none';
      console.log(`${modelNames[i]} vs ${modelNames[j]}: Spearman ρ=${rho.toFixed(2)} (${agreement} agreement, N=${pairs.length})`);
    }
  }
}
|
|
43
191
|
|
|
44
|
-
//
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
192
|
+
// ============================================================
// 8. Base vs Recognition score analysis
// ============================================================
console.log('\n=== BASE vs RECOGNITION SCORE ANALYSIS ===');
const dualRows = db.prepare(`
  SELECT model, base_score, recognition_score, overall_score
  FROM evaluation_results
  WHERE run_id = ? AND base_score IS NOT NULL AND recognition_score IS NOT NULL
`).all(RUN_ID);

if (dualRows.length > 0) {
  // Arithmetic mean of a non-empty numeric array.
  const avgOf = (xs) => xs.reduce((a, b) => a + b, 0) / xs.length;
  const bases = dualRows.map(r => r.base_score);
  const recogs = dualRows.map(r => r.recognition_score);
  const overalls = dualRows.map(r => r.overall_score);

  console.log(`N (with both scores): ${dualRows.length}`);
  console.log(`Base: M=${avgOf(bases).toFixed(1)}, SD=${std(bases).toFixed(1)}`);
  console.log(`Recognition: M=${avgOf(recogs).toFixed(1)}, SD=${std(recogs).toFixed(1)}`);
  console.log(`Overall: M=${avgOf(overalls).toFixed(1)}, SD=${std(overalls).toFixed(1)}`);

  const gap = cohensD(bases, recogs);
  // Fix: label by |d| — when base scores sat *below* recognition the
  // negative d always fell through to 'small' regardless of magnitude.
  const gapMag = Math.abs(gap);
  console.log(`Base vs Recognition gap: d=${gap.toFixed(2)} (${gapMag > 0.8 ? 'large' : gapMag > 0.5 ? 'medium' : 'small'})`);

  // Per-model breakdown
  console.log('\nPer-model dual scores:');
  for (const m of modelNames) {
    const mRows = dualRows.filter(r => r.model === m);
    if (mRows.length === 0) continue;
    // Compute each mean once instead of re-reducing inside the template.
    const baseMean = avgOf(mRows.map(r => r.base_score));
    const recogMean = avgOf(mRows.map(r => r.recognition_score));
    console.log(`  ${m}: Base=${baseMean.toFixed(1)}, Recog=${recogMean.toFixed(1)}, Gap=${(baseMean - recogMean).toFixed(1)}, N=${mRows.length}`);
  }
} else {
  console.log('No results with both base_score and recognition_score');
}
|
|
67
227
|
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
228
|
+
// ============================================================
// 9. Variance decomposition (eta-squared)
// ============================================================
console.log('\n=== VARIANCE DECOMPOSITION ===');
// How much variance is explained by model vs scenario?
const grandMean = mean;
const SSTotal = allScores.reduce((sum, s) => sum + (s - grandMean) ** 2, 0);

// SS between models
// Fix: sum over *all* models in modelData, not modelNames — the latter
// is filtered to models with >= 3 scores, which under-counted SSModel
// and inflated the residual share whenever a small-N model existed.
let SSModel = 0;
for (const m of Object.keys(modelData)) {
  const mScores = modelData[m];
  const mMean = mScores.reduce((a, b) => a + b, 0) / mScores.length;
  SSModel += mScores.length * (mMean - grandMean) ** 2;
}

// SS between scenarios
let SSScenario = 0;
for (const s of scenarioStats) {
  SSScenario += s.n * (s.mean - grandMean) ** 2;
}

// Guard against division by zero when every score is identical.
const etaModel = SSTotal === 0 ? 0 : SSModel / SSTotal;
const etaScenario = SSTotal === 0 ? 0 : SSScenario / SSTotal;
const etaResidual = 1 - etaModel - etaScenario;

console.log(`Total SS: ${SSTotal.toFixed(1)}`);
console.log(`Model effect (η²): ${(etaModel * 100).toFixed(1)}% — ${etaModel < 0.01 ? 'negligible' : etaModel < 0.06 ? 'small' : etaModel < 0.14 ? 'medium' : 'large'}`);
console.log(`Scenario effect (η²): ${(etaScenario * 100).toFixed(1)}% — ${etaScenario < 0.01 ? 'negligible' : etaScenario < 0.06 ? 'small' : etaScenario < 0.14 ? 'medium' : 'large'}`);
console.log(`Residual: ${(etaResidual * 100).toFixed(1)}%`);
|
|
259
|
+
|
|
260
|
+
// ============================================================
// 10. High-variance scenarios (discriminating power)
// ============================================================
console.log('\n=== SCENARIO DISCRIMINATING POWER (cross-model variance) ===');
const scenarioScoreStmt = db.prepare(`
    SELECT overall_score FROM evaluation_results
    WHERE run_id = ? AND scenario_id = ? AND overall_score IS NOT NULL
  `);
// One entry per scenario with at least two scores: SD and score range.
const scenarioVariance = scenarios
  .map((scenarioId) => {
    const scores = scenarioScoreStmt.all(RUN_ID, scenarioId).map(r => r.overall_score);
    if (scores.length < 2) return null;
    return {
      id: scenarioId,
      sd: std(scores),
      range: Math.max(...scores) - Math.min(...scores),
    };
  })
  .filter((entry) => entry !== null);
scenarioVariance.sort((a, b) => b.sd - a.sd);
console.log('Scenario'.padEnd(40), 'SD'.padStart(6), 'Range'.padStart(7));
for (const entry of scenarioVariance) {
  console.log(entry.id.padEnd(40), entry.sd.toFixed(1).padStart(6), entry.range.toFixed(1).padStart(7));
}

console.log('\n=== ANALYSIS COMPLETE ===');
db.close();