opencode-swarm-plugin 0.40.0 → 0.42.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. package/.hive/analysis/eval-failure-analysis-2025-12-25.md +331 -0
  2. package/.hive/analysis/session-data-quality-audit.md +320 -0
  3. package/.hive/eval-results.json +481 -24
  4. package/.hive/issues.jsonl +67 -16
  5. package/.hive/memories.jsonl +159 -1
  6. package/.opencode/eval-history.jsonl +315 -0
  7. package/.turbo/turbo-build.log +5 -5
  8. package/CHANGELOG.md +165 -0
  9. package/README.md +2 -0
  10. package/SCORER-ANALYSIS.md +598 -0
  11. package/bin/eval-gate.test.ts +158 -0
  12. package/bin/eval-gate.ts +74 -0
  13. package/bin/swarm.serve.test.ts +46 -0
  14. package/bin/swarm.test.ts +661 -732
  15. package/bin/swarm.ts +335 -0
  16. package/dist/compaction-hook.d.ts +7 -5
  17. package/dist/compaction-hook.d.ts.map +1 -1
  18. package/dist/compaction-prompt-scoring.d.ts +1 -0
  19. package/dist/compaction-prompt-scoring.d.ts.map +1 -1
  20. package/dist/eval-runner.d.ts +134 -0
  21. package/dist/eval-runner.d.ts.map +1 -0
  22. package/dist/hive.d.ts.map +1 -1
  23. package/dist/index.d.ts +29 -0
  24. package/dist/index.d.ts.map +1 -1
  25. package/dist/index.js +99741 -58858
  26. package/dist/memory-tools.d.ts +70 -2
  27. package/dist/memory-tools.d.ts.map +1 -1
  28. package/dist/memory.d.ts +37 -0
  29. package/dist/memory.d.ts.map +1 -1
  30. package/dist/observability-tools.d.ts +64 -0
  31. package/dist/observability-tools.d.ts.map +1 -1
  32. package/dist/plugin.js +99356 -58318
  33. package/dist/swarm-orchestrate.d.ts.map +1 -1
  34. package/dist/swarm-prompts.d.ts +32 -1
  35. package/dist/swarm-prompts.d.ts.map +1 -1
  36. package/docs/planning/ADR-009-oh-my-opencode-patterns.md +353 -0
  37. package/evals/ARCHITECTURE.md +1189 -0
  38. package/evals/example.eval.ts +3 -4
  39. package/evals/fixtures/compaction-prompt-cases.ts +6 -0
  40. package/evals/scorers/coordinator-discipline.evalite-test.ts +1 -162
  41. package/evals/scorers/coordinator-discipline.ts +0 -323
  42. package/evals/swarm-decomposition.eval.ts +4 -2
  43. package/package.json +4 -3
  44. package/src/compaction-prompt-scorers.test.ts +185 -9
  45. package/src/compaction-prompt-scoring.ts +7 -5
  46. package/src/eval-runner.test.ts +128 -1
  47. package/src/eval-runner.ts +46 -0
  48. package/src/hive.ts +43 -42
  49. package/src/memory-tools.test.ts +84 -0
  50. package/src/memory-tools.ts +68 -3
  51. package/src/memory.test.ts +2 -112
  52. package/src/memory.ts +88 -49
  53. package/src/observability-tools.test.ts +13 -0
  54. package/src/observability-tools.ts +277 -0
  55. package/src/swarm-orchestrate.test.ts +162 -0
  56. package/src/swarm-orchestrate.ts +7 -5
  57. package/src/swarm-prompts.test.ts +168 -4
  58. package/src/swarm-prompts.ts +228 -7
  59. package/.env +0 -2
  60. package/.turbo/turbo-test.log +0 -481
  61. package/.turbo/turbo-typecheck.log +0 -1
@@ -1,26 +1,483 @@
1
1
  {
2
- "compaction-prompt": {
3
- "passed": true,
4
- "phase": "bootstrap",
5
- "message": "Bootstrap phase (1/10 runs) - collecting data",
6
- "currentScore": 0.85
7
- },
8
- "coordinator-behavior": {
9
- "passed": true,
10
- "phase": "bootstrap",
11
- "message": "Bootstrap phase (1/10 runs) - collecting data",
12
- "currentScore": 0.85
13
- },
14
- "coordinator-session": {
15
- "passed": true,
16
- "phase": "bootstrap",
17
- "message": "Bootstrap phase (1/10 runs) - collecting data",
18
- "currentScore": 0.85
19
- },
20
- "swarm-decomposition": {
21
- "passed": true,
22
- "phase": "bootstrap",
23
- "message": "Bootstrap phase (1/10 runs) - collecting data",
24
- "currentScore": 0.85
2
+ "meta": {
3
+ "generated": "2025-12-25",
4
+ "agent": "CalmLake",
5
+ "cell": "opencode-swarm-plugin--ys7z8-mjlk7jstvch",
6
+ "epic": "opencode-swarm-plugin--ys7z8-mjlk7js9bt1",
7
+ "source_reports": [
8
+ "evals/ARCHITECTURE.md",
9
+ ".hive/analysis/eval-failure-analysis-2025-12-25.md",
10
+ ".hive/analysis/session-data-quality-audit.md",
11
+ "SCORER-ANALYSIS.md"
12
+ ],
13
+ "synthesis_date": "2025-12-25T19:00:00Z"
14
+ },
15
+ "executive_summary": {
16
+ "status": "GOOD_WITH_TACTICAL_ISSUES",
17
+ "overall_assessment": "Eval infrastructure is well-designed at the macro level with clean pipeline (CAPTURE → STORE → LOAD → EVAL → GATE → LEARN) but has tactical issues impacting usability. The 'failures' are actually code bugs, not systemic problems.",
18
+ "key_strengths": [
19
+ "Progressive gates prevent premature failures",
20
+ "Real data integration grounds evals in reality",
21
+ "Type-safe Zod schemas prevent garbage data",
22
+ "Learning loop closes feedback cycle",
23
+ "Clear separation of concerns (loaders, scorers, evals)"
24
+ ],
25
+ "critical_findings": [
26
+ {
27
+ "issue": "Two eval bugs causing false failures",
28
+ "impact": "example.eval.ts at 0%, compaction-prompt at 53% (both fixable)",
29
+ "priority": "P0"
30
+ },
31
+ {
32
+ "issue": "4 unused scorers (250 LOC dead code)",
33
+ "impact": "38% of coordinator-discipline.ts wasted, misleading coverage",
34
+ "priority": "P0"
35
+ },
36
+ {
37
+ "issue": "Data loader abstraction leak",
38
+ "impact": "Hard to test, hard to extend, tight coupling",
39
+ "priority": "P1"
40
+ },
41
+ {
42
+ "issue": "No scorer versioning",
43
+ "impact": "Can't improve scorers without breaking history",
44
+ "priority": "P1"
45
+ },
46
+ {
47
+ "issue": "Session filter too strict (2.9% pass rate)",
48
+ "impact": "Most coordinator behavior invisible to evals",
49
+ "priority": "P1"
50
+ },
51
+ {
52
+ "issue": "LLM-as-judge has no budget controls",
53
+ "impact": "Unbounded cost, network failures break evals",
54
+ "priority": "P2"
55
+ }
56
+ ],
57
+ "data_quality": "EXCELLENT",
58
+ "data_quality_note": "The 3 passing coordinator sessions are gold-standard (6-9 hours, 20-24 worker spawns, 0 violations). High filter rate is by design, not poor data."
59
+ },
60
+ "recommendations": {
61
+ "immediate_p0": [
62
+ {
63
+ "id": "REC-001",
64
+ "title": "Fix example.eval.ts data/task mismatch",
65
+ "issue": "Eval provides CellTree in data.output but task returns input string unchanged. Scorer receives 'Test task' string instead of JSON.",
66
+ "source": "eval-failure-analysis",
67
+ "effort": "5 minutes",
68
+ "impact": "HIGH",
69
+ "fix": "Remove output from data(), make task() return JSON.stringify(input) of CellTree",
70
+ "files": ["evals/example.eval.ts"],
71
+ "expected_outcome": "0% → 100% on example.eval.ts",
72
+ "code_snippet": "task: async (input) => JSON.stringify(input)"
73
+ },
74
+ {
75
+ "id": "REC-002",
76
+ "title": "Make forbidden tools scorer case-insensitive",
77
+ "issue": "Scorer checks /\\bEdit\\b/ but fixtures use 'edit' (lowercase). Zero matches on perfect fixture.",
78
+ "source": "eval-failure-analysis",
79
+ "effort": "5 minutes",
80
+ "impact": "HIGH",
81
+ "fix": "Add 'i' flag to all forbiddenTools regexes: /\\bedit\\b/i, /\\bwrite\\b/i",
82
+ "files": ["src/compaction-prompt-scoring.ts:213-218"],
83
+ "expected_outcome": "compaction-prompt: 53% → 70-80%",
84
+ "code_snippet": "const forbiddenTools = [/\\bedit\\b/i, /\\bwrite\\b/i, /\\bbash\\b/i, /swarmmail_reserve/i, /git commit/i];"
85
+ },
86
+ {
87
+ "id": "REC-003",
88
+ "title": "Add missing forbidden tools to fixtures",
89
+ "issue": "Fixtures mention 'edit, write, bash' but scorer checks 'Edit, Write, swarmmail_reserve, git commit'. Missing 2/5 tools.",
90
+ "source": "eval-failure-analysis",
91
+ "effort": "10 minutes",
92
+ "impact": "MEDIUM",
93
+ "fix": "Update all fixtures to include swarmmail_reserve and git commit in forbidden tools list",
94
+ "files": ["evals/fixtures/compaction-prompt-cases.ts"],
95
+ "expected_outcome": "Full tool coverage in compaction-prompt eval",
96
+ "note": "Combine with REC-002 for maximum impact"
97
+ },
98
+ {
99
+ "id": "REC-004",
100
+ "title": "Remove or integrate 4 unused scorers",
101
+ "issue": "researcherSpawnRate, skillLoadingRate, inboxMonitoringRate, blockerResponseTime defined but never used in any eval",
102
+ "source": "scorer-analysis",
103
+ "effort": "30 minutes (remove) OR 2 hours (integrate)",
104
+ "impact": "MEDIUM",
105
+ "fix_option_1": "Delete scorers + tests, update exports (recommended)",
106
+ "fix_option_2": "Add to coordinator-session.eval.ts scorers array, tune weights",
107
+ "files": ["evals/scorers/coordinator-discipline.ts:345-588", "evals/coordinator-session.eval.ts"],
108
+ "expected_outcome": "Remove 250 LOC dead code OR expand coordinator metrics",
109
+ "recommendation": "Remove - current 5-scorer set is sufficient for protocol adherence"
110
+ }
111
+ ],
112
+ "high_priority_p1": [
113
+ {
114
+ "id": "REC-005",
115
+ "title": "Relax default session filters",
116
+ "issue": "Only 3/102 sessions (2.9%) pass strict filters. 97% filtered out are workers (expected) but also filters partial coordinator sessions.",
117
+ "source": "session-data-quality-audit",
118
+ "effort": "15 minutes",
119
+ "impact": "HIGH",
120
+ "fix": "Change defaults from {requireWorkerSpawn: true, requireReview: true} to {requireWorkerSpawn: false, requireReview: false}",
121
+ "files": ["evals/lib/data-loader.ts"],
122
+ "expected_outcome": "3 → ~28 sessions passing (27.5% pass rate)",
123
+ "rationale": "Captures early-stage coordinator behavior, still filters worker-only sessions",
124
+ "migration_path": "Add tiered filter presets: 'strict', 'moderate', 'lenient'"
125
+ },
126
+ {
127
+ "id": "REC-006",
128
+ "title": "Add session type detection filter",
129
+ "issue": "70/102 sessions are worker completions (OUTCOME/subtask_success), not coordinator sessions. Need automatic exclusion.",
130
+ "source": "session-data-quality-audit",
131
+ "effort": "30 minutes",
132
+ "impact": "HIGH",
133
+ "fix": "Add isCoordinatorSession() filter that checks for DECISION events (decomposition_complete, worker_spawned, strategy_selected)",
134
+ "files": ["evals/lib/data-loader.ts"],
135
+ "expected_outcome": "Automatically filters worker-only sessions before quality criteria applied",
136
+ "code_snippet": "function isCoordinatorSession(s: CoordinatorSession): boolean { return s.events.some(e => e.event_type === 'DECISION'); }"
137
+ },
138
+ {
139
+ "id": "REC-007",
140
+ "title": "Extract data source interface (EvalSource<T>)",
141
+ "issue": "data-loader.ts knows about PGlite internals AND JSONL format. Violates single-responsibility.",
142
+ "source": "architecture",
143
+ "effort": "4-6 hours",
144
+ "impact": "MEDIUM",
145
+ "fix": "Create EvalSource interface, implement PGliteSource, JsonlSessionSource, FixtureSource adapters",
146
+ "files": ["evals/lib/data-loader.ts", "evals/lib/sources/"],
147
+ "expected_outcome": "Easier to test (mock sources), easier to extend (add CSV/S3 sources), explicit fallback strategy",
148
+ "benefits": [
149
+ "Sources testable in isolation",
150
+ "Easy to add new sources (S3, API, CSV)",
151
+ "Explicit fallback strategy (not hardcoded)",
152
+ "Reduced coupling to storage format"
153
+ ]
154
+ },
155
+ {
156
+ "id": "REC-008",
157
+ "title": "Add scorer versioning",
158
+ "issue": "Scorer logic changes invalidate historical comparisons. Can't tell if score drop is regression or stricter scoring.",
159
+ "source": "architecture",
160
+ "effort": "3-4 hours",
161
+ "impact": "HIGH",
162
+ "fix": "Add version field to scorer metadata, track versions in eval history, filter baseline to compatible runs only",
163
+ "files": ["evals/scorers/*.ts", "src/eval-gates.ts", "src/eval-history.ts"],
164
+ "expected_outcome": "Can improve scorers without breaking history, clear attribution of score changes, A/B test new scorers",
165
+ "schema_change": "interface EvalRunRecord { scorer_versions: Record<string, string> }"
166
+ },
167
+ {
168
+ "id": "REC-009",
169
+ "title": "Make session filters first-class and composable",
170
+ "issue": "Quality criteria hardcoded in loader, can't experiment with different filter profiles",
171
+ "source": "architecture",
172
+ "effort": "2-3 hours",
173
+ "impact": "MEDIUM",
174
+ "fix": "Extract SessionFilter type, create filter library (minEvents, requireWorkerSpawn, etc.), use compose() at call site",
175
+ "files": ["evals/lib/data-loader.ts", "evals/lib/filters.ts"],
176
+ "expected_outcome": "Caller controls filtering (explicit, testable), easy to add new filters, can test partial compliance",
177
+ "code_snippet": "const filter = filters.compose(filters.minEvents(3), filters.requireWorkerSpawn); const sessions = await load({ filter });"
178
+ },
179
+ {
180
+ "id": "REC-010",
181
+ "title": "Document weight rationale for composite scorers",
182
+ "issue": "overallDiscipline, compactionQuality, overallCoordinatorBehavior have different weight distributions but no documented WHY",
183
+ "source": "scorer-analysis",
184
+ "effort": "30 minutes",
185
+ "impact": "LOW",
186
+ "fix": "Add comments explaining weight choices (based on failure impact, domain priorities)",
187
+ "files": ["evals/scorers/coordinator-discipline.ts", "evals/scorers/compaction-scorers.ts", "evals/coordinator-behavior.eval.ts"],
188
+ "expected_outcome": "Maintainers understand weight rationale, easier to tune in future",
189
+ "example": "// Violations (30%): Breaking protocol causes immediate harm\n// Spawn (25%): Delegation is core coordinator job"
190
+ }
191
+ ],
192
+ "medium_priority_p2": [
193
+ {
194
+ "id": "REC-011",
195
+ "title": "Add LLM judge budget enforcement",
196
+ "issue": "decompositionCoherence calls Claude for every test case. No cost controls, network failures fail entire eval.",
197
+ "source": "architecture",
198
+ "effort": "2-3 hours",
199
+ "impact": "MEDIUM",
200
+ "fix": "Add JUDGE_BUDGET {maxCalls, maxCost, maxLatency}, track usage, return fallback score on budget exhaustion",
201
+ "files": ["evals/lib/llm.ts", "evals/scorers/index.ts"],
202
+ "expected_outcome": "Predictable costs, graceful degradation on LLM failure, fast CI runs",
203
+ "budget_suggestion": "maxCalls: 100, maxCost: 1.00 USD, maxLatency: 5000ms"
204
+ },
205
+ {
206
+ "id": "REC-012",
207
+ "title": "Improve baseline calculation (EMA, trimmed mean)",
208
+ "issue": "Simple mean of all scores. Early bad runs drag down baseline forever. No time-based decay.",
209
+ "source": "architecture",
210
+ "effort": "3-4 hours",
211
+ "impact": "MEDIUM",
212
+ "fix": "Implement exponential moving average (EMA), trimmed mean, median strategies. Make baseline strategy configurable per eval.",
213
+ "files": ["src/eval-gates.ts"],
214
+ "expected_outcome": "Baseline adapts to improvements, robust to outliers, configurable per eval needs",
215
+ "strategies": ["mean", "ema", "trimmed-mean", "median"]
216
+ },
217
+ {
218
+ "id": "REC-013",
219
+ "title": "Validate and document normalization thresholds",
220
+ "issue": "timeToFirstSpawn uses EXCELLENT_MS=60s, POOR_MS=300s with no evidence these match reality. blockerResponseTime similar.",
221
+ "source": "scorer-analysis",
222
+ "effort": "2-3 hours (data gathering) + 1 hour (docs)",
223
+ "impact": "LOW",
224
+ "fix": "Gather real coordinator session data, plot distribution of spawn/response times, validate or adjust thresholds, document rationale",
225
+ "files": ["evals/scorers/coordinator-discipline.ts"],
226
+ "expected_outcome": "Evidence-based thresholds, documented assumptions, reproducible calibration process",
227
+ "method": "Run 20+ real coordinator sessions, calculate percentiles (p50, p95), use for normalization"
228
+ },
229
+ {
230
+ "id": "REC-014",
231
+ "title": "Add LLM retry logic and response caching",
232
+ "issue": "Single LLM call, no fallback on failure. No caching - repeated eval runs re-generate same decompositions.",
233
+ "source": "architecture",
234
+ "effort": "2-3 hours",
235
+ "impact": "LOW",
236
+ "fix": "Add exponential backoff retry wrapper, implement hash-based response cache (prompt → cached result)",
237
+ "files": ["evals/lib/llm.ts"],
238
+ "expected_outcome": "Resilient to network errors, faster repeat eval runs, reduced API costs",
239
+ "cache_key": "hash(prompt + model) → cache.get() || generateText()"
240
+ },
241
+ {
242
+ "id": "REC-015",
243
+ "title": "Clarify reviewEfficiency vs reviewThoroughness relationship",
244
+ "issue": "Both measure review behavior but can contradict (4 reviews / 2 workers = thorough 100%, efficient 50%)",
245
+ "source": "scorer-analysis",
246
+ "effort": "30 minutes",
247
+ "impact": "LOW",
248
+ "fix": "Add docstring explaining: thoroughness=quality gate (did they review?), efficiency=resource optimization (not over-reviewing)",
249
+ "files": ["evals/scorers/coordinator-discipline.ts"],
250
+ "expected_outcome": "Clear documentation of intentional complementary metrics",
251
+ "future_enhancement": "Consider composite reviewQuality scorer balancing both (1:1 ratio = perfect)"
252
+ },
253
+ {
254
+ "id": "REC-016",
255
+ "title": "Add characterization tests for outcome scorers",
256
+ "issue": "outcome-scorers.ts only has export verification tests, no unit tests for scoring logic",
257
+ "source": "scorer-analysis",
258
+ "effort": "2-3 hours",
259
+ "impact": "LOW",
260
+ "fix": "Add snapshot tests with known inputs: test('scopeAccuracy known input', () => { expect(result.score).toMatchSnapshot(); })",
261
+ "files": ["evals/scorers/outcome-scorers.test.ts"],
262
+ "expected_outcome": "Easier to debug scorer failures, catch regressions in scoring logic",
263
+ "note": "Currently outcome scorers not used in any eval (waiting for real execution data)"
264
+ }
265
+ ],
266
+ "long_term_p3": [
267
+ {
268
+ "id": "REC-017",
269
+ "title": "Complete learning loop (retrieval integration)",
270
+ "issue": "eval-learning.ts stores failures to semantic memory but never queries before eval runs",
271
+ "source": "architecture",
272
+ "effort": "4-6 hours",
273
+ "impact": "HIGH",
274
+ "fix": "Add queryEvalFailures() before prompt generation, inject past failures into LLM context with 'Avoid these patterns'",
275
+ "files": ["src/eval-learning.ts", "evals/lib/llm.ts", "src/swarm-prompts.ts"],
276
+ "expected_outcome": "LLM learns from past failures, avoids repeated mistakes, self-improving system",
277
+ "integration_point": "Before generateDecomposition(), query semantic memory, append failures to prompt"
278
+ },
279
+ {
280
+ "id": "REC-018",
281
+ "title": "Add failure analysis and diff tooling",
282
+ "issue": "Learning stores 'score dropped' but not WHY. No test case diff, no scorer output comparison.",
283
+ "source": "architecture",
284
+ "effort": "6-8 hours",
285
+ "impact": "MEDIUM",
286
+ "fix": "Implement analyzeFailure() to diff scorer outputs between runs, identify which test cases regressed, surface root cause signals",
287
+ "files": ["src/eval-learning.ts", "src/eval-gates.ts"],
288
+ "expected_outcome": "Automated root cause attribution, faster debugging, better semantic memory context",
289
+ "output_format": "Which scorer dropped? Which test cases? What changed in code?"
290
+ },
291
+ {
292
+ "id": "REC-019",
293
+ "title": "Add CI/PR integration hooks",
294
+ "issue": "Gates check but don't post results. No GitHub PR comments, no merge blocking.",
295
+ "source": "architecture",
296
+ "effort": "4-6 hours",
297
+ "impact": "MEDIUM",
298
+ "fix": "Implement postGateResultToGitHub() to comment on PRs with eval results, add merge protection rules",
299
+ "files": ["src/eval-gates.ts", ".github/workflows/"],
300
+ "expected_outcome": "Visible quality signals in PRs, automated merge protection, team awareness of regressions",
301
+ "example": "✅ Evals passing (coordinator-session: 85%, swarm-decomposition: 78%)"
302
+ },
303
+ {
304
+ "id": "REC-020",
305
+ "title": "Performance optimization (parallel LLM, indexing)",
306
+ "issue": "LLM evals slow (60-100s for 20 cases), JSONL parsing is linear scan",
307
+ "source": "architecture",
308
+ "effort": "6-8 hours",
309
+ "impact": "MEDIUM",
310
+ "fix": "Parallel LLM calls (10 concurrent = 10x faster), SQLite index on session_id/epic_id, incremental eval runs (only changed cases)",
311
+ "files": ["evals/lib/llm.ts", "evals/lib/data-loader.ts", "evals/lib/compaction-loader.ts"],
312
+ "expected_outcome": "Faster eval runs (60s → 10s), faster data loading, reduced CI time",
313
+ "optimizations": ["Promise.all() for LLM calls", "SQLite FTS index", "git diff → affected evals"]
314
+ },
315
+ {
316
+ "id": "REC-021",
317
+ "title": "Add eval parameterization support",
318
+ "issue": "Can't run same eval with different configs (max_subtasks=4 vs 8). Must copy-paste eval file.",
319
+ "source": "architecture",
320
+ "effort": "4-6 hours",
321
+ "impact": "LOW",
322
+ "fix": "Add evalite.parameterize() wrapper to test multiple config combinations in single eval file",
323
+ "files": ["evals/swarm-decomposition.eval.ts", "evals/coordinator-session.eval.ts"],
324
+ "expected_outcome": "DRY eval definitions, grid search optimal params, side-by-side strategy comparison",
325
+ "example": "params: [{maxSubtasks: 4, strategy: 'file-based'}, {maxSubtasks: 8, strategy: 'feature-based'}]"
326
+ },
327
+ {
328
+ "id": "REC-022",
329
+ "title": "Add observability (dashboards, traces, cost tracking)",
330
+ "issue": "No visibility into eval run performance, LLM costs, or score trends over time",
331
+ "source": "architecture",
332
+ "effort": "8-12 hours",
333
+ "impact": "LOW",
334
+ "fix": "Integrate Grafana + Prometheus for score dashboards, OpenTelemetry for trace collection, track LLM API costs",
335
+ "files": ["src/eval-runner.ts", "src/observability/"],
336
+ "expected_outcome": "Real-time eval health visibility, cost accountability, performance regression detection",
337
+ "metrics": ["eval_run_duration", "llm_calls_total", "llm_cost_usd", "eval_score_gauge"]
338
+ }
339
+ ]
340
+ },
341
+ "current_scores": {
342
+ "compaction-prompt": {
343
+ "score": "53%",
344
+ "status": "DEGRADED",
345
+ "target": "70-80% (after REC-002, REC-003)",
346
+ "blocker": "Case-sensitive regex + missing tools"
347
+ },
348
+ "example": {
349
+ "score": "0%",
350
+ "status": "BROKEN",
351
+ "target": "100% (after REC-001)",
352
+ "blocker": "Data/task structure mismatch"
353
+ },
354
+ "coordinator-session": {
355
+ "score": "66%",
356
+ "status": "FAIR",
357
+ "target": "75-85% (after filter tuning)",
358
+ "note": "Only 3/102 sessions passing strict filter (by design)"
359
+ },
360
+ "coordinator-behavior": {
361
+ "score": "77%",
362
+ "status": "GOOD",
363
+ "target": "80-90%",
364
+ "note": "Stable, no immediate issues"
365
+ },
366
+ "swarm-decomposition": {
367
+ "score": "70%",
368
+ "status": "GOOD",
369
+ "target": "75-85%",
370
+ "note": "LLM variance expected"
371
+ },
372
+ "compaction-resumption": {
373
+ "score": "93%",
374
+ "status": "EXCELLENT",
375
+ "target": "90-95%",
376
+ "note": "Well-calibrated"
377
+ }
378
+ },
379
+ "implementation_roadmap": {
380
+ "sprint_1_quick_wins": {
381
+ "duration": "1-2 days",
382
+ "goal": "Fix broken evals, remove dead code",
383
+ "tasks": ["REC-001", "REC-002", "REC-003", "REC-004"],
384
+ "expected_outcome": "example.eval.ts: 0% → 100%, compaction-prompt: 53% → 70-80%, remove 250 LOC dead code"
385
+ },
386
+ "sprint_2_foundation": {
387
+ "duration": "1-2 weeks",
388
+ "goal": "Improve data quality, add versioning",
389
+ "tasks": ["REC-005", "REC-006", "REC-007", "REC-008", "REC-009", "REC-010"],
390
+ "expected_outcome": "Session pass rate: 2.9% → 27.5%, scorer versioning enabled, cleaner abstractions"
391
+ },
392
+ "sprint_3_robustness": {
393
+ "duration": "2-3 weeks",
394
+ "goal": "Add reliability and observability",
395
+ "tasks": ["REC-011", "REC-012", "REC-013", "REC-014", "REC-015", "REC-016"],
396
+ "expected_outcome": "Budget controls, better baselines, documented thresholds, LLM resilience"
397
+ },
398
+ "sprint_4_intelligence": {
399
+ "duration": "3-4 weeks",
400
+ "goal": "Close learning loop, add CI integration",
401
+ "tasks": ["REC-017", "REC-018", "REC-019"],
402
+ "expected_outcome": "Self-improving evals via memory retrieval, automated PR feedback, root cause analysis"
403
+ },
404
+ "sprint_5_scale": {
405
+ "duration": "4-6 weeks",
406
+ "goal": "Optimize performance, add advanced features",
407
+ "tasks": ["REC-020", "REC-021", "REC-022"],
408
+ "expected_outcome": "6x faster eval runs (60s → 10s), parameterized evals, production observability"
409
+ }
410
+ },
411
+ "risk_assessment": {
412
+ "high_risk": [
413
+ {
414
+ "area": "Scorer versioning (REC-008)",
415
+ "risk": "Schema changes could break existing eval history",
416
+ "mitigation": "Add migration script, test with copy of production data first"
417
+ },
418
+ {
419
+ "area": "Data source refactor (REC-007)",
420
+ "risk": "Large refactor could introduce regressions",
421
+ "mitigation": "TDD approach, extract interface first, migrate one source at a time"
422
+ }
423
+ ],
424
+ "medium_risk": [
425
+ {
426
+ "area": "Filter defaults change (REC-005)",
427
+ "risk": "Changing defaults could break existing eval expectations",
428
+ "mitigation": "Add preset system ('strict', 'moderate', 'lenient'), document migration"
429
+ },
430
+ {
431
+ "area": "LLM judge budget (REC-011)",
432
+ "risk": "Budget enforcement could cause eval failures if too strict",
433
+ "mitigation": "Start with generous budget, tune down based on real usage"
434
+ }
435
+ ],
436
+ "low_risk": [
437
+ {
438
+ "area": "Documentation improvements (REC-010, REC-015)",
439
+ "risk": "Minimal risk, pure documentation",
440
+ "mitigation": "None needed"
441
+ },
442
+ {
443
+ "area": "Quick fixes (REC-001, REC-002, REC-003)",
444
+ "risk": "Small code changes, easy to revert",
445
+ "mitigation": "Test with existing fixtures first"
446
+ }
447
+ ]
448
+ },
449
+ "success_metrics": {
450
+ "immediate": {
451
+ "example_eval_score": "0% → 100%",
452
+ "compaction_prompt_score": "53% → 70-80%",
453
+ "dead_code_removed": "250 LOC",
454
+ "test_coverage": "Maintained at current levels"
455
+ },
456
+ "short_term": {
457
+ "session_pass_rate": "2.9% → 27.5%",
458
+ "scorer_versioning": "Implemented and tracked",
459
+ "abstraction_quality": "3 source adapters (PGlite, JSONL, Fixture)",
460
+ "documentation_completeness": "All composite scorers have weight rationale"
461
+ },
462
+ "long_term": {
463
+ "eval_run_time": "60s → 10s (6x improvement)",
464
+ "llm_cost_predictability": "Budget enforcement at $1.00/run",
465
+ "ci_integration": "GitHub PR comments with eval results",
466
+ "learning_loop_closed": "Past failures injected into prompts"
467
+ }
468
+ },
469
+ "quotes_from_literature": {
470
+ "on_abstraction": "\"Deep modules provide powerful functionality yet have simple interfaces.\" - John Ousterhout, A Philosophy of Software Design (applies to REC-007 EvalSource interface)",
471
+ "on_testing": "\"Tests are the documentation that never lies.\" - Kent Beck (applies to REC-016 characterization tests)",
472
+ "on_refactoring": "\"Make the change easy, then make the easy change.\" - Kent Beck (applies to REC-007 data source refactor)",
473
+ "on_measurement": "\"You can't manage what you don't measure.\" - Peter Drucker (applies to REC-022 observability)"
474
+ },
475
+ "conclusion": {
476
+ "overall_health": "GOOD",
477
+ "confidence": "HIGH",
478
+ "summary": "The eval infrastructure is architecturally sound with clean pipeline design and progressive quality gates. Current 'failures' are tactical code bugs (case sensitivity, data/task mismatch) that are easily fixable. The 2.9% session pass rate is by design - filtering for gold-standard complete coordinator cycles. The 3 passing sessions are excellent examples with 20-24 worker spawns and 0 violations.",
479
+ "priority_order": "Fix eval bugs first (REC-001, REC-002, REC-003), then remove dead code (REC-004), then improve abstractions (REC-005 through REC-010). Long-term items (learning loop, CI integration, performance) can be tackled incrementally.",
480
+ "estimated_total_effort": "80-120 hours across 5 sprints",
481
+ "next_immediate_action": "Start with Sprint 1 quick wins (1-2 days) to unblock eval scoring and remove dead code."
25
482
  }
26
- }
483
+ }