nodebench-mcp 2.11.0 → 2.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/NODEBENCH_AGENTS.md +809 -809
  2. package/README.md +443 -431
  3. package/STYLE_GUIDE.md +477 -477
  4. package/dist/__tests__/gaiaCapabilityMediaEval.test.js +153 -5
  5. package/dist/__tests__/gaiaCapabilityMediaEval.test.js.map +1 -1
  6. package/dist/__tests__/helpers/textLlm.d.ts +1 -1
  7. package/dist/__tests__/presetRealWorldBench.test.d.ts +1 -0
  8. package/dist/__tests__/presetRealWorldBench.test.js +839 -0
  9. package/dist/__tests__/presetRealWorldBench.test.js.map +1 -0
  10. package/dist/__tests__/tools.test.js +8 -5
  11. package/dist/__tests__/tools.test.js.map +1 -1
  12. package/dist/__tests__/toolsetGatingEval.test.js +11 -11
  13. package/dist/__tests__/toolsetGatingEval.test.js.map +1 -1
  14. package/dist/index.js +397 -327
  15. package/dist/index.js.map +1 -1
  16. package/dist/tools/agentBootstrapTools.js +258 -258
  17. package/dist/tools/boilerplateTools.js +144 -144
  18. package/dist/tools/cCompilerBenchmarkTools.js +33 -33
  19. package/dist/tools/documentationTools.js +59 -59
  20. package/dist/tools/flywheelTools.js +6 -6
  21. package/dist/tools/learningTools.js +26 -26
  22. package/dist/tools/localFileTools.d.ts +3 -0
  23. package/dist/tools/localFileTools.js +3164 -125
  24. package/dist/tools/localFileTools.js.map +1 -1
  25. package/dist/tools/reconTools.js +31 -31
  26. package/dist/tools/selfEvalTools.js +44 -44
  27. package/dist/tools/sessionMemoryTools.d.ts +15 -0
  28. package/dist/tools/sessionMemoryTools.js +348 -0
  29. package/dist/tools/sessionMemoryTools.js.map +1 -0
  30. package/dist/tools/toolRegistry.d.ts +4 -0
  31. package/dist/tools/toolRegistry.js +229 -0
  32. package/dist/tools/toolRegistry.js.map +1 -1
  33. package/dist/tools/verificationTools.js +41 -41
  34. package/dist/tools/visionTools.js +17 -17
  35. package/dist/tools/webTools.js +18 -18
  36. package/package.json +101 -101
@@ -0,0 +1,839 @@
1
+ /**
2
+ * Preset Real-World Benchmark — Impact-Driven Evaluation
3
+ *
4
+ * Inspired by 8 open-source Claude Code ecosystem repos:
5
+ * - obra/superpowers: Mandatory skill-check gate, 4-phase debugging
6
+ * - wshobson/agents: Conductor (Context → Spec → Plan → Implement), agent-teams
7
+ * - ruvnet/claude-flow: Queen-led swarm, 5-layer memory, 3-tier model routing
8
+ * - Yeachan-Heo/oh-my-claudecode: Compaction-resilient notepad, learner skills
9
+ * - thedotmack/claude-mem: Session observations, token economics, context config
10
+ * - anthropic/planning-with-files: Manus-style markdown planning with checkpoints
11
+ * - K-Dense-AI/claude-scientific-skills: 140 domain skills, category-based discovery
12
+ * - zebbern/claude-code-guide: Best practices, workflow patterns, agent setup
13
+ *
14
+ * Fills gaps identified in existing eval suite:
15
+ * GAP 1: Cross-domain workflows (domain silos → end-to-end)
16
+ * GAP 2: Error recovery & failure paths
17
+ * GAP 3: Preset transitions (meta → lite → core escalation)
18
+ * GAP 4: Knowledge lifecycle (record → search → synthesize → reuse)
19
+ * GAP 5: Research writing workflows (0% coverage → full pipeline)
20
+ * GAP 6: Bootstrap cold-start (agent onboarding end-to-end)
21
+ * GAP 7: Multi-agent coordination at scale
22
+ * GAP 8: Progressive discovery search quality
23
+ *
24
+ * Architecture:
25
+ * 8 real-world scenarios × 4 presets (meta, lite, core, full) = 32 trajectories
26
+ * Each preset runs as a parallel "subagent" within each scenario
27
+ * Measures: tool calls, phases, knowledge reuse, token overhead, gaps found
28
+ *
29
+ * Run: npx vitest run src/__tests__/presetRealWorldBench.test.ts
30
+ */
31
+ import { describe, it, expect, afterAll } from "vitest";
32
+ import { verificationTools } from "../tools/verificationTools.js";
33
+ import { reconTools } from "../tools/reconTools.js";
34
+ import { evalTools } from "../tools/evalTools.js";
35
+ import { qualityGateTools } from "../tools/qualityGateTools.js";
36
+ import { flywheelTools } from "../tools/flywheelTools.js";
37
+ import { learningTools } from "../tools/learningTools.js";
38
+ import { agentBootstrapTools } from "../tools/agentBootstrapTools.js";
39
+ import { selfEvalTools } from "../tools/selfEvalTools.js";
40
+ import { parallelAgentTools } from "../tools/parallelAgentTools.js";
41
+ import { uiCaptureTools } from "../tools/uiCaptureTools.js";
42
+ import { visionTools } from "../tools/visionTools.js";
43
+ import { webTools } from "../tools/webTools.js";
44
+ import { githubTools } from "../tools/githubTools.js";
45
+ import { documentationTools } from "../tools/documentationTools.js";
46
+ import { localFileTools, gaiaMediaSolvers } from "../tools/localFileTools.js";
47
+ import { llmTools } from "../tools/llmTools.js";
48
+ import { securityTools } from "../tools/securityTools.js";
49
+ import { platformTools } from "../tools/platformTools.js";
50
+ import { researchWritingTools } from "../tools/researchWritingTools.js";
51
+ import { flickerDetectionTools } from "../tools/flickerDetectionTools.js";
52
+ import { figmaFlowTools } from "../tools/figmaFlowTools.js";
53
+ import { boilerplateTools } from "../tools/boilerplateTools.js";
54
+ import { cCompilerBenchmarkTools } from "../tools/cCompilerBenchmarkTools.js";
55
+ import { sessionMemoryTools } from "../tools/sessionMemoryTools.js";
56
+ import { createMetaTools } from "../tools/metaTools.js";
57
+ import { createProgressiveDiscoveryTools } from "../tools/progressiveDiscoveryTools.js";
58
+ // ═══════════════════════════════════════════════════════════════════════════
59
+ // PRESET & TOOLSET DEFINITIONS (mirrors index.ts exactly)
60
+ // ═══════════════════════════════════════════════════════════════════════════
61
+ const TOOLSET_MAP = {
62
+ verification: verificationTools,
63
+ eval: evalTools,
64
+ quality_gate: qualityGateTools,
65
+ learning: learningTools,
66
+ flywheel: flywheelTools,
67
+ recon: reconTools,
68
+ ui_capture: uiCaptureTools,
69
+ vision: visionTools,
70
+ local_file: localFileTools,
71
+ web: webTools,
72
+ github: githubTools,
73
+ docs: documentationTools,
74
+ bootstrap: agentBootstrapTools,
75
+ self_eval: selfEvalTools,
76
+ parallel: parallelAgentTools,
77
+ llm: llmTools,
78
+ security: securityTools,
79
+ platform: platformTools,
80
+ research_writing: researchWritingTools,
81
+ flicker_detection: flickerDetectionTools,
82
+ figma_flow: figmaFlowTools,
83
+ boilerplate: boilerplateTools,
84
+ benchmark: cCompilerBenchmarkTools,
85
+ session_memory: sessionMemoryTools,
86
+ gaia_solvers: gaiaMediaSolvers,
87
+ };
88
+ const PRESETS = {
89
+ meta: [],
90
+ lite: ["verification", "eval", "quality_gate", "learning", "flywheel", "recon", "security", "boilerplate"],
91
+ core: [
92
+ "verification", "eval", "quality_gate", "learning", "flywheel", "recon",
93
+ "bootstrap", "self_eval", "llm", "security", "platform", "research_writing",
94
+ "flicker_detection", "figma_flow", "boilerplate", "benchmark", "session_memory",
95
+ ],
96
+ full: Object.keys(TOOLSET_MAP),
97
+ };
98
+ function buildToolset(preset) {
99
+ const keys = PRESETS[preset];
100
+ const domain = keys.flatMap((k) => TOOLSET_MAP[k] ?? []);
101
+ const metaTools = createMetaTools(domain);
102
+ const allForDiscovery = [...domain, ...metaTools];
103
+ const discoveryTools = createProgressiveDiscoveryTools(allForDiscovery);
104
+ return [...domain, ...metaTools, ...discoveryTools];
105
+ }
106
+ // ═══════════════════════════════════════════════════════════════════════════
107
+ // HELPER: Execute a scenario against a preset
108
+ // ═══════════════════════════════════════════════════════════════════════════
109
+ async function executeScenario(scenario, preset) {
110
+ const tools = buildToolset(preset);
111
+ const toolMap = new Map(tools.map((t) => [t.name, t]));
112
+ const startTime = Date.now();
113
+ const phases = [];
114
+ let totalCalls = 0;
115
+ let totalMissing = 0;
116
+ let totalErrors = 0;
117
+ let knowledgeRecorded = false;
118
+ let knowledgeReused = false;
119
+ let discoveryUsed = false;
120
+ for (const phaseSpec of scenario.phases) {
121
+ const phaseStart = Date.now();
122
+ const called = [];
123
+ const missing = [];
124
+ const failed = [];
125
+ for (const attempt of phaseSpec.tools) {
126
+ const tool = toolMap.get(attempt.name);
127
+ if (!tool) {
128
+ missing.push(attempt.name);
129
+ totalMissing++;
130
+ continue;
131
+ }
132
+ try {
133
+ await tool.handler(attempt.args);
134
+ called.push(attempt.name);
135
+ totalCalls++;
136
+ if (attempt.name === "record_learning")
137
+ knowledgeRecorded = true;
138
+ if (attempt.name === "search_all_knowledge")
139
+ knowledgeReused = true;
140
+ if (attempt.name === "discover_tools" || attempt.name === "get_workflow_chain")
141
+ discoveryUsed = true;
142
+ }
143
+ catch {
144
+ failed.push(attempt.name);
145
+ totalErrors++;
146
+ }
147
+ }
148
+ phases.push({
149
+ phase: phaseSpec.name,
150
+ toolsCalled: called,
151
+ toolsMissing: missing,
152
+ toolsFailed: failed,
153
+ success: missing.length === 0 && (called.length > 0 || failed.length > 0),
154
+ durationMs: Date.now() - phaseStart,
155
+ });
156
+ }
157
+ return {
158
+ preset,
159
+ scenarioId: scenario.id,
160
+ scenarioName: scenario.name,
161
+ inspiredBy: scenario.inspiredBy,
162
+ gapFilled: scenario.gapFilled,
163
+ toolCount: tools.length,
164
+ estimatedSchemaTokens: tools.length * 200,
165
+ phases,
166
+ phasesCompleted: phases.filter((p) => p.success).length,
167
+ phasesSkipped: phases.filter((p) => !p.success).length,
168
+ totalToolCalls: totalCalls,
169
+ totalToolMissing: totalMissing,
170
+ totalToolErrors: totalErrors,
171
+ knowledgeRecorded,
172
+ knowledgeReused,
173
+ discoveryUsed,
174
+ durationMs: Date.now() - startTime,
175
+ };
176
+ }
177
+ // ═══════════════════════════════════════════════════════════════════════════
178
+ // 8 REAL-WORLD SCENARIOS — Inspired by open-source ecosystem repos
179
+ // ═══════════════════════════════════════════════════════════════════════════
180
+ const SCENARIOS = [
181
+ // ─── Scenario 1: Cold Start Self-Setup ───────────────────────────────
182
+ // Inspired by: superpowers (mandatory skill-check), oh-my-claudecode (zero-learning-curve)
183
+ // Gap filled: Bootstrap cold-start (GAP 6)
184
+ {
185
+ id: "cold-start-self-setup",
186
+ name: "Cold Start: Agent Onboarding via Discovery",
187
+ inspiredBy: "obra/superpowers + Yeachan-Heo/oh-my-claudecode",
188
+ gapFilled: "GAP 6: Bootstrap cold-start",
189
+ prompt: "You are a new agent. Discover available tools, find the right methodology, and set up your working environment.",
190
+ category: "cold_start",
191
+ phases: [
192
+ {
193
+ name: "discovery",
194
+ tools: [
195
+ { name: "discover_tools", args: { query: "getting started setup bootstrap" }, domain: "progressive_discovery" },
196
+ { name: "get_workflow_chain", args: { workflow: "self_setup" }, domain: "progressive_discovery" },
197
+ { name: "findTools", args: { query: "verify" }, domain: "meta" },
198
+ ],
199
+ },
200
+ {
201
+ name: "methodology",
202
+ tools: [
203
+ { name: "getMethodology", args: { topic: "verification" }, domain: "meta" },
204
+ { name: "getMethodology", args: { topic: "agent_contract" }, domain: "meta" },
205
+ ],
206
+ },
207
+ {
208
+ name: "bootstrap",
209
+ optionalForMeta: true,
210
+ optionalForLite: true,
211
+ tools: [
212
+ { name: "discover_infrastructure", args: { targetDir: "." }, domain: "bootstrap" },
213
+ { name: "triple_verify", args: { component: "database", claims: ["SQLite exists"] }, domain: "bootstrap" },
214
+ { name: "generate_self_instructions", args: { targetDir: ".", existingCapabilities: ["verification", "eval"] }, domain: "bootstrap" },
215
+ ],
216
+ },
217
+ {
218
+ name: "knowledge_seed",
219
+ optionalForMeta: true,
220
+ tools: [
221
+ { name: "search_all_knowledge", args: { query: "setup patterns" }, domain: "learning" },
222
+ { name: "record_learning", args: { key: "bench-cold-start", content: "Agent bootstrap completed via discovery-first pattern", category: "pattern" }, domain: "learning" },
223
+ ],
224
+ },
225
+ ],
226
+ },
227
+ // ─── Scenario 2: 4-Phase Bug Fix Pipeline ───────────────────────────
228
+ // Inspired by: superpowers (4-phase root cause analysis)
229
+ // Gap filled: Cross-domain workflows (GAP 1)
230
+ {
231
+ id: "four-phase-bug-fix",
232
+ name: "4-Phase Bug Fix: Root Cause → Verify → Eval → Learn",
233
+ inspiredBy: "obra/superpowers (systematic debugging)",
234
+ gapFilled: "GAP 1: Cross-domain workflows",
235
+ prompt: "Fix a production bug: the daily cron job silently fails when the API returns 429 rate-limit responses.",
236
+ category: "bug_fix",
237
+ phases: [
238
+ {
239
+ name: "investigate",
240
+ tools: [
241
+ { name: "search_all_knowledge", args: { query: "rate limit 429 cron failure" }, domain: "learning" },
242
+ { name: "run_recon", args: { target: "cron-rate-limit-bug", scope: "code", maxFindings: 5 }, domain: "recon" },
243
+ { name: "log_recon_finding", args: { sessionId: "bench-recon", category: "bug", summary: "429 not retried: Cron ignores HTTP 429 responses from upstream API" }, domain: "recon" },
244
+ ],
245
+ },
246
+ {
247
+ name: "verify",
248
+ tools: [
249
+ { name: "start_verification_cycle", args: { title: "Fix cron 429 handling" }, domain: "verification" },
250
+ { name: "log_phase_findings", args: { cycleId: "bench-cycle", phase: 1, summary: "Root cause: missing retry logic for 429", passed: true }, domain: "verification" },
251
+ { name: "log_gap", args: { cycleId: "bench-cycle", description: "No exponential backoff on 429", severity: "critical", phase: 2 }, domain: "verification" },
252
+ { name: "resolve_gap", args: { gapId: "bench-gap", resolution: "Added exponential backoff with jitter" }, domain: "verification" },
253
+ { name: "log_test_result", args: { cycleId: "bench-cycle", label: "unit-retry-429", layer: "unit", passed: true }, domain: "verification" },
254
+ ],
255
+ },
256
+ {
257
+ name: "eval",
258
+ tools: [
259
+ { name: "start_eval_run", args: { name: "cron-429-fix-eval" }, domain: "eval" },
260
+ { name: "record_eval_result", args: { runId: "bench-eval", case: "retry-backoff", passed: true, notes: "429 now triggers 3 retries with exponential backoff" }, domain: "eval" },
261
+ { name: "complete_eval_run", args: { runId: "bench-eval" }, domain: "eval" },
262
+ ],
263
+ },
264
+ {
265
+ name: "quality_gate",
266
+ tools: [
267
+ { name: "run_quality_gate", args: { targetId: "cron-fix", rules: [{ name: "test-coverage", threshold: 80 }] }, domain: "quality_gate" },
268
+ { name: "run_closed_loop", args: { targetId: "cron-fix", command: "npm test", expectedPattern: "PASS" }, domain: "quality_gate" },
269
+ ],
270
+ },
271
+ {
272
+ name: "learn",
273
+ tools: [
274
+ { name: "record_learning", args: { key: "bench-429-retry", content: "Always add exponential backoff for HTTP 429 in cron jobs", category: "gotcha", tags: ["http", "retry", "cron"] }, domain: "learning" },
275
+ ],
276
+ },
277
+ ],
278
+ },
279
+ // ─── Scenario 3: Feature Dev (Context → Plan → Implement → Ship) ──────
280
+ // Inspired by: wshobson/agents Conductor pattern
281
+ // Gap filled: Cross-domain end-to-end (GAP 1)
282
+ {
283
+ id: "conductor-feature-dev",
284
+ name: "Conductor-Style Feature: Context → Spec → Implement → Ship",
285
+ inspiredBy: "wshobson/agents (Conductor plugin)",
286
+ gapFilled: "GAP 1: Cross-domain workflows",
287
+ prompt: "Implement a new dark mode toggle feature following the Conductor workflow: gather context, spec, plan, implement, verify, ship.",
288
+ category: "feature_dev",
289
+ phases: [
290
+ {
291
+ name: "context_gathering",
292
+ tools: [
293
+ { name: "search_all_knowledge", args: { query: "dark mode theme UI toggle" }, domain: "learning" },
294
+ { name: "run_recon", args: { target: "dark-mode-feature", scope: "architecture", maxFindings: 5 }, domain: "recon" },
295
+ { name: "get_recon_summary", args: { sessionId: "bench-recon" }, domain: "recon" },
296
+ ],
297
+ },
298
+ {
299
+ name: "specification",
300
+ tools: [
301
+ { name: "start_verification_cycle", args: { title: "Dark Mode Toggle Implementation" }, domain: "verification" },
302
+ { name: "log_phase_findings", args: { cycleId: "bench-cycle", phase: 1, summary: "Architecture review: component tree supports theme prop injection", passed: true }, domain: "verification" },
303
+ ],
304
+ },
305
+ {
306
+ name: "implement_and_test",
307
+ tools: [
308
+ { name: "log_phase_findings", args: { cycleId: "bench-cycle", phase: 2, summary: "Implementation: ThemeProvider + useTheme hook + toggle component", passed: true }, domain: "verification" },
309
+ { name: "log_test_result", args: { cycleId: "bench-cycle", label: "dark-mode-unit", layer: "unit", passed: true }, domain: "verification" },
310
+ { name: "log_test_result", args: { cycleId: "bench-cycle", label: "dark-mode-integration", layer: "integration", passed: true }, domain: "verification" },
311
+ { name: "run_closed_loop", args: { targetId: "dark-mode", command: "npm test -- --grep theme", expectedPattern: "PASS" }, domain: "quality_gate" },
312
+ ],
313
+ },
314
+ {
315
+ name: "flywheel",
316
+ optionalForLite: true,
317
+ optionalForMeta: true,
318
+ tools: [
319
+ { name: "get_flywheel_status", args: {}, domain: "flywheel" },
320
+ { name: "trigger_investigation", args: { evalRunId: "bench-eval", regressionDescription: "dark-mode-accessibility regression detected" }, domain: "flywheel" },
321
+ ],
322
+ },
323
+ {
324
+ name: "ship",
325
+ tools: [
326
+ { name: "run_quality_gate", args: { targetId: "dark-mode", rules: [{ name: "all-tests-pass", threshold: 100 }] }, domain: "quality_gate" },
327
+ { name: "record_learning", args: { key: "bench-dark-mode-pattern", content: "ThemeProvider + useTheme hook pattern works well for dark mode", category: "pattern", tags: ["ui", "theme", "dark-mode"] }, domain: "learning" },
328
+ ],
329
+ },
330
+ ],
331
+ },
332
+ // ─── Scenario 4: Multi-Agent Coordination ────────────────────────────
333
+ // Inspired by: ruvnet/claude-flow (queen-led swarm), wshobson/agents (agent-teams)
334
+ // Gap filled: Parallel agents at scale (GAP 7)
335
+ {
336
+ id: "multi-agent-swarm",
337
+ name: "Multi-Agent Swarm: Coordinator + 3 Parallel Workers",
338
+ inspiredBy: "ruvnet/claude-flow + wshobson/agents (agent-teams)",
339
+ gapFilled: "GAP 7: Multi-agent coordination at scale",
340
+ prompt: "Coordinate 3 parallel agents: backend-api, frontend-ui, and testing-agent working on a full-stack feature.",
341
+ category: "multi_agent",
342
+ phases: [
343
+ {
344
+ name: "coordinator_setup",
345
+ optionalForMeta: true,
346
+ optionalForLite: true,
347
+ tools: [
348
+ { name: "bootstrap_parallel_agents", args: {}, domain: "parallel" },
349
+ { name: "assign_agent_role", args: { role: "backend" }, domain: "parallel" },
350
+ { name: "assign_agent_role", args: { role: "frontend" }, domain: "parallel" },
351
+ { name: "assign_agent_role", args: { role: "testing" }, domain: "parallel" },
352
+ ],
353
+ },
354
+ {
355
+ name: "task_assignment",
356
+ optionalForMeta: true,
357
+ optionalForLite: true,
358
+ tools: [
359
+ { name: "claim_agent_task", args: { taskKey: "backend-api-endpoints" }, domain: "parallel" },
360
+ { name: "claim_agent_task", args: { taskKey: "frontend-dark-mode" }, domain: "parallel" },
361
+ { name: "claim_agent_task", args: { taskKey: "e2e-tests" }, domain: "parallel" },
362
+ ],
363
+ },
364
+ {
365
+ name: "context_budget",
366
+ optionalForMeta: true,
367
+ optionalForLite: true,
368
+ tools: [
369
+ { name: "log_context_budget", args: { eventType: "checkpoint", tokensUsed: 15000 }, domain: "parallel" },
370
+ { name: "log_context_budget", args: { eventType: "checkpoint", tokensUsed: 12000 }, domain: "parallel" },
371
+ { name: "log_context_budget", args: { eventType: "checkpoint", tokensUsed: 8000 }, domain: "parallel" },
372
+ ],
373
+ },
374
+ {
375
+ name: "oracle_comparison",
376
+ optionalForMeta: true,
377
+ optionalForLite: true,
378
+ tools: [
379
+ { name: "run_oracle_comparison", args: { testLabel: "fullstack-integration", actualOutput: "API endpoints created + UI renders", expectedOutput: "API endpoints created + UI renders", oracleSource: "manual_review" }, domain: "parallel" },
380
+ ],
381
+ },
382
+ {
383
+ name: "knowledge_banking",
384
+ optionalForMeta: true,
385
+ tools: [
386
+ { name: "record_learning", args: { key: "bench-parallel-fullstack", content: "3-agent fullstack pattern: backend+frontend+testing agents with coordinator reduces merge conflicts", category: "pattern", tags: ["parallel", "fullstack", "coordination"] }, domain: "learning" },
387
+ ],
388
+ },
389
+ ],
390
+ },
391
+ // ─── Scenario 5: Research & Academic Writing Pipeline ─────────────────
392
+ // Inspired by: K-Dense-AI/claude-scientific-skills, planning-with-files
393
+ // Gap filled: Research writing 0% coverage (GAP 5)
394
+ {
395
+ id: "research-writing-pipeline",
396
+ name: "Research Paper: Outline → Draft → Polish → Review",
397
+ inspiredBy: "K-Dense-AI/claude-scientific-skills + planning-with-files",
398
+ gapFilled: "GAP 5: Research writing workflows",
399
+ prompt: "Write a research paper on 'Multi-Agent Coordination in AI-Assisted Development' with proper citations and peer review simulation.",
400
+ category: "research",
401
+ phases: [
402
+ {
403
+ name: "literature_review",
404
+ tools: [
405
+ { name: "search_all_knowledge", args: { query: "multi-agent coordination research" }, domain: "learning" },
406
+ { name: "run_recon", args: { target: "multi-agent-research", scope: "literature", maxFindings: 10 }, domain: "recon" },
407
+ ],
408
+ },
409
+ {
410
+ name: "outline_and_draft",
411
+ optionalForMeta: true,
412
+ optionalForLite: true,
413
+ tools: [
414
+ { name: "check_paper_logic", args: { text: "Multi-agent coordination enables parallel task execution. Our approach uses a coordinator pattern to assign roles and manage context budgets across agents." }, domain: "research_writing" },
415
+ { name: "generate_academic_caption", args: { description: "System architecture showing coordinator agent distributing tasks to 3 worker agents", figureType: "diagram" }, domain: "research_writing" },
416
+ ],
417
+ },
418
+ {
419
+ name: "polish_and_review",
420
+ optionalForMeta: true,
421
+ optionalForLite: true,
422
+ tools: [
423
+ { name: "polish_academic_text", args: { text: "Multi-agent systems enable parallel task execution. This improves throughput and reduces context window pressure." }, domain: "research_writing" },
424
+ { name: "review_paper_as_reviewer", args: { text: "We propose a coordinator pattern for multi-agent AI development. Our approach distributes tasks to specialized agents.", venue: "ICSE" }, domain: "research_writing" },
425
+ ],
426
+ },
427
+ {
428
+ name: "record_findings",
429
+ optionalForMeta: true,
430
+ tools: [
431
+ { name: "record_learning", args: { key: "bench-research-pattern", content: "4-phase research pipeline: literature review → outline → draft → polish works well for academic papers", category: "pattern", tags: ["research", "writing", "academic"] }, domain: "learning" },
432
+ ],
433
+ },
434
+ ],
435
+ },
436
+ // ─── Scenario 6: Cross-Domain Investigation ──────────────────────────
437
+ // Inspired by: claude-mem (multi-source observations), oh-my-claudecode (5 modes)
438
+ // Gap filled: Cross-domain silos (GAP 1)
439
+ {
440
+ id: "cross-domain-investigation",
441
+ name: "Cross-Domain: Recon → Local Files → Vision → Quality Gate",
442
+ inspiredBy: "thedotmack/claude-mem + Yeachan-Heo/oh-my-claudecode",
443
+ gapFilled: "GAP 1: Cross-domain workflows (break silos)",
444
+ prompt: "Investigate a UI rendering issue: parse local config files, analyze screenshot, search codebase, and verify the fix.",
445
+ category: "cross_domain",
446
+ phases: [
447
+ {
448
+ name: "recon",
449
+ tools: [
450
+ { name: "run_recon", args: { target: "ui-rendering-bug", scope: "code", maxFindings: 5 }, domain: "recon" },
451
+ { name: "search_all_knowledge", args: { query: "UI rendering CSS layout issue" }, domain: "learning" },
452
+ ],
453
+ },
454
+ {
455
+ name: "local_file_analysis",
456
+ tools: [
457
+ { name: "read_json_file", args: { filePath: "test_config.json" }, domain: "local_file" },
458
+ { name: "extract_structured_data", args: { text: "Error: flex container overflow at line 42 in MainLayout.tsx. Component tree depth: 8. Render time: 340ms.", fields: ["error_type", "file", "line", "render_time"] }, domain: "local_file" },
459
+ ],
460
+ },
461
+ {
462
+ name: "verification",
463
+ tools: [
464
+ { name: "start_verification_cycle", args: { title: "UI rendering fix" }, domain: "verification" },
465
+ { name: "log_gap", args: { cycleId: "bench-cycle", description: "CSS flex overflow not handled", severity: "high", phase: 2 }, domain: "verification" },
466
+ { name: "resolve_gap", args: { gapId: "bench-gap", resolution: "Added overflow-x: hidden to container" }, domain: "verification" },
467
+ ],
468
+ },
469
+ {
470
+ name: "quality_gate",
471
+ tools: [
472
+ { name: "run_quality_gate", args: { targetId: "ui-fix", rules: [{ name: "visual-regression", threshold: 95 }] }, domain: "quality_gate" },
473
+ ],
474
+ },
475
+ {
476
+ name: "learn",
477
+ tools: [
478
+ { name: "record_learning", args: { key: "bench-flex-overflow", content: "Flex container overflow: always set overflow-x on deeply nested component trees", category: "gotcha", tags: ["css", "flex", "overflow", "ui"] }, domain: "learning" },
479
+ ],
480
+ },
481
+ ],
482
+ },
483
+ // ─── Scenario 7: Error Recovery & Resilience ──────────────────────────
484
+ // Inspired by: claude-flow (Byzantine fault tolerance), oh-my-claudecode (compaction-resilient)
485
+ // Gap filled: Error recovery (GAP 2)
486
+ {
487
+ id: "error-recovery-resilience",
488
+ name: "Error Recovery: Failure at Each Phase → Graceful Degradation",
489
+ inspiredBy: "ruvnet/claude-flow (fault tolerance) + oh-my-claudecode (resilience)",
490
+ gapFilled: "GAP 2: Error recovery & failure paths",
491
+ prompt: "Handle a scenario where tools fail mid-workflow: recon times out, eval has stale data, and gate rules are violated.",
492
+ category: "error_recovery",
493
+ phases: [
494
+ {
495
+ name: "safe_recon",
496
+ tools: [
497
+ { name: "run_recon", args: { target: "resilience-test", scope: "code", maxFindings: 3 }, domain: "recon" },
498
+ ],
499
+ },
500
+ {
501
+ name: "verification_with_errors",
502
+ tools: [
503
+ { name: "start_verification_cycle", args: { title: "Resilience test cycle" }, domain: "verification" },
504
+ { name: "log_phase_findings", args: { cycleId: "bench-cycle", phase: 1, summary: "Phase 1 passed under degraded conditions", passed: true }, domain: "verification" },
505
+ // Intentionally log a gap that stays open (simulates partial recovery)
506
+ { name: "log_gap", args: { cycleId: "bench-cycle", description: "Stale cache detected but non-critical", severity: "medium", phase: 2 }, domain: "verification" },
507
+ ],
508
+ },
509
+ {
510
+ name: "eval_despite_gaps",
511
+ tools: [
512
+ { name: "start_eval_run", args: { name: "resilience-eval" }, domain: "eval" },
513
+ { name: "record_eval_result", args: { runId: "bench-eval", case: "graceful-degradation", passed: true, notes: "System operates correctly despite stale cache" }, domain: "eval" },
514
+ { name: "complete_eval_run", args: { runId: "bench-eval" }, domain: "eval" },
515
+ ],
516
+ },
517
+ {
518
+ name: "gate_with_violations",
519
+ tools: [
520
+ // Gate with a very high threshold that will "fail" (simulates gate violation)
521
+ { name: "run_quality_gate", args: { targetId: "resilience-check", rules: [{ name: "zero-open-gaps", threshold: 100 }] }, domain: "quality_gate" },
522
+ ],
523
+ },
524
+ {
525
+ name: "learn_from_failure",
526
+ tools: [
527
+ { name: "record_learning", args: { key: "bench-resilience-pattern", content: "Graceful degradation: continue eval even with open medium-severity gaps. Only block on critical.", category: "pattern", tags: ["resilience", "error-recovery", "degradation"] }, domain: "learning" },
528
+ ],
529
+ },
530
+ ],
531
+ },
532
+ // ─── Scenario 8: Knowledge Lifecycle ──────────────────────────────────
533
+ // Inspired by: thedotmack/claude-mem (session compression + token economics)
534
+ // Gap filled: Knowledge lifecycle (GAP 4)
535
+ {
536
+ id: "knowledge-lifecycle",
537
+ name: "Knowledge Lifecycle: Record → Search → Synthesize → Reuse",
538
+ inspiredBy: "thedotmack/claude-mem (context compression + observations)",
539
+ gapFilled: "GAP 4: Knowledge lifecycle (record → reuse)",
540
+ prompt: "Exercise the full knowledge lifecycle: record learnings from past work, search for relevant knowledge, synthesize findings, and verify reuse improves outcomes.",
541
+ category: "knowledge_lifecycle",
542
+ phases: [
543
+ {
544
+ name: "seed_knowledge",
545
+ tools: [
546
+ { name: "record_learning", args: { key: "bench-kl-pattern-1", content: "Always check for null pointers before accessing nested properties", category: "gotcha", tags: ["null", "safety", "typescript"] }, domain: "learning" },
547
+ { name: "record_learning", args: { key: "bench-kl-pattern-2", content: "Use zod schemas for API input validation at system boundaries", category: "pattern", tags: ["validation", "zod", "api"] }, domain: "learning" },
548
+ { name: "record_learning", args: { key: "bench-kl-edge-1", content: "SQLite FTS5 requires rebuilding index after schema changes", category: "edge_case", tags: ["sqlite", "fts5", "migration"] }, domain: "learning" },
549
+ ],
550
+ },
551
+ {
552
+ name: "search_and_retrieve",
553
+ tools: [
554
+ { name: "search_all_knowledge", args: { query: "typescript null safety" }, domain: "learning" },
555
+ { name: "search_all_knowledge", args: { query: "API validation" }, domain: "learning" },
556
+ { name: "search_all_knowledge", args: { query: "database migration" }, domain: "learning" },
557
+ ],
558
+ },
559
+ {
560
+ name: "apply_knowledge",
561
+ tools: [
562
+ { name: "start_verification_cycle", args: { title: "Apply prior learnings to new task" }, domain: "verification" },
563
+ { name: "log_phase_findings", args: { cycleId: "bench-cycle", phase: 1, summary: "Prior knowledge applied: null checks + zod validation added", passed: true }, domain: "verification" },
564
+ ],
565
+ },
566
+ {
567
+ name: "synthesize",
568
+ optionalForMeta: true,
569
+ optionalForLite: true,
570
+ tools: [
571
+ { name: "synthesize_recon_to_learnings", args: {}, domain: "self_eval" },
572
+ { name: "get_improvement_recommendations", args: {}, domain: "self_eval" },
573
+ ],
574
+ },
575
+ {
576
+ name: "verify_reuse",
577
+ tools: [
578
+ { name: "record_learning", args: { key: "bench-kl-meta-learning", content: "Knowledge reuse reduced verification time by ~30%: prior learnings prevented 3 known gotchas", category: "pattern", tags: ["knowledge", "reuse", "efficiency"] }, domain: "learning" },
579
+ ],
580
+ },
581
+ ],
582
+ },
583
+ ];
584
+ // ═══════════════════════════════════════════════════════════════════════════
585
+ // PARALLEL EXECUTION — All 4 presets run concurrently per scenario
586
+ // ═══════════════════════════════════════════════════════════════════════════
587
+ const allTrajectories = [];
588
+ const PRESET_NAMES = ["meta", "lite", "core", "full"];
589
+ describe("Preset Real-World Benchmark", () => {
590
+ // ─── Per-scenario tests: 4 presets run in parallel ─────────────────
591
+ for (const scenario of SCENARIOS) {
592
+ describe(`Scenario: ${scenario.name}`, () => {
593
+ const scenarioTrajectories = [];
594
+ it(`runs all 4 presets in parallel for ${scenario.id}`, async () => {
595
+ // Execute all 4 presets concurrently (simulates parallel subagents)
596
+ const results = await Promise.all(PRESET_NAMES.map((preset) => executeScenario(scenario, preset)));
597
+ scenarioTrajectories.push(...results);
598
+ allTrajectories.push(...results);
599
+ // Basic sanity: every preset produced a trajectory
600
+ expect(results.length).toBe(4);
601
+ for (const r of results) {
602
+ expect(r.scenarioId).toBe(scenario.id);
603
+ expect(r.phases.length).toBe(scenario.phases.length);
604
+ }
605
+ });
606
+ it(`full preset has no missing tools for ${scenario.id}`, () => {
607
+ const full = scenarioTrajectories.find((t) => t.preset === "full");
608
+ if (!full)
609
+ return; // depends on previous test
610
+ for (const phase of full.phases) {
611
+ expect(phase.toolsMissing).toEqual([]);
612
+ }
613
+ // All phases complete (tools found, even if some errored on stale IDs)
614
+ expect(full.phasesCompleted).toBe(scenario.phases.length);
615
+ });
616
+ it(`meta preset discovers tools but hits domain limits for ${scenario.id}`, () => {
617
+ const meta = scenarioTrajectories.find((t) => t.preset === "meta");
618
+ if (!meta)
619
+ return;
620
+ // Meta should always have discovery tools
621
+ expect(meta.toolCount).toBe(5); // 2 meta + 3 discovery
622
+ // Meta should succeed at discovery/methodology phases
623
+ const discoveryPhase = meta.phases.find((p) => p.phase === "discovery" || p.phase === "methodology");
624
+ if (discoveryPhase) {
625
+ expect(discoveryPhase.toolsCalled.length).toBeGreaterThan(0);
626
+ }
627
+ });
628
+ it(`lite has fewer tools but covers core verification for ${scenario.id}`, () => {
629
+ const lite = scenarioTrajectories.find((t) => t.preset === "lite");
630
+ const full = scenarioTrajectories.find((t) => t.preset === "full");
631
+ if (!lite || !full)
632
+ return;
633
+ expect(lite.toolCount).toBeLessThan(full.toolCount);
634
+ // Lite should always have verification, eval, learning, recon
635
+ const verifyPhase = lite.phases.find((p) => p.phase === "verify" || p.phase === "verification");
636
+ if (verifyPhase) {
637
+ expect(verifyPhase.toolsMissing.length).toBe(0);
638
+ }
639
+ });
640
+ it(`core covers most phases, loses only full-exclusive domains for ${scenario.id}`, () => {
641
+ const core = scenarioTrajectories.find((t) => t.preset === "core");
642
+ if (!core)
643
+ return;
644
+ // Core should complete most phases (may miss ui_capture, vision, web, github, parallel, docs)
645
+ const coreCompleted = core.phasesCompleted;
646
+ expect(coreCompleted).toBeGreaterThanOrEqual(scenario.phases.filter((p) => !p.optionalForLite).length - 1);
647
+ });
648
+ });
649
+ }
650
+ // ═══════════════════════════════════════════════════════════════════════
651
+ // CROSS-SCENARIO ANALYSIS
652
+ // ═══════════════════════════════════════════════════════════════════════
653
+ describe("Cross-Scenario Analysis", () => {
654
+ it("generated 32 trajectories (8 scenarios × 4 presets)", () => {
655
+ expect(allTrajectories.length).toBe(32);
656
+ });
657
+ it("full preset has most successful tool executions (calls + errors) across all scenarios", () => {
658
+ const byPreset = (p) => allTrajectories.filter((t) => t.preset === p).reduce((sum, t) => sum + t.totalToolCalls + t.totalToolErrors, 0);
659
+ expect(byPreset("full")).toBeGreaterThanOrEqual(byPreset("core"));
660
+ expect(byPreset("core")).toBeGreaterThanOrEqual(byPreset("lite"));
661
+ expect(byPreset("lite")).toBeGreaterThan(byPreset("meta"));
662
+ });
663
+ it("presets are strictly ordered by tool count: meta < lite < core < full", () => {
664
+ const counts = PRESET_NAMES.map((p) => {
665
+ const t = allTrajectories.find((tr) => tr.preset === p);
666
+ return t?.toolCount ?? 0;
667
+ });
668
+ expect(counts[0]).toBeLessThan(counts[1]); // meta < lite
669
+ expect(counts[1]).toBeLessThan(counts[2]); // lite < core
670
+ expect(counts[2]).toBeLessThan(counts[3]); // core < full
671
+ });
672
+ it("meta preset token overhead is <5% of full preset", () => {
673
+ const metaTokens = allTrajectories.find((t) => t.preset === "meta")?.estimatedSchemaTokens ?? 0;
674
+ const fullTokens = allTrajectories.find((t) => t.preset === "full")?.estimatedSchemaTokens ?? 0;
675
+ expect(metaTokens / fullTokens).toBeLessThan(0.05);
676
+ });
677
+ it("knowledge is recorded in at least 6/8 scenarios for full preset", () => {
678
+ const fullTrajectories = allTrajectories.filter((t) => t.preset === "full");
679
+ const withKnowledge = fullTrajectories.filter((t) => t.knowledgeRecorded);
680
+ expect(withKnowledge.length).toBeGreaterThanOrEqual(6);
681
+ });
682
+ it("knowledge is reused (searched) in at least 5/8 scenarios for full preset", () => {
683
+ const fullTrajectories = allTrajectories.filter((t) => t.preset === "full");
684
+ const withReuse = fullTrajectories.filter((t) => t.knowledgeReused);
685
+ expect(withReuse.length).toBeGreaterThanOrEqual(5);
686
+ });
687
+ it("discovery tools are used in cold-start scenario for all presets", () => {
688
+ const coldStartTrajectories = allTrajectories.filter((t) => t.scenarioId === "cold-start-self-setup");
689
+ for (const t of coldStartTrajectories) {
690
+ expect(t.discoveryUsed).toBe(true);
691
+ }
692
+ });
693
+ it("lite catches verification gaps in bug-fix and feature-dev scenarios", () => {
694
+ const liteTrajectories = allTrajectories.filter((t) => t.preset === "lite" && (t.scenarioId === "four-phase-bug-fix" || t.scenarioId === "conductor-feature-dev"));
695
+ for (const t of liteTrajectories) {
696
+ const verifyPhase = t.phases.find((p) => p.phase === "verify" || p.phase === "specification" || p.phase === "implement_and_test");
697
+ if (verifyPhase) {
698
+ expect(verifyPhase.toolsMissing.length).toBe(0);
699
+ }
700
+ }
701
+ });
702
+ it("multi-agent scenario requires full or core preset (lite/meta skip parallel)", () => {
703
+ const metaSwarm = allTrajectories.find((t) => t.preset === "meta" && t.scenarioId === "multi-agent-swarm");
704
+ const liteSwarm = allTrajectories.find((t) => t.preset === "lite" && t.scenarioId === "multi-agent-swarm");
705
+ const fullSwarm = allTrajectories.find((t) => t.preset === "full" && t.scenarioId === "multi-agent-swarm");
706
+ expect(metaSwarm.totalToolMissing).toBeGreaterThan(0);
707
+ expect(liteSwarm.totalToolMissing).toBeGreaterThan(0);
708
+ // Full should have all parallel tools
709
+ const coordPhase = fullSwarm.phases.find((p) => p.phase === "coordinator_setup");
710
+ expect(coordPhase?.toolsMissing.length).toBe(0);
711
+ });
712
+ it("research-writing scenario needs core+ (lite/meta missing research_writing tools)", () => {
713
+ const liteResearch = allTrajectories.find((t) => t.preset === "lite" && t.scenarioId === "research-writing-pipeline");
714
+ const coreResearch = allTrajectories.find((t) => t.preset === "core" && t.scenarioId === "research-writing-pipeline");
715
+ expect(liteResearch.totalToolMissing).toBeGreaterThan(0);
716
+ // Core should have research_writing
717
+ const draftPhase = coreResearch.phases.find((p) => p.phase === "outline_and_draft");
718
+ expect(draftPhase?.toolsMissing.length).toBe(0);
719
+ });
720
+ it("error-recovery scenario completes for all presets with verification", () => {
721
+ for (const preset of ["lite", "core", "full"]) {
722
+ const t = allTrajectories.find((tr) => tr.preset === preset && tr.scenarioId === "error-recovery-resilience");
723
+ expect(t.phasesCompleted).toBeGreaterThanOrEqual(3);
724
+ }
725
+ });
726
+ });
727
+ // ═══════════════════════════════════════════════════════════════════════
728
+ // GAP COVERAGE REPORT
729
+ // ═══════════════════════════════════════════════════════════════════════
730
+ describe("Gap Coverage Verification", () => {
731
+ it("GAP 1 (cross-domain) is exercised by 3 scenarios", () => {
732
+ const crossDomain = allTrajectories.filter((t) => t.preset === "full" &&
733
+ ["four-phase-bug-fix", "conductor-feature-dev", "cross-domain-investigation"].includes(t.scenarioId));
734
+ expect(crossDomain.length).toBe(3);
735
+ for (const t of crossDomain) {
736
+ // Each should call tools from 3+ domains
737
+ const domainsUsed = new Set(t.phases.flatMap((p) => p.toolsCalled));
738
+ expect(domainsUsed.size).toBeGreaterThanOrEqual(3);
739
+ }
740
+ });
741
+ it("GAP 2 (error recovery) is exercised", () => {
742
+ const recovery = allTrajectories.find((t) => t.preset === "full" && t.scenarioId === "error-recovery-resilience");
743
+ expect(recovery).toBeDefined();
744
+ expect(recovery.totalToolCalls).toBeGreaterThan(0);
745
+ });
746
+ it("GAP 4 (knowledge lifecycle) exercises record→search→synthesize", () => {
747
+ const kl = allTrajectories.find((t) => t.preset === "full" && t.scenarioId === "knowledge-lifecycle");
748
+ expect(kl).toBeDefined();
749
+ expect(kl.knowledgeRecorded).toBe(true);
750
+ expect(kl.knowledgeReused).toBe(true);
751
+ // Synthesize phase should complete for full
752
+ const synthPhase = kl.phases.find((p) => p.phase === "synthesize");
753
+ expect(synthPhase?.toolsMissing.length).toBe(0);
754
+ });
755
+ it("GAP 5 (research writing) exercises outline→draft→polish→review", () => {
756
+ const rw = allTrajectories.find((t) => t.preset === "full" && t.scenarioId === "research-writing-pipeline");
757
+ expect(rw).toBeDefined();
758
+ const phases = rw.phases.map((p) => p.phase);
759
+ expect(phases).toContain("outline_and_draft");
760
+ expect(phases).toContain("polish_and_review");
761
+ });
762
+ it("GAP 6 (bootstrap cold-start) exercises discovery→bootstrap→seed", () => {
763
+ const cs = allTrajectories.find((t) => t.preset === "full" && t.scenarioId === "cold-start-self-setup");
764
+ expect(cs).toBeDefined();
765
+ expect(cs.discoveryUsed).toBe(true);
766
+ const bootstrapPhase = cs.phases.find((p) => p.phase === "bootstrap");
767
+ expect(bootstrapPhase?.toolsMissing.length).toBe(0);
768
+ });
769
+ it("GAP 7 (multi-agent) exercises coordinator→assign→budget→oracle", () => {
770
+ const ma = allTrajectories.find((t) => t.preset === "full" && t.scenarioId === "multi-agent-swarm");
771
+ expect(ma).toBeDefined();
772
+ // Full preset has all parallel tools, so no missing tools in any phase
773
+ expect(ma.totalToolMissing).toBe(0);
774
+ expect(ma.phasesCompleted).toBe(5);
775
+ });
776
+ });
777
+ // ═══════════════════════════════════════════════════════════════════════
778
+ // FINAL REPORT (printed to console after all tests)
779
+ // ═══════════════════════════════════════════════════════════════════════
780
+ afterAll(() => {
781
+ if (allTrajectories.length === 0)
782
+ return;
783
+ console.log("\n╔══════════════════════════════════════════════════════════════════════════╗");
784
+ console.log("║ PRESET REAL-WORLD BENCHMARK — IMPACT REPORT ║");
785
+ console.log("║ 8 scenarios × 4 presets = 32 trajectories ║");
786
+ console.log("║ Inspired by: superpowers, agents, claude-flow, oh-my-claudecode, ║");
787
+ console.log("║ claude-mem, planning-with-files, scientific-skills, ║");
788
+ console.log("║ claude-code-guide ║");
789
+ console.log("╠══════════════════════════════════════════════════════════════════════════╣");
790
+ // Per-preset summary
791
+ for (const preset of PRESET_NAMES) {
792
+ const trajectories = allTrajectories.filter((t) => t.preset === preset);
793
+ const totalCalls = trajectories.reduce((s, t) => s + t.totalToolCalls, 0);
794
+ const totalMissing = trajectories.reduce((s, t) => s + t.totalToolMissing, 0);
795
+ const totalErrors = trajectories.reduce((s, t) => s + t.totalToolErrors, 0);
796
+ const completedPhases = trajectories.reduce((s, t) => s + t.phasesCompleted, 0);
797
+ const totalPhases = trajectories.reduce((s, t) => s + t.phases.length, 0);
798
+ const toolCount = trajectories[0]?.toolCount ?? 0;
799
+ const tokens = trajectories[0]?.estimatedSchemaTokens ?? 0;
800
+ const knowledgeCount = trajectories.filter((t) => t.knowledgeRecorded).length;
801
+ const duration = trajectories.reduce((s, t) => s + t.durationMs, 0);
802
+ console.log(`║ ║`);
803
+ console.log(`║ --preset ${preset.padEnd(6)} (${String(toolCount).padStart(3)} tools, ~${String(tokens).padStart(5)} schema tokens) ║`);
804
+ console.log(`║ Phases: ${completedPhases}/${totalPhases} completed ║`);
805
+ console.log(`║ Tools: ${totalCalls} called, ${totalMissing} missing, ${totalErrors} errors ║`);
806
+ console.log(`║ Knowledge: ${knowledgeCount}/8 scenarios recorded learnings ║`);
807
+ console.log(`║ Duration: ${duration}ms total ║`);
808
+ }
809
+ // Per-scenario breakdown
810
+ console.log("║ ║");
811
+ console.log("╠══════════════════════════════════════════════════════════════════════════╣");
812
+ console.log("║ PER-SCENARIO BREAKDOWN ║");
813
+ console.log("╠══════════════════════════════════════════════════════════════════════════╣");
814
+ for (const scenario of SCENARIOS) {
815
+ console.log(`║ ║`);
816
+ console.log(`║ ${scenario.id.padEnd(40)} [${scenario.gapFilled}] ║`);
817
+ for (const preset of PRESET_NAMES) {
818
+ const t = allTrajectories.find((tr) => tr.preset === preset && tr.scenarioId === scenario.id);
819
+ if (t) {
820
+ const status = t.phasesCompleted === t.phases.length ? "PASS" : `${t.phasesCompleted}/${t.phases.length}`;
821
+ console.log(`║ ${preset.padEnd(6)}: ${status.padEnd(6)} | calls=${String(t.totalToolCalls).padStart(3)} missing=${String(t.totalToolMissing).padStart(2)} | ${t.durationMs}ms ║`);
822
+ }
823
+ }
824
+ }
825
+ // Recommendations
826
+ console.log("║ ║");
827
+ console.log("╠══════════════════════════════════════════════════════════════════════════╣");
828
+ console.log("║ RECOMMENDATIONS ║");
829
+ console.log("╠══════════════════════════════════════════════════════════════════════════╣");
830
+ console.log("║ ║");
831
+ console.log("║ Discovery-first / new agents → --preset meta (self-escalate) ║");
832
+ console.log("║ Solo dev, bug fixes, features → --preset lite (fast, core coverage) ║");
833
+ console.log("║ Research + multi-agent teams → --preset core (full methodology) ║");
834
+ console.log("║ Full pipeline + all domains → --preset full (zero blind spots) ║");
835
+ console.log("║ ║");
836
+ console.log("╚══════════════════════════════════════════════════════════════════════════╝");
837
+ });
838
+ });
839
+ //# sourceMappingURL=presetRealWorldBench.test.js.map