@claude-flow/cli 3.6.30 → 3.7.0-alpha.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (492) hide show
  1. package/.claude/agents/analysis/analyze-code-quality.md +178 -178
  2. package/.claude/agents/analysis/code-analyzer.md +209 -209
  3. package/.claude/agents/analysis/code-review/analyze-code-quality.md +178 -178
  4. package/.claude/agents/architecture/arch-system-design.md +156 -156
  5. package/.claude/agents/architecture/system-design/arch-system-design.md +154 -154
  6. package/.claude/agents/browser/browser-agent.yaml +182 -182
  7. package/.claude/agents/consensus/byzantine-coordinator.md +62 -62
  8. package/.claude/agents/consensus/crdt-synchronizer.md +996 -996
  9. package/.claude/agents/consensus/gossip-coordinator.md +62 -62
  10. package/.claude/agents/consensus/performance-benchmarker.md +850 -850
  11. package/.claude/agents/consensus/quorum-manager.md +822 -822
  12. package/.claude/agents/consensus/raft-manager.md +62 -62
  13. package/.claude/agents/consensus/security-manager.md +621 -621
  14. package/.claude/agents/core/coder.md +452 -452
  15. package/.claude/agents/core/planner.md +374 -374
  16. package/.claude/agents/core/researcher.md +368 -368
  17. package/.claude/agents/core/reviewer.md +519 -519
  18. package/.claude/agents/core/tester.md +511 -511
  19. package/.claude/agents/custom/test-long-runner.md +44 -44
  20. package/.claude/agents/data/data-ml-model.md +444 -444
  21. package/.claude/agents/data/ml/data-ml-model.md +192 -192
  22. package/.claude/agents/development/backend/dev-backend-api.md +141 -141
  23. package/.claude/agents/development/dev-backend-api.md +344 -344
  24. package/.claude/agents/devops/ci-cd/ops-cicd-github.md +163 -163
  25. package/.claude/agents/devops/ops-cicd-github.md +164 -164
  26. package/.claude/agents/documentation/api-docs/docs-api-openapi.md +173 -173
  27. package/.claude/agents/documentation/docs-api-openapi.md +354 -354
  28. package/.claude/agents/flow-nexus/app-store.md +87 -87
  29. package/.claude/agents/flow-nexus/authentication.md +68 -68
  30. package/.claude/agents/flow-nexus/challenges.md +80 -80
  31. package/.claude/agents/flow-nexus/neural-network.md +87 -87
  32. package/.claude/agents/flow-nexus/payments.md +82 -82
  33. package/.claude/agents/flow-nexus/sandbox.md +75 -75
  34. package/.claude/agents/flow-nexus/swarm.md +75 -75
  35. package/.claude/agents/flow-nexus/user-tools.md +95 -95
  36. package/.claude/agents/flow-nexus/workflow.md +83 -83
  37. package/.claude/agents/github/code-review-swarm.md +377 -377
  38. package/.claude/agents/github/github-modes.md +172 -172
  39. package/.claude/agents/github/issue-tracker.md +575 -575
  40. package/.claude/agents/github/multi-repo-swarm.md +552 -552
  41. package/.claude/agents/github/pr-manager.md +437 -437
  42. package/.claude/agents/github/project-board-sync.md +508 -508
  43. package/.claude/agents/github/release-manager.md +604 -604
  44. package/.claude/agents/github/release-swarm.md +582 -582
  45. package/.claude/agents/github/repo-architect.md +397 -397
  46. package/.claude/agents/github/swarm-issue.md +572 -572
  47. package/.claude/agents/github/swarm-pr.md +427 -427
  48. package/.claude/agents/github/sync-coordinator.md +451 -451
  49. package/.claude/agents/github/workflow-automation.md +902 -902
  50. package/.claude/agents/goal/agent.md +815 -815
  51. package/.claude/agents/goal/goal-planner.md +72 -72
  52. package/.claude/agents/optimization/benchmark-suite.md +664 -664
  53. package/.claude/agents/optimization/load-balancer.md +430 -430
  54. package/.claude/agents/optimization/performance-monitor.md +671 -671
  55. package/.claude/agents/optimization/resource-allocator.md +673 -673
  56. package/.claude/agents/optimization/topology-optimizer.md +807 -807
  57. package/.claude/agents/payments/agentic-payments.md +126 -126
  58. package/.claude/agents/sona/sona-learning-optimizer.md +74 -74
  59. package/.claude/agents/sparc/architecture.md +698 -698
  60. package/.claude/agents/sparc/pseudocode.md +519 -519
  61. package/.claude/agents/sparc/refinement.md +801 -801
  62. package/.claude/agents/sparc/specification.md +477 -477
  63. package/.claude/agents/specialized/mobile/spec-mobile-react-native.md +224 -224
  64. package/.claude/agents/specialized/spec-mobile-react-native.md +226 -226
  65. package/.claude/agents/sublinear/consensus-coordinator.md +337 -337
  66. package/.claude/agents/sublinear/matrix-optimizer.md +184 -184
  67. package/.claude/agents/sublinear/pagerank-analyzer.md +298 -298
  68. package/.claude/agents/sublinear/performance-optimizer.md +367 -367
  69. package/.claude/agents/sublinear/trading-predictor.md +245 -245
  70. package/.claude/agents/swarm/adaptive-coordinator.md +1126 -1126
  71. package/.claude/agents/swarm/hierarchical-coordinator.md +709 -709
  72. package/.claude/agents/swarm/mesh-coordinator.md +962 -962
  73. package/.claude/agents/templates/automation-smart-agent.md +204 -204
  74. package/.claude/agents/templates/base-template-generator.md +289 -289
  75. package/.claude/agents/templates/coordinator-swarm-init.md +89 -89
  76. package/.claude/agents/templates/github-pr-manager.md +176 -176
  77. package/.claude/agents/templates/implementer-sparc-coder.md +258 -258
  78. package/.claude/agents/templates/memory-coordinator.md +186 -186
  79. package/.claude/agents/templates/orchestrator-task.md +138 -138
  80. package/.claude/agents/templates/performance-analyzer.md +198 -198
  81. package/.claude/agents/templates/sparc-coordinator.md +513 -513
  82. package/.claude/agents/testing/production-validator.md +394 -394
  83. package/.claude/agents/testing/tdd-london-swarm.md +243 -243
  84. package/.claude/agents/v3/adr-architect.md +184 -184
  85. package/.claude/agents/v3/aidefence-guardian.md +282 -282
  86. package/.claude/agents/v3/claims-authorizer.md +208 -208
  87. package/.claude/agents/v3/collective-intelligence-coordinator.md +993 -993
  88. package/.claude/agents/v3/ddd-domain-expert.md +220 -220
  89. package/.claude/agents/v3/injection-analyst.md +236 -236
  90. package/.claude/agents/v3/memory-specialist.md +995 -995
  91. package/.claude/agents/v3/performance-engineer.md +1233 -1233
  92. package/.claude/agents/v3/pii-detector.md +151 -151
  93. package/.claude/agents/v3/reasoningbank-learner.md +213 -213
  94. package/.claude/agents/v3/security-architect-aidefence.md +410 -410
  95. package/.claude/agents/v3/security-architect.md +867 -867
  96. package/.claude/agents/v3/security-auditor.md +771 -771
  97. package/.claude/agents/v3/sparc-orchestrator.md +182 -182
  98. package/.claude/agents/v3/swarm-memory-manager.md +157 -157
  99. package/.claude/agents/v3/v3-integration-architect.md +205 -205
  100. package/.claude/commands/agents/README.md +50 -50
  101. package/.claude/commands/agents/agent-capabilities.md +140 -140
  102. package/.claude/commands/agents/agent-coordination.md +28 -28
  103. package/.claude/commands/agents/agent-spawning.md +28 -28
  104. package/.claude/commands/agents/agent-types.md +216 -216
  105. package/.claude/commands/agents/health.md +139 -139
  106. package/.claude/commands/agents/list.md +100 -100
  107. package/.claude/commands/agents/logs.md +130 -130
  108. package/.claude/commands/agents/metrics.md +122 -122
  109. package/.claude/commands/agents/pool.md +127 -127
  110. package/.claude/commands/agents/spawn.md +140 -140
  111. package/.claude/commands/agents/status.md +115 -115
  112. package/.claude/commands/agents/stop.md +102 -102
  113. package/.claude/commands/analysis/COMMAND_COMPLIANCE_REPORT.md +53 -53
  114. package/.claude/commands/analysis/README.md +9 -9
  115. package/.claude/commands/analysis/bottleneck-detect.md +162 -162
  116. package/.claude/commands/analysis/performance-bottlenecks.md +58 -58
  117. package/.claude/commands/analysis/performance-report.md +25 -25
  118. package/.claude/commands/analysis/token-efficiency.md +44 -44
  119. package/.claude/commands/analysis/token-usage.md +25 -25
  120. package/.claude/commands/automation/README.md +9 -9
  121. package/.claude/commands/automation/auto-agent.md +122 -122
  122. package/.claude/commands/automation/self-healing.md +105 -105
  123. package/.claude/commands/automation/session-memory.md +89 -89
  124. package/.claude/commands/automation/smart-agents.md +72 -72
  125. package/.claude/commands/automation/smart-spawn.md +25 -25
  126. package/.claude/commands/automation/workflow-select.md +25 -25
  127. package/.claude/commands/claude-flow-help.md +103 -103
  128. package/.claude/commands/claude-flow-memory.md +107 -107
  129. package/.claude/commands/claude-flow-swarm.md +205 -205
  130. package/.claude/commands/coordination/README.md +9 -9
  131. package/.claude/commands/coordination/agent-spawn.md +25 -25
  132. package/.claude/commands/coordination/init.md +44 -44
  133. package/.claude/commands/coordination/orchestrate.md +43 -43
  134. package/.claude/commands/coordination/spawn.md +45 -45
  135. package/.claude/commands/coordination/swarm-init.md +85 -85
  136. package/.claude/commands/coordination/task-orchestrate.md +25 -25
  137. package/.claude/commands/flow-nexus/app-store.md +123 -123
  138. package/.claude/commands/flow-nexus/challenges.md +119 -119
  139. package/.claude/commands/flow-nexus/login-registration.md +64 -64
  140. package/.claude/commands/flow-nexus/neural-network.md +133 -133
  141. package/.claude/commands/flow-nexus/payments.md +115 -115
  142. package/.claude/commands/flow-nexus/sandbox.md +82 -82
  143. package/.claude/commands/flow-nexus/swarm.md +86 -86
  144. package/.claude/commands/flow-nexus/user-tools.md +151 -151
  145. package/.claude/commands/flow-nexus/workflow.md +114 -114
  146. package/.claude/commands/github/README.md +11 -11
  147. package/.claude/commands/github/code-review-swarm.md +513 -513
  148. package/.claude/commands/github/code-review.md +25 -25
  149. package/.claude/commands/github/github-modes.md +146 -146
  150. package/.claude/commands/github/github-swarm.md +121 -121
  151. package/.claude/commands/github/issue-tracker.md +291 -291
  152. package/.claude/commands/github/issue-triage.md +25 -25
  153. package/.claude/commands/github/multi-repo-swarm.md +518 -518
  154. package/.claude/commands/github/pr-enhance.md +26 -26
  155. package/.claude/commands/github/pr-manager.md +169 -169
  156. package/.claude/commands/github/project-board-sync.md +470 -470
  157. package/.claude/commands/github/release-manager.md +337 -337
  158. package/.claude/commands/github/release-swarm.md +543 -543
  159. package/.claude/commands/github/repo-analyze.md +25 -25
  160. package/.claude/commands/github/repo-architect.md +366 -366
  161. package/.claude/commands/github/swarm-issue.md +481 -481
  162. package/.claude/commands/github/swarm-pr.md +284 -284
  163. package/.claude/commands/github/sync-coordinator.md +300 -300
  164. package/.claude/commands/github/workflow-automation.md +441 -441
  165. package/.claude/commands/hive-mind/README.md +17 -17
  166. package/.claude/commands/hive-mind/hive-mind-consensus.md +8 -8
  167. package/.claude/commands/hive-mind/hive-mind-init.md +18 -18
  168. package/.claude/commands/hive-mind/hive-mind-memory.md +8 -8
  169. package/.claude/commands/hive-mind/hive-mind-metrics.md +8 -8
  170. package/.claude/commands/hive-mind/hive-mind-resume.md +8 -8
  171. package/.claude/commands/hive-mind/hive-mind-sessions.md +8 -8
  172. package/.claude/commands/hive-mind/hive-mind-spawn.md +21 -21
  173. package/.claude/commands/hive-mind/hive-mind-status.md +8 -8
  174. package/.claude/commands/hive-mind/hive-mind-stop.md +8 -8
  175. package/.claude/commands/hive-mind/hive-mind-wizard.md +8 -8
  176. package/.claude/commands/hive-mind/hive-mind.md +27 -27
  177. package/.claude/commands/hooks/README.md +11 -11
  178. package/.claude/commands/hooks/overview.md +57 -57
  179. package/.claude/commands/hooks/post-edit.md +117 -117
  180. package/.claude/commands/hooks/post-task.md +112 -112
  181. package/.claude/commands/hooks/pre-edit.md +113 -113
  182. package/.claude/commands/hooks/pre-task.md +111 -111
  183. package/.claude/commands/hooks/session-end.md +118 -118
  184. package/.claude/commands/hooks/setup.md +102 -102
  185. package/.claude/commands/memory/README.md +9 -9
  186. package/.claude/commands/memory/memory-persist.md +25 -25
  187. package/.claude/commands/memory/memory-search.md +25 -25
  188. package/.claude/commands/memory/memory-usage.md +25 -25
  189. package/.claude/commands/memory/neural.md +47 -47
  190. package/.claude/commands/monitoring/README.md +9 -9
  191. package/.claude/commands/monitoring/agent-metrics.md +25 -25
  192. package/.claude/commands/monitoring/agents.md +44 -44
  193. package/.claude/commands/monitoring/real-time-view.md +25 -25
  194. package/.claude/commands/monitoring/status.md +46 -46
  195. package/.claude/commands/monitoring/swarm-monitor.md +25 -25
  196. package/.claude/commands/optimization/README.md +9 -9
  197. package/.claude/commands/optimization/auto-topology.md +61 -61
  198. package/.claude/commands/optimization/cache-manage.md +25 -25
  199. package/.claude/commands/optimization/parallel-execute.md +25 -25
  200. package/.claude/commands/optimization/parallel-execution.md +49 -49
  201. package/.claude/commands/optimization/topology-optimize.md +25 -25
  202. package/.claude/commands/pair/README.md +260 -260
  203. package/.claude/commands/pair/commands.md +545 -545
  204. package/.claude/commands/pair/config.md +509 -509
  205. package/.claude/commands/pair/examples.md +511 -511
  206. package/.claude/commands/pair/modes.md +347 -347
  207. package/.claude/commands/pair/session.md +406 -406
  208. package/.claude/commands/pair/start.md +208 -208
  209. package/.claude/commands/sparc/analyzer.md +51 -51
  210. package/.claude/commands/sparc/architect.md +53 -53
  211. package/.claude/commands/sparc/ask.md +97 -97
  212. package/.claude/commands/sparc/batch-executor.md +54 -54
  213. package/.claude/commands/sparc/code.md +89 -89
  214. package/.claude/commands/sparc/coder.md +54 -54
  215. package/.claude/commands/sparc/debug.md +83 -83
  216. package/.claude/commands/sparc/debugger.md +54 -54
  217. package/.claude/commands/sparc/designer.md +53 -53
  218. package/.claude/commands/sparc/devops.md +109 -109
  219. package/.claude/commands/sparc/docs-writer.md +80 -80
  220. package/.claude/commands/sparc/documenter.md +54 -54
  221. package/.claude/commands/sparc/innovator.md +54 -54
  222. package/.claude/commands/sparc/integration.md +83 -83
  223. package/.claude/commands/sparc/mcp.md +117 -117
  224. package/.claude/commands/sparc/memory-manager.md +54 -54
  225. package/.claude/commands/sparc/optimizer.md +54 -54
  226. package/.claude/commands/sparc/orchestrator.md +131 -131
  227. package/.claude/commands/sparc/post-deployment-monitoring-mode.md +83 -83
  228. package/.claude/commands/sparc/refinement-optimization-mode.md +83 -83
  229. package/.claude/commands/sparc/researcher.md +54 -54
  230. package/.claude/commands/sparc/reviewer.md +54 -54
  231. package/.claude/commands/sparc/security-review.md +80 -80
  232. package/.claude/commands/sparc/sparc-modes.md +174 -174
  233. package/.claude/commands/sparc/sparc.md +111 -111
  234. package/.claude/commands/sparc/spec-pseudocode.md +80 -80
  235. package/.claude/commands/sparc/supabase-admin.md +348 -348
  236. package/.claude/commands/sparc/swarm-coordinator.md +54 -54
  237. package/.claude/commands/sparc/tdd.md +54 -54
  238. package/.claude/commands/sparc/tester.md +54 -54
  239. package/.claude/commands/sparc/tutorial.md +79 -79
  240. package/.claude/commands/sparc/workflow-manager.md +54 -54
  241. package/.claude/commands/sparc.md +166 -166
  242. package/.claude/commands/stream-chain/pipeline.md +120 -120
  243. package/.claude/commands/stream-chain/run.md +69 -69
  244. package/.claude/commands/swarm/README.md +15 -15
  245. package/.claude/commands/swarm/analysis.md +95 -95
  246. package/.claude/commands/swarm/development.md +96 -96
  247. package/.claude/commands/swarm/examples.md +168 -168
  248. package/.claude/commands/swarm/maintenance.md +102 -102
  249. package/.claude/commands/swarm/optimization.md +117 -117
  250. package/.claude/commands/swarm/research.md +136 -136
  251. package/.claude/commands/swarm/swarm-analysis.md +8 -8
  252. package/.claude/commands/swarm/swarm-background.md +8 -8
  253. package/.claude/commands/swarm/swarm-init.md +19 -19
  254. package/.claude/commands/swarm/swarm-modes.md +8 -8
  255. package/.claude/commands/swarm/swarm-monitor.md +8 -8
  256. package/.claude/commands/swarm/swarm-spawn.md +19 -19
  257. package/.claude/commands/swarm/swarm-status.md +8 -8
  258. package/.claude/commands/swarm/swarm-strategies.md +8 -8
  259. package/.claude/commands/swarm/swarm.md +87 -87
  260. package/.claude/commands/swarm/testing.md +131 -131
  261. package/.claude/commands/training/README.md +9 -9
  262. package/.claude/commands/training/model-update.md +25 -25
  263. package/.claude/commands/training/neural-patterns.md +107 -107
  264. package/.claude/commands/training/neural-train.md +75 -75
  265. package/.claude/commands/training/pattern-learn.md +25 -25
  266. package/.claude/commands/training/specialization.md +62 -62
  267. package/.claude/commands/truth/start.md +142 -142
  268. package/.claude/commands/verify/check.md +49 -49
  269. package/.claude/commands/verify/start.md +127 -127
  270. package/.claude/commands/workflows/README.md +9 -9
  271. package/.claude/commands/workflows/development.md +77 -77
  272. package/.claude/commands/workflows/research.md +62 -62
  273. package/.claude/commands/workflows/workflow-create.md +25 -25
  274. package/.claude/commands/workflows/workflow-execute.md +25 -25
  275. package/.claude/commands/workflows/workflow-export.md +25 -25
  276. package/.claude/helpers/README.md +96 -96
  277. package/.claude/helpers/adr-compliance.sh +186 -186
  278. package/.claude/helpers/auto-commit.sh +178 -178
  279. package/.claude/helpers/auto-memory-hook.mjs +368 -368
  280. package/.claude/helpers/checkpoint-manager.sh +251 -251
  281. package/.claude/helpers/daemon-manager.sh +252 -252
  282. package/.claude/helpers/ddd-tracker.sh +144 -144
  283. package/.claude/helpers/github-safe.js +121 -121
  284. package/.claude/helpers/github-setup.sh +28 -28
  285. package/.claude/helpers/guidance-hook.sh +13 -13
  286. package/.claude/helpers/guidance-hooks.sh +102 -102
  287. package/.claude/helpers/health-monitor.sh +108 -108
  288. package/.claude/helpers/hook-handler.cjs +278 -278
  289. package/.claude/helpers/intelligence.cjs +1031 -1031
  290. package/.claude/helpers/learning-hooks.sh +329 -329
  291. package/.claude/helpers/learning-optimizer.sh +127 -127
  292. package/.claude/helpers/learning-service.mjs +1144 -1144
  293. package/.claude/helpers/memory.js +83 -83
  294. package/.claude/helpers/metrics-db.mjs +488 -488
  295. package/.claude/helpers/pattern-consolidator.sh +86 -86
  296. package/.claude/helpers/perf-worker.sh +160 -160
  297. package/.claude/helpers/post-commit +16 -16
  298. package/.claude/helpers/pre-commit +26 -26
  299. package/.claude/helpers/quick-start.sh +19 -19
  300. package/.claude/helpers/router.js +66 -66
  301. package/.claude/helpers/security-scanner.sh +127 -127
  302. package/.claude/helpers/session.js +135 -135
  303. package/.claude/helpers/setup-mcp.sh +18 -18
  304. package/.claude/helpers/standard-checkpoint-hooks.sh +189 -189
  305. package/.claude/helpers/statusline-hook.sh +21 -21
  306. package/.claude/helpers/statusline.cjs +575 -575
  307. package/.claude/helpers/statusline.js +321 -321
  308. package/.claude/helpers/swarm-comms.sh +353 -353
  309. package/.claude/helpers/swarm-hooks.sh +761 -761
  310. package/.claude/helpers/swarm-monitor.sh +210 -210
  311. package/.claude/helpers/sync-v3-metrics.sh +245 -245
  312. package/.claude/helpers/update-v3-progress.sh +165 -165
  313. package/.claude/helpers/v3-quick-status.sh +57 -57
  314. package/.claude/helpers/v3.sh +110 -110
  315. package/.claude/helpers/validate-v3-config.sh +215 -215
  316. package/.claude/helpers/worker-manager.sh +170 -170
  317. package/.claude/settings.json +182 -182
  318. package/.claude/skills/agentdb-advanced/SKILL.md +550 -550
  319. package/.claude/skills/agentdb-learning/SKILL.md +545 -545
  320. package/.claude/skills/agentdb-memory-patterns/SKILL.md +339 -339
  321. package/.claude/skills/agentdb-optimization/SKILL.md +509 -509
  322. package/.claude/skills/agentdb-vector-search/SKILL.md +339 -339
  323. package/.claude/skills/agentic-jujutsu/SKILL.md +645 -645
  324. package/.claude/skills/aidefence-scan.md +151 -151
  325. package/.claude/skills/aidefence.yaml +297 -297
  326. package/.claude/skills/browser/SKILL.md +204 -204
  327. package/.claude/skills/flow-nexus-neural/SKILL.md +738 -738
  328. package/.claude/skills/flow-nexus-platform/SKILL.md +1157 -1157
  329. package/.claude/skills/flow-nexus-swarm/SKILL.md +610 -610
  330. package/.claude/skills/github-code-review/SKILL.md +1140 -1140
  331. package/.claude/skills/github-multi-repo/SKILL.md +874 -874
  332. package/.claude/skills/github-project-management/SKILL.md +1290 -1277
  333. package/.claude/skills/github-release-management/SKILL.md +1081 -1081
  334. package/.claude/skills/github-workflow-automation/SKILL.md +1065 -1065
  335. package/.claude/skills/hive-mind-advanced/SKILL.md +712 -712
  336. package/.claude/skills/hooks-automation/SKILL.md +1201 -1201
  337. package/.claude/skills/pair-programming/SKILL.md +1202 -1202
  338. package/.claude/skills/performance-analysis/SKILL.md +563 -563
  339. package/.claude/skills/reasoningbank-agentdb/SKILL.md +446 -446
  340. package/.claude/skills/reasoningbank-intelligence/SKILL.md +201 -201
  341. package/.claude/skills/secure-review.md +181 -181
  342. package/.claude/skills/skill-builder/SKILL.md +910 -910
  343. package/.claude/skills/sparc-methodology/SKILL.md +1115 -1115
  344. package/.claude/skills/stream-chain/SKILL.md +563 -563
  345. package/.claude/skills/swarm-advanced/SKILL.md +973 -973
  346. package/.claude/skills/swarm-orchestration/SKILL.md +179 -179
  347. package/.claude/skills/v3-cli-modernization/SKILL.md +871 -871
  348. package/.claude/skills/v3-core-implementation/SKILL.md +796 -796
  349. package/.claude/skills/v3-ddd-architecture/SKILL.md +441 -441
  350. package/.claude/skills/v3-integration-deep/SKILL.md +240 -240
  351. package/.claude/skills/v3-mcp-optimization/SKILL.md +776 -776
  352. package/.claude/skills/v3-memory-unification/SKILL.md +173 -173
  353. package/.claude/skills/v3-performance-optimization/SKILL.md +389 -389
  354. package/.claude/skills/v3-security-overhaul/SKILL.md +81 -81
  355. package/.claude/skills/v3-swarm-coordination/SKILL.md +339 -339
  356. package/.claude/skills/verification-quality/SKILL.md +649 -649
  357. package/.claude/skills/worker-benchmarks/skill.md +135 -135
  358. package/.claude/skills/worker-integration/skill.md +154 -154
  359. package/README.md +393 -391
  360. package/bin/cli.js +220 -220
  361. package/bin/mcp-server.js +224 -224
  362. package/bin/preinstall.cjs +2 -2
  363. package/dist/src/commands/agent-wasm.js +2 -2
  364. package/dist/src/commands/agent-wasm.js.map +1 -1
  365. package/dist/src/commands/completions.js +409 -409
  366. package/dist/src/commands/daemon.d.ts.map +1 -1
  367. package/dist/src/commands/daemon.js +19 -3
  368. package/dist/src/commands/daemon.js.map +1 -1
  369. package/dist/src/commands/doctor.d.ts.map +1 -1
  370. package/dist/src/commands/doctor.js +105 -23
  371. package/dist/src/commands/doctor.js.map +1 -1
  372. package/dist/src/commands/embeddings.js +26 -26
  373. package/dist/src/commands/hive-mind.d.ts.map +1 -1
  374. package/dist/src/commands/hive-mind.js +122 -104
  375. package/dist/src/commands/hive-mind.js.map +1 -1
  376. package/dist/src/commands/hooks.d.ts.map +1 -1
  377. package/dist/src/commands/hooks.js +34 -21
  378. package/dist/src/commands/hooks.js.map +1 -1
  379. package/dist/src/commands/memory.d.ts.map +1 -1
  380. package/dist/src/commands/memory.js +68 -0
  381. package/dist/src/commands/memory.js.map +1 -1
  382. package/dist/src/commands/ruvector/backup.js +23 -23
  383. package/dist/src/commands/ruvector/benchmark.js +31 -31
  384. package/dist/src/commands/ruvector/import.js +14 -14
  385. package/dist/src/commands/ruvector/init.js +115 -115
  386. package/dist/src/commands/ruvector/migrate.js +99 -99
  387. package/dist/src/commands/ruvector/optimize.js +51 -51
  388. package/dist/src/commands/ruvector/setup.js +624 -624
  389. package/dist/src/commands/ruvector/status.js +38 -38
  390. package/dist/src/index.d.ts +5 -1
  391. package/dist/src/index.d.ts.map +1 -1
  392. package/dist/src/index.js +59 -18
  393. package/dist/src/index.js.map +1 -1
  394. package/dist/src/init/claudemd-generator.js +226 -226
  395. package/dist/src/init/executor.d.ts.map +1 -1
  396. package/dist/src/init/executor.js +511 -453
  397. package/dist/src/init/executor.js.map +1 -1
  398. package/dist/src/init/helpers-generator.js +645 -645
  399. package/dist/src/init/settings-generator.d.ts.map +1 -1
  400. package/dist/src/init/settings-generator.js +11 -5
  401. package/dist/src/init/settings-generator.js.map +1 -1
  402. package/dist/src/init/statusline-generator.js +858 -858
  403. package/dist/src/init/types.d.ts +7 -0
  404. package/dist/src/init/types.d.ts.map +1 -1
  405. package/dist/src/init/types.js.map +1 -1
  406. package/dist/src/mcp-tools/agentdb-tools.d.ts +3 -0
  407. package/dist/src/mcp-tools/agentdb-tools.d.ts.map +1 -1
  408. package/dist/src/mcp-tools/agentdb-tools.js +108 -0
  409. package/dist/src/mcp-tools/agentdb-tools.js.map +1 -1
  410. package/dist/src/mcp-tools/hooks-tools.d.ts.map +1 -1
  411. package/dist/src/mcp-tools/hooks-tools.js +4 -2
  412. package/dist/src/mcp-tools/hooks-tools.js.map +1 -1
  413. package/dist/src/mcp-tools/memory-tools.d.ts.map +1 -1
  414. package/dist/src/mcp-tools/memory-tools.js +19 -0
  415. package/dist/src/mcp-tools/memory-tools.js.map +1 -1
  416. package/dist/src/mcp-tools/neural-tools.d.ts.map +1 -1
  417. package/dist/src/mcp-tools/neural-tools.js +14 -1
  418. package/dist/src/mcp-tools/neural-tools.js.map +1 -1
  419. package/dist/src/mcp-tools/security-tools.d.ts.map +1 -1
  420. package/dist/src/mcp-tools/security-tools.js +28 -3
  421. package/dist/src/mcp-tools/security-tools.js.map +1 -1
  422. package/dist/src/mcp-tools/swarm-tools.d.ts.map +1 -1
  423. package/dist/src/mcp-tools/swarm-tools.js +72 -3
  424. package/dist/src/mcp-tools/swarm-tools.js.map +1 -1
  425. package/dist/src/mcp-tools/types.d.ts +4 -33
  426. package/dist/src/mcp-tools/types.d.ts.map +1 -1
  427. package/dist/src/mcp-tools/types.js +4 -14
  428. package/dist/src/mcp-tools/types.js.map +1 -1
  429. package/dist/src/mcp-tools/validate-input.d.ts +5 -57
  430. package/dist/src/mcp-tools/validate-input.d.ts.map +1 -1
  431. package/dist/src/mcp-tools/validate-input.js +5 -233
  432. package/dist/src/mcp-tools/validate-input.js.map +1 -1
  433. package/dist/src/mcp-tools/wasm-agent-tools.js +1 -1
  434. package/dist/src/mcp-tools/wasm-agent-tools.js.map +1 -1
  435. package/dist/src/memory/intelligence.d.ts.map +1 -1
  436. package/dist/src/memory/intelligence.js +28 -3
  437. package/dist/src/memory/intelligence.js.map +1 -1
  438. package/dist/src/memory/memory-bridge.d.ts +69 -0
  439. package/dist/src/memory/memory-bridge.d.ts.map +1 -1
  440. package/dist/src/memory/memory-bridge.js +319 -66
  441. package/dist/src/memory/memory-bridge.js.map +1 -1
  442. package/dist/src/memory/memory-initializer.d.ts +5 -0
  443. package/dist/src/memory/memory-initializer.d.ts.map +1 -1
  444. package/dist/src/memory/memory-initializer.js +369 -363
  445. package/dist/src/memory/memory-initializer.js.map +1 -1
  446. package/dist/src/memory/neural-package-bridge.d.ts +48 -0
  447. package/dist/src/memory/neural-package-bridge.d.ts.map +1 -0
  448. package/dist/src/memory/neural-package-bridge.js +87 -0
  449. package/dist/src/memory/neural-package-bridge.js.map +1 -0
  450. package/dist/src/memory/rabitq-index.js +5 -5
  451. package/dist/src/memory/sona-optimizer.d.ts.map +1 -1
  452. package/dist/src/memory/sona-optimizer.js +1 -0
  453. package/dist/src/memory/sona-optimizer.js.map +1 -1
  454. package/dist/src/output.d.ts +6 -130
  455. package/dist/src/output.d.ts.map +1 -1
  456. package/dist/src/output.js +6 -511
  457. package/dist/src/output.js.map +1 -1
  458. package/dist/src/parser.d.ts +9 -0
  459. package/dist/src/parser.d.ts.map +1 -1
  460. package/dist/src/parser.js +11 -0
  461. package/dist/src/parser.js.map +1 -1
  462. package/dist/src/runtime/headless.js +28 -28
  463. package/dist/src/ruvector/agent-wasm.d.ts.map +1 -1
  464. package/dist/src/ruvector/agent-wasm.js +4 -1
  465. package/dist/src/ruvector/agent-wasm.js.map +1 -1
  466. package/dist/src/ruvector/index.d.ts +0 -2
  467. package/dist/src/ruvector/index.d.ts.map +1 -1
  468. package/dist/src/ruvector/index.js +8 -2
  469. package/dist/src/ruvector/index.js.map +1 -1
  470. package/dist/src/ruvector/model-router.d.ts +22 -1
  471. package/dist/src/ruvector/model-router.d.ts.map +1 -1
  472. package/dist/src/ruvector/model-router.js +125 -5
  473. package/dist/src/ruvector/model-router.js.map +1 -1
  474. package/dist/src/services/headless-worker-executor.js +84 -84
  475. package/dist/src/transfer/deploy-seraphine.js +23 -23
  476. package/dist/src/types.d.ts +10 -195
  477. package/dist/src/types.d.ts.map +1 -1
  478. package/dist/src/types.js +10 -35
  479. package/dist/src/types.js.map +1 -1
  480. package/dist/tsconfig.tsbuildinfo +1 -1
  481. package/package.json +6 -4
  482. package/scripts/deploy-ipfs-node.sh +153 -153
  483. package/scripts/postinstall.cjs +153 -153
  484. package/scripts/publish-registry.ts +345 -345
  485. package/scripts/publish.sh +57 -57
  486. package/scripts/setup-ipfs-registry.md +366 -366
  487. package/dist/src/services/event-stream.d.ts.map +0 -1
  488. package/dist/src/services/event-stream.js.map +0 -1
  489. package/dist/src/services/loop-worker-runner.d.ts.map +0 -1
  490. package/dist/src/services/loop-worker-runner.js.map +0 -1
  491. package/dist/src/services/runtime-capabilities.d.ts.map +0 -1
  492. package/dist/src/services/runtime-capabilities.js.map +0 -1
@@ -1,851 +1,851 @@
1
- ---
2
- name: performance-benchmarker
3
- type: analyst
4
- color: "#607D8B"
5
- description: Implements comprehensive performance benchmarking for distributed consensus protocols
6
- capabilities:
7
- - throughput_measurement
8
- - latency_analysis
9
- - resource_monitoring
10
- - comparative_analysis
11
- - adaptive_tuning
12
- priority: medium
13
- hooks:
14
- pre: |
15
- echo "📊 Performance Benchmarker analyzing: $TASK"
16
- # Initialize monitoring systems
17
- if [[ "$TASK" == *"benchmark"* ]]; then
18
- echo "⚡ Starting performance metric collection"
19
- fi
20
- post: |
21
- echo "📈 Performance analysis complete"
22
- # Generate performance report
23
- echo "📋 Compiling benchmarking results and recommendations"
24
- ---
25
-
26
- # Performance Benchmarker
27
-
28
- Implements comprehensive performance benchmarking and optimization analysis for distributed consensus protocols.
29
-
30
- ## Core Responsibilities
31
-
32
- 1. **Protocol Benchmarking**: Measure throughput, latency, and scalability across consensus algorithms
33
- 2. **Resource Monitoring**: Track CPU, memory, network, and storage utilization patterns
34
- 3. **Comparative Analysis**: Compare Byzantine, Raft, and Gossip protocol performance
35
- 4. **Adaptive Tuning**: Implement real-time parameter optimization and load balancing
36
- 5. **Performance Reporting**: Generate actionable insights and optimization recommendations
37
-
38
- ## Technical Implementation
39
-
40
- ### Core Benchmarking Framework
41
- ```javascript
42
- class ConsensusPerformanceBenchmarker {
43
- constructor() {
44
- this.benchmarkSuites = new Map();
45
- this.performanceMetrics = new Map();
46
- this.historicalData = new TimeSeriesDatabase();
47
- this.currentBenchmarks = new Set();
48
- this.adaptiveOptimizer = new AdaptiveOptimizer();
49
- this.alertSystem = new PerformanceAlertSystem();
50
- }
51
-
52
- // Register benchmark suite for specific consensus protocol
53
- registerBenchmarkSuite(protocolName, benchmarkConfig) {
54
- const suite = new BenchmarkSuite(protocolName, benchmarkConfig);
55
- this.benchmarkSuites.set(protocolName, suite);
56
-
57
- return suite;
58
- }
59
-
60
- // Execute comprehensive performance benchmarks
61
- async runComprehensiveBenchmarks(protocols, scenarios) {
62
- const results = new Map();
63
-
64
- for (const protocol of protocols) {
65
- const protocolResults = new Map();
66
-
67
- for (const scenario of scenarios) {
68
- console.log(`Running ${scenario.name} benchmark for ${protocol}`);
69
-
70
- const benchmarkResult = await this.executeBenchmarkScenario(
71
- protocol, scenario
72
- );
73
-
74
- protocolResults.set(scenario.name, benchmarkResult);
75
-
76
- // Store in historical database
77
- await this.historicalData.store({
78
- protocol: protocol,
79
- scenario: scenario.name,
80
- timestamp: Date.now(),
81
- metrics: benchmarkResult
82
- });
83
- }
84
-
85
- results.set(protocol, protocolResults);
86
- }
87
-
88
- // Generate comparative analysis
89
- const analysis = await this.generateComparativeAnalysis(results);
90
-
91
- // Trigger adaptive optimizations
92
- await this.adaptiveOptimizer.optimizeBasedOnResults(results);
93
-
94
- return {
95
- benchmarkResults: results,
96
- comparativeAnalysis: analysis,
97
- recommendations: await this.generateOptimizationRecommendations(results)
98
- };
99
- }
100
-
101
- async executeBenchmarkScenario(protocol, scenario) {
102
- const benchmark = this.benchmarkSuites.get(protocol);
103
- if (!benchmark) {
104
- throw new Error(`No benchmark suite found for protocol: ${protocol}`);
105
- }
106
-
107
- // Initialize benchmark environment
108
- const environment = await this.setupBenchmarkEnvironment(scenario);
109
-
110
- try {
111
- // Pre-benchmark setup
112
- await benchmark.setup(environment);
113
-
114
- // Execute benchmark phases
115
- const results = {
116
- throughput: await this.measureThroughput(benchmark, scenario),
117
- latency: await this.measureLatency(benchmark, scenario),
118
- resourceUsage: await this.measureResourceUsage(benchmark, scenario),
119
- scalability: await this.measureScalability(benchmark, scenario),
120
- faultTolerance: await this.measureFaultTolerance(benchmark, scenario)
121
- };
122
-
123
- // Post-benchmark analysis
124
- results.analysis = await this.analyzeBenchmarkResults(results);
125
-
126
- return results;
127
-
128
- } finally {
129
- // Cleanup benchmark environment
130
- await this.cleanupBenchmarkEnvironment(environment);
131
- }
132
- }
133
- }
134
- ```
135
-
136
- ### Throughput Measurement System
137
- ```javascript
138
- class ThroughputBenchmark {
139
- constructor(protocol, configuration) {
140
- this.protocol = protocol;
141
- this.config = configuration;
142
- this.metrics = new MetricsCollector();
143
- this.loadGenerator = new LoadGenerator();
144
- }
145
-
146
- async measureThroughput(scenario) {
147
- const measurements = [];
148
- const duration = scenario.duration || 60000; // 1 minute default
149
- const startTime = Date.now();
150
-
151
- // Initialize load generator
152
- await this.loadGenerator.initialize({
153
- requestRate: scenario.initialRate || 10,
154
- rampUp: scenario.rampUp || false,
155
- pattern: scenario.pattern || 'constant'
156
- });
157
-
158
- // Start metrics collection
159
- this.metrics.startCollection(['transactions_per_second', 'success_rate']);
160
-
161
- let currentRate = scenario.initialRate || 10;
162
- const rateIncrement = scenario.rateIncrement || 5;
163
- const measurementInterval = 5000; // 5 seconds
164
-
165
- while (Date.now() - startTime < duration) {
166
- const intervalStart = Date.now();
167
-
168
- // Generate load for this interval
169
- const transactions = await this.generateTransactionLoad(
170
- currentRate, measurementInterval
171
- );
172
-
173
- // Measure throughput for this interval
174
- const intervalMetrics = await this.measureIntervalThroughput(
175
- transactions, measurementInterval
176
- );
177
-
178
- measurements.push({
179
- timestamp: intervalStart,
180
- requestRate: currentRate,
181
- actualThroughput: intervalMetrics.throughput,
182
- successRate: intervalMetrics.successRate,
183
- averageLatency: intervalMetrics.averageLatency,
184
- p95Latency: intervalMetrics.p95Latency,
185
- p99Latency: intervalMetrics.p99Latency
186
- });
187
-
188
- // Adaptive rate adjustment
189
- if (scenario.rampUp && intervalMetrics.successRate > 0.95) {
190
- currentRate += rateIncrement;
191
- } else if (intervalMetrics.successRate < 0.8) {
192
- currentRate = Math.max(1, currentRate - rateIncrement);
193
- }
194
-
195
- // Wait for next interval
196
- const elapsed = Date.now() - intervalStart;
197
- if (elapsed < measurementInterval) {
198
- await this.sleep(measurementInterval - elapsed);
199
- }
200
- }
201
-
202
- // Stop metrics collection
203
- this.metrics.stopCollection();
204
-
205
- // Analyze throughput results
206
- return this.analyzeThroughputMeasurements(measurements);
207
- }
208
-
209
- async generateTransactionLoad(rate, duration) {
210
- const transactions = [];
211
- const interval = 1000 / rate; // Interval between transactions in ms
212
- const endTime = Date.now() + duration;
213
-
214
- while (Date.now() < endTime) {
215
- const transactionStart = Date.now();
216
-
217
- const transaction = {
218
- id: `tx_${Date.now()}_${Math.random()}`,
219
- type: this.getRandomTransactionType(),
220
- data: this.generateTransactionData(),
221
- timestamp: transactionStart
222
- };
223
-
224
- // Submit transaction to consensus protocol
225
- const promise = this.protocol.submitTransaction(transaction)
226
- .then(result => ({
227
- ...transaction,
228
- result: result,
229
- latency: Date.now() - transactionStart,
230
- success: result.committed === true
231
- }))
232
- .catch(error => ({
233
- ...transaction,
234
- error: error,
235
- latency: Date.now() - transactionStart,
236
- success: false
237
- }));
238
-
239
- transactions.push(promise);
240
-
241
- // Wait for next transaction interval
242
- await this.sleep(interval);
243
- }
244
-
245
- // Wait for all transactions to complete
246
- return await Promise.all(transactions);
247
- }
248
-
249
- analyzeThroughputMeasurements(measurements) {
250
- const totalMeasurements = measurements.length;
251
- const avgThroughput = measurements.reduce((sum, m) => sum + m.actualThroughput, 0) / totalMeasurements;
252
- const maxThroughput = Math.max(...measurements.map(m => m.actualThroughput));
253
- const avgSuccessRate = measurements.reduce((sum, m) => sum + m.successRate, 0) / totalMeasurements;
254
-
255
- // Find optimal operating point (highest throughput with >95% success rate)
256
- const optimalPoints = measurements.filter(m => m.successRate >= 0.95);
257
- const optimalThroughput = optimalPoints.length > 0 ?
258
- Math.max(...optimalPoints.map(m => m.actualThroughput)) : 0;
259
-
260
- return {
261
- averageThroughput: avgThroughput,
262
- maxThroughput: maxThroughput,
263
- optimalThroughput: optimalThroughput,
264
- averageSuccessRate: avgSuccessRate,
265
- measurements: measurements,
266
- sustainableThroughput: this.calculateSustainableThroughput(measurements),
267
- throughputVariability: this.calculateThroughputVariability(measurements)
268
- };
269
- }
270
-
271
- calculateSustainableThroughput(measurements) {
272
- // Find the highest throughput that can be sustained for >80% of the time
273
- const sortedThroughputs = measurements.map(m => m.actualThroughput).sort((a, b) => b - a);
274
- const p80Index = Math.floor(sortedThroughputs.length * 0.2);
275
- return sortedThroughputs[p80Index];
276
- }
277
- }
278
- ```
279
-
280
- ### Latency Analysis System
281
- ```javascript
282
- class LatencyBenchmark {
283
- constructor(protocol, configuration) {
284
- this.protocol = protocol;
285
- this.config = configuration;
286
- this.latencyHistogram = new LatencyHistogram();
287
- this.percentileCalculator = new PercentileCalculator();
288
- }
289
-
290
- async measureLatency(scenario) {
291
- const measurements = [];
292
- const sampleSize = scenario.sampleSize || 10000;
293
- const warmupSize = scenario.warmupSize || 1000;
294
-
295
- console.log(`Measuring latency with ${sampleSize} samples (${warmupSize} warmup)`);
296
-
297
- // Warmup phase
298
- await this.performWarmup(warmupSize);
299
-
300
- // Measurement phase
301
- for (let i = 0; i < sampleSize; i++) {
302
- const latencyMeasurement = await this.measureSingleTransactionLatency();
303
- measurements.push(latencyMeasurement);
304
-
305
- // Progress reporting
306
- if (i % 1000 === 0) {
307
- console.log(`Completed ${i}/${sampleSize} latency measurements`);
308
- }
309
- }
310
-
311
- // Analyze latency distribution
312
- return this.analyzeLatencyDistribution(measurements);
313
- }
314
-
315
- async measureSingleTransactionLatency() {
316
- const transaction = {
317
- id: `latency_tx_${Date.now()}_${Math.random()}`,
318
- type: 'benchmark',
319
- data: { value: Math.random() },
320
- phases: {}
321
- };
322
-
323
- // Phase 1: Submission
324
- const submissionStart = performance.now();
325
- const submissionPromise = this.protocol.submitTransaction(transaction);
326
- transaction.phases.submission = performance.now() - submissionStart;
327
-
328
- // Phase 2: Consensus
329
- const consensusStart = performance.now();
330
- const result = await submissionPromise;
331
- transaction.phases.consensus = performance.now() - consensusStart;
332
-
333
- // Phase 3: Application (if applicable)
334
- let applicationLatency = 0;
335
- if (result.applicationTime) {
336
- applicationLatency = result.applicationTime;
337
- }
338
- transaction.phases.application = applicationLatency;
339
-
340
- // Total end-to-end latency
341
- const totalLatency = transaction.phases.submission +
342
- transaction.phases.consensus +
343
- transaction.phases.application;
344
-
345
- return {
346
- transactionId: transaction.id,
347
- totalLatency: totalLatency,
348
- phases: transaction.phases,
349
- success: result.committed === true,
350
- timestamp: Date.now()
351
- };
352
- }
353
-
354
- analyzeLatencyDistribution(measurements) {
355
- const successfulMeasurements = measurements.filter(m => m.success);
356
- const latencies = successfulMeasurements.map(m => m.totalLatency);
357
-
358
- if (latencies.length === 0) {
359
- throw new Error('No successful latency measurements');
360
- }
361
-
362
- // Calculate percentiles
363
- const percentiles = this.percentileCalculator.calculate(latencies, [
364
- 50, 75, 90, 95, 99, 99.9, 99.99
365
- ]);
366
-
367
- // Phase-specific analysis
368
- const phaseAnalysis = this.analyzePhaseLatencies(successfulMeasurements);
369
-
370
- // Latency distribution analysis
371
- const distribution = this.analyzeLatencyHistogram(latencies);
372
-
373
- return {
374
- sampleSize: successfulMeasurements.length,
375
- mean: latencies.reduce((sum, l) => sum + l, 0) / latencies.length,
376
- median: percentiles[50],
377
- standardDeviation: this.calculateStandardDeviation(latencies),
378
- percentiles: percentiles,
379
- phaseAnalysis: phaseAnalysis,
380
- distribution: distribution,
381
- outliers: this.identifyLatencyOutliers(latencies)
382
- };
383
- }
384
-
385
- analyzePhaseLatencies(measurements) {
386
- const phases = ['submission', 'consensus', 'application'];
387
- const phaseAnalysis = {};
388
-
389
- for (const phase of phases) {
390
- const phaseLatencies = measurements.map(m => m.phases[phase]);
391
- const validLatencies = phaseLatencies.filter(l => l > 0);
392
-
393
- if (validLatencies.length > 0) {
394
- phaseAnalysis[phase] = {
395
- mean: validLatencies.reduce((sum, l) => sum + l, 0) / validLatencies.length,
396
- p50: this.percentileCalculator.calculate(validLatencies, [50])[50],
397
- p95: this.percentileCalculator.calculate(validLatencies, [95])[95],
398
- p99: this.percentileCalculator.calculate(validLatencies, [99])[99],
399
- max: Math.max(...validLatencies),
400
- contributionPercent: (validLatencies.reduce((sum, l) => sum + l, 0) /
401
- measurements.reduce((sum, m) => sum + m.totalLatency, 0)) * 100
402
- };
403
- }
404
- }
405
-
406
- return phaseAnalysis;
407
- }
408
- }
409
- ```
410
-
411
- ### Resource Usage Monitor
412
- ```javascript
413
- class ResourceUsageMonitor {
414
- constructor() {
415
- this.monitoringActive = false;
416
- this.samplingInterval = 1000; // 1 second
417
- this.measurements = [];
418
- this.systemMonitor = new SystemMonitor();
419
- }
420
-
421
- async measureResourceUsage(protocol, scenario) {
422
- console.log('Starting resource usage monitoring');
423
-
424
- this.monitoringActive = true;
425
- this.measurements = [];
426
-
427
- // Start monitoring in background
428
- const monitoringPromise = this.startContinuousMonitoring();
429
-
430
- try {
431
- // Execute the benchmark scenario
432
- const benchmarkResult = await this.executeBenchmarkWithMonitoring(
433
- protocol, scenario
434
- );
435
-
436
- // Stop monitoring
437
- this.monitoringActive = false;
438
- await monitoringPromise;
439
-
440
- // Analyze resource usage
441
- const resourceAnalysis = this.analyzeResourceUsage();
442
-
443
- return {
444
- benchmarkResult: benchmarkResult,
445
- resourceUsage: resourceAnalysis
446
- };
447
-
448
- } catch (error) {
449
- this.monitoringActive = false;
450
- throw error;
451
- }
452
- }
453
-
454
- async startContinuousMonitoring() {
455
- while (this.monitoringActive) {
456
- const measurement = await this.collectResourceMeasurement();
457
- this.measurements.push(measurement);
458
-
459
- await this.sleep(this.samplingInterval);
460
- }
461
- }
462
-
463
- async collectResourceMeasurement() {
464
- const timestamp = Date.now();
465
-
466
- // CPU usage
467
- const cpuUsage = await this.systemMonitor.getCPUUsage();
468
-
469
- // Memory usage
470
- const memoryUsage = await this.systemMonitor.getMemoryUsage();
471
-
472
- // Network I/O
473
- const networkIO = await this.systemMonitor.getNetworkIO();
474
-
475
- // Disk I/O
476
- const diskIO = await this.systemMonitor.getDiskIO();
477
-
478
- // Process-specific metrics
479
- const processMetrics = await this.systemMonitor.getProcessMetrics();
480
-
481
- return {
482
- timestamp: timestamp,
483
- cpu: {
484
- totalUsage: cpuUsage.total,
485
- consensusUsage: cpuUsage.process,
486
- loadAverage: cpuUsage.loadAverage,
487
- coreUsage: cpuUsage.cores
488
- },
489
- memory: {
490
- totalUsed: memoryUsage.used,
491
- totalAvailable: memoryUsage.available,
492
- processRSS: memoryUsage.processRSS,
493
- processHeap: memoryUsage.processHeap,
494
- gcStats: memoryUsage.gcStats
495
- },
496
- network: {
497
- bytesIn: networkIO.bytesIn,
498
- bytesOut: networkIO.bytesOut,
499
- packetsIn: networkIO.packetsIn,
500
- packetsOut: networkIO.packetsOut,
501
- connectionsActive: networkIO.connectionsActive
502
- },
503
- disk: {
504
- bytesRead: diskIO.bytesRead,
505
- bytesWritten: diskIO.bytesWritten,
506
- operationsRead: diskIO.operationsRead,
507
- operationsWrite: diskIO.operationsWrite,
508
- queueLength: diskIO.queueLength
509
- },
510
- process: {
511
- consensusThreads: processMetrics.consensusThreads,
512
- fileDescriptors: processMetrics.fileDescriptors,
513
- uptime: processMetrics.uptime
514
- }
515
- };
516
- }
517
-
518
- analyzeResourceUsage() {
519
- if (this.measurements.length === 0) {
520
- return null;
521
- }
522
-
523
- const cpuAnalysis = this.analyzeCPUUsage();
524
- const memoryAnalysis = this.analyzeMemoryUsage();
525
- const networkAnalysis = this.analyzeNetworkUsage();
526
- const diskAnalysis = this.analyzeDiskUsage();
527
-
528
- return {
529
- duration: this.measurements[this.measurements.length - 1].timestamp -
530
- this.measurements[0].timestamp,
531
- sampleCount: this.measurements.length,
532
- cpu: cpuAnalysis,
533
- memory: memoryAnalysis,
534
- network: networkAnalysis,
535
- disk: diskAnalysis,
536
- efficiency: this.calculateResourceEfficiency(),
537
- bottlenecks: this.identifyResourceBottlenecks()
538
- };
539
- }
540
-
541
- analyzeCPUUsage() {
542
- const cpuUsages = this.measurements.map(m => m.cpu.consensusUsage);
543
-
544
- return {
545
- average: cpuUsages.reduce((sum, usage) => sum + usage, 0) / cpuUsages.length,
546
- peak: Math.max(...cpuUsages),
547
- p95: this.calculatePercentile(cpuUsages, 95),
548
- variability: this.calculateStandardDeviation(cpuUsages),
549
- coreUtilization: this.analyzeCoreUtilization(),
550
- trends: this.analyzeCPUTrends()
551
- };
552
- }
553
-
554
- analyzeMemoryUsage() {
555
- const memoryUsages = this.measurements.map(m => m.memory.processRSS);
556
- const heapUsages = this.measurements.map(m => m.memory.processHeap);
557
-
558
- return {
559
- averageRSS: memoryUsages.reduce((sum, usage) => sum + usage, 0) / memoryUsages.length,
560
- peakRSS: Math.max(...memoryUsages),
561
- averageHeap: heapUsages.reduce((sum, usage) => sum + usage, 0) / heapUsages.length,
562
- peakHeap: Math.max(...heapUsages),
563
- memoryLeaks: this.detectMemoryLeaks(),
564
- gcImpact: this.analyzeGCImpact(),
565
- growth: this.calculateMemoryGrowth()
566
- };
567
- }
568
-
569
- identifyResourceBottlenecks() {
570
- const bottlenecks = [];
571
-
572
- // CPU bottleneck detection
573
- const avgCPU = this.measurements.reduce((sum, m) => sum + m.cpu.consensusUsage, 0) /
574
- this.measurements.length;
575
- if (avgCPU > 80) {
576
- bottlenecks.push({
577
- type: 'CPU',
578
- severity: 'HIGH',
579
- description: `High CPU usage (${avgCPU.toFixed(1)}%)`
580
- });
581
- }
582
-
583
- // Memory bottleneck detection
584
- const memoryGrowth = this.calculateMemoryGrowth();
585
- if (memoryGrowth.rate > 1024 * 1024) { // 1MB/s growth
586
- bottlenecks.push({
587
- type: 'MEMORY',
588
- severity: 'MEDIUM',
589
- description: `High memory growth rate (${(memoryGrowth.rate / 1024 / 1024).toFixed(2)} MB/s)`
590
- });
591
- }
592
-
593
- // Network bottleneck detection
594
- const avgNetworkOut = this.measurements.reduce((sum, m) => sum + m.network.bytesOut, 0) /
595
- this.measurements.length;
596
- if (avgNetworkOut > 100 * 1024 * 1024) { // 100 MB/s
597
- bottlenecks.push({
598
- type: 'NETWORK',
599
- severity: 'MEDIUM',
600
- description: `High network output (${(avgNetworkOut / 1024 / 1024).toFixed(2)} MB/s)`
601
- });
602
- }
603
-
604
- return bottlenecks;
605
- }
606
- }
607
- ```
608
-
609
- ### Adaptive Performance Optimizer
610
- ```javascript
611
- class AdaptiveOptimizer {
612
- constructor() {
613
- this.optimizationHistory = new Map();
614
- this.performanceModel = new PerformanceModel();
615
- this.parameterTuner = new ParameterTuner();
616
- this.currentOptimizations = new Map();
617
- }
618
-
619
- async optimizeBasedOnResults(benchmarkResults) {
620
- const optimizations = [];
621
-
622
- for (const [protocol, results] of benchmarkResults) {
623
- const protocolOptimizations = await this.optimizeProtocol(protocol, results);
624
- optimizations.push(...protocolOptimizations);
625
- }
626
-
627
- // Apply optimizations gradually
628
- await this.applyOptimizations(optimizations);
629
-
630
- return optimizations;
631
- }
632
-
633
- async optimizeProtocol(protocol, results) {
634
- const optimizations = [];
635
-
636
- // Analyze performance bottlenecks
637
- const bottlenecks = this.identifyPerformanceBottlenecks(results);
638
-
639
- for (const bottleneck of bottlenecks) {
640
- const optimization = await this.generateOptimization(protocol, bottleneck);
641
- if (optimization) {
642
- optimizations.push(optimization);
643
- }
644
- }
645
-
646
- // Parameter tuning based on performance characteristics
647
- const parameterOptimizations = await this.tuneParameters(protocol, results);
648
- optimizations.push(...parameterOptimizations);
649
-
650
- return optimizations;
651
- }
652
-
653
- identifyPerformanceBottlenecks(results) {
654
- const bottlenecks = [];
655
-
656
- // Throughput bottlenecks
657
- for (const [scenario, result] of results) {
658
- if (result.throughput && result.throughput.optimalThroughput < result.throughput.maxThroughput * 0.8) {
659
- bottlenecks.push({
660
- type: 'THROUGHPUT_DEGRADATION',
661
- scenario: scenario,
662
- severity: 'HIGH',
663
- impact: (result.throughput.maxThroughput - result.throughput.optimalThroughput) /
664
- result.throughput.maxThroughput,
665
- details: result.throughput
666
- });
667
- }
668
-
669
- // Latency bottlenecks
670
- if (result.latency && result.latency.p99 > result.latency.p50 * 10) {
671
- bottlenecks.push({
672
- type: 'LATENCY_TAIL',
673
- scenario: scenario,
674
- severity: 'MEDIUM',
675
- impact: result.latency.p99 / result.latency.p50,
676
- details: result.latency
677
- });
678
- }
679
-
680
- // Resource bottlenecks
681
- if (result.resourceUsage && result.resourceUsage.bottlenecks.length > 0) {
682
- bottlenecks.push({
683
- type: 'RESOURCE_CONSTRAINT',
684
- scenario: scenario,
685
- severity: 'HIGH',
686
- details: result.resourceUsage.bottlenecks
687
- });
688
- }
689
- }
690
-
691
- return bottlenecks;
692
- }
693
-
694
- async generateOptimization(protocol, bottleneck) {
695
- switch (bottleneck.type) {
696
- case 'THROUGHPUT_DEGRADATION':
697
- return await this.optimizeThroughput(protocol, bottleneck);
698
- case 'LATENCY_TAIL':
699
- return await this.optimizeLatency(protocol, bottleneck);
700
- case 'RESOURCE_CONSTRAINT':
701
- return await this.optimizeResourceUsage(protocol, bottleneck);
702
- default:
703
- return null;
704
- }
705
- }
706
-
707
- async optimizeThroughput(protocol, bottleneck) {
708
- const optimizations = [];
709
-
710
- // Batch size optimization
711
- if (protocol === 'raft') {
712
- optimizations.push({
713
- type: 'PARAMETER_ADJUSTMENT',
714
- parameter: 'max_batch_size',
715
- currentValue: await this.getCurrentParameter(protocol, 'max_batch_size'),
716
- recommendedValue: this.calculateOptimalBatchSize(bottleneck.details),
717
- expectedImprovement: '15-25% throughput increase',
718
- confidence: 0.8
719
- });
720
- }
721
-
722
- // Pipelining optimization
723
- if (protocol === 'byzantine') {
724
- optimizations.push({
725
- type: 'FEATURE_ENABLE',
726
- feature: 'request_pipelining',
727
- description: 'Enable request pipelining to improve throughput',
728
- expectedImprovement: '20-30% throughput increase',
729
- confidence: 0.7
730
- });
731
- }
732
-
733
- return optimizations.length > 0 ? optimizations[0] : null;
734
- }
735
-
736
- async tuneParameters(protocol, results) {
737
- const optimizations = [];
738
-
739
- // Use machine learning model to suggest parameter values
740
- const parameterSuggestions = await this.performanceModel.suggestParameters(
741
- protocol, results
742
- );
743
-
744
- for (const suggestion of parameterSuggestions) {
745
- if (suggestion.confidence > 0.6) {
746
- optimizations.push({
747
- type: 'PARAMETER_TUNING',
748
- parameter: suggestion.parameter,
749
- currentValue: suggestion.currentValue,
750
- recommendedValue: suggestion.recommendedValue,
751
- expectedImprovement: suggestion.expectedImprovement,
752
- confidence: suggestion.confidence,
753
- rationale: suggestion.rationale
754
- });
755
- }
756
- }
757
-
758
- return optimizations;
759
- }
760
-
761
- async applyOptimizations(optimizations) {
762
- // Sort by confidence and expected impact
763
- const sortedOptimizations = optimizations.sort((a, b) =>
764
- (b.confidence * parseFloat(b.expectedImprovement)) -
765
- (a.confidence * parseFloat(a.expectedImprovement))
766
- );
767
-
768
- // Apply optimizations gradually
769
- for (const optimization of sortedOptimizations) {
770
- try {
771
- await this.applyOptimization(optimization);
772
-
773
- // Wait and measure impact
774
- await this.sleep(30000); // 30 seconds
775
- const impact = await this.measureOptimizationImpact(optimization);
776
-
777
- if (impact.improvement < 0.05) {
778
- // Revert if improvement is less than 5%
779
- await this.revertOptimization(optimization);
780
- } else {
781
- // Keep optimization and record success
782
- this.recordOptimizationSuccess(optimization, impact);
783
- }
784
-
785
- } catch (error) {
786
- console.error(`Failed to apply optimization:`, error);
787
- await this.revertOptimization(optimization);
788
- }
789
- }
790
- }
791
- }
792
- ```
793
-
794
- ## MCP Integration Hooks
795
-
796
- ### Performance Metrics Storage
797
- ```javascript
798
- // Store comprehensive benchmark results
799
- await this.mcpTools.memory_usage({
800
- action: 'store',
801
- key: `benchmark_results_${protocol}_${Date.now()}`,
802
- value: JSON.stringify({
803
- protocol: protocol,
804
- timestamp: Date.now(),
805
- throughput: throughputResults,
806
- latency: latencyResults,
807
- resourceUsage: resourceResults,
808
- optimizations: appliedOptimizations
809
- }),
810
- namespace: 'performance_benchmarks',
811
- ttl: 604800000 // 7 days
812
- });
813
-
814
- // Real-time performance monitoring
815
- await this.mcpTools.metrics_collect({
816
- components: [
817
- 'consensus_throughput',
818
- 'consensus_latency_p99',
819
- 'cpu_utilization',
820
- 'memory_usage',
821
- 'network_io_rate'
822
- ]
823
- });
824
- ```
825
-
826
- ### Neural Performance Learning
827
- ```javascript
828
- // Learn performance optimization patterns
829
- await this.mcpTools.neural_patterns({
830
- action: 'learn',
831
- operation: 'performance_optimization',
832
- outcome: JSON.stringify({
833
- optimizationType: optimization.type,
834
- performanceGain: measurementResults.improvement,
835
- resourceImpact: measurementResults.resourceDelta,
836
- networkConditions: currentNetworkState
837
- })
838
- });
839
-
840
- // Predict optimal configurations
841
- const configPrediction = await this.mcpTools.neural_predict({
842
- modelId: 'consensus_performance_model',
843
- input: JSON.stringify({
844
- workloadPattern: currentWorkload,
845
- networkTopology: networkState,
846
- resourceConstraints: systemResources
847
- })
848
- });
849
- ```
850
-
1
+ ---
2
+ name: performance-benchmarker
3
+ type: analyst
4
+ color: "#607D8B"
5
+ description: Implements comprehensive performance benchmarking for distributed consensus protocols
6
+ capabilities:
7
+ - throughput_measurement
8
+ - latency_analysis
9
+ - resource_monitoring
10
+ - comparative_analysis
11
+ - adaptive_tuning
12
+ priority: medium
13
+ hooks:
14
+ pre: |
15
+ echo "📊 Performance Benchmarker analyzing: $TASK"
16
+ # Initialize monitoring systems
17
+ if [[ "$TASK" == *"benchmark"* ]]; then
18
+ echo "⚡ Starting performance metric collection"
19
+ fi
20
+ post: |
21
+ echo "📈 Performance analysis complete"
22
+ # Generate performance report
23
+ echo "📋 Compiling benchmarking results and recommendations"
24
+ ---
25
+
26
+ # Performance Benchmarker
27
+
28
+ Implements comprehensive performance benchmarking and optimization analysis for distributed consensus protocols.
29
+
30
+ ## Core Responsibilities
31
+
32
+ 1. **Protocol Benchmarking**: Measure throughput, latency, and scalability across consensus algorithms
33
+ 2. **Resource Monitoring**: Track CPU, memory, network, and storage utilization patterns
34
+ 3. **Comparative Analysis**: Compare Byzantine, Raft, and Gossip protocol performance
35
+ 4. **Adaptive Tuning**: Implement real-time parameter optimization and load balancing
36
+ 5. **Performance Reporting**: Generate actionable insights and optimization recommendations
37
+
38
+ ## Technical Implementation
39
+
40
+ ### Core Benchmarking Framework
41
+ ```javascript
42
+ class ConsensusPerformanceBenchmarker {
43
+ constructor() {
44
+ this.benchmarkSuites = new Map();
45
+ this.performanceMetrics = new Map();
46
+ this.historicalData = new TimeSeriesDatabase();
47
+ this.currentBenchmarks = new Set();
48
+ this.adaptiveOptimizer = new AdaptiveOptimizer();
49
+ this.alertSystem = new PerformanceAlertSystem();
50
+ }
51
+
52
+ // Register benchmark suite for specific consensus protocol
53
+ registerBenchmarkSuite(protocolName, benchmarkConfig) {
54
+ const suite = new BenchmarkSuite(protocolName, benchmarkConfig);
55
+ this.benchmarkSuites.set(protocolName, suite);
56
+
57
+ return suite;
58
+ }
59
+
60
+ // Execute comprehensive performance benchmarks
61
+ async runComprehensiveBenchmarks(protocols, scenarios) {
62
+ const results = new Map();
63
+
64
+ for (const protocol of protocols) {
65
+ const protocolResults = new Map();
66
+
67
+ for (const scenario of scenarios) {
68
+ console.log(`Running ${scenario.name} benchmark for ${protocol}`);
69
+
70
+ const benchmarkResult = await this.executeBenchmarkScenario(
71
+ protocol, scenario
72
+ );
73
+
74
+ protocolResults.set(scenario.name, benchmarkResult);
75
+
76
+ // Store in historical database
77
+ await this.historicalData.store({
78
+ protocol: protocol,
79
+ scenario: scenario.name,
80
+ timestamp: Date.now(),
81
+ metrics: benchmarkResult
82
+ });
83
+ }
84
+
85
+ results.set(protocol, protocolResults);
86
+ }
87
+
88
+ // Generate comparative analysis
89
+ const analysis = await this.generateComparativeAnalysis(results);
90
+
91
+ // Trigger adaptive optimizations
92
+ await this.adaptiveOptimizer.optimizeBasedOnResults(results);
93
+
94
+ return {
95
+ benchmarkResults: results,
96
+ comparativeAnalysis: analysis,
97
+ recommendations: await this.generateOptimizationRecommendations(results)
98
+ };
99
+ }
100
+
101
+ async executeBenchmarkScenario(protocol, scenario) {
102
+ const benchmark = this.benchmarkSuites.get(protocol);
103
+ if (!benchmark) {
104
+ throw new Error(`No benchmark suite found for protocol: ${protocol}`);
105
+ }
106
+
107
+ // Initialize benchmark environment
108
+ const environment = await this.setupBenchmarkEnvironment(scenario);
109
+
110
+ try {
111
+ // Pre-benchmark setup
112
+ await benchmark.setup(environment);
113
+
114
+ // Execute benchmark phases
115
+ const results = {
116
+ throughput: await this.measureThroughput(benchmark, scenario),
117
+ latency: await this.measureLatency(benchmark, scenario),
118
+ resourceUsage: await this.measureResourceUsage(benchmark, scenario),
119
+ scalability: await this.measureScalability(benchmark, scenario),
120
+ faultTolerance: await this.measureFaultTolerance(benchmark, scenario)
121
+ };
122
+
123
+ // Post-benchmark analysis
124
+ results.analysis = await this.analyzeBenchmarkResults(results);
125
+
126
+ return results;
127
+
128
+ } finally {
129
+ // Cleanup benchmark environment
130
+ await this.cleanupBenchmarkEnvironment(environment);
131
+ }
132
+ }
133
+ }
134
+ ```
135
+
136
+ ### Throughput Measurement System
137
+ ```javascript
138
/**
 * Benchmarks sustained transaction throughput of a consensus protocol.
 *
 * Drives the protocol with a configurable transaction load, samples
 * throughput and latency in 5-second intervals, adaptively raises or lowers
 * the request rate based on the observed success rate, and summarizes the
 * run (average, peak, optimal, and sustainable throughput).
 */
class ThroughputBenchmark {
  /**
   * @param {object} protocol - Consensus adapter; must expose
   *   `submitTransaction(tx)` resolving to `{ committed: boolean, ... }`.
   * @param {object} configuration - Benchmark configuration (opaque here).
   */
  constructor(protocol, configuration) {
    this.protocol = protocol;
    this.config = configuration;
    // NOTE(review): MetricsCollector / LoadGenerator are assumed to be
    // provided by the surrounding module — confirm before running standalone.
    this.metrics = new MetricsCollector();
    this.loadGenerator = new LoadGenerator();
  }

  /**
   * Run the throughput benchmark for a scenario.
   *
   * @param {object} scenario - Optional fields: duration (ms), initialRate,
   *   rampUp (boolean), pattern, rateIncrement. Defaults are applied for
   *   missing fields.
   * @returns {Promise<object>} Summary from analyzeThroughputMeasurements.
   */
  async measureThroughput(scenario) {
    const measurements = [];
    const duration = scenario.duration || 60000; // 1 minute default
    const startTime = Date.now();

    // Initialize load generator with the scenario's load profile.
    await this.loadGenerator.initialize({
      requestRate: scenario.initialRate || 10,
      rampUp: scenario.rampUp || false,
      pattern: scenario.pattern || 'constant'
    });

    // Start metrics collection for the whole run.
    this.metrics.startCollection(['transactions_per_second', 'success_rate']);

    let currentRate = scenario.initialRate || 10;
    const rateIncrement = scenario.rateIncrement || 5;
    const measurementInterval = 5000; // 5 seconds

    while (Date.now() - startTime < duration) {
      const intervalStart = Date.now();

      // Generate load for this interval.
      const transactions = await this.generateTransactionLoad(
        currentRate, measurementInterval
      );

      // Measure throughput for this interval.
      const intervalMetrics = await this.measureIntervalThroughput(
        transactions, measurementInterval
      );

      measurements.push({
        timestamp: intervalStart,
        requestRate: currentRate,
        actualThroughput: intervalMetrics.throughput,
        successRate: intervalMetrics.successRate,
        averageLatency: intervalMetrics.averageLatency,
        p95Latency: intervalMetrics.p95Latency,
        p99Latency: intervalMetrics.p99Latency
      });

      // Adaptive rate adjustment: ramp up while healthy (>95% success),
      // back off once more than 20% of transactions fail.
      if (scenario.rampUp && intervalMetrics.successRate > 0.95) {
        currentRate += rateIncrement;
      } else if (intervalMetrics.successRate < 0.8) {
        currentRate = Math.max(1, currentRate - rateIncrement);
      }

      // Wait out the remainder of the interval so intervals stay aligned.
      const elapsed = Date.now() - intervalStart;
      if (elapsed < measurementInterval) {
        await this.sleep(measurementInterval - elapsed);
      }
    }

    // Stop metrics collection before summarizing.
    this.metrics.stopCollection();

    return this.analyzeThroughputMeasurements(measurements);
  }

  /**
   * Submit transactions at `rate` requests/second for `duration` ms.
   *
   * Each submission is tracked with its own latency and success flag; the
   * returned promise resolves once every submitted transaction has settled.
   *
   * @returns {Promise<Array<object>>} Per-transaction outcome records.
   */
  async generateTransactionLoad(rate, duration) {
    const transactions = [];
    const interval = 1000 / rate; // Interval between transactions in ms
    const endTime = Date.now() + duration;

    while (Date.now() < endTime) {
      const transactionStart = Date.now();

      const transaction = {
        id: `tx_${Date.now()}_${Math.random()}`,
        type: this.getRandomTransactionType(),
        data: this.generateTransactionData(),
        timestamp: transactionStart
      };

      // Submit without awaiting so the target rate is maintained; outcomes
      // (success or failure) are normalized into a uniform record shape.
      const promise = this.protocol.submitTransaction(transaction)
        .then(result => ({
          ...transaction,
          result: result,
          latency: Date.now() - transactionStart,
          success: result.committed === true
        }))
        .catch(error => ({
          ...transaction,
          error: error,
          latency: Date.now() - transactionStart,
          success: false
        }));

      transactions.push(promise);

      // Pace submissions to match the requested rate.
      await this.sleep(interval);
    }

    // Wait for all in-flight transactions to complete.
    return await Promise.all(transactions);
  }

  /**
   * Summarize per-interval throughput measurements.
   *
   * "Optimal" throughput is the best interval that still kept >=95%
   * success; "sustainable" is the level held for >=80% of intervals.
   */
  analyzeThroughputMeasurements(measurements) {
    const totalMeasurements = measurements.length;
    const avgThroughput = measurements.reduce((sum, m) => sum + m.actualThroughput, 0) / totalMeasurements;
    const maxThroughput = Math.max(...measurements.map(m => m.actualThroughput));
    const avgSuccessRate = measurements.reduce((sum, m) => sum + m.successRate, 0) / totalMeasurements;

    // Find optimal operating point (highest throughput with >95% success rate).
    const optimalPoints = measurements.filter(m => m.successRate >= 0.95);
    const optimalThroughput = optimalPoints.length > 0 ?
      Math.max(...optimalPoints.map(m => m.actualThroughput)) : 0;

    return {
      averageThroughput: avgThroughput,
      maxThroughput: maxThroughput,
      optimalThroughput: optimalThroughput,
      averageSuccessRate: avgSuccessRate,
      measurements: measurements,
      sustainableThroughput: this.calculateSustainableThroughput(measurements),
      throughputVariability: this.calculateThroughputVariability(measurements)
    };
  }

  /**
   * Highest throughput reached or exceeded in at least 80% of intervals.
   *
   * BUGFIX: the original indexed at floor(n * 0.2) of the descending sort,
   * which yields a value sustained only ~20% of the time; floor(n * 0.8)
   * (clamped to the last element) matches the documented ">80% of the time".
   */
  calculateSustainableThroughput(measurements) {
    const sortedThroughputs = measurements
      .map(m => m.actualThroughput)
      .sort((a, b) => b - a);
    const index = Math.min(
      sortedThroughputs.length - 1,
      Math.floor(sortedThroughputs.length * 0.8)
    );
    return sortedThroughputs[index];
  }

  /**
   * Population standard deviation of per-interval throughput.
   * (Was referenced by analyzeThroughputMeasurements but never defined.)
   */
  calculateThroughputVariability(measurements) {
    const values = measurements.map(m => m.actualThroughput);
    const mean = values.reduce((sum, v) => sum + v, 0) / values.length;
    const variance =
      values.reduce((sum, v) => sum + (v - mean) ** 2, 0) / values.length;
    return Math.sqrt(variance);
  }

  /**
   * Promise-based delay helper used to pace the measurement loops.
   * (Was referenced but never defined.)
   */
  sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
278
+ ```
279
+
280
+ ### Latency Analysis System
281
+ ```javascript
282
/**
 * Measures end-to-end transaction latency for a consensus protocol and
 * breaks it down by lifecycle phase (submission, consensus, application).
 */
class LatencyBenchmark {
  constructor(protocol, configuration) {
    this.protocol = protocol;
    this.config = configuration;
    this.latencyHistogram = new LatencyHistogram();
    this.percentileCalculator = new PercentileCalculator();
  }

  // Collect latency samples (after a warmup period) and return the analyzed
  // distribution.  Scenario fields sampleSize / warmupSize default to
  // 10000 / 1000 respectively.
  async measureLatency(scenario) {
    const sampleSize = scenario.sampleSize || 10000;
    const warmupSize = scenario.warmupSize || 1000;

    console.log(`Measuring latency with ${sampleSize} samples (${warmupSize} warmup)`);

    // Warm caches / connections before measuring anything.
    await this.performWarmup(warmupSize);

    const samples = [];
    for (let sampleIndex = 0; sampleIndex < sampleSize; sampleIndex++) {
      samples.push(await this.measureSingleTransactionLatency());

      // Progress reporting every 1000 samples.
      if (sampleIndex % 1000 === 0) {
        console.log(`Completed ${sampleIndex}/${sampleSize} latency measurements`);
      }
    }

    return this.analyzeLatencyDistribution(samples);
  }

  // Time one benchmark transaction through each phase of its lifecycle.
  async measureSingleTransactionLatency() {
    const transaction = {
      id: `latency_tx_${Date.now()}_${Math.random()}`,
      type: 'benchmark',
      data: { value: Math.random() },
      phases: {}
    };

    // Phase 1 — submission: synchronous cost of handing the transaction over.
    const submittedAt = performance.now();
    const pending = this.protocol.submitTransaction(transaction);
    transaction.phases.submission = performance.now() - submittedAt;

    // Phase 2 — consensus: time until the protocol resolves the submission.
    const consensusStartedAt = performance.now();
    const outcome = await pending;
    transaction.phases.consensus = performance.now() - consensusStartedAt;

    // Phase 3 — application: only counted when the protocol reports it.
    transaction.phases.application = outcome.applicationTime
      ? outcome.applicationTime
      : 0;

    const { submission, consensus, application } = transaction.phases;

    return {
      transactionId: transaction.id,
      totalLatency: submission + consensus + application,
      phases: transaction.phases,
      success: outcome.committed === true,
      timestamp: Date.now()
    };
  }

  // Summarize the latency distribution of successful samples only.
  // Throws when every sample failed, since no meaningful statistics exist.
  analyzeLatencyDistribution(measurements) {
    const successful = measurements.filter(m => m.success);
    const latencies = successful.map(m => m.totalLatency);

    if (latencies.length === 0) {
      throw new Error('No successful latency measurements');
    }

    const percentiles = this.percentileCalculator.calculate(
      latencies,
      [50, 75, 90, 95, 99, 99.9, 99.99]
    );

    const totalLatency = latencies.reduce((sum, latency) => sum + latency, 0);

    return {
      sampleSize: successful.length,
      mean: totalLatency / latencies.length,
      median: percentiles[50],
      standardDeviation: this.calculateStandardDeviation(latencies),
      percentiles: percentiles,
      phaseAnalysis: this.analyzePhaseLatencies(successful),
      distribution: this.analyzeLatencyHistogram(latencies),
      outliers: this.identifyLatencyOutliers(latencies)
    };
  }

  // Per-phase latency statistics, plus each phase's share of total latency.
  // Phases with no positive samples are omitted from the result.
  analyzePhaseLatencies(measurements) {
    const phaseAnalysis = {};
    const grandTotal = measurements.reduce((sum, m) => sum + m.totalLatency, 0);

    for (const phase of ['submission', 'consensus', 'application']) {
      const valid = measurements
        .map(m => m.phases[phase])
        .filter(latency => latency > 0);

      if (valid.length === 0) {
        continue;
      }

      const phaseTotal = valid.reduce((sum, latency) => sum + latency, 0);

      phaseAnalysis[phase] = {
        mean: phaseTotal / valid.length,
        p50: this.percentileCalculator.calculate(valid, [50])[50],
        p95: this.percentileCalculator.calculate(valid, [95])[95],
        p99: this.percentileCalculator.calculate(valid, [99])[99],
        max: Math.max(...valid),
        contributionPercent: (phaseTotal / grandTotal) * 100
      };
    }

    return phaseAnalysis;
  }
}
409
+ ```
410
+
411
+ ### Resource Usage Monitor
412
+ ```javascript
413
/**
 * Samples system/process resource usage (CPU, memory, network, disk) while a
 * benchmark scenario runs, then summarizes usage and flags bottlenecks.
 */
class ResourceUsageMonitor {
  constructor() {
    this.monitoringActive = false;
    this.samplingInterval = 1000; // 1 second
    this.measurements = [];
    // NOTE(review): SystemMonitor is assumed to be provided by the
    // surrounding module — confirm before running standalone.
    this.systemMonitor = new SystemMonitor();
  }

  /**
   * Run `scenario` against `protocol` while continuously sampling resources.
   *
   * BUGFIX: the original error path flipped `monitoringActive` but never
   * awaited the background sampling promise, leaving a floating promise
   * (potential unhandled rejection). The sampler is now stopped and awaited
   * in a `finally`, in all cases.
   *
   * @returns {Promise<{benchmarkResult: object, resourceUsage: object|null}>}
   */
  async measureResourceUsage(protocol, scenario) {
    console.log('Starting resource usage monitoring');

    this.monitoringActive = true;
    this.measurements = [];

    // Start monitoring in background; it runs until the flag is cleared.
    const monitoringPromise = this.startContinuousMonitoring();

    let benchmarkResult;
    try {
      benchmarkResult = await this.executeBenchmarkWithMonitoring(
        protocol, scenario
      );
    } finally {
      // Always stop the sampler and wait for its loop to exit.
      this.monitoringActive = false;
      await monitoringPromise;
    }

    return {
      benchmarkResult: benchmarkResult,
      resourceUsage: this.analyzeResourceUsage()
    };
  }

  /**
   * Background sampling loop: collect one measurement per sampling interval
   * until `monitoringActive` is cleared.
   */
  async startContinuousMonitoring() {
    while (this.monitoringActive) {
      const measurement = await this.collectResourceMeasurement();
      this.measurements.push(measurement);

      await this.sleep(this.samplingInterval);
    }
  }

  /**
   * Take one snapshot of CPU, memory, network, disk, and process metrics.
   * All values come from the injected SystemMonitor.
   */
  async collectResourceMeasurement() {
    const timestamp = Date.now();

    const cpuUsage = await this.systemMonitor.getCPUUsage();
    const memoryUsage = await this.systemMonitor.getMemoryUsage();
    const networkIO = await this.systemMonitor.getNetworkIO();
    const diskIO = await this.systemMonitor.getDiskIO();
    const processMetrics = await this.systemMonitor.getProcessMetrics();

    return {
      timestamp: timestamp,
      cpu: {
        totalUsage: cpuUsage.total,
        consensusUsage: cpuUsage.process,
        loadAverage: cpuUsage.loadAverage,
        coreUsage: cpuUsage.cores
      },
      memory: {
        totalUsed: memoryUsage.used,
        totalAvailable: memoryUsage.available,
        processRSS: memoryUsage.processRSS,
        processHeap: memoryUsage.processHeap,
        gcStats: memoryUsage.gcStats
      },
      network: {
        bytesIn: networkIO.bytesIn,
        bytesOut: networkIO.bytesOut,
        packetsIn: networkIO.packetsIn,
        packetsOut: networkIO.packetsOut,
        connectionsActive: networkIO.connectionsActive
      },
      disk: {
        bytesRead: diskIO.bytesRead,
        bytesWritten: diskIO.bytesWritten,
        operationsRead: diskIO.operationsRead,
        operationsWrite: diskIO.operationsWrite,
        queueLength: diskIO.queueLength
      },
      process: {
        consensusThreads: processMetrics.consensusThreads,
        fileDescriptors: processMetrics.fileDescriptors,
        uptime: processMetrics.uptime
      }
    };
  }

  /**
   * Summarize all collected measurements; returns null when nothing was
   * sampled.
   */
  analyzeResourceUsage() {
    if (this.measurements.length === 0) {
      return null;
    }

    // NOTE(review): analyzeNetworkUsage / analyzeDiskUsage /
    // calculateResourceEfficiency are expected to be defined elsewhere in
    // the surrounding module — confirm before running standalone.
    const cpuAnalysis = this.analyzeCPUUsage();
    const memoryAnalysis = this.analyzeMemoryUsage();
    const networkAnalysis = this.analyzeNetworkUsage();
    const diskAnalysis = this.analyzeDiskUsage();

    return {
      duration: this.measurements[this.measurements.length - 1].timestamp -
                this.measurements[0].timestamp,
      sampleCount: this.measurements.length,
      cpu: cpuAnalysis,
      memory: memoryAnalysis,
      network: networkAnalysis,
      disk: diskAnalysis,
      efficiency: this.calculateResourceEfficiency(),
      bottlenecks: this.identifyResourceBottlenecks()
    };
  }

  /**
   * CPU usage statistics for the consensus process across all samples.
   * NOTE(review): analyzeCoreUtilization / analyzeCPUTrends are expected
   * elsewhere in the module — confirm.
   */
  analyzeCPUUsage() {
    const cpuUsages = this.measurements.map(m => m.cpu.consensusUsage);

    return {
      average: cpuUsages.reduce((sum, usage) => sum + usage, 0) / cpuUsages.length,
      peak: Math.max(...cpuUsages),
      p95: this.calculatePercentile(cpuUsages, 95),
      variability: this.calculateStandardDeviation(cpuUsages),
      coreUtilization: this.analyzeCoreUtilization(),
      trends: this.analyzeCPUTrends()
    };
  }

  /**
   * Memory usage statistics (RSS and heap) across all samples.
   * NOTE(review): detectMemoryLeaks / analyzeGCImpact are expected elsewhere
   * in the module — confirm.
   */
  analyzeMemoryUsage() {
    const memoryUsages = this.measurements.map(m => m.memory.processRSS);
    const heapUsages = this.measurements.map(m => m.memory.processHeap);

    return {
      averageRSS: memoryUsages.reduce((sum, usage) => sum + usage, 0) / memoryUsages.length,
      peakRSS: Math.max(...memoryUsages),
      averageHeap: heapUsages.reduce((sum, usage) => sum + usage, 0) / heapUsages.length,
      peakHeap: Math.max(...heapUsages),
      memoryLeaks: this.detectMemoryLeaks(),
      gcImpact: this.analyzeGCImpact(),
      growth: this.calculateMemoryGrowth()
    };
  }

  /**
   * Flag resource bottlenecks using fixed thresholds:
   * CPU > 80% average, memory growth > 1 MB/s, network out > 100 MB/s.
   */
  identifyResourceBottlenecks() {
    const bottlenecks = [];

    // CPU bottleneck detection
    const avgCPU = this.measurements.reduce((sum, m) => sum + m.cpu.consensusUsage, 0) /
                   this.measurements.length;
    if (avgCPU > 80) {
      bottlenecks.push({
        type: 'CPU',
        severity: 'HIGH',
        description: `High CPU usage (${avgCPU.toFixed(1)}%)`
      });
    }

    // Memory bottleneck detection
    const memoryGrowth = this.calculateMemoryGrowth();
    if (memoryGrowth.rate > 1024 * 1024) { // 1MB/s growth
      bottlenecks.push({
        type: 'MEMORY',
        severity: 'MEDIUM',
        description: `High memory growth rate (${(memoryGrowth.rate / 1024 / 1024).toFixed(2)} MB/s)`
      });
    }

    // Network bottleneck detection
    const avgNetworkOut = this.measurements.reduce((sum, m) => sum + m.network.bytesOut, 0) /
                          this.measurements.length;
    if (avgNetworkOut > 100 * 1024 * 1024) { // 100 MB/s
      bottlenecks.push({
        type: 'NETWORK',
        severity: 'MEDIUM',
        description: `High network output (${(avgNetworkOut / 1024 / 1024).toFixed(2)} MB/s)`
      });
    }

    return bottlenecks;
  }

  /**
   * Linear growth of process RSS over the monitoring window, in bytes and
   * bytes/second.  (Was referenced but never defined.)
   */
  calculateMemoryGrowth() {
    if (this.measurements.length < 2) {
      return { rate: 0, total: 0 };
    }
    const first = this.measurements[0];
    const last = this.measurements[this.measurements.length - 1];
    const elapsedSeconds = (last.timestamp - first.timestamp) / 1000;
    const total = last.memory.processRSS - first.memory.processRSS;
    return {
      rate: elapsedSeconds > 0 ? total / elapsedSeconds : 0,
      total: total
    };
  }

  /**
   * Value at the given percentile via nearest-rank on a sorted copy.
   * (Was referenced but never defined.)
   */
  calculatePercentile(values, percentile) {
    const sorted = [...values].sort((a, b) => a - b);
    const index = Math.min(
      sorted.length - 1,
      Math.floor((percentile / 100) * sorted.length)
    );
    return sorted[index];
  }

  /**
   * Population standard deviation.  (Was referenced but never defined.)
   */
  calculateStandardDeviation(values) {
    const mean = values.reduce((sum, v) => sum + v, 0) / values.length;
    const variance =
      values.reduce((sum, v) => sum + (v - mean) ** 2, 0) / values.length;
    return Math.sqrt(variance);
  }

  /**
   * Promise-based delay helper for the sampling loop.
   * (Was referenced but never defined.)
   */
  sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
607
+ ```
608
+
609
+ ### Adaptive Performance Optimizer
610
+ ```javascript
611
/**
 * Analyzes benchmark results, derives optimization candidates (parameter
 * changes, feature toggles), and applies them gradually, reverting any
 * change that does not yield a measurable improvement.
 */
class AdaptiveOptimizer {
  constructor() {
    this.optimizationHistory = new Map();
    // NOTE(review): PerformanceModel / ParameterTuner are assumed to be
    // provided by the surrounding module — confirm before running standalone.
    this.performanceModel = new PerformanceModel();
    this.parameterTuner = new ParameterTuner();
    this.currentOptimizations = new Map();
  }

  /**
   * Derive and apply optimizations for every protocol in `benchmarkResults`
   * (a Map of protocol -> per-scenario results).
   *
   * @returns {Promise<Array<object>>} All optimization candidates considered.
   */
  async optimizeBasedOnResults(benchmarkResults) {
    const optimizations = [];

    for (const [protocol, results] of benchmarkResults) {
      const protocolOptimizations = await this.optimizeProtocol(protocol, results);
      optimizations.push(...protocolOptimizations);
    }

    // Apply optimizations gradually, measuring impact after each one.
    await this.applyOptimizations(optimizations);

    return optimizations;
  }

  /**
   * Build the optimization candidate list for one protocol: one candidate
   * per detected bottleneck, plus model-driven parameter-tuning suggestions.
   */
  async optimizeProtocol(protocol, results) {
    const optimizations = [];

    const bottlenecks = this.identifyPerformanceBottlenecks(results);

    for (const bottleneck of bottlenecks) {
      const optimization = await this.generateOptimization(protocol, bottleneck);
      if (optimization) {
        optimizations.push(optimization);
      }
    }

    // Parameter tuning based on observed performance characteristics.
    const parameterOptimizations = await this.tuneParameters(protocol, results);
    optimizations.push(...parameterOptimizations);

    return optimizations;
  }

  /**
   * Scan per-scenario results for three bottleneck classes:
   * - THROUGHPUT_DEGRADATION: optimal throughput < 80% of peak
   * - LATENCY_TAIL: p99 more than 10x the median
   * - RESOURCE_CONSTRAINT: any resource bottleneck reported
   */
  identifyPerformanceBottlenecks(results) {
    const bottlenecks = [];

    for (const [scenario, result] of results) {
      if (result.throughput && result.throughput.optimalThroughput < result.throughput.maxThroughput * 0.8) {
        bottlenecks.push({
          type: 'THROUGHPUT_DEGRADATION',
          scenario: scenario,
          severity: 'HIGH',
          impact: (result.throughput.maxThroughput - result.throughput.optimalThroughput) /
                  result.throughput.maxThroughput,
          details: result.throughput
        });
      }

      if (result.latency && result.latency.p99 > result.latency.p50 * 10) {
        bottlenecks.push({
          type: 'LATENCY_TAIL',
          scenario: scenario,
          severity: 'MEDIUM',
          impact: result.latency.p99 / result.latency.p50,
          details: result.latency
        });
      }

      if (result.resourceUsage && result.resourceUsage.bottlenecks.length > 0) {
        bottlenecks.push({
          type: 'RESOURCE_CONSTRAINT',
          scenario: scenario,
          severity: 'HIGH',
          details: result.resourceUsage.bottlenecks
        });
      }
    }

    return bottlenecks;
  }

  /**
   * Dispatch a bottleneck to its type-specific optimizer; unknown types
   * produce no optimization.
   */
  async generateOptimization(protocol, bottleneck) {
    switch (bottleneck.type) {
      case 'THROUGHPUT_DEGRADATION':
        return await this.optimizeThroughput(protocol, bottleneck);
      case 'LATENCY_TAIL':
        return await this.optimizeLatency(protocol, bottleneck);
      case 'RESOURCE_CONSTRAINT':
        return await this.optimizeResourceUsage(protocol, bottleneck);
      default:
        return null;
    }
  }

  /**
   * Protocol-specific throughput optimizations.  Returns the single best
   * candidate, or null when no strategy applies to this protocol.
   */
  async optimizeThroughput(protocol, bottleneck) {
    const optimizations = [];

    // Raft benefits most from larger append batches.
    if (protocol === 'raft') {
      optimizations.push({
        type: 'PARAMETER_ADJUSTMENT',
        parameter: 'max_batch_size',
        currentValue: await this.getCurrentParameter(protocol, 'max_batch_size'),
        recommendedValue: this.calculateOptimalBatchSize(bottleneck.details),
        expectedImprovement: '15-25% throughput increase',
        confidence: 0.8
      });
    }

    // BFT protocols benefit from pipelining to hide round latency.
    if (protocol === 'byzantine') {
      optimizations.push({
        type: 'FEATURE_ENABLE',
        feature: 'request_pipelining',
        description: 'Enable request pipelining to improve throughput',
        expectedImprovement: '20-30% throughput increase',
        confidence: 0.7
      });
    }

    return optimizations.length > 0 ? optimizations[0] : null;
  }

  /**
   * Model-driven parameter tuning; only suggestions with confidence above
   * 0.6 become optimization candidates.
   */
  async tuneParameters(protocol, results) {
    const optimizations = [];

    const parameterSuggestions = await this.performanceModel.suggestParameters(
      protocol, results
    );

    for (const suggestion of parameterSuggestions) {
      if (suggestion.confidence > 0.6) {
        optimizations.push({
          type: 'PARAMETER_TUNING',
          parameter: suggestion.parameter,
          currentValue: suggestion.currentValue,
          recommendedValue: suggestion.recommendedValue,
          expectedImprovement: suggestion.expectedImprovement,
          confidence: suggestion.confidence,
          rationale: suggestion.rationale
        });
      }
    }

    return optimizations;
  }

  /**
   * Leading numeric magnitude of an expected-improvement description such
   * as "15-25% throughput increase".  Returns 0 for missing or non-numeric
   * text so sort comparisons never see NaN.
   */
  parseImprovementValue(expectedImprovement) {
    const value = Number.parseFloat(expectedImprovement);
    return Number.isNaN(value) ? 0 : value;
  }

  /**
   * Apply optimizations one at a time, highest expected value first.
   * Each change is kept only if it improves performance by at least 5%;
   * otherwise (or on error) it is reverted.
   *
   * BUGFIX: the original sorted by `parseFloat(expectedImprovement)`, which
   * is NaN for candidates without a leading number (e.g. model suggestions),
   * making the sort order undefined; parseImprovementValue defaults those
   * to 0.  Sorting now happens on a copy so the caller's array is untouched.
   */
  async applyOptimizations(optimizations) {
    const sortedOptimizations = [...optimizations].sort((a, b) =>
      (b.confidence * this.parseImprovementValue(b.expectedImprovement)) -
      (a.confidence * this.parseImprovementValue(a.expectedImprovement))
    );

    for (const optimization of sortedOptimizations) {
      try {
        await this.applyOptimization(optimization);

        // Let the change settle, then measure its real impact.
        await this.sleep(30000); // 30 seconds
        const impact = await this.measureOptimizationImpact(optimization);

        if (impact.improvement < 0.05) {
          // Revert if improvement is less than 5%.
          await this.revertOptimization(optimization);
        } else {
          // Keep optimization and record success.
          this.recordOptimizationSuccess(optimization, impact);
        }

      } catch (error) {
        console.error(`Failed to apply optimization:`, error);
        await this.revertOptimization(optimization);
      }
    }
  }

  /**
   * Promise-based delay helper used between optimization applications.
   * (Was referenced but never defined.)
   */
  sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
792
+ ```
793
+
794
+ ## MCP Integration Hooks
795
+
796
+ ### Performance Metrics Storage
797
+ ```javascript
798
+ // Store comprehensive benchmark results
799
+ await this.mcpTools.memory_usage({
800
+ action: 'store',
801
+ key: `benchmark_results_${protocol}_${Date.now()}`,
802
+ value: JSON.stringify({
803
+ protocol: protocol,
804
+ timestamp: Date.now(),
805
+ throughput: throughputResults,
806
+ latency: latencyResults,
807
+ resourceUsage: resourceResults,
808
+ optimizations: appliedOptimizations
809
+ }),
810
+ namespace: 'performance_benchmarks',
811
+ ttl: 604800000 // 7 days
812
+ });
813
+
814
+ // Real-time performance monitoring
815
+ await this.mcpTools.metrics_collect({
816
+ components: [
817
+ 'consensus_throughput',
818
+ 'consensus_latency_p99',
819
+ 'cpu_utilization',
820
+ 'memory_usage',
821
+ 'network_io_rate'
822
+ ]
823
+ });
824
+ ```
825
+
826
+ ### Neural Performance Learning
827
+ ```javascript
828
+ // Learn performance optimization patterns
829
+ await this.mcpTools.neural_patterns({
830
+ action: 'learn',
831
+ operation: 'performance_optimization',
832
+ outcome: JSON.stringify({
833
+ optimizationType: optimization.type,
834
+ performanceGain: measurementResults.improvement,
835
+ resourceImpact: measurementResults.resourceDelta,
836
+ networkConditions: currentNetworkState
837
+ })
838
+ });
839
+
840
+ // Predict optimal configurations
841
+ const configPrediction = await this.mcpTools.neural_predict({
842
+ modelId: 'consensus_performance_model',
843
+ input: JSON.stringify({
844
+ workloadPattern: currentWorkload,
845
+ networkTopology: networkState,
846
+ resourceConstraints: systemResources
847
+ })
848
+ });
849
+ ```
850
+
851
851
  This Performance Benchmarker provides comprehensive performance analysis, optimization recommendations, and adaptive tuning capabilities for distributed consensus protocols.