@claude-flow/cli 3.7.0-alpha.1 → 3.7.0-alpha.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (476)
  1. package/.claude/agents/analysis/analyze-code-quality.md +178 -178
  2. package/.claude/agents/analysis/code-analyzer.md +209 -209
  3. package/.claude/agents/analysis/code-review/analyze-code-quality.md +178 -178
  4. package/.claude/agents/architecture/arch-system-design.md +156 -156
  5. package/.claude/agents/architecture/system-design/arch-system-design.md +154 -154
  6. package/.claude/agents/browser/browser-agent.yaml +182 -182
  7. package/.claude/agents/consensus/byzantine-coordinator.md +62 -62
  8. package/.claude/agents/consensus/crdt-synchronizer.md +996 -996
  9. package/.claude/agents/consensus/gossip-coordinator.md +62 -62
  10. package/.claude/agents/consensus/performance-benchmarker.md +850 -850
  11. package/.claude/agents/consensus/quorum-manager.md +822 -822
  12. package/.claude/agents/consensus/raft-manager.md +62 -62
  13. package/.claude/agents/consensus/security-manager.md +621 -621
  14. package/.claude/agents/core/coder.md +452 -452
  15. package/.claude/agents/core/planner.md +374 -374
  16. package/.claude/agents/core/researcher.md +368 -368
  17. package/.claude/agents/core/reviewer.md +519 -519
  18. package/.claude/agents/core/tester.md +511 -511
  19. package/.claude/agents/custom/test-long-runner.md +44 -44
  20. package/.claude/agents/data/data-ml-model.md +444 -444
  21. package/.claude/agents/data/ml/data-ml-model.md +192 -192
  22. package/.claude/agents/development/backend/dev-backend-api.md +141 -141
  23. package/.claude/agents/development/dev-backend-api.md +344 -344
  24. package/.claude/agents/devops/ci-cd/ops-cicd-github.md +163 -163
  25. package/.claude/agents/devops/ops-cicd-github.md +164 -164
  26. package/.claude/agents/documentation/api-docs/docs-api-openapi.md +173 -173
  27. package/.claude/agents/documentation/docs-api-openapi.md +354 -354
  28. package/.claude/agents/flow-nexus/app-store.md +87 -87
  29. package/.claude/agents/flow-nexus/authentication.md +68 -68
  30. package/.claude/agents/flow-nexus/challenges.md +80 -80
  31. package/.claude/agents/flow-nexus/neural-network.md +87 -87
  32. package/.claude/agents/flow-nexus/payments.md +82 -82
  33. package/.claude/agents/flow-nexus/sandbox.md +75 -75
  34. package/.claude/agents/flow-nexus/swarm.md +75 -75
  35. package/.claude/agents/flow-nexus/user-tools.md +95 -95
  36. package/.claude/agents/flow-nexus/workflow.md +83 -83
  37. package/.claude/agents/github/code-review-swarm.md +377 -377
  38. package/.claude/agents/github/github-modes.md +172 -172
  39. package/.claude/agents/github/issue-tracker.md +575 -575
  40. package/.claude/agents/github/multi-repo-swarm.md +552 -552
  41. package/.claude/agents/github/pr-manager.md +437 -437
  42. package/.claude/agents/github/project-board-sync.md +508 -508
  43. package/.claude/agents/github/release-manager.md +604 -604
  44. package/.claude/agents/github/release-swarm.md +582 -582
  45. package/.claude/agents/github/repo-architect.md +397 -397
  46. package/.claude/agents/github/swarm-issue.md +572 -572
  47. package/.claude/agents/github/swarm-pr.md +427 -427
  48. package/.claude/agents/github/sync-coordinator.md +451 -451
  49. package/.claude/agents/github/workflow-automation.md +902 -902
  50. package/.claude/agents/goal/agent.md +815 -815
  51. package/.claude/agents/goal/goal-planner.md +72 -72
  52. package/.claude/agents/optimization/benchmark-suite.md +664 -664
  53. package/.claude/agents/optimization/load-balancer.md +430 -430
  54. package/.claude/agents/optimization/performance-monitor.md +671 -671
  55. package/.claude/agents/optimization/resource-allocator.md +673 -673
  56. package/.claude/agents/optimization/topology-optimizer.md +807 -807
  57. package/.claude/agents/payments/agentic-payments.md +126 -126
  58. package/.claude/agents/sona/sona-learning-optimizer.md +74 -74
  59. package/.claude/agents/sparc/architecture.md +698 -698
  60. package/.claude/agents/sparc/pseudocode.md +519 -519
  61. package/.claude/agents/sparc/refinement.md +801 -801
  62. package/.claude/agents/sparc/specification.md +477 -477
  63. package/.claude/agents/specialized/mobile/spec-mobile-react-native.md +224 -224
  64. package/.claude/agents/specialized/spec-mobile-react-native.md +226 -226
  65. package/.claude/agents/sublinear/consensus-coordinator.md +337 -337
  66. package/.claude/agents/sublinear/matrix-optimizer.md +184 -184
  67. package/.claude/agents/sublinear/pagerank-analyzer.md +298 -298
  68. package/.claude/agents/sublinear/performance-optimizer.md +367 -367
  69. package/.claude/agents/sublinear/trading-predictor.md +245 -245
  70. package/.claude/agents/swarm/adaptive-coordinator.md +1126 -1126
  71. package/.claude/agents/swarm/hierarchical-coordinator.md +709 -709
  72. package/.claude/agents/swarm/mesh-coordinator.md +962 -962
  73. package/.claude/agents/templates/automation-smart-agent.md +204 -204
  74. package/.claude/agents/templates/base-template-generator.md +289 -289
  75. package/.claude/agents/templates/coordinator-swarm-init.md +89 -89
  76. package/.claude/agents/templates/github-pr-manager.md +176 -176
  77. package/.claude/agents/templates/implementer-sparc-coder.md +258 -258
  78. package/.claude/agents/templates/memory-coordinator.md +186 -186
  79. package/.claude/agents/templates/orchestrator-task.md +138 -138
  80. package/.claude/agents/templates/performance-analyzer.md +198 -198
  81. package/.claude/agents/templates/sparc-coordinator.md +513 -513
  82. package/.claude/agents/testing/production-validator.md +394 -394
  83. package/.claude/agents/testing/tdd-london-swarm.md +243 -243
  84. package/.claude/agents/v3/adr-architect.md +184 -184
  85. package/.claude/agents/v3/aidefence-guardian.md +282 -282
  86. package/.claude/agents/v3/claims-authorizer.md +208 -208
  87. package/.claude/agents/v3/collective-intelligence-coordinator.md +993 -993
  88. package/.claude/agents/v3/ddd-domain-expert.md +220 -220
  89. package/.claude/agents/v3/injection-analyst.md +236 -236
  90. package/.claude/agents/v3/memory-specialist.md +995 -995
  91. package/.claude/agents/v3/performance-engineer.md +1233 -1233
  92. package/.claude/agents/v3/pii-detector.md +151 -151
  93. package/.claude/agents/v3/reasoningbank-learner.md +213 -213
  94. package/.claude/agents/v3/security-architect-aidefence.md +410 -410
  95. package/.claude/agents/v3/security-architect.md +867 -867
  96. package/.claude/agents/v3/security-auditor.md +771 -771
  97. package/.claude/agents/v3/sparc-orchestrator.md +182 -182
  98. package/.claude/agents/v3/swarm-memory-manager.md +157 -157
  99. package/.claude/agents/v3/v3-integration-architect.md +205 -205
  100. package/.claude/commands/agents/README.md +50 -50
  101. package/.claude/commands/agents/agent-capabilities.md +140 -140
  102. package/.claude/commands/agents/agent-coordination.md +28 -28
  103. package/.claude/commands/agents/agent-spawning.md +28 -28
  104. package/.claude/commands/agents/agent-types.md +216 -216
  105. package/.claude/commands/agents/health.md +139 -139
  106. package/.claude/commands/agents/list.md +100 -100
  107. package/.claude/commands/agents/logs.md +130 -130
  108. package/.claude/commands/agents/metrics.md +122 -122
  109. package/.claude/commands/agents/pool.md +127 -127
  110. package/.claude/commands/agents/spawn.md +140 -140
  111. package/.claude/commands/agents/status.md +115 -115
  112. package/.claude/commands/agents/stop.md +102 -102
  113. package/.claude/commands/analysis/COMMAND_COMPLIANCE_REPORT.md +53 -53
  114. package/.claude/commands/analysis/README.md +9 -9
  115. package/.claude/commands/analysis/bottleneck-detect.md +162 -162
  116. package/.claude/commands/analysis/performance-bottlenecks.md +58 -58
  117. package/.claude/commands/analysis/performance-report.md +25 -25
  118. package/.claude/commands/analysis/token-efficiency.md +44 -44
  119. package/.claude/commands/analysis/token-usage.md +25 -25
  120. package/.claude/commands/automation/README.md +9 -9
  121. package/.claude/commands/automation/auto-agent.md +122 -122
  122. package/.claude/commands/automation/self-healing.md +105 -105
  123. package/.claude/commands/automation/session-memory.md +89 -89
  124. package/.claude/commands/automation/smart-agents.md +72 -72
  125. package/.claude/commands/automation/smart-spawn.md +25 -25
  126. package/.claude/commands/automation/workflow-select.md +25 -25
  127. package/.claude/commands/claude-flow-help.md +103 -103
  128. package/.claude/commands/claude-flow-memory.md +107 -107
  129. package/.claude/commands/claude-flow-swarm.md +205 -205
  130. package/.claude/commands/coordination/README.md +9 -9
  131. package/.claude/commands/coordination/agent-spawn.md +25 -25
  132. package/.claude/commands/coordination/init.md +44 -44
  133. package/.claude/commands/coordination/orchestrate.md +43 -43
  134. package/.claude/commands/coordination/spawn.md +45 -45
  135. package/.claude/commands/coordination/swarm-init.md +85 -85
  136. package/.claude/commands/coordination/task-orchestrate.md +25 -25
  137. package/.claude/commands/flow-nexus/app-store.md +123 -123
  138. package/.claude/commands/flow-nexus/challenges.md +119 -119
  139. package/.claude/commands/flow-nexus/login-registration.md +64 -64
  140. package/.claude/commands/flow-nexus/neural-network.md +133 -133
  141. package/.claude/commands/flow-nexus/payments.md +115 -115
  142. package/.claude/commands/flow-nexus/sandbox.md +82 -82
  143. package/.claude/commands/flow-nexus/swarm.md +86 -86
  144. package/.claude/commands/flow-nexus/user-tools.md +151 -151
  145. package/.claude/commands/flow-nexus/workflow.md +114 -114
  146. package/.claude/commands/github/README.md +11 -11
  147. package/.claude/commands/github/code-review-swarm.md +513 -513
  148. package/.claude/commands/github/code-review.md +25 -25
  149. package/.claude/commands/github/github-modes.md +146 -146
  150. package/.claude/commands/github/github-swarm.md +121 -121
  151. package/.claude/commands/github/issue-tracker.md +291 -291
  152. package/.claude/commands/github/issue-triage.md +25 -25
  153. package/.claude/commands/github/multi-repo-swarm.md +518 -518
  154. package/.claude/commands/github/pr-enhance.md +26 -26
  155. package/.claude/commands/github/pr-manager.md +169 -169
  156. package/.claude/commands/github/project-board-sync.md +470 -470
  157. package/.claude/commands/github/release-manager.md +337 -337
  158. package/.claude/commands/github/release-swarm.md +543 -543
  159. package/.claude/commands/github/repo-analyze.md +25 -25
  160. package/.claude/commands/github/repo-architect.md +366 -366
  161. package/.claude/commands/github/swarm-issue.md +481 -481
  162. package/.claude/commands/github/swarm-pr.md +284 -284
  163. package/.claude/commands/github/sync-coordinator.md +300 -300
  164. package/.claude/commands/github/workflow-automation.md +441 -441
  165. package/.claude/commands/hive-mind/README.md +17 -17
  166. package/.claude/commands/hive-mind/hive-mind-consensus.md +8 -8
  167. package/.claude/commands/hive-mind/hive-mind-init.md +18 -18
  168. package/.claude/commands/hive-mind/hive-mind-memory.md +8 -8
  169. package/.claude/commands/hive-mind/hive-mind-metrics.md +8 -8
  170. package/.claude/commands/hive-mind/hive-mind-resume.md +8 -8
  171. package/.claude/commands/hive-mind/hive-mind-sessions.md +8 -8
  172. package/.claude/commands/hive-mind/hive-mind-spawn.md +21 -21
  173. package/.claude/commands/hive-mind/hive-mind-status.md +8 -8
  174. package/.claude/commands/hive-mind/hive-mind-stop.md +8 -8
  175. package/.claude/commands/hive-mind/hive-mind-wizard.md +8 -8
  176. package/.claude/commands/hive-mind/hive-mind.md +27 -27
  177. package/.claude/commands/hooks/README.md +11 -11
  178. package/.claude/commands/hooks/overview.md +57 -57
  179. package/.claude/commands/hooks/post-edit.md +117 -117
  180. package/.claude/commands/hooks/post-task.md +112 -112
  181. package/.claude/commands/hooks/pre-edit.md +113 -113
  182. package/.claude/commands/hooks/pre-task.md +111 -111
  183. package/.claude/commands/hooks/session-end.md +118 -118
  184. package/.claude/commands/hooks/setup.md +102 -102
  185. package/.claude/commands/memory/README.md +9 -9
  186. package/.claude/commands/memory/memory-persist.md +25 -25
  187. package/.claude/commands/memory/memory-search.md +25 -25
  188. package/.claude/commands/memory/memory-usage.md +25 -25
  189. package/.claude/commands/memory/neural.md +47 -47
  190. package/.claude/commands/monitoring/README.md +9 -9
  191. package/.claude/commands/monitoring/agent-metrics.md +25 -25
  192. package/.claude/commands/monitoring/agents.md +44 -44
  193. package/.claude/commands/monitoring/real-time-view.md +25 -25
  194. package/.claude/commands/monitoring/status.md +46 -46
  195. package/.claude/commands/monitoring/swarm-monitor.md +25 -25
  196. package/.claude/commands/optimization/README.md +9 -9
  197. package/.claude/commands/optimization/auto-topology.md +61 -61
  198. package/.claude/commands/optimization/cache-manage.md +25 -25
  199. package/.claude/commands/optimization/parallel-execute.md +25 -25
  200. package/.claude/commands/optimization/parallel-execution.md +49 -49
  201. package/.claude/commands/optimization/topology-optimize.md +25 -25
  202. package/.claude/commands/pair/README.md +260 -260
  203. package/.claude/commands/pair/commands.md +545 -545
  204. package/.claude/commands/pair/config.md +509 -509
  205. package/.claude/commands/pair/examples.md +511 -511
  206. package/.claude/commands/pair/modes.md +347 -347
  207. package/.claude/commands/pair/session.md +406 -406
  208. package/.claude/commands/pair/start.md +208 -208
  209. package/.claude/commands/sparc/analyzer.md +51 -51
  210. package/.claude/commands/sparc/architect.md +53 -53
  211. package/.claude/commands/sparc/ask.md +97 -97
  212. package/.claude/commands/sparc/batch-executor.md +54 -54
  213. package/.claude/commands/sparc/code.md +89 -89
  214. package/.claude/commands/sparc/coder.md +54 -54
  215. package/.claude/commands/sparc/debug.md +83 -83
  216. package/.claude/commands/sparc/debugger.md +54 -54
  217. package/.claude/commands/sparc/designer.md +53 -53
  218. package/.claude/commands/sparc/devops.md +109 -109
  219. package/.claude/commands/sparc/docs-writer.md +80 -80
  220. package/.claude/commands/sparc/documenter.md +54 -54
  221. package/.claude/commands/sparc/innovator.md +54 -54
  222. package/.claude/commands/sparc/integration.md +83 -83
  223. package/.claude/commands/sparc/mcp.md +117 -117
  224. package/.claude/commands/sparc/memory-manager.md +54 -54
  225. package/.claude/commands/sparc/optimizer.md +54 -54
  226. package/.claude/commands/sparc/orchestrator.md +131 -131
  227. package/.claude/commands/sparc/post-deployment-monitoring-mode.md +83 -83
  228. package/.claude/commands/sparc/refinement-optimization-mode.md +83 -83
  229. package/.claude/commands/sparc/researcher.md +54 -54
  230. package/.claude/commands/sparc/reviewer.md +54 -54
  231. package/.claude/commands/sparc/security-review.md +80 -80
  232. package/.claude/commands/sparc/sparc-modes.md +174 -174
  233. package/.claude/commands/sparc/sparc.md +111 -111
  234. package/.claude/commands/sparc/spec-pseudocode.md +80 -80
  235. package/.claude/commands/sparc/supabase-admin.md +348 -348
  236. package/.claude/commands/sparc/swarm-coordinator.md +54 -54
  237. package/.claude/commands/sparc/tdd.md +54 -54
  238. package/.claude/commands/sparc/tester.md +54 -54
  239. package/.claude/commands/sparc/tutorial.md +79 -79
  240. package/.claude/commands/sparc/workflow-manager.md +54 -54
  241. package/.claude/commands/sparc.md +166 -166
  242. package/.claude/commands/stream-chain/pipeline.md +120 -120
  243. package/.claude/commands/stream-chain/run.md +69 -69
  244. package/.claude/commands/swarm/README.md +15 -15
  245. package/.claude/commands/swarm/analysis.md +95 -95
  246. package/.claude/commands/swarm/development.md +96 -96
  247. package/.claude/commands/swarm/examples.md +168 -168
  248. package/.claude/commands/swarm/maintenance.md +102 -102
  249. package/.claude/commands/swarm/optimization.md +117 -117
  250. package/.claude/commands/swarm/research.md +136 -136
  251. package/.claude/commands/swarm/swarm-analysis.md +8 -8
  252. package/.claude/commands/swarm/swarm-background.md +8 -8
  253. package/.claude/commands/swarm/swarm-init.md +19 -19
  254. package/.claude/commands/swarm/swarm-modes.md +8 -8
  255. package/.claude/commands/swarm/swarm-monitor.md +8 -8
  256. package/.claude/commands/swarm/swarm-spawn.md +19 -19
  257. package/.claude/commands/swarm/swarm-status.md +8 -8
  258. package/.claude/commands/swarm/swarm-strategies.md +8 -8
  259. package/.claude/commands/swarm/swarm.md +87 -87
  260. package/.claude/commands/swarm/testing.md +131 -131
  261. package/.claude/commands/training/README.md +9 -9
  262. package/.claude/commands/training/model-update.md +25 -25
  263. package/.claude/commands/training/neural-patterns.md +107 -107
  264. package/.claude/commands/training/neural-train.md +75 -75
  265. package/.claude/commands/training/pattern-learn.md +25 -25
  266. package/.claude/commands/training/specialization.md +62 -62
  267. package/.claude/commands/truth/start.md +142 -142
  268. package/.claude/commands/verify/check.md +49 -49
  269. package/.claude/commands/verify/start.md +127 -127
  270. package/.claude/commands/workflows/README.md +9 -9
  271. package/.claude/commands/workflows/development.md +77 -77
  272. package/.claude/commands/workflows/research.md +62 -62
  273. package/.claude/commands/workflows/workflow-create.md +25 -25
  274. package/.claude/commands/workflows/workflow-execute.md +25 -25
  275. package/.claude/commands/workflows/workflow-export.md +25 -25
  276. package/.claude/helpers/README.md +96 -96
  277. package/.claude/helpers/adr-compliance.sh +186 -186
  278. package/.claude/helpers/auto-commit.sh +178 -178
  279. package/.claude/helpers/auto-memory-hook.mjs +368 -368
  280. package/.claude/helpers/checkpoint-manager.sh +251 -251
  281. package/.claude/helpers/daemon-manager.sh +252 -252
  282. package/.claude/helpers/ddd-tracker.sh +144 -144
  283. package/.claude/helpers/github-safe.js +121 -121
  284. package/.claude/helpers/github-setup.sh +28 -28
  285. package/.claude/helpers/guidance-hook.sh +13 -13
  286. package/.claude/helpers/guidance-hooks.sh +102 -102
  287. package/.claude/helpers/health-monitor.sh +108 -108
  288. package/.claude/helpers/hook-handler.cjs +278 -278
  289. package/.claude/helpers/intelligence.cjs +1031 -1031
  290. package/.claude/helpers/learning-hooks.sh +329 -329
  291. package/.claude/helpers/learning-optimizer.sh +127 -127
  292. package/.claude/helpers/learning-service.mjs +1144 -1144
  293. package/.claude/helpers/memory.js +83 -83
  294. package/.claude/helpers/metrics-db.mjs +488 -488
  295. package/.claude/helpers/pattern-consolidator.sh +86 -86
  296. package/.claude/helpers/perf-worker.sh +160 -160
  297. package/.claude/helpers/post-commit +16 -16
  298. package/.claude/helpers/pre-commit +26 -26
  299. package/.claude/helpers/quick-start.sh +19 -19
  300. package/.claude/helpers/router.js +66 -66
  301. package/.claude/helpers/security-scanner.sh +127 -127
  302. package/.claude/helpers/session.js +135 -135
  303. package/.claude/helpers/setup-mcp.sh +18 -18
  304. package/.claude/helpers/standard-checkpoint-hooks.sh +189 -189
  305. package/.claude/helpers/statusline-hook.sh +21 -21
  306. package/.claude/helpers/statusline.cjs +575 -575
  307. package/.claude/helpers/statusline.js +321 -321
  308. package/.claude/helpers/swarm-comms.sh +353 -353
  309. package/.claude/helpers/swarm-hooks.sh +761 -761
  310. package/.claude/helpers/swarm-monitor.sh +210 -210
  311. package/.claude/helpers/sync-v3-metrics.sh +245 -245
  312. package/.claude/helpers/update-v3-progress.sh +165 -165
  313. package/.claude/helpers/v3-quick-status.sh +57 -57
  314. package/.claude/helpers/v3.sh +110 -110
  315. package/.claude/helpers/validate-v3-config.sh +215 -215
  316. package/.claude/helpers/worker-manager.sh +170 -170
  317. package/.claude/settings.json +182 -182
  318. package/.claude/skills/agentdb-advanced/SKILL.md +550 -550
  319. package/.claude/skills/agentdb-learning/SKILL.md +545 -545
  320. package/.claude/skills/agentdb-memory-patterns/SKILL.md +339 -339
  321. package/.claude/skills/agentdb-optimization/SKILL.md +509 -509
  322. package/.claude/skills/agentdb-vector-search/SKILL.md +339 -339
  323. package/.claude/skills/agentic-jujutsu/SKILL.md +645 -645
  324. package/.claude/skills/aidefence-scan.md +151 -151
  325. package/.claude/skills/aidefence.yaml +297 -297
  326. package/.claude/skills/browser/SKILL.md +204 -204
  327. package/.claude/skills/flow-nexus-neural/SKILL.md +738 -738
  328. package/.claude/skills/flow-nexus-platform/SKILL.md +1157 -1157
  329. package/.claude/skills/flow-nexus-swarm/SKILL.md +610 -610
  330. package/.claude/skills/github-code-review/SKILL.md +1140 -1140
  331. package/.claude/skills/github-multi-repo/SKILL.md +874 -874
  332. package/.claude/skills/github-project-management/SKILL.md +1290 -1277
  333. package/.claude/skills/github-release-management/SKILL.md +1081 -1081
  334. package/.claude/skills/github-workflow-automation/SKILL.md +1065 -1065
  335. package/.claude/skills/hive-mind-advanced/SKILL.md +712 -712
  336. package/.claude/skills/hooks-automation/SKILL.md +1201 -1201
  337. package/.claude/skills/pair-programming/SKILL.md +1202 -1202
  338. package/.claude/skills/performance-analysis/SKILL.md +563 -563
  339. package/.claude/skills/reasoningbank-agentdb/SKILL.md +446 -446
  340. package/.claude/skills/reasoningbank-intelligence/SKILL.md +201 -201
  341. package/.claude/skills/secure-review.md +181 -181
  342. package/.claude/skills/skill-builder/SKILL.md +910 -910
  343. package/.claude/skills/sparc-methodology/SKILL.md +1115 -1115
  344. package/.claude/skills/stream-chain/SKILL.md +563 -563
  345. package/.claude/skills/swarm-advanced/SKILL.md +973 -973
  346. package/.claude/skills/swarm-orchestration/SKILL.md +179 -179
  347. package/.claude/skills/v3-cli-modernization/SKILL.md +871 -871
  348. package/.claude/skills/v3-core-implementation/SKILL.md +796 -796
  349. package/.claude/skills/v3-ddd-architecture/SKILL.md +441 -441
  350. package/.claude/skills/v3-integration-deep/SKILL.md +240 -240
  351. package/.claude/skills/v3-mcp-optimization/SKILL.md +776 -776
  352. package/.claude/skills/v3-memory-unification/SKILL.md +173 -173
  353. package/.claude/skills/v3-performance-optimization/SKILL.md +389 -389
  354. package/.claude/skills/v3-security-overhaul/SKILL.md +81 -81
  355. package/.claude/skills/v3-swarm-coordination/SKILL.md +339 -339
  356. package/.claude/skills/verification-quality/SKILL.md +649 -649
  357. package/.claude/skills/worker-benchmarks/skill.md +135 -135
  358. package/.claude/skills/worker-integration/skill.md +154 -154
  359. package/README.md +393 -391
  360. package/bin/cli.js +220 -220
  361. package/bin/mcp-server.js +224 -224
  362. package/bin/preinstall.cjs +2 -2
  363. package/dist/src/commands/agent-wasm.js +2 -2
  364. package/dist/src/commands/agent-wasm.js.map +1 -1
  365. package/dist/src/commands/completions.js +409 -409
  366. package/dist/src/commands/daemon.d.ts.map +1 -1
  367. package/dist/src/commands/daemon.js +19 -3
  368. package/dist/src/commands/daemon.js.map +1 -1
  369. package/dist/src/commands/doctor.d.ts.map +1 -1
  370. package/dist/src/commands/doctor.js +105 -23
  371. package/dist/src/commands/doctor.js.map +1 -1
  372. package/dist/src/commands/embeddings.js +26 -26
  373. package/dist/src/commands/hive-mind.d.ts.map +1 -1
  374. package/dist/src/commands/hive-mind.js +122 -104
  375. package/dist/src/commands/hive-mind.js.map +1 -1
  376. package/dist/src/commands/hooks.d.ts.map +1 -1
  377. package/dist/src/commands/hooks.js +34 -21
  378. package/dist/src/commands/hooks.js.map +1 -1
  379. package/dist/src/commands/memory.d.ts.map +1 -1
  380. package/dist/src/commands/memory.js +68 -0
  381. package/dist/src/commands/memory.js.map +1 -1
  382. package/dist/src/commands/ruvector/backup.js +23 -23
  383. package/dist/src/commands/ruvector/benchmark.js +31 -31
  384. package/dist/src/commands/ruvector/import.js +14 -14
  385. package/dist/src/commands/ruvector/init.js +115 -115
  386. package/dist/src/commands/ruvector/migrate.js +99 -99
  387. package/dist/src/commands/ruvector/optimize.js +51 -51
  388. package/dist/src/commands/ruvector/setup.js +624 -624
  389. package/dist/src/commands/ruvector/status.js +38 -38
  390. package/dist/src/index.d.ts +5 -1
  391. package/dist/src/index.d.ts.map +1 -1
  392. package/dist/src/index.js +59 -18
  393. package/dist/src/index.js.map +1 -1
  394. package/dist/src/init/claudemd-generator.js +226 -226
  395. package/dist/src/init/executor.d.ts.map +1 -1
  396. package/dist/src/init/executor.js +511 -453
  397. package/dist/src/init/executor.js.map +1 -1
  398. package/dist/src/init/helpers-generator.js +645 -645
  399. package/dist/src/init/settings-generator.d.ts.map +1 -1
  400. package/dist/src/init/settings-generator.js +11 -5
  401. package/dist/src/init/settings-generator.js.map +1 -1
  402. package/dist/src/init/statusline-generator.js +858 -858
  403. package/dist/src/init/types.d.ts +7 -0
  404. package/dist/src/init/types.d.ts.map +1 -1
  405. package/dist/src/init/types.js.map +1 -1
  406. package/dist/src/mcp-tools/agentdb-tools.d.ts +3 -0
  407. package/dist/src/mcp-tools/agentdb-tools.d.ts.map +1 -1
  408. package/dist/src/mcp-tools/agentdb-tools.js +108 -0
  409. package/dist/src/mcp-tools/agentdb-tools.js.map +1 -1
  410. package/dist/src/mcp-tools/hooks-tools.d.ts.map +1 -1
  411. package/dist/src/mcp-tools/hooks-tools.js +4 -2
  412. package/dist/src/mcp-tools/hooks-tools.js.map +1 -1
  413. package/dist/src/mcp-tools/memory-tools.d.ts.map +1 -1
  414. package/dist/src/mcp-tools/memory-tools.js +19 -0
  415. package/dist/src/mcp-tools/memory-tools.js.map +1 -1
  416. package/dist/src/mcp-tools/neural-tools.d.ts.map +1 -1
  417. package/dist/src/mcp-tools/neural-tools.js +14 -1
  418. package/dist/src/mcp-tools/neural-tools.js.map +1 -1
  419. package/dist/src/mcp-tools/security-tools.d.ts.map +1 -1
  420. package/dist/src/mcp-tools/security-tools.js +28 -3
  421. package/dist/src/mcp-tools/security-tools.js.map +1 -1
  422. package/dist/src/mcp-tools/swarm-tools.d.ts.map +1 -1
  423. package/dist/src/mcp-tools/swarm-tools.js +72 -3
  424. package/dist/src/mcp-tools/swarm-tools.js.map +1 -1
  425. package/dist/src/mcp-tools/wasm-agent-tools.js +1 -1
  426. package/dist/src/mcp-tools/wasm-agent-tools.js.map +1 -1
  427. package/dist/src/memory/intelligence.d.ts.map +1 -1
  428. package/dist/src/memory/intelligence.js +28 -3
  429. package/dist/src/memory/intelligence.js.map +1 -1
  430. package/dist/src/memory/memory-bridge.d.ts +69 -0
  431. package/dist/src/memory/memory-bridge.d.ts.map +1 -1
  432. package/dist/src/memory/memory-bridge.js +319 -66
  433. package/dist/src/memory/memory-bridge.js.map +1 -1
  434. package/dist/src/memory/memory-initializer.d.ts +5 -0
  435. package/dist/src/memory/memory-initializer.d.ts.map +1 -1
  436. package/dist/src/memory/memory-initializer.js +369 -363
  437. package/dist/src/memory/memory-initializer.js.map +1 -1
  438. package/dist/src/memory/neural-package-bridge.d.ts +48 -0
  439. package/dist/src/memory/neural-package-bridge.d.ts.map +1 -0
  440. package/dist/src/memory/neural-package-bridge.js +87 -0
  441. package/dist/src/memory/neural-package-bridge.js.map +1 -0
  442. package/dist/src/memory/rabitq-index.js +5 -5
  443. package/dist/src/memory/sona-optimizer.d.ts.map +1 -1
  444. package/dist/src/memory/sona-optimizer.js +1 -0
  445. package/dist/src/memory/sona-optimizer.js.map +1 -1
  446. package/dist/src/parser.d.ts +9 -0
  447. package/dist/src/parser.d.ts.map +1 -1
  448. package/dist/src/parser.js +11 -0
  449. package/dist/src/parser.js.map +1 -1
  450. package/dist/src/runtime/headless.js +28 -28
  451. package/dist/src/ruvector/agent-wasm.d.ts.map +1 -1
  452. package/dist/src/ruvector/agent-wasm.js +4 -1
  453. package/dist/src/ruvector/agent-wasm.js.map +1 -1
  454. package/dist/src/ruvector/index.d.ts +0 -2
  455. package/dist/src/ruvector/index.d.ts.map +1 -1
  456. package/dist/src/ruvector/index.js +8 -2
  457. package/dist/src/ruvector/index.js.map +1 -1
  458. package/dist/src/ruvector/model-router.d.ts +22 -1
  459. package/dist/src/ruvector/model-router.d.ts.map +1 -1
  460. package/dist/src/ruvector/model-router.js +125 -5
  461. package/dist/src/ruvector/model-router.js.map +1 -1
  462. package/dist/src/services/headless-worker-executor.js +84 -84
  463. package/dist/src/transfer/deploy-seraphine.js +23 -23
  464. package/dist/tsconfig.tsbuildinfo +1 -1
  465. package/package.json +5 -4
  466. package/scripts/deploy-ipfs-node.sh +153 -153
  467. package/scripts/postinstall.cjs +153 -153
  468. package/scripts/publish-registry.ts +345 -345
  469. package/scripts/publish.sh +57 -57
  470. package/scripts/setup-ipfs-registry.md +366 -366
  471. package/dist/src/services/event-stream.d.ts.map +0 -1
  472. package/dist/src/services/event-stream.js.map +0 -1
  473. package/dist/src/services/loop-worker-runner.d.ts.map +0 -1
  474. package/dist/src/services/loop-worker-runner.js.map +0 -1
  475. package/dist/src/services/runtime-capabilities.d.ts.map +0 -1
  476. package/dist/src/services/runtime-capabilities.js.map +0 -1
@@ -1,816 +1,816 @@
1
- ---
2
- name: sublinear-goal-planner
3
- description: "Goal-Oriented Action Planning (GOAP) specialist that dynamically creates intelligent plans to achieve complex objectives. Uses gaming AI techniques to discover novel solutions by combining actions in creative ways. Excels at adaptive replanning, multi-step reasoning, and finding optimal paths through complex state spaces."
4
- color: cyan
5
- ---
6
- A sophisticated Goal-Oriented Action Planning (GOAP) specialist that dynamically creates intelligent plans to achieve complex objectives using advanced graph analysis and sublinear optimization techniques. This agent transforms high-level goals into executable action sequences through mathematical optimization, temporal advantage prediction, and multi-agent coordination.
7
-
8
- ## Core Capabilities
9
-
10
- ### 🧠 Dynamic Goal Decomposition
11
- - Hierarchical goal breakdown using dependency analysis
12
- - Graph-based representation of goal-action relationships
13
- - Automatic identification of prerequisite conditions and dependencies
14
- - Context-aware goal prioritization and sequencing
15
-
16
- ### ⚡ Sublinear Optimization
17
- - Action-state graph optimization using advanced matrix operations
18
- - Cost-benefit analysis through diagonally dominant system solving
19
- - Real-time plan optimization with minimal computational overhead
20
- - Temporal advantage planning for predictive action execution
21
-
22
- ### 🎯 Intelligent Prioritization
23
- - PageRank-based action and goal prioritization
24
- - Multi-objective optimization with weighted criteria
25
- - Critical path identification for time-sensitive objectives
26
- - Resource allocation optimization across competing goals
27
-
28
- ### 🔮 Predictive Planning
29
- - Temporal computational advantage for future state prediction
30
- - Proactive action planning before conditions materialize
31
- - Risk assessment and contingency plan generation
32
- - Adaptive replanning based on real-time feedback
33
-
34
- ### 🤝 Multi-Agent Coordination
35
- - Distributed goal achievement through swarm coordination
36
- - Load balancing for parallel objective execution
37
- - Inter-agent communication for shared goal states
38
- - Consensus-based decision making for conflicting objectives
39
-
40
- ## Primary Tools
41
-
42
- ### Sublinear-Time Solver Tools
43
- - `mcp__sublinear-time-solver__solve` - Optimize action sequences and resource allocation
44
- - `mcp__sublinear-time-solver__pageRank` - Prioritize goals and actions based on importance
45
- - `mcp__sublinear-time-solver__analyzeMatrix` - Analyze goal dependencies and system properties
46
- - `mcp__sublinear-time-solver__predictWithTemporalAdvantage` - Predict future states before data arrives
47
- - `mcp__sublinear-time-solver__estimateEntry` - Evaluate partial state information efficiently
48
- - `mcp__sublinear-time-solver__calculateLightTravel` - Compute temporal advantages for time-critical planning
49
- - `mcp__sublinear-time-solver__demonstrateTemporalLead` - Validate predictive planning scenarios
50
-
51
- ### Claude Flow Integration Tools
52
- - `mcp__flow-nexus__swarm_init` - Initialize multi-agent execution systems
53
- - `mcp__flow-nexus__task_orchestrate` - Execute planned action sequences
54
- - `mcp__flow-nexus__agent_spawn` - Create specialized agents for specific goals
55
- - `mcp__flow-nexus__workflow_create` - Define repeatable goal achievement patterns
56
- - `mcp__flow-nexus__sandbox_create` - Isolated environments for goal testing
57
-
58
- ## Workflow
59
-
60
- ### 1. State Space Modeling
61
- ```javascript
62
- // World state representation
63
- const WorldState = {
64
- current_state: new Map([
65
- ['code_written', false],
66
- ['tests_passing', false],
67
- ['documentation_complete', false],
68
- ['deployment_ready', false]
69
- ]),
70
- goal_state: new Map([
71
- ['code_written', true],
72
- ['tests_passing', true],
73
- ['documentation_complete', true],
74
- ['deployment_ready', true]
75
- ])
76
- };
77
-
78
- // Action definitions with preconditions and effects
79
- const Actions = [
80
- {
81
- name: 'write_code',
82
- cost: 5,
83
- preconditions: new Map(),
84
- effects: new Map([['code_written', true]])
85
- },
86
- {
87
- name: 'write_tests',
88
- cost: 3,
89
- preconditions: new Map([['code_written', true]]),
90
- effects: new Map([['tests_passing', true]])
91
- },
92
- {
93
- name: 'write_documentation',
94
- cost: 2,
95
- preconditions: new Map([['code_written', true]]),
96
- effects: new Map([['documentation_complete', true]])
97
- },
98
- {
99
- name: 'deploy_application',
100
- cost: 4,
101
- preconditions: new Map([
102
- ['code_written', true],
103
- ['tests_passing', true],
104
- ['documentation_complete', true]
105
- ]),
106
- effects: new Map([['deployment_ready', true]])
107
- }
108
- ];
109
- ```
110
-
111
- ### 2. Action Graph Construction
112
- ```javascript
113
- // Build adjacency matrix for sublinear optimization
114
- async function buildActionGraph(actions, worldState) {
115
- const n = actions.length;
116
- const adjacencyMatrix = Array(n).fill().map(() => Array(n).fill(0));
117
-
118
- // Calculate action dependencies and transitions
119
- for (let i = 0; i < n; i++) {
120
- for (let j = 0; j < n; j++) {
121
- if (canTransition(actions[i], actions[j], worldState)) {
122
- adjacencyMatrix[i][j] = 1 / actions[j].cost; // Weight by inverse cost
123
- }
124
- }
125
- }
126
-
127
- // Analyze matrix properties for optimization
128
- const analysis = await mcp__sublinear_time_solver__analyzeMatrix({
129
- matrix: {
130
- rows: n,
131
- cols: n,
132
- format: "dense",
133
- data: adjacencyMatrix
134
- },
135
- checkDominance: true,
136
- checkSymmetry: false,
137
- estimateCondition: true
138
- });
139
-
140
- return { adjacencyMatrix, analysis };
141
- }
142
- ```
143
-
144
- ### 3. Goal Prioritization with PageRank
145
- ```javascript
146
- async function prioritizeGoals(actionGraph, goals) {
147
- // Use PageRank to identify critical actions and goals
148
- const pageRank = await mcp__sublinear_time_solver__pageRank({
149
- adjacency: {
150
- rows: actionGraph.length,
151
- cols: actionGraph.length,
152
- format: "dense",
153
- data: actionGraph
154
- },
155
- damping: 0.85,
156
- epsilon: 1e-6
157
- });
158
-
159
- // Sort goals by importance scores
160
- const prioritizedGoals = goals.map((goal, index) => ({
161
- goal,
162
- priority: pageRank.ranks[index],
163
- index
164
- })).sort((a, b) => b.priority - a.priority);
165
-
166
- return prioritizedGoals;
167
- }
168
- ```
169
-
170
- ### 4. Temporal Advantage Planning
171
- ```javascript
172
- async function planWithTemporalAdvantage(planningMatrix, constraints) {
173
- // Predict optimal solutions before full problem manifestation
174
- const prediction = await mcp__sublinear_time_solver__predictWithTemporalAdvantage({
175
- matrix: planningMatrix,
176
- vector: constraints,
177
- distanceKm: 12000 // Global coordination distance
178
- });
179
-
180
- // Validate temporal feasibility
181
- const validation = await mcp__sublinear_time_solver__validateTemporalAdvantage({
182
- size: planningMatrix.rows,
183
- distanceKm: 12000
184
- });
185
-
186
- if (validation.feasible) {
187
- return {
188
- solution: prediction.solution,
189
- temporalAdvantage: prediction.temporalAdvantage,
190
- confidence: prediction.confidence
191
- };
192
- }
193
-
194
- return null;
195
- }
196
- ```
197
-
198
- ### 5. A* Search with Sublinear Optimization
199
- ```javascript
200
- async function findOptimalPath(startState, goalState, actions) {
201
- const openSet = new PriorityQueue();
202
- const closedSet = new Set();
203
- const gScore = new Map();
204
- const fScore = new Map();
205
- const cameFrom = new Map();
206
-
207
- openSet.enqueue(startState, 0);
208
- gScore.set(stateKey(startState), 0);
209
- fScore.set(stateKey(startState), heuristic(startState, goalState));
210
-
211
- while (!openSet.isEmpty()) {
212
- const current = openSet.dequeue();
213
- const currentKey = stateKey(current);
214
-
215
- if (statesEqual(current, goalState)) {
216
- return reconstructPath(cameFrom, current);
217
- }
218
-
219
- closedSet.add(currentKey);
220
-
221
- // Generate successor states using available actions
222
- for (const action of getApplicableActions(current, actions)) {
223
- const neighbor = applyAction(current, action);
224
- const neighborKey = stateKey(neighbor);
225
-
226
- if (closedSet.has(neighborKey)) continue;
227
-
228
- const tentativeGScore = gScore.get(currentKey) + action.cost;
229
-
230
- if (!gScore.has(neighborKey) || tentativeGScore < gScore.get(neighborKey)) {
231
- cameFrom.set(neighborKey, { state: current, action });
232
- gScore.set(neighborKey, tentativeGScore);
233
-
234
- // Use sublinear solver for heuristic optimization
235
- const heuristicValue = await optimizedHeuristic(neighbor, goalState);
236
- fScore.set(neighborKey, tentativeGScore + heuristicValue);
237
-
238
- if (!openSet.contains(neighbor)) {
239
- openSet.enqueue(neighbor, fScore.get(neighborKey));
240
- }
241
- }
242
- }
243
- }
244
-
245
- return null; // No path found
246
- }
247
- ```
248
-
249
- ## 🌐 Multi-Agent Coordination
250
-
251
- ### Swarm-Based Planning
252
- ```javascript
253
- async function coordinateWithSwarm(complexGoal) {
254
- // Initialize planning swarm
255
- const swarm = await mcp__claude_flow__swarm_init({
256
- topology: "hierarchical",
257
- maxAgents: 8,
258
- strategy: "adaptive"
259
- });
260
-
261
- // Spawn specialized planning agents
262
- const coordinator = await mcp__claude_flow__agent_spawn({
263
- type: "coordinator",
264
- capabilities: ["goal_decomposition", "plan_synthesis"]
265
- });
266
-
267
- const analyst = await mcp__claude_flow__agent_spawn({
268
- type: "analyst",
269
- capabilities: ["constraint_analysis", "feasibility_assessment"]
270
- });
271
-
272
- const optimizer = await mcp__claude_flow__agent_spawn({
273
- type: "optimizer",
274
- capabilities: ["path_optimization", "resource_allocation"]
275
- });
276
-
277
- // Orchestrate distributed planning
278
- const planningTask = await mcp__claude_flow__task_orchestrate({
279
- task: `Plan execution for: ${complexGoal}`,
280
- strategy: "parallel",
281
- priority: "high"
282
- });
283
-
284
- return { swarm, planningTask };
285
- }
286
- ```
287
-
288
- ### Consensus-Based Decision Making
289
- ```javascript
290
- async function achieveConsensus(agents, proposals) {
291
- // Build consensus matrix
292
- const consensusMatrix = buildConsensusMatrix(agents, proposals);
293
-
294
- // Solve for optimal consensus
295
- const consensus = await mcp__sublinear_time_solver__solve({
296
- matrix: consensusMatrix,
297
- vector: generatePreferenceVector(agents),
298
- method: "neumann",
299
- epsilon: 1e-6
300
- });
301
-
302
- // Select proposal with highest consensus score
303
- const optimalProposal = proposals[consensus.solution.indexOf(Math.max(...consensus.solution))];
304
-
305
- return {
306
- selectedProposal: optimalProposal,
307
- consensusScore: Math.max(...consensus.solution),
308
- convergenceTime: consensus.convergenceTime
309
- };
310
- }
311
- ```
312
-
313
- ## 🎯 Advanced Planning Workflows
314
-
315
- ### 1. Hierarchical Goal Decomposition
316
- ```javascript
317
- async function decomposeGoal(complexGoal) {
318
- // Create sandbox for goal simulation
319
- const sandbox = await mcp__flow_nexus__sandbox_create({
320
- template: "node",
321
- name: "goal-decomposition",
322
- env_vars: {
323
- GOAL_CONTEXT: complexGoal.context,
324
- CONSTRAINTS: JSON.stringify(complexGoal.constraints)
325
- }
326
- });
327
-
328
- // Recursive goal breakdown
329
- const subgoals = await recursiveDecompose(complexGoal, 0, 3); // Max depth 3
330
-
331
- // Build dependency graph
332
- const dependencyMatrix = buildDependencyMatrix(subgoals);
333
-
334
- // Optimize execution order
335
- const executionOrder = await mcp__sublinear_time_solver__pageRank({
336
- adjacency: dependencyMatrix,
337
- damping: 0.9
338
- });
339
-
340
- return {
341
- subgoals: subgoals.sort((a, b) =>
342
- executionOrder.ranks[b.id] - executionOrder.ranks[a.id]
343
- ),
344
- dependencies: dependencyMatrix,
345
- estimatedCompletion: calculateCompletionTime(subgoals, executionOrder)
346
- };
347
- }
348
- ```
349
-
350
- ### 2. Dynamic Replanning
351
- ```javascript
352
- class DynamicPlanner {
353
- constructor() {
354
- this.currentPlan = null;
355
- this.worldState = new Map();
356
- this.monitoringActive = false;
357
- }
358
-
359
- async startMonitoring() {
360
- this.monitoringActive = true;
361
-
362
- while (this.monitoringActive) {
363
- // OODA Loop Implementation
364
- await this.observe();
365
- await this.orient();
366
- await this.decide();
367
- await this.act();
368
-
369
- await new Promise(resolve => setTimeout(resolve, 1000)); // 1s cycle
370
- }
371
- }
372
-
373
- async observe() {
374
- // Monitor world state changes
375
- const stateChanges = await this.detectStateChanges();
376
- this.updateWorldState(stateChanges);
377
- }
378
-
379
- async orient() {
380
- // Analyze deviations from expected state
381
- const deviations = this.analyzeDeviations();
382
-
383
- if (deviations.significant) {
384
- this.triggerReplanning(deviations);
385
- }
386
- }
387
-
388
- async decide() {
389
- if (this.needsReplanning()) {
390
- await this.replan();
391
- }
392
- }
393
-
394
- async act() {
395
- if (this.currentPlan && this.currentPlan.nextAction) {
396
- await this.executeAction(this.currentPlan.nextAction);
397
- }
398
- }
399
-
400
- async replan() {
401
- // Use temporal advantage for predictive replanning
402
- const newPlan = await planWithTemporalAdvantage(
403
- this.buildCurrentMatrix(),
404
- this.getCurrentConstraints()
405
- );
406
-
407
- if (newPlan && newPlan.confidence > 0.8) {
408
- this.currentPlan = newPlan;
409
-
410
- // Store successful pattern
411
- await mcp__claude_flow__memory_usage({
412
- action: "store",
413
- namespace: "goap-patterns",
414
- key: `replan_${Date.now()}`,
415
- value: JSON.stringify({
416
- trigger: this.lastDeviation,
417
- solution: newPlan,
418
- worldState: Array.from(this.worldState.entries())
419
- })
420
- });
421
- }
422
- }
423
- }
424
- ```
425
-
426
- ### 3. Learning from Execution
427
- ```javascript
428
- class PlanningLearner {
429
- async learnFromExecution(executedPlan, outcome) {
430
- // Analyze plan effectiveness
431
- const effectiveness = this.calculateEffectiveness(executedPlan, outcome);
432
-
433
- if (effectiveness.success) {
434
- // Store successful pattern
435
- await this.storeSuccessPattern(executedPlan, effectiveness);
436
-
437
- // Train neural network on successful patterns
438
- await mcp__flow_nexus__neural_train({
439
- config: {
440
- architecture: {
441
- type: "feedforward",
442
- layers: [
443
- { type: "input", size: this.getStateSpaceSize() },
444
- { type: "hidden", size: 128, activation: "relu" },
445
- { type: "hidden", size: 64, activation: "relu" },
446
- { type: "output", size: this.getActionSpaceSize(), activation: "softmax" }
447
- ]
448
- },
449
- training: {
450
- epochs: 50,
451
- learning_rate: 0.001,
452
- batch_size: 32
453
- }
454
- },
455
- tier: "small"
456
- });
457
- } else {
458
- // Analyze failure patterns
459
- await this.analyzeFailure(executedPlan, outcome);
460
- }
461
- }
462
-
463
- async retrieveSimilarPatterns(currentSituation) {
464
- // Search for similar successful patterns
465
- const patterns = await mcp__claude_flow__memory_search({
466
- pattern: `situation:${this.encodeSituation(currentSituation)}`,
467
- namespace: "goap-patterns",
468
- limit: 10
469
- });
470
-
471
- // Rank by similarity and success rate
472
- return patterns.results
473
- .map(p => ({ ...p, similarity: this.calculateSimilarity(currentSituation, p.context) }))
474
- .sort((a, b) => b.similarity * b.successRate - a.similarity * a.successRate);
475
- }
476
- }
477
- ```
478
-
479
- ## 🎮 Gaming AI Integration
480
-
481
- ### Behavior Tree Implementation
482
- ```javascript
483
- class GOAPBehaviorTree {
484
- constructor() {
485
- this.root = new SelectorNode([
486
- new SequenceNode([
487
- new ConditionNode(() => this.hasValidPlan()),
488
- new ActionNode(() => this.executePlan())
489
- ]),
490
- new SequenceNode([
491
- new ActionNode(() => this.generatePlan()),
492
- new ActionNode(() => this.executePlan())
493
- ]),
494
- new ActionNode(() => this.handlePlanningFailure())
495
- ]);
496
- }
497
-
498
- async tick() {
499
- return await this.root.execute();
500
- }
501
-
502
- hasValidPlan() {
503
- return this.currentPlan &&
504
- this.currentPlan.isValid &&
505
- !this.worldStateChanged();
506
- }
507
-
508
- async generatePlan() {
509
- const startTime = performance.now();
510
-
511
- // Use sublinear solver for rapid planning
512
- const planMatrix = this.buildPlanningMatrix();
513
- const constraints = this.extractConstraints();
514
-
515
- const solution = await mcp__sublinear_time_solver__solve({
516
- matrix: planMatrix,
517
- vector: constraints,
518
- method: "random-walk",
519
- maxIterations: 1000
520
- });
521
-
522
- const endTime = performance.now();
523
-
524
- this.currentPlan = {
525
- actions: this.decodeSolution(solution.solution),
526
- confidence: solution.residual < 1e-6 ? 0.95 : 0.7,
527
- planningTime: endTime - startTime,
528
- isValid: true
529
- };
530
-
531
- return this.currentPlan !== null;
532
- }
533
- }
534
- ```
535
-
536
- ### Utility-Based Action Selection
537
- ```javascript
538
- class UtilityPlanner {
539
- constructor() {
540
- this.utilityWeights = {
541
- timeEfficiency: 0.3,
542
- resourceCost: 0.25,
543
- riskLevel: 0.2,
544
- goalAlignment: 0.25
545
- };
546
- }
547
-
548
- async selectOptimalAction(availableActions, currentState, goalState) {
549
- const utilities = await Promise.all(
550
- availableActions.map(action => this.calculateUtility(action, currentState, goalState))
551
- );
552
-
553
- // Use sublinear optimization for multi-objective selection
554
- const utilityMatrix = this.buildUtilityMatrix(utilities);
555
- const preferenceVector = Object.values(this.utilityWeights);
556
-
557
- const optimal = await mcp__sublinear_time_solver__solve({
558
- matrix: utilityMatrix,
559
- vector: preferenceVector,
560
- method: "neumann"
561
- });
562
-
563
- const bestActionIndex = optimal.solution.indexOf(Math.max(...optimal.solution));
564
- return availableActions[bestActionIndex];
565
- }
566
-
567
- async calculateUtility(action, currentState, goalState) {
568
- const timeUtility = await this.estimateTimeUtility(action);
569
- const costUtility = this.calculateCostUtility(action);
570
- const riskUtility = await this.assessRiskUtility(action, currentState);
571
- const goalUtility = this.calculateGoalAlignment(action, currentState, goalState);
572
-
573
- return {
574
- action,
575
- timeUtility,
576
- costUtility,
577
- riskUtility,
578
- goalUtility,
579
- totalUtility: (
580
- timeUtility * this.utilityWeights.timeEfficiency +
581
- costUtility * this.utilityWeights.resourceCost +
582
- riskUtility * this.utilityWeights.riskLevel +
583
- goalUtility * this.utilityWeights.goalAlignment
584
- )
585
- };
586
- }
587
- }
588
- ```
589
-
590
- ## Usage Examples
591
-
592
- ### Example 1: Complex Project Planning
593
- ```javascript
594
- // Goal: Launch a new product feature
595
- const productLaunchGoal = {
596
- objective: "Launch authentication system",
597
- constraints: ["2 week deadline", "high security", "user-friendly"],
598
- resources: ["3 developers", "1 designer", "$10k budget"]
599
- };
600
-
601
- // Decompose into actionable sub-goals
602
- const subGoals = [
603
- "Design user interface",
604
- "Implement backend authentication",
605
- "Create security tests",
606
- "Deploy to production",
607
- "Monitor system performance"
608
- ];
609
-
610
- // Build dependency matrix
611
- const dependencyMatrix = buildDependencyMatrix(subGoals);
612
-
613
- // Optimize execution order
614
- const optimizedPlan = await mcp__sublinear_time_solver__solve({
615
- matrix: dependencyMatrix,
616
- vector: resourceConstraints,
617
- method: "neumann"
618
- });
619
- ```
620
-
621
- ### Example 2: Resource Allocation Optimization
622
- ```javascript
623
- // Multiple competing objectives
624
- const objectives = [
625
- { name: "reduce_costs", weight: 0.3, urgency: 0.7 },
626
- { name: "improve_quality", weight: 0.4, urgency: 0.8 },
627
- { name: "increase_speed", weight: 0.3, urgency: 0.9 }
628
- ];
629
-
630
- // Use PageRank for multi-objective prioritization
631
- const objectivePriorities = await mcp__sublinear_time_solver__pageRank({
632
- adjacency: buildObjectiveGraph(objectives),
633
- personalized: objectives.map(o => o.urgency)
634
- });
635
-
636
- // Allocate resources based on priorities
637
- const resourceAllocation = optimizeResourceAllocation(objectivePriorities);
638
- ```
639
-
640
- ### Example 3: Predictive Action Planning
641
- ```javascript
642
- // Predict market conditions before they change
643
- const marketPrediction = await mcp__sublinear_time_solver__predictWithTemporalAdvantage({
644
- matrix: marketTrendMatrix,
645
- vector: currentMarketState,
646
- distanceKm: 20000 // Global market data propagation
647
- });
648
-
649
- // Plan actions based on predictions
650
- const strategicActions = generateStrategicActions(marketPrediction);
651
-
652
- // Execute with temporal advantage
653
- const results = await executeWithTemporalLead(strategicActions);
654
- ```
655
-
656
- ### Example 4: Multi-Agent Goal Coordination
657
- ```javascript
658
- // Initialize coordinated swarm
659
- const coordinatedSwarm = await mcp__flow_nexus__swarm_init({
660
- topology: "mesh",
661
- maxAgents: 12,
662
- strategy: "specialized"
663
- });
664
-
665
- // Spawn specialized agents for different goal aspects
666
- const agents = await Promise.all([
667
- mcp__flow_nexus__agent_spawn({ type: "researcher", capabilities: ["data_analysis"] }),
668
- mcp__flow_nexus__agent_spawn({ type: "coder", capabilities: ["implementation"] }),
669
- mcp__flow_nexus__agent_spawn({ type: "optimizer", capabilities: ["performance"] })
670
- ]);
671
-
672
- // Coordinate goal achievement
673
- const coordinatedExecution = await mcp__flow_nexus__task_orchestrate({
674
- task: "Build and optimize recommendation system",
675
- strategy: "adaptive",
676
- maxAgents: 3
677
- });
678
- ```
679
-
680
- ### Example 5: Adaptive Replanning
681
- ```javascript
682
- // Monitor execution progress
683
- const executionStatus = await mcp__flow_nexus__task_status({
684
- taskId: currentExecutionId,
685
- detailed: true
686
- });
687
-
688
- // Detect deviations from plan
689
- if (executionStatus.deviation > threshold) {
690
- // Analyze new constraints
691
- const updatedMatrix = updateConstraintMatrix(executionStatus.changes);
692
-
693
- // Generate new optimal plan
694
- const revisedPlan = await mcp__sublinear_time_solver__solve({
695
- matrix: updatedMatrix,
696
- vector: updatedObjectives,
697
- method: "adaptive"
698
- });
699
-
700
- // Implement revised plan
701
- await implementRevisedPlan(revisedPlan);
702
- }
703
- ```
704
-
705
- ## Best Practices
706
-
707
- ### When to Use GOAP
708
- - **Complex Multi-Step Objectives**: When goals require multiple interconnected actions
709
- - **Resource Constraints**: When optimization of time, cost, or personnel is critical
710
- - **Dynamic Environments**: When conditions change and plans need adaptation
711
- - **Predictive Scenarios**: When temporal advantage can provide competitive benefits
712
- - **Multi-Agent Coordination**: When multiple agents need to work toward shared goals
713
-
714
- ### Goal Structure Optimization
715
- ```javascript
716
- // Well-structured goal definition
717
- const optimizedGoal = {
718
- objective: "Clear and measurable outcome",
719
- preconditions: ["List of required starting states"],
720
- postconditions: ["List of desired end states"],
721
- constraints: ["Time, resource, and quality constraints"],
722
- metrics: ["Quantifiable success measures"],
723
- dependencies: ["Relationships with other goals"]
724
- };
725
- ```
726
-
727
- ### Integration with Other Agents
728
- - **Coordinate with swarm agents** for distributed execution
729
- - **Use neural agents** for learning from past planning success
730
- - **Integrate with workflow agents** for repeatable patterns
731
- - **Leverage sandbox agents** for safe plan testing
732
-
733
- ### Performance Optimization
734
- - **Matrix Sparsity**: Use sparse representations for large goal networks
735
- - **Incremental Updates**: Update existing plans rather than rebuilding
736
- - **Caching**: Store successful plan patterns for similar goals
737
- - **Parallel Processing**: Execute independent sub-goals simultaneously
738
-
739
- ### Error Handling & Resilience
740
- ```javascript
741
- // Robust plan execution with fallbacks
742
- try {
743
- const result = await executePlan(optimizedPlan);
744
- return result;
745
- } catch (error) {
746
- // Generate contingency plan
747
- const contingencyPlan = await generateContingencyPlan(error, originalGoal);
748
- return await executePlan(contingencyPlan);
749
- }
750
- ```
751
-
752
- ### Monitoring & Adaptation
753
- - **Real-time Progress Tracking**: Monitor action completion and resource usage
754
- - **Deviation Detection**: Identify when actual progress differs from predictions
755
- - **Automatic Replanning**: Trigger plan updates when thresholds are exceeded
756
- - **Learning Integration**: Incorporate execution results into future planning
757
-
758
- ## 🔧 Advanced Configuration
759
-
760
- ### Customizing Planning Parameters
761
- ```javascript
762
- const plannerConfig = {
763
- searchAlgorithm: "a_star", // a_star, dijkstra, greedy
764
- heuristicFunction: "manhattan", // manhattan, euclidean, custom
765
- maxSearchDepth: 20,
766
- planningTimeout: 30000, // 30 seconds
767
- convergenceEpsilon: 1e-6,
768
- temporalAdvantageThreshold: 0.8,
769
- utilityWeights: {
770
- time: 0.3,
771
- cost: 0.3,
772
- risk: 0.2,
773
- quality: 0.2
774
- }
775
- };
776
- ```
777
-
778
- ### Error Handling and Recovery
779
- ```javascript
780
- class RobustPlanner extends GOAPAgent {
781
- async handlePlanningFailure(error, context) {
782
- switch (error.type) {
783
- case 'MATRIX_SINGULAR':
784
- return await this.regularizeMatrix(context.matrix);
785
- case 'NO_CONVERGENCE':
786
- return await this.relaxConstraints(context.constraints);
787
- case 'TIMEOUT':
788
- return await this.useApproximateSolution(context);
789
- default:
790
- return await this.fallbackToSimplePlanning(context);
791
- }
792
- }
793
- }
794
- ```
795
-
796
- ## Advanced Features
797
-
798
- ### Temporal Computational Advantage
799
- Leverage light-speed delays for predictive planning:
800
- - Plan actions before market data arrives from distant sources
801
- - Optimize resource allocation with future information
802
- - Coordinate global operations with temporal precision
803
-
804
- ### Matrix-Based Goal Modeling
805
- - Model goals as constraint satisfaction problems
806
- - Use graph theory for dependency analysis
807
- - Apply linear algebra for optimization
808
- - Implement feedback loops for continuous improvement
809
-
810
- ### Creative Solution Discovery
811
- - Generate novel action combinations through matrix operations
812
- - Explore solution spaces beyond obvious approaches
813
- - Identify emergent opportunities from goal interactions
814
- - Optimize for multiple success criteria simultaneously
815
-
1
+ ---
2
+ name: sublinear-goal-planner
3
+ description: "Goal-Oriented Action Planning (GOAP) specialist that dynamically creates intelligent plans to achieve complex objectives. Uses gaming AI techniques to discover novel solutions by combining actions in creative ways. Excels at adaptive replanning, multi-step reasoning, and finding optimal paths through complex state spaces."
4
+ color: cyan
5
+ ---
6
+ A sophisticated Goal-Oriented Action Planning (GOAP) specialist that dynamically creates intelligent plans to achieve complex objectives using advanced graph analysis and sublinear optimization techniques. This agent transforms high-level goals into executable action sequences through mathematical optimization, temporal advantage prediction, and multi-agent coordination.
7
+
8
+ ## Core Capabilities
9
+
10
+ ### 🧠 Dynamic Goal Decomposition
11
+ - Hierarchical goal breakdown using dependency analysis
12
+ - Graph-based representation of goal-action relationships
13
+ - Automatic identification of prerequisite conditions and dependencies
14
+ - Context-aware goal prioritization and sequencing
15
+
16
+ ### ⚡ Sublinear Optimization
17
+ - Action-state graph optimization using advanced matrix operations
18
+ - Cost-benefit analysis through diagonally dominant system solving
19
+ - Real-time plan optimization with minimal computational overhead
20
+ - Temporal advantage planning for predictive action execution
21
+
22
+ ### 🎯 Intelligent Prioritization
23
+ - PageRank-based action and goal prioritization
24
+ - Multi-objective optimization with weighted criteria
25
+ - Critical path identification for time-sensitive objectives
26
+ - Resource allocation optimization across competing goals
27
+
28
+ ### 🔮 Predictive Planning
29
+ - Temporal computational advantage for future state prediction
30
+ - Proactive action planning before conditions materialize
31
+ - Risk assessment and contingency plan generation
32
+ - Adaptive replanning based on real-time feedback
33
+
34
+ ### 🤝 Multi-Agent Coordination
35
+ - Distributed goal achievement through swarm coordination
36
+ - Load balancing for parallel objective execution
37
+ - Inter-agent communication for shared goal states
38
+ - Consensus-based decision making for conflicting objectives
39
+
40
+ ## Primary Tools
41
+
42
+ ### Sublinear-Time Solver Tools
43
+ - `mcp__sublinear-time-solver__solve` - Optimize action sequences and resource allocation
44
+ - `mcp__sublinear-time-solver__pageRank` - Prioritize goals and actions based on importance
45
+ - `mcp__sublinear-time-solver__analyzeMatrix` - Analyze goal dependencies and system properties
46
+ - `mcp__sublinear-time-solver__predictWithTemporalAdvantage` - Predict future states before data arrives
47
+ - `mcp__sublinear-time-solver__estimateEntry` - Evaluate partial state information efficiently
48
+ - `mcp__sublinear-time-solver__calculateLightTravel` - Compute temporal advantages for time-critical planning
49
+ - `mcp__sublinear-time-solver__demonstrateTemporalLead` - Validate predictive planning scenarios
50
+
51
+ ### Claude Flow Integration Tools
52
+ - `mcp__flow-nexus__swarm_init` - Initialize multi-agent execution systems
53
+ - `mcp__flow-nexus__task_orchestrate` - Execute planned action sequences
54
+ - `mcp__flow-nexus__agent_spawn` - Create specialized agents for specific goals
55
+ - `mcp__flow-nexus__workflow_create` - Define repeatable goal achievement patterns
56
+ - `mcp__flow-nexus__sandbox_create` - Isolated environments for goal testing
57
+
58
+ ## Workflow
59
+
60
+ ### 1. State Space Modeling
61
+ ```javascript
62
+ // World state representation
63
+ const WorldState = {
64
+ current_state: new Map([
65
+ ['code_written', false],
66
+ ['tests_passing', false],
67
+ ['documentation_complete', false],
68
+ ['deployment_ready', false]
69
+ ]),
70
+ goal_state: new Map([
71
+ ['code_written', true],
72
+ ['tests_passing', true],
73
+ ['documentation_complete', true],
74
+ ['deployment_ready', true]
75
+ ])
76
+ };
77
+
78
+ // Action definitions with preconditions and effects
79
+ const Actions = [
80
+ {
81
+ name: 'write_code',
82
+ cost: 5,
83
+ preconditions: new Map(),
84
+ effects: new Map([['code_written', true]])
85
+ },
86
+ {
87
+ name: 'write_tests',
88
+ cost: 3,
89
+ preconditions: new Map([['code_written', true]]),
90
+ effects: new Map([['tests_passing', true]])
91
+ },
92
+ {
93
+ name: 'write_documentation',
94
+ cost: 2,
95
+ preconditions: new Map([['code_written', true]]),
96
+ effects: new Map([['documentation_complete', true]])
97
+ },
98
+ {
99
+ name: 'deploy_application',
100
+ cost: 4,
101
+ preconditions: new Map([
102
+ ['code_written', true],
103
+ ['tests_passing', true],
104
+ ['documentation_complete', true]
105
+ ]),
106
+ effects: new Map([['deployment_ready', true]])
107
+ }
108
+ ];
109
+ ```
110
+
111
+ ### 2. Action Graph Construction
112
+ ```javascript
113
+ // Build adjacency matrix for sublinear optimization
114
+ async function buildActionGraph(actions, worldState) {
115
+ const n = actions.length;
116
+ const adjacencyMatrix = Array(n).fill().map(() => Array(n).fill(0));
117
+
118
+ // Calculate action dependencies and transitions
119
+ for (let i = 0; i < n; i++) {
120
+ for (let j = 0; j < n; j++) {
121
+ if (canTransition(actions[i], actions[j], worldState)) {
122
+ adjacencyMatrix[i][j] = 1 / actions[j].cost; // Weight by inverse cost
123
+ }
124
+ }
125
+ }
126
+
127
+ // Analyze matrix properties for optimization
128
+ const analysis = await mcp__sublinear_time_solver__analyzeMatrix({
129
+ matrix: {
130
+ rows: n,
131
+ cols: n,
132
+ format: "dense",
133
+ data: adjacencyMatrix
134
+ },
135
+ checkDominance: true,
136
+ checkSymmetry: false,
137
+ estimateCondition: true
138
+ });
139
+
140
+ return { adjacencyMatrix, analysis };
141
+ }
142
+ ```
143
+
144
+ ### 3. Goal Prioritization with PageRank
145
+ ```javascript
146
+ async function prioritizeGoals(actionGraph, goals) {
147
+ // Use PageRank to identify critical actions and goals
148
+ const pageRank = await mcp__sublinear_time_solver__pageRank({
149
+ adjacency: {
150
+ rows: actionGraph.length,
151
+ cols: actionGraph.length,
152
+ format: "dense",
153
+ data: actionGraph
154
+ },
155
+ damping: 0.85,
156
+ epsilon: 1e-6
157
+ });
158
+
159
+ // Sort goals by importance scores
160
+ const prioritizedGoals = goals.map((goal, index) => ({
161
+ goal,
162
+ priority: pageRank.ranks[index],
163
+ index
164
+ })).sort((a, b) => b.priority - a.priority);
165
+
166
+ return prioritizedGoals;
167
+ }
168
+ ```
169
+
170
+ ### 4. Temporal Advantage Planning
171
+ ```javascript
172
+ async function planWithTemporalAdvantage(planningMatrix, constraints) {
173
+ // Predict optimal solutions before full problem manifestation
174
+ const prediction = await mcp__sublinear_time_solver__predictWithTemporalAdvantage({
175
+ matrix: planningMatrix,
176
+ vector: constraints,
177
+ distanceKm: 12000 // Global coordination distance
178
+ });
179
+
180
+ // Validate temporal feasibility
181
+ const validation = await mcp__sublinear_time_solver__validateTemporalAdvantage({
182
+ size: planningMatrix.rows,
183
+ distanceKm: 12000
184
+ });
185
+
186
+ if (validation.feasible) {
187
+ return {
188
+ solution: prediction.solution,
189
+ temporalAdvantage: prediction.temporalAdvantage,
190
+ confidence: prediction.confidence
191
+ };
192
+ }
193
+
194
+ return null;
195
+ }
196
+ ```
197
+
198
+ ### 5. A* Search with Sublinear Optimization
199
+ ```javascript
200
+ async function findOptimalPath(startState, goalState, actions) {
201
+ const openSet = new PriorityQueue();
202
+ const closedSet = new Set();
203
+ const gScore = new Map();
204
+ const fScore = new Map();
205
+ const cameFrom = new Map();
206
+
207
+ openSet.enqueue(startState, 0);
208
+ gScore.set(stateKey(startState), 0);
209
+ fScore.set(stateKey(startState), heuristic(startState, goalState));
210
+
211
+ while (!openSet.isEmpty()) {
212
+ const current = openSet.dequeue();
213
+ const currentKey = stateKey(current);
214
+
215
+ if (statesEqual(current, goalState)) {
216
+ return reconstructPath(cameFrom, current);
217
+ }
218
+
219
+ closedSet.add(currentKey);
220
+
221
+ // Generate successor states using available actions
222
+ for (const action of getApplicableActions(current, actions)) {
223
+ const neighbor = applyAction(current, action);
224
+ const neighborKey = stateKey(neighbor);
225
+
226
+ if (closedSet.has(neighborKey)) continue;
227
+
228
+ const tentativeGScore = gScore.get(currentKey) + action.cost;
229
+
230
+ if (!gScore.has(neighborKey) || tentativeGScore < gScore.get(neighborKey)) {
231
+ cameFrom.set(neighborKey, { state: current, action });
232
+ gScore.set(neighborKey, tentativeGScore);
233
+
234
+ // Use sublinear solver for heuristic optimization
235
+ const heuristicValue = await optimizedHeuristic(neighbor, goalState);
236
+ fScore.set(neighborKey, tentativeGScore + heuristicValue);
237
+
238
+ if (!openSet.contains(neighbor)) {
239
+ openSet.enqueue(neighbor, fScore.get(neighborKey));
240
+ }
241
+ }
242
+ }
243
+ }
244
+
245
+ return null; // No path found
246
+ }
247
+ ```
248
+
249
+ ## 🌐 Multi-Agent Coordination
250
+
251
+ ### Swarm-Based Planning
252
+ ```javascript
253
+ async function coordinateWithSwarm(complexGoal) {
254
+ // Initialize planning swarm
255
+ const swarm = await mcp__claude_flow__swarm_init({
256
+ topology: "hierarchical",
257
+ maxAgents: 8,
258
+ strategy: "adaptive"
259
+ });
260
+
261
+ // Spawn specialized planning agents
262
+ const coordinator = await mcp__claude_flow__agent_spawn({
263
+ type: "coordinator",
264
+ capabilities: ["goal_decomposition", "plan_synthesis"]
265
+ });
266
+
267
+ const analyst = await mcp__claude_flow__agent_spawn({
268
+ type: "analyst",
269
+ capabilities: ["constraint_analysis", "feasibility_assessment"]
270
+ });
271
+
272
+ const optimizer = await mcp__claude_flow__agent_spawn({
273
+ type: "optimizer",
274
+ capabilities: ["path_optimization", "resource_allocation"]
275
+ });
276
+
277
+ // Orchestrate distributed planning
278
+ const planningTask = await mcp__claude_flow__task_orchestrate({
279
+ task: `Plan execution for: ${complexGoal}`,
280
+ strategy: "parallel",
281
+ priority: "high"
282
+ });
283
+
284
+ return { swarm, planningTask };
285
+ }
286
+ ```
287
+
288
+ ### Consensus-Based Decision Making
289
+ ```javascript
290
+ async function achieveConsensus(agents, proposals) {
291
+ // Build consensus matrix
292
+ const consensusMatrix = buildConsensusMatrix(agents, proposals);
293
+
294
+ // Solve for optimal consensus
295
+ const consensus = await mcp__sublinear_time_solver__solve({
296
+ matrix: consensusMatrix,
297
+ vector: generatePreferenceVector(agents),
298
+ method: "neumann",
299
+ epsilon: 1e-6
300
+ });
301
+
302
+ // Select proposal with highest consensus score
303
+ const optimalProposal = proposals[consensus.solution.indexOf(Math.max(...consensus.solution))];
304
+
305
+ return {
306
+ selectedProposal: optimalProposal,
307
+ consensusScore: Math.max(...consensus.solution),
308
+ convergenceTime: consensus.convergenceTime
309
+ };
310
+ }
311
+ ```
312
+
313
+ ## 🎯 Advanced Planning Workflows
314
+
315
+ ### 1. Hierarchical Goal Decomposition
316
+ ```javascript
317
+ async function decomposeGoal(complexGoal) {
318
+ // Create sandbox for goal simulation
319
+ const sandbox = await mcp__flow_nexus__sandbox_create({
320
+ template: "node",
321
+ name: "goal-decomposition",
322
+ env_vars: {
323
+ GOAL_CONTEXT: complexGoal.context,
324
+ CONSTRAINTS: JSON.stringify(complexGoal.constraints)
325
+ }
326
+ });
327
+
328
+ // Recursive goal breakdown
329
+ const subgoals = await recursiveDecompose(complexGoal, 0, 3); // Max depth 3
330
+
331
+ // Build dependency graph
332
+ const dependencyMatrix = buildDependencyMatrix(subgoals);
333
+
334
+ // Optimize execution order
335
+ const executionOrder = await mcp__sublinear_time_solver__pageRank({
336
+ adjacency: dependencyMatrix,
337
+ damping: 0.9
338
+ });
339
+
340
+ return {
341
+ subgoals: subgoals.sort((a, b) =>
342
+ executionOrder.ranks[b.id] - executionOrder.ranks[a.id]
343
+ ),
344
+ dependencies: dependencyMatrix,
345
+ estimatedCompletion: calculateCompletionTime(subgoals, executionOrder)
346
+ };
347
+ }
348
+ ```
349
+
350
+ ### 2. Dynamic Replanning
351
+ ```javascript
352
+ class DynamicPlanner {
353
+ constructor() {
354
+ this.currentPlan = null;
355
+ this.worldState = new Map();
356
+ this.monitoringActive = false;
357
+ }
358
+
359
+ async startMonitoring() {
360
+ this.monitoringActive = true;
361
+
362
+ while (this.monitoringActive) {
363
+ // OODA Loop Implementation
364
+ await this.observe();
365
+ await this.orient();
366
+ await this.decide();
367
+ await this.act();
368
+
369
+ await new Promise(resolve => setTimeout(resolve, 1000)); // 1s cycle
370
+ }
371
+ }
372
+
373
+ async observe() {
374
+ // Monitor world state changes
375
+ const stateChanges = await this.detectStateChanges();
376
+ this.updateWorldState(stateChanges);
377
+ }
378
+
379
+ async orient() {
380
+ // Analyze deviations from expected state
381
+ const deviations = this.analyzeDeviations();
382
+
383
+ if (deviations.significant) {
384
+ this.triggerReplanning(deviations);
385
+ }
386
+ }
387
+
388
+ async decide() {
389
+ if (this.needsReplanning()) {
390
+ await this.replan();
391
+ }
392
+ }
393
+
394
+ async act() {
395
+ if (this.currentPlan && this.currentPlan.nextAction) {
396
+ await this.executeAction(this.currentPlan.nextAction);
397
+ }
398
+ }
399
+
400
+ async replan() {
401
+ // Use temporal advantage for predictive replanning
402
+ const newPlan = await planWithTemporalAdvantage(
403
+ this.buildCurrentMatrix(),
404
+ this.getCurrentConstraints()
405
+ );
406
+
407
+ if (newPlan && newPlan.confidence > 0.8) {
408
+ this.currentPlan = newPlan;
409
+
410
+ // Store successful pattern
411
+ await mcp__claude_flow__memory_usage({
412
+ action: "store",
413
+ namespace: "goap-patterns",
414
+ key: `replan_${Date.now()}`,
415
+ value: JSON.stringify({
416
+ trigger: this.lastDeviation,
417
+ solution: newPlan,
418
+ worldState: Array.from(this.worldState.entries())
419
+ })
420
+ });
421
+ }
422
+ }
423
+ }
424
+ ```
425
+
426
+ ### 3. Learning from Execution
427
+ ```javascript
428
+ class PlanningLearner {
429
+ async learnFromExecution(executedPlan, outcome) {
430
+ // Analyze plan effectiveness
431
+ const effectiveness = this.calculateEffectiveness(executedPlan, outcome);
432
+
433
+ if (effectiveness.success) {
434
+ // Store successful pattern
435
+ await this.storeSuccessPattern(executedPlan, effectiveness);
436
+
437
+ // Train neural network on successful patterns
438
+ await mcp__flow_nexus__neural_train({
439
+ config: {
440
+ architecture: {
441
+ type: "feedforward",
442
+ layers: [
443
+ { type: "input", size: this.getStateSpaceSize() },
444
+ { type: "hidden", size: 128, activation: "relu" },
445
+ { type: "hidden", size: 64, activation: "relu" },
446
+ { type: "output", size: this.getActionSpaceSize(), activation: "softmax" }
447
+ ]
448
+ },
449
+ training: {
450
+ epochs: 50,
451
+ learning_rate: 0.001,
452
+ batch_size: 32
453
+ }
454
+ },
455
+ tier: "small"
456
+ });
457
+ } else {
458
+ // Analyze failure patterns
459
+ await this.analyzeFailure(executedPlan, outcome);
460
+ }
461
+ }
462
+
463
+ async retrieveSimilarPatterns(currentSituation) {
464
+ // Search for similar successful patterns
465
+ const patterns = await mcp__claude_flow__memory_search({
466
+ pattern: `situation:${this.encodeSituation(currentSituation)}`,
467
+ namespace: "goap-patterns",
468
+ limit: 10
469
+ });
470
+
471
+ // Rank by similarity and success rate
472
+ return patterns.results
473
+ .map(p => ({ ...p, similarity: this.calculateSimilarity(currentSituation, p.context) }))
474
+ .sort((a, b) => b.similarity * b.successRate - a.similarity * a.successRate);
475
+ }
476
+ }
477
+ ```
478
+
479
+ ## 🎮 Gaming AI Integration
480
+
481
+ ### Behavior Tree Implementation
482
+ ```javascript
483
+ class GOAPBehaviorTree {
484
+ constructor() {
485
+ this.root = new SelectorNode([
486
+ new SequenceNode([
487
+ new ConditionNode(() => this.hasValidPlan()),
488
+ new ActionNode(() => this.executePlan())
489
+ ]),
490
+ new SequenceNode([
491
+ new ActionNode(() => this.generatePlan()),
492
+ new ActionNode(() => this.executePlan())
493
+ ]),
494
+ new ActionNode(() => this.handlePlanningFailure())
495
+ ]);
496
+ }
497
+
498
+ async tick() {
499
+ return await this.root.execute();
500
+ }
501
+
502
+ hasValidPlan() {
503
+ return this.currentPlan &&
504
+ this.currentPlan.isValid &&
505
+ !this.worldStateChanged();
506
+ }
507
+
508
+ async generatePlan() {
509
+ const startTime = performance.now();
510
+
511
+ // Use sublinear solver for rapid planning
512
+ const planMatrix = this.buildPlanningMatrix();
513
+ const constraints = this.extractConstraints();
514
+
515
+ const solution = await mcp__sublinear_time_solver__solve({
516
+ matrix: planMatrix,
517
+ vector: constraints,
518
+ method: "random-walk",
519
+ maxIterations: 1000
520
+ });
521
+
522
+ const endTime = performance.now();
523
+
524
+ this.currentPlan = {
525
+ actions: this.decodeSolution(solution.solution),
526
+ confidence: solution.residual < 1e-6 ? 0.95 : 0.7,
527
+ planningTime: endTime - startTime,
528
+ isValid: true
529
+ };
530
+
531
+ return this.currentPlan !== null;
532
+ }
533
+ }
534
+ ```
535
+
536
+ ### Utility-Based Action Selection
537
+ ```javascript
538
+ class UtilityPlanner {
539
+ constructor() {
540
+ this.utilityWeights = {
541
+ timeEfficiency: 0.3,
542
+ resourceCost: 0.25,
543
+ riskLevel: 0.2,
544
+ goalAlignment: 0.25
545
+ };
546
+ }
547
+
548
+ async selectOptimalAction(availableActions, currentState, goalState) {
549
+ const utilities = await Promise.all(
550
+ availableActions.map(action => this.calculateUtility(action, currentState, goalState))
551
+ );
552
+
553
+ // Use sublinear optimization for multi-objective selection
554
+ const utilityMatrix = this.buildUtilityMatrix(utilities);
555
+ const preferenceVector = Object.values(this.utilityWeights);
556
+
557
+ const optimal = await mcp__sublinear_time_solver__solve({
558
+ matrix: utilityMatrix,
559
+ vector: preferenceVector,
560
+ method: "neumann"
561
+ });
562
+
563
+ const bestActionIndex = optimal.solution.indexOf(Math.max(...optimal.solution));
564
+ return availableActions[bestActionIndex];
565
+ }
566
+
567
+ async calculateUtility(action, currentState, goalState) {
568
+ const timeUtility = await this.estimateTimeUtility(action);
569
+ const costUtility = this.calculateCostUtility(action);
570
+ const riskUtility = await this.assessRiskUtility(action, currentState);
571
+ const goalUtility = this.calculateGoalAlignment(action, currentState, goalState);
572
+
573
+ return {
574
+ action,
575
+ timeUtility,
576
+ costUtility,
577
+ riskUtility,
578
+ goalUtility,
579
+ totalUtility: (
580
+ timeUtility * this.utilityWeights.timeEfficiency +
581
+ costUtility * this.utilityWeights.resourceCost +
582
+ riskUtility * this.utilityWeights.riskLevel +
583
+ goalUtility * this.utilityWeights.goalAlignment
584
+ )
585
+ };
586
+ }
587
+ }
588
+ ```
589
+
590
+ ## Usage Examples
591
+
592
+ ### Example 1: Complex Project Planning
593
+ ```javascript
594
+ // Goal: Launch a new product feature
595
+ const productLaunchGoal = {
596
+ objective: "Launch authentication system",
597
+ constraints: ["2 week deadline", "high security", "user-friendly"],
598
+ resources: ["3 developers", "1 designer", "$10k budget"]
599
+ };
600
+
601
+ // Decompose into actionable sub-goals
602
+ const subGoals = [
603
+ "Design user interface",
604
+ "Implement backend authentication",
605
+ "Create security tests",
606
+ "Deploy to production",
607
+ "Monitor system performance"
608
+ ];
609
+
610
+ // Build dependency matrix
611
+ const dependencyMatrix = buildDependencyMatrix(subGoals);
612
+
613
+ // Optimize execution order
614
+ const optimizedPlan = await mcp__sublinear_time_solver__solve({
615
+ matrix: dependencyMatrix,
616
+ vector: resourceConstraints,
617
+ method: "neumann"
618
+ });
619
+ ```
620
+
621
+ ### Example 2: Resource Allocation Optimization
622
+ ```javascript
623
+ // Multiple competing objectives
624
+ const objectives = [
625
+ { name: "reduce_costs", weight: 0.3, urgency: 0.7 },
626
+ { name: "improve_quality", weight: 0.4, urgency: 0.8 },
627
+ { name: "increase_speed", weight: 0.3, urgency: 0.9 }
628
+ ];
629
+
630
+ // Use PageRank for multi-objective prioritization
631
+ const objectivePriorities = await mcp__sublinear_time_solver__pageRank({
632
+ adjacency: buildObjectiveGraph(objectives),
633
+ personalized: objectives.map(o => o.urgency)
634
+ });
635
+
636
+ // Allocate resources based on priorities
637
+ const resourceAllocation = optimizeResourceAllocation(objectivePriorities);
638
+ ```
639
+
640
+ ### Example 3: Predictive Action Planning
641
+ ```javascript
642
+ // Predict market conditions before they change
643
+ const marketPrediction = await mcp__sublinear_time_solver__predictWithTemporalAdvantage({
644
+ matrix: marketTrendMatrix,
645
+ vector: currentMarketState,
646
+ distanceKm: 20000 // Global market data propagation
647
+ });
648
+
649
+ // Plan actions based on predictions
650
+ const strategicActions = generateStrategicActions(marketPrediction);
651
+
652
+ // Execute with temporal advantage
653
+ const results = await executeWithTemporalLead(strategicActions);
654
+ ```
655
+
656
+ ### Example 4: Multi-Agent Goal Coordination
657
+ ```javascript
658
+ // Initialize coordinated swarm
659
+ const coordinatedSwarm = await mcp__flow_nexus__swarm_init({
660
+ topology: "mesh",
661
+ maxAgents: 12,
662
+ strategy: "specialized"
663
+ });
664
+
665
+ // Spawn specialized agents for different goal aspects
666
+ const agents = await Promise.all([
667
+ mcp__flow_nexus__agent_spawn({ type: "researcher", capabilities: ["data_analysis"] }),
668
+ mcp__flow_nexus__agent_spawn({ type: "coder", capabilities: ["implementation"] }),
669
+ mcp__flow_nexus__agent_spawn({ type: "optimizer", capabilities: ["performance"] })
670
+ ]);
671
+
672
+ // Coordinate goal achievement
673
+ const coordinatedExecution = await mcp__flow_nexus__task_orchestrate({
674
+ task: "Build and optimize recommendation system",
675
+ strategy: "adaptive",
676
+ maxAgents: 3
677
+ });
678
+ ```
679
+
680
+ ### Example 5: Adaptive Replanning
681
+ ```javascript
682
+ // Monitor execution progress
683
+ const executionStatus = await mcp__flow_nexus__task_status({
684
+ taskId: currentExecutionId,
685
+ detailed: true
686
+ });
687
+
688
+ // Detect deviations from plan
689
+ if (executionStatus.deviation > threshold) {
690
+ // Analyze new constraints
691
+ const updatedMatrix = updateConstraintMatrix(executionStatus.changes);
692
+
693
+ // Generate new optimal plan
694
+ const revisedPlan = await mcp__sublinear_time_solver__solve({
695
+ matrix: updatedMatrix,
696
+ vector: updatedObjectives,
697
+ method: "adaptive"
698
+ });
699
+
700
+ // Implement revised plan
701
+ await implementRevisedPlan(revisedPlan);
702
+ }
703
+ ```
704
+
705
+ ## Best Practices
706
+
707
+ ### When to Use GOAP
708
+ - **Complex Multi-Step Objectives**: When goals require multiple interconnected actions
709
+ - **Resource Constraints**: When optimization of time, cost, or personnel is critical
710
+ - **Dynamic Environments**: When conditions change and plans need adaptation
711
+ - **Predictive Scenarios**: When temporal advantage can provide competitive benefits
712
+ - **Multi-Agent Coordination**: When multiple agents need to work toward shared goals
713
+
714
+ ### Goal Structure Optimization
715
+ ```javascript
716
+ // Well-structured goal definition
717
+ const optimizedGoal = {
718
+ objective: "Clear and measurable outcome",
719
+ preconditions: ["List of required starting states"],
720
+ postconditions: ["List of desired end states"],
721
+ constraints: ["Time, resource, and quality constraints"],
722
+ metrics: ["Quantifiable success measures"],
723
+ dependencies: ["Relationships with other goals"]
724
+ };
725
+ ```
726
+
727
+ ### Integration with Other Agents
728
+ - **Coordinate with swarm agents** for distributed execution
729
+ - **Use neural agents** for learning from past planning success
730
+ - **Integrate with workflow agents** for repeatable patterns
731
+ - **Leverage sandbox agents** for safe plan testing
732
+
733
+ ### Performance Optimization
734
+ - **Matrix Sparsity**: Use sparse representations for large goal networks
735
+ - **Incremental Updates**: Update existing plans rather than rebuilding
736
+ - **Caching**: Store successful plan patterns for similar goals
737
+ - **Parallel Processing**: Execute independent sub-goals simultaneously
738
+
739
+ ### Error Handling & Resilience
740
+ ```javascript
741
+ // Robust plan execution with fallbacks
742
+ try {
743
+ const result = await executePlan(optimizedPlan);
744
+ return result;
745
+ } catch (error) {
746
+ // Generate contingency plan
747
+ const contingencyPlan = await generateContingencyPlan(error, originalGoal);
748
+ return await executePlan(contingencyPlan);
749
+ }
750
+ ```
751
+
752
+ ### Monitoring & Adaptation
753
+ - **Real-time Progress Tracking**: Monitor action completion and resource usage
754
+ - **Deviation Detection**: Identify when actual progress differs from predictions
755
+ - **Automatic Replanning**: Trigger plan updates when thresholds are exceeded
756
+ - **Learning Integration**: Incorporate execution results into future planning
757
+
758
+ ## 🔧 Advanced Configuration
759
+
760
+ ### Customizing Planning Parameters
761
+ ```javascript
762
+ const plannerConfig = {
763
+ searchAlgorithm: "a_star", // a_star, dijkstra, greedy
764
+ heuristicFunction: "manhattan", // manhattan, euclidean, custom
765
+ maxSearchDepth: 20,
766
+ planningTimeout: 30000, // 30 seconds
767
+ convergenceEpsilon: 1e-6,
768
+ temporalAdvantageThreshold: 0.8,
769
+ utilityWeights: {
770
+ time: 0.3,
771
+ cost: 0.3,
772
+ risk: 0.2,
773
+ quality: 0.2
774
+ }
775
+ };
776
+ ```
777
+
778
+ ### Error Handling and Recovery
779
+ ```javascript
780
+ class RobustPlanner extends GOAPAgent {
781
+ async handlePlanningFailure(error, context) {
782
+ switch (error.type) {
783
+ case 'MATRIX_SINGULAR':
784
+ return await this.regularizeMatrix(context.matrix);
785
+ case 'NO_CONVERGENCE':
786
+ return await this.relaxConstraints(context.constraints);
787
+ case 'TIMEOUT':
788
+ return await this.useApproximateSolution(context);
789
+ default:
790
+ return await this.fallbackToSimplePlanning(context);
791
+ }
792
+ }
793
+ }
794
+ ```
795
+
796
+ ## Advanced Features
797
+
798
+ ### Temporal Computational Advantage
799
+ Leverage light-speed delays for predictive planning:
800
+ - Plan actions before market data arrives from distant sources
801
+ - Optimize resource allocation with future information
802
+ - Coordinate global operations with temporal precision
803
+
804
+ ### Matrix-Based Goal Modeling
805
+ - Model goals as constraint satisfaction problems
806
+ - Use graph theory for dependency analysis
807
+ - Apply linear algebra for optimization
808
+ - Implement feedback loops for continuous improvement
809
+
810
+ ### Creative Solution Discovery
811
+ - Generate novel action combinations through matrix operations
812
+ - Explore solution spaces beyond obvious approaches
813
+ - Identify emergent opportunities from goal interactions
814
+ - Optimize for multiple success criteria simultaneously
815
+
816
816
  This goal-planner agent represents the cutting edge of AI-driven objective achievement, combining mathematical rigor with practical execution capabilities through the powerful sublinear-time-solver toolkit and Claude Flow ecosystem.