@claude-flow/cli 3.5.43 → 3.5.44
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +199 -196
- package/dist/src/commands/claims.d.ts.map +1 -1
- package/dist/src/commands/claims.js +352 -34
- package/dist/src/commands/claims.js.map +1 -1
- package/dist/src/commands/config.d.ts.map +1 -1
- package/dist/src/commands/config.js +66 -34
- package/dist/src/commands/config.js.map +1 -1
- package/dist/src/commands/deployment.d.ts.map +1 -1
- package/dist/src/commands/deployment.js +538 -46
- package/dist/src/commands/deployment.js.map +1 -1
- package/dist/src/commands/migrate.d.ts.map +1 -1
- package/dist/src/commands/migrate.js +531 -39
- package/dist/src/commands/migrate.js.map +1 -1
- package/dist/src/commands/providers.d.ts.map +1 -1
- package/dist/src/commands/providers.js +163 -29
- package/dist/src/commands/providers.js.map +1 -1
- package/dist/src/init/executor.d.ts.map +1 -1
- package/dist/src/init/executor.js +2 -3
- package/dist/src/init/executor.js.map +1 -1
- package/dist/src/mcp-client.d.ts.map +1 -1
- package/dist/src/mcp-client.js +3 -0
- package/dist/src/mcp-client.js.map +1 -1
- package/dist/src/mcp-server.d.ts +3 -1
- package/dist/src/mcp-server.d.ts.map +1 -1
- package/dist/src/mcp-server.js +31 -4
- package/dist/src/mcp-server.js.map +1 -1
- package/dist/src/mcp-tools/guidance-tools.d.ts +15 -0
- package/dist/src/mcp-tools/guidance-tools.d.ts.map +1 -0
- package/dist/src/mcp-tools/guidance-tools.js +617 -0
- package/dist/src/mcp-tools/guidance-tools.js.map +1 -0
- package/dist/src/mcp-tools/index.d.ts +1 -0
- package/dist/src/mcp-tools/index.d.ts.map +1 -1
- package/dist/src/mcp-tools/index.js +1 -0
- package/dist/src/mcp-tools/index.js.map +1 -1
- package/dist/src/services/config-file-manager.d.ts +37 -0
- package/dist/src/services/config-file-manager.d.ts.map +1 -0
- package/dist/src/services/config-file-manager.js +224 -0
- package/dist/src/services/config-file-manager.js.map +1 -0
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +3 -1
|
@@ -0,0 +1,617 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Guidance MCP Tools
|
|
3
|
+
*
|
|
4
|
+
* Helps the system navigate Ruflo's capabilities by providing structured
|
|
5
|
+
* discovery of tools, commands, agents, skills, and recommended workflows.
|
|
6
|
+
*
|
|
7
|
+
* @module @claude-flow/cli/mcp-tools/guidance
|
|
8
|
+
*/
|
|
9
|
+
import { existsSync, readFileSync, readdirSync } from 'node:fs';
|
|
10
|
+
import { join, dirname } from 'node:path';
|
|
11
|
+
import { fileURLToPath } from 'node:url';
|
|
12
|
+
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Package root of the CLI: this file lives in dist/src/mcp-tools, three levels down.
const CLI_ROOT = join(__dirname, '../../..');
/**
 * Find the project root by looking for a .claude/ directory.
 *
 * Resolution order:
 *   1. The current working directory (most common when invoked by a user).
 *   2. Four levels above the CLI package location.
 *   3. Walking upward from CWD, bounded to 10 hops.
 * Falls back to CWD when nothing matches.
 *
 * @returns {string} Absolute path of the detected project root.
 */
function findProjectRoot() {
    const cwd = process.cwd();
    // Strategy 1: CWD (most reliable when invoked by user)
    if (existsSync(join(cwd, '.claude'))) {
        return cwd;
    }
    // Strategy 2: walk up from the CLI package location.
    // CLI is at v3/@claude-flow/cli/ — project root is 4 levels up
    const fromPackage = join(CLI_ROOT, '../../../..');
    if (existsSync(join(fromPackage, '.claude'))) {
        return fromPackage;
    }
    // Strategy 3: walk up from CWD, stopping at the filesystem root.
    let current = cwd;
    for (let hop = 0; hop < 10; hop++) {
        if (existsSync(join(current, '.claude'))) {
            return current;
        }
        const parent = dirname(current);
        if (parent === current) {
            break; // reached filesystem root
        }
        current = parent;
    }
    // Fallback: CWD
    return cwd;
}
const PROJECT_ROOT = findProjectRoot();
|
|
44
|
+
/**
 * Static catalog of Ruflo capability areas.
 *
 * Each entry maps an area key to its human name, description, and the MCP
 * tools, CLI commands, agents, and skills that belong to it, plus a
 * `whenToUse` hint for routing. Keys are enumerated in insertion order by
 * the guidance tools, so ordering here is significant.
 */
const CAPABILITY_CATALOG = {
    'agent-management': {
        name: 'Agent Management',
        description: 'Spawn, manage, and monitor individual AI agents with lifecycle control.',
        tools: ['agent_spawn', 'agent_list', 'agent_status', 'agent_stop', 'agent_metrics', 'agent_pool', 'agent_health', 'agent_logs'],
        commands: ['agent spawn', 'agent list', 'agent status', 'agent stop', 'agent metrics', 'agent pool', 'agent health', 'agent logs'],
        agents: ['coder', 'tester', 'reviewer', 'researcher', 'planner'],
        skills: [],
        whenToUse: 'When you need to create or manage individual agents for specific tasks.',
    },
    'swarm-orchestration': {
        name: 'Swarm Orchestration',
        description: 'Multi-agent coordination with topology-aware communication and consensus.',
        tools: ['swarm_init', 'swarm_status', 'swarm_spawn', 'swarm_terminate', 'swarm_topology', 'swarm_metrics'],
        commands: ['swarm init', 'swarm status', 'swarm spawn', 'swarm terminate'],
        agents: ['hierarchical-coordinator', 'mesh-coordinator', 'adaptive-coordinator', 'queen-coordinator', 'collective-intelligence-coordinator'],
        skills: ['swarm-orchestration', 'swarm-advanced', 'claude-flow-swarm'],
        whenToUse: 'When a task requires multiple agents working together (3+ files, features, refactoring).',
    },
    'memory-knowledge': {
        name: 'Memory & Knowledge',
        description: 'Persistent memory with HNSW vector search, AgentDB storage, and embeddings.',
        tools: ['memory_store', 'memory_retrieve', 'memory_search', 'memory_list', 'memory_delete', 'memory_init', 'memory_export', 'memory_import', 'memory_stats', 'memory_compact', 'memory_namespace'],
        commands: ['memory store', 'memory retrieve', 'memory search', 'memory list', 'memory delete', 'memory init'],
        agents: ['swarm-memory-manager', 'v3-memory-specialist'],
        skills: ['v3-memory-unification', 'agentdb-advanced', 'agentdb-vector-search', 'agentdb-memory-patterns', 'agentdb-learning'],
        whenToUse: 'When you need to persist, search, or retrieve knowledge across sessions.',
    },
    'intelligence-learning': {
        name: 'Intelligence & Learning',
        description: 'Neural pattern training (SONA), RL loops, Flash Attention, EWC++ consolidation.',
        tools: ['neural_train', 'neural_predict', 'neural_status', 'neural_patterns', 'neural_optimize'],
        commands: ['neural train', 'neural predict', 'neural status', 'neural patterns', 'neural optimize'],
        agents: ['sona-learning-optimizer', 'safla-neural'],
        skills: ['reasoningbank-intelligence', 'reasoningbank-agentdb'],
        whenToUse: 'When optimizing agent routing, training patterns from outcomes, or adaptive learning.',
    },
    'hooks-automation': {
        name: 'Hooks & Automation',
        description: '17 lifecycle hooks + 12 background workers for automated learning and coordination.',
        tools: ['hooks_pre_task', 'hooks_post_task', 'hooks_pre_edit', 'hooks_post_edit', 'hooks_route', 'hooks_explain'],
        commands: [
            'hooks pre-task', 'hooks post-task', 'hooks pre-edit', 'hooks post-edit',
            'hooks session-start', 'hooks session-end', 'hooks route', 'hooks explain',
            'hooks pretrain', 'hooks build-agents', 'hooks intelligence', 'hooks worker',
            'hooks coverage-gaps', 'hooks coverage-route', 'hooks coverage-suggest',
            'hooks statusline', 'hooks progress',
        ],
        agents: [],
        skills: ['hooks-automation'],
        whenToUse: 'When you need pre/post task hooks, background workers, coverage routing, or intelligence.',
    },
    'hive-mind': {
        name: 'Hive Mind Consensus',
        description: 'Queen-led Byzantine fault-tolerant distributed consensus with multiple strategies.',
        tools: ['hive_mind_init', 'hive_mind_status', 'hive_mind_propose', 'hive_mind_vote', 'hive_mind_consensus', 'hive_mind_metrics'],
        commands: ['hive-mind init', 'hive-mind status', 'hive-mind consensus', 'hive-mind sessions', 'hive-mind spawn', 'hive-mind stop'],
        agents: ['byzantine-coordinator', 'raft-manager', 'gossip-coordinator', 'crdt-synchronizer', 'quorum-manager'],
        skills: ['hive-mind-advanced'],
        whenToUse: 'When multiple agents need to reach agreement on decisions using BFT, Raft, or CRDT.',
    },
    'security': {
        name: 'Security & Compliance',
        description: 'Security scanning, CVE remediation, input validation, claims-based authorization.',
        tools: ['security_scan', 'security_audit', 'security_cve', 'security_threats', 'security_validate', 'security_report', 'claims_check', 'claims_grant', 'claims_revoke', 'claims_list'],
        commands: ['security scan', 'security audit', 'security cve', 'security threats', 'claims check', 'claims grant'],
        agents: ['v3-security-architect'],
        skills: ['v3-security-overhaul'],
        whenToUse: 'When auditing code for vulnerabilities, managing permissions, or security reviews.',
    },
    'performance': {
        name: 'Performance & Profiling',
        description: 'Benchmarking, profiling, metrics collection, and optimization recommendations.',
        tools: ['performance_benchmark', 'performance_profile', 'performance_metrics', 'performance_optimize', 'performance_report'],
        commands: ['performance benchmark', 'performance profile', 'performance metrics', 'performance optimize', 'performance report'],
        agents: ['v3-performance-engineer'],
        skills: ['v3-performance-optimization', 'performance-analysis'],
        whenToUse: 'When measuring, profiling, or optimizing system performance.',
    },
    'github-integration': {
        name: 'GitHub Integration',
        description: 'PR management, code review, issue tracking, release automation, multi-repo coordination.',
        tools: ['github_pr_manage', 'github_code_review', 'github_issue_track', 'github_repo_analyze', 'github_sync_coord', 'github_metrics'],
        commands: [],
        agents: ['pr-manager', 'code-review-swarm', 'issue-tracker', 'release-manager', 'repo-architect', 'workflow-automation', 'multi-repo-swarm', 'project-board-sync', 'swarm-pr', 'swarm-issue', 'sync-coordinator', 'github-modes', 'release-swarm'],
        skills: ['github-release-management', 'github-workflow-automation', 'github-code-review', 'github-project-management', 'github-multi-repo'],
        whenToUse: 'When working with GitHub repos, PRs, issues, releases, or CI/CD pipelines.',
    },
    'session-workflow': {
        name: 'Session & Workflow',
        description: 'Session state management, workflow execution, task lifecycle, and daemon scheduling.',
        tools: ['session_start', 'session_end', 'session_restore', 'session_list', 'workflow_execute', 'workflow_create', 'task_create', 'task_assign', 'task_status'],
        commands: ['session start', 'session end', 'session restore', 'workflow execute', 'workflow create', 'task create', 'daemon start', 'daemon stop'],
        agents: [],
        skills: [],
        whenToUse: 'When managing long-running sessions, executing workflow templates, or scheduling tasks.',
    },
    'embeddings-vectors': {
        name: 'Embeddings & Vector Search',
        description: 'Vector embeddings with sql.js, HNSW indexing, hyperbolic embeddings, ONNX integration.',
        tools: ['embeddings_embed', 'embeddings_batch', 'embeddings_search', 'embeddings_init'],
        commands: ['embeddings embed', 'embeddings batch', 'embeddings search', 'embeddings init'],
        agents: [],
        skills: ['agentdb-vector-search', 'agentdb-optimization'],
        whenToUse: 'When you need semantic search, document embedding, or vector similarity operations.',
    },
    'wasm-agents': {
        name: 'WASM Sandboxed Agents',
        description: 'Sandboxed AI agents running in WebAssembly with virtual filesystem, no OS access.',
        tools: ['wasm_agent_create', 'wasm_agent_prompt', 'wasm_agent_tool', 'wasm_agent_list', 'wasm_agent_terminate', 'wasm_agent_files', 'wasm_agent_export', 'wasm_gallery_list', 'wasm_gallery_search', 'wasm_gallery_create'],
        commands: [],
        agents: [],
        skills: [],
        whenToUse: 'When you need sandboxed agent execution without OS access (safe, isolated environments).',
    },
    'ruvllm-inference': {
        name: 'RuVLLM Inference',
        description: 'WASM-based HNSW routing, SONA instant adaptation, MicroLoRA, chat formatting.',
        tools: ['ruvllm_status', 'ruvllm_hnsw_create', 'ruvllm_sona_create', 'ruvllm_microlora_create', 'ruvllm_chat_format', 'ruvllm_kvcache_create'],
        commands: [],
        agents: [],
        skills: [],
        whenToUse: 'When you need WASM-native HNSW routing, SONA adaptation, or MicroLoRA fine-tuning.',
    },
    'code-analysis': {
        name: 'Code Analysis & Diff',
        description: 'AST analysis, diff classification, coverage routing, dependency graph analysis.',
        tools: ['analyze_diff', 'analyze_coverage', 'analyze_graph'],
        commands: [],
        agents: ['code-analyzer'],
        skills: ['verification-quality'],
        whenToUse: 'When analyzing code quality, diffs, coverage gaps, or dependency graphs.',
    },
    'sparc-methodology': {
        name: 'SPARC Methodology',
        description: 'Specification, Pseudocode, Architecture, Refinement, Completion — structured development.',
        tools: [],
        commands: [],
        agents: ['specification', 'pseudocode', 'architecture', 'refinement'],
        skills: ['sparc-methodology'],
        whenToUse: 'When following structured SPARC development methodology for new features.',
    },
    'config-system': {
        name: 'Configuration & System',
        description: 'Configuration management, provider setup, system diagnostics, shell completions.',
        tools: ['config_get', 'config_set', 'config_list', 'config_provider'],
        commands: ['config get', 'config set', 'config list', 'config provider', 'doctor', 'status', 'providers list', 'completions'],
        agents: [],
        skills: [],
        whenToUse: 'When managing configuration, providers, or running diagnostics.',
    },
};
|
|
196
|
+
/**
 * Keyword routes from a free-text task description to capability areas and
 * a workflow template. Evaluated in order by `guidance_recommend`; every
 * matching route contributes, and the first match picks the workflow.
 */
const TASK_ROUTES = [
    { pattern: /\b(bug|fix|debug|error|issue|crash|broken)\b/i, areas: ['agent-management', 'hooks-automation'], workflow: 'bugfix' },
    { pattern: /\b(feature|implement|create|build|add)\b/i, areas: ['swarm-orchestration', 'agent-management', 'hooks-automation'], workflow: 'feature' },
    { pattern: /\b(refactor|restructure|reorganize|clean\s*up|modernize)\b/i, areas: ['swarm-orchestration', 'code-analysis'], workflow: 'refactor' },
    { pattern: /\b(test|coverage|tdd|spec|assert)\b/i, areas: ['agent-management', 'hooks-automation', 'code-analysis'], workflow: 'testing' },
    { pattern: /\b(security|vulnerab|cve|audit|threat|auth)\b/i, areas: ['security'], workflow: 'security' },
    { pattern: /\b(perf|benchmark|profil|slow|optimi|latency|speed)\b/i, areas: ['performance'], workflow: 'performance' },
    { pattern: /\b(memory|embed|vector|search|hnsw|semantic)\b/i, areas: ['memory-knowledge', 'embeddings-vectors'], workflow: 'memory' },
    { pattern: /\b(pr|pull\s*request|review|merge|branch)\b/i, areas: ['github-integration'], workflow: 'github-pr' },
    { pattern: /\b(release|deploy|publish|version|changelog)\b/i, areas: ['github-integration', 'session-workflow'], workflow: 'release' },
    { pattern: /\b(swarm|multi.agent|coordin|hive|consensus)\b/i, areas: ['swarm-orchestration', 'hive-mind'], workflow: 'swarm' },
    { pattern: /\b(learn|train|neural|pattern|sona|lora)\b/i, areas: ['intelligence-learning'], workflow: 'learning' },
    { pattern: /\b(wasm|sandbox|isolated|gallery)\b/i, areas: ['wasm-agents', 'ruvllm-inference'], workflow: 'wasm' },
    { pattern: /\b(hook|pre.task|post.task|worker|daemon)\b/i, areas: ['hooks-automation', 'session-workflow'], workflow: 'automation' },
    { pattern: /\b(config|setup|init|provider|doctor)\b/i, areas: ['config-system'], workflow: 'setup' },
];
|
|
212
|
+
/**
 * Workflow templates keyed by the `workflow` names used in TASK_ROUTES.
 * Each template carries ordered steps, suggested agents, and a swarm
 * topology, consumed by `guidance_recommend` and `guidance_workflow`.
 */
const WORKFLOW_TEMPLATES = {
    bugfix: {
        steps: ['Research the bug (hooks route)', 'Reproduce with tests', 'Fix the code', 'Verify fix passes', 'Record outcome (hooks post-task)'],
        agents: ['researcher', 'coder', 'tester'],
        topology: 'hierarchical',
    },
    feature: {
        steps: ['Design architecture', 'Implement solution', 'Write tests', 'Review code', 'Record patterns (hooks post-task)'],
        agents: ['planner', 'coder', 'tester', 'reviewer'],
        topology: 'hierarchical',
    },
    refactor: {
        steps: ['Analyze code structure', 'Plan refactor approach', 'Implement changes', 'Verify no regressions'],
        agents: ['code-analyzer', 'coder', 'reviewer'],
        topology: 'hierarchical',
    },
    testing: {
        steps: ['Analyze coverage gaps', 'Generate test plan', 'Write tests', 'Verify coverage improvement'],
        agents: ['tester', 'coder'],
        topology: 'hierarchical',
    },
    security: {
        steps: ['Run security scan', 'Triage findings', 'Fix vulnerabilities', 'Verify remediations'],
        agents: ['v3-security-architect', 'coder', 'reviewer'],
        topology: 'hierarchical',
    },
    performance: {
        steps: ['Run benchmarks', 'Profile bottlenecks', 'Implement optimizations', 'Re-benchmark'],
        agents: ['v3-performance-engineer', 'coder'],
        topology: 'hierarchical',
    },
    memory: {
        steps: ['Initialize memory store', 'Store/retrieve patterns', 'Search with HNSW', 'Compact and optimize'],
        agents: ['v3-memory-specialist'],
        topology: 'hierarchical',
    },
    'github-pr': {
        steps: ['Analyze changes', 'Run code review swarm', 'Check CI status', 'Merge or request changes'],
        agents: ['pr-manager', 'code-review-swarm', 'reviewer'],
        topology: 'hierarchical',
    },
    release: {
        steps: ['Verify all tests pass', 'Generate changelog', 'Bump version', 'Publish packages', 'Create GitHub release'],
        agents: ['release-manager', 'tester'],
        topology: 'hierarchical',
    },
    swarm: {
        steps: ['Initialize swarm topology', 'Spawn specialized agents', 'Coordinate via memory', 'Collect and synthesize results'],
        agents: ['hierarchical-coordinator', 'coder', 'tester', 'reviewer'],
        topology: 'hierarchical',
    },
    learning: {
        steps: ['Pretrain on codebase', 'Record trajectories', 'Compute rewards', 'Distill learning', 'Consolidate (EWC++)'],
        agents: ['sona-learning-optimizer'],
        topology: 'hierarchical',
    },
    wasm: {
        steps: ['Check WASM availability', 'Create sandboxed agent', 'Execute tools in sandbox', 'Export results'],
        agents: [],
        topology: 'hierarchical',
    },
    automation: {
        steps: ['List available hooks/workers', 'Configure hook handlers', 'Dispatch workers', 'Monitor outcomes'],
        agents: [],
        topology: 'hierarchical',
    },
    setup: {
        steps: ['Run doctor diagnostics', 'Configure providers', 'Initialize memory', 'Start daemon'],
        agents: [],
        topology: 'hierarchical',
    },
};
|
|
284
|
+
// ── Dynamic Discovery ───────────────────────────────────────
/**
 * Recursively scan `<project>/.claude/agents` for markdown files and collect
 * the agent names declared on `name:` lines (quotes stripped).
 *
 * Unreadable directories/files are skipped silently; `MIGRATION_SUMMARY.md`
 * is excluded.
 *
 * @returns {string[]} Sorted, de-duplicated agent names (empty when the
 *   directory does not exist).
 */
function discoverAgents() {
    const agentsDir = join(PROJECT_ROOT, '.claude/agents');
    if (!existsSync(agentsDir))
        return [];
    const names = [];
    // Iterative depth-first walk (explicit stack instead of recursion).
    const pending = [agentsDir];
    while (pending.length > 0) {
        const currentDir = pending.pop();
        try {
            for (const entry of readdirSync(currentDir, { withFileTypes: true })) {
                if (entry.isDirectory()) {
                    pending.push(join(currentDir, entry.name));
                    continue;
                }
                if (!entry.name.endsWith('.md') || entry.name === 'MIGRATION_SUMMARY.md')
                    continue;
                const content = readFileSync(join(currentDir, entry.name), 'utf-8');
                const nameMatch = content.match(/^name:\s*(.+)$/m);
                if (nameMatch)
                    names.push(nameMatch[1].trim().replace(/^["']|["']$/g, ''));
            }
        }
        catch { /* ignore unreadable directory/file */ }
    }
    return [...new Set(names)].sort();
}
|
|
310
|
+
/**
 * List installed skills: each immediate subdirectory of
 * `<project>/.claude/skills` that contains a `SKILL.md` file.
 *
 * @returns {string[]} Sorted skill directory names (empty when the skills
 *   directory does not exist or cannot be read).
 */
function discoverSkills() {
    const skillsDir = join(PROJECT_ROOT, '.claude/skills');
    if (!existsSync(skillsDir))
        return [];
    let entries = [];
    try {
        entries = readdirSync(skillsDir, { withFileTypes: true });
    }
    catch { /* ignore unreadable directory */ }
    return entries
        .filter((entry) => entry.isDirectory() && existsSync(join(skillsDir, entry.name, 'SKILL.md')))
        .map((entry) => entry.name)
        .sort();
}
|
|
329
|
+
// ── MCP Tool Definitions ────────────────────────────────────
/**
 * guidance_capabilities — enumerate the static capability catalog.
 *
 * With `area` set: returns that single catalog entry, or an error payload
 * listing valid areas. Without `area`: returns the full catalog when
 * `format` is "detailed", otherwise a per-area summary with counts.
 */
const guidanceCapabilities = {
    name: 'guidance_capabilities',
    description: 'List all capability areas with their tools, commands, agents, and skills. Use this to discover what Ruflo can do.',
    inputSchema: {
        type: 'object',
        properties: {
            area: {
                type: 'string',
                description: 'Filter to a specific area (e.g., "swarm-orchestration", "memory-knowledge"). Omit to list all areas.',
            },
            format: {
                type: 'string',
                enum: ['summary', 'detailed'],
                description: 'Output format. "summary" lists names and descriptions, "detailed" includes tools/agents/skills.',
            },
        },
    },
    handler: async (params) => {
        const { area } = params;
        const format = params.format || 'summary';
        if (area) {
            const entry = CAPABILITY_CATALOG[area];
            if (entry)
                return { content: [{ type: 'text', text: JSON.stringify(entry, null, 2) }] };
            const available = Object.keys(CAPABILITY_CATALOG).join(', ');
            return { content: [{ type: 'text', text: JSON.stringify({ error: `Unknown area: ${area}`, available }, null, 2) }], isError: true };
        }
        if (format === 'detailed')
            return { content: [{ type: 'text', text: JSON.stringify(CAPABILITY_CATALOG, null, 2) }] };
        // Default "summary": one row per area with counts instead of full lists.
        const areas = Object.entries(CAPABILITY_CATALOG).map(([key, entry]) => ({
            area: key,
            name: entry.name,
            description: entry.description,
            toolCount: entry.tools.length,
            agentCount: entry.agents.length,
            skillCount: entry.skills.length,
            whenToUse: entry.whenToUse,
        }));
        return { content: [{ type: 'text', text: JSON.stringify({ areas, totalAreas: areas.length }, null, 2) }] };
    },
};
|
|
373
|
+
/**
 * guidance_recommend — keyword-route a free-text task description to
 * capability areas and a workflow template.
 *
 * Every TASK_ROUTES pattern that matches contributes its areas; areas are
 * de-duplicated (first match wins, since all scores are currently 1) and
 * the first recommendation's workflow selects the template. When nothing
 * matches, a generic fallback payload is returned.
 */
const guidanceRecommend = {
    name: 'guidance_recommend',
    description: 'Given a task description, recommend which capability areas, tools, agents, and workflow to use.',
    inputSchema: {
        type: 'object',
        properties: {
            task: {
                type: 'string',
                description: 'Description of what you want to accomplish.',
            },
        },
        required: ['task'],
    },
    handler: async (params) => {
        const { task } = params;
        // Deduplicate by area while collecting, keeping the highest score.
        // Map preserves first-insertion order, matching the route order.
        const byArea = new Map();
        for (const route of TASK_ROUTES) {
            if (!route.pattern.test(task))
                continue;
            for (const areaKey of route.areas) {
                const capability = CAPABILITY_CATALOG[areaKey];
                if (!capability)
                    continue;
                const candidate = { area: areaKey, capability, workflow: route.workflow, score: 1 };
                const existing = byArea.get(areaKey);
                if (!existing || candidate.score > existing.score)
                    byArea.set(areaKey, candidate);
            }
        }
        const recommendations = [...byArea.values()];
        if (recommendations.length === 0) {
            const fallback = {
                task,
                message: 'No specific pattern matched. Here are general-purpose capabilities:',
                suggestions: [
                    { area: 'agent-management', reason: 'Spawn individual agents for targeted work' },
                    { area: 'swarm-orchestration', reason: 'Use swarms for multi-file or complex tasks' },
                    { area: 'hooks-automation', reason: 'Use hooks for task routing and learning' },
                ],
                tip: 'Use guidance_capabilities for a full list of all capability areas.',
            };
            return { content: [{ type: 'text', text: JSON.stringify(fallback, null, 2) }] };
        }
        const primaryWorkflow = recommendations[0]?.workflow;
        const template = primaryWorkflow ? WORKFLOW_TEMPLATES[primaryWorkflow] : undefined;
        const payload = {
            task,
            recommendations: recommendations.map((rec) => ({
                area: rec.area,
                name: rec.capability.name,
                description: rec.capability.description,
                tools: rec.capability.tools,
                agents: rec.capability.agents,
                skills: rec.capability.skills,
            })),
            workflow: template ? {
                name: primaryWorkflow,
                steps: template.steps,
                agents: template.agents,
                topology: template.topology,
            } : undefined,
        };
        return { content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }] };
    },
};
|
|
451
|
+
/**
 * guidance_discover — live filesystem discovery of agents and skills
 * under the project's .claude/ directory (not the static catalog).
 */
const guidanceDiscover = {
    name: 'guidance_discover',
    description: 'Discover all available agents and skills from the .claude/ directory. Returns live filesystem data.',
    inputSchema: {
        type: 'object',
        properties: {
            type: {
                type: 'string',
                enum: ['agents', 'skills', 'all'],
                description: 'What to discover. Default: all.',
            },
        },
    },
    handler: async (params) => {
        const requested = params.type || 'all';
        const result = {};
        if (requested === 'agents' || requested === 'all') {
            const names = discoverAgents();
            result.agents = { count: names.length, names };
        }
        if (requested === 'skills' || requested === 'all') {
            const names = discoverSkills();
            result.skills = { count: names.length, names };
        }
        return { content: [{ type: 'text', text: JSON.stringify(result, null, 2) }] };
    },
};
|
|
478
|
+
/**
 * guidance_workflow — look up a workflow template by type and return it
 * together with ready-to-use swarm settings derived from the template.
 */
const guidanceWorkflow = {
    name: 'guidance_workflow',
    description: 'Get a recommended workflow template for a task type. Includes steps, agents, and topology.',
    inputSchema: {
        type: 'object',
        properties: {
            type: {
                type: 'string',
                enum: Object.keys(WORKFLOW_TEMPLATES),
                description: 'Workflow type. Options: ' + Object.keys(WORKFLOW_TEMPLATES).join(', '),
            },
        },
        required: ['type'],
    },
    handler: async (params) => {
        const { type } = params;
        const template = WORKFLOW_TEMPLATES[type];
        if (!template) {
            const errorPayload = {
                error: `Unknown workflow: ${type}`,
                available: Object.keys(WORKFLOW_TEMPLATES),
            };
            return {
                content: [{ type: 'text', text: JSON.stringify(errorPayload, null, 2) }],
                isError: true,
            };
        }
        const payload = {
            workflow: type,
            ...template,
            swarmConfig: {
                topology: template.topology,
                // At least 4 agents, or one coordinator on top of the template's list.
                maxAgents: Math.max(template.agents.length + 1, 4),
                strategy: 'specialized',
                consensus: 'raft',
            },
        };
        return { content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }] };
    },
};
|
|
524
|
+
/**
 * guidance_quickref — quick reference card of the most useful commands
 * for a given domain. Entirely self-contained static data.
 */
const guidanceQuickRef = {
    name: 'guidance_quickref',
    description: 'Quick reference card for common operations. Returns the most useful commands for a given domain.',
    inputSchema: {
        type: 'object',
        properties: {
            domain: {
                type: 'string',
                enum: ['getting-started', 'daily-dev', 'swarm-ops', 'memory-ops', 'github-ops', 'diagnostics'],
                description: 'Domain to get quick reference for.',
            },
        },
        required: ['domain'],
    },
    handler: async (params) => {
        const { domain } = params;
        const cards = {
            'getting-started': {
                title: 'Getting Started',
                commands: [
                    { cmd: 'npx ruflo@latest init --wizard', desc: 'Initialize project with interactive setup' },
                    { cmd: 'npx ruflo@latest doctor --fix', desc: 'Run diagnostics and auto-fix issues' },
                    { cmd: 'npx ruflo@latest daemon start', desc: 'Start background workers' },
                    { cmd: 'npx ruflo@latest status', desc: 'Check system status' },
                ],
            },
            'daily-dev': {
                title: 'Daily Development',
                commands: [
                    { cmd: 'npx ruflo@latest hooks pre-task --description "..."', desc: 'Get routing recommendation before task' },
                    { cmd: 'npx ruflo@latest hooks post-task --task-id "..." --success true', desc: 'Record task outcome for learning' },
                    { cmd: 'npx ruflo@latest hooks post-edit --file "..." --train-neural true', desc: 'Train patterns from edits' },
                    { cmd: 'npx ruflo@latest memory search --query "..."', desc: 'Search memory for relevant patterns' },
                    { cmd: 'npx ruflo@latest hooks route --task "..."', desc: 'Route task to optimal agent' },
                ],
            },
            'swarm-ops': {
                title: 'Swarm Operations',
                commands: [
                    { cmd: 'npx ruflo@latest swarm init --topology hierarchical --max-agents 8', desc: 'Initialize anti-drift swarm' },
                    { cmd: 'npx ruflo@latest swarm status', desc: 'Check swarm status' },
                    { cmd: 'npx ruflo@latest agent spawn -t coder --name my-coder', desc: 'Spawn a specific agent' },
                    { cmd: 'npx ruflo@latest hive-mind init --strategy byzantine', desc: 'Start hive-mind consensus' },
                ],
            },
            'memory-ops': {
                title: 'Memory Operations',
                commands: [
                    { cmd: 'npx ruflo@latest memory init --force', desc: 'Initialize memory database' },
                    { cmd: 'npx ruflo@latest memory store --key "k" --value "v" --namespace patterns', desc: 'Store a value' },
                    { cmd: 'npx ruflo@latest memory search --query "auth patterns"', desc: 'Semantic vector search' },
                    { cmd: 'npx ruflo@latest memory list --namespace patterns', desc: 'List entries in namespace' },
                    { cmd: 'npx ruflo@latest memory retrieve --key "k" --namespace patterns', desc: 'Get a specific entry' },
                ],
            },
            'github-ops': {
                title: 'GitHub Operations',
                commands: [
                    { cmd: 'Use pr-manager agent for PR lifecycle', desc: 'Spawn pr-manager for automated PR management' },
                    { cmd: 'Use code-review-swarm agent for reviews', desc: 'Deploy multi-agent code review' },
                    { cmd: 'Use release-manager agent for releases', desc: 'Automated release with changelog' },
                    { cmd: 'Use issue-tracker agent for triage', desc: 'Intelligent issue management' },
                ],
            },
            diagnostics: {
                title: 'Diagnostics & Troubleshooting',
                commands: [
                    { cmd: 'npx ruflo@latest doctor --fix', desc: 'Full system diagnostics with auto-fix' },
                    { cmd: 'npx ruflo@latest status --watch', desc: 'Live system monitoring' },
                    { cmd: 'npx ruflo@latest hooks worker status', desc: 'Background worker health' },
                    { cmd: 'npx ruflo@latest performance benchmark --suite all', desc: 'Run all benchmarks' },
                    { cmd: 'npx ruflo@latest hooks progress --detailed', desc: 'V3 implementation progress' },
                ],
            },
        };
        const card = cards[domain];
        // Defensive: the enum should prevent this, but clients may bypass schema validation.
        if (!card)
            return { content: [{ type: 'text', text: JSON.stringify({ error: `Unknown domain: ${domain}`, available: Object.keys(cards) }, null, 2) }], isError: true };
        return { content: [{ type: 'text', text: JSON.stringify(card, null, 2) }] };
    },
};
|
|
606
|
+
/**
 * All guidance tools, exported as a single registry array in
 * registration order.
 */
export const guidanceTools = [
    guidanceCapabilities,
    guidanceRecommend,
    guidanceDiscover,
    guidanceWorkflow,
    guidanceQuickRef,
];
export default guidanceTools;
//# sourceMappingURL=guidance-tools.js.map
|