claude-flow-novice 1.5.12 → 1.5.14

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (53)
  1. package/.claude/agents/analysis/code-review/analyze-code-quality.md +160 -177
  2. package/.claude/agents/architecture/system-design/arch-system-design.md +118 -153
  3. package/.claude-flow-novice/dist/mcp/auth.js +347 -0
  4. package/.claude-flow-novice/dist/mcp/claude-code-wrapper.js +717 -0
  5. package/.claude-flow-novice/dist/mcp/claude-flow-tools.js +1365 -0
  6. package/.claude-flow-novice/dist/mcp/client.js +201 -0
  7. package/.claude-flow-novice/dist/mcp/index.js +192 -0
  8. package/.claude-flow-novice/dist/mcp/integrate-wrapper.js +85 -0
  9. package/.claude-flow-novice/dist/mcp/lifecycle-manager.js +348 -0
  10. package/.claude-flow-novice/dist/mcp/load-balancer.js +386 -0
  11. package/.claude-flow-novice/dist/mcp/mcp-config-manager.js +1362 -0
  12. package/.claude-flow-novice/dist/mcp/mcp-server-novice-simplified.js +583 -0
  13. package/.claude-flow-novice/dist/mcp/mcp-server-novice.js +723 -0
  14. package/.claude-flow-novice/dist/mcp/mcp-server-sdk.js +649 -0
  15. package/.claude-flow-novice/dist/mcp/mcp-server.js +2256 -0
  16. package/.claude-flow-novice/dist/mcp/orchestration-integration.js +800 -0
  17. package/.claude-flow-novice/dist/mcp/performance-monitor.js +489 -0
  18. package/.claude-flow-novice/dist/mcp/protocol-manager.js +376 -0
  19. package/.claude-flow-novice/dist/mcp/router.js +220 -0
  20. package/.claude-flow-novice/dist/mcp/ruv-swarm-tools.js +671 -0
  21. package/.claude-flow-novice/dist/mcp/ruv-swarm-wrapper.js +254 -0
  22. package/.claude-flow-novice/dist/mcp/server-with-wrapper.js +32 -0
  23. package/.claude-flow-novice/dist/mcp/server-wrapper-mode.js +26 -0
  24. package/.claude-flow-novice/dist/mcp/server.js +539 -0
  25. package/.claude-flow-novice/dist/mcp/session-manager.js +338 -0
  26. package/.claude-flow-novice/dist/mcp/sparc-modes.js +455 -0
  27. package/.claude-flow-novice/dist/mcp/swarm-tools.js +903 -0
  28. package/.claude-flow-novice/dist/mcp/tools.js +426 -0
  29. package/.claude-flow-novice/dist/src/cli/commands/swarm.js +23 -1
  30. package/.claude-flow-novice/dist/src/cli/commands/swarm.js.map +1 -1
  31. package/.claude-flow-novice/dist/src/cli/simple-commands/init/templates/CLAUDE.md +42 -102
  32. package/.claude-flow-novice/dist/src/config/web-portal-config.js +2 -1
  33. package/.claude-flow-novice/dist/src/config/web-portal-config.js.map +1 -1
  34. package/.claude-flow-novice/dist/src/coordination/swarm-coordinator-factory.js +36 -0
  35. package/.claude-flow-novice/dist/src/coordination/swarm-coordinator-factory.js.map +1 -0
  36. package/.claude-flow-novice/dist/src/preferences/user-preference-manager.js +371 -0
  37. package/.claude-flow-novice/dist/src/preferences/user-preference-manager.js.map +1 -0
  38. package/.claude-flow-novice/dist/src/validators/index.js +12 -0
  39. package/.claude-flow-novice/dist/src/validators/index.js.map +1 -0
  40. package/.claude-flow-novice/dist/src/validators/swarm-init-validator.js +261 -0
  41. package/.claude-flow-novice/dist/src/validators/swarm-init-validator.js.map +1 -0
  42. package/.claude-flow-novice/dist/src/validators/todowrite-batching-validator.js +204 -0
  43. package/.claude-flow-novice/dist/src/validators/todowrite-batching-validator.js.map +1 -0
  44. package/.claude-flow-novice/dist/src/validators/todowrite-integration.js +189 -0
  45. package/.claude-flow-novice/dist/src/validators/todowrite-integration.js.map +1 -0
  46. package/.claude-flow-novice/dist/src/web/portal-server.js +12 -5
  47. package/.claude-flow-novice/dist/src/web/portal-server.js.map +1 -1
  48. package/config/hooks/post-edit-pipeline.js +231 -10
  49. package/package.json +4 -2
  50. package/scripts/src/web/frontend/.claude-flow/metrics/agent-metrics.json +1 -0
  51. package/scripts/src/web/frontend/.claude-flow/metrics/performance.json +9 -0
  52. package/scripts/src/web/frontend/.claude-flow/metrics/task-metrics.json +10 -0
  53. package/src/cli/simple-commands/init/templates/CLAUDE.md +4 -1
package/.claude-flow-novice/dist/mcp/claude-code-wrapper.js
@@ -0,0 +1,717 @@
+ #!/usr/bin/env node
+ import { Server } from '@modelcontextprotocol/sdk/server/index.js';
+ import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
+ import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js';
+ import * as path from 'path';
+ import { fileURLToPath } from 'url';
+ import { loadSparcModes } from './sparc-modes.js';
+ // Simple ID generation
+ function generateId() {
+ return `${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ }
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = path.dirname(__filename);
+ export class ClaudeCodeMCPWrapper {
+ server;
+ sparcModes = new Map();
+ swarmExecutions = new Map();
+ claudeCodeMCP;
+ constructor(){
+ this.server = new Server({
+ name: 'claude-flow-wrapper',
+ version: '1.0.0'
+ }, {
+ capabilities: {
+ tools: {}
+ }
+ });
+ this.setupHandlers();
+ this.loadSparcModes();
+ }
+ async loadSparcModes() {
+ try {
+ const modes = await loadSparcModes();
+ modes.forEach((mode)=>{
+ this.sparcModes.set(mode.name, mode);
+ });
+ } catch (error) {
+ console.error('Failed to load SPARC modes:', error);
+ }
+ }
+ setupHandlers() {
+ this.server.setRequestHandler(ListToolsRequestSchema, async ()=>({
+ tools: await this.getTools()
+ }));
+ this.server.setRequestHandler(CallToolRequestSchema, async (request)=>this.handleToolCall(request.params.name, request.params.arguments || {}));
+ }
+ async getTools() {
+ const tools = [];
+ // Add SPARC mode tools
+ for (const [name, mode] of this.sparcModes){
+ tools.push({
+ name: `sparc_${name}`,
+ description: `Execute SPARC ${name} mode: ${mode.description}`,
+ inputSchema: {
+ type: 'object',
+ properties: {
+ task: {
+ type: 'string',
+ description: 'The task description for the SPARC mode to execute'
+ },
+ context: {
+ type: 'object',
+ description: 'Optional context or parameters for the task',
+ properties: {
+ memoryKey: {
+ type: 'string',
+ description: 'Memory key to store results'
+ },
+ parallel: {
+ type: 'boolean',
+ description: 'Enable parallel execution'
+ }
+ }
+ }
+ },
+ required: [
+ 'task'
+ ]
+ }
+ });
+ }
+ // Add meta tools
+ tools.push({
+ name: 'sparc_list',
+ description: 'List all available SPARC modes',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ verbose: {
+ type: 'boolean',
+ description: 'Include detailed information'
+ }
+ }
+ }
+ }, {
+ name: 'sparc_swarm',
+ description: 'Coordinate multiple SPARC agents in a swarm',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ objective: {
+ type: 'string',
+ description: 'The swarm objective'
+ },
+ strategy: {
+ type: 'string',
+ enum: [
+ 'research',
+ 'development',
+ 'analysis',
+ 'testing',
+ 'optimization',
+ 'maintenance'
+ ],
+ description: 'Swarm execution strategy'
+ },
+ mode: {
+ type: 'string',
+ enum: [
+ 'centralized',
+ 'distributed',
+ 'hierarchical',
+ 'mesh',
+ 'hybrid'
+ ],
+ description: 'Coordination mode'
+ },
+ maxAgents: {
+ type: 'number',
+ description: 'Maximum number of agents',
+ default: 5
+ }
+ },
+ required: [
+ 'objective',
+ 'strategy'
+ ]
+ }
+ }, {
+ name: 'sparc_swarm_status',
+ description: 'Check status of running swarms and list created files',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ swarmId: {
+ type: 'string',
+ description: 'Optional swarm ID to check specific swarm'
+ }
+ }
+ }
+ });
+ return tools;
+ }
+ async handleToolCall(toolName, args) {
+ try {
+ if (toolName.startsWith('sparc_')) {
+ return await this.handleSparcTool(toolName, args);
+ }
+ // Pass through to Claude Code MCP
+ return this.forwardToClaudeCode(toolName, args);
+ } catch (error) {
+ return {
+ content: [
+ {
+ type: 'text',
+ text: `Error: ${error instanceof Error ? error.message : String(error)}`
+ }
+ ],
+ isError: true
+ };
+ }
+ }
+ async handleSparcTool(toolName, args) {
+ const mode = toolName.replace('sparc_', '');
+ // Handle special tools
+ if (mode === 'list') {
+ return this.listModes(args.verbose);
+ }
+ if (mode === 'swarm') {
+ return this.handleSwarm(args);
+ }
+ if (mode === 'swarm_status') {
+ return this.getSwarmStatus(args.swarmId);
+ }
+ // Standard SPARC mode execution
+ const sparcMode = this.sparcModes.get(mode);
+ if (!sparcMode) {
+ throw new Error(`Unknown SPARC mode: ${mode}`);
+ }
+ // Execute the SPARC mode directly
+ try {
+ // Import the execution function dynamically to avoid circular dependencies
+ // const { executeSparcMode } = await import('../cli/mcp-stdio-server.js');
+ // TODO: Implement proper SPARC mode execution or fix import path
+ const executeSparcMode1 = (mode, task, tools, context)=>{
+ throw new Error('SPARC mode execution not yet implemented in wrapper');
+ };
+ const result = await executeSparcMode1(mode, args.task, sparcMode.tools || [], args.context || {});
+ return {
+ content: [
+ {
+ type: 'text',
+ text: result.output
+ }
+ ]
+ };
+ } catch (error) {
+ return {
+ content: [
+ {
+ type: 'text',
+ text: `Error executing SPARC ${mode}: ${error instanceof Error ? error.message : String(error)}`
+ }
+ ],
+ isError: true
+ };
+ }
+ }
+ buildEnhancedPrompt(mode, task, context) {
+ const parts = [];
+ // Add SPARC mode header
+ parts.push(`SPARC: ${mode.name}\n`);
+ parts.push(`## Mode Description\n${mode.description}\n`);
+ // Add available tools
+ if (mode.tools && mode.tools.length > 0) {
+ parts.push(`## Available Tools`);
+ mode.tools.forEach((tool)=>{
+ parts.push(`- **${tool}**: ${this.getToolDescription(tool)}`);
+ });
+ parts.push('');
+ }
+ // Add usage pattern
+ if (mode.usagePattern) {
+ parts.push(`## Usage Pattern\n\`\`\`javascript\n${mode.usagePattern}\n\`\`\`\n`);
+ }
+ // Add best practices
+ if (mode.bestPractices) {
+ parts.push(`## Best Practices`);
+ mode.bestPractices.forEach((practice)=>{
+ parts.push(`- ${practice}`);
+ });
+ parts.push('');
+ }
+ // Add integration capabilities
+ if (mode.integrationCapabilities) {
+ parts.push(`## Integration Capabilities\nThis mode integrates with:`);
+ mode.integrationCapabilities.forEach((capability)=>{
+ parts.push(`- ${capability}`);
+ });
+ parts.push('');
+ }
+ // Add instructions
+ if (mode.instructions) {
+ parts.push(`## Instructions\n${mode.instructions}\n`);
+ }
+ // Add the actual task
+ parts.push(`## TASK: ${task}\n`);
+ // Add SPARC methodology
+ parts.push(this.getSparcMethodology(mode.name, task, context));
+ // Add context if provided
+ if (context) {
+ if (context.memoryKey) {
+ parts.push(`**Memory Key:** \`${context.memoryKey}\``);
+ }
+ if (context.parallel) {
+ parts.push(`**Parallel Execution:** Enabled`);
+ }
+ if (context.workingDirectory) {
+ parts.push(`**Working Directory:** ${context.workingDirectory}`);
+ }
+ }
+ return parts.join('\n');
+ }
+ getToolDescription(tool) {
+ const descriptions = {
+ TodoWrite: 'Create and manage task coordination',
+ TodoRead: 'Monitor task progress and status',
+ Task: 'Spawn and manage specialized agents',
+ Memory: 'Store and retrieve coordination data',
+ Bash: 'Execute system commands',
+ Read: 'Read file contents',
+ Write: 'Write files',
+ Edit: 'Edit existing files',
+ MultiEdit: 'Make multiple edits to a file',
+ Glob: 'Search for files by pattern',
+ Grep: 'Search file contents',
+ WebSearch: 'Search the web',
+ WebFetch: 'Fetch web content'
+ };
+ return descriptions[tool] || `${tool} tool`;
+ }
+ getSparcMethodology(mode, task, context) {
+ return `
+ # đŸŽ¯ SPARC METHODOLOGY EXECUTION FRAMEWORK
+
+ You are operating in **SPARC ${mode} mode**. Follow the SPARC Workflow precisely:
+
+ ## SPARC Workflow Steps
+
+ ### 1ī¸âƒŖ SPECIFICATION - Clarify goals, scope, constraints
+ **Your Task:** ${task}
+
+ **Analysis Required:**
+ - Break down into clear, measurable objectives
+ - Identify all requirements and constraints
+ - Define acceptance criteria
+ - Never hard-code environment variables
+
+ **Use TodoWrite to capture specifications:**
+ \`\`\`javascript
+ TodoWrite([
+ {
+ id: "specification",
+ content: "Clarify goals, scope, and constraints for: ${task}",
+ status: "pending",
+ priority: "high"
+ },
+ {
+ id: "acceptance_criteria",
+ content: "Define clear acceptance criteria and success metrics",
+ status: "pending",
+ priority: "high"
+ }
+ ]);
+ \`\`\`
+
+ ### 2ī¸âƒŖ PSEUDOCODE - High-level logic with TDD anchors
+ **Design Approach:**
+ - Identify core functions and data structures
+ - Create TDD test anchors before implementation
+ - Map out component interactions
+
+ ### 3ī¸âƒŖ ARCHITECTURE - Design extensible systems
+ **Architecture Requirements:**
+ - Clear service boundaries
+ - Define interfaces between components
+ - Design for extensibility and maintainability
+ - Mode-specific architecture: ${this.getModeSpecificArchitecture(mode)}
+
+ ### 4ī¸âƒŖ REFINEMENT - Iterate with TDD and security
+ **Refinement Process:**
+ - TDD implementation cycles
+ - Security vulnerability checks (injection, XSS, CSRF)
+ - Performance optimization
+ - Code review and refactoring
+ - All files must be ≤ 500 lines
+
+ ### 5ī¸âƒŖ COMPLETION - Integrate and verify
+ **Completion Checklist:**
+ - [ ] All acceptance criteria met
+ - [ ] Tests passing (comprehensive test suite)
+ - [ ] Security review completed
+ - [ ] Documentation updated
+ - [ ] Results stored in Memory: \`sparc_${mode}_${Date.now()}\`
+ - [ ] No hard-coded secrets or env vars
+ - [ ] Proper error handling in all code paths
+
+ ## 🚀 Execution Configuration
+
+ **Mode:** ${mode}
+ **Strategy:** ${this.getModeStrategy(mode)}
+ **Memory Key:** \`sparc_${mode}_${Date.now()}\`
+ **Batch Operations:** ${context?.parallel ? 'Enabled' : 'Standard operations'}
+ **Primary Tools:** ${this.sparcModes.get(mode)?.tools?.join(', ') || 'Standard tools'}
+
+ ## 📋 Must Block (Non-negotiable)
+ - Every file ≤ 500 lines
+ - No hard-coded secrets or env vars
+ - All user inputs validated
+ - No security vulnerabilities
+ - Proper error handling in all paths
+ - Each subtask ends with completion check
+
+ ## đŸŽ¯ IMMEDIATE ACTION REQUIRED
+
+ **START NOW with SPARC Step 1 - SPECIFICATION:**
+
+ 1. Create comprehensive TodoWrite task breakdown following SPARC workflow
+ 2. Set "specification" task to "in_progress"
+ 3. Analyze requirements and define acceptance criteria
+ 4. Store initial analysis in Memory: \`sparc_${mode}_${Date.now()}\`
+
+ **Remember:** You're in **${mode}** mode. Follow the SPARC workflow systematically:
+ Specification → Pseudocode → Architecture → Refinement → Completion
+
+ Use the appropriate tools for each phase and maintain progress in TodoWrite.`;
+ }
+ getModeSpecificArchitecture(mode) {
+ const architectures = {
+ orchestrator: 'Design for parallel agent coordination with clear task boundaries',
+ coder: 'Focus on clean code architecture with proper abstractions',
+ researcher: 'Structure for data collection and analysis pipelines',
+ tdd: 'Test-first design with comprehensive test coverage',
+ architect: 'System-wide design patterns and component interactions',
+ reviewer: 'Code quality gates and review checkpoints',
+ debugger: 'Diagnostic and monitoring integration points',
+ tester: 'Test framework integration and coverage analysis'
+ };
+ return architectures[mode] || 'Design for the specific mode requirements';
+ }
+ getModeStrategy(mode) {
+ const strategies = {
+ orchestrator: 'Parallel coordination',
+ coder: 'Iterative development',
+ researcher: 'Deep analysis',
+ tdd: 'Test-driven cycles',
+ architect: 'System design',
+ reviewer: 'Quality assurance',
+ debugger: 'Systematic debugging',
+ tester: 'Comprehensive validation'
+ };
+ return strategies[mode] || 'Mode-specific execution';
+ }
+ listModes(verbose) {
+ const modes = Array.from(this.sparcModes.values());
+ if (verbose) {
+ const content = modes.map((mode)=>({
+ name: mode.name,
+ description: mode.description,
+ tools: mode.tools,
+ bestPractices: mode.bestPractices
+ }));
+ return {
+ content: [
+ {
+ type: 'text',
+ text: JSON.stringify(content, null, 2)
+ }
+ ]
+ };
+ }
+ const list = modes.map((m)=>`- **${m.name}**: ${m.description}`).join('\n');
+ return {
+ content: [
+ {
+ type: 'text',
+ text: `Available SPARC modes:\n\n${list}`
+ }
+ ]
+ };
+ }
+ async handleSwarm(args) {
+ const { objective, strategy, mode = 'distributed', maxAgents = 5 } = args;
+ const swarmId = generateId();
+ // Plan swarm agents
+ const agents = this.planSwarmAgents(objective, strategy, maxAgents);
+ // Create swarm execution record
+ const execution = {
+ id: swarmId,
+ objective,
+ strategy,
+ mode,
+ agents,
+ startTime: new Date(),
+ status: 'active'
+ };
+ this.swarmExecutions.set(swarmId, execution);
+ // Launch agents based on coordination mode
+ if (mode === 'distributed' || mode === 'mesh') {
+ // Parallel execution
+ await Promise.all(agents.map((agent)=>this.launchSwarmAgent(agent, execution)));
+ } else {
+ // Sequential execution
+ for (const agent of agents){
+ await this.launchSwarmAgent(agent, execution);
+ }
+ }
+ execution.status = 'completed';
+ execution.endTime = new Date();
+ return {
+ content: [
+ {
+ type: 'text',
+ text: JSON.stringify({
+ swarmId,
+ objective,
+ strategy,
+ mode,
+ agentCount: agents.length,
+ status: 'launched',
+ message: 'Swarm coordination initiated'
+ }, null, 2)
+ }
+ ]
+ };
+ }
+ planSwarmAgents(objective, strategy, maxAgents) {
+ const agents = [];
+ // Strategy-based agent planning
+ switch(strategy){
+ case 'research':
+ agents.push({
+ id: generateId(),
+ mode: 'researcher',
+ task: `Research: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'analyst',
+ task: `Analyze findings for: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'documenter',
+ task: `Document research results: ${objective}`,
+ status: 'pending'
+ });
+ break;
+ case 'development':
+ agents.push({
+ id: generateId(),
+ mode: 'architect',
+ task: `Design architecture: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'coder',
+ task: `Implement: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'tester',
+ task: `Test implementation: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'reviewer',
+ task: `Review code: ${objective}`,
+ status: 'pending'
+ });
+ break;
+ case 'analysis':
+ agents.push({
+ id: generateId(),
+ mode: 'analyst',
+ task: `Analyze: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'optimizer',
+ task: `Optimize based on analysis: ${objective}`,
+ status: 'pending'
+ });
+ break;
+ case 'testing':
+ agents.push({
+ id: generateId(),
+ mode: 'tester',
+ task: `Create test suite: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'debugger',
+ task: `Debug issues: ${objective}`,
+ status: 'pending'
+ });
+ break;
+ case 'optimization':
+ agents.push({
+ id: generateId(),
+ mode: 'analyst',
+ task: `Performance analysis: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'optimizer',
+ task: `Optimize: ${objective}`,
+ status: 'pending'
+ });
+ break;
+ case 'maintenance':
+ agents.push({
+ id: generateId(),
+ mode: 'reviewer',
+ task: `Code review: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'debugger',
+ task: `Fix issues: ${objective}`,
+ status: 'pending'
+ }, {
+ id: generateId(),
+ mode: 'documenter',
+ task: `Update documentation: ${objective}`,
+ status: 'pending'
+ });
+ break;
+ }
+ // Limit to maxAgents
+ return agents.slice(0, maxAgents);
+ }
+ async launchSwarmAgent(agent, execution) {
+ agent.status = 'active';
+ try {
+ // Use the SPARC mode handler
+ const result = await this.handleSparcTool(`sparc_${agent.mode}`, {
+ task: agent.task,
+ context: {
+ memoryKey: `swarm_${execution.id}_${agent.id}`,
+ parallel: execution.mode === 'distributed'
+ }
+ });
+ agent.status = 'completed';
+ agent.result = result;
+ } catch (error) {
+ agent.status = 'failed';
+ agent.result = {
+ error: error instanceof Error ? error.message : String(error)
+ };
+ }
+ }
+ getSwarmStatus(swarmId) {
+ if (swarmId) {
+ const execution = this.swarmExecutions.get(swarmId);
+ if (!execution) {
+ return {
+ content: [
+ {
+ type: 'text',
+ text: `No swarm found with ID: ${swarmId}`
+ }
+ ]
+ };
+ }
+ return {
+ content: [
+ {
+ type: 'text',
+ text: JSON.stringify(execution, null, 2)
+ }
+ ]
+ };
+ }
+ // Return all swarms
+ const swarms = Array.from(this.swarmExecutions.values()).map((e)=>({
+ id: e.id,
+ objective: e.objective,
+ status: e.status,
+ agentCount: e.agents.length,
+ startTime: e.startTime,
+ endTime: e.endTime
+ }));
+ return {
+ content: [
+ {
+ type: 'text',
+ text: JSON.stringify(swarms, null, 2)
+ }
+ ]
+ };
+ }
+ async forwardToClaudeCode(toolName, args) {
+ // For SPARC tools that were already handled, this shouldn't be called
+ // For other tools, we execute them using the existing logic
+ if (toolName === 'Task') {
+ // This is a SPARC task that's been enhanced with prompts
+ // Extract the mode from the description if possible
+ const modeMatch = args.description?.match(/SPARC (\w+)/);
+ if (modeMatch) {
+ const modeName = modeMatch[1];
+ const mode = this.sparcModes.get(modeName);
+ if (mode) {
+ // Execute using the existing SPARC execution logic
+ try {
+ const result = await executeSparcMode(modeName, args.prompt || '', mode.tools || [], {});
+ return {
+ content: [
+ {
+ type: 'text',
+ text: result.output
+ }
+ ]
+ };
+ } catch (error) {
+ return {
+ content: [
+ {
+ type: 'text',
+ text: `Error executing SPARC ${modeName}: ${error instanceof Error ? error.message : String(error)}`
+ }
+ ],
+ isError: true
+ };
+ }
+ }
+ }
+ }
+ // For non-SPARC tools, return a message
+ return {
+ content: [
+ {
+ type: 'text',
+ text: `Tool ${toolName} is not available in this MCP server.`
+ }
+ ],
+ isError: true
+ };
+ }
+ async run() {
+ const transport = new StdioServerTransport();
+ // Log startup message
+ console.error('🚀 Claude-Flow MCP Server (Wrapper Mode)');
+ console.error('đŸ“Ļ Using Claude Code MCP pass-through with SPARC prompt injection');
+ console.error('🔧 All SPARC tools available with enhanced AI capabilities');
+ console.error('ℹī¸ To use legacy mode, set CLAUDE_FLOW_LEGACY_MCP=true');
+ console.error('');
+ await this.server.connect(transport);
+ }
+ }
+ // Run the server if this is the main module
+ if (import.meta.url === `file://${process.argv[1]}`) {
+ const wrapper = new ClaudeCodeMCPWrapper();
+ wrapper.run().catch(console.error);
+ }
+
+ //# sourceMappingURL=claude-code-wrapper.js.map
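
For orientation, the sketch below shows how the wrapper's tool surface might be exercised by an MCP client over stdio. It is a minimal sketch, assuming the standard @modelcontextprotocol/sdk client API (Client, StdioClientTransport, listTools, callTool); the entry path, client name, and the sparc_swarm arguments are illustrative and are not taken from the package itself.

// Minimal client-side sketch (assumptions noted above); not part of the package diff.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Spawn the wrapper as a stdio MCP server; the path is hypothetical.
const transport = new StdioClientTransport({
  command: 'node',
  args: ['.claude-flow-novice/dist/mcp/claude-code-wrapper.js']
});

const client = new Client({ name: 'example-client', version: '0.0.1' }, { capabilities: {} });
await client.connect(transport);

// Lists the dynamically generated sparc_* tools plus sparc_list, sparc_swarm, and sparc_swarm_status.
const { tools } = await client.listTools();
console.log(tools.map((t) => t.name).join(', '));

// Maps to handleSwarm() in the wrapper: it plans agents for the chosen strategy
// and runs them in parallel for 'distributed'/'mesh' modes, sequentially otherwise.
const result = await client.callTool({
  name: 'sparc_swarm',
  arguments: { objective: 'Add input validation', strategy: 'development', maxAgents: 3 }
});
console.log(result.content[0].text);

Note that the wrapper writes its startup banner with console.error, keeping stdout free for the stdio MCP protocol itself.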