myaidev-method 0.2.2 → 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/mcp/mcp-config.json +93 -10
- package/.claude/mcp/sparc-orchestrator-server.js +607 -0
- package/DEV_WORKFLOW_GUIDE.md +1353 -0
- package/MCP_INTEGRATION.md +373 -0
- package/README.md +378 -21
- package/bin/cli.js +39 -1
- package/dist/mcp/mcp-config.json +93 -10
- package/dist/mcp/sparc-orchestrator-server.js +607 -0
- package/package.json +22 -3
- package/src/lib/dev-workflow/agent-types.js +163 -0
- package/src/lib/dev-workflow/sparc-workflow.js +302 -0
- package/src/lib/dev-workflow/task-manager.js +313 -0
- package/src/scripts/dev-architect.js +99 -0
- package/src/scripts/dev-code.js +106 -0
- package/src/scripts/dev-docs.js +122 -0
- package/src/scripts/dev-review.js +117 -0
- package/src/scripts/dev-test.js +115 -0
- package/src/scripts/sparc-workflow.js +187 -0
- package/src/templates/claude/agents/dev-architect.md +436 -0
- package/src/templates/claude/agents/dev-coder.md +749 -0
- package/src/templates/claude/agents/dev-documenter.md +939 -0
- package/src/templates/claude/agents/dev-reviewer.md +1152 -0
- package/src/templates/claude/agents/dev-tester.md +600 -0
- package/src/templates/claude/commands/myai-dev-architect.md +80 -0
- package/src/templates/claude/commands/myai-dev-code.md +93 -0
- package/src/templates/claude/commands/myai-dev-docs.md +94 -0
- package/src/templates/claude/commands/myai-dev-review.md +96 -0
- package/src/templates/claude/commands/myai-dev-test.md +95 -0
- package/src/templates/claude/commands/myai-sparc-workflow.md +196 -0
package/.claude/mcp/sparc-orchestrator-server.js
ADDED
@@ -0,0 +1,607 @@
#!/usr/bin/env node

/**
 * SPARC Orchestrator MCP Server
 * Lightweight task orchestration for the 5-phase SPARC development workflow
 * Part of MyAIDev Method - simplified from cflow's complex swarm system
 */

import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';
import { promises as fs } from 'fs';
import { join } from 'path';
import { homedir } from 'os';

class SparcOrchestratorServer {
  constructor() {
    this.server = new Server(
      {
        name: 'sparc-orchestrator',
        version: '1.0.0',
      },
      {
        capabilities: {
          tools: {},
        },
      }
    );

    // SPARC workflow state directory
    this.workflowDir = join(homedir(), '.myaidev-method', 'workflows');
    this.tasksDir = join(homedir(), '.myaidev-method', 'tasks');

    this.setupToolHandlers();
    this.initializeDirectories();
  }

  async initializeDirectories() {
    try {
      await fs.mkdir(this.workflowDir, { recursive: true });
      await fs.mkdir(this.tasksDir, { recursive: true });
    } catch (error) {
      console.error('Failed to initialize directories:', error);
    }
  }

  setupToolHandlers() {
    // List available tools
    this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
      tools: [
        {
          name: 'sparc_orchestrate',
          description: 'Orchestrate the complete 5-phase SPARC development workflow with intelligent task routing',
          inputSchema: {
            type: 'object',
            properties: {
              task: {
                type: 'string',
                description: 'Description of the feature/system to build',
              },
              strategy: {
                type: 'string',
                enum: ['sequential', 'parallel', 'adaptive'],
                default: 'sequential',
                description: 'Execution strategy: sequential (one phase at a time), parallel (independent phases concurrently), adaptive (intelligent routing)',
              },
              phases: {
                type: 'array',
                items: {
                  type: 'string',
                  enum: ['architecture', 'implementation', 'testing', 'review', 'documentation'],
                },
                description: 'Specific phases to run (default: all 5 phases)',
              },
              options: {
                type: 'object',
                properties: {
                  techStack: { type: 'string', description: 'Technology stack (e.g., "nextjs,payloadcms,mongodb")' },
                  testFramework: { type: 'string', description: 'Testing framework (jest, mocha, pytest)' },
                  testDriven: { type: 'boolean', description: 'Use TDD approach' },
                  focusSecurity: { type: 'boolean', description: 'Enhanced security review' },
                  focusPerformance: { type: 'boolean', description: 'Enhanced performance review' },
                  outputDir: { type: 'string', description: 'Custom output directory' },
                },
              },
            },
            required: ['task'],
          },
        },
        {
          name: 'sparc_status',
          description: 'Get status of current SPARC workflow execution',
          inputSchema: {
            type: 'object',
            properties: {
              workflowId: {
                type: 'string',
                description: 'Workflow ID to check (optional, returns latest if not provided)',
              },
            },
          },
        },
        {
          name: 'sparc_phase_execute',
          description: 'Execute a single SPARC phase with detailed configuration',
          inputSchema: {
            type: 'object',
            properties: {
              phase: {
                type: 'string',
                enum: ['architecture', 'implementation', 'testing', 'review', 'documentation'],
                description: 'The SPARC phase to execute',
              },
              task: {
                type: 'string',
                description: 'Task description for this phase',
              },
              options: {
                type: 'object',
                description: 'Phase-specific options',
              },
              workflowId: {
                type: 'string',
                description: 'Associate with existing workflow (optional)',
              },
            },
            required: ['phase', 'task'],
          },
        },
        {
          name: 'sparc_workflow_history',
          description: 'Get history of SPARC workflow executions',
          inputSchema: {
            type: 'object',
            properties: {
              limit: {
                type: 'number',
                default: 10,
                description: 'Number of workflows to return',
              },
              status: {
                type: 'string',
                enum: ['running', 'completed', 'failed', 'all'],
                default: 'all',
              },
            },
          },
        },
        {
          name: 'sparc_task_results',
          description: 'Get detailed results from a completed SPARC workflow',
          inputSchema: {
            type: 'object',
            properties: {
              workflowId: {
                type: 'string',
                description: 'Workflow ID to get results for',
              },
            },
            required: ['workflowId'],
          },
        },
      ],
    }));
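The tool schemas above are plain MCP declarations, so any MCP client drives them with a standard tools/call request. For illustration only, a client invoking sparc_orchestrate might send something along these lines (the task text and option values here are invented, not from the package):

// Illustrative tools/call payload for sparc_orchestrate (values are made up)
const orchestrateRequest = {
  method: 'tools/call',
  params: {
    name: 'sparc_orchestrate',
    arguments: {
      task: 'Build a REST API for user management',          // required
      strategy: 'sequential',                                 // the default
      phases: ['architecture', 'implementation', 'testing'],
      options: { techStack: 'nextjs,mongodb', testDriven: true },
    },
  },
};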

    // Handle tool execution
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params;

      try {
        switch (name) {
          case 'sparc_orchestrate':
            return await this.handleSparcOrchestrate(args);

          case 'sparc_status':
            return await this.handleSparcStatus(args);

          case 'sparc_phase_execute':
            return await this.handlePhaseExecute(args);

          case 'sparc_workflow_history':
            return await this.handleWorkflowHistory(args);

          case 'sparc_task_results':
            return await this.handleTaskResults(args);

          default:
            throw new Error(`Unknown tool: ${name}`);
        }
      } catch (error) {
        return {
          content: [
            {
              type: 'text',
              text: `Error executing ${name}: ${error.message}`,
            },
          ],
          isError: true,
        };
      }
    });
  }

  async handleSparcOrchestrate(args) {
    const { task, strategy = 'sequential', phases, options = {} } = args;

    // Generate workflow ID
    const workflowId = `sparc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;

    // Determine phases to execute
    const allPhases = ['architecture', 'implementation', 'testing', 'review', 'documentation'];
    const executionPhases = phases && phases.length > 0 ? phases : allPhases;

    // Create workflow state
    const workflow = {
      id: workflowId,
      task,
      strategy,
      phases: executionPhases,
      options,
      status: 'running',
      startTime: new Date().toISOString(),
      currentPhase: null,
      results: {},
      metadata: {
        techStack: options.techStack || 'not specified',
        testFramework: options.testFramework || 'auto-detect',
        outputDir: options.outputDir || '.myaidev-method/sparc',
      },
    };

    // Save workflow state
    await this.saveWorkflow(workflow);

    // Generate execution plan based on strategy
    const executionPlan = this.generateExecutionPlan(executionPhases, strategy, task, options);

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify({
            workflowId,
            status: 'initialized',
            message: 'SPARC workflow orchestration initialized',
            task,
            strategy,
            phases: executionPhases,
            executionPlan,
            nextSteps: this.getNextSteps(strategy, executionPhases, options),
          }, null, 2),
        },
      ],
    };
  }
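Note that handleSparcOrchestrate only plans and records: it writes a state file and returns the plan, leaving actual phase execution to the caller. For the request sketched earlier, the state persisted under ~/.myaidev-method/workflows/ would look roughly like this (the ID and timestamp are invented):

// Approximate contents of ~/.myaidev-method/workflows/<workflowId>.json
const exampleWorkflow = {
  id: 'sparc_1700000000000_a1b2c3d4e',    // hypothetical ID
  task: 'Build a REST API for user management',
  strategy: 'sequential',
  phases: ['architecture', 'implementation', 'testing'],
  options: { techStack: 'nextjs,mongodb', testDriven: true },
  status: 'running',
  startTime: '2024-01-01T12:00:00.000Z',  // invented timestamp
  currentPhase: null,
  results: {},
  metadata: {
    techStack: 'nextjs,mongodb',
    testFramework: 'auto-detect',
    outputDir: '.myaidev-method/sparc',
  },
};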

  generateExecutionPlan(phases, strategy, task, options) {
    const plan = {
      strategy,
      totalPhases: phases.length,
      phases: [],
    };

    phases.forEach((phase, index) => {
      const phaseConfig = this.getPhaseConfiguration(phase, task, options);

      plan.phases.push({
        order: index + 1,
        phase,
        command: phaseConfig.command,
        description: phaseConfig.description,
        expectedOutput: phaseConfig.expectedOutput,
        estimatedDuration: phaseConfig.estimatedDuration,
        dependencies: phaseConfig.dependencies,
        parallelizable: strategy === 'parallel' && phaseConfig.canRunParallel,
      });
    });

    return plan;
  }

  getPhaseConfiguration(phase, task, options) {
    const configs = {
      architecture: {
        command: options.techStack
          ? `/myai-dev-architect "${task}" --tech-stack "${options.techStack}"`
          : `/myai-dev-architect "${task}"`,
        description: 'Design system architecture, APIs, and data models',
        expectedOutput: '.myaidev-method/sparc/architecture.md',
        estimatedDuration: '5-10 minutes',
        dependencies: [],
        canRunParallel: false,
      },
      implementation: {
        command: options.testDriven
          ? `/myai-dev-code "${task}" --test-driven`
          : `/myai-dev-code "${task}"`,
        description: 'Implement features with SOLID principles',
        expectedOutput: '.myaidev-method/sparc/code-output/',
        estimatedDuration: '10-20 minutes',
        dependencies: ['architecture'],
        canRunParallel: false,
      },
      testing: {
        command: options.testFramework
          ? `/myai-dev-test "${task}" --coverage --framework ${options.testFramework}`
          : `/myai-dev-test "${task}" --coverage`,
        description: 'Create comprehensive test suites',
        expectedOutput: '.myaidev-method/sparc/test-results/',
        estimatedDuration: '8-15 minutes',
        dependencies: ['implementation'],
        canRunParallel: false,
      },
      review: {
        command: this.buildReviewCommand(task, options),
        description: 'Analyze code quality, security, and performance',
        expectedOutput: '.myaidev-method/sparc/review-report.md',
        estimatedDuration: '5-10 minutes',
        dependencies: ['testing'],
        canRunParallel: true,
      },
      documentation: {
        command: `/myai-dev-docs "${task}"`,
        description: 'Generate comprehensive documentation',
        expectedOutput: '.myaidev-method/sparc/documentation/',
        estimatedDuration: '5-10 minutes',
        dependencies: ['implementation'],
        canRunParallel: true,
      },
    };

    return configs[phase];
  }

  buildReviewCommand(task, options) {
    let command = `/myai-dev-review "${task}"`;
    if (options.focusSecurity) command += ' --security';
    if (options.focusPerformance) command += ' --performance';
    return command;
  }

  getNextSteps(strategy, phases, options) {
    const steps = [
      {
        step: 1,
        action: 'Execute phases in order',
        description: strategy === 'sequential'
          ? 'Run each phase one at a time, reviewing outputs before proceeding'
          : strategy === 'parallel'
            ? 'Run independent phases concurrently (review + documentation can run in parallel after testing)'
            : 'Intelligently route tasks based on dependencies and available resources',
      },
    ];

    phases.forEach((phase, index) => {
      const config = this.getPhaseConfiguration(phase, 'task', options);
      steps.push({
        step: index + 2,
        phase,
        command: config.command,
        description: config.description,
        output: config.expectedOutput,
      });
    });

    steps.push({
      step: phases.length + 2,
      action: 'Review workflow results',
      command: `/sparc_task_results --workflow-id <workflow-id>`,
      description: 'Review all phase outputs and generate final report',
    });

    return steps;
  }
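Tracing getPhaseConfiguration with the same illustrative inputs, the first entry generateExecutionPlan would produce for the architecture phase comes out approximately as follows (a sketch, derived from the config table above):

// Sketch of plan.phases[0] for strategy 'sequential' with options.techStack set
const firstPlanEntry = {
  order: 1,
  phase: 'architecture',
  command: '/myai-dev-architect "Build a REST API for user management" --tech-stack "nextjs,mongodb"',
  description: 'Design system architecture, APIs, and data models',
  expectedOutput: '.myaidev-method/sparc/architecture.md',
  estimatedDuration: '5-10 minutes',
  dependencies: [],
  parallelizable: false, // sequential strategy, and architecture has canRunParallel: false
};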

  async handleSparcStatus(args) {
    const { workflowId } = args;

    if (workflowId) {
      const workflow = await this.loadWorkflow(workflowId);
      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify(workflow, null, 2),
          },
        ],
      };
    }

    // Get latest workflow
    const workflows = await this.listWorkflows();
    const latest = workflows[0];

    if (!latest) {
      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify({
              status: 'no_workflows',
              message: 'No SPARC workflows found',
            }, null, 2),
          },
        ],
      };
    }

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify(latest, null, 2),
        },
      ],
    };
  }

  async handlePhaseExecute(args) {
    const { phase, task, options = {}, workflowId } = args;

    const phaseConfig = this.getPhaseConfiguration(phase, task, options);

    const execution = {
      phase,
      task,
      command: phaseConfig.command,
      status: 'ready',
      description: phaseConfig.description,
      expectedOutput: phaseConfig.expectedOutput,
      estimatedDuration: phaseConfig.estimatedDuration,
      startTime: new Date().toISOString(),
      workflowId: workflowId || null,
    };

    // Save task execution record
    const taskId = `task_${phase}_${Date.now()}`;
    await this.saveTask(taskId, execution);

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify({
            taskId,
            ...execution,
            nextAction: `Execute the following command: ${phaseConfig.command}`,
          }, null, 2),
        },
      ],
    };
  }

  async handleWorkflowHistory(args) {
    const { limit = 10, status = 'all' } = args;

    let workflows = await this.listWorkflows();

    if (status !== 'all') {
      workflows = workflows.filter(w => w.status === status);
    }

    workflows = workflows.slice(0, limit);

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify({
            total: workflows.length,
            workflows: workflows.map(w => ({
              id: w.id,
              task: w.task,
              status: w.status,
              phases: w.phases,
              startTime: w.startTime,
              endTime: w.endTime,
              duration: w.duration,
            })),
          }, null, 2),
        },
      ],
    };
  }

  async handleTaskResults(args) {
    const { workflowId } = args;

    const workflow = await this.loadWorkflow(workflowId);

    if (!workflow) {
      throw new Error(`Workflow ${workflowId} not found`);
    }

    return {
      content: [
        {
          type: 'text',
          text: JSON.stringify({
            workflowId,
            task: workflow.task,
            status: workflow.status,
            results: workflow.results,
            summary: this.generateWorkflowSummary(workflow),
          }, null, 2),
        },
      ],
    };
  }

  generateWorkflowSummary(workflow) {
    const completedPhases = Object.keys(workflow.results).length;
    const totalPhases = workflow.phases.length;

    return {
      completionRate: `${completedPhases}/${totalPhases} phases`,
      duration: workflow.duration || 'in progress',
      outputFiles: this.getOutputFiles(workflow),
      nextRecommendedAction: this.getNextAction(workflow),
    };
  }
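The same hands-off pattern shows up in handlePhaseExecute: the server records the task and hands the slash command back for the client (e.g. Claude Code) to run, rather than spawning anything itself. A sketch of what a call for the testing phase returns (taskId and timestamp invented):

// Approximate JSON returned by sparc_phase_execute for the testing phase
const phaseExecuteResult = {
  taskId: 'task_testing_1700000000000',   // hypothetical ID
  phase: 'testing',
  task: 'Build a REST API for user management',
  command: '/myai-dev-test "Build a REST API for user management" --coverage',
  status: 'ready',
  description: 'Create comprehensive test suites',
  expectedOutput: '.myaidev-method/sparc/test-results/',
  estimatedDuration: '8-15 minutes',
  startTime: '2024-01-01T12:05:00.000Z',  // invented timestamp
  workflowId: null,
  nextAction: 'Execute the following command: /myai-dev-test "Build a REST API for user management" --coverage',
};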

  getOutputFiles(workflow) {
    const files = [];

    workflow.phases.forEach(phase => {
      const config = this.getPhaseConfiguration(phase, '', {});
      files.push({
        phase,
        location: config.expectedOutput,
      });
    });

    return files;
  }

  getNextAction(workflow) {
    if (workflow.status === 'completed') {
      return 'Workflow complete. Review outputs and integrate into your project.';
    }

    const completedPhases = Object.keys(workflow.results);
    const nextPhase = workflow.phases.find(p => !completedPhases.includes(p));

    if (nextPhase) {
      const config = this.getPhaseConfiguration(nextPhase, workflow.task, workflow.options);
      return `Execute next phase: ${config.command}`;
    }

    return 'All phases executed. Mark workflow as complete.';
  }

  // Persistence helpers
  async saveWorkflow(workflow) {
    const filePath = join(this.workflowDir, `${workflow.id}.json`);
    await fs.writeFile(filePath, JSON.stringify(workflow, null, 2));
  }

  async loadWorkflow(workflowId) {
    try {
      const filePath = join(this.workflowDir, `${workflowId}.json`);
      const data = await fs.readFile(filePath, 'utf-8');
      return JSON.parse(data);
    } catch (error) {
      return null;
    }
  }

  async listWorkflows() {
    try {
      const files = await fs.readdir(this.workflowDir);
      const workflows = [];

      for (const file of files) {
        if (file.endsWith('.json')) {
          const data = await fs.readFile(join(this.workflowDir, file), 'utf-8');
          workflows.push(JSON.parse(data));
        }
      }

      // Sort by start time (newest first)
      return workflows.sort((a, b) =>
        new Date(b.startTime) - new Date(a.startTime)
      );
    } catch (error) {
      return [];
    }
  }

  async saveTask(taskId, execution) {
    const filePath = join(this.tasksDir, `${taskId}.json`);
    await fs.writeFile(filePath, JSON.stringify(execution, null, 2));
  }

  async run() {
    const transport = new StdioServerTransport();
    await this.server.connect(transport);
    console.error('SPARC Orchestrator MCP server running on stdio');
  }
}

// Start the server
const server = new SparcOrchestratorServer();
server.run().catch(console.error);
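To use the server, an MCP client has to launch it over stdio. The package's updated mcp-config.json (changed in this release but not shown in this excerpt) presumably handles this; a generic stdio registration would look something like the following, where the server key name is an assumption:

// Hypothetical MCP client registration for this server
// (key name and path are assumptions, not copied from mcp-config.json)
const mcpServers = {
  'sparc-orchestrator': {
    command: 'node',
    args: ['./.claude/mcp/sparc-orchestrator-server.js'],
  },
};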
package/package.json
CHANGED
@@ -1,13 +1,14 @@
 {
   "name": "myaidev-method",
-  "version": "0.2.2",
-  "description": "
+  "version": "0.2.4",
+  "description": "Comprehensive development framework with SPARC methodology for AI-assisted software development, multi-platform publishing (WordPress, PayloadCMS, Astro, Docusaurus, Mintlify), and Coolify deployment",
   "mcpName": "io.github.myaione/myaidev-method",
   "main": "src/index.js",
   "bin": {
     "myaidev-method": "./bin/cli.js",
     "myaidev-mcp-server": "./.claude/mcp/wordpress-server.js",
-    "myaidev-mcp-launcher": "./src/mcp/mcp-launcher.js"
+    "myaidev-mcp-launcher": "./src/mcp/mcp-launcher.js",
+    "myaidev-sparc-orchestrator": "./.claude/mcp/sparc-orchestrator-server.js"
   },
   "scripts": {
     "test": "node test/run-tests.js",
@@ -18,11 +19,18 @@
     "mcp:start": "node src/mcp/mcp-launcher.js start",
     "mcp:start:direct": "node .claude/mcp/wordpress-server.js",
     "mcp:start:simple": "node .claude/mcp/wordpress-server-simple.js",
+    "mcp:sparc": "node .claude/mcp/sparc-orchestrator-server.js",
     "mcp:status": "node src/mcp/mcp-launcher.js status",
     "mcp:health": "node src/mcp/mcp-launcher.js health",
     "build:mcp": "mkdir -p dist/mcp && cp .claude/mcp/*.js .claude/mcp/*.json dist/mcp/",
     "prepublishOnly": "npm install && npm test && npm run build:mcp",
     "postinstall": "echo \"MyAIDev Method installed successfully! Run 'npx myaidev-method init --claude' to get started.\"",
+    "dev:architect": "node src/scripts/dev-architect.js",
+    "dev:code": "node src/scripts/dev-code.js",
+    "dev:test": "node src/scripts/dev-test.js",
+    "dev:review": "node src/scripts/dev-review.js",
+    "dev:docs": "node src/scripts/dev-docs.js",
+    "sparc": "node src/scripts/sparc-workflow.js",
     "wordpress:troubleshoot": "node src/templates/docs/wordpress-troubleshoot.js",
     "wordpress:health-check": "node src/scripts/wordpress-health-check.js",
     "wordpress:security-scan": "node src/scripts/wordpress-security-scan.js",
@@ -40,6 +48,14 @@
   "keywords": [
     "claude-code",
     "ai-cli",
+    "sparc",
+    "sparc-methodology",
+    "agentic-development",
+    "spec-kit",
+    "software-development",
+    "code-quality",
+    "testing",
+    "code-review",
     "mcp",
     "wordpress",
     "payloadcms",
@@ -48,6 +64,7 @@
     "mintlify",
     "astro",
     "coolify",
+    "ramnode",
     "content-writer",
     "cms",
     "headless-cms",
@@ -98,6 +115,8 @@
     "USER_GUIDE.md",
     "LICENSE",
     ".env.example",
+    "DEV_WORKFLOW_GUIDE.md",
+    "MCP_INTEGRATION.md",
     "WORDPRESS_ADMIN_SCRIPTS.md",
     "COOLIFY_DEPLOYMENT.md",
     "PAYLOADCMS_PUBLISHING.md",
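With these additions the orchestrator is reachable in three ways: as a bin entry (myaidev-sparc-orchestrator), via npm run mcp:sparc, and through the per-phase scripts (npm run dev:architect, dev:code, dev:test, dev:review, dev:docs) plus npm run sparc for the full workflow. Argument handling for those commands lives in the newly added src/scripts files, which are not shown in this excerpt.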