agileflow 2.43.0 → 2.44.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -1
- package/scripts/generate-all.sh +77 -0
- package/scripts/generators/agent-registry.js +167 -0
- package/scripts/generators/command-registry.js +135 -0
- package/scripts/generators/index.js +87 -0
- package/scripts/generators/inject-babysit.js +167 -0
- package/scripts/generators/inject-help.js +109 -0
- package/scripts/generators/inject-readme.js +156 -0
- package/scripts/generators/skill-registry.js +144 -0
- package/src/core/commands/context.md +141 -5
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "agileflow",
-  "version": "2.43.0",
+  "version": "2.44.0",
   "description": "AI-driven agile development system for Claude Code, Cursor, Windsurf, and more",
   "keywords": [
     "agile",
@@ -31,6 +31,7 @@
   "files": [
     "tools/",
     "src/",
+    "scripts/",
     "LICENSE",
     "README.md"
   ],
```
package/scripts/generate-all.sh
ADDED
@@ -0,0 +1,77 @@

```bash
#!/bin/bash

###############################################################################
# AgileFlow Content Generation Script
#
# Regenerates all dynamic content in AgileFlow plugin files.
# Run this after:
# - Adding/removing/renaming commands
# - Adding/removing/renaming agents
# - Adding/removing/renaming skills
# - Changing command/agent/skill descriptions or metadata
#
# Usage:
# bash scripts/generate-all.sh
# npm run generate (if added to package.json)
###############################################################################

set -e # Exit on error

# Get script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
GENERATORS_DIR="$SCRIPT_DIR/generators"

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${BLUE}============================================================${NC}"
echo -e "${BLUE} AgileFlow Content Generation System${NC}"
echo -e "${BLUE}============================================================${NC}"
echo ""

# Check if Node.js is available
if ! command -v node &> /dev/null; then
    echo -e "${RED}❌ Error: Node.js is not installed or not in PATH${NC}"
    exit 1
fi

# Check if generators directory exists
if [ ! -d "$GENERATORS_DIR" ]; then
    echo -e "${RED}❌ Error: Generators directory not found: $GENERATORS_DIR${NC}"
    exit 1
fi

# Run the orchestrator
echo -e "${YELLOW}Running content generators...${NC}"
echo ""

cd "$GENERATORS_DIR"
node index.js

EXIT_CODE=$?

if [ $EXIT_CODE -eq 0 ]; then
    echo ""
    echo -e "${GREEN}============================================================${NC}"
    echo -e "${GREEN}✅ Content generation completed successfully!${NC}"
    echo -e "${GREEN}============================================================${NC}"
    echo ""
    echo -e "${YELLOW}Next steps:${NC}"
    echo "1. Review the changes: git diff"
    echo "2. Test the plugin to ensure everything works"
    echo "3. Commit the generated content: git add -A && git commit -m 'chore: regenerate plugin content'"
    echo ""
else
    echo ""
    echo -e "${RED}============================================================${NC}"
    echo -e "${RED}❌ Content generation failed${NC}"
    echo -e "${RED}============================================================${NC}"
    echo ""
    echo -e "${YELLOW}Please check the errors above and fix any issues.${NC}"
    echo ""
    exit 1
fi
```
package/scripts/generators/agent-registry.js
ADDED
@@ -0,0 +1,167 @@

```js
#!/usr/bin/env node

/**
 * Agent Registry Scanner
 *
 * Scans agents/ directory and extracts metadata from frontmatter.
 * Returns structured agent registry for use in generators.
 */

const fs = require('fs');
const path = require('path');

/**
 * Extract YAML frontmatter from markdown file
 * Handles multi-line values like tools arrays
 * @param {string} filePath - Path to markdown file
 * @returns {object} Frontmatter object
 */
function extractFrontmatter(filePath) {
  const content = fs.readFileSync(filePath, 'utf-8');
  const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);

  if (!frontmatterMatch) {
    return {};
  }

  const frontmatter = {};
  const lines = frontmatterMatch[1].split('\n');
  let currentKey = null;
  let currentArray = null;

  for (const line of lines) {
    // Handle array items (lines starting with -)
    if (line.trim().startsWith('-')) {
      if (currentArray) {
        currentArray.push(line.trim().substring(1).trim());
      }
      continue;
    }

    // Handle key-value pairs
    const match = line.match(/^(\w+):\s*(.*)$/);
    if (match) {
      const [, key, value] = match;
      currentKey = key;

      // If value is empty, it's likely an array
      if (!value) {
        currentArray = [];
        frontmatter[key] = currentArray;
      } else {
        // Remove quotes if present
        frontmatter[key] = value.replace(/^["']|["']$/g, '');
        currentArray = null;
      }
    }
  }

  return frontmatter;
}

/**
 * Categorize agent based on its role
 * @param {string} name - Agent name
 * @param {string} description - Agent description
 * @returns {string} Category name
 */
function categorizeAgent(name, description) {
  const categories = {
    'Core Development': ['ui', 'api', 'database', 'devops', 'ci'],
    'Specialized Development': ['mobile', 'integrations', 'datamigration'],
    'Quality & Testing': ['qa', 'testing', 'security', 'accessibility'],
    'Architecture & Design': ['design', 'adr-writer', 'epic-planner', 'product'],
    'Maintenance & Optimization': ['refactor', 'performance', 'monitoring'],
    'Documentation & Knowledge': ['documentation', 'readme-updater', 'research'],
    'Compliance & Governance': ['compliance', 'analytics'],
    'Mentorship': ['mentor']
  };

  for (const [category, keywords] of Object.entries(categories)) {
    if (keywords.some(kw => name.includes(kw))) {
      return category;
    }
  }

  return 'Other';
}

/**
 * Scan agents directory and build registry
 * @param {string} agentsDir - Path to agents directory
 * @returns {Array} Array of agent metadata objects
 */
function scanAgents(agentsDir) {
  const agents = [];
  const files = fs.readdirSync(agentsDir);

  for (const file of files) {
    if (!file.endsWith('.md')) continue;

    const filePath = path.join(agentsDir, file);
    const frontmatter = extractFrontmatter(filePath);
    const name = file.replace('.md', '');

    // Parse tools array if it exists
    let tools = [];
    if (frontmatter.tools) {
      if (Array.isArray(frontmatter.tools)) {
        tools = frontmatter.tools;
      } else if (typeof frontmatter.tools === 'string') {
        tools = frontmatter.tools.split(',').map(t => t.trim());
      }
    }

    agents.push({
      name,
      file,
      path: filePath,
      displayName: frontmatter.name || name,
      description: frontmatter.description || '',
      tools,
      model: frontmatter.model || 'haiku',
      color: frontmatter.color || 'blue',
      category: categorizeAgent(name, frontmatter.description || '')
    });
  }

  // Sort by category, then by name
  agents.sort((a, b) => {
    if (a.category !== b.category) {
      return a.category.localeCompare(b.category);
    }
    return a.name.localeCompare(b.name);
  });

  return agents;
}

/**
 * Main function
 */
function main() {
  const rootDir = path.resolve(__dirname, '../..');
  const agentsDir = path.join(rootDir, 'src/core/agents');

  if (!fs.existsSync(agentsDir)) {
    console.error(`Agents directory not found: ${agentsDir}`);
    process.exit(1);
  }

  const agents = scanAgents(agentsDir);

  // If called directly, output JSON
  if (require.main === module) {
    console.log(JSON.stringify(agents, null, 2));
  }

  return agents;
}

// Export for use in other scripts
module.exports = { scanAgents, extractFrontmatter, categorizeAgent };

// Run if called directly
if (require.main === module) {
  main();
}
```
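To make the frontmatter contract concrete, here is a minimal usage sketch. The agent file shown in the comments is hypothetical (not part of this diff), the field names match what `scanAgents` reads, and the `require` path assumes the snippet runs from the package root.

```js
// Illustrative sketch only - ui.md is a hypothetical agent file.
const { scanAgents } = require('./scripts/generators/agent-registry');

// Given src/core/agents/ui.md starting with frontmatter such as:
//   ---
//   name: ui
//   description: Builds and reviews UI components
//   tools:
//     - Read
//     - Edit
//   model: haiku
//   color: blue
//   ---
// scanAgents returns one entry per .md file, roughly:
//   { name: 'ui', file: 'ui.md', displayName: 'ui',
//     description: 'Builds and reviews UI components',
//     tools: ['Read', 'Edit'], model: 'haiku', color: 'blue',
//     category: 'Core Development' }
console.log(scanAgents('src/core/agents'));
```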
package/scripts/generators/command-registry.js
ADDED
@@ -0,0 +1,135 @@

```js
#!/usr/bin/env node

/**
 * Command Registry Scanner
 *
 * Scans commands/ directory and extracts metadata from frontmatter.
 * Returns structured command registry for use in generators.
 */

const fs = require('fs');
const path = require('path');

/**
 * Extract frontmatter from markdown file
 * @param {string} filePath - Path to markdown file
 * @returns {object} Frontmatter object
 */
function extractFrontmatter(filePath) {
  const content = fs.readFileSync(filePath, 'utf-8');
  const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);

  if (!frontmatterMatch) {
    return {};
  }

  const frontmatter = {};
  const lines = frontmatterMatch[1].split('\n');

  for (const line of lines) {
    const match = line.match(/^(\w+):\s*(.+)$/);
    if (match) {
      const [, key, value] = match;
      // Remove quotes if present
      frontmatter[key] = value.replace(/^["']|["']$/g, '');
    }
  }

  return frontmatter;
}

/**
 * Categorize command based on its name/description
 * @param {string} name - Command name
 * @param {string} description - Command description
 * @returns {string} Category name
 */
function categorizeCommand(name, description) {
  const categories = {
    'Story Management': ['story', 'epic', 'assign', 'status'],
    'Development': ['verify', 'baseline', 'resume', 'session-init', 'babysit'],
    'Quality & Testing': ['tests', 'review', 'ci'],
    'Documentation': ['docs', 'adr', 'readme-sync'],
    'Planning & Metrics': ['sprint', 'velocity', 'metrics', 'board', 'deps'],
    'Research & Strategy': ['research', 'product'],
    'Deployment & Operations': ['deploy', 'packages'],
    'Collaboration': ['update', 'handoff', 'feedback', 'retro'],
    'Maintenance': ['debt', 'compress', 'template'],
    'System': ['setup', 'help', 'diagnose', 'auto', 'agent']
  };

  for (const [category, keywords] of Object.entries(categories)) {
    if (keywords.some(kw => name.includes(kw))) {
      return category;
    }
  }

  return 'Other';
}

/**
 * Scan commands directory and build registry
 * @param {string} commandsDir - Path to commands directory
 * @returns {Array} Array of command metadata objects
 */
function scanCommands(commandsDir) {
  const commands = [];
  const files = fs.readdirSync(commandsDir);

  for (const file of files) {
    if (!file.endsWith('.md')) continue;

    const filePath = path.join(commandsDir, file);
    const frontmatter = extractFrontmatter(filePath);
    const name = file.replace('.md', '');

    commands.push({
      name,
      file,
      path: filePath,
      description: frontmatter.description || '',
      argumentHint: frontmatter['argument-hint'] || '',
      category: categorizeCommand(name, frontmatter.description || '')
    });
  }

  // Sort by category, then by name
  commands.sort((a, b) => {
    if (a.category !== b.category) {
      return a.category.localeCompare(b.category);
    }
    return a.name.localeCompare(b.name);
  });

  return commands;
}

/**
 * Main function
 */
function main() {
  const rootDir = path.resolve(__dirname, '../..');
  const commandsDir = path.join(rootDir, 'src/core/commands');

  if (!fs.existsSync(commandsDir)) {
    console.error(`Commands directory not found: ${commandsDir}`);
    process.exit(1);
  }

  const commands = scanCommands(commandsDir);

  // If called directly, output JSON
  if (require.main === module) {
    console.log(JSON.stringify(commands, null, 2));
  }

  return commands;
}

// Export for use in other scripts
module.exports = { scanCommands, extractFrontmatter, categorizeCommand };

// Run if called directly
if (require.main === module) {
  main();
}
```
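A rough sketch of the command side, under the same package-root assumption; the command file in the comments is hypothetical:

```js
// Illustrative sketch only - board.md is a hypothetical command file.
const { scanCommands, categorizeCommand } = require('./scripts/generators/command-registry');

// A command file such as src/core/commands/board.md beginning with:
//   ---
//   description: Show the sprint board
//   ---
// produces an entry like:
//   { name: 'board', file: 'board.md', description: 'Show the sprint board',
//     argumentHint: '', category: 'Planning & Metrics' }
console.log(scanCommands('src/core/commands'));
console.log(categorizeCommand('board', '')); // 'Planning & Metrics'
```

Note that the `\w+` key pattern in this file's `extractFrontmatter` does not match hyphenated keys such as `argument-hint`, so `argumentHint` comes from the `|| ''` default.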
package/scripts/generators/index.js
ADDED
@@ -0,0 +1,87 @@

```js
#!/usr/bin/env node

/**
 * Content Generation Orchestrator
 *
 * Runs all content generators to update AgileFlow plugin files.
 * Single source of truth: frontmatter and directory structure.
 */

const path = require('path');
const { execSync } = require('child_process');

/**
 * Run a generator script
 * @param {string} scriptName - Name of the generator script
 * @returns {boolean} Success status
 */
function runGenerator(scriptName) {
  const scriptPath = path.join(__dirname, scriptName);

  console.log(`\n${'='.repeat(60)}`);
  console.log(`Running: ${scriptName}`);
  console.log('='.repeat(60));

  try {
    execSync(`node "${scriptPath}"`, {
      cwd: __dirname,
      stdio: 'inherit'
    });
    console.log(`✅ ${scriptName} completed successfully`);
    return true;
  } catch (error) {
    console.error(`❌ ${scriptName} failed:`, error.message);
    return false;
  }
}

/**
 * Main orchestrator
 */
function main() {
  console.log('🚀 AgileFlow Content Generation System');
  console.log('Generating content from metadata...\n');

  const generators = [
    'inject-help.js',
    'inject-babysit.js',
    'inject-readme.js'
  ];

  const results = [];

  for (const generator of generators) {
    const success = runGenerator(generator);
    results.push({ generator, success });
  }

  // Summary
  console.log(`\n${'='.repeat(60)}`);
  console.log('GENERATION SUMMARY');
  console.log('='.repeat(60));

  let allSuccess = true;
  for (const { generator, success } of results) {
    const status = success ? '✅' : '❌';
    console.log(`${status} ${generator}`);
    if (!success) allSuccess = false;
  }

  console.log('');

  if (allSuccess) {
    console.log('🎉 All generators completed successfully!');
    console.log('📝 Generated content is ready for commit.');
    process.exit(0);
  } else {
    console.log('⚠️ Some generators failed. Please check errors above.');
    process.exit(1);
  }
}

// Run if called directly
if (require.main === module) {
  main();
}

module.exports = { runGenerator };
```
package/scripts/generators/inject-babysit.js
ADDED
@@ -0,0 +1,167 @@

```js
#!/usr/bin/env node

/**
 * Babysit Command Content Injector
 *
 * Injects agent list and command references into /agileflow:babysit command file.
 * Handles multiple AUTOGEN sections.
 */

const fs = require('fs');
const path = require('path');
const { scanAgents } = require('./agent-registry');
const { scanCommands } = require('./command-registry');

/**
 * Generate agent list content with details
 * @param {Array} agents - Array of agent metadata
 * @returns {string} Formatted agent list
 */
function generateAgentList(agents) {
  const lines = [];

  lines.push(`**AVAILABLE AGENTS** (${agents.length} total):`);
  lines.push('');

  let count = 1;
  for (const agent of agents) {
    lines.push(`${count}. **${agent.name}** (model: ${agent.model})`);
    lines.push(` - **Purpose**: ${agent.description}`);
    lines.push(` - **Tools**: ${agent.tools.join(', ')}`);
    lines.push(` - **Category**: ${agent.category}`);
    lines.push('');
    count++;
  }

  return lines.join('\n');
}

/**
 * Generate command reference list (compact format for babysit)
 * @param {Array} commands - Array of command metadata
 * @returns {string} Formatted command list
 */
function generateCommandReference(commands) {
  const lines = [];

  // Group by category
  const categories = {};
  for (const cmd of commands) {
    if (!categories[cmd.category]) {
      categories[cmd.category] = [];
    }
    categories[cmd.category].push(cmd);
  }

  for (const [category, cmds] of Object.entries(categories)) {
    const cmdNames = cmds.map(c => c.name).join(', ');
    lines.push(`- **${category}**: ${cmdNames}`);
  }

  return lines.join('\n');
}

/**
 * Inject content between specified AUTOGEN markers
 * @param {string} content - Original file content
 * @param {string} markerName - Name of the marker (e.g., AGENT_LIST, COMMAND_REF)
 * @param {string} generated - Generated content to inject
 * @returns {string} Updated file content
 */
function injectContentByMarker(content, markerName, generated) {
  const startMarker = `<!-- AUTOGEN:${markerName}:START -->`;
  const endMarker = `<!-- AUTOGEN:${markerName}:END -->`;

  const startIdx = content.indexOf(startMarker);
  const endIdx = content.indexOf(endMarker);

  if (startIdx === -1 || endIdx === -1) {
    console.warn(`AUTOGEN:${markerName} markers not found - skipping`);
    return content;
  }

  const timestamp = new Date().toISOString().split('T')[0];
  const injectedContent = `${startMarker}\n<!-- Auto-generated on ${timestamp}. Do not edit manually. -->\n\n${generated}\n${endMarker}`;

  return content.substring(0, startIdx) + injectedContent + content.substring(endIdx + endMarker.length);
}

/**
 * Add AUTOGEN markers to babysit file if they don't exist
 * @param {string} content - Original file content
 * @returns {string} Updated content with markers
 */
function addMarkersIfMissing(content) {
  let updated = content;

  // Add AGENT_LIST markers around the agent list section
  if (!content.includes('<!-- AUTOGEN:AGENT_LIST:START -->')) {
    // Find "**AVAILABLE AGENTS**" and wrap it
    const agentSectionStart = content.indexOf('**AVAILABLE AGENTS**');
    if (agentSectionStart !== -1) {
      // Find the end of the agent list (before "**WHEN TO SPAWN AGENTS**")
      const agentSectionEnd = content.indexOf('**WHEN TO SPAWN AGENTS**', agentSectionStart);
      if (agentSectionEnd !== -1) {
        const before = content.substring(0, agentSectionStart);
        const agentSection = content.substring(agentSectionStart, agentSectionEnd);
        const after = content.substring(agentSectionEnd);

        updated = `${before}<!-- AUTOGEN:AGENT_LIST:START -->\n${agentSection}<!-- AUTOGEN:AGENT_LIST:END -->\n\n${after}`;
        console.log('✅ Added AGENT_LIST markers to babysit.md');
      }
    }
  }

  return updated;
}

/**
 * Main function
 */
function main() {
  const rootDir = path.resolve(__dirname, '../..');
  const babysitFile = path.join(rootDir, 'src/core/commands/babysit.md');
  const agentsDir = path.join(rootDir, 'src/core/agents');
  const commandsDir = path.join(rootDir, 'src/core/commands');

  // Check if babysit file exists
  if (!fs.existsSync(babysitFile)) {
    console.error(`Babysit file not found: ${babysitFile}`);
    process.exit(1);
  }

  // Scan agents and commands
  console.log('Scanning agents...');
  const agents = scanAgents(agentsDir);
  console.log(`Found ${agents.length} agents`);

  console.log('Scanning commands...');
  const commands = scanCommands(commandsDir);
  console.log(`Found ${commands.length} commands`);

  // Read babysit file
  let babysitContent = fs.readFileSync(babysitFile, 'utf-8');

  // Add markers if missing
  babysitContent = addMarkersIfMissing(babysitContent);

  // Generate content
  console.log('Generating agent list...');
  const agentList = generateAgentList(agents);

  // Inject content
  console.log('Injecting content into babysit.md...');
  babysitContent = injectContentByMarker(babysitContent, 'AGENT_LIST', agentList);

  // Write back
  fs.writeFileSync(babysitFile, babysitContent, 'utf-8');
  console.log('✅ Successfully updated babysit.md');
}

// Export for use in orchestrator
module.exports = { generateAgentList, generateCommandReference, injectContentByMarker, addMarkersIfMissing };

// Run if called directly
if (require.main === module) {
  main();
}
```
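The AUTOGEN marker mechanism is easiest to see on a small string. A minimal sketch, with a made-up input document and the same package-root `require` path assumption:

```js
// Illustrative sketch only - the input document below is made up.
const { injectContentByMarker } = require('./scripts/generators/inject-babysit');

const doc = [
  '# Babysit',
  '<!-- AUTOGEN:AGENT_LIST:START -->',
  'stale, hand-written agent list',
  '<!-- AUTOGEN:AGENT_LIST:END -->',
  '',
  '**WHEN TO SPAWN AGENTS**',
].join('\n');

// The region from the START marker through the END marker is rewritten:
// the generated block is wrapped in fresh markers plus a dated
// "Do not edit manually" HTML comment, and the rest of the file is untouched.
console.log(injectContentByMarker(doc, 'AGENT_LIST', '1. **ui** (model: haiku)'));
```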
package/scripts/generators/inject-help.js
ADDED
@@ -0,0 +1,109 @@

```js
#!/usr/bin/env node

/**
 * Help Command Content Injector
 *
 * Injects command list into /agileflow:help command file.
 * Finds AUTOGEN markers and replaces content with generated command directory.
 */

const fs = require('fs');
const path = require('path');
const { scanCommands } = require('./command-registry');

/**
 * Generate command list content grouped by category
 * @param {Array} commands - Array of command metadata
 * @returns {string} Formatted command list
 */
function generateCommandList(commands) {
  const lines = [];

  // Group commands by category
  const categories = {};
  for (const cmd of commands) {
    if (!categories[cmd.category]) {
      categories[cmd.category] = [];
    }
    categories[cmd.category].push(cmd);
  }

  // Generate markdown for each category
  for (const [category, cmds] of Object.entries(categories)) {
    lines.push(`**${category}:**`);
    for (const cmd of cmds) {
      const hint = cmd.argumentHint ? ` ${cmd.argumentHint}` : '';
      lines.push(`- \`/agileflow:${cmd.name}${hint}\` - ${cmd.description}`);
    }
    lines.push(''); // Blank line between categories
  }

  return lines.join('\n');
}

/**
 * Inject content between AUTOGEN markers
 * @param {string} content - Original file content
 * @param {string} generated - Generated content to inject
 * @returns {string} Updated file content
 */
function injectContent(content, generated) {
  const startMarker = '<!-- AUTOGEN:COMMAND_LIST:START -->';
  const endMarker = '<!-- AUTOGEN:COMMAND_LIST:END -->';

  const startIdx = content.indexOf(startMarker);
  const endIdx = content.indexOf(endMarker);

  if (startIdx === -1 || endIdx === -1) {
    console.error('AUTOGEN markers not found in file');
    return content;
  }

  const timestamp = new Date().toISOString().split('T')[0];
  const injectedContent = `${startMarker}\n<!-- Auto-generated on ${timestamp}. Do not edit manually. -->\n\n${generated}\n${endMarker}`;

  return content.substring(0, startIdx) + injectedContent + content.substring(endIdx + endMarker.length);
}

/**
 * Main function
 */
function main() {
  const rootDir = path.resolve(__dirname, '../..');
  const helpFile = path.join(rootDir, 'src/core/commands/help.md');
  const commandsDir = path.join(rootDir, 'src/core/commands');

  // Check if help file exists
  if (!fs.existsSync(helpFile)) {
    console.error(`Help file not found: ${helpFile}`);
    process.exit(1);
  }

  // Scan commands
  console.log('Scanning commands...');
  const commands = scanCommands(commandsDir);
  console.log(`Found ${commands.length} commands`);

  // Generate command list
  console.log('Generating command list...');
  const commandList = generateCommandList(commands);

  // Read help file
  const helpContent = fs.readFileSync(helpFile, 'utf-8');

  // Inject content
  console.log('Injecting content into help.md...');
  const updatedContent = injectContent(helpContent, commandList);

  // Write back
  fs.writeFileSync(helpFile, updatedContent, 'utf-8');
  console.log('✅ Successfully updated help.md');
}

// Export for use in orchestrator
module.exports = { generateCommandList, injectContent };

// Run if called directly
if (require.main === module) {
  main();
}
```
package/scripts/generators/inject-readme.js
ADDED
@@ -0,0 +1,156 @@

```js
#!/usr/bin/env node

/**
 * README Content Injector
 *
 * Injects stats, agent tables, and skill lists into README.md.
 * Handles multiple AUTOGEN sections for different content types.
 */

const fs = require('fs');
const path = require('path');
const { scanAgents } = require('./agent-registry');
const { scanCommands } = require('./command-registry');
const { scanSkills } = require('./skill-registry');

/**
 * Generate stats content (command/agent/skill counts)
 * @param {Object} counts - {commands, agents, skills}
 * @returns {string} Formatted stats
 */
function generateStats(counts) {
  return `- **${counts.commands}** slash commands\n- **${counts.agents}** specialized agents\n- **${counts.skills}** code generation skills`;
}

/**
 * Generate agent table (markdown table format)
 * @param {Array} agents - Array of agent metadata
 * @returns {string} Formatted table
 */
function generateAgentTable(agents) {
  const lines = [];

  lines.push('| Agent | Description | Model | Category |');
  lines.push('|-------|-------------|-------|----------|');

  for (const agent of agents) {
    const tools = agent.tools.slice(0, 3).join(', ') + (agent.tools.length > 3 ? '...' : '');
    lines.push(`| ${agent.name} | ${agent.description} | ${agent.model} | ${agent.category} |`);
  }

  return lines.join('\n');
}

/**
 * Generate skill list (bulleted list grouped by category)
 * @param {Array} skills - Array of skill metadata
 * @returns {string} Formatted list
 */
function generateSkillList(skills) {
  const lines = [];

  // Group by category
  const categories = {};
  for (const skill of skills) {
    if (!categories[skill.category]) {
      categories[skill.category] = [];
    }
    categories[skill.category].push(skill);
  }

  for (const [category, categorySkills] of Object.entries(categories)) {
    lines.push(`**${category}:**`);
    for (const skill of categorySkills) {
      lines.push(`- **${skill.name}**: ${skill.description}`);
    }
    lines.push('');
  }

  return lines.join('\n');
}

/**
 * Inject content between specified AUTOGEN markers
 * @param {string} content - Original file content
 * @param {string} markerName - Name of the marker (e.g., STATS, AGENT_TABLE)
 * @param {string} generated - Generated content to inject
 * @returns {string} Updated file content
 */
function injectContentByMarker(content, markerName, generated) {
  const startMarker = `<!-- AUTOGEN:${markerName}:START -->`;
  const endMarker = `<!-- AUTOGEN:${markerName}:END -->`;

  const startIdx = content.indexOf(startMarker);
  const endIdx = content.indexOf(endMarker);

  if (startIdx === -1 || endIdx === -1) {
    console.warn(`AUTOGEN:${markerName} markers not found - skipping`);
    return content;
  }

  const timestamp = new Date().toISOString().split('T')[0];
  const injectedContent = `${startMarker}\n<!-- Auto-generated on ${timestamp}. Do not edit manually. -->\n\n${generated}\n${endMarker}`;

  return content.substring(0, startIdx) + injectedContent + content.substring(endIdx + endMarker.length);
}

/**
 * Main function
 */
function main() {
  const cliDir = path.resolve(__dirname, '../..');
  const rootDir = path.resolve(cliDir, '../..');
  const readmeFile = path.join(rootDir, 'README.md');
  const agentsDir = path.join(cliDir, 'src/core/agents');
  const commandsDir = path.join(cliDir, 'src/core/commands');
  const skillsDir = path.join(cliDir, 'src/core/skills');

  // Check if README exists
  if (!fs.existsSync(readmeFile)) {
    console.error(`README not found: ${readmeFile}`);
    process.exit(1);
  }

  // Scan all registries
  console.log('Scanning commands, agents, and skills...');
  const commands = scanCommands(commandsDir);
  const agents = scanAgents(agentsDir);
  const skills = scanSkills(skillsDir);

  console.log(`Found: ${commands.length} commands, ${agents.length} agents, ${skills.length} skills`);

  // Read README
  let readmeContent = fs.readFileSync(readmeFile, 'utf-8');

  // Generate content
  console.log('Generating stats...');
  const stats = generateStats({
    commands: commands.length,
    agents: agents.length,
    skills: skills.length
  });

  console.log('Generating agent table...');
  const agentTable = generateAgentTable(agents);

  console.log('Generating skill list...');
  const skillList = generateSkillList(skills);

  // Inject content
  console.log('Injecting content into README.md...');
  readmeContent = injectContentByMarker(readmeContent, 'STATS', stats);
  readmeContent = injectContentByMarker(readmeContent, 'AGENT_TABLE', agentTable);
  readmeContent = injectContentByMarker(readmeContent, 'SKILL_LIST', skillList);

  // Write back
  fs.writeFileSync(readmeFile, readmeContent, 'utf-8');
  console.log('✅ Successfully updated README.md');
}

// Export for use in orchestrator
module.exports = { generateStats, generateAgentTable, generateSkillList, injectContentByMarker };

// Run if called directly
if (require.main === module) {
  main();
}
```
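For the README generators, a small sketch of the output shapes; the counts and the agent record below are fabricated for the example, and the `require` path again assumes the package root:

```js
// Illustrative sketch only - the numbers and the agent record are made up.
const { generateStats, generateAgentTable } = require('./scripts/generators/inject-readme');

console.log(generateStats({ commands: 40, agents: 25, skills: 14 }));
// - **40** slash commands
// - **25** specialized agents
// - **14** code generation skills

console.log(generateAgentTable([
  { name: 'ui', description: 'Builds UI components', tools: ['Read', 'Edit'],
    model: 'haiku', category: 'Core Development' },
]));
// | Agent | Description | Model | Category |
// |-------|-------------|-------|----------|
// | ui | Builds UI components | haiku | Core Development |
```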
package/scripts/generators/skill-registry.js
ADDED
@@ -0,0 +1,144 @@

```js
#!/usr/bin/env node

/**
 * Skill Registry Scanner
 *
 * Scans skills/ directory and extracts metadata from SKILL.md frontmatter.
 * Returns structured skill registry for use in generators.
 */

const fs = require('fs');
const path = require('path');

/**
 * Extract YAML frontmatter from markdown file
 * @param {string} filePath - Path to markdown file
 * @returns {object} Frontmatter object
 */
function extractFrontmatter(filePath) {
  const content = fs.readFileSync(filePath, 'utf-8');
  const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);

  if (!frontmatterMatch) {
    return {};
  }

  const frontmatter = {};
  const lines = frontmatterMatch[1].split('\n');

  for (const line of lines) {
    const match = line.match(/^(\w+):\s*(.+)$/);
    if (match) {
      const [, key, value] = match;
      // Remove quotes if present
      frontmatter[key] = value.replace(/^["']|["']$/g, '');
    }
  }

  return frontmatter;
}

/**
 * Categorize skill based on its name/description
 * @param {string} name - Skill name
 * @param {string} description - Skill description
 * @returns {string} Category name
 */
function categorizeSkill(name, description) {
  const categories = {
    'Story & Planning': ['story', 'epic', 'sprint', 'acceptance-criteria'],
    'Code Generation': ['type-definitions', 'validation-schema', 'error-handler'],
    'Testing': ['test-case'],
    'Documentation': ['adr', 'api-documentation', 'changelog', 'pr-description'],
    'Architecture': ['sql-schema', 'diagram'],
    'Deployment': ['deployment-guide', 'migration-checklist']
  };

  const lowerName = name.toLowerCase();
  const lowerDesc = description.toLowerCase();

  for (const [category, keywords] of Object.entries(categories)) {
    if (keywords.some(kw => lowerName.includes(kw) || lowerDesc.includes(kw))) {
      return category;
    }
  }

  return 'Other';
}

/**
 * Scan skills directory and build registry
 * @param {string} skillsDir - Path to skills directory
 * @returns {Array} Array of skill metadata objects
 */
function scanSkills(skillsDir) {
  const skills = [];

  // Each skill is in its own directory with a SKILL.md file
  const skillDirs = fs.readdirSync(skillsDir);

  for (const skillDir of skillDirs) {
    const skillPath = path.join(skillsDir, skillDir);

    // Skip if not a directory
    if (!fs.statSync(skillPath).isDirectory()) continue;

    const skillFile = path.join(skillPath, 'SKILL.md');

    // Skip if SKILL.md doesn't exist
    if (!fs.existsSync(skillFile)) continue;

    const frontmatter = extractFrontmatter(skillFile);
    const name = frontmatter.name || skillDir;
    const description = frontmatter.description || '';

    skills.push({
      name,
      directory: skillDir,
      file: 'SKILL.md',
      path: skillFile,
      description,
      category: categorizeSkill(name, description)
    });
  }

  // Sort by category, then by name
  skills.sort((a, b) => {
    if (a.category !== b.category) {
      return a.category.localeCompare(b.category);
    }
    return a.name.localeCompare(b.name);
  });

  return skills;
}

/**
 * Main function
 */
function main() {
  const rootDir = path.resolve(__dirname, '../..');
  const skillsDir = path.join(rootDir, 'src/core/skills');

  if (!fs.existsSync(skillsDir)) {
    console.error(`Skills directory not found: ${skillsDir}`);
    process.exit(1);
  }

  const skills = scanSkills(skillsDir);

  // If called directly, output JSON
  if (require.main === module) {
    console.log(JSON.stringify(skills, null, 2));
  }

  return skills;
}

// Export for use in other scripts
module.exports = { scanSkills, extractFrontmatter, categorizeSkill };

// Run if called directly
if (require.main === module) {
  main();
}
```
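The skills scanner expects one directory per skill containing a SKILL.md. A rough sketch with a hypothetical skill directory, under the same package-root assumption:

```js
// Illustrative sketch only - the changelog-entry skill is hypothetical.
const { scanSkills, categorizeSkill } = require('./scripts/generators/skill-registry');

// Expected layout: src/core/skills/changelog-entry/SKILL.md with frontmatter such as:
//   ---
//   name: changelog-entry
//   description: Draft a changelog entry from commit messages
//   ---
// scanSkills returns entries roughly like:
//   { name: 'changelog-entry', directory: 'changelog-entry', file: 'SKILL.md',
//     description: 'Draft a changelog entry from commit messages',
//     category: 'Documentation' }
console.log(scanSkills('src/core/skills'));
console.log(categorizeSkill('changelog-entry', '')); // 'Documentation'
```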
package/src/core/commands/context.md
CHANGED

````diff
@@ -1,6 +1,6 @@
 ---
 description: Generate context export for web AI tools
-argument-hint: [MODE=full|export|note|research] [NOTE=<text>] [TOPIC=<text>] [DETAILS=<text>]
+argument-hint: [MODE=full|export|note|research|import] [NOTE=<text>] [TOPIC=<text>] [DETAILS=<text>] [CONTENT=<text>] [SOURCE=<url>]
 ---
 
 <!-- COMPACT_SUMMARY_START
@@ -12,11 +12,12 @@ This section is extracted by the PreCompact hook to preserve essential context a
 Web AI Context Manager - Generates/exports/manages project context briefs for web AI tools (ChatGPT, Perplexity, Gemini, Claude web).
 
 ### Critical Behavioral Rules
-- **ALWAYS create TodoWrite list** for MODE=full and MODE=research
+- **ALWAYS create TodoWrite list** for MODE=full, MODE=research, and MODE=import to track multi-step workflows
 - **Diff-first approach**: Show changes and wait for YES/NO confirmation before ANY file writes
 - **Preserve user-written content**: Only update managed sections in docs/context.md
 - **No writes in export mode**: MODE=export outputs text only, never writes files
 - **Research is two-step**: STEP 1 generates prompt, STEP 2 stores results when user returns
+- **Import is one-step**: Process CONTENT immediately and create research file
 - **Link research files**: Always reference research from ADRs/Epics/Stories that use it
 
 ### Core Workflow
@@ -50,6 +51,16 @@ Web AI Context Manager - Generates/exports/manages project context briefs for we
 7. Ask if user wants ADR/Epic/Story created from research
 8. Add research reference to created ADR/Epic/Story
 
+**MODE=import**
+1. Create todo list tracking: validate inputs, process content, extract key points, extract code, generate actions, suggest stories, format research file, save, update index
+2. Validate TOPIC and CONTENT are provided
+3. Process raw content (transcript, article, etc.) by: summarizing key points, extracting code snippets, generating action items, suggesting user stories
+4. Format into structured research file with all extracted sections
+5. Save to docs/10-research/YYYYMMDD-topic-slug.md
+6. Update docs/10-research/README.md index
+7. Ask if user wants ADR/Epic/Story created from imported content
+8. Add research reference to created ADR/Epic/Story
+
 ### Key Files
 - docs/context.md - Main context brief (managed sections + user content)
 - docs/10-research/YYYYMMDD-topic-slug.md - Research results storage
@@ -68,9 +79,11 @@ Generate, export, or manage the web AI context brief.
 ROLE: Web AI Context Manager
 
 INPUTS (optional)
-- MODE=full|export|note|research (default: full)
+- MODE=full|export|note|research|import (default: full)
 - NOTE=<text> (required if MODE=note)
-- TOPIC=<text> (required if MODE=research)
+- TOPIC=<text> (required if MODE=research or MODE=import)
+- CONTENT=<text> (required if MODE=import - raw content to process)
+- SOURCE=<url> (optional for MODE=import - original source URL)
 
 ---
 
@@ -249,6 +262,124 @@ When user pastes research results back:
 
 ---
 
+## MODE=import
+Import raw content (transcripts, articles, notes) and convert to structured research file.
+
+### Input
+- TOPIC=<text> (required - name for the research file)
+- CONTENT=<text> (required - raw content to process: transcript, article, notes, etc.)
+- SOURCE=<url> (optional - original source URL for reference)
+
+### Use Cases
+- YouTube video transcripts
+- Conference talk notes
+- Podcast transcripts
+- Blog posts / articles
+- Documentation pages
+- Forum discussions / Stack Overflow threads
+- Meeting notes
+
+### TODO LIST TRACKING
+**CRITICAL**: Immediately create a todo list using TodoWrite tool to track import workflow:
+```
+1. Validate TOPIC and CONTENT are provided
+2. Analyze and summarize key points from content
+3. Extract any code snippets
+4. Generate action items based on content
+5. Create user story suggestions (if applicable)
+6. Format into structured research markdown
+7. Show diff for review
+8. Save to docs/10-research/YYYYMMDD-topic-slug.md
+9. Update docs/10-research/README.md index
+10. Ask about creating ADR/Epic/Story from research
+```
+
+Mark each step complete as you finish it.
+
+### Processing Steps
+
+1. **Validate Inputs**
+   - Verify TOPIC is provided (error if missing)
+   - Verify CONTENT is provided (error if missing)
+   - SOURCE is optional but recommended for attribution
+
+2. **Analyze Content**
+   Extract from the raw content:
+   - **Summary**: 2-3 paragraph TL;DR of the main points
+   - **Key Findings**: Bullet list of important takeaways
+   - **Code Snippets**: Any code blocks, commands, or configuration (preserve exactly)
+   - **Action Items**: Concrete next steps mentioned or implied
+   - **Story Suggestions**: Potential user stories/epics based on content
+
+3. **Format Research File**
+   ```markdown
+   # [Topic Title]
+
+   **Import Date**: YYYY-MM-DD
+   **Topic**: [original topic]
+   **Source**: [URL if provided, or "Direct import"]
+   **Content Type**: [transcript/article/notes/etc.]
+
+   ## Summary
+   [2-3 paragraph executive summary of the content]
+
+   ## Key Findings
+   - [Main point 1 with details]
+   - [Main point 2 with details]
+   - [Main point 3 with details]
+   - ...
+
+   ## Code Snippets
+   [Preserve all code snippets exactly as they appeared]
+   ```language
+   [code here]
+   ```
+
+   ## Action Items
+   - [ ] [Action 1 - concrete next step]
+   - [ ] [Action 2 - concrete next step]
+   - [ ] [Action 3 - concrete next step]
+
+   ## Story Suggestions
+   [If content suggests feature work, list potential stories]
+
+   ### Potential Epic: [Epic Title]
+   - **US-XXXX**: [Story 1 title]
+     - AC: [acceptance criteria bullet]
+   - **US-XXXX**: [Story 2 title]
+     - AC: [acceptance criteria bullet]
+
+   ## Raw Content Reference
+   <details>
+   <summary>Original content (click to expand)</summary>
+
+   [First 500 chars of original content for reference...]
+   </details>
+
+   ## References
+   - Source: [URL or "Direct import"]
+   - Import date: [YYYY-MM-DD]
+   ```
+
+4. **Save and Index**
+   - Save to `docs/10-research/YYYYMMDD-<topic-slug>.md`
+   - Update `docs/10-research/README.md` with new entry
+
+5. **Offer Next Steps**
+   Ask user via AskUserQuestion:
+   - Create an ADR referencing this research?
+   - Create an Epic/Stories based on the story suggestions?
+   - Link this research to an existing Epic/Story?
+
+### Rules
+- Diff-first; YES/NO before writing research file
+- Preserve ALL code snippets exactly as provided
+- Generate actionable items (not vague suggestions)
+- Keep raw content reference collapsed to save space
+- Always update the research index
+
+---
+
 ## Usage Examples
 
 ```bash
@@ -262,9 +393,13 @@ When user pastes research results back:
 # Add a quick note
 /agileflow:context MODE=note NOTE="User reported auth bug in production"
 
-# Build research prompt
+# Build research prompt for web AI
 /agileflow:context MODE=research TOPIC="Implement OAuth 2.0 with Google"
 /agileflow:context MODE=research TOPIC="Add Stripe payments" DETAILS="Launch by end of sprint"
+
+# Import external content (transcripts, articles, notes)
+/agileflow:context MODE=import TOPIC="React Server Components" CONTENT="[paste transcript here]"
+/agileflow:context MODE=import TOPIC="Stripe Webhooks Tutorial" SOURCE="https://youtube.com/..." CONTENT="[paste transcript here]"
 ```
 
 ---
@@ -276,3 +411,4 @@ Depending on MODE:
 - **export**: Text output ready to paste into web AI tool
 - **note**: Appended note to docs/context.md (after YES confirmation)
 - **research**: Research prompt in code block ready to paste into web AI tool
+- **import**: Processed research file saved to docs/10-research/ (after YES confirmation)
````