agileflow 2.43.0 → 2.45.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -1
- package/scripts/README.md +267 -0
- package/scripts/agileflow-configure.js +927 -0
- package/scripts/agileflow-statusline.sh +355 -0
- package/scripts/agileflow-stop.sh +13 -0
- package/scripts/agileflow-welcome.js +427 -0
- package/scripts/archive-completed-stories.sh +162 -0
- package/scripts/clear-active-command.js +48 -0
- package/scripts/compress-status.sh +116 -0
- package/scripts/expertise-metrics.sh +264 -0
- package/scripts/generate-all.sh +77 -0
- package/scripts/generators/agent-registry.js +167 -0
- package/scripts/generators/command-registry.js +135 -0
- package/scripts/generators/index.js +87 -0
- package/scripts/generators/inject-babysit.js +167 -0
- package/scripts/generators/inject-help.js +109 -0
- package/scripts/generators/inject-readme.js +156 -0
- package/scripts/generators/skill-registry.js +144 -0
- package/scripts/get-env.js +209 -0
- package/scripts/obtain-context.js +293 -0
- package/scripts/precompact-context.sh +123 -0
- package/scripts/validate-expertise.sh +259 -0
- package/src/core/commands/context.md +141 -5
- package/tools/cli/installers/core/installer.js +97 -4
package/scripts/compress-status.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+# compress-status.sh
+# Removes verbose fields from status.json to reduce file size
+# Keeps only essential tracking metadata
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Default paths (relative to project root)
+DOCS_DIR="docs"
+STATUS_FILE="$DOCS_DIR/09-agents/status.json"
+
+# Find project root (directory containing .agileflow)
+PROJECT_ROOT="$(pwd)"
+while [[ ! -d "$PROJECT_ROOT/.agileflow" ]] && [[ "$PROJECT_ROOT" != "/" ]]; do
+  PROJECT_ROOT="$(dirname "$PROJECT_ROOT")"
+done
+
+if [[ "$PROJECT_ROOT" == "/" ]]; then
+  echo -e "${RED}Error: Not in an AgileFlow project (no .agileflow directory found)${NC}"
+  exit 1
+fi
+
+# Update paths to absolute
+STATUS_FILE="$PROJECT_ROOT/$STATUS_FILE"
+
+# Check if status.json exists
+if [[ ! -f "$STATUS_FILE" ]]; then
+  echo -e "${YELLOW}No status.json found at $STATUS_FILE${NC}"
+  exit 0
+fi
+
+echo -e "${BLUE}Compressing status.json...${NC}"
+
+# Get original size
+ORIGINAL_SIZE=$(wc -c < "$STATUS_FILE")
+
+# Compress using Node.js
+if command -v node &> /dev/null; then
+  STATUS_FILE="$STATUS_FILE" node <<'EOF'
+const fs = require('fs');
+
+const statusFile = process.env.STATUS_FILE;
+
+// Read status.json
+const status = JSON.parse(fs.readFileSync(statusFile, 'utf8'));
+const stories = status.stories || {};
+
+// Fields to keep (essential tracking metadata only)
+const KEEP_FIELDS = [
+  'id',
+  'title',
+  'status',
+  'owner',
+  'created_at',
+  'updated_at',
+  'completed_at',
+  'epic',
+  'dependencies',
+  'blocked_by',
+  'blocks',
+  'pr_url',
+  'test_status',
+  'priority',
+  'tags'
+];
+
+// Compress each story
+let removedFields = 0;
+const compressed = {};
+
+for (const [storyId, story] of Object.entries(stories)) {
+  compressed[storyId] = {};
+
+  for (const field of KEEP_FIELDS) {
+    if (story[field] !== undefined) {
+      compressed[storyId][field] = story[field];
+    }
+  }
+
+  const originalFieldCount = Object.keys(story).length;
+  const compressedFieldCount = Object.keys(compressed[storyId]).length;
+  removedFields += (originalFieldCount - compressedFieldCount);
+}
+
+// Update status.json
+status.stories = compressed;
+status.updated = new Date().toISOString();
+fs.writeFileSync(statusFile, JSON.stringify(status, null, 2));
+
+console.log(`\x1b[32m✓ Removed ${removedFields} verbose fields\x1b[0m`);
+console.log(`\x1b[34mStories processed: ${Object.keys(compressed).length}\x1b[0m`);
+EOF
+
+  # Get new size
+  NEW_SIZE=$(wc -c < "$STATUS_FILE")
+  SAVED=$((ORIGINAL_SIZE - NEW_SIZE))
+  PERCENT=$((SAVED * 100 / ORIGINAL_SIZE))
+
+  echo -e "${GREEN}Compression complete!${NC}"
+  echo -e "${BLUE}Original size: ${ORIGINAL_SIZE} bytes${NC}"
+  echo -e "${BLUE}New size: ${NEW_SIZE} bytes${NC}"
+  echo -e "${BLUE}Saved: ${SAVED} bytes (${PERCENT}%)${NC}"
+else
+  echo -e "${RED}Error: Node.js not found. Cannot compress status.json.${NC}"
+  exit 1
+fi
+
+exit 0
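What the compression pass actually does: every story field outside KEEP_FIELDS is dropped, while top-level keys other than stories are left alone (the embedded Node heredoc only rewrites status.stories and stamps status.updated). A minimal before/after sketch, assuming a checkout where the script lives at scripts/compress-status.sh; the story ID ST-001 and its extra notes field are invented for illustration:

# Before: status.json holds "ST-001": { "id": "ST-001", "title": "Login page", "status": "done", "notes": "long prose..." }
bash scripts/compress-status.sh
# After: only KEEP_FIELDS survive on each story
node -e "const s = require('./docs/09-agents/status.json'); console.log(s.stories['ST-001'])"
# => { id: 'ST-001', title: 'Login page', status: 'done' }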
package/scripts/expertise-metrics.sh
@@ -0,0 +1,264 @@
+#!/bin/bash
+#
+# expertise-metrics.sh - Metrics dashboard for Agent Expert system
+#
+# Purpose: Track the health and activity of Agent Expert expertise files
+#
+# Metrics tracked:
+#   1. Total experts count
+#   2. Experts with learnings (self-improved at least once)
+#   3. Average file size (lines)
+#   4. Staleness distribution (how old are the files)
+#   5. Recent activity (updated in last 7 days)
+#
+# Usage:
+#   ./scripts/expertise-metrics.sh          # Show metrics dashboard
+#   ./scripts/expertise-metrics.sh --json   # Output as JSON (for logging)
+#   ./scripts/expertise-metrics.sh --csv    # Output as CSV
+#   ./scripts/expertise-metrics.sh --help   # Show help
+#
+
+set -e
+
+# Configuration
+EXPERTS_DIR="packages/cli/src/core/experts"
+
+# Colors
+BLUE='\033[0;34m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+# Help message
+show_help() {
+  echo "Usage: $0 [--json | --csv | --help]"
+  echo ""
+  echo "Metrics dashboard for Agent Expert expertise files"
+  echo ""
+  echo "Options:"
+  echo "  --json    Output metrics as JSON"
+  echo "  --csv     Output metrics as CSV"
+  echo "  --help    Show this help message"
+  echo ""
+  echo "Metrics tracked:"
+  echo "  - Total experts count"
+  echo "  - Experts with learnings (self-improved)"
+  echo "  - Average file size (lines)"
+  echo "  - Staleness distribution"
+  echo "  - Recent activity (last 7 days)"
+}
+
+# Extract YAML field
+get_yaml_field() {
+  local file="$1"
+  local field="$2"
+  grep "^${field}:" "$file" 2>/dev/null | sed "s/^${field}:[[:space:]]*//" | tr -d '"' || echo ""
+}
+
+# Check if learnings is empty
+has_learnings() {
+  local file="$1"
+  # Check for non-empty learnings
+  if grep -q "^learnings: \[\]" "$file" 2>/dev/null; then
+    return 1  # Empty
+  fi
+  # Check if there's actual content after learnings:
+  local after_learnings
+  after_learnings=$(sed -n '/^learnings:/,/^[a-z]/p' "$file" | grep -v "^#" | grep -v "^learnings:" | grep -v "^$" | grep "^ -" | head -1)
+  if [ -n "$after_learnings" ]; then
+    return 0  # Has content
+  fi
+  return 1  # Empty
+}
+
+# Get file line count
+get_line_count() {
+  wc -l < "$1" | tr -d ' '
+}
+
+# Calculate days since date
+days_since() {
+  local date_str="$1"
+  local date_epoch now_epoch
+
+  if [[ "$date_str" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then
+    date_epoch=$(date -d "$date_str" +%s 2>/dev/null || date -j -f "%Y-%m-%d" "$date_str" +%s 2>/dev/null)
+  else
+    echo "999"
+    return
+  fi
+
+  now_epoch=$(date +%s)
+  echo $(( (now_epoch - date_epoch) / 86400 ))
+}
+
+# Count learnings entries
+count_learnings() {
+  local file="$1"
+  local count
+  # Count lines starting with " - date:" in learnings section
+  count=$(grep -c "^ - date:" "$file" 2>/dev/null) || count=0
+  echo "$count"
+}
+
+# Main collection
+collect_metrics() {
+  local script_dir
+  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+  cd "$script_dir/.."
+
+  # Initialize counters
+  local total=0
+  local with_learnings=0
+  local total_lines=0
+  local recent_updates=0
+  local stale_7d=0
+  local stale_30d=0
+  local stale_90d=0
+  local total_learnings=0
+
+  # Collect data
+  local domains=()
+  local details=()
+
+  for dir in "$EXPERTS_DIR"/*/; do
+    local domain
+    domain=$(basename "$dir")
+    [ "$domain" = "templates" ] && continue
+
+    local file="$dir/expertise.yaml"
+    [ ! -f "$file" ] && continue
+
+    total=$((total + 1))
+    domains+=("$domain")
+
+    # File size
+    local lines
+    lines=$(get_line_count "$file")
+    total_lines=$((total_lines + lines))
+
+    # Learnings
+    local learnings_count
+    learnings_count=$(count_learnings "$file")
+    total_learnings=$((total_learnings + learnings_count))
+    if [ "$learnings_count" -gt 0 ]; then
+      with_learnings=$((with_learnings + 1))
+    fi
+
+    # Staleness
+    local last_updated days_old
+    last_updated=$(get_yaml_field "$file" "last_updated")
+    days_old=$(days_since "$last_updated")
+
+    if [ "$days_old" -le 7 ]; then
+      recent_updates=$((recent_updates + 1))
+    elif [ "$days_old" -le 30 ]; then
+      stale_7d=$((stale_7d + 1))
+    elif [ "$days_old" -le 90 ]; then
+      stale_30d=$((stale_30d + 1))
+    else
+      stale_90d=$((stale_90d + 1))
+    fi
+
+    # Store detail
+    details+=("$domain,$lines,$learnings_count,$days_old")
+  done
+
+  # Calculate averages
+  local avg_lines=0
+  if [ "$total" -gt 0 ]; then
+    avg_lines=$((total_lines / total))
+  fi
+
+  # Output based on format
+  local format="${1:-dashboard}"
+  local timestamp
+  timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+
+  case "$format" in
+    json)
+      echo "{"
+      echo "  \"timestamp\": \"$timestamp\","
+      echo "  \"total_experts\": $total,"
+      echo "  \"with_learnings\": $with_learnings,"
+      echo "  \"self_improve_rate\": $(awk -v w="$with_learnings" -v t="$total" 'BEGIN {printf "%.1f", (w / t) * 100}'),"
+      echo "  \"total_learnings\": $total_learnings,"
+      echo "  \"avg_file_lines\": $avg_lines,"
+      echo "  \"staleness\": {"
+      echo "    \"recent_7d\": $recent_updates,"
+      echo "    \"stale_8_30d\": $stale_7d,"
+      echo "    \"stale_31_90d\": $stale_30d,"
+      echo "    \"stale_90d_plus\": $stale_90d"
+      echo "  }"
+      echo "}"
+      ;;
+    csv)
+      local csv_rate
+      csv_rate=$(awk -v w="$with_learnings" -v t="$total" 'BEGIN {printf "%.1f", (w / t) * 100}')
+      echo "timestamp,total_experts,with_learnings,self_improve_rate,total_learnings,avg_file_lines,recent_7d,stale_8_30d,stale_31_90d,stale_90d_plus"
+      echo "$timestamp,$total,$with_learnings,$csv_rate,$total_learnings,$avg_lines,$recent_updates,$stale_7d,$stale_30d,$stale_90d"
+      ;;
+    dashboard)
+      echo ""
+      echo -e "${BLUE}╔════════════════════════════════════════════════╗${NC}"
+      echo -e "${BLUE}║         Agent Expert Metrics Dashboard         ║${NC}"
+      echo -e "${BLUE}╚════════════════════════════════════════════════╝${NC}"
+      echo ""
+      echo -e "${GREEN}Summary${NC}"
+      echo "─────────────────────────────────────────────────"
+      printf "%-30s %s\n" "Total Experts:" "$total"
+      local pct
+      pct=$(awk -v w="$with_learnings" -v t="$total" 'BEGIN {printf "%.0f", (w / t) * 100}')
+      printf "%-30s %s (%s%%)\n" "With Learnings:" "$with_learnings" "$pct"
+      printf "%-30s %s\n" "Total Learnings Recorded:" "$total_learnings"
+      printf "%-30s %s lines\n" "Avg File Size:" "$avg_lines"
+      echo ""
+      echo -e "${YELLOW}Staleness Distribution${NC}"
+      echo "─────────────────────────────────────────────────"
+      printf "%-30s %s\n" "Updated in last 7 days:" "$recent_updates"
+      printf "%-30s %s\n" "8-30 days old:" "$stale_7d"
+      printf "%-30s %s\n" "31-90 days old:" "$stale_30d"
+      printf "%-30s %s\n" "90+ days old:" "$stale_90d"
+      echo ""
+      echo -e "${BLUE}Self-Improve Health${NC}"
+      echo "─────────────────────────────────────────────────"
+      if [ "$with_learnings" -eq 0 ]; then
+        echo "No agents have self-improved yet"
+        echo "  Run agents and they will update expertise.yaml"
+      elif [ "$with_learnings" -lt $((total / 2)) ]; then
+        local rate
+        rate=$(awk -v w="$with_learnings" -v t="$total" 'BEGIN {printf "%.0f", (w / t) * 100}')
+        echo "$with_learnings/$total agents have started learning"
+        echo "  Self-improve rate: ${rate}%"
+      else
+        local avg
+        avg=$(awk -v l="$total_learnings" -v t="$total" 'BEGIN {printf "%.1f", l / t}')
+        echo "Good adoption: $with_learnings/$total agents learning"
+        echo "  Average learnings per expert: $avg"
+      fi
+      echo ""
+      echo "Generated: $timestamp"
+      ;;
+  esac
+}
+
+# Main
+main() {
+  case "$1" in
+    --help|-h)
+      show_help
+      exit 0
+      ;;
+    --json)
+      collect_metrics "json"
+      ;;
+    --csv)
+      collect_metrics "csv"
+      ;;
+    *)
+      collect_metrics "dashboard"
+      ;;
+  esac
+}
+
+main "$@"
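Worth noting: this script never parses YAML structurally; every metric comes from grepping fixed line shapes (^last_updated:, ^learnings:, and ^ - date: with a single leading space). A sketch of a minimal expertise.yaml that each metric would register, assuming the monorepo layout EXPERTS_DIR points at; the demo domain and entry text are invented:

mkdir -p packages/cli/src/core/experts/demo
cat > packages/cli/src/core/experts/demo/expertise.yaml <<'YAML'
domain: demo
last_updated: 2024-01-15
learnings:
 - date: 2024-01-15
   insight: example entry
YAML
./scripts/expertise-metrics.sh --json   # now counts one expert with one learning

The single-space indent before "- date:" is load-bearing: count_learnings matches "^ - date:" literally, so a deeper indent would report zero learnings.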
package/scripts/generate-all.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+###############################################################################
+# AgileFlow Content Generation Script
+#
+# Regenerates all dynamic content in AgileFlow plugin files.
+# Run this after:
+#   - Adding/removing/renaming commands
+#   - Adding/removing/renaming agents
+#   - Adding/removing/renaming skills
+#   - Changing command/agent/skill descriptions or metadata
+#
+# Usage:
+#   bash scripts/generate-all.sh
+#   npm run generate   (if added to package.json)
+###############################################################################
+
+set -e  # Exit on error
+
+# Get script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+GENERATORS_DIR="$SCRIPT_DIR/generators"
+
+# Colors for output
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+YELLOW='\033[1;33m'
+RED='\033[0;31m'
+NC='\033[0m' # No Color
+
+echo -e "${BLUE}============================================================${NC}"
+echo -e "${BLUE}  AgileFlow Content Generation System${NC}"
+echo -e "${BLUE}============================================================${NC}"
+echo ""
+
+# Check if Node.js is available
+if ! command -v node &> /dev/null; then
+  echo -e "${RED}❌ Error: Node.js is not installed or not in PATH${NC}"
+  exit 1
+fi
+
+# Check if generators directory exists
+if [ ! -d "$GENERATORS_DIR" ]; then
+  echo -e "${RED}❌ Error: Generators directory not found: $GENERATORS_DIR${NC}"
+  exit 1
+fi
+
+# Run the orchestrator
+echo -e "${YELLOW}Running content generators...${NC}"
+echo ""
+
+cd "$GENERATORS_DIR"
+node index.js
+
+EXIT_CODE=$?
+
+if [ $EXIT_CODE -eq 0 ]; then
+  echo ""
+  echo -e "${GREEN}============================================================${NC}"
+  echo -e "${GREEN}✅ Content generation completed successfully!${NC}"
+  echo -e "${GREEN}============================================================${NC}"
+  echo ""
+  echo -e "${YELLOW}Next steps:${NC}"
+  echo "1. Review the changes: git diff"
+  echo "2. Test the plugin to ensure everything works"
+  echo "3. Commit the generated content: git add -A && git commit -m 'chore: regenerate plugin content'"
+  echo ""
+else
+  echo ""
+  echo -e "${RED}============================================================${NC}"
+  echo -e "${RED}❌ Content generation failed${NC}"
+  echo -e "${RED}============================================================${NC}"
+  echo ""
+  echo -e "${YELLOW}Please check the errors above and fix any issues.${NC}"
+  echo ""
+  exit 1
+fi
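The header comment floats npm run generate as an optional entry point; this diff does not add such a script to package.json, but wiring it up is one command (a sketch using npm's pkg helper, available in npm 7.24+):

npm pkg set scripts.generate="bash scripts/generate-all.sh"
npm run generate

One reviewer-style caveat when reading the tail of the script: with set -e in effect, a non-zero exit from node index.js aborts the script immediately, so the EXIT_CODE=$? failure branch is effectively unreachable as written.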
package/scripts/generators/agent-registry.js
@@ -0,0 +1,167 @@
+#!/usr/bin/env node
+
+/**
+ * Agent Registry Scanner
+ *
+ * Scans agents/ directory and extracts metadata from frontmatter.
+ * Returns structured agent registry for use in generators.
+ */
+
+const fs = require('fs');
+const path = require('path');
+
+/**
+ * Extract YAML frontmatter from markdown file
+ * Handles multi-line values like tools arrays
+ * @param {string} filePath - Path to markdown file
+ * @returns {object} Frontmatter object
+ */
+function extractFrontmatter(filePath) {
+  const content = fs.readFileSync(filePath, 'utf-8');
+  const frontmatterMatch = content.match(/^---\n([\s\S]*?)\n---/);
+
+  if (!frontmatterMatch) {
+    return {};
+  }
+
+  const frontmatter = {};
+  const lines = frontmatterMatch[1].split('\n');
+  let currentKey = null;
+  let currentArray = null;
+
+  for (const line of lines) {
+    // Handle array items (lines starting with -)
+    if (line.trim().startsWith('-')) {
+      if (currentArray) {
+        currentArray.push(line.trim().substring(1).trim());
+      }
+      continue;
+    }
+
+    // Handle key-value pairs
+    const match = line.match(/^(\w+):\s*(.*)$/);
+    if (match) {
+      const [, key, value] = match;
+      currentKey = key;
+
+      // If value is empty, it's likely an array
+      if (!value) {
+        currentArray = [];
+        frontmatter[key] = currentArray;
+      } else {
+        // Remove quotes if present
+        frontmatter[key] = value.replace(/^["']|["']$/g, '');
+        currentArray = null;
+      }
+    }
+  }
+
+  return frontmatter;
+}
+
+/**
+ * Categorize agent based on its role
+ * @param {string} name - Agent name
+ * @param {string} description - Agent description
+ * @returns {string} Category name
+ */
+function categorizeAgent(name, description) {
+  const categories = {
+    'Core Development': ['ui', 'api', 'database', 'devops', 'ci'],
+    'Specialized Development': ['mobile', 'integrations', 'datamigration'],
+    'Quality & Testing': ['qa', 'testing', 'security', 'accessibility'],
+    'Architecture & Design': ['design', 'adr-writer', 'epic-planner', 'product'],
+    'Maintenance & Optimization': ['refactor', 'performance', 'monitoring'],
+    'Documentation & Knowledge': ['documentation', 'readme-updater', 'research'],
+    'Compliance & Governance': ['compliance', 'analytics'],
+    'Mentorship': ['mentor']
+  };
+
+  for (const [category, keywords] of Object.entries(categories)) {
+    if (keywords.some(kw => name.includes(kw))) {
+      return category;
+    }
+  }
+
+  return 'Other';
+}
+
+/**
+ * Scan agents directory and build registry
+ * @param {string} agentsDir - Path to agents directory
+ * @returns {Array} Array of agent metadata objects
+ */
+function scanAgents(agentsDir) {
+  const agents = [];
+  const files = fs.readdirSync(agentsDir);
+
+  for (const file of files) {
+    if (!file.endsWith('.md')) continue;
+
+    const filePath = path.join(agentsDir, file);
+    const frontmatter = extractFrontmatter(filePath);
+    const name = file.replace('.md', '');
+
+    // Parse tools array if it exists
+    let tools = [];
+    if (frontmatter.tools) {
+      if (Array.isArray(frontmatter.tools)) {
+        tools = frontmatter.tools;
+      } else if (typeof frontmatter.tools === 'string') {
+        tools = frontmatter.tools.split(',').map(t => t.trim());
+      }
+    }
+
+    agents.push({
+      name,
+      file,
+      path: filePath,
+      displayName: frontmatter.name || name,
+      description: frontmatter.description || '',
+      tools,
+      model: frontmatter.model || 'haiku',
+      color: frontmatter.color || 'blue',
+      category: categorizeAgent(name, frontmatter.description || '')
+    });
+  }
+
+  // Sort by category, then by name
+  agents.sort((a, b) => {
+    if (a.category !== b.category) {
+      return a.category.localeCompare(b.category);
+    }
+    return a.name.localeCompare(b.name);
+  });
+
+  return agents;
+}
+
+/**
+ * Main function
+ */
+function main() {
+  const rootDir = path.resolve(__dirname, '../..');
+  const agentsDir = path.join(rootDir, 'src/core/agents');
+
+  if (!fs.existsSync(agentsDir)) {
+    console.error(`Agents directory not found: ${agentsDir}`);
+    process.exit(1);
+  }
+
+  const agents = scanAgents(agentsDir);
+
+  // If called directly, output JSON
+  if (require.main === module) {
+    console.log(JSON.stringify(agents, null, 2));
+  }
+
+  return agents;
+}
+
+// Export for use in other scripts
+module.exports = { scanAgents, extractFrontmatter, categorizeAgent };
+
+// Run if called directly
+if (require.main === module) {
+  main();
+}
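agent-registry.js is both an executable and a library: run directly it prints the registry as JSON, and required from another generator it exposes scanAgents, extractFrontmatter, and categorizeAgent. A quick sketch exercising both entry points from the package root (the one-liner is illustrative; src/core/agents is the path main() resolves):

# Standalone: dump the full registry as JSON
node scripts/generators/agent-registry.js

# As a library: list agents grouped by category
node -e "
const { scanAgents } = require('./scripts/generators/agent-registry');
for (const a of scanAgents('src/core/agents')) console.log(a.category + ': ' + a.name);
"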