agileflow 2.44.0 → 2.45.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,293 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * obtain-context.js
4
+ *
5
+ * Gathers all project context in a single execution for any AgileFlow command or agent.
6
+ * Optionally registers the command/agent for PreCompact context preservation.
7
+ * Outputs structured summary to reduce tool calls and startup time.
8
+ *
9
+ * Usage:
10
+ * node scripts/obtain-context.js # Just gather context
11
+ * node scripts/obtain-context.js babysit # Gather + register 'babysit'
12
+ * node scripts/obtain-context.js mentor # Gather + register 'mentor'
13
+ */
14
+
15
+ const fs = require('fs');
16
+ const path = require('path');
17
+ const { execSync } = require('child_process');
18
+
19
// Optional registration: record the invoking command/agent in session state so
// the PreCompact hook can preserve its context across conversation compaction.
// NOTE(review): this writes `active_command` (singular object); confirm that
// downstream consumers reading `active_commands` (plural) accept this shape.
const commandName = process.argv[2];
if (commandName) {
  const sessionStatePath = 'docs/09-agents/session-state.json';
  if (fs.existsSync(sessionStatePath)) {
    try {
      const state = JSON.parse(fs.readFileSync(sessionStatePath, 'utf8'));
      state.active_command = {
        name: commandName,
        activated_at: new Date().toISOString(),
        state: {},
      };
      fs.writeFileSync(sessionStatePath, `${JSON.stringify(state, null, 2)}\n`);
    } catch {
      // Best-effort: a malformed or unwritable session state must not block
      // context gathering, so failures are deliberately ignored.
    }
  }
}
33
+
34
// ANSI escape codes for terminal styling; C.reset restores default attributes.
const C = {
  reset: '\x1b[0m',   // clear all styling
  dim: '\x1b[2m',     // faint text (secondary info)
  bold: '\x1b[1m',    // bold text (headings)
  cyan: '\x1b[36m',   // section headers
  yellow: '\x1b[33m', // warnings / in-progress
  green: '\x1b[32m',  // success / ready
  red: '\x1b[31m',    // blocked / errors
  magenta: '\x1b[35m', // report title
};
45
+
46
/**
 * Read a file as UTF-8 text without throwing.
 * @param {string} filePath - Path to the file.
 * @returns {string|null} File contents, or null if missing/unreadable.
 */
function safeRead(filePath) {
  let contents = null;
  try {
    contents = fs.readFileSync(filePath, 'utf8');
  } catch {
    // A missing or unreadable file is treated as "no content".
  }
  return contents;
}
53
+
54
/**
 * Read and parse a JSON file without throwing.
 * @param {string} filePath - Path to the JSON file.
 * @returns {*} Parsed value, or null if the file is missing or invalid JSON.
 */
function safeReadJSON(filePath) {
  try {
    const raw = fs.readFileSync(filePath, 'utf8');
    return JSON.parse(raw);
  } catch {
    // Unreadable or malformed JSON is treated as absent.
    return null;
  }
}
61
+
62
/**
 * List a directory's entries without throwing.
 * @param {string} dirPath - Directory to list.
 * @returns {string[]} Entry names, or [] if the directory is unreadable.
 */
function safeLs(dirPath) {
  let entries = [];
  try {
    entries = fs.readdirSync(dirPath);
  } catch {
    // A missing directory yields an empty listing.
  }
  return entries;
}
69
+
70
/**
 * Run a shell command and capture trimmed stdout without throwing.
 * @param {string} cmd - Command line passed to the shell.
 * @returns {string|null} Trimmed stdout, or null if the command failed.
 */
function safeExec(cmd) {
  try {
    const out = execSync(cmd, { encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] });
    return out.trim();
  } catch {
    // Non-zero exit, missing binary, etc. all collapse to "no result".
    return null;
  }
}
77
+
78
/** Print a bold cyan top-level section header, preceded by a blank line. */
function section(title) {
  console.log(`\n${C.cyan}${C.bold}═══ ${title} ═══${C.reset}`);
}
81
+
82
/** Print a dimmed subsection marker followed by the title. */
function subsection(title) {
  console.log(`${C.dim}───${C.reset} ${title}`);
}
85
+
86
// ============================================
// MAIN CONTEXT GATHERING
// ============================================

// Header: identify the run and timestamp it.
const title = commandName ? `AgileFlow Context [${commandName}]` : 'AgileFlow Context';
console.log(`${C.magenta}${C.bold}${title}${C.reset}`);
console.log(`${C.dim}Generated: ${new Date().toISOString()}${C.reset}`);

// 1. GIT STATUS — branch, last commit, working-tree cleanliness.
section('Git Status');
const branch = safeExec('git branch --show-current') || 'unknown';
const status = safeExec('git status --short') || '';
const statusLines = status.split('\n').filter(Boolean);
const lastCommit = safeExec('git log -1 --format="%h %s"') || 'no commits';

console.log(`Branch: ${C.green}${branch}${C.reset}`);
console.log(`Last commit: ${C.dim}${lastCommit}${C.reset}`);
if (statusLines.length === 0) {
  console.log(`Uncommitted: ${C.green}clean${C.reset}`);
} else {
  console.log(`Uncommitted: ${C.yellow}${statusLines.length} file(s)${C.reset}`);
  // Cap the listing at 10 files to keep the report compact.
  for (const line of statusLines.slice(0, 10)) {
    console.log(` ${C.dim}${line}${C.reset}`);
  }
  if (statusLines.length > 10) {
    console.log(` ${C.dim}... and ${statusLines.length - 10} more${C.reset}`);
  }
}
110
+
111
// 2. STATUS.JSON — epic and story summaries.
section('Stories & Epics');
const statusJson = safeReadJSON('docs/09-agents/status.json');
if (!statusJson) {
  console.log(`${C.dim}No status.json found${C.reset}`);
} else {
  // Epics, colored by lifecycle state.
  const epicList = Object.entries(statusJson.epics || {});
  if (epicList.length > 0) {
    subsection('Epics');
    for (const [id, epic] of epicList) {
      const statusColor =
        epic.status === 'complete' ? C.green : epic.status === 'active' ? C.yellow : C.dim;
      console.log(` ${id}: ${epic.title} ${statusColor}[${epic.status}]${C.reset}`);
    }
  }

  // Group stories by status.
  const byStatus = {};
  for (const [id, story] of Object.entries(statusJson.stories || {})) {
    const s = story.status || 'unknown';
    if (!byStatus[s]) byStatus[s] = [];
    byStatus[s].push({ id, ...story });
  }

  // Display order: actionable statuses first, finished work last.
  const statusOrder = ['in-progress', 'ready', 'blocked', 'draft', 'in-review', 'done'];

  subsection('Stories by Status');
  for (const st of statusOrder) {
    const group = byStatus[st];
    if (!group || group.length === 0) continue;
    const color =
      st === 'in-progress' ? C.yellow :
      st === 'ready' ? C.green :
      st === 'blocked' ? C.red :
      st === 'done' ? C.dim : C.reset;
    console.log(` ${color}${st}${C.reset}: ${group.length}`);
    // Show at most five stories per status.
    for (const story of group.slice(0, 5)) {
      console.log(` ${C.dim}${story.id}: ${story.title}${C.reset}`);
    }
    if (group.length > 5) {
      console.log(` ${C.dim}... and ${group.length - 5} more${C.reset}`);
    }
  }

  // Highlight READY stories prominently — these are immediately actionable.
  const ready = byStatus['ready'] || [];
  if (ready.length > 0) {
    subsection(`${C.green}⭐ Ready to Implement${C.reset}`);
    for (const story of ready) {
      console.log(` ${story.id}: ${story.title} (${story.epic || 'no epic'})`);
    }
  }
}
166
+
167
// 3. SESSION STATE — current/last session timing and any registered command.
section('Session State');
const sessionState = safeReadJSON('docs/09-agents/session-state.json');
if (!sessionState) {
  console.log(`${C.dim}No session-state.json found${C.reset}`);
} else {
  const current = sessionState.current_session;
  if (current && current.started_at) {
    const started = new Date(current.started_at);
    // Elapsed minutes since the session began.
    const duration = Math.round((Date.now() - started.getTime()) / 60000);
    console.log(`Active session: ${C.green}${duration} min${C.reset}`);
    if (current.current_story) {
      console.log(`Working on: ${C.yellow}${current.current_story}${C.reset}`);
    }
  } else {
    console.log(`${C.dim}No active session${C.reset}`);
  }

  const last = sessionState.last_session;
  if (last && last.ended_at) {
    console.log(`Last session: ${C.dim}${last.ended_at} (${last.duration_minutes || '?'} min)${C.reset}`);
    if (last.summary) console.log(` Summary: ${C.dim}${last.summary}${C.reset}`);
  }

  // Command registered for PreCompact context preservation (see top of file).
  if (sessionState.active_command) {
    console.log(`Active command: ${C.cyan}${sessionState.active_command.name}${C.reset}`);
  }
}
196
+
197
// 4. DOCS STRUCTURE — one line per docs/ subfolder with md/json file counts.
section('Documentation');
const docsDir = 'docs';
// Predicate: is this docs/ entry a directory? (stat failures count as "no")
const isDocsSubdir = (name) => {
  try {
    return fs.statSync(path.join(docsDir, name)).isDirectory();
  } catch {
    return false;
  }
};
const docFolders = safeLs(docsDir).filter(isDocsSubdir);

for (const folder of docFolders) {
  const files = safeLs(path.join(docsDir, folder));
  const mdCount = files.filter((f) => f.endsWith('.md')).length;
  const jsonCount = files.filter((f) => f.endsWith('.json') || f.endsWith('.jsonl')).length;

  const info = [];
  if (mdCount > 0) info.push(`${mdCount} md`);
  if (jsonCount > 0) info.push(`${jsonCount} json`);

  console.log(` ${C.dim}${folder}/${C.reset} ${info.length > 0 ? `(${info.join(', ')})` : ''}`);
}
222
+
223
// 5. RESEARCH NOTES — newest five. Filenames start with YYYYMMDD, so a
// reverse lexicographic sort is newest-first.
section('Research Notes');
const researchDir = 'docs/10-research';
const researchFiles = safeLs(researchDir).filter((f) => f.endsWith('.md') && f !== 'README.md');
if (researchFiles.length === 0) {
  console.log(`${C.dim}No research notes${C.reset}`);
} else {
  researchFiles.sort().reverse();
  for (const file of researchFiles.slice(0, 5)) {
    console.log(` ${C.dim}${file}${C.reset}`);
  }
  if (researchFiles.length > 5) {
    console.log(` ${C.dim}... and ${researchFiles.length - 5} more${C.reset}`);
  }
}
239
+
240
// 6. BUS MESSAGES — last five entries of the agent message log (JSONL).
section('Recent Agent Messages');
const busPath = 'docs/09-agents/bus/log.jsonl';
const busContent = safeRead(busPath);
if (!busContent) {
  console.log(`${C.dim}No bus log found${C.reset}`);
} else {
  const lines = busContent.trim().split('\n').filter(Boolean);
  const recent = lines.slice(-5);
  if (recent.length === 0) {
    console.log(`${C.dim}No messages${C.reset}`);
  } else {
    for (const line of recent) {
      try {
        const msg = JSON.parse(line);
        const time = msg.timestamp ? new Date(msg.timestamp).toLocaleTimeString() : '?';
        console.log(` ${C.dim}[${time}]${C.reset} ${msg.from || '?'}: ${msg.type || msg.message || '?'}`);
      } catch {
        // Unparseable line: show a truncated raw preview instead.
        console.log(` ${C.dim}${line.substring(0, 80)}...${C.reset}`);
      }
    }
  }
}
263
+
264
// 7. KEY FILES — presence check for the documents agents rely on.
section('Key Files');
const keyFiles = [
  { path: 'CLAUDE.md', label: 'CLAUDE.md (project instructions)' },
  { path: 'README.md', label: 'README.md (project overview)' },
  { path: 'docs/08-project/roadmap.md', label: 'Roadmap' },
  { path: 'docs/02-practices/README.md', label: 'Practices index' },
  { path: '.claude/settings.json', label: 'Claude settings' },
];

for (const { path: filePath, label } of keyFiles) {
  // ✓ = present, ○ = absent.
  const icon = fs.existsSync(filePath) ? `${C.green}✓${C.reset}` : `${C.dim}○${C.reset}`;
  console.log(` ${icon} ${label}`);
}
279
+
280
// 8. EPIC FILES — list every epic markdown document.
section('Epic Files');
const epicFiles = safeLs('docs/05-epics').filter((f) => f.endsWith('.md') && f !== 'README.md');
if (epicFiles.length === 0) {
  console.log(`${C.dim}No epic files${C.reset}`);
} else {
  for (const file of epicFiles) {
    console.log(` ${C.dim}${file}${C.reset}`);
  }
}

// FOOTER
console.log(`\n${C.dim}─────────────────────────────────────────${C.reset}`);
console.log(`${C.dim}Context gathered in single execution. Ready for task selection.${C.reset}\n`);
@@ -0,0 +1,123 @@
1
#!/bin/bash
#
# AgileFlow PreCompact Hook
# Outputs critical context that should survive conversation compaction.
#

# Current package version (from package.json), or "unknown" outside a package.
VERSION=$(node -p "require('./package.json').version" 2>/dev/null || echo "unknown")

# Current git branch, or "unknown" outside a repository.
BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown")

# In-progress stories and WIP count from status.json.
# NOTE(review): this filters on status === 'in_progress' (underscore) while
# obtain-context.js groups on 'in-progress' (hyphen) — confirm which spelling
# status.json actually uses; one of the two likely never matches.
CURRENT_STORY=""
WIP_COUNT=0
if [ -f "docs/09-agents/status.json" ]; then
  CURRENT_STORY=$(node -p "
  const s = require('./docs/09-agents/status.json');
  const stories = Object.entries(s.stories || {})
    .filter(([,v]) => v.status === 'in_progress')
    .map(([k,v]) => k + ': ' + v.title)
    .join(', ');
  stories || 'None in progress';
  " 2>/dev/null || echo "Unable to read")

  WIP_COUNT=$(node -p "
  const s = require('./docs/09-agents/status.json');
  Object.values(s.stories || {}).filter(v => v.status === 'in_progress').length;
  " 2>/dev/null || echo "0")
fi
31
+
32
# First eight practice docs (basenames without .md, comma-separated).
PRACTICES=""
if [ -d "docs/02-practices" ]; then
  PRACTICES=$(ls docs/02-practices/*.md 2>/dev/null | head -8 | xargs -I {} basename {} .md | tr '\n' ',' | sed 's/,$//')
fi

# First five epic entries (comma-separated).
EPICS=""
if [ -d "docs/05-epics" ]; then
  EPICS=$(ls docs/05-epics/ 2>/dev/null | head -5 | tr '\n' ',' | sed 's/,$//')
fi
43
+
44
# Detect active commands and extract their Compact Summaries.
#
# Fix: session state may contain either `active_commands` (an array) or the
# `active_command` object written by scripts/obtain-context.js. The previous
# code only read the plural form, so commands registered by obtain-context.js
# were never picked up — accept both shapes here.
COMMAND_SUMMARIES=""
if [ -f "docs/09-agents/session-state.json" ]; then
  ACTIVE_COMMANDS=$(node -p "
  const s = require('./docs/09-agents/session-state.json');
  const list = Array.isArray(s.active_commands)
    ? s.active_commands
    : (s.active_command ? [s.active_command] : []);
  list.map(c => c.name).join(' ');
  " 2>/dev/null || echo "")

  for ACTIVE_COMMAND in $ACTIVE_COMMANDS; do
    [ -z "$ACTIVE_COMMAND" ] && continue

    # Locate the command definition, preferring the in-repo source tree.
    COMMAND_FILE=""
    if [ -f "packages/cli/src/core/commands/${ACTIVE_COMMAND}.md" ]; then
      COMMAND_FILE="packages/cli/src/core/commands/${ACTIVE_COMMAND}.md"
    elif [ -f ".agileflow/commands/${ACTIVE_COMMAND}.md" ]; then
      COMMAND_FILE=".agileflow/commands/${ACTIVE_COMMAND}.md"
    elif [ -f ".claude/commands/agileflow/${ACTIVE_COMMAND}.md" ]; then
      COMMAND_FILE=".claude/commands/agileflow/${ACTIVE_COMMAND}.md"
    fi

    if [ -n "$COMMAND_FILE" ]; then
      # Pass the path and name through the environment instead of interpolating
      # them into the JS source, so unusual characters cannot break the script.
      SUMMARY=$(COMMAND_FILE="$COMMAND_FILE" ACTIVE_COMMAND="$ACTIVE_COMMAND" node -e '
        const fs = require("fs");
        const content = fs.readFileSync(process.env.COMMAND_FILE, "utf8");
        const match = content.match(/<!-- COMPACT_SUMMARY_START[\s\S]*?-->([\s\S]*?)<!-- COMPACT_SUMMARY_END -->/);
        if (match) {
          console.log("## ACTIVE COMMAND: /agileflow:" + process.env.ACTIVE_COMMAND);
          console.log("");
          console.log(match[1].trim());
        }
      ' 2>/dev/null || echo "")

      if [ -n "$SUMMARY" ]; then
        COMMAND_SUMMARIES="${COMMAND_SUMMARIES}

${SUMMARY}"
      fi
    fi
  done
fi
84
+
85
# Emit the preserved-context block. The heredoc is unquoted so the variables
# and command substitutions below are expanded at output time.
cat << EOF
AGILEFLOW PROJECT CONTEXT (preserve during compact):

## Project Status
- Project: AgileFlow v${VERSION}
- Branch: ${BRANCH}
- Active Stories: ${CURRENT_STORY}
- WIP Count: ${WIP_COUNT}

## Key Files to Check After Compact
- CLAUDE.md - Project system prompt with conventions
- README.md - Project overview and setup
- docs/09-agents/status.json - Story statuses and assignments
- docs/02-practices/ - Codebase practices (${PRACTICES:-check folder})

## Active Epics
${EPICS:-Check docs/05-epics/ for epic files}

## Key Conventions (from CLAUDE.md)
$(grep -A 15 "## Key\|## Critical\|## Important\|CRITICAL:" CLAUDE.md 2>/dev/null | head -20 || echo "- Read CLAUDE.md for project conventions")

## Recent Agent Activity
$(tail -3 docs/09-agents/bus/log.jsonl 2>/dev/null | head -3 || echo "")
EOF

# Append any active-command compact summaries collected above.
if [ -n "$COMMAND_SUMMARIES" ]; then
  echo "$COMMAND_SUMMARIES"
fi

cat << EOF

## Post-Compact Actions
1. Re-read CLAUDE.md if unsure about conventions
2. Check status.json for current story state
3. Review docs/02-practices/ for implementation patterns
4. Check git log for recent changes
EOF
@@ -0,0 +1,259 @@
1
#!/bin/bash
#
# validate-expertise.sh - Validate Agent Expert expertise.yaml files
#
# Purpose: Ensure expertise files remain accurate and useful over time
#
# Checks performed:
#   1. Schema validation (required fields: domain, last_updated, version)
#   2. Staleness check (last_updated > 30 days old)
#   3. File size check (warn if > 200 lines)
#   4. Learnings check (warn if empty - never self-improved)
#
# Usage:
#   ./scripts/validate-expertise.sh            # Validate all expertise files
#   ./scripts/validate-expertise.sh database   # Validate specific domain
#   ./scripts/validate-expertise.sh --help     # Show help
#
# Exit codes:
#   0 - All checks passed
#   1 - One or more checks failed (warnings don't cause failure)
#

set -e

# Terminal colors
RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Tunables
EXPERTS_DIR="packages/cli/src/core/experts"
STALE_THRESHOLD_DAYS=30
MAX_LINES=200

# Result counters, updated by validate_expertise and reported at the end.
TOTAL=0
PASSED=0
WARNINGS=0
FAILED=0
42
+
43
# Print usage information to stdout.
show_help() {
  cat << HELP
Usage: $0 [domain]

Validate Agent Expert expertise.yaml files

Options:
  domain    Validate only the specified domain (e.g., 'database', 'testing')
  --help    Show this help message

Checks performed:
  - Schema validation (domain, last_updated, version fields)
  - Staleness check (last_updated > $STALE_THRESHOLD_DAYS days)
  - File size check (> $MAX_LINES lines)
  - Learnings check (empty learnings array)

Examples:
  $0              # Validate all expertise files
  $0 database     # Validate only database domain
  $0 --help       # Show this help
HELP
}
64
+
65
# True (exit 0) if yq is installed, enabling richer YAML parsing.
# NOTE(review): not currently called anywhere in this script — confirm whether
# it is reserved for future use or can be removed.
has_yq() {
  command -v yq > /dev/null 2>&1
}
69
+
70
# Extract a top-level YAML scalar with grep/sed (fallback when yq is absent).
# $1 = file path, $2 = field name. Echoes the value with surrounding double
# quotes stripped, or the empty string when the field is not present.
get_yaml_field() {
  local yaml_file="$1"
  local key="$2"
  grep "^${key}:" "$yaml_file" 2>/dev/null | sed "s/^${key}:[[:space:]]*//" | tr -d '"' || echo ""
}
76
+
77
# Return 0 (true) when the learnings section of $1 has no real entries.
learnings_empty() {
  local yaml_file="$1"

  # Case 1: an explicit empty array — "learnings: []".
  if grep -q "^learnings: \[\]" "$yaml_file" 2>/dev/null; then
    return 0
  fi

  # Case 2: scan from "learnings:" to the next top-level key; if nothing but
  # comments and blank lines appears, the section is effectively empty.
  local first_entry
  first_entry=$(sed -n '/^learnings:/,/^[a-z]/p' "$yaml_file" | grep -v "^#" | grep -v "^learnings:" | grep -v "^$" | head -1)
  if [ -z "$first_entry" ]; then
    return 0
  fi

  return 1
}
92
+
93
# Echo the number of lines in file $1 (whitespace stripped from wc output,
# since BSD wc pads its counts).
get_line_count() {
  wc -l < "$1" | tr -d ' '
}
97
+
98
# Echo the whole number of days between ISO date $1 (YYYY-MM-DD) and now.
# Echoes 999 for malformed or unparseable dates so callers treat them as stale.
#
# Fixes: under `set -e`, the previous `date_epoch=$( A || B )` aborted the
# whole script when both GNU and BSD date failed to parse the value, and an
# empty epoch would then have caused a bash arithmetic error. Both paths now
# degrade to the 999 sentinel instead.
days_since() {
  local date_str="$1"
  local date_epoch
  local now_epoch

  # Only ISO-shaped dates are supported; anything else is reported as stale.
  if [[ ! "$date_str" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]; then
    echo "999"
    return
  fi

  # Try GNU date (-d) first, then BSD/macOS date (-j -f). The trailing
  # `|| true` keeps `set -e` from killing the script when both fail.
  date_epoch=$(date -d "$date_str" +%s 2>/dev/null || date -j -f "%Y-%m-%d" "$date_str" +%s 2>/dev/null) || true

  # Guard the arithmetic below against an empty value.
  if [ -z "$date_epoch" ]; then
    echo "999"
    return
  fi

  now_epoch=$(date +%s)
  echo $(( (now_epoch - date_epoch) / 86400 ))
}
115
+
116
# Validate one domain's expertise.yaml: prints a PASS/WARN/FAIL line, records
# named issues, and updates the global counters. Returns 1 only when the
# expertise file is missing entirely.
validate_expertise() {
  local domain="$1"
  local file="$EXPERTS_DIR/$domain/expertise.yaml"
  local result="PASS"
  local issues=()

  TOTAL=$((TOTAL + 1))

  # A missing file is an immediate failure — nothing else can be checked.
  if [ ! -f "$file" ]; then
    echo -e "${RED}FAIL${NC} $domain - File not found: $file"
    FAILED=$((FAILED + 1))
    return 1
  fi

  # --- Schema: the three required top-level fields ---
  local domain_field version last_updated
  domain_field=$(get_yaml_field "$file" "domain")
  version=$(get_yaml_field "$file" "version")
  last_updated=$(get_yaml_field "$file" "last_updated")

  if [ -z "$domain_field" ]; then
    issues+=("missing 'domain' field")
    result="FAIL"
  fi

  if [ -z "$version" ]; then
    issues+=("missing 'version' field")
    result="FAIL"
  fi

  if [ -z "$last_updated" ]; then
    issues+=("missing 'last_updated' field")
    result="FAIL"
  fi

  # --- Staleness: warn when last_updated is older than the threshold ---
  if [ -n "$last_updated" ]; then
    local days_old
    days_old=$(days_since "$last_updated")
    if [ "$days_old" -gt "$STALE_THRESHOLD_DAYS" ]; then
      issues+=("stale (${days_old} days since update)")
      if [ "$result" = "PASS" ]; then
        result="WARN"
      fi
    fi
  fi

  # --- Size: warn on oversized files ---
  local line_count
  line_count=$(get_line_count "$file")
  if [ "$line_count" -gt "$MAX_LINES" ]; then
    issues+=("large file (${line_count} lines > ${MAX_LINES})")
    if [ "$result" = "PASS" ]; then
      result="WARN"
    fi
  fi

  # --- Learnings: warn when the expert has never self-improved ---
  if learnings_empty "$file"; then
    issues+=("no learnings recorded (never self-improved)")
    if [ "$result" = "PASS" ]; then
      result="WARN"
    fi
  fi

  # --- Report ---
  case "$result" in
    PASS)
      echo -e "${GREEN}PASS${NC} $domain"
      PASSED=$((PASSED + 1))
      ;;
    WARN)
      echo -e "${YELLOW}WARN${NC} $domain - ${issues[*]}"
      WARNINGS=$((WARNINGS + 1))
      ;;
    FAIL)
      echo -e "${RED}FAIL${NC} $domain - ${issues[*]}"
      FAILED=$((FAILED + 1))
      ;;
  esac
}
199
+
200
# Entry point: parse arguments, move to the repo root, validate one or all
# domains, print a summary, and exit 1 if any validation failed.
#
# Fix: validate_expertise returns 1 when an expertise file is missing; under
# `set -e` a bare call therefore aborted the script before the summary and the
# final exit-code logic ever ran. The calls below append `|| true` so the
# FAILED counter (not an early abort) drives the exit status.
main() {
  if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
    show_help
    exit 0
  fi

  # Run from the repository root regardless of invocation directory.
  local script_dir
  script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  cd "$script_dir/.."

  if [ ! -d "$EXPERTS_DIR" ]; then
    echo -e "${RED}Error:${NC} Experts directory not found: $EXPERTS_DIR"
    echo "Are you running this from the repository root?"
    exit 1
  fi

  echo -e "${BLUE}Validating Agent Expert Files${NC}"
  echo "================================"
  echo ""

  if [ -n "$1" ]; then
    # Single requested domain.
    if [ ! -d "$EXPERTS_DIR/$1" ]; then
      echo -e "${RED}Error:${NC} Domain not found: $1"
      echo "Available domains:"
      ls -1 "$EXPERTS_DIR" | grep -v templates | grep -v README
      exit 1
    fi
    validate_expertise "$1" || true
  else
    # Every domain directory except the scaffolding in templates/.
    local domain
    for dir in "$EXPERTS_DIR"/*/; do
      domain=$(basename "$dir")
      if [ "$domain" = "templates" ]; then
        continue
      fi
      validate_expertise "$domain" || true
    done
  fi

  # Summary
  echo ""
  echo "================================"
  echo -e "Total: $TOTAL | ${GREEN}Passed: $PASSED${NC} | ${YELLOW}Warnings: $WARNINGS${NC} | ${RED}Failed: $FAILED${NC}"

  # Failures drive the exit code; warnings do not.
  if [ "$FAILED" -gt 0 ]; then
    exit 1
  fi
  exit 0
}

main "$@"