claude-flow-novice 2.14.9 → 2.14.10

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
@@ -707,6 +707,17 @@ EOF
707
707
  # Main CFN Loop
708
708
  ##############################################################################
709
709
 
710
+ # Validate CLI environment before spawning agents
711
+ echo "🔧 Validating CLI environment..."
712
+ if [ -f "$PROJECT_ROOT/.claude/skills/cfn-cli-setup/validate-cli-environment.sh" ]; then
713
+ if ! bash "$PROJECT_ROOT/.claude/skills/cfn-cli-setup/validate-cli-environment.sh"; then
714
+ echo "❌ CLI environment validation failed. Agents may not have required tools."
715
+ echo "⚠️ Continuing anyway, but expect potential tool failures..."
716
+ fi
717
+ else
718
+ echo "⚠️ CLI environment validation script not found. Skipping validation."
719
+ fi
720
+
710
721
  # Store context in Redis
711
722
  store_context "$TASK_ID"
712
723
 
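The validation step added above is advisory: when the validator fails, the loop only prints a warning and still spawns agents. The validator's exit code would also support a hard gate if a caller wanted one; a minimal strict variant (not what the loop does) would be:

  bash "$PROJECT_ROOT/.claude/skills/cfn-cli-setup/validate-cli-environment.sh" || exit 1  # abort instead of warning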
@@ -764,11 +775,41 @@ for ((ITERATION=1; ITERATION<=MAX_ITERATIONS; ITERATION++)); do
764
775
  --agents "$LOOP3_IDS" \
765
776
  --threshold "$GATE" \
766
777
  --min-quorum "$MIN_QUORUM_LOOP3"; then
767
- # Gate passed - store confidence
768
- LOOP3_FINAL_CONFIDENCE=$("$REDIS_COORD_SKILL/invoke-waiting-mode.sh" collect \
769
- --task-id "$TASK_ID" \
770
- --agent-ids "$LOOP3_IDS" \
771
- --min-quorum "$MIN_QUORUM_LOOP3")
778
+ # Gate passed - validate confidence based on deliverables
779
+ echo "🔍 Validating agent confidence scores against deliverables..."
780
+
781
+ # Re-calculate confidence based on actual deliverables
782
+ if [ -n "$EXPECTED_FILES" ] && [ -f "$PROJECT_ROOT/.claude/skills/cfn-deliverable-validation/confidence-calculator.sh" ]; then
783
+ VALIDATED_CONFIDENCE=0
784
+
785
+ for agent_id in ${LOOP3_IDS//,/ }; do
786
+ # Get agent's reported confidence
787
+ agent_confidence=$(redis-cli get "swarm:${TASK_ID}:${agent_id}:confidence" 2>/dev/null || true); agent_confidence=${agent_confidence:-0.5}
788
+
789
+ # Calculate deliverable-based confidence
790
+ deliverable_confidence=$("$PROJECT_ROOT/.claude/skills/cfn-deliverable-validation/confidence-calculator.sh" \
791
+ "$TASK_ID" "$agent_id" "$EXPECTED_FILES" "$PROJECT_ROOT")
792
+
793
+ echo " Agent $agent_id: reported=$agent_confidence, deliverable-based=$deliverable_confidence"
794
+
795
+ # Use the lower of the two scores (inflation prevention)
796
+ if (( $(echo "$deliverable_confidence < $agent_confidence" | bc -l) )); then
797
+ echo " ⚠️ Downgrading confidence for $agent_id (inflated score detected)"
798
+ VALIDATED_CONFIDENCE=$deliverable_confidence
799
+ else
800
+ VALIDATED_CONFIDENCE=$agent_confidence
801
+ fi
802
+ done
803
+
804
+ LOOP3_FINAL_CONFIDENCE=$VALIDATED_CONFIDENCE
805
+ echo "✅ Final validated Loop 3 confidence: $LOOP3_FINAL_CONFIDENCE"
806
+ else
807
+ # Store confidence (fallback method)
808
+ LOOP3_FINAL_CONFIDENCE=$("$REDIS_COORD_SKILL/invoke-waiting-mode.sh" collect \
809
+ --task-id "$TASK_ID" \
810
+ --agent-ids "$LOOP3_IDS" \
811
+ --min-quorum "$MIN_QUORUM_LOOP3")
812
+ fi
772
813
  else
773
814
  # Gate failed - iterate Loop 3
774
815
  echo "❌ Gate check failed - iterating Loop 3"
@@ -0,0 +1,192 @@
1
+ #!/bin/bash
2
+ # CLI Environment Validation Script
3
+ # Ensures required tools are available before agent deployment
4
+
5
+ set -euo pipefail
6
+
7
+ # Colors for output
8
+ RED='\033[0;31m'
9
+ GREEN='\033[0;32m'
10
+ YELLOW='\033[1;33m'
11
+ NC='\033[0m' # No Color
12
+
13
+ # Required tools for CFN Loop CLI agents
14
+ REQUIRED_TOOLS=(
15
+ "rg:ripgrep"
16
+ "git"
17
+ "node"
18
+ "npm"
19
+ "jq"
20
+ "redis-cli"
21
+ "find"
22
+ "grep"
23
+ "sed"
24
+ "awk"
25
+ "sort"
26
+ "uniq"
27
+ "head"
28
+ "tail"
29
+ "wc"
30
+ "xargs"
31
+ )
32
+
33
+ echo -e "${GREEN}🔧 CFN Loop CLI Environment Validation${NC}"
34
+
35
+ # Function to check if a tool is available
36
+ check_tool() {
37
+ local tool_name="$1"
38
+ local tool_description="$2"
39
+
40
+ if command -v "$tool_name" >/dev/null 2>&1; then
41
+ echo -e " ${GREEN}✓${NC} $tool_name ($tool_description)"
42
+ return 0
43
+ else
44
+ echo -e " ${RED}✗${NC} $tool_name ($tool_description) - ${YELLOW}MISSING${NC}"
45
+ return 1
46
+ fi
47
+ }
48
+
49
+ # Function to validate Node.js version
50
+ validate_node_version() {
51
+ if command -v node >/dev/null 2>&1; then
52
+ local node_version=$(node --version 2>/dev/null | sed 's/v//')
53
+ local major_version=$(echo "$node_version" | cut -d. -f1)
54
+
55
+ if [ "$major_version" -ge 18 ]; then
56
+ echo -e " ${GREEN}✓${NC} Node.js v$node_version (>= 18 required)"
57
+ return 0
58
+ else
59
+ echo -e " ${RED}✗${NC} Node.js v$node_version (>= 18 required) - ${YELLOW}VERSION TOO OLD${NC}"
60
+ return 1
61
+ fi
62
+ else
63
+ echo -e " ${RED}✗${NC} Node.js - ${YELLOW}NOT FOUND${NC}"
64
+ return 1
65
+ fi
66
+ }
67
+
68
+ # Function to check PATH for common development directories
69
+ validate_path() {
70
+ echo -e "${YELLOW}Validating PATH...${NC}"
71
+
72
+ # Check for common development tool paths
73
+ local paths_to_check=(
74
+ "/usr/local/bin"
75
+ "/usr/bin"
76
+ "/bin"
77
+ "$HOME/.local/bin"
78
+ "$HOME/.cargo/bin"
79
+ "$HOME/.npm-global/bin"
80
+ "./node_modules/.bin"
81
+ )
82
+
83
+ local found_paths=0
84
+ for path in "${paths_to_check[@]}"; do
85
+ if [ -d "$path" ] && echo "$PATH" | grep -q "$path"; then
86
+ echo -e " ${GREEN}✓${NC} $path in PATH"
87
+ ((found_paths++))
88
+ fi
89
+ done
90
+
91
+ if [ "$found_paths" -eq 0 ]; then
92
+ echo -e " ${YELLOW}⚠${NC} Limited development directories in PATH"
93
+ return 1
94
+ fi
95
+
96
+ return 0
97
+ }
98
+
99
+ # Function to validate Redis connection
100
+ validate_redis() {
101
+ echo -e "${YELLOW}Validating Redis connection...${NC}"
102
+
103
+ if redis-cli ping >/dev/null 2>&1; then
104
+ local redis_info=$(redis-cli info server 2>/dev/null | grep "redis_version" | cut -d: -f2 | tr -d '\r')
105
+ echo -e " ${GREEN}✓${NC} Redis v$redis_info - Connected"
106
+ return 0
107
+ else
108
+ echo -e " ${RED}✗${NC} Redis - ${YELLOW}NOT CONNECTED${NC}"
109
+ return 1
110
+ fi
111
+ }
112
+
113
+ # Function to validate working directory
114
+ validate_working_directory() {
115
+ echo -e "${YELLOW}Validating working directory...${NC}"
116
+
117
+ # Check if we're in a git repository
118
+ if git rev-parse --git-dir >/dev/null 2>&1; then
119
+ echo -e " ${GREEN}✓${NC} Git repository detected"
120
+ else
121
+ echo -e " ${YELLOW}⚠${NC} Not in a git repository"
122
+ fi
123
+
124
+ # Check for claude-flow-novice installation
125
+ if [ -f "package.json" ] && grep -q "claude-flow-novice" package.json; then
126
+ echo -e " ${GREEN}✓${NC} claude-flow-novice dependency found"
127
+ else
128
+ echo -e " ${YELLOW}⚠${NC} claude-flow-novice not found in package.json"
129
+ fi
130
+
131
+ # Check for .claude directory
132
+ if [ -d ".claude" ]; then
133
+ echo -e " ${GREEN}✓${NC} .claude directory found"
134
+ else
135
+ echo -e " ${YELLOW}⚠${NC} .claude directory not found"
136
+ fi
137
+ }
138
+
139
+ # Function to install missing tools (suggestions)
140
+ suggest_installations() {
141
+ echo -e "\n${YELLOW}Installation Suggestions:${NC}"
142
+ echo -e " ${YELLOW}ripgrep:${NC} sudo apt-get install ripgrep # Debian/Ubuntu"
143
+ echo -e " ${YELLOW}ripgrep:${NC} brew install ripgrep # macOS"
144
+ echo -e " ${YELLOW}jq:${NC} sudo apt-get install jq # Debian/Ubuntu"
145
+ echo -e " ${YELLOW}jq:${NC} brew install jq # macOS"
146
+ echo -e " ${YELLOW}Redis:${NC} sudo systemctl start redis # Linux"
147
+ echo -e " ${YELLOW}Redis:${NC} brew services start redis # macOS"
148
+ }
149
+
150
+ # Main validation
151
+ main() {
152
+ local failed=0
153
+
154
+ echo -e "${YELLOW}Checking required tools...${NC}"
155
+ for tool in "${REQUIRED_TOOLS[@]}"; do
156
+ local tool_name=$(echo "$tool" | cut -d: -f1)
157
+ local tool_description=$(echo "$tool" | cut -d: -f2)
158
+
159
+ if ! check_tool "$tool_name" "$tool_description"; then
160
+ failed=$((failed + 1))
161
+ fi
162
+ done
163
+
164
+ echo -e "\n${YELLOW}Checking Node.js version...${NC}"
165
+ if ! validate_node_version; then
166
+ failed=$((failed + 1))
167
+ fi
168
+
169
+ echo -e "\n${YELLOW}Checking environment configuration...${NC}"
170
+ if ! validate_path; then
171
+ failed=$((failed + 1))
172
+ fi
173
+
174
+ if ! validate_redis; then
175
+ failed=$((failed + 1))
176
+ fi
177
+
178
+ validate_working_directory
179
+
180
+ echo -e "\n${GREEN}=== Validation Summary ===${NC}"
181
+ if [ "$failed" -eq 0 ]; then
182
+ echo -e "${GREEN}✅ All validations passed! CLI environment is ready.${NC}"
183
+ return 0
184
+ else
185
+ echo -e "${RED}❌ $failed validation(s) failed. CLI environment needs setup.${NC}"
186
+ suggest_installations
187
+ return 1
188
+ fi
189
+ }
190
+
191
+ # Run validation
192
+ main "$@"
@@ -0,0 +1,262 @@
1
+ #!/bin/bash
2
+ # Deliverable-Based Confidence Calculator
3
+ # Calculates confidence scores based on actual deliverable completion
4
+
5
+ set -euo pipefail
6
+
7
+ # Arguments
8
+ TASK_ID="$1"
9
+ AGENT_ID="$2"
10
+ EXPECTED_DELIVERABLES="$3" # JSON array of expected files/deliverables
11
+ WORKING_DIR="${4:-$(pwd)}"
12
+
13
+ # Colors for output
14
+ RED='\033[0;31m'
15
+ GREEN='\033[0;32m'
16
+ YELLOW='\033[1;33m'
17
+ BLUE='\033[0;34m'
18
+ NC='\033[0m' # No Color
19
+
20
+ echo -e "${BLUE}📊 Deliverable-Based Confidence Calculator${NC}"
21
+ echo "Task ID: $TASK_ID"
22
+ echo "Agent ID: $AGENT_ID"
23
+ echo "Working Directory: $WORKING_DIR"
24
+ echo ""
25
+
26
+ # Function to check if deliverable exists and is valid
27
+ validate_deliverable() {
28
+ local deliverable="$1"
29
+ local deliverable_path="$WORKING_DIR/$deliverable"
30
+
31
+ # Handle different deliverable types
32
+ case "$deliverable" in
33
+ *.md|*.txt|*.yaml|*.yml)
34
+ # Text files - check existence and content
35
+ if [ -f "$deliverable_path" ]; then
36
+ local size=$(stat -c%s "$deliverable_path" 2>/dev/null || echo "0")
37
+ if [ "$size" -gt 0 ]; then
38
+ echo "VALID:$size"
39
+ return 0
40
+ fi
41
+ fi
42
+ ;;
43
+ *.sh|*.js|*.ts|*.tsx|*.py)
44
+ # Code files - check existence and basic syntax
45
+ if [ -f "$deliverable_path" ]; then
46
+ local size=$(stat -c%s "$deliverable_path" 2>/dev/null || echo "0")
47
+ if [ "$size" -gt 100 ]; then
48
+ echo "VALID:$size"
49
+ return 0
50
+ fi
51
+ fi
52
+ ;;
53
+ */)
54
+ # Directories - check existence and contents
55
+ if [ -d "$deliverable_path" ]; then
56
+ local file_count=$(find "$deliverable_path" -type f | wc -l)
57
+ if [ "$file_count" -gt 0 ]; then
58
+ echo "VALID:$file_count"
59
+ return 0
60
+ fi
61
+ fi
62
+ ;;
63
+ *.json)
64
+ # JSON files - check validity
65
+ if [ -f "$deliverable_path" ]; then
66
+ if jq empty "$deliverable_path" 2>/dev/null; then
67
+ local size=$(stat -c%s "$deliverable_path" 2>/dev/null || echo "0")
68
+ echo "VALID:$size"
69
+ return 0
70
+ fi
71
+ fi
72
+ ;;
73
+ *)
74
+ # Generic file check
75
+ if [ -f "$deliverable_path" ]; then
76
+ local size=$(stat -c%s "$deliverable_path" 2>/dev/null || echo "0")
77
+ if [ "$size" -gt 0 ]; then
78
+ echo "VALID:$size"
79
+ return 0
80
+ fi
81
+ fi
82
+ ;;
83
+ esac
84
+
85
+ echo "INVALID"
86
+ return 1
87
+ }
88
+
89
+ # Function to calculate quality score based on deliverable characteristics
90
+ calculate_quality_score() {
91
+ local deliverable="$1"
92
+ local deliverable_path="$WORKING_DIR/$deliverable"
93
+
94
+ local quality_score=50 # Base score (0-100 scale) for an existing file
95
+
96
+ # Size scoring (larger files with meaningful content)
97
+ if [ -f "$deliverable_path" ]; then
98
+ local size=$(stat -c%s "$deliverable_path" 2>/dev/null || echo "0")
99
+
100
+ if [ "$size" -gt 5000 ]; then
101
+ quality_score=90 # Substantial content
102
+ elif [ "$size" -gt 1000 ]; then
103
+ quality_score=80 # Good content
104
+ elif [ "$size" -gt 100 ]; then
105
+ quality_score=70 # Adequate content
106
+ else
107
+ quality_score=60 # Minimal content
108
+ fi
109
+ fi
110
+
111
+ # Content-specific scoring
112
+ case "$deliverable" in
113
+ *.md)
114
+ # Markdown files - check for structure
115
+ if grep -q "^#" "$deliverable_path" 2>/dev/null; then
116
+ quality_score=$((quality_score + 10)) # Has headers
117
+ fi
118
+ if grep -q "^```" "$deliverable_path" 2>/dev/null; then
119
+ quality_score=$((quality_score + 5)) # Has code blocks
120
+ fi
121
+ ;;
122
+ *.ts|*.tsx|*.js|*.jsx)
123
+ # Code files - check for functions/exports
124
+ if grep -q "function\|export\|class\|const.*=" "$deliverable_path" 2>/dev/null; then
125
+ quality_score=$((quality_score + 10)) # Has functions/exports
126
+ fi
127
+ ;;
128
+ *.json)
129
+ # JSON files - check for structure
130
+ if jq -e 'type == "object" and (keys | length) > 0' "$deliverable_path" >/dev/null 2>&1; then
131
+ quality_score=$((quality_score + 10)) # Has meaningful structure
132
+ fi
133
+ ;;
134
+ esac
135
+
136
+ # Cap at 100 (quality is on a 0-100 scale here; it is normalized to 0-1 when averaged)
137
+ if [ "$quality_score" -gt 100 ]; then
138
+ quality_score=100
139
+ fi
140
+
141
+ echo "$quality_score"
142
+ }
143
+
144
+ # Main confidence calculation
145
+ main() {
146
+ echo -e "${YELLOW}Analyzing expected deliverables...${NC}"
147
+
148
+ # Parse expected deliverables from JSON
149
+ if ! echo "$EXPECTED_DELIVERABLES" | jq empty 2>/dev/null; then
150
+ echo -e "${RED}❌ Invalid JSON in expected deliverables${NC}"
151
+ echo "0.0"
152
+ return 1
153
+ fi
154
+
155
+ local total_deliverables=$(echo "$EXPECTED_DELIVERABLES" | jq 'length')
156
+ local valid_deliverables=0
157
+ local total_quality_score=0
158
+
159
+ echo "Expected deliverables: $total_deliverables"
160
+ echo ""
161
+
162
+ # Check each deliverable
163
+ for ((i=0; i<total_deliverables; i++)); do
164
+ local deliverable=$(echo "$EXPECTED_DELIVERABLES" | jq -r ".[$i]")
165
+
166
+ echo -n " Checking: $deliverable ... "
167
+
168
+ local validation_result=$(validate_deliverable "$deliverable")
169
+
170
+ if [[ "$validation_result" == VALID* ]]; then
171
+ local quality_score=$(calculate_quality_score "$deliverable")
172
+ total_quality_score=$((total_quality_score + quality_score))
173
+ valid_deliverables=$((valid_deliverables + 1))
174
+
175
+ local size=$(echo "$validation_result" | cut -d: -f2)
176
+ echo -e "${GREEN}✓ VALID${NC} (size: $size, quality: $quality_score)"
177
+ else
178
+ echo -e "${RED}✗ MISSING/INVALID${NC}"
179
+ fi
180
+ done
181
+
182
+ echo ""
183
+ echo -e "${YELLOW}Summary:${NC}"
184
+ echo " Valid deliverables: $valid_deliverables/$total_deliverables"
185
+
186
+ # Calculate completion score (0-1)
187
+ local completion_score=0
188
+ if [ "$total_deliverables" -gt 0 ]; then
189
+ completion_score=$(echo "scale=3; $valid_deliverables / $total_deliverables" | bc -l)
190
+ fi
191
+
192
+ # Calculate average quality score (0-1)
193
+ local avg_quality_score=0.5 # Default if no deliverables
194
+ if [ "$valid_deliverables" -gt 0 ]; then
195
+ avg_quality_score=$(echo "scale=3; $total_quality_score / ($valid_deliverables * 100)" | bc -l)
196
+ fi
197
+
198
+ # Calculate final confidence score
199
+ # Weight: 60% completion, 40% quality
200
+ local final_confidence=$(echo "scale=3; ($completion_score * 0.6) + ($avg_quality_score * 0.4)" | bc -l)
201
+
202
+ # Round to 2 decimal places
203
+ final_confidence=$(echo "$final_confidence" | sed 's/^\./0./' | sed 's/\.\([0-9]\{2\}\)[0-9]*$/.\1/')
204
+
205
+ echo " Completion score: $completion_score"
206
+ echo " Quality score: $avg_quality_score"
207
+ echo " Final confidence: $final_confidence"
208
+
209
+ # Validation rules
210
+ if [ "$valid_deliverables" -eq 0 ]; then
211
+ echo -e "${RED}🚨 CRITICAL: No deliverables created - confidence should be 0.0${NC}"
212
+ echo "0.0"
213
+ return 1
214
+ elif (( $(echo "$final_confidence > 0.3" | bc -l) )); then
215
+ if [ "$valid_deliverables" -lt "$((total_deliverables / 2))" ]; then
216
+ echo -e "${YELLOW}⚠️ WARNING: Low deliverable completion (<50%) - confidence should be <= 0.3${NC}"
217
+ fi
218
+ fi
219
+
220
+ echo -e "${GREEN}✓ Calculated confidence: $final_confidence${NC}"
221
+ echo "$final_confidence"
222
+ }
223
+
224
+ # Store results in Redis if available
225
+ store_confidence_result() {
226
+ local confidence="$1"
227
+
228
+ if command -v redis-cli >/dev/null 2>&1; then
229
+ # Store detailed breakdown
230
+ local breakdown=$(cat << EOF
231
+ {
232
+ "task_id": "$TASK_ID",
233
+ "agent_id": "$AGENT_ID",
234
+ "calculated_confidence": $confidence,
235
+ "calculation_method": "deliverable-based",
236
+ "timestamp": "$(date -Iseconds)",
237
+ "working_directory": "$WORKING_DIR",
238
+ "expected_deliverables": $EXPECTED_DELIVERABLES
239
+ }
240
+ EOF
241
+ )
242
+
243
+ redis-cli set "${TASK_ID}:${AGENT_ID}:confidence-breakdown" "$breakdown" >/dev/null 2>&1 || true
244
+ redis-cli set "${TASK_ID}:${AGENT_ID}:confidence-score" "$confidence" >/dev/null 2>&1 || true
245
+
246
+ echo -e "📦 ${BLUE}Stored confidence breakdown in Redis:${NC} ${TASK_ID}:${AGENT_ID}:confidence-breakdown"
247
+ fi
248
+ }
249
+
250
+ # Execute main function
251
+ if [ "$#" -lt 3 ]; then
252
+ echo "Usage: $0 <task_id> <agent_id> <expected_deliverables_json> [working_directory]"
253
+ echo "Example: $0 task-123 agent-1 '[\"file1.txt\", \"script.sh\", \"docs/\"]' /path/to/project"
254
+ exit 1
255
+ fi
256
+
257
+ final_confidence=$(main "$@" | tail -n 1) # keep only the final numeric score from main's output
258
+
259
+ # Store results
260
+ store_confidence_result "$final_confidence"
261
+
262
+ echo "$final_confidence"
@@ -707,6 +707,17 @@ EOF
707
707
  # Main CFN Loop
708
708
  ##############################################################################
709
709
 
710
+ # Validate CLI environment before spawning agents
711
+ echo "🔧 Validating CLI environment..."
712
+ if [ -f "$PROJECT_ROOT/.claude/skills/cfn-cli-setup/validate-cli-environment.sh" ]; then
713
+ if ! bash "$PROJECT_ROOT/.claude/skills/cfn-cli-setup/validate-cli-environment.sh"; then
714
+ echo "❌ CLI environment validation failed. Agents may not have required tools."
715
+ echo "⚠️ Continuing anyway, but expect potential tool failures..."
716
+ fi
717
+ else
718
+ echo "⚠️ CLI environment validation script not found. Skipping validation."
719
+ fi
720
+
710
721
  # Store context in Redis
711
722
  store_context "$TASK_ID"
712
723
 
@@ -764,11 +775,41 @@ for ((ITERATION=1; ITERATION<=MAX_ITERATIONS; ITERATION++)); do
764
775
  --agents "$LOOP3_IDS" \
765
776
  --threshold "$GATE" \
766
777
  --min-quorum "$MIN_QUORUM_LOOP3"; then
767
- # Gate passed - store confidence
768
- LOOP3_FINAL_CONFIDENCE=$("$REDIS_COORD_SKILL/invoke-waiting-mode.sh" collect \
769
- --task-id "$TASK_ID" \
770
- --agent-ids "$LOOP3_IDS" \
771
- --min-quorum "$MIN_QUORUM_LOOP3")
778
+ # Gate passed - validate confidence based on deliverables
779
+ echo "🔍 Validating agent confidence scores against deliverables..."
780
+
781
+ # Re-calculate confidence based on actual deliverables
782
+ if [ -n "$EXPECTED_FILES" ] && [ -f "$PROJECT_ROOT/.claude/skills/cfn-deliverable-validation/confidence-calculator.sh" ]; then
783
+ VALIDATED_CONFIDENCE=0
784
+
785
+ for agent_id in ${LOOP3_IDS//,/ }; do
786
+ # Get agent's reported confidence
787
+ agent_confidence=$(redis-cli get "swarm:${TASK_ID}:${agent_id}:confidence" 2>/dev/null || true); agent_confidence=${agent_confidence:-0.5}
788
+
789
+ # Calculate deliverable-based confidence
790
+ deliverable_confidence=$("$PROJECT_ROOT/.claude/skills/cfn-deliverable-validation/confidence-calculator.sh" \
791
+ "$TASK_ID" "$agent_id" "$EXPECTED_FILES" "$PROJECT_ROOT")
792
+
793
+ echo " Agent $agent_id: reported=$agent_confidence, deliverable-based=$deliverable_confidence"
794
+
795
+ # Use the lower of the two scores (inflation prevention)
796
+ if (( $(echo "$deliverable_confidence < $agent_confidence" | bc -l) )); then
797
+ echo " ⚠️ Downgrading confidence for $agent_id (inflated score detected)"
798
+ VALIDATED_CONFIDENCE=$deliverable_confidence
799
+ else
800
+ VALIDATED_CONFIDENCE=$agent_confidence
801
+ fi
802
+ done
803
+
804
+ LOOP3_FINAL_CONFIDENCE=$VALIDATED_CONFIDENCE
805
+ echo "✅ Final validated Loop 3 confidence: $LOOP3_FINAL_CONFIDENCE"
806
+ else
807
+ # Store confidence (fallback method)
808
+ LOOP3_FINAL_CONFIDENCE=$("$REDIS_COORD_SKILL/invoke-waiting-mode.sh" collect \
809
+ --task-id "$TASK_ID" \
810
+ --agent-ids "$LOOP3_IDS" \
811
+ --min-quorum "$MIN_QUORUM_LOOP3")
812
+ fi
772
813
  else
773
814
  # Gate failed - iterate Loop 3
774
815
  echo "❌ Gate check failed - iterating Loop 3"
@@ -0,0 +1,279 @@
1
+ #!/bin/bash
2
+ # Task Decomposition for Complex Operations
3
+ # Breaks down complex tasks into smaller, tool-budget-efficient subtasks
4
+
5
+ set -euo pipefail
6
+
7
+ # Configuration
8
+ TASK_ID="${1}"
9
+ TASK_DESCRIPTION="${2}"
10
+ TOOL_BUDGET="${3:-10}" # Default 10 tool budget per agent
11
+ COMPLEXITY_THRESHOLD="${4:-medium}" # low, medium, high
12
+
13
+ # Colors for output
14
+ RED='\033[0;31m'
15
+ GREEN='\033[0;32m'
16
+ YELLOW='\033[1;33m'
17
+ BLUE='\033[0;34m'
18
+ NC='\033[0m' # No Color
19
+
20
+ echo -e "${BLUE}🔧 CFN Loop Task Decomposition${NC}"
21
+ echo "Task ID: $TASK_ID"
22
+ echo "Tool Budget: $TOOL_BUDGET per agent"
23
+ echo "Complexity: $COMPLEXITY_THRESHOLD"
24
+ echo ""
25
+
26
+ # Function to analyze task complexity
27
+ analyze_task_complexity() {
28
+ local description="$1"
29
+ local complexity_score=0
30
+
31
+ # File operation indicators
32
+ if echo "$description" | grep -qiE "(fix|create|modify|update|delete|write).*file"; then
33
+ ((complexity_score += 3))
34
+ fi
35
+
36
+ # Directory exploration indicators
37
+ if echo "$description" | grep -qiE "(explore|find|search|scan|directory|folder)"; then
38
+ ((complexity_score += 2))
39
+ fi
40
+
41
+ # Multiple component indicators
42
+ if echo "$description" | grep -qiE "(multiple|batch|all|every|comprehensive)"; then
43
+ ((complexity_score += 4))
44
+ fi
45
+
46
+ # TypeScript/Complex file indicators
47
+ if echo "$description" | grep -qiE "(typescript|tsx|interface|type|complex)"; then
48
+ ((complexity_score += 2))
49
+ fi
50
+
51
+ # Dependency analysis indicators
52
+ if echo "$description" | grep -qiE "(dependency|import|require|module)"; then
53
+ ((complexity_score += 2))
54
+ fi
55
+
56
+ echo "$complexity_score"
57
+ }
58
+
59
+ # Function to decompose TypeScript error fixing tasks
60
+ decompose_typescript_task() {
61
+ local task_id="$1"
62
+ local description="$2"
63
+ local output_file=".claude/skills/cfn-task-decomposition/${task_id}-subtasks.json"
64
+
65
+ echo -e "${YELLOW}Decomposing TypeScript task...${NC}"
66
+
67
+ # Create subtasks
68
+ cat > "$output_file" << EOF
69
+ {
70
+ "task_id": "$task_id",
71
+ "original_task": "$description",
72
+ "decomposition_strategy": "typescript-error-fixing",
73
+ "subtasks": [
74
+ {
75
+ "subtask_id": "${task_id}-recon",
76
+ "title": "Directory Reconnaissance",
77
+ "description": "Explore project structure and identify TypeScript files needing fixes",
78
+ "agent_type": "researcher",
79
+ "tool_budget": 5,
80
+ "expected_tools": ["find", "grep", "Read", "Glob"],
81
+ "deliverables": ["file-list.txt", "error-classification.json"],
82
+ "estimated_duration": "5-10 minutes"
83
+ },
84
+ {
85
+ "subtask_id": "${task_id}-pattern",
86
+ "title": "Error Pattern Analysis",
87
+ "description": "Analyze TypeScript errors and identify common patterns",
88
+ "agent_type": "analyst",
89
+ "tool_budget": 8,
90
+ "expected_tools": ["Read", "Grep", "Bash"],
91
+ "deliverables": ["error-patterns.json", "fix-strategy.md"],
92
+ "estimated_duration": "10-15 minutes"
93
+ },
94
+ {
95
+ "subtask_id": "${task_id}-fix",
96
+ "title": "File-by-File Fixes",
97
+ "description": "Fix TypeScript errors in identified files using patterns",
98
+ "agent_type": "backend-developer",
99
+ "tool_budget": 15,
100
+ "expected_tools": ["Read", "Edit", "Write", "Bash"],
101
+ "deliverables": ["fixed-files/", "fix-summary.json"],
102
+ "estimated_duration": "20-30 minutes",
103
+ "batch_processing": {
104
+ "max_files_per_batch": 3,
105
+ "batches_total": 5
106
+ }
107
+ },
108
+ {
109
+ "subtask_id": "${task_id}-validate",
110
+ "title": "Validation and Testing",
111
+ "description": "Validate fixes and run TypeScript compilation",
112
+ "agent_type": "tester",
113
+ "tool_budget": 10,
114
+ "expected_tools": ["Bash", "Read", "Write"],
115
+ "deliverables": ["validation-report.json", "compilation-results.txt"],
116
+ "estimated_duration": "10-15 minutes"
117
+ }
118
+ ],
119
+ "coordination": {
120
+ "dependency_order": ["recon", "pattern", "fix", "validate"],
121
+ "redis_context_keys": {
122
+ "recon": "${task_id}:recon:results",
123
+ "pattern": "${task_id}:pattern:results",
124
+ "fix": "${task_id}:fix:results",
125
+ "validate": "${task_id}:validate:results"
126
+ }
127
+ }
128
+ }
129
+ EOF
130
+
131
+ echo -e "${GREEN}✓ Task decomposition created: $output_file${NC}"
132
+ echo "$output_file"
133
+ }
134
+
135
+ # Function to decompose file exploration tasks
136
+ decompose_exploration_task() {
137
+ local task_id="$1"
138
+ local description="$2"
139
+ local output_file=".claude/skills/cfn-task-decomposition/${task_id}-subtasks.json"
140
+
141
+ echo -e "${YELLOW}Decomposing exploration task...${NC}"
142
+
143
+ cat > "$output_file" << EOF
144
+ {
145
+ "task_id": "$task_id",
146
+ "original_task": "$description",
147
+ "decomposition_strategy": "directory-exploration",
148
+ "subtasks": [
149
+ {
150
+ "subtask_id": "${task_id}-scan",
151
+ "title": "Targeted Directory Scan",
152
+ "description": "Scan specific directories for target files using ripgrep",
153
+ "agent_type": "researcher",
154
+ "tool_budget": 6,
155
+ "expected_tools": ["Bash", "rg", "Glob", "Read"],
156
+ "deliverables": ["target-files.txt", "directory-map.json"],
157
+ "estimated_duration": "5-8 minutes"
158
+ },
159
+ {
160
+ "subtask_id": "${task_id}-analyze",
161
+ "title": "File Content Analysis",
162
+ "description": "Analyze found files to understand structure and patterns",
163
+ "agent_type": "analyst",
164
+ "tool_budget": 8,
165
+ "expected_tools": ["Read", "Grep", "Write"],
166
+ "deliverables": ["file-analysis.json", "content-summary.md"],
167
+ "estimated_duration": "8-12 minutes"
168
+ }
169
+ ]
170
+ }
171
+ EOF
172
+
173
+ echo -e "${GREEN}✓ Task decomposition created: $output_file${NC}"
174
+ echo "$output_file"
175
+ }
176
+
177
+ # Function to create task-specific agent prompts
178
+ create_agent_prompts() {
179
+ local subtasks_file="$1"
180
+ local prompts_dir=".claude/skills/cfn-task-decomposition/prompts"
181
+
182
+ mkdir -p "$prompts_dir"
183
+
184
+ # Extract subtask information
185
+ local task_id=$(jq -r '.task_id' "$subtasks_file")
186
+ local strategy=$(jq -r '.decomposition_strategy' "$subtasks_file")
187
+
188
+ # Generate prompts for each subtask
189
+ jq -r '.subtasks[] | @base64' "$subtasks_file" | while read -r subtask_b64; do
190
+ local subtask=$(echo "$subtask_b64" | base64 -d)
191
+ local subtask_id=$(echo "$subtask" | jq -r '.subtask_id')
192
+ local title=$(echo "$subtask" | jq -r '.title')
193
+ local description=$(echo "$subtask" | jq -r '.description')
194
+ local tool_budget=$(echo "$subtask" | jq -r '.tool_budget')
195
+ local deliverables=$(echo "$subtask" | jq -r '.deliverables[]')
196
+
197
+ cat > "$prompts_dir/${subtask_id}-prompt.md" << EOF
198
+ # Task: $title
199
+
200
+ ## Context
201
+ You are working on subtask "${subtask_id}" of the larger task "${task_id}".
202
+
203
+ ## Your Mission
204
+ $description
205
+
206
+ ## Tool Budget Optimization
207
+ You have **$tool_budget tool uses** available. Use them efficiently:
208
+
209
+ ### Recommended Tool Sequence:
210
+ 1. **Exploration tools** (Read, Glob, Bash) - 2-3 uses
211
+ 2. **Analysis tools** (Grep, Read) - 2-3 uses
212
+ 3. **Implementation tools** (Edit, Write) - 3-5 uses
213
+ 4. **Validation tools** (Bash, Read) - 1-2 uses
214
+
215
+ ### Efficiency Tips:
216
+ - **Batch operations**: Read multiple files in one tool use when possible
217
+ - **Targeted searches**: Use specific patterns instead of broad scans
218
+ - **Early validation**: Check results after each major step
219
+ - **Combine tool uses**: Use compound commands to reduce tool count
220
+
221
+ ## Expected Deliverables
222
+ $deliverables
223
+
224
+ ## Success Criteria
225
+ - All deliverables created with high quality
226
+ - Tool budget not exceeded
227
+ - Results passed to next subtask via Redis
228
+
229
+ ## Redis Context
230
+ - Store results in: \`${task_id}:${subtask_id}:results\`
231
+ - Use confidence scoring based on deliverable completion
232
+ - Signal completion via Redis LPUSH to \`${task_id}:${subtask_id}:done\`
233
+ EOF
234
+
235
+ echo -e "${GREEN}✓ Created prompt for $subtask_id${NC}"
236
+ done
237
+ }
238
+
239
+ # Main execution
240
+ main() {
241
+ local complexity_score=$(analyze_task_complexity "$TASK_DESCRIPTION")
242
+ echo -e "${YELLOW}Task Complexity Score: $complexity_score${NC}"
243
+
244
+ local decomposition_file=""
245
+
246
+ # Determine decomposition strategy based on task type
247
+ if echo "$TASK_DESCRIPTION" | grep -qiE "(typescript|tsx|ts27|ts23|error|fix)"; then
248
+ decomposition_file=$(decompose_typescript_task "$TASK_ID" "$TASK_DESCRIPTION")
249
+ elif echo "$TASK_DESCRIPTION" | grep -qiE "(explore|find|search|scan)"; then
250
+ decomposition_file=$(decompose_exploration_task "$TASK_ID" "$TASK_DESCRIPTION")
251
+ else
252
+ echo -e "${YELLOW}Using general task decomposition...${NC}"
253
+ decomposition_file=$(decompose_typescript_task "$TASK_ID" "$TASK_DESCRIPTION")
254
+ fi
255
+
256
+ # Create agent prompts
257
+ if [ -n "$decomposition_file" ] && [ -f "$decomposition_file" ]; then
258
+ create_agent_prompts "$decomposition_file"
259
+
260
+ echo -e "\n${GREEN}=== Task Decomposition Complete ===${NC}"
261
+ echo -e "📄 ${BLUE}Decomposition file:${NC} $decomposition_file"
262
+ echo -e "🤖 ${BLUE}Agent prompts:${NC} .claude/skills/cfn-task-decomposition/prompts/"
263
+ echo -e "💡 ${BLUE}Strategy:${NC} Break complex task into tool-budget-efficient subtasks"
264
+
265
+ # Store decomposition info in Redis for coordinator
266
+ if command -v redis-cli >/dev/null 2>&1; then
267
+ redis-cli set "${TASK_ID}:decomposition" "$(cat "$decomposition_file")" >/dev/null
268
+ echo -e "📦 ${BLUE}Redis context stored:${NC} ${TASK_ID}:decomposition"
269
+ fi
270
+
271
+ return 0
272
+ else
273
+ echo -e "${RED}❌ Task decomposition failed${NC}"
274
+ return 1
275
+ fi
276
+ }
277
+
278
+ # Execute main function
279
+ main "$@"
@@ -1,12 +1,145 @@
1
+ "use strict";
1
2
  /**
2
3
  * Dynamic Agent Loader - Reads agent definitions from .claude/agents/ directory
3
4
  * Single source of truth for agent types in the system
4
- */ import { readFileSync, existsSync } from 'node:fs';
5
- import { glob } from 'glob';
6
- import { resolve, dirname } from 'node:path';
7
- import { parse as parseYaml } from 'yaml';
5
+ */ var __awaiter = this && this.__awaiter || function(thisArg, _arguments, P, generator) {
6
+ function adopt(value) {
7
+ return value instanceof P ? value : new P(function(resolve) {
8
+ resolve(value);
9
+ });
10
+ }
11
+ return new (P || (P = Promise))(function(resolve, reject) {
12
+ function fulfilled(value) {
13
+ try {
14
+ step(generator.next(value));
15
+ } catch (e) {
16
+ reject(e);
17
+ }
18
+ }
19
+ function rejected(value) {
20
+ try {
21
+ step(generator["throw"](value));
22
+ } catch (e) {
23
+ reject(e);
24
+ }
25
+ }
26
+ function step(result) {
27
+ result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected);
28
+ }
29
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
30
+ });
31
+ };
32
+ var __generator = this && this.__generator || function(thisArg, body) {
33
+ var _ = {
34
+ label: 0,
35
+ sent: function() {
36
+ if (t[0] & 1) throw t[1];
37
+ return t[1];
38
+ },
39
+ trys: [],
40
+ ops: []
41
+ }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype);
42
+ return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() {
43
+ return this;
44
+ }), g;
45
+ function verb(n) {
46
+ return function(v) {
47
+ return step([
48
+ n,
49
+ v
50
+ ]);
51
+ };
52
+ }
53
+ function step(op) {
54
+ if (f) throw new TypeError("Generator is already executing.");
55
+ while(g && (g = 0, op[0] && (_ = 0)), _)try {
56
+ if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
57
+ if (y = 0, t) op = [
58
+ op[0] & 2,
59
+ t.value
60
+ ];
61
+ switch(op[0]){
62
+ case 0:
63
+ case 1:
64
+ t = op;
65
+ break;
66
+ case 4:
67
+ _.label++;
68
+ return {
69
+ value: op[1],
70
+ done: false
71
+ };
72
+ case 5:
73
+ _.label++;
74
+ y = op[1];
75
+ op = [
76
+ 0
77
+ ];
78
+ continue;
79
+ case 7:
80
+ op = _.ops.pop();
81
+ _.trys.pop();
82
+ continue;
83
+ default:
84
+ if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
85
+ _ = 0;
86
+ continue;
87
+ }
88
+ if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
89
+ _.label = op[1];
90
+ break;
91
+ }
92
+ if (op[0] === 6 && _.label < t[1]) {
93
+ _.label = t[1];
94
+ t = op;
95
+ break;
96
+ }
97
+ if (t && _.label < t[2]) {
98
+ _.label = t[2];
99
+ _.ops.push(op);
100
+ break;
101
+ }
102
+ if (t[2]) _.ops.pop();
103
+ _.trys.pop();
104
+ continue;
105
+ }
106
+ op = body.call(thisArg, _);
107
+ } catch (e) {
108
+ op = [
109
+ 6,
110
+ e
111
+ ];
112
+ y = 0;
113
+ } finally{
114
+ f = t = 0;
115
+ }
116
+ if (op[0] & 5) throw op[1];
117
+ return {
118
+ value: op[0] ? op[1] : void 0,
119
+ done: true
120
+ };
121
+ }
122
+ };
123
+ var __spreadArray = this && this.__spreadArray || function(to, from, pack) {
124
+ if (pack || arguments.length === 2) for(var i = 0, l = from.length, ar; i < l; i++){
125
+ if (ar || !(i in from)) {
126
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
127
+ ar[i] = from[i];
128
+ }
129
+ }
130
+ return to.concat(ar || Array.prototype.slice.call(from));
131
+ };
132
+ Object.defineProperty(exports, "__esModule", {
133
+ value: true
134
+ });
135
+ exports.refreshAgents = exports.getAgentsByCategory = exports.isValidAgentType = exports.searchAgents = exports.getAgentCategories = exports.getAllAgents = exports.getAgent = exports.getAvailableAgentTypes = exports.agentLoader = exports.AgentLoader = void 0;
136
+ exports.resolveLegacyAgentType = resolveLegacyAgentType;
137
+ var node_fs_1 = require("node:fs");
138
+ var glob_1 = require("glob");
139
+ var node_path_1 = require("node:path");
140
+ var yaml_1 = require("yaml");
8
141
  // Legacy agent type mapping for backward compatibility
9
- const LEGACY_AGENT_MAPPING = {
142
+ var LEGACY_AGENT_MAPPING = {
10
143
  analyst: 'code-analyzer',
11
144
  coordinator: 'hierarchical-coordinator',
12
145
  optimizer: 'perf-analyzer',
@@ -17,38 +150,40 @@ const LEGACY_AGENT_MAPPING = {
17
150
  };
18
151
  /**
19
152
  * Resolve legacy agent types to current equivalents
20
- */ export function resolveLegacyAgentType(legacyType) {
153
+ */ function resolveLegacyAgentType(legacyType) {
21
154
  return LEGACY_AGENT_MAPPING[legacyType] || legacyType;
22
155
  }
23
- export class AgentLoader {
24
- agentCache = new Map();
25
- categoriesCache = [];
26
- lastLoadTime = 0;
27
- CACHE_EXPIRY = 60_000;
28
- getAgentsDirectory() {
29
- let currentDir = process.cwd();
156
+ var AgentLoader = /** @class */ function() {
157
+ function AgentLoader() {
158
+ this.agentCache = new Map();
159
+ this.categoriesCache = [];
160
+ this.lastLoadTime = 0;
161
+ this.CACHE_EXPIRY = 60000; // 1 minute cache
162
+ }
163
+ AgentLoader.prototype.getAgentsDirectory = function() {
164
+ var currentDir = process.cwd();
30
165
  while(currentDir !== '/'){
31
- const claudeAgentsPath = resolve(currentDir, '.claude', 'agents');
32
- if (existsSync(claudeAgentsPath)) {
166
+ var claudeAgentsPath = (0, node_path_1.resolve)(currentDir, '.claude', 'agents');
167
+ if ((0, node_fs_1.existsSync)(claudeAgentsPath)) {
33
168
  return claudeAgentsPath;
34
169
  }
35
- currentDir = dirname(currentDir);
170
+ currentDir = (0, node_path_1.dirname)(currentDir);
36
171
  }
37
- return resolve(process.cwd(), '.claude', 'agents');
38
- }
39
- parseAgentFile(filePath) {
172
+ return (0, node_path_1.resolve)(process.cwd(), '.claude', 'agents');
173
+ };
174
+ AgentLoader.prototype.parseAgentFile = function(filePath) {
40
175
  try {
41
- const content = readFileSync(filePath, 'utf-8');
42
- const frontmatterMatch = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n([\s\S]*)$/);
176
+ var content = (0, node_fs_1.readFileSync)(filePath, 'utf-8');
177
+ var frontmatterMatch = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n([\s\S]*)$/);
43
178
  if (!frontmatterMatch) {
44
- console.warn(`No frontmatter found in ${filePath}`);
179
+ console.warn("No frontmatter found in ".concat(filePath));
45
180
  return null;
46
181
  }
47
- const [, yamlContent, markdownContent] = frontmatterMatch;
48
- const frontmatter = parseYaml(yamlContent);
49
- const description = frontmatter.description;
182
+ var yamlContent = frontmatterMatch[1], markdownContent = frontmatterMatch[2];
183
+ var frontmatter = (0, yaml_1.parse)(yamlContent);
184
+ var description = frontmatter.description;
50
185
  if (!frontmatter.name || !description) {
51
- console.warn(`Missing required fields (name, description) in ${filePath}`);
186
+ console.warn("Missing required fields (name, description) in ".concat(filePath));
52
187
  return null;
53
188
  }
54
189
  return {
@@ -69,129 +204,13 @@ export class AgentLoader {
69
204
  content: markdownContent.trim()
70
205
  };
71
206
  } catch (error) {
72
- console.error(`Error parsing agent file ${filePath}:`, error);
207
+ console.error("Error parsing agent file ".concat(filePath, ":"), error);
73
208
  return null;
74
209
  }
75
- }
76
- parseTools(frontmatter) {
77
- const extractTools = (input)=>{
210
+ };
211
+ AgentLoader.prototype.parseTools = function(frontmatter) {
212
+ var extractTools = function(input) {
78
213
  if (Array.isArray(input)) return input.map(String);
79
- if (typeof input === 'string') {
80
- return input.split(/[,\s]+/).map((t)=>t.trim()).filter((t)=>t.length > 0);
81
- }
82
- return [];
83
- };
84
- // Safely handle tools and capabilities.tools
85
- const toolsFromFrontmatter = frontmatter.tools ? extractTools(frontmatter.tools) : [];
86
- const toolsFromCapabilities = frontmatter.capabilities && typeof frontmatter.capabilities === 'object' ? extractTools(Object(frontmatter.capabilities).tools) : [];
87
- return [
88
- ...toolsFromFrontmatter,
89
- ...toolsFromCapabilities
90
- ];
91
- }
92
- async loadAgents() {
93
- const agentsDir = this.getAgentsDirectory();
94
- if (!existsSync(agentsDir)) {
95
- console.warn(`Agents directory not found: ${agentsDir}`);
96
- return;
97
- }
98
- const agentFiles = await new Promise((resolve, reject)=>{
99
- glob('**/*.md', {
100
- cwd: agentsDir,
101
- ignore: [
102
- '**/README.md',
103
- '**/MIGRATION_SUMMARY.md'
104
- ],
105
- absolute: true
106
- }, (err, matches)=>{
107
- if (err) reject(err);
108
- else resolve(matches);
109
- });
110
- });
111
- this.agentCache.clear();
112
- this.categoriesCache = [];
113
- const categoryMap = new Map();
114
- for (const filePath of agentFiles){
115
- const agent = this.parseAgentFile(filePath);
116
- if (agent) {
117
- this.agentCache.set(agent.name, agent);
118
- const relativePath = filePath.replace(agentsDir, '');
119
- const pathParts = relativePath.split('/');
120
- const category = pathParts[1] || 'uncategorized';
121
- if (!categoryMap.has(category)) {
122
- categoryMap.set(category, []);
123
- }
124
- categoryMap.get(category).push(agent);
125
- }
126
- }
127
- this.categoriesCache = Array.from(categoryMap.entries()).map(([name, agents])=>({
128
- name,
129
- agents: agents.sort((a, b)=>a.name.localeCompare(b.name))
130
- }));
131
- this.lastLoadTime = Date.now();
132
- }
133
- // Rest of the methods remain similar to the original implementation
134
- needsRefresh() {
135
- return Date.now() - this.lastLoadTime > this.CACHE_EXPIRY;
136
- }
137
- async ensureLoaded() {
138
- if (this.agentCache.size === 0 || this.needsRefresh()) {
139
- await this.loadAgents();
140
- }
141
- }
142
- async getAvailableAgentTypes() {
143
- await this.ensureLoaded();
144
- const currentTypes = Array.from(this.agentCache.keys());
145
- const legacyTypes = Object.keys(LEGACY_AGENT_MAPPING);
146
- return Array.from(new Set([
147
- ...currentTypes,
148
- ...legacyTypes
149
- ])).sort();
150
- }
151
- async getAgent(name) {
152
- await this.ensureLoaded();
153
- return this.agentCache.get(name) || this.agentCache.get(resolveLegacyAgentType(name)) || null;
154
- }
155
- async getAllAgents() {
156
- await this.ensureLoaded();
157
- return Array.from(this.agentCache.values()).sort((a, b)=>a.name.localeCompare(b.name));
158
- }
159
- async getAgentCategories() {
160
- await this.ensureLoaded();
161
- return this.categoriesCache;
162
- }
163
- async searchAgents(query) {
164
- await this.ensureLoaded();
165
- const lowerQuery = query.toLowerCase();
166
- return Array.from(this.agentCache.values()).filter((agent)=>agent.name.toLowerCase().includes(lowerQuery) || agent.description.toLowerCase().includes(lowerQuery) || agent.capabilities?.some((cap)=>cap.toLowerCase().includes(lowerQuery)));
167
- }
168
- async isValidAgentType(name) {
169
- await this.ensureLoaded();
170
- return this.agentCache.has(name) || this.agentCache.has(resolveLegacyAgentType(name));
171
- }
172
- async getAgentsByCategory(category) {
173
- const categories = await this.getAgentCategories();
174
- const found = categories.find((cat)=>cat.name === category);
175
- return found?.agents || [];
176
- }
177
- async refresh() {
178
- this.lastLoadTime = 0;
179
- await this.loadAgents();
180
- }
181
- }
182
- // Singleton instance
183
- export const agentLoader = new AgentLoader();
184
- // Convenience exports for use in other modules
185
- export const getAvailableAgentTypes = ()=>agentLoader.getAvailableAgentTypes();
186
- export const getAgent = (name)=>agentLoader.getAgent(name);
187
- export const getAllAgents = ()=>agentLoader.getAllAgents();
188
- export const getAgentCategories = ()=>agentLoader.getAgentCategories();
189
- export const searchAgents = (query)=>agentLoader.searchAgents(query);
190
- export const isValidAgentType = (name)=>agentLoader.isValidAgentType(name);
191
- export const getAgentsByCategory = (category)=>agentLoader.getAgentsByCategory(category);
192
- export const refreshAgents = ()=>agentLoader.refresh();
193
-
194
- //# sourceMappingURL=agent-loader.js.map
195
214
  if (typeof input === 'string') {
196
215
  return input.split(/[,\s]+/).map(function(t) {
197
216
  return t.trim();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-flow-novice",
3
- "version": "2.14.9",
3
+ "version": "2.14.10",
4
4
  "description": "AI agent orchestration framework with namespace-isolated skills, agents, and CFN Loop validation. Safe installation with ~0.01% collision risk.",
5
5
  "main": "dist/index.js",
6
6
  "type": "module",