claude-flow-novice 2.14.32 → 2.14.33
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/skills/pre-edit-backup/backup.sh +107 -0
- package/claude-assets/agents/cfn-dev-team/coordinators/cfn-v3-coordinator.md +71 -9
- package/claude-assets/skills/cfn-redis-data-extraction/SKILL.md +442 -0
- package/claude-assets/skills/cfn-redis-data-extraction/extract.sh +306 -0
- package/claude-assets/skills/hook-pipeline/security-scanner.sh +102 -0
- package/claude-assets/skills/pre-edit-backup/backup.sh +107 -0
- package/package.json +13 -4
- package/scripts/deploy-production.sh +356 -0
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
#!/bin/bash
# CFN Redis Data Extraction Script
# Extracts complete Redis coordination data from completed CFN Loop tasks

set -euo pipefail

# Default values
OUTPUT_DIR="./analysis/cfn-loop-data"   # where extracted JSON/summary files land
INCLUDE_PERFORMANCE=false               # add Redis memory/client stats to output
TASK_IDS=()                             # one or more CFN task ids to extract
VERBOSE=false

# require_value <option> <argc>: fail with a clear message when an option that
# needs a value was given none. Without this, "$2" under `set -u` aborts the
# script with an opaque "unbound variable" error.
require_value() {
    if [[ "$2" -lt 2 ]]; then
        echo "Error: $1 requires a value" >&2
        exit 1
    fi
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --task-id)
            require_value "$1" $#
            TASK_IDS+=("$2")
            shift 2
            ;;
        --task-ids)
            require_value "$1" $#
            # Comma-separated list, e.g. --task-ids a,b,c
            IFS=',' read -ra TASK_IDS <<< "$2"
            shift 2
            ;;
        --output-dir)
            require_value "$1" $#
            OUTPUT_DIR="$2"
            shift 2
            ;;
        --include-performance)
            INCLUDE_PERFORMANCE=true
            shift
            ;;
        --verbose)
            VERBOSE=true
            shift
            ;;
        -h|--help)
            echo "Usage: $0 --task-id <TASK_ID> [--output-dir <DIR>] [--include-performance] [--verbose]"
            exit 0
            ;;
        *)
            echo "Unknown option: $1" >&2
            exit 1
            ;;
    esac
done

# Validate required arguments
if [[ ${#TASK_IDS[@]} -eq 0 ]]; then
    echo "Error: --task-id or --task-ids is required" >&2
    exit 1
fi

# Create output directory
mkdir -p "$OUTPUT_DIR"

# Redis connection check: fail fast before doing any per-task work.
if ! redis-cli ping > /dev/null 2>&1; then
    echo "Error: Redis is not accessible" >&2
    exit 1
fi
|
61
|
+
|
|
62
|
+
# Function to extract task data
#
# extract_task_data <task_id>
#
# Collects every swarm:* Redis key belonging to <task_id>, classifies each
# agent by CFN loop number, and writes two artifacts:
#   $OUTPUT_DIR/cfn-loop-<task_id>-extracted.json  (machine-readable)
#   $OUTPUT_DIR/cfn-loop-<task_id>-summary.txt     (human-readable)
# Returns 1 when no keys exist for the task, 0 otherwise.
extract_task_data() {
    local task_id="$1"
    local output_file="$OUTPUT_DIR/cfn-loop-${task_id}-extracted.json"

    [[ "$VERBOSE" == true ]] && echo "Extracting data for task: $task_id"

    # Get all Redis keys for the task. SCAN is used instead of KEYS because
    # KEYS blocks the Redis server while it walks the entire keyspace.
    local redis_keys
    redis_keys=$(redis-cli --scan --pattern "*${task_id}*" 2>/dev/null | sort)

    if [[ -z "$redis_keys" ]]; then
        echo "Warning: No Redis keys found for task: $task_id"
        return 1
    fi

    local key_count
    key_count=$(echo "$redis_keys" | wc -l | tr -d ' ')

    # Initialize JSON structure. jq -n with named arguments avoids splicing
    # unescaped shell text into a JSON document.
    local json_data
    json_data=$(jq -n \
        --arg task_id "$task_id" \
        --arg ts "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
        --argjson keys "$key_count" \
        '{
            task_id: $task_id,
            extraction_timestamp: $ts,
            extraction_version: "1.0.0",
            redis_keys_analyzed: $keys,
            agents: {},
            metadata: {},
            summary: {}
        }')

    # Process each Redis key
    local agent_count=0
    local completion_signals=0
    local total_confidence=0
    local confidence_count=0
    declare -A agent_loops
    declare -A agent_types

    while IFS= read -r key; do
        [[ -z "$key" ]] && continue

        # Skip if not a swarm key
        [[ ! "$key" =~ ^swarm: ]] && continue

        # Keys look like swarm:<task>:<agent_id>:<data_type>
        if [[ "$key" =~ ^swarm:([^:]+):([^:]+):([^:]+)$ ]]; then
            local agent_id="${BASH_REMATCH[2]}"
            local data_type="${BASH_REMATCH[3]}"

            # Determine agent type and loop number from the agent id suffix.
            local agent_type="" loop_number=""
            if [[ "$agent_id" =~ ^cfn-v3-coordinator ]]; then
                agent_type="coordinator"
                loop_number="coordination"
            elif [[ "$agent_id" =~ -validation$ ]]; then
                agent_type="${agent_id%-validation}"
                loop_number="2"
            elif [[ "$agent_id" =~ -[0-9]+$ ]]; then
                agent_type="${agent_id%-*}"
                loop_number="3"
            elif [[ "$agent_id" =~ product-owner$ ]]; then
                agent_type="product-owner"
                loop_number="4"
            else
                agent_type="$agent_id"
                loop_number="unknown"
            fi

            # Store agent classification for the summary report.
            agent_types["$agent_id"]="$agent_type"
            agent_loops["$agent_id"]="$loop_number"

            # Register the agent on first sight. jq 'has' replaces the
            # original substring test against the serialized JSON, which
            # could false-positive when one agent id is a prefix of another.
            if ! echo "$json_data" | jq -e --arg aid "$agent_id" '.agents | has($aid)' >/dev/null; then
                # $((...)) instead of ((agent_count++)): the latter returns
                # exit status 1 when the old value is 0, tripping `set -e`.
                agent_count=$((agent_count + 1))
                json_data=$(echo "$json_data" | jq \
                    --arg aid "$agent_id" --arg at "$agent_type" --arg ln "$loop_number" \
                    '.agents[$aid] = {agent_type: $at, loop: $ln, data: {}}')
            fi

            # Extract data based on type
            case "$data_type" in
                "confidence")
                    local confidence
                    confidence=$(redis-cli get "$key" 2>/dev/null || echo "null")
                    if [[ "$confidence" =~ ^-?[0-9]+(\.[0-9]+)?$ ]]; then
                        json_data=$(echo "$json_data" | jq \
                            --arg aid "$agent_id" --argjson c "$confidence" \
                            '.agents[$aid].confidence = $c')
                        total_confidence=$(echo "$total_confidence + $confidence" | bc -l)
                        confidence_count=$((confidence_count + 1))
                    else
                        # Non-numeric payloads (or a missing key) become null
                        # instead of crashing the jq program they used to be
                        # spliced into verbatim.
                        json_data=$(echo "$json_data" | jq --arg aid "$agent_id" \
                            '.agents[$aid].confidence = null')
                    fi
                    ;;
                "done")
                    local completion_signal
                    completion_signal=$(redis-cli type "$key" 2>/dev/null)
                    if [[ "$completion_signal" == "list" ]]; then
                        completion_signal=$(redis-cli lrange "$key" 0 -1 2>/dev/null | head -1 || echo "null")
                    else
                        completion_signal=$(redis-cli get "$key" 2>/dev/null || echo "null")
                    fi
                    # --arg JSON-escapes the value; the original interpolated
                    # it into the filter and broke on embedded quotes.
                    json_data=$(echo "$json_data" | jq \
                        --arg aid "$agent_id" --arg cs "$completion_signal" \
                        '.agents[$aid].completion_signal = $cs')
                    completion_signals=$((completion_signals + 1))
                    ;;
                "messages")
                    local messages_json="[]"
                    local message_count
                    message_count=$(redis-cli llen "$key" 2>/dev/null || echo "0")

                    if [[ "$message_count" -gt 0 ]]; then
                        messages_json=$(redis-cli lrange "$key" 0 -1 2>/dev/null | jq -R . | jq -s .)
                    fi

                    json_data=$(echo "$json_data" | jq \
                        --arg aid "$agent_id" --argjson msgs "$messages_json" \
                        '.agents[$aid].messages = $msgs')
                    ;;
                "result")
                    local result_type
                    result_type=$(redis-cli type "$key" 2>/dev/null)
                    local result="null"

                    case "$result_type" in
                        "string")
                            result=$(redis-cli get "$key" 2>/dev/null || echo "null")
                            # A plain-text result is not valid JSON; encode it
                            # as a JSON string so --argjson accepts it.
                            if ! printf '%s' "$result" | jq -e . >/dev/null 2>&1; then
                                result=$(printf '%s' "$result" | jq -R -s .)
                            fi
                            ;;
                        "list")
                            result=$(redis-cli lrange "$key" 0 -1 2>/dev/null | jq -R . | jq -s .)
                            ;;
                    esac

                    json_data=$(echo "$json_data" | jq \
                        --arg aid "$agent_id" --argjson res "$result" \
                        '.agents[$aid].result = $res')
                    ;;
            esac
        fi
    done <<< "$redis_keys"

    # Calculate averages and summary
    local avg_confidence="0"
    if [[ "$confidence_count" -gt 0 ]]; then
        avg_confidence=$(echo "scale=3; $total_confidence / $confidence_count" | bc -l)
        # bc prints fractions without a leading zero (".500"), which is not
        # valid JSON; normalize before splicing into the document.
        if [[ "$avg_confidence" == .* ]]; then
            avg_confidence="0$avg_confidence"
        elif [[ "$avg_confidence" == -.* ]]; then
            avg_confidence="-0${avg_confidence#-}"
        fi
    fi

    # Extract task context if available
    local task_context
    task_context=$(redis-cli get "cfn_loop:task:$task_id:context" 2>/dev/null || echo "{}")

    # Extract completed agents list
    local completed_agents
    completed_agents=$(redis-cli lrange "swarm:$task_id:completed_agents" 0 -1 2>/dev/null | jq -R . | jq -s . || echo "[]")

    # Update summary
    json_data=$(echo "$json_data" | jq \
        --argjson total "$agent_count" \
        --argjson signals "$completion_signals" \
        --argjson avg "$avg_confidence" \
        --argjson cc "$confidence_count" \
        --argjson done_list "$completed_agents" \
        '.summary += {
            total_agents: $total,
            completion_signals: $signals,
            average_confidence: $avg,
            confidence_scores_count: $cc,
            completed_agents: $done_list,
            extraction_status: "success"
        }')

    # Add task context as an opaque string (it may or may not be JSON itself).
    json_data=$(echo "$json_data" | jq --arg ctx "$task_context" '.metadata.task_context = $ctx')

    # Add performance metrics if requested
    if [[ "$INCLUDE_PERFORMANCE" == true ]]; then
        local redis_memory redis_clients
        redis_memory=$(redis-cli info memory 2>/dev/null | grep "used_memory_human" | cut -d: -f2 | tr -d '\r' || echo "unknown")
        redis_clients=$(redis-cli info clients 2>/dev/null | grep "connected_clients" | cut -d: -f2 | tr -d '\r' || echo "0")
        # INFO output may be empty if Redis became unreachable mid-run.
        if ! [[ "$redis_clients" =~ ^[0-9]+$ ]]; then
            redis_clients=0
        fi

        json_data=$(echo "$json_data" | jq \
            --arg mem "$redis_memory" \
            --argjson clients "$redis_clients" \
            '.metadata.performance = {
                redis_memory_usage: $mem,
                redis_connected_clients: $clients,
                extraction_duration_ms: 0
            }')
    fi

    # Write to file
    echo "$json_data" | jq '.' > "$output_file"

    [[ "$VERBOSE" == true ]] && echo "Data extracted to: $output_file"

    # Generate summary report
    local summary_file="$OUTPUT_DIR/cfn-loop-${task_id}-summary.txt"
    cat > "$summary_file" << EOF
CFN Loop Data Extraction Summary
================================

Task ID: $task_id
Extraction Time: $(date -u +%Y-%m-%dT%H:%M:%SZ)
Output File: $output_file

Statistics:
- Redis Keys Analyzed: $key_count
- Total Agents: $agent_count
- Completion Signals: $completion_signals
- Average Confidence: $avg_confidence
- Confidence Scores: $confidence_count

Agent Types:
$(for agent_id in "${!agent_types[@]}"; do
    echo "- $agent_id: ${agent_types[$agent_id]} (Loop ${agent_loops[$agent_id]})"
done | sort)

Status: Successfully extracted
EOF

    [[ "$VERBOSE" == true ]] && echo "Summary report generated: $summary_file"

    return 0
}
|
|
279
|
+
|
|
280
|
+
# Main execution: announce the configuration, run the extraction for every
# requested task, then list the artifacts that were produced.
printf '%s\n' \
    "CFN Redis Data Extraction Started" \
    "=================================" \
    "Output Directory: $OUTPUT_DIR" \
    "Include Performance: $INCLUDE_PERFORMANCE" \
    ""

# Process each task independently; one failure does not stop the batch.
for task_id in "${TASK_IDS[@]}"; do
    echo "Processing task: $task_id"
    if extract_task_data "$task_id"; then
        echo "✅ Successfully extracted data for: $task_id"
    else
        echo "❌ Failed to extract data for: $task_id"
    fi
    echo ""
done

printf '%s\n' \
    "CFN Redis Data Extraction Completed" \
    "===================================" \
    "Output Directory: $OUTPUT_DIR" \
    "Files Generated:"
find "$OUTPUT_DIR" -name "cfn-loop-*.json" -o -name "cfn-loop-*.txt" |
    while IFS= read -r artifact; do
        echo " - $artifact"
    done

exit 0
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
#!/bin/bash

##############################################################################
# Security Scanner for Post-Edit Hooks
# Version: 1.0.0
##############################################################################

set -euo pipefail

# security_scan <file_path> [agent_id]
#
# Runs grep-based heuristics against a file and prints a single-line JSON
# report: {"confidence":N,"issues":[...],"details":"<json-encoded string>"}.
# Returns 0 when no issues were found, 1 otherwise.
#
# Confidence starts at 0.9 and is LOWERED to the worst finding's score.
# (The original simply overwrote it, so a later low-severity match could
# raise the score back above an earlier critical finding.)
security_scan() {
    local file_path="$1"
    local agent_id="${2:-unknown}"

    # Missing file: report and bail out. The original built this JSON in
    # single quotes with tripled backslashes, emitting literal \\\" and thus
    # invalid JSON; the escaping is normalized here.
    if [[ ! -f "$file_path" ]]; then
        echo '{"confidence":0,"issues":[{"severity":"error","message":"File not found","line":0}],"details":"{\"error\":\"file_not_found\"}"}'
        return 1
    fi

    # Basic security checks
    local issues=()
    local confidence=0.9

    # Keep the lowest (worst) confidence seen so far.
    _keep_min() {
        awk -v a="$confidence" -v b="$1" 'BEGIN { print (a < b) ? a : b }'
    }

    # Check for potential secrets/passwords
    if grep -i -E "(password|secret|key|token)\s*[:=]\s*['\"]?[a-zA-Z0-9+/=]{20,}" "$file_path" >/dev/null 2>&1; then
        issues+=('{"severity":"critical","message":"Potential hardcoded secret detected","type":"secret_exposure"}')
        confidence=$(_keep_min 0.3)
    fi

    # Check for API keys patterns
    if grep -E "(AIza[A-Za-z0-9_-]{35}|[a-zA-Z0-9_-]{40,})" "$file_path" >/dev/null 2>&1; then
        issues+=('{"severity":"high","message":"Potential API key pattern detected","type":"api_key"}')
        confidence=$(_keep_min 0.5)
    fi

    # Check for SQL built from interpolated variables (possible injection).
    # The original pattern ended with a stray "\)" alternative; this matches
    # a "$var" or "${var}" interpolation after a SQL keyword.
    if grep -E '(SELECT|INSERT|UPDATE|DELETE).*\$\{?[A-Za-z_]' "$file_path" >/dev/null 2>&1; then
        issues+=('{"severity":"medium","message":"Unsanitized variable in SQL query","type":"sql_injection"}')
        confidence=$(_keep_min 0.7)
    fi

    # Check for eval() usage (security risk)
    if grep -E 'eval\s*\(' "$file_path" >/dev/null 2>&1; then
        issues+=('{"severity":"medium","message":"Use of eval() function detected","type":"eval_usage"}')
        confidence=$(_keep_min 0.8)
    fi

    # Check for shell command injection patterns
    if grep -E '(exec|system|shell_exec)\s*\(' "$file_path" >/dev/null 2>&1; then
        issues+=('{"severity":"medium","message":"Use of system execution functions","type":"command_injection"}')
        confidence=$(_keep_min 0.8)
    fi

    # Build JSON result
    local issues_json="[]"
    if [[ ${#issues[@]} -gt 0 ]]; then
        issues_json="[$(IFS=','; echo "${issues[*]}")]"
    fi

    # "details" is a JSON string that itself contains encoded JSON — hence
    # the escaped inner quotes.
    local scan_result
    scan_result="{\"confidence\":$confidence,\"issues\":$issues_json,\"details\":\"{\\\"scanner\\\":\\\"basic-security\\\",\\\"timestamp\\\":\\\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\\\",\\\"file\\\":\\\"$file_path\\\",\\\"agent_id\\\":\\\"$agent_id\\\"}\"}"

    echo "$scan_result"

    # Return appropriate exit code based on findings
    if [[ ${#issues[@]} -gt 0 ]]; then
        return 1
    else
        return 0
    fi
}
|
|
76
|
+
|
|
77
|
+
# Main execution — only runs when invoked directly, not when sourced as a
# library by a hook pipeline.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    if [[ $# -lt 1 ]]; then
        echo "Usage: $0 <file_path> [--agent-id <id>]" >&2
        exit 1
    fi

    # First positional argument is the file to scan.
    file_path="$1"
    agent_id="unknown"

    # Walk the entire argument list; anything other than --agent-id is skipped.
    while [[ $# -gt 0 ]]; do
        if [[ "$1" == "--agent-id" ]]; then
            agent_id="$2"
            shift 2
        else
            shift
        fi
    done

    security_scan "$file_path" "$agent_id"
fi
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
#!/bin/bash

##############################################################################
# Pre-Edit Backup Script - Creates safe file backups before modifications
# Version: 1.0.0
##############################################################################

set -euo pipefail

# create_backup <file_path> [agent_id] [project_root]
#
# Copies <file_path> into <project_root>/.backups/<agent_id>/<ts>_<md5>/,
# writes metadata.json describing the backup, and generates an executable
# revert.sh that restores the original file.
#
# Prints the backup directory path on stdout (callers capture it);
# human-readable status messages go to stderr.
create_backup() {
    local file_path="$1"
    local agent_id="${2:-unknown}"
    local project_root="${3:-$(pwd)}"

    # Validate inputs
    if [[ -z "$file_path" ]]; then
        echo "Error: File path is required" >&2
        exit 1
    fi

    # New files have nothing to copy yet: reserve AND create a backup
    # location so the caller receives a usable path. (The original printed
    # the path without creating the directory.)
    if [[ ! -f "$file_path" ]]; then
        echo "Warning: File does not exist: $file_path" >&2
        local new_file_dir
        new_file_dir="$project_root/.backups/$agent_id/new-file-$(date +%s)-$(echo "$file_path" | tr '/' '_' | tr ' ' '_')"
        mkdir -p "$new_file_dir"
        echo "$new_file_dir"
        return 0
    fi

    # Create backup directory structure. Declarations are split from the
    # command substitutions so a failing command is not masked by `local`
    # (its exit status would otherwise be swallowed under `set -e`).
    local backup_dir="$project_root/.backups/$agent_id"
    local timestamp file_hash
    timestamp=$(date +%s)
    # md5sum is GNU coreutils; fall back to BSD `md5 -q` on macOS.
    if command -v md5sum >/dev/null 2>&1; then
        file_hash=$(md5sum "$file_path" | cut -d' ' -f1)
    else
        file_hash=$(md5 -q "$file_path")
    fi
    local backup_name="${timestamp}_${file_hash}"

    # Create full backup path
    local full_backup_path="$backup_dir/$backup_name"
    mkdir -p "$full_backup_path"

    # Copy original file to backup location
    cp "$file_path" "$full_backup_path/original"

    # Store backup metadata
    cat > "$full_backup_path/metadata.json" << EOF
{
  "timestamp": "$timestamp",
  "agent_id": "$agent_id",
  "original_file": "$file_path",
  "file_hash": "$file_hash",
  "backup_path": "$full_backup_path",
  "created_at": "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
}
EOF

    # Create revert script; the unquoted heredoc bakes the concrete paths in
    # at backup time (intentional — the script must work stand-alone later).
    cat > "$full_backup_path/revert.sh" << EOF
#!/bin/bash
# Revert script for $file_path
set -euo pipefail

echo "Reverting file: $file_path"
cp "$full_backup_path/original" "$file_path"
echo "✅ File reverted successfully"
EOF

    chmod +x "$full_backup_path/revert.sh"

    # Output backup path for caller (stdout is the machine-readable channel).
    echo "$full_backup_path"

    echo "✅ Backup created: $full_backup_path" >&2
}
|
|
75
|
+
|
|
76
|
+
# Main execution — only runs when invoked directly, not when sourced as a
# library by another hook script.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    if [[ $# -lt 1 ]]; then
        echo "Usage: $0 <file_path> [--agent-id <id>] [--project-root <path>]" >&2
        exit 1
    fi

    # First positional argument is the file to back up.
    file_path="$1"
    agent_id="unknown"
    project_root="$(pwd)"

    # Walk the entire argument list; unrecognized tokens are skipped.
    while [[ $# -gt 0 ]]; do
        case "$1" in
            --agent-id)     agent_id="$2";     shift 2 ;;
            --project-root) project_root="$2"; shift 2 ;;
            *)              shift ;;
        esac
    done

    create_backup "$file_path" "$agent_id" "$project_root"
fi
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "claude-flow-novice",
|
|
3
|
-
"version": "2.14.
|
|
3
|
+
"version": "2.14.33",
|
|
4
4
|
"description": "AI agent orchestration framework with namespace-isolated skills, agents, and CFN Loop validation. Safe installation with ~0.01% collision risk.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"type": "module",
|
|
@@ -113,7 +113,17 @@
|
|
|
113
113
|
"portal:restart": "npm run portal:stop && npm run portal:start",
|
|
114
114
|
"portal:status": "./scripts/portal-status.sh",
|
|
115
115
|
"test:watch": "jest --watch",
|
|
116
|
-
"test:coverage": "jest --coverage"
|
|
116
|
+
"test:coverage": "jest --coverage",
|
|
117
|
+
"docker:build": "docker build -f Dockerfile.production -t claude-flow-novice:production .",
|
|
118
|
+
"docker:deploy": "./scripts/deploy-production.sh",
|
|
119
|
+
"docker:stop": "docker-compose -f docker-compose.production.yml down",
|
|
120
|
+
"docker:logs": "docker-compose -f docker-compose.production.yml logs -f",
|
|
121
|
+
"docker:status": "docker-compose -f docker-compose.production.yml ps",
|
|
122
|
+
"docker:test": "node tests/docker/production-deployment-test.js",
|
|
123
|
+
"docker:orchestrator": "node tests/docker/production-agent-orchestrator.js",
|
|
124
|
+
"test:docker:redis-coordination": "node tests/docker/cfn-coordination-test.js",
|
|
125
|
+
"test:docker:concurrent": "node tests/hello-world-docker/concurrent-agent-test.js",
|
|
126
|
+
"test:docker:container": "bash tests/docker/container-test-runner.sh"
|
|
117
127
|
},
|
|
118
128
|
"dependencies": {
|
|
119
129
|
"@anthropic-ai/sdk": "^0.67.0",
|
|
@@ -123,14 +133,13 @@
|
|
|
123
133
|
"commander": "^11.1.0",
|
|
124
134
|
"dotenv": "^17.2.3",
|
|
125
135
|
"glob": "^11.0.3",
|
|
136
|
+
"ioredis": "^5.8.2",
|
|
126
137
|
"lodash": "^4.17.21",
|
|
127
138
|
"redis": "^5.8.3",
|
|
128
139
|
"socket.io": "^4.8.1",
|
|
129
140
|
"sqlite": "^5.1.1",
|
|
130
141
|
"sqlite3": "^5.1.7"
|
|
131
142
|
},
|
|
132
|
-
"optionalDependencies": {},
|
|
133
|
-
"peerDependencies": {},
|
|
134
143
|
"engines": {
|
|
135
144
|
"node": ">=18.0.0"
|
|
136
145
|
},
|