claude-flow-novice 2.10.7 → 2.10.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/cfn/CFN_LOOP_TASK_MODE.md +94 -0
- package/.claude/commands/cfn/cfn-loop.md +4 -3
- package/.claude/hooks/cfn-invoke-pre-edit.sh +88 -0
- package/.claude/skills/cfn-agent-spawning/spawn-worker.sh +176 -0
- package/claude-assets/agents/csuite/cto-agent.md +371 -0
- package/claude-assets/agents/marketing_hybrid/cost_tracker.md +13 -0
- package/claude-assets/agents/marketing_hybrid/docker_deployer.md +13 -0
- package/claude-assets/agents/marketing_hybrid/zai_worker_spawner.md +13 -0
- package/claude-assets/commands/cfn/CFN_LOOP_TASK_MODE.md +94 -0
- package/claude-assets/commands/cfn/cfn-loop.md +4 -3
- package/claude-assets/hooks/cfn-invoke-pre-edit.sh +88 -0
- package/claude-assets/hooks/post-edit.config.json +19 -8
- package/claude-assets/skills/cfn-agent-spawning/spawn-worker.sh +176 -0
- package/claude-assets/skills/pre-edit-backup/backup.sh +130 -0
- package/claude-assets/skills/pre-edit-backup/cleanup.sh +155 -0
- package/claude-assets/skills/pre-edit-backup/restore.sh +128 -0
- package/claude-assets/skills/pre-edit-backup/revert-file.sh +168 -0
- package/dist/agents/agent-loader.js +146 -165
- package/dist/agents/agent-loader.js.map +1 -1
- package/dist/cli/config-manager.js +91 -109
- package/dist/cli/config-manager.js.map +1 -1
- package/package.json +1 -1
- package/scripts/marketing_hybrid_deployment.sh +45 -0
- package/scripts/redis-prometheus-exporter.sh +33 -0
- package/scripts/track-zai-costs.sh +19 -0
- package/claude-assets/skills/team-provider-routing/spawn-worker.sh +0 -91
|
#!/bin/bash
# spawn-worker.sh — route CFN worker spawning to the provider configured
# for a team in .claude/cfn-config/team-providers.json.
#
# Usage: spawn-worker.sh <team> [complexity] [provider_mode] [agent_type] [task_context]

set -euo pipefail

# Resolve the project root relative to this script's own location so it
# works no matter where it is invoked from.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"

# Team providers configuration consumed by every helper below.
PROVIDERS_CONFIG="${PROJECT_ROOT}/.claude/cfn-config/team-providers.json"

# Fail fast when the configuration file is absent; diagnostics go to
# stderr so they are never captured by command substitution.
if [[ ! -f "$PROVIDERS_CONFIG" ]]; then
  echo "Error: Team providers configuration not found at $PROVIDERS_CONFIG" >&2
  exit 1
fi

# Every config lookup below shells out to jq; check it once up front
# instead of failing with a cryptic "command not found" mid-run.
if ! command -v jq &>/dev/null; then
  echo "Error: jq is required but was not found in PATH" >&2
  exit 1
fi
# Validate that a provider entry exists for a team/role pair.
#
# Arguments:
#   $1 - team name
#   $2 - role: "coordinator" or "workers"
# Exits 1 (message on stderr) when the entry is missing or null.
validate_provider_config() {
  local team="$1"
  local role="$2"

  # Pass names via --arg and generic indexing so they are treated as
  # data, not jq filter syntax (names with dots/dashes would otherwise
  # break or be misinterpreted when spliced into the filter string).
  if ! jq -e --arg team "$team" --arg role "$role" \
      '.teams[$team][$role]' "$PROVIDERS_CONFIG" &>/dev/null; then
    echo "Error: Invalid or missing provider configuration for team=${team}, role=${role}" >&2
    exit 1
  fi
}
# Resolve the worker model for a team at the given complexity tier.
#
# Arguments:
#   $1 - team name
#   $2 - complexity: "simple" or "complex"
# Outputs: the model identifier on stdout. When the requested tier is
# not configured, falls back to the tier named by
# .global_config.default_complexity ("simple" when unset).
select_model() {
  local team="$1"
  local complexity="$2"
  local model default_complexity

  # Declaration is split from assignment so a jq failure is not masked
  # by `local` and correctly aborts under `set -e`; --arg keeps the
  # names out of the filter syntax.
  model=$(jq -r --arg team "$team" --arg tier "$complexity" \
    '.teams[$team].workers.models[$tier]' "$PROVIDERS_CONFIG")

  if [[ "$model" == "null" ]]; then
    # Fall back to the globally configured default tier.
    default_complexity=$(jq -r '.global_config.default_complexity // "simple"' "$PROVIDERS_CONFIG")
    model=$(jq -r --arg team "$team" --arg tier "$default_complexity" \
      '.teams[$team].workers.models[$tier]' "$PROVIDERS_CONFIG")
  fi

  echo "$model"
}
# Resolve the API key for a team/role from the environment variable
# named in the configuration (apiKeyEnvVar).
#
# Arguments:
#   $1 - team name
#   $2 - role: "coordinator" or "workers"
# Outputs: the key value on stdout.
# Exits 1 (message on stderr) when the config entry or env var is missing.
#
# NOTE: error messages MUST go to stderr — callers capture stdout via
# $(...), so an error printed to stdout would silently become the "key".
get_api_key() {
  local team="$1"
  local role="$2"
  local api_key_env_var api_key_value

  # --arg keeps team/role out of the jq filter syntax; decl/assign split
  # so a jq failure aborts under `set -e` instead of being masked.
  api_key_env_var=$(jq -r --arg team "$team" --arg role "$role" \
    '.teams[$team][$role].apiKeyEnvVar' "$PROVIDERS_CONFIG")

  if [[ "$api_key_env_var" == "null" ]]; then
    echo "Error: apiKeyEnvVar not found for team=${team}, role=${role}" >&2
    exit 1
  fi

  # Indirect expansion: read the value of the variable whose NAME is
  # stored in api_key_env_var; default to empty so `set -u` is safe.
  api_key_value="${!api_key_env_var:-}"

  if [[ -z "$api_key_value" ]]; then
    echo "Error: API key not found in environment variable: $api_key_env_var" >&2
    exit 1
  fi

  echo "$api_key_value"
}
# Configure the environment for spawning a worker on behalf of a team.
#
# Arguments:
#   $1 - team name
#   $2 - complexity: simple|complex (default: simple)
#   $3 - provider_mode: auto|zai|anthropic (default: auto)
#   $4 - agent_type (optional; reserved for the actual spawn call)
#   $5 - task_context (optional; reserved for the actual spawn call)
#
# Exports ZAI_* or ANTHROPIC_* variables for the selected provider and
# prints a configuration summary. Exits 1 on any config/lookup failure.
spawn_worker() {
  local team="$1"
  local complexity="${2:-simple}"
  local provider_mode="${3:-auto}"
  local agent_type="${4:-}"
  local task_context="${5:-}"
  local provider base_url model api_key

  # Ensure a workers entry exists before reading any of its fields.
  validate_provider_config "$team" "workers"

  # Declaration is split from assignment so a failing lookup is not
  # masked by `local` and correctly aborts under `set -e`. This matters
  # most for get_api_key: previously `local api_key=$(get_api_key ...)`
  # swallowed its failure and the script carried on with the error
  # message captured as the "key".
  provider=$(jq -r --arg team "$team" '.teams[$team].workers.provider' "$PROVIDERS_CONFIG")
  base_url=$(jq -r --arg team "$team" '.teams[$team].workers.baseUrl' "$PROVIDERS_CONFIG")
  model=$(select_model "$team" "$complexity")
  api_key=$(get_api_key "$team" "workers")

  # Provider routing: "auto" follows the provider configured for the
  # team; an explicit mode forces that provider with the same
  # credentials/model already resolved above (no duplicate lookups).
  case "$provider_mode" in
    auto)
      case "$provider" in
        zai)
          echo "Spawning Z.ai worker for team ${team} (Model: ${model}, Complexity: ${complexity})"
          export ZAI_API_KEY="$api_key"
          export ZAI_BASE_URL="$base_url"
          export ZAI_MODEL="$model"
          # Call actual spawning logic (to be implemented)
          # npx claude-flow-novice spawn "$agent_type" \
          #   --provider zai --model "$model" --context "$task_context"
          ;;
        anthropic)
          echo "Spawning Anthropic worker for team ${team} (Model: ${model}, Complexity: ${complexity})"
          export ANTHROPIC_API_KEY="$api_key"
          export ANTHROPIC_BASE_URL="$base_url"
          export ANTHROPIC_MODEL="$model"
          # Call actual spawning logic (to be implemented)
          # npx claude-flow-novice spawn "$agent_type" \
          #   --provider anthropic --model "$model" --context "$task_context"
          ;;
        *)
          echo "Error: Unsupported provider: ${provider}" >&2
          exit 1
          ;;
      esac
      ;;
    zai)
      echo "Force spawning Z.ai worker for team ${team} (Model: ${model})"
      export ZAI_API_KEY="$api_key"
      export ZAI_BASE_URL="$base_url"
      export ZAI_MODEL="$model"
      ;;
    anthropic)
      echo "Force spawning Anthropic worker for team ${team} (Model: ${model})"
      export ANTHROPIC_API_KEY="$api_key"
      export ANTHROPIC_BASE_URL="$base_url"
      export ANTHROPIC_MODEL="$model"
      ;;
    *)
      echo "Error: Invalid provider mode: ${provider_mode}" >&2
      exit 1
      ;;
  esac

  # Log the successful configuration.
  echo "Worker configuration complete:"
  echo "  Team: $team"
  echo "  Provider: $provider"
  echo "  Model: $model"
  echo "  Base URL: $base_url"
  echo "  Complexity: $complexity"
}
# Allow the file to be sourced for its functions, or executed directly.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
  if [[ $# -lt 1 ]]; then
    # Usage/diagnostic text belongs on stderr, paired with exit 1.
    {
      echo "Usage: $0 <team> [complexity] [provider_mode] [agent_type] [task_context]"
      echo ""
      echo "Arguments:"
      echo "  team          - Team name (marketing, engineering, sales, support, finance)"
      echo "  complexity    - simple|complex (default: simple)"
      echo "  provider_mode - auto|zai|anthropic (default: auto)"
      echo "  agent_type    - Agent type to spawn (optional)"
      echo "  task_context  - Task context for agent (optional)"
    } >&2
    exit 1
  fi

  spawn_worker "$@"
fi
|
#!/bin/bash

# Pre-Edit Backup Script
# Creates a timestamped backup with SHA-256 hash and JSON metadata.
#
# Usage: backup.sh FILE_PATH AGENT_ID
#
# Arguments:
#   FILE_PATH - Absolute path to file to backup
#   AGENT_ID  - Unique identifier for the agent creating the backup
#
# Returns:
#   Backup directory path on stdout on success
#   Exit code 1 on failure
#
# Example:
#   ./.claude/skills/pre-edit-backup/backup.sh "/path/to/file.txt" "backend-dev-1"

set -euo pipefail

# === Input Validation ===

# ${N:-} defaults keep `set -u` from aborting with a raw "unbound
# variable" error before the friendly usage messages below can run.
FILE_PATH="${1:-}"
AGENT_ID="${2:-}"

if [[ -z "$FILE_PATH" ]]; then
  echo "Error: No file path provided" >&2
  echo "Usage: backup.sh FILE_PATH AGENT_ID" >&2
  exit 1
fi

if [[ -z "$AGENT_ID" ]]; then
  echo "Error: No agent ID provided" >&2
  echo "Usage: backup.sh FILE_PATH AGENT_ID" >&2
  exit 1
fi

if [[ ! -f "$FILE_PATH" ]]; then
  echo "Error: File does not exist: $FILE_PATH" >&2
  exit 1
fi

# === Configuration ===

BACKUP_BASE_DIR=".backups"
DEFAULT_TTL=86400 # 24 hours in seconds

# === Tool Availability Checks ===

# Prefer sha256sum (GNU coreutils); fall back to shasum -a 256 (macOS/BSD).
if command -v sha256sum &>/dev/null; then
  HASH_TOOL="sha256sum"
elif command -v shasum &>/dev/null; then
  HASH_TOOL="shasum -a 256"
else
  echo "Error: Neither sha256sum nor shasum found. Cannot generate file hash." >&2
  exit 1
fi

# jq is optional; degrade to hand-written JSON when absent.
if ! command -v jq &>/dev/null; then
  echo "Warning: jq not found. Metadata will be created using basic shell." >&2
  USE_JQ=false
else
  USE_JQ=true
fi

# === Generate Backup Metadata ===

# Millisecond timestamp where supported (GNU date). BSD/macOS date does
# NOT fail on %3N — it prints the "3N" literally and exits 0 — so the
# exit-status fallback alone is not enough; validate the output is all
# digits and fall back to whole seconds otherwise.
TIMESTAMP=$(date +%s%3N 2>/dev/null || true)
if [[ ! "$TIMESTAMP" =~ ^[0-9]+$ ]]; then
  TIMESTAMP=$(date +%s)
fi

# HASH_TOOL is intentionally unquoted: it may carry arguments
# ("shasum -a 256") that must word-split.
FILE_HASH=$($HASH_TOOL "$FILE_PATH" | cut -d' ' -f1)

# === Create Backup Directory ===

BACKUP_DIR="${BACKUP_BASE_DIR}/${AGENT_ID}/${TIMESTAMP}_${FILE_HASH}"

if ! mkdir -p "$BACKUP_DIR" 2>/dev/null; then
  echo "Error: Failed to create backup directory: $BACKUP_DIR" >&2
  exit 1
fi

# Set secure permissions (owner read/write/execute only); best-effort.
chmod 700 "$BACKUP_DIR" 2>/dev/null || true

# === Copy Original File ===

if ! cp "$FILE_PATH" "${BACKUP_DIR}/original_file" 2>/dev/null; then
  echo "Error: Failed to copy file to backup directory" >&2
  rm -rf "$BACKUP_DIR"
  exit 1
fi

# === Generate Metadata ===

METADATA_FILE="${BACKUP_DIR}/backup_metadata.json"

if [[ "$USE_JQ" == true ]]; then
  # jq handles all JSON escaping for us.
  jq -n \
    --arg agent_id "$AGENT_ID" \
    --arg original_path "$FILE_PATH" \
    --arg timestamp "$TIMESTAMP" \
    --arg file_hash "$FILE_HASH" \
    --arg ttl "$DEFAULT_TTL" \
    '{
      agent_id: $agent_id,
      original_path: $original_path,
      backup_timestamp: ($timestamp | tonumber),
      file_hash: $file_hash,
      backup_ttl: ($ttl | tonumber),
      backup_status: "active"
    }' > "$METADATA_FILE"
else
  # Fallback: manual JSON generation.
  # NOTE(review): values are not JSON-escaped here; a path containing
  # double quotes or backslashes would yield invalid JSON — acceptable
  # only as a degraded mode when jq is unavailable.
  cat > "$METADATA_FILE" <<EOF
{
  "agent_id": "$AGENT_ID",
  "original_path": "$FILE_PATH",
  "backup_timestamp": $TIMESTAMP,
  "file_hash": "$FILE_HASH",
  "backup_ttl": $DEFAULT_TTL,
  "backup_status": "active"
}
EOF
fi

# === Return Backup Path ===

echo "$BACKUP_DIR"
exit 0
#!/bin/bash

# Pre-Edit Backup Cleanup Script
# Removes expired backups based on each backup's TTL metadata.
# Uses flock for concurrency control so only one cleanup runs at a time.
#
# Usage: cleanup.sh [--dry-run] [--log-file FILE]
#
# Options:
#   --dry-run   - Show what would be deleted without actually deleting
#   --log-file  - Path to log file (default: none, outputs to stdout)
#
# Returns:
#   Exit code 0 on success
#   Exit code 1 if cleanup already in progress (or on bad usage)
#
# Example:
#   ./.claude/skills/pre-edit-backup/cleanup.sh
#   ./.claude/skills/pre-edit-backup/cleanup.sh --dry-run
#   ./.claude/skills/pre-edit-backup/cleanup.sh --log-file /tmp/backup-cleanup.log

set -euo pipefail

# === Configuration ===

BACKUP_BASE_DIR=".backups"
CURRENT_TIME=$(date +%s)
DRY_RUN=false
LOG_FILE=""

# === Parse Options ===

while [[ "$#" -gt 0 ]]; do
  case $1 in
    --dry-run)
      DRY_RUN=true
      shift
      ;;
    --log-file)
      # ${2:-} prevents a raw "unbound variable" abort under `set -u`
      # when the flag is given without a value.
      if [[ -z "${2:-}" ]]; then
        echo "Error: --log-file requires a FILE argument" >&2
        exit 1
      fi
      LOG_FILE="$2"
      shift 2
      ;;
    *)
      echo "Error: Unknown option: $1" >&2
      echo "Usage: cleanup.sh [--dry-run] [--log-file FILE]" >&2
      exit 1
      ;;
  esac
done

# === Logging Function ===

# Write a timestamped message to LOG_FILE when set, else stdout.
log() {
  local message="$1"
  local timestamp
  # Declaration split from assignment so a date failure is not masked.
  timestamp=$(date '+%Y-%m-%d %H:%M:%S')
  local log_line="[$timestamp] $message"

  if [[ -n "$LOG_FILE" ]]; then
    echo "$log_line" >> "$LOG_FILE"
  else
    echo "$log_line"
  fi
}

# === Prevent Concurrent Cleanup ===

if [[ ! -d "$BACKUP_BASE_DIR" ]]; then
  log "Backup directory does not exist: $BACKUP_BASE_DIR"
  exit 0
fi

LOCKFILE="${BACKUP_BASE_DIR}/cleanup.lock"

# Ensure lock file directory exists.
mkdir -p "$(dirname "$LOCKFILE")" 2>/dev/null || true

# Acquire lock (non-blocking) on fd 9.
# NOTE(review): flock(1) is not shipped on stock macOS; this assumes a
# Linux-like environment — confirm target platforms.
exec 9>"$LOCKFILE"
if ! flock -n 9; then
  log "Cleanup already in progress (lock held)"
  exit 1
fi

log "Cleanup started (dry-run: $DRY_RUN)"

# === Cleanup Logic ===

REMOVED_COUNT=0
SKIPPED_COUNT=0
ERROR_COUNT=0

# Metadata parsing below requires jq.
if ! command -v jq &>/dev/null; then
  log "Error: jq is required for cleanup operations"
  exit 1
fi

# Iterate per-agent directories, then per-backup directories inside each.
for agent_dir in "$BACKUP_BASE_DIR"/*; do
  # Skip non-directories (also skips the lockfile, which is a file).
  [[ -d "$agent_dir" ]] || continue
  [[ "$(basename "$agent_dir")" == "cleanup.lock" ]] && continue

  for backup_dir in "$agent_dir"/*; do
    [[ -d "$backup_dir" ]] || continue

    metadata_file="${backup_dir}/backup_metadata.json"

    if [[ ! -f "$metadata_file" ]]; then
      log "Warning: Metadata missing for backup: ${backup_dir}"
      SKIPPED_COUNT=$((SKIPPED_COUNT + 1))
      continue
    fi

    # Extract backup timestamp and TTL; tolerate unreadable metadata.
    backup_timestamp=$(jq -r '.backup_timestamp' "$metadata_file" 2>/dev/null || echo "")
    backup_ttl=$(jq -r '.backup_ttl' "$metadata_file" 2>/dev/null || echo "")

    if [[ -z "$backup_timestamp" || "$backup_timestamp" == "null" || \
          -z "$backup_ttl" || "$backup_ttl" == "null" ]]; then
      log "Warning: Invalid metadata in: ${metadata_file}"
      SKIPPED_COUNT=$((SKIPPED_COUNT + 1))
      continue
    fi

    # backup.sh stores milliseconds when the platform supports it;
    # more than 10 digits means milliseconds — normalize to seconds.
    if [[ ${#backup_timestamp} -gt 10 ]]; then
      backup_timestamp=$((backup_timestamp / 1000))
    fi

    # Remove the backup once its age exceeds its TTL.
    age=$((CURRENT_TIME - backup_timestamp))
    if (( age > backup_ttl )); then
      if [[ "$DRY_RUN" == true ]]; then
        log "Would remove expired backup (age: ${age}s, ttl: ${backup_ttl}s): ${backup_dir}"
        REMOVED_COUNT=$((REMOVED_COUNT + 1))
      else
        if rm -rf "$backup_dir" 2>/dev/null; then
          log "Removed expired backup (age: ${age}s, ttl: ${backup_ttl}s): ${backup_dir}"
          REMOVED_COUNT=$((REMOVED_COUNT + 1))
        else
          log "Error: Failed to remove backup: ${backup_dir}"
          ERROR_COUNT=$((ERROR_COUNT + 1))
        fi
      fi
    fi
  done
done

# === Summary ===

log "Cleanup completed: removed=$REMOVED_COUNT, skipped=$SKIPPED_COUNT, errors=$ERROR_COUNT"

exit 0
#!/bin/bash

# Pre-Edit Restore Script
# Restores a file from a backup directory and marks the backup "restored".
#
# Usage:
#   restore.sh BACKUP_DIR
#   restore.sh --list FILE_PATH AGENT_ID
#
# Arguments:
#   BACKUP_DIR - Path to backup directory containing backup_metadata.json
#   --list     - List available backups for a file (requires FILE_PATH and AGENT_ID)
#
# Returns:
#   Exit code 0 on success
#   Exit code 1 on failure
#
# Example:
#   ./.claude/skills/pre-edit-backup/restore.sh ".backups/backend-dev-1/1698764800000_abc123"
#   ./.claude/skills/pre-edit-backup/restore.sh --list "/path/to/file.txt" "backend-dev-1"

set -euo pipefail

# === Handle --list Mode ===

if [[ "${1:-}" == "--list" ]]; then
  # ${N:-} defaults keep `set -u` from aborting with a raw "unbound
  # variable" error before the friendly usage check below can run.
  FILE_PATH="${2:-}"
  AGENT_ID="${3:-}"

  if [[ -z "$FILE_PATH" ]] || [[ -z "$AGENT_ID" ]]; then
    echo "Error: --list requires FILE_PATH and AGENT_ID" >&2
    exit 1
  fi

  BACKUP_BASE_DIR=".backups"
  AGENT_BACKUP_DIR="${BACKUP_BASE_DIR}/${AGENT_ID}"

  if [[ ! -d "$AGENT_BACKUP_DIR" ]]; then
    echo "No backups found for agent: $AGENT_ID" >&2
    exit 1
  fi

  echo "Available backups for $FILE_PATH (agent: $AGENT_ID):"
  echo "---"

  # Scan every backup owned by the agent and print those whose recorded
  # original_path matches the requested file.
  FOUND_BACKUPS=0
  for backup_dir in "$AGENT_BACKUP_DIR"/*; do
    if [[ -f "${backup_dir}/backup_metadata.json" ]]; then
      ORIGINAL=$(jq -r '.original_path' "${backup_dir}/backup_metadata.json" 2>/dev/null || echo "")
      if [[ "$ORIGINAL" == "$FILE_PATH" ]]; then
        TIMESTAMP=$(jq -r '.backup_timestamp' "${backup_dir}/backup_metadata.json" 2>/dev/null || echo "unknown")
        STATUS=$(jq -r '.backup_status' "${backup_dir}/backup_metadata.json" 2>/dev/null || echo "unknown")
        echo "Backup: $(basename "$backup_dir")"
        echo "  Timestamp: $TIMESTAMP"
        echo "  Status: $STATUS"
        echo "  Path: $backup_dir"
        echo "---"
        FOUND_BACKUPS=$((FOUND_BACKUPS + 1))
      fi
    fi
  done

  if [[ $FOUND_BACKUPS -eq 0 ]]; then
    echo "No backups found for this file."
    exit 1
  fi

  exit 0
fi

# === Restore Mode ===

# ${1:-} so a missing argument reaches the friendly error instead of a
# `set -u` unbound-variable abort.
BACKUP_DIR="${1:-}"

if [[ -z "$BACKUP_DIR" ]]; then
  echo "Error: No backup directory provided" >&2
  echo "Usage: restore.sh BACKUP_DIR" >&2
  exit 1
fi

if [[ ! -d "$BACKUP_DIR" ]]; then
  echo "Error: Backup directory does not exist: $BACKUP_DIR" >&2
  exit 1
fi

METADATA_FILE="${BACKUP_DIR}/backup_metadata.json"
BACKUP_FILE="${BACKUP_DIR}/original_file"

if [[ ! -f "$METADATA_FILE" ]]; then
  echo "Error: Backup metadata not found: $METADATA_FILE" >&2
  exit 1
fi

if [[ ! -f "$BACKUP_FILE" ]]; then
  echo "Error: Backup file not found: $BACKUP_FILE" >&2
  exit 1
fi

# Metadata parsing requires jq.
if ! command -v jq &>/dev/null; then
  echo "Error: jq is required for restore operations" >&2
  exit 1
fi

# Where the file should be restored to. `|| echo ""` ensures a jq
# failure reaches the friendly diagnostic below instead of exiting
# silently under `set -e`.
ORIGINAL_PATH=$(jq -r '.original_path' "$METADATA_FILE" 2>/dev/null || echo "")

if [[ -z "$ORIGINAL_PATH" ]] || [[ "$ORIGINAL_PATH" == "null" ]]; then
  echo "Error: Failed to read original path from metadata" >&2
  exit 1
fi

# Copy the saved file back into place.
if ! cp "$BACKUP_FILE" "$ORIGINAL_PATH" 2>/dev/null; then
  echo "Error: Failed to restore file to: $ORIGINAL_PATH" >&2
  exit 1
fi

# Record that this backup has been used (best-effort: a failure here
# leaves the restore intact and only warns).
TEMP_FILE=$(mktemp)
if jq '.backup_status = "restored"' "$METADATA_FILE" > "$TEMP_FILE" 2>/dev/null; then
  mv "$TEMP_FILE" "$METADATA_FILE"
else
  echo "Warning: Failed to update backup status in metadata" >&2
  rm -f "$TEMP_FILE"
fi

echo "File restored from backup: ${BACKUP_DIR}"
echo "Restored to: ${ORIGINAL_PATH}"
exit 0