@claude-flow/cli 3.0.0-alpha.37 → 3.0.0-alpha.38
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/helpers/README.md +97 -0
- package/.claude/helpers/adr-compliance.sh +186 -0
- package/.claude/helpers/auto-commit.sh +178 -0
- package/.claude/helpers/checkpoint-manager.sh +251 -0
- package/.claude/helpers/daemon-manager.sh +252 -0
- package/.claude/helpers/ddd-tracker.sh +144 -0
- package/.claude/helpers/github-safe.js +106 -0
- package/.claude/helpers/github-setup.sh +28 -0
- package/.claude/helpers/guidance-hook.sh +13 -0
- package/.claude/helpers/guidance-hooks.sh +102 -0
- package/.claude/helpers/health-monitor.sh +108 -0
- package/.claude/helpers/learning-hooks.sh +329 -0
- package/.claude/helpers/learning-optimizer.sh +127 -0
- package/.claude/helpers/learning-service.mjs +1144 -0
- package/.claude/helpers/metrics-db.mjs +488 -0
- package/.claude/helpers/pattern-consolidator.sh +86 -0
- package/.claude/helpers/perf-worker.sh +160 -0
- package/.claude/helpers/quick-start.sh +19 -0
- package/.claude/helpers/security-scanner.sh +127 -0
- package/.claude/helpers/setup-mcp.sh +18 -0
- package/.claude/helpers/standard-checkpoint-hooks.sh +189 -0
- package/.claude/helpers/swarm-comms.sh +353 -0
- package/.claude/helpers/swarm-hooks.sh +761 -0
- package/.claude/helpers/swarm-monitor.sh +211 -0
- package/.claude/helpers/sync-v3-metrics.sh +245 -0
- package/.claude/helpers/update-v3-progress.sh +166 -0
- package/.claude/helpers/v3-quick-status.sh +58 -0
- package/.claude/helpers/v3.sh +111 -0
- package/.claude/helpers/validate-v3-config.sh +216 -0
- package/.claude/helpers/worker-manager.sh +170 -0
- package/dist/src/init/mcp-generator.js +2 -2
- package/dist/src/init/mcp-generator.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1
|
#!/usr/bin/env node

/**
 * Safe GitHub CLI Helper
 * Prevents timeout/quoting issues when using gh commands with special characters.
 *
 * Usage:
 *   ./github-safe.js issue comment 123 "Message with `backticks`"
 *   ./github-safe.js pr create --title "Title" --body "Complex body"
 *
 * Strategy: long body text is written to a temp file and passed via
 * --body-file, and gh is invoked through execFileSync with an argv array
 * (no shell), so backticks, $(...) substitution and other shell
 * metacharacters in ANY argument can never be re-interpreted.
 */

import { execFileSync } from 'child_process';
import { writeFileSync, unlinkSync } from 'fs';
import { tmpdir } from 'os';
import { join } from 'path';
import { randomBytes } from 'crypto';

const args = process.argv.slice(2);

if (args.length < 2) {
  console.log(`
Safe GitHub CLI Helper

Usage:
  ./github-safe.js issue comment <number> <body>
  ./github-safe.js pr comment <number> <body>
  ./github-safe.js issue create --title <title> --body <body>
  ./github-safe.js pr create --title <title> --body <body>

This helper prevents timeout issues with special characters like:
- Backticks in code examples
- Command substitution \$(...)
- Directory paths
- Special shell characters
`);
  process.exit(1);
}

const [command, subcommand, ...restArgs] = args;

/**
 * Run gh with an argv array. No shell is involved, so arguments are passed
 * verbatim and cannot be re-tokenized or expanded.
 * @param {string[]} ghArgs - arguments passed directly to gh
 */
function runGh(ghArgs) {
  execFileSync('gh', ghArgs, {
    stdio: 'inherit',
    timeout: 30000, // 30 second timeout
  });
}

// Handle commands that need body content
if ((command === 'issue' || command === 'pr') &&
    (subcommand === 'comment' || subcommand === 'create')) {

  let bodyIndex = -1;
  let body = '';

  if (subcommand === 'comment' && restArgs.length >= 2) {
    // Simple format: github-safe.js issue comment 123 "body"
    body = restArgs[1];
    bodyIndex = 1;
  } else {
    // Flag format: --body "content"
    bodyIndex = restArgs.indexOf('--body');
    if (bodyIndex !== -1 && bodyIndex < restArgs.length - 1) {
      body = restArgs[bodyIndex + 1];
    }
  }

  if (body) {
    // Use a temporary file for body content so gh reads it verbatim.
    const tmpFile = join(tmpdir(), `gh-body-${randomBytes(8).toString('hex')}.tmp`);

    try {
      writeFileSync(tmpFile, body, 'utf8');

      // Build new argument list with --body-file instead of the inline body.
      const newArgs = [...restArgs];
      if (subcommand === 'comment' && bodyIndex === 1) {
        // Replace positional body with --body-file <tmpFile>
        newArgs[1] = '--body-file';
        newArgs.push(tmpFile);
      } else if (bodyIndex !== -1) {
        // Replace --body <text> with --body-file <tmpFile>
        newArgs[bodyIndex] = '--body-file';
        newArgs[bodyIndex + 1] = tmpFile;
      }

      console.log(`Executing: gh ${command} ${subcommand} ${newArgs.join(' ')}`);
      // BUGFIX: the previous implementation rebuilt the command as one string
      // and ran it through a shell (execSync), which re-introduced the exact
      // quoting problems this helper exists to avoid for non-body arguments.
      runGh([command, subcommand, ...newArgs]);
    } catch (error) {
      console.error('Error:', error.message);
      process.exit(1);
    } finally {
      // Clean up the temp file; cleanup failures are non-fatal.
      try {
        unlinkSync(tmpFile);
      } catch (e) {
        // Ignore cleanup errors
      }
    }
  } else {
    // No body content, execute normally
    runGh(args);
  }
} else {
  // Other commands, execute normally
  runGh(args);
}
|
|
#!/bin/bash
# Setup GitHub integration for Claude Flow

# True when the GitHub CLI binary is on PATH.
have_gh_cli() {
    command -v gh &> /dev/null
}

# True when gh has an active authenticated session.
have_gh_auth() {
    gh auth status &> /dev/null
}

echo "🔗 Setting up GitHub integration..."

if have_gh_cli; then
    echo "✅ GitHub CLI found"

    # Report authentication state without failing the setup.
    if have_gh_auth; then
        echo "✅ GitHub authentication active"
    else
        echo "⚠️  Not authenticated with GitHub"
        echo "Run: gh auth login"
    fi
else
    # gh is optional: inform the user and carry on.
    echo "⚠️  GitHub CLI (gh) not found"
    echo "Install from: https://cli.github.com/"
    echo "Continuing without GitHub features..."
fi

echo ""
echo "📦 GitHub swarm commands available:"
echo "  - npx claude-flow github swarm"
echo "  - npx claude-flow repo analyze"
echo "  - npx claude-flow pr enhance"
echo "  - npx claude-flow issue triage"
#!/bin/bash
# Capture hook guidance for Claude visibility
GUIDANCE_FILE=".claude-flow/last-guidance.txt"
mkdir -p .claude-flow

# Run the given agentic-flow hook, mirroring its output (stdout+stderr)
# to the terminal and to $GUIDANCE_FILE.
run_hook() {
    npx agentic-flow@alpha hooks "$1" "$2" 2>&1 | tee "$GUIDANCE_FILE"
}

if [ "$1" = "route" ]; then
    run_hook route "$2"
elif [ "$1" = "pre-edit" ]; then
    run_hook pre-edit "$2"
fi
|
#!/bin/bash
# Guidance Hooks for Claude Flow V3
# Provides context and routing for Claude Code operations
#
# Dispatches on the first argument (pre-edit, post-edit, pre-command, route,
# session-context, user-prompt). Every branch exits 0 so a failing guidance
# hook can never block the calling tool; unknown commands are a silent no-op.

# Resolve paths relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
CACHE_DIR="$PROJECT_ROOT/.claude-flow"

# Ensure cache directory exists
mkdir -p "$CACHE_DIR" 2>/dev/null || true

# Color codes
# NOTE(review): GREEN is defined but unused in this script.
CYAN='\033[0;36m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
RESET='\033[0m'
DIM='\033[2m'

# Get command; shift so "$1" below is the command's first argument.
COMMAND="${1:-help}"
shift || true

case "$COMMAND" in
  pre-edit)
    # Print advisory warnings based on the file path about to be edited.
    FILE_PATH="$1"
    if [[ -n "$FILE_PATH" ]]; then
      # Substring match anywhere in the path, so e.g. "keyboard.ts" also
      # triggers the "key" pattern (intentionally broad).
      if [[ "$FILE_PATH" =~ (config|secret|credential|password|key|auth) ]]; then
        echo -e "${YELLOW}[Guidance] Security-sensitive file${RESET}"
      fi
      # Only paths rooted at v3/ count as V3 modules.
      if [[ "$FILE_PATH" =~ ^v3/ ]]; then
        echo -e "${CYAN}[Guidance] V3 module - follow ADR guidelines${RESET}"
      fi
    fi
    exit 0
    ;;

  post-edit)
    # Append an edit-history record; failures (e.g. unwritable dir) are ignored.
    FILE_PATH="$1"
    echo "$(date -Iseconds) edit $FILE_PATH" >> "$CACHE_DIR/edit-history.log" 2>/dev/null || true
    exit 0
    ;;

  pre-command)
    # Warn (but never block) on obviously dangerous shell commands.
    COMMAND_STR="$1"
    if [[ "$COMMAND_STR" =~ (rm -rf|sudo|chmod 777) ]]; then
      echo -e "${RED}[Guidance] High-risk command${RESET}"
    fi
    exit 0
    ;;

  route)
    # Suggest a specialist agent for the task description; first match wins.
    TASK="$1"
    [[ -z "$TASK" ]] && exit 0
    if [[ "$TASK" =~ (security|CVE|vulnerability) ]]; then
      echo -e "${DIM}[Route] security-architect${RESET}"
    elif [[ "$TASK" =~ (memory|AgentDB|HNSW|vector) ]]; then
      echo -e "${DIM}[Route] memory-specialist${RESET}"
    elif [[ "$TASK" =~ (performance|optimize|benchmark) ]]; then
      echo -e "${DIM}[Route] performance-engineer${RESET}"
    elif [[ "$TASK" =~ (test|TDD|spec) ]]; then
      echo -e "${DIM}[Route] test-architect${RESET}"
    fi
    exit 0
    ;;

  session-context)
    # Emit a static markdown context block (quoted heredoc: no expansion).
    cat << 'EOF'
## V3 Development Context

**Architecture**: Domain-Driven Design with 15 @claude-flow modules
**Priority**: Security-first (CVE-1, CVE-2, CVE-3 remediation)
**Performance Targets**:
- HNSW search: 150x-12,500x faster
- Flash Attention: 2.49x-7.47x speedup
- Memory: 50-75% reduction

**Active Patterns**:
- Use TDD London School (mock-first)
- Event sourcing for state changes
- agentic-flow@alpha as core foundation
- Bounded contexts with clear interfaces

**Code Quality Rules**:
- Files under 500 lines
- No hardcoded secrets
- Input validation at boundaries
- Typed interfaces for all public APIs

**Learned Patterns**: 17 available for reference
EOF
    exit 0
    ;;

  user-prompt)
    # Intentional no-op hook point.
    exit 0
    ;;

  *)
    # Unknown commands are silently ignored.
    exit 0
    ;;
esac
|
#!/bin/bash
# Claude Flow V3 - Health Monitor Worker
# Checks disk space, memory pressure, process health
#
# Usage: health-monitor.sh [run|check|force|status]
#   run    - run a health check unconditionally
#   check  - run only if 5+ minutes since last run (default)
#   force  - clear the throttle marker, then run
#   status - print a summary from the last health.json (requires jq)

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
METRICS_DIR="$PROJECT_ROOT/.claude-flow/metrics"
HEALTH_FILE="$METRICS_DIR/health.json"
LAST_RUN_FILE="$METRICS_DIR/.health-last-run"

mkdir -p "$METRICS_DIR"

# Return success when at least 5 minutes have passed since the last check
# (or no check has ever run).
should_run() {
    if [ ! -f "$LAST_RUN_FILE" ]; then return 0; fi
    local last_run=$(cat "$LAST_RUN_FILE" 2>/dev/null || echo "0")
    local now=$(date +%s)
    [ $((now - last_run)) -ge 300 ] # 5 minutes
}

# Collect disk/memory/process/load metrics, write them to $HEALTH_FILE as
# JSON, and return non-zero when the overall status is not "healthy".
check_health() {
    echo "[$(date +%H:%M:%S)] Running health check..."

    # Disk usage. Default to 0/"unknown" when df yields nothing so the
    # numeric comparisons below (and the generated JSON) stay valid under
    # `set -eu`.
    local disk_usage=$(df -h "$PROJECT_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | tr -d '%')
    disk_usage=${disk_usage:-0}
    local disk_free=$(df -h "$PROJECT_ROOT" 2>/dev/null | awk 'NR==2 {print $4}')
    disk_free=${disk_free:-unknown}

    # Memory usage. free(1) is Linux-only; piping its failure into awk masks
    # the error (awk exits 0 on empty input), so the ${var:-0} fallback is
    # what actually guards non-Linux hosts.
    local mem_total=$(free -m 2>/dev/null | awk '/Mem:/ {print $2}')
    mem_total=${mem_total:-0}
    local mem_used=$(free -m 2>/dev/null | awk '/Mem:/ {print $3}')
    mem_used=${mem_used:-0}
    local mem_pct=$((mem_used * 100 / (mem_total + 1)))  # +1 avoids divide-by-zero

    # Process counts.
    local node_procs=$(pgrep -c node 2>/dev/null || echo "0")
    # BUGFIX: the previous pipeline was `grep -c "agentic-flow" | grep -v grep`,
    # which piped the *count* (a number) through the filter — so the grep
    # process itself was always included in the total. Filter first, then count.
    local agentic_procs=$(ps aux 2>/dev/null | grep "agentic-flow" | grep -v grep | wc -l | tr -d ' ')
    agentic_procs=${agentic_procs:-0}

    # CPU load (1-minute average; /proc/loadavg is Linux-only, hence fallback).
    local load_avg=$(cat /proc/loadavg 2>/dev/null | awk '{print $1}')
    load_avg=${load_avg:-0}

    # File descriptors open in this shell process.
    local fd_used=$(ls /proc/$$/fd 2>/dev/null | wc -l | tr -d ' ')
    fd_used=${fd_used:-0}

    # Determine health status: disk is evaluated first, then memory may
    # escalate (but never downgrade) the status.
    local status="healthy"
    local warnings=""

    if [ "$disk_usage" -gt 90 ]; then
        status="critical"
        warnings="$warnings disk_full"
    elif [ "$disk_usage" -gt 80 ]; then
        status="warning"
        warnings="$warnings disk_high"
    fi

    if [ "$mem_pct" -gt 90 ]; then
        status="critical"
        warnings="$warnings memory_full"
    elif [ "$mem_pct" -gt 80 ]; then
        [ "$status" != "critical" ] && status="warning"
        warnings="$warnings memory_high"
    fi

    # Write health metrics (xargs trims/collapses the accumulated warnings).
    cat > "$HEALTH_FILE" << EOF
{
  "status": "$status",
  "timestamp": "$(date -Iseconds)",
  "disk": {
    "usage_pct": $disk_usage,
    "free": "$disk_free"
  },
  "memory": {
    "total_mb": $mem_total,
    "used_mb": $mem_used,
    "usage_pct": $mem_pct
  },
  "processes": {
    "node": $node_procs,
    "agentic_flow": $agentic_procs
  },
  "load_avg": $load_avg,
  "fd_used": $fd_used,
  "warnings": "$(echo $warnings | xargs)"
}
EOF

    echo "[$(date +%H:%M:%S)] ✓ Health: $status | Disk: ${disk_usage}% | Memory: ${mem_pct}% | Load: $load_avg"

    date +%s > "$LAST_RUN_FILE"

    # Return non-zero if unhealthy
    [ "$status" = "healthy" ] && return 0 || return 1
}

case "${1:-check}" in
    "run") check_health ;;
    "check") should_run && check_health || echo "[$(date +%H:%M:%S)] Skipping (throttled)" ;;
    "force") rm -f "$LAST_RUN_FILE"; check_health ;;
    "status")
        if [ -f "$HEALTH_FILE" ]; then
            jq -r '"Status: \(.status) | Disk: \(.disk.usage_pct)% | Memory: \(.memory.usage_pct)% | Load: \(.load_avg)"' "$HEALTH_FILE"
        else
            echo "No health data available"
        fi
        ;;
    *) echo "Usage: $0 [run|check|force|status]" ;;
esac
|
#!/bin/bash
# Claude Flow V3 - Learning Hooks
# Integrates learning-service.mjs with session lifecycle

# Resolve paths relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
LEARNING_SERVICE="$SCRIPT_DIR/learning-service.mjs"
LEARNING_DIR="$PROJECT_ROOT/.claude-flow/learning"
METRICS_DIR="$PROJECT_ROOT/.claude-flow/metrics"

# Ensure directories exist
mkdir -p "$LEARNING_DIR" "$METRICS_DIR"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
RED='\033[0;31m'
DIM='\033[2m'
RESET='\033[0m'

# Leveled loggers; all write to stdout with a "[Learning]" prefix.
log() { echo -e "${CYAN}[Learning] $1${RESET}"; }
success() { echo -e "${GREEN}[Learning] ✓ $1${RESET}"; }
warn() { echo -e "${YELLOW}[Learning] ⚠ $1${RESET}"; }
error() { echo -e "${RED}[Learning] ✗ $1${RESET}"; }

# Generate session ID
# Format: session_<YYYYmmdd_HHMMSS>_<pid> — unique per process per second.
generate_session_id() {
    echo "session_$(date +%Y%m%d_%H%M%S)_$$"
}
# =============================================================================
# Session Start Hook
# =============================================================================
# Initialize the learning service for a session. Takes an optional session id
# (a fresh one is generated otherwise), writes the id to
# $LEARNING_DIR/current-session-id for later hooks, and records status JSON
# in $METRICS_DIR/learning-status.json. Returns 1 if the service init fails.
session_start() {
    local session_id="${1:-$(generate_session_id)}"

    log "Initializing learning service for session: $session_id"

    # Check if better-sqlite3 is available; best-effort install if missing.
    if ! npm list better-sqlite3 --prefix "$PROJECT_ROOT" >/dev/null 2>&1; then
        log "Installing better-sqlite3..."
        npm install --prefix "$PROJECT_ROOT" better-sqlite3 --save-dev --silent 2>/dev/null || true
    fi

    # Initialize learning service.
    # Declared separately from the assignment so that $? below reflects the
    # node invocation rather than the `local` builtin.
    local init_result
    init_result=$(node "$LEARNING_SERVICE" init "$session_id" 2>&1)

    if [ $? -eq 0 ]; then
        # Parse and display stats. This is naive JSON scraping; it assumes
        # the service prints the keys on one line -- verify against
        # learning-service.mjs output format.
        local short_term=$(echo "$init_result" | grep -o '"shortTermPatterns":[0-9]*' | cut -d: -f2)
        local long_term=$(echo "$init_result" | grep -o '"longTermPatterns":[0-9]*' | cut -d: -f2)

        success "Learning service initialized"
        echo -e "  ${DIM}├─ Short-term patterns: ${short_term:-0}${RESET}"
        echo -e "  ${DIM}├─ Long-term patterns: ${long_term:-0}${RESET}"
        echo -e "  ${DIM}└─ Session ID: $session_id${RESET}"

        # Store session ID for later hooks
        echo "$session_id" > "$LEARNING_DIR/current-session-id"

        # Update metrics
        cat > "$METRICS_DIR/learning-status.json" << EOF
{
  "sessionId": "$session_id",
  "initialized": true,
  "shortTermPatterns": ${short_term:-0},
  "longTermPatterns": ${long_term:-0},
  "hnswEnabled": true,
  "timestamp": "$(date -Iseconds)"
}
EOF

        return 0
    else
        warn "Learning service initialization failed (non-critical)"
        echo "$init_result" | head -5
        return 1
    fi
}
# =============================================================================
# Session End Hook
# =============================================================================
# Export session data, consolidate stored patterns, record final stats, and
# remove the current-session-id marker. All service failures are non-fatal;
# the function always returns 0.
session_end() {
    log "Consolidating learning data..."

    # Get session ID (written by session_start; may be absent).
    # NOTE(review): session_id is read but not used further below.
    local session_id=""
    if [ -f "$LEARNING_DIR/current-session-id" ]; then
        session_id=$(cat "$LEARNING_DIR/current-session-id")
    fi

    # Export session data (separate declare/assign so $? is the node status).
    local export_result
    export_result=$(node "$LEARNING_SERVICE" export 2>&1)

    if [ $? -eq 0 ]; then
        # Save export with a timestamped filename.
        echo "$export_result" > "$LEARNING_DIR/session-export-$(date +%Y%m%d_%H%M%S).json"

        local patterns=$(echo "$export_result" | grep -o '"patterns":[0-9]*' | cut -d: -f2)
        log "Session exported: $patterns patterns"
    fi

    # Run consolidation
    local consolidate_result
    consolidate_result=$(node "$LEARNING_SERVICE" consolidate 2>&1)

    if [ $? -eq 0 ]; then
        local removed=$(echo "$consolidate_result" | grep -o '"duplicatesRemoved":[0-9]*' | cut -d: -f2)
        # NOTE(review): "patternsProned" looks like a typo for "patternsPruned" --
        # confirm against the key actually emitted by learning-service.mjs
        # before changing it (a mismatch silently falls back to 0 below).
        local pruned=$(echo "$consolidate_result" | grep -o '"patternsProned":[0-9]*' | cut -d: -f2)
        local duration=$(echo "$consolidate_result" | grep -o '"durationMs":[0-9]*' | cut -d: -f2)

        success "Consolidation complete"
        echo -e "  ${DIM}├─ Duplicates removed: ${removed:-0}${RESET}"
        echo -e "  ${DIM}├─ Patterns pruned: ${pruned:-0}${RESET}"
        echo -e "  ${DIM}└─ Duration: ${duration:-0}ms${RESET}"
    else
        warn "Consolidation failed (non-critical)"
    fi

    # Get final stats
    local stats_result
    stats_result=$(node "$LEARNING_SERVICE" stats 2>&1)

    if [ $? -eq 0 ]; then
        echo "$stats_result" > "$METRICS_DIR/learning-final-stats.json"

        local total_short=$(echo "$stats_result" | grep -o '"shortTermPatterns":[0-9]*' | cut -d: -f2)
        local total_long=$(echo "$stats_result" | grep -o '"longTermPatterns":[0-9]*' | cut -d: -f2)
        local avg_search=$(echo "$stats_result" | grep -o '"avgSearchTimeMs":[0-9.]*' | cut -d: -f2)

        log "Final stats:"
        echo -e "  ${DIM}├─ Short-term: ${total_short:-0}${RESET}"
        echo -e "  ${DIM}├─ Long-term: ${total_long:-0}${RESET}"
        echo -e "  ${DIM}└─ Avg search: ${avg_search:-0}ms${RESET}"
    fi

    # Clean up session file
    rm -f "$LEARNING_DIR/current-session-id"

    return 0
}
# =============================================================================
# Store Pattern (called by post-edit hooks)
# =============================================================================
# Usage: store_pattern <strategy> [domain] [quality]
# Stores a strategy string via the learning service. Returns 1 on missing
# strategy or service failure.
store_pattern() {
    local strategy="$1"
    local domain="${2:-general}"
    # NOTE(review): quality is accepted but never forwarded to the service
    # call below -- confirm whether learning-service.mjs `store` takes a
    # quality argument, or drop the parameter.
    local quality="${3:-0.7}"

    if [ -z "$strategy" ]; then
        error "No strategy provided"
        return 1
    fi

    # Escape quotes in strategy so it survives downstream JSON embedding.
    local escaped_strategy="${strategy//\"/\\\"}"

    # Separate declare/assign keeps $? pointing at the node call.
    local result
    result=$(node "$LEARNING_SERVICE" store "$escaped_strategy" "$domain" 2>&1)

    if [ $? -eq 0 ]; then
        # Scrape action/id from the service's JSON response.
        local action=$(echo "$result" | grep -o '"action":"[^"]*"' | cut -d'"' -f4)
        local id=$(echo "$result" | grep -o '"id":"[^"]*"' | cut -d'"' -f4)

        if [ "$action" = "created" ]; then
            success "Pattern stored: $id"
        else
            log "Pattern updated: $id"
        fi
        return 0
    else
        warn "Pattern storage failed"
        return 1
    fi
}
# =============================================================================
# Search Patterns (called by pre-edit hooks)
# =============================================================================
# Usage: search_patterns <query> [k]
# Prints the raw JSON result from the learning service on stdout plus a
# timing log line. Returns 1 when no query is given or the service fails.
search_patterns() {
    local query="$1"
    local k="${2:-3}"   # number of matches to request

    if [ -z "$query" ]; then
        error "No query provided"
        return 1
    fi

    # Escape double quotes so the query survives downstream JSON embedding.
    local escaped_query="${query//\"/\\\"}"

    # Separate declare/assign keeps $? pointing at the node call.
    local result
    result=$(node "$LEARNING_SERVICE" search "$escaped_query" "$k" 2>&1)

    if [ $? -eq 0 ]; then
        # (Removed an unused `patterns` local that counted '"patterns":['
        # occurrences but was never read.)
        local search_time=$(echo "$result" | grep -o '"searchTimeMs":[0-9.]*' | cut -d: -f2)

        # Emit the raw service output for the caller to consume.
        echo "$result"

        if [ -n "$search_time" ]; then
            log "Search completed in ${search_time}ms"
        fi
        return 0
    else
        warn "Pattern search failed"
        return 1
    fi
}
# =============================================================================
# Record Pattern Usage (for promotion tracking)
# =============================================================================
# Usage: record_usage <pattern_id> [success]
record_usage() {
    local pattern_id="$1"
    local success="${2:-true}"

    # Nothing to record without a pattern id.
    [ -z "$pattern_id" ] && return 1

    # Placeholder: the learning service does not yet expose a usage endpoint,
    # so the event is only logged for now.
    log "Recording usage: $pattern_id (success=$success)"
}
# =============================================================================
# Run Benchmark
# =============================================================================
# Run the learning service's HNSW benchmark, print a summary, then echo the
# raw JSON result. Returns the service's success/failure.
run_benchmark() {
    log "Running HNSW benchmark..."

    # Separate declare/assign keeps $? pointing at the node call.
    local result
    result=$(node "$LEARNING_SERVICE" benchmark 2>&1)

    if [ $? -eq 0 ]; then
        # These keys are emitted as quoted strings (not bare numbers),
        # hence the cut on '"' rather than ':'.
        local avg_search=$(echo "$result" | grep -o '"avgSearchMs":"[^"]*"' | cut -d'"' -f4)
        local p95_search=$(echo "$result" | grep -o '"p95SearchMs":"[^"]*"' | cut -d'"' -f4)
        local improvement=$(echo "$result" | grep -o '"searchImprovementEstimate":"[^"]*"' | cut -d'"' -f4)

        success "HNSW Benchmark Complete"
        echo -e "  ${DIM}├─ Avg search: ${avg_search}ms${RESET}"
        echo -e "  ${DIM}├─ P95 search: ${p95_search}ms${RESET}"
        echo -e "  ${DIM}└─ Estimated improvement: ${improvement}${RESET}"

        echo "$result"
        return 0
    else
        error "Benchmark failed"
        echo "$result"
        return 1
    fi
}
# =============================================================================
# Get Stats
# =============================================================================
# Print the learning service's statistics JSON on stdout; returns 1 and logs
# an error when the service call fails.
get_stats() {
    local stats_json
    if stats_json=$(node "$LEARNING_SERVICE" stats 2>&1); then
        echo "$stats_json"
        return 0
    fi
    error "Failed to get stats"
    return 1
}
# =============================================================================
# Main
# =============================================================================
# Dispatch on the first CLI argument; defaults to showing help. Remaining
# positional arguments are forwarded to the matching function.
case "${1:-help}" in
    "session-start"|"start")
        session_start "$2"
        ;;
    "session-end"|"end")
        session_end
        ;;
    "store")
        store_pattern "$2" "$3" "$4"
        ;;
    "search")
        search_patterns "$2" "$3"
        ;;
    "record-usage"|"usage")
        record_usage "$2" "$3"
        ;;
    "benchmark")
        run_benchmark
        ;;
    "stats")
        get_stats
        ;;
    "help"|"-h"|"--help")
        # Quoted heredoc delimiter: help text is emitted verbatim.
        cat << 'EOF'
Claude Flow V3 Learning Hooks

Usage: learning-hooks.sh <command> [args]

Commands:
  session-start [id]    Initialize learning for new session
  session-end           Consolidate and export session data
  store <strategy>      Store a new pattern
  search <query> [k]    Search for similar patterns
  record-usage <id>     Record pattern usage
  benchmark             Run HNSW performance benchmark
  stats                 Get learning statistics
  help                  Show this help

Examples:
  ./learning-hooks.sh session-start
  ./learning-hooks.sh store "Fix authentication bug" code
  ./learning-hooks.sh search "authentication error" 5
  ./learning-hooks.sh session-end
EOF
        ;;
    *)
        error "Unknown command: $1"
        echo "Use 'learning-hooks.sh help' for usage"
        exit 1
        ;;
esac