qualia-framework 2.4.0 → 2.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/bin/collect-metrics.sh +62 -0
  2. package/framework/agents/qualia-phase-researcher.md +6 -3
  3. package/framework/agents/qualia-planner.md +10 -7
  4. package/framework/agents/qualia-research-synthesizer.md +110 -147
  5. package/framework/agents/red-team-qa.md +130 -0
  6. package/framework/hooks/auto-format.sh +9 -1
  7. package/framework/hooks/migration-validate.sh +21 -16
  8. package/framework/hooks/pre-commit.sh +13 -5
  9. package/framework/hooks/pre-deploy-gate.sh +23 -1
  10. package/framework/hooks/retention-cleanup.sh +4 -4
  11. package/framework/hooks/save-session-state.sh +18 -10
  12. package/framework/hooks/session-context-loader.sh +21 -0
  13. package/framework/hooks/skill-announce.sh +2 -0
  14. package/framework/install.ps1 +6 -6
  15. package/framework/install.sh +6 -4
  16. package/framework/qualia-engine/VERSION +1 -1
  17. package/framework/qualia-engine/bin/collect-metrics.sh +71 -0
  18. package/framework/qualia-engine/bin/qualia-tools.js +104 -63
  19. package/framework/qualia-engine/references/continuation-prompt.md +97 -0
  20. package/framework/qualia-engine/references/employee-guide.md +167 -0
  21. package/framework/qualia-engine/templates/lab-notes.md +16 -0
  22. package/framework/qualia-engine/templates/roadmap.md +2 -8
  23. package/framework/qualia-engine/workflows/execute-phase.md +17 -17
  24. package/framework/qualia-engine/workflows/new-project.md +37 -114
  25. package/framework/qualia-engine/workflows/progress.md +63 -28
  26. package/framework/skills/client-handoff/SKILL.md +13 -3
  27. package/framework/skills/deep-research/SKILL.md +34 -71
  28. package/framework/skills/learn/SKILL.md +29 -5
  29. package/framework/skills/qualia/SKILL.md +57 -17
  30. package/framework/skills/qualia-complete-milestone/SKILL.md +29 -7
  31. package/framework/skills/qualia-evolve/SKILL.md +200 -0
  32. package/framework/skills/qualia-execute-phase/SKILL.md +1 -1
  33. package/framework/skills/qualia-guide/SKILL.md +32 -0
  34. package/framework/skills/qualia-help/SKILL.md +62 -60
  35. package/framework/skills/qualia-new-project/SKILL.md +32 -30
  36. package/framework/skills/qualia-report/SKILL.md +217 -0
  37. package/framework/skills/qualia-start/SKILL.md +31 -59
  38. package/framework/skills/qualia-verify-work/SKILL.md +20 -3
  39. package/package.json +1 -1
@@ -10,37 +10,39 @@ if [ ! -t 0 ]; then
10
10
  fi
11
11
  fi
12
12
 
13
+ DESTRUCTIVE=0
14
+ WARNINGS_COUNT=0
15
+
13
16
  check_sql_file() {
14
17
  local file="$1"
15
- local issues=0
16
18
  [ ! -f "$file" ] && return 0
17
19
 
20
+ # Destructive ops → block
18
21
  if grep -iE '\bDROP\s+(TABLE|SCHEMA|DATABASE)\b' "$file" > /dev/null 2>&1; then
19
- q_warn "DROP TABLE/SCHEMA in ${file}"
20
- issues=$((issues + 1))
22
+ q_fail "DROP TABLE/SCHEMA in ${file}"
23
+ DESTRUCTIVE=$((DESTRUCTIVE + 1))
21
24
  fi
22
25
  if grep -iE '\bTRUNCATE\b' "$file" > /dev/null 2>&1; then
23
- q_warn "TRUNCATE in ${file}"
24
- issues=$((issues + 1))
26
+ q_fail "TRUNCATE in ${file}"
27
+ DESTRUCTIVE=$((DESTRUCTIVE + 1))
25
28
  fi
26
29
  if grep -iE '\bDELETE\s+FROM\b' "$file" > /dev/null 2>&1 && ! grep -iE '\bWHERE\b' "$file" > /dev/null 2>&1; then
27
- q_warn "DELETE without WHERE in ${file}"
28
- issues=$((issues + 1))
30
+ q_fail "DELETE without WHERE in ${file}"
31
+ DESTRUCTIVE=$((DESTRUCTIVE + 1))
29
32
  fi
30
33
  if grep -iE '\bALTER\s+TABLE\b.*\bDROP\s+COLUMN\b' "$file" > /dev/null 2>&1; then
31
- q_warn "DROP COLUMN in ${file}"
32
- issues=$((issues + 1))
34
+ q_fail "DROP COLUMN in ${file}"
35
+ DESTRUCTIVE=$((DESTRUCTIVE + 1))
33
36
  fi
37
+ # Missing RLS → warn only (reminder, not destructive)
34
38
  if grep -iE '\bCREATE\s+TABLE\b' "$file" > /dev/null 2>&1 && ! grep -iE '\bENABLE\s+ROW\s+LEVEL\s+SECURITY\b' "$file" > /dev/null 2>&1; then
35
- q_warn "New table needs security rules (RLS) in ${file} — tell Claude: 'add RLS policies to the new table'"
36
- issues=$((issues + 1))
39
+ q_warn "New table needs RLS in ${file} — tell Claude: 'add RLS policies to the new table'"
40
+ WARNINGS_COUNT=$((WARNINGS_COUNT + 1))
37
41
  fi
38
- return $issues
39
42
  }
40
43
 
41
44
  TOTAL=0
42
45
  TRIGGERED=false
43
- WARN_DETAILS=""
44
46
 
45
47
  # Mode 1: SQL file written
46
48
  if [ -n "$FILE_PATH" ] && [[ "$FILE_PATH" == *.sql ]]; then
@@ -59,9 +61,12 @@ if [ -n "$COMMAND" ] && echo "$COMMAND" | grep -qE 'supabase\s+db\s+push'; then
59
61
  done
60
62
  fi
61
63
 
62
- # Warn but don't block surface via systemMessage
63
- if $TRIGGERED && [ "$TOTAL" -gt 0 ]; then
64
- printf '{"continue":true,"systemMessage":"◆ MIGRATION CHECK: %d issue(s) found tell Claude to review and fix the migration warnings before proceeding."}' "$TOTAL"
64
+ # Block on destructive ops, warn on missing RLS
65
+ if $TRIGGERED && [ "$DESTRUCTIVE" -gt 0 ]; then
66
+ printf '{"continue":false,"stopReason":"◆ MIGRATION: %d destructive operation(s) blocked","systemMessage":"◆ MIGRATION BLOCKED: %d destructive operation(s) found. Tell Claude to review the SQL and confirm these operations are intentional."}' "$DESTRUCTIVE" "$DESTRUCTIVE"
67
+ exit 2
68
+ elif $TRIGGERED && [ "$WARNINGS_COUNT" -gt 0 ]; then
69
+ printf '{"continue":true,"systemMessage":"◆ MIGRATION CHECK: %d warning(s) — tell Claude to review missing RLS policies."}' "$WARNINGS_COUNT"
65
70
  elif $TRIGGERED; then
66
71
  printf '{"continue":true,"systemMessage":"◆ MIGRATION CHECK: clean"}'
67
72
  fi
@@ -2,6 +2,12 @@
2
2
  # Pre-commit validation — secrets, debug statements, TypeScript, lint
3
3
  source "$(dirname "$0")/qualia-colors.sh"
4
4
 
5
+ if ! command -v node &>/dev/null; then
6
+ [ ! -t 0 ] && cat > /dev/null
7
+ printf '{"continue":false,"stopReason":"PRE-COMMIT: node is not installed — cannot verify safety. Install node to proceed."}'
8
+ exit 2
9
+ fi
10
+
5
11
  if [ ! -t 0 ]; then
6
12
  INPUT=$(cat)
7
13
  COMMAND=$(echo "$INPUT" | node -e "try{const d=JSON.parse(require('fs').readFileSync('/dev/stdin','utf8'));process.stdout.write(d.tool_input?.command||d.tool_input||'')}catch(e){}" 2>/dev/null)
@@ -41,12 +47,14 @@ if [ -n "$ENV_FILES" ]; then
41
47
  BLOCKED=true
42
48
  fi
43
49
 
44
- # Debug statements in JS/TS
45
- JS_FILES=$(echo "$STAGED_FILES" | grep -E '\.(js|jsx|ts|tsx)$' || true)
46
- if [ -n "$JS_FILES" ]; then
47
- for file in $JS_FILES; do
50
+ # Debug statements in JS/TS (block in production code, allow in tests/stories)
51
+ JS_PROD_FILES=$(echo "$STAGED_FILES" | grep -E '\.(js|jsx|ts|tsx)$' | grep -vE '\.test\.|\.spec\.|__tests__/|\.stories\.' || true)
52
+ if [ -n "$JS_PROD_FILES" ]; then
53
+ for file in $JS_PROD_FILES; do
48
54
  if [ -f "$file" ] && grep -E "console\.(log|debug|info)|debugger" "$file" > /dev/null 2>&1; then
49
- q_warn "Debug statement in ${file}"
55
+ q_fail "Debug statement in ${file}"
56
+ FAIL_DETAILS="${FAIL_DETAILS}Debug statement in ${file} — tell Claude: 'remove console.log/debugger from production code.' "
57
+ BLOCKED=true
50
58
  fi
51
59
  done
52
60
  fi
@@ -5,6 +5,12 @@
5
5
 
6
6
  source "$(dirname "$0")/qualia-colors.sh"
7
7
 
8
+ if ! command -v node &>/dev/null; then
9
+ [ ! -t 0 ] && cat > /dev/null
10
+ printf '{"continue":false,"stopReason":"DEPLOY GATE: node is not installed — cannot verify safety. Install node to proceed."}'
11
+ exit 2
12
+ fi
13
+
8
14
  # Parse command from stdin JSON
9
15
  if [ ! -t 0 ]; then
10
16
  INPUT=$(cat)
@@ -14,7 +20,7 @@ else
14
20
  fi
15
21
 
16
22
  # Only gate production deploys
17
- if ! echo "$COMMAND" | grep -qE 'vercel\s+.*--prod|vercel\s+--prod'; then
23
+ if ! echo "$COMMAND" | grep -qE '(npx\s+|bunx\s+)?vercel\s+.*--prod|(npx\s+|bunx\s+)?vercel\s+--prod'; then
18
24
  exit 0
19
25
  fi
20
26
 
@@ -84,6 +90,22 @@ else
84
90
  q_pass "Environment"
85
91
  fi
86
92
 
93
+ # ─── Check 5: Build ───
94
+ if [ -f "package.json" ]; then
95
+ BUILD_SCRIPT=$(node -e "try{console.log(require('./package.json').scripts?.build||'')}catch(e){console.log('')}" 2>/dev/null)
96
+ if [ -n "$BUILD_SCRIPT" ]; then
97
+ if npm run build 2>/tmp/build-gate.txt 1>/dev/null; then
98
+ q_pass "Build"
99
+ else
100
+ q_fail "Build failed"
101
+ FAIL_DETAILS="${FAIL_DETAILS}Build is failing — tell Claude: 'fix the build errors' before deploying. "
102
+ FAILURES=$((FAILURES + 1))
103
+ fi
104
+ else
105
+ q_skip "Build"
106
+ fi
107
+ fi
108
+
87
109
  # ─── Check 6: REVIEW.md ───
88
110
  REVIEW_FILE=""
89
111
  [ -f ".planning/REVIEW.md" ] && REVIEW_FILE=".planning/REVIEW.md"
@@ -10,7 +10,7 @@
10
10
  # paste-cache/ 3 days
11
11
  # shell-snapshots/ 3 days
12
12
  # projects/*.jsonl 7 days
13
- # tasks/ 3 days
13
+ # tasks/ 14 days
14
14
  # plans/ 7 days
15
15
 
16
16
  CLAUDE_DIR="$HOME/.claude"
@@ -37,9 +37,9 @@ find "$CLAUDE_DIR/shell-snapshots/" -type f -mtime +3 -delete 2>/dev/null
37
37
  find "$CLAUDE_DIR/projects/" -name "*.jsonl" -mtime +7 -delete 2>/dev/null
38
38
  find "$CLAUDE_DIR/projects/" -type d -empty -delete 2>/dev/null
39
39
 
40
- # tasks/ — keep 3 days (stale task files from old sessions)
41
- find "$CLAUDE_DIR/tasks/" -type f -mtime +3 -delete 2>/dev/null
42
- find "$CLAUDE_DIR/tasks/" -type d -empty -delete 2>/dev/null
40
+ # tasks/ — keep 14 days (team sessions may span multiple days)
41
+ find "$CLAUDE_DIR/tasks/" -type f -mtime +14 -delete 2>/dev/null
42
+ find "$CLAUDE_DIR/tasks/" -mindepth 1 -type d -empty -delete 2>/dev/null
43
43
 
44
44
  # plans/ — keep 7 days (old Claude Code plan mode files)
45
45
  find "$CLAUDE_DIR/plans/" -type f -mtime +7 -delete 2>/dev/null
@@ -82,14 +82,17 @@ if [ "$PROJECT_NAME" != ".claude" ] && [ "$PROJECT_NAME" != "Projects" ] && { [
82
82
 
83
83
  # Dedup: skip if ANY existing entry has the same project + branch + summary
84
84
  NEW_KEY="${PROJECT_NAME}|${GIT_BRANCH:-—}|${SUMMARY:-—}"
85
- if tail -n +4 "$DIGEST_FILE" | while IFS= read -r line; do
85
+ DUPLICATE=false
86
+ while IFS= read -r line; do
86
87
  ENTRY_KEY=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$3); gsub(/^ +| +$/,"",$4); gsub(/^ +| +$/,"",$6); print $3"|"$4"|"$6}')
87
- [ "$ENTRY_KEY" = "$NEW_KEY" ] && exit 0
88
- done; then
89
- : # no match found, continue
88
+ if [ "$ENTRY_KEY" = "$NEW_KEY" ]; then
89
+ DUPLICATE=true
90
+ break
91
+ fi
92
+ done < <(tail -n +4 "$DIGEST_FILE")
93
+ if [ "$DUPLICATE" = "true" ]; then
94
+ : # duplicate found, skip adding to digest
90
95
  else
91
- exit 0 # duplicate found, skip
92
- fi
93
96
 
94
97
  # Read existing entries (skip header — first 3 lines), keep last 19
95
98
  EXISTING=$(tail -n +4 "$DIGEST_FILE" | head -19)
@@ -101,6 +104,7 @@ if [ "$PROJECT_NAME" != ".claude" ] && [ "$PROJECT_NAME" != "Projects" ] && { [
101
104
  ${ENTRY}
102
105
  ${EXISTING}
103
106
  HEADER
107
+ fi
104
108
  fi
105
109
 
106
110
  # --- Auto-handoff for significant sessions (>5 files changed) ---
@@ -156,15 +160,19 @@ HANDOFF
156
160
  fi
157
161
  fi
158
162
 
159
- # --- Push to portal (non-blocking, fire-and-forget) ---
163
+ # --- Push to portal (with error tracking) ---
160
164
  PORTAL_API_KEY="${CLAUDE_PORTAL_API_KEY:-}"
161
165
  if [ -n "$PORTAL_API_KEY" ] && [ "$PROJECT_NAME" != ".claude" ]; then
162
- curl -s -X POST "https://portal.qualiasolutions.net/api/claude/session-log" \
166
+ HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "https://portal.qualiasolutions.net/api/claude/session-log" \
163
167
  -H "X-API-Key: $PORTAL_API_KEY" \
164
168
  -H "Content-Type: application/json" \
165
169
  -d @"$SESSION_FILE" \
166
- --max-time 5 \
167
- > /dev/null 2>&1 &
170
+ --max-time 5 2>/dev/null || echo "000")
171
+ if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "201" ]; then
172
+ rm -f "$SESSION_DIR/.portal-last-error"
173
+ else
174
+ echo "$HTTP_CODE" > "$SESSION_DIR/.portal-last-error"
175
+ fi
168
176
  fi
169
177
 
170
178
  # --- Cleanup old session files (keep last 50 for retrospective analytics — RETR-04) ---
@@ -62,6 +62,27 @@ if [ -f "$HOME/.claude/settings.json" ]; then
62
62
  fi
63
63
  fi
64
64
 
65
+ # Check for stale knowledge files (>30 days old)
66
+ STALE_COUNT=0
67
+ STALE_NAMES=""
68
+ for kf in "$HOME/.claude/knowledge"/*.md; do
69
+ [ -f "$kf" ] || continue
70
+ KF_AGE=$(( ($(date +%s) - $(stat -c %Y "$kf" 2>/dev/null || echo 0)) / 86400 ))
71
+ if [ "$KF_AGE" -gt 30 ]; then
72
+ STALE_COUNT=$((STALE_COUNT + 1))
73
+ STALE_NAMES="${STALE_NAMES} $(basename "$kf")(${KF_AGE}d)"
74
+ fi
75
+ done
76
+ if [ "$STALE_COUNT" -gt 0 ]; then
77
+ ISSUES="${ISSUES}${STALE_COUNT} stale knowledge file(s):${STALE_NAMES}. "
78
+ fi
79
+
80
+ # Check portal sync health
81
+ if [ -f "$SESSION_DIR/.portal-last-error" ]; then
82
+ PORTAL_ERR=$(cat "$SESSION_DIR/.portal-last-error" 2>/dev/null)
83
+ ISSUES="${ISSUES}Portal sync failed (HTTP ${PORTAL_ERR}). "
84
+ fi
85
+
65
86
  if [ -n "$ISSUES" ]; then
66
87
  MSG="${MSG} | ▲ ${ISSUES}"
67
88
  fi
@@ -29,6 +29,7 @@ SKILL_DESC=(
29
29
  # Navigation
30
30
  ["qualia-start"]="Activating Qualia mode"
31
31
  ["qualia-progress"]="Checking progress"
32
+ ["qualia-guide"]="Showing developer guide"
32
33
  ["qualia-idk"]="Analyzing next steps"
33
34
  ["qualia"]="Routing to next action"
34
35
  ["qualia-resume-work"]="Restoring session context"
@@ -36,6 +37,7 @@ SKILL_DESC=(
36
37
  # Quality
37
38
  ["qualia-review"]="Running code review"
38
39
  ["qualia-optimize"]="Running optimization pass"
40
+ ["qualia-evolve"]="Evolving framework"
39
41
  ["qualia-production-check"]="Running production audit"
40
42
  ["qualia-framework-audit"]="Auditing framework"
41
43
  ["deep-research"]="Spawning research agents"
@@ -87,10 +87,10 @@ $dirs = @(
87
87
  "$ClaudeDir\hooks",
88
88
  "$ClaudeDir\rules",
89
89
  "$ClaudeDir\knowledge\retros",
90
- "$ClaudeDir\qualia-engine\bin",
91
- "$ClaudeDir\qualia-engine\references",
92
- "$ClaudeDir\qualia-engine\templates",
93
- "$ClaudeDir\qualia-engine\workflows",
90
+ "$ClaudeDir\qualia-framework\bin",
91
+ "$ClaudeDir\qualia-framework\references",
92
+ "$ClaudeDir\qualia-framework\templates",
93
+ "$ClaudeDir\qualia-framework\workflows",
94
94
  "$ClaudeDir\projects",
95
95
  "$ClaudeDir\session-env"
96
96
  )
@@ -129,7 +129,7 @@ $hookCount = (Get-ChildItem "$ClaudeDir\hooks\*.sh" -ErrorAction SilentlyContinu
129
129
  Write-Host " + $hookCount hooks" -ForegroundColor Green
130
130
 
131
131
  # Qualia Engine
132
- Copy-Item "$ScriptDir\qualia-engine\*" "$ClaudeDir\qualia-engine\" -Recurse -Force -ErrorAction SilentlyContinue
132
+ Copy-Item "$ScriptDir\qualia-framework\*" "$ClaudeDir\qualia-framework\" -Recurse -Force -ErrorAction SilentlyContinue
133
133
  Write-Host " + Qualia engine" -ForegroundColor Green
134
134
 
135
135
  # Rules
@@ -273,7 +273,7 @@ $criticalFiles = @(
273
273
  "skills\qualia-start\SKILL.md",
274
274
  "skills\browser-qa\SKILL.md",
275
275
  "skills\ship\SKILL.md",
276
- "qualia-engine\bin\qualia-tools.js",
276
+ "qualia-framework\bin\qualia-tools.js",
277
277
  "CLAUDE.md"
278
278
  )
279
279
 
@@ -90,7 +90,7 @@ printf "\n"
90
90
  # ─── Step 2: Create directory structure ───
91
91
  printf "${Q_WARN}[2/6] Directory structure${Q_RESET}\n"
92
92
 
93
- mkdir -p "$CLAUDE_DIR"/{skills,agents,hooks,rules,knowledge/retros,qualia-engine/{bin,references,templates,workflows},projects,session-env}
93
+ mkdir -p "$CLAUDE_DIR"/{skills,agents,hooks,rules,knowledge/retros,qualia-framework/{bin,references,templates,workflows},projects,session-env}
94
94
 
95
95
  printf "${Q_PASS} ✓ Ready${Q_RESET}\n"
96
96
 
@@ -123,7 +123,9 @@ HOOK_COUNT=$(find "$CLAUDE_DIR/hooks" -name "*.sh" -type f | wc -l)
123
123
  printf "${Q_PASS} ✓ $HOOK_COUNT hooks${Q_RESET}\n"
124
124
 
125
125
  # Qualia Engine
126
- cp -r "$SCRIPT_DIR"/qualia-engine/* "$CLAUDE_DIR/qualia-engine/" 2>/dev/null || true
126
+ cp -r "$SCRIPT_DIR"/qualia-engine/* "$CLAUDE_DIR/qualia-framework/" 2>/dev/null || true
127
+ # Clean up old path if it exists from previous installs
128
+ [ -d "$CLAUDE_DIR/qualia-engine" ] && rm -rf "$CLAUDE_DIR/qualia-engine"
127
129
  printf "${Q_PASS} ✓ Qualia engine${Q_RESET}\n"
128
130
 
129
131
  # Rules
@@ -232,7 +234,7 @@ fi
232
234
  printf "${Q_WARN}[5/6] Platform setup${Q_RESET}\n"
233
235
 
234
236
  # Make qualia-tools executable
235
- [ -f "$CLAUDE_DIR/qualia-engine/bin/qualia-tools.js" ] && chmod +x "$CLAUDE_DIR/qualia-engine/bin/qualia-tools.js"
237
+ [ -f "$CLAUDE_DIR/qualia-framework/bin/qualia-tools.js" ] && chmod +x "$CLAUDE_DIR/qualia-framework/bin/qualia-tools.js"
236
238
 
237
239
  # Askpass helper (works with zenity, kdialog, or falls back to terminal)
238
240
  if [ ! -f "$CLAUDE_DIR/askpass.sh" ]; then
@@ -273,7 +275,7 @@ for check in \
273
275
  "skills/qualia-start/SKILL.md" \
274
276
  "skills/browser-qa/SKILL.md" \
275
277
  "skills/ship/SKILL.md" \
276
- "qualia-engine/bin/qualia-tools.js" \
278
+ "qualia-framework/bin/qualia-tools.js" \
277
279
  "CLAUDE.md"; do
278
280
  if [ ! -f "$CLAUDE_DIR/$check" ]; then
279
281
  printf "${Q_FAIL} ✗ Missing: $check${Q_RESET}\n"
@@ -1 +1 @@
1
- 2.4.0
1
+ 2.4.2
@@ -0,0 +1,71 @@
1
+ #!/bin/bash
2
+ # Qualia Framework Metrics Collection
3
+ # Run after a project ships to capture performance data.
4
+ # Usage: collect-metrics.sh [project-dir]
5
+ # Output: appends to ~/.claude/knowledge/framework-metrics.md
6
+
7
+ PROJECT_DIR="${1:-.}"
8
+ METRICS_FILE="$HOME/.claude/knowledge/framework-metrics.md"
9
+ DATE=$(date +%Y-%m-%d)
10
+
11
+ # Ensure metrics file exists
12
+ if [ ! -f "$METRICS_FILE" ]; then
13
+ cat > "$METRICS_FILE" << 'HEADER'
14
+ # Framework Performance Metrics
15
+
16
+ > Auto-collected after each project ships. Read by `/qualia-evolve` to optimize the framework.
17
+
18
+ | Date | Project | Phases | Sessions | Deviations | IDK Calls | Verify Pass Rate | Lab Notes | FQS |
19
+ |------|---------|--------|----------|------------|-----------|-----------------|-----------|-----|
20
+ HEADER
21
+ fi
22
+
23
+ cd "$PROJECT_DIR" || exit 1
24
+
25
+ # Project name
26
+ PROJECT=$(basename "$(pwd)")
27
+
28
+ # Phase count
29
+ PHASES=$(ls -d .planning/phases/*/ 2>/dev/null | wc -l)
30
+
31
+ # Session count (from session-digest entries for this project)
32
+ SESSIONS=$(grep -c "$PROJECT" ~/.claude/knowledge/session-digest.md 2>/dev/null || echo "?")
33
+
34
+ # Deviation count (gap-fix plans)
35
+ DEVIATIONS=$(find .planning/phases/ -name "*-PLAN.md" -exec grep -l "gaps" {} \; 2>/dev/null | wc -l)
36
+
37
+ # IDK calls (search session digest for qualia-idk mentions with this project)
38
+ IDK_CALLS=$(grep "$PROJECT" ~/.claude/knowledge/session-digest.md 2>/dev/null | grep -c "idk\|stuck\|lost" || echo "0")
39
+
40
+ # Verify pass rate (UAT files with PASSED vs total UAT files)
41
+ TOTAL_UAT=$(find .planning/phases/ -name "*-UAT.md" 2>/dev/null | wc -l)
42
+ PASSED_UAT=$(grep -rl "PASSED\|✅.*Overall" .planning/phases/*/*.md 2>/dev/null | wc -l)
43
+ if [ "$TOTAL_UAT" -gt 0 ]; then
44
+ PASS_RATE=$(( PASSED_UAT * 100 / TOTAL_UAT ))%
45
+ else
46
+ PASS_RATE="n/a"
47
+ fi
48
+
49
+ # Lab Notes count
50
+ LAB_NOTES=$(grep -c "^###" .planning/lab-notes.md 2>/dev/null || echo "0")
51
+
52
+ # Completion rate from ROADMAP.md (actual, not assumed)
53
+ TOTAL_PLANS=$(grep -c "^- \[" .planning/ROADMAP.md 2>/dev/null || echo "0")
54
+ COMPLETED_PLANS=$(grep -c "^- \[x\]" .planning/ROADMAP.md 2>/dev/null || echo "0")
55
+ if [ "$TOTAL_PLANS" -gt 0 ]; then
56
+ COMPLETION_RATE=$(( COMPLETED_PLANS * 100 / TOTAL_PLANS ))
57
+ else
58
+ COMPLETION_RATE=100
59
+ fi
60
+
61
+ # FQS = completion_rate / avg_sessions_to_ship * 100 (per OBJECTIVE.md)
62
+ if [ "$SESSIONS" != "?" ] && [ "$SESSIONS" -gt 0 ]; then
63
+ FQS=$(( COMPLETION_RATE * 100 / SESSIONS ))
64
+ else
65
+ FQS="?"
66
+ fi
67
+
68
+ # Append to metrics file
69
+ echo "| $DATE | $PROJECT | $PHASES | $SESSIONS | $DEVIATIONS | $IDK_CALLS | $PASS_RATE | $LAB_NOTES | $FQS |" >> "$METRICS_FILE"
70
+
71
+ echo "Metrics collected for $PROJECT → $METRICS_FILE"
@@ -99,7 +99,8 @@ function loadConfig(cwd) {
99
99
  verifier: get('verifier', { section: 'workflow', field: 'verifier' }) ?? defaults.verifier,
100
100
  parallelization,
101
101
  };
102
- } catch {
102
+ } catch (e) {
103
+ process.stderr.write(`WARNING: .planning/config.json is malformed or unreadable, using defaults: ${e.message}\n`);
103
104
  return defaults;
104
105
  }
105
106
  }
@@ -884,8 +885,8 @@ function cmdCommit(cwd, message, files, raw) {
884
885
  output(result, raw, 'nothing');
885
886
  return;
886
887
  }
887
- const result = { committed: false, hash: null, reason: 'nothing_to_commit', error: commitResult.stderr };
888
- output(result, raw, 'nothing');
888
+ const result = { committed: false, hash: null, reason: 'commit_failed', error: commitResult.stderr };
889
+ output(result, raw, 'COMMIT FAILED: ' + (commitResult.stderr || 'unknown error'));
889
890
  return;
890
891
  }
891
892
 
@@ -1156,79 +1157,95 @@ function cmdStateSnapshot(cwd, raw) {
1156
1157
 
1157
1158
  const content = fs.readFileSync(statePath, 'utf-8');
1158
1159
 
1159
- // Helper to extract **Field:** value patterns
1160
- const extractField = (fieldName) => {
1161
- const pattern = new RegExp(`\\*\\*${fieldName}:\\*\\*\\s*(.+)`, 'i');
1162
- const match = content.match(pattern);
1163
- return match ? match[1].trim() : null;
1164
- };
1160
+ // Parse "Phase: X of Y (Name)" or "Phase: X of Y ([Name])"
1161
+ let currentPhase = null, totalPhases = null, currentPhaseName = null;
1162
+ const phaseMatch = content.match(/^Phase:\s*(\d+(?:\.\d+)?)\s*of\s*(\d+)\s*\(([^)]+)\)/m);
1163
+ if (phaseMatch) {
1164
+ currentPhase = phaseMatch[1];
1165
+ totalPhases = parseInt(phaseMatch[2], 10);
1166
+ currentPhaseName = phaseMatch[3].trim();
1167
+ }
1168
+
1169
+ // Parse "Plan: A of B in current phase" or "Plan: Not started" or "Plan: All plans complete"
1170
+ let currentPlan = null, totalPlansInPhase = null;
1171
+ const planMatch = content.match(/^Plan:\s*(\d+)\s*of\s*(\d+)/m);
1172
+ if (planMatch) {
1173
+ currentPlan = planMatch[1];
1174
+ totalPlansInPhase = parseInt(planMatch[2], 10);
1175
+ } else {
1176
+ const planAlt = content.match(/^Plan:\s*(.+)/m);
1177
+ if (planAlt) currentPlan = planAlt[1].trim();
1178
+ }
1179
+
1180
+ // Parse "Status: ..."
1181
+ let status = null;
1182
+ const statusMatch = content.match(/^Status:\s*(.+)/m);
1183
+ if (statusMatch) status = statusMatch[1].trim();
1184
+
1185
+ // Parse "Last activity: DATE — Description"
1186
+ let lastActivity = null, lastActivityDesc = null;
1187
+ const activityMatch = content.match(/^Last activity:\s*(\S+)\s*(?:—|--)\s*(.+)/m);
1188
+ if (activityMatch) {
1189
+ lastActivity = activityMatch[1].trim();
1190
+ lastActivityDesc = activityMatch[2].trim();
1191
+ }
1192
+
1193
+ // Parse "Progress: [...] N%" — extract percentage
1194
+ let progressPercent = null;
1195
+ const progressMatch = content.match(/(\d+)%/);
1196
+ if (progressMatch) progressPercent = parseInt(progressMatch[1], 10);
1197
+
1198
+ // Parse "Assigned to: ..."
1199
+ let assignedTo = null;
1200
+ const assignedMatch = content.match(/^Assigned to:\s*(.+)/m);
1201
+ if (assignedMatch) assignedTo = assignedMatch[1].trim();
1165
1202
 
1166
- // Extract basic fields
1167
- const currentPhase = extractField('Current Phase');
1168
- const currentPhaseName = extractField('Current Phase Name');
1169
- const totalPhasesRaw = extractField('Total Phases');
1170
- const currentPlan = extractField('Current Plan');
1171
- const totalPlansRaw = extractField('Total Plans in Phase');
1172
- const status = extractField('Status');
1173
- const progressRaw = extractField('Progress');
1174
- const lastActivity = extractField('Last Activity');
1175
- const lastActivityDesc = extractField('Last Activity Description');
1176
- const pausedAt = extractField('Paused At');
1177
-
1178
- // Parse numeric fields
1179
- const totalPhases = totalPhasesRaw ? parseInt(totalPhasesRaw, 10) : null;
1180
- const totalPlansInPhase = totalPlansRaw ? parseInt(totalPlansRaw, 10) : null;
1181
- const progressPercent = progressRaw ? parseInt(progressRaw.replace('%', ''), 10) : null;
1182
-
1183
- // Extract decisions table
1203
+ // Extract decisions from "### Decisions" section
1204
+ // Format: "- [Phase X]: decision summary" or "- [Pre-roadmap]: decision"
1184
1205
  const decisions = [];
1185
- const decisionsMatch = content.match(/##\s*Decisions Made[\s\S]*?\n\|[^\n]+\n\|[-|\s]+\n([\s\S]*?)(?=\n##|\n$|$)/i);
1186
- if (decisionsMatch) {
1187
- const tableBody = decisionsMatch[1];
1188
- const rows = tableBody.trim().split('\n').filter(r => r.includes('|'));
1189
- for (const row of rows) {
1190
- const cells = row.split('|').map(c => c.trim()).filter(Boolean);
1191
- if (cells.length >= 3) {
1206
+ const decisionsSection = content.match(/###\s*Decisions\s*\n([\s\S]*?)(?=\n###|\n##|$)/i);
1207
+ if (decisionsSection) {
1208
+ const lines = decisionsSection[1].split('\n');
1209
+ for (const line of lines) {
1210
+ const decMatch = line.match(/^-\s+\[([^\]]+)\]:\s*(.+)/);
1211
+ if (decMatch) {
1192
1212
  decisions.push({
1193
- phase: cells[0],
1194
- summary: cells[1],
1195
- rationale: cells[2],
1213
+ phase: decMatch[1].trim(),
1214
+ summary: decMatch[2].trim(),
1215
+ rationale: '',
1196
1216
  });
1197
1217
  }
1198
1218
  }
1199
1219
  }
1200
1220
 
1201
- // Extract blockers list
1221
+ // Extract blockers from "### Blockers/Concerns" section
1202
1222
  const blockers = [];
1203
- const blockersMatch = content.match(/##\s*Blockers\s*\n([\s\S]*?)(?=\n##|$)/i);
1204
- if (blockersMatch) {
1205
- const blockersSection = blockersMatch[1];
1206
- const items = blockersSection.match(/^-\s+(.+)$/gm) || [];
1223
+ const blockersSection = content.match(/###\s*Blockers\/Concerns\s*\n([\s\S]*?)(?=\n###|\n##|$)/i);
1224
+ if (blockersSection) {
1225
+ const items = blockersSection[1].match(/^-\s+(.+)$/gm) || [];
1207
1226
  for (const item of items) {
1208
- blockers.push(item.replace(/^-\s+/, '').trim());
1227
+ const text = item.replace(/^-\s+/, '').trim();
1228
+ if (text && !text.match(/^none/i)) blockers.push(text);
1209
1229
  }
1210
1230
  }
1211
1231
 
1212
- // Extract session info
1213
- const session = {
1214
- last_date: null,
1215
- stopped_at: null,
1216
- resume_file: null,
1217
- };
1218
-
1219
- const sessionMatch = content.match(/##\s*Session\s*\n([\s\S]*?)(?=\n##|$)/i);
1220
- if (sessionMatch) {
1221
- const sessionSection = sessionMatch[1];
1222
- const lastDateMatch = sessionSection.match(/\*\*Last Date:\*\*\s*(.+)/i);
1223
- const stoppedAtMatch = sessionSection.match(/\*\*Stopped At:\*\*\s*(.+)/i);
1224
- const resumeFileMatch = sessionSection.match(/\*\*Resume File:\*\*\s*(.+)/i);
1225
-
1232
+ // Extract session info from "## Session Continuity"
1233
+ const session = { last_date: null, stopped_at: null, resume_file: null };
1234
+ const sessionSection = content.match(/##\s*Session Continuity\s*\n([\s\S]*?)(?=\n##|$)/i);
1235
+ if (sessionSection) {
1236
+ const sec = sessionSection[1];
1237
+ const lastDateMatch = sec.match(/^Last session:\s*(.+)/m);
1238
+ const stoppedAtMatch = sec.match(/^Stopped at:\s*(.+)/m);
1239
+ const resumeFileMatch = sec.match(/^Resume file:\s*(.+)/m);
1226
1240
  if (lastDateMatch) session.last_date = lastDateMatch[1].trim();
1227
1241
  if (stoppedAtMatch) session.stopped_at = stoppedAtMatch[1].trim();
1228
- if (resumeFileMatch) session.resume_file = resumeFileMatch[1].trim();
1242
+ if (resumeFileMatch) {
1243
+ const rf = resumeFileMatch[1].trim();
1244
+ session.resume_file = rf.toLowerCase() === 'none' ? null : rf;
1245
+ }
1229
1246
  }
1230
1247
 
1231
- const result = {
1248
+ output({
1232
1249
  current_phase: currentPhase,
1233
1250
  current_phase_name: currentPhaseName,
1234
1251
  total_phases: totalPhases,
@@ -1238,13 +1255,11 @@ function cmdStateSnapshot(cwd, raw) {
1238
1255
  progress_percent: progressPercent,
1239
1256
  last_activity: lastActivity,
1240
1257
  last_activity_desc: lastActivityDesc,
1258
+ assigned_to: assignedTo,
1241
1259
  decisions,
1242
1260
  blockers,
1243
- paused_at: pausedAt,
1244
1261
  session,
1245
- };
1246
-
1247
- output(result, raw);
1262
+ }, raw);
1248
1263
  }
1249
1264
 
1250
1265
  function cmdSummaryExtract(cwd, summaryPath, fields, raw) {
@@ -2152,6 +2167,32 @@ function main() {
2152
2167
  break;
2153
2168
  }
2154
2169
 
2170
+ case '--help':
2171
+ case 'help': {
2172
+ const cmds = [
2173
+ 'state load — Load project state + config as JSON',
2174
+ 'state-snapshot — Structured snapshot of STATE.md fields',
2175
+ 'resolve-model <agent> — Resolve model for agent from profile',
2176
+ 'find-phase <number> — Find phase directory path',
2177
+ 'commit <msg> --files .. — Commit with planning-aware logic',
2178
+ 'verify-summary <path> — Verify SUMMARY.md completeness',
2179
+ 'generate-slug <text> — Generate URL-safe slug',
2180
+ 'current-timestamp — ISO timestamp',
2181
+ 'list-todos [area] — List pending todos',
2182
+ 'verify-path-exists <path> — Check if path exists',
2183
+ 'config-ensure-section — Ensure config section exists',
2184
+ 'init <workflow> [args] — Initialize workflow context',
2185
+ 'roadmap get-phase <N> — Get phase details from ROADMAP.md',
2186
+ 'phase-plan-index <N> — Get plan inventory with wave grouping',
2187
+ 'summary-extract <path> — Extract fields from SUMMARY.md',
2188
+ ];
2189
+ console.log('qualia-tools v2.4.1\n');
2190
+ console.log('Usage: qualia-tools <command> [args] [--raw]\n');
2191
+ console.log('Commands:');
2192
+ cmds.forEach(c => console.log(' ' + c));
2193
+ process.exit(0);
2194
+ }
2195
+
2155
2196
  default:
2156
2197
  error(`Unknown command: ${command}`);
2157
2198
  }