@dtt_siye/atool 1.3.1 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +97 -214
- package/README.md.atool-backup.20260410_114701 +299 -0
- package/VERSION +1 -1
- package/bin/atool.js +55 -9
- package/hooks/doc-sync-reminder +4 -4
- package/hooks/hooks-cursor.json +20 -0
- package/hooks/hooks.json +21 -1
- package/hooks/pre-commit +191 -0
- package/hooks/prompt-guard +84 -35
- package/hooks/session-start +34 -12
- package/hooks/task-state-tracker +145 -0
- package/install.sh +14 -4
- package/lib/common.sh +36 -23
- package/lib/compute-importance.sh +73 -0
- package/lib/install-cursor.sh +24 -2
- package/lib/install-hooks.sh +64 -0
- package/lib/install-kiro.sh +26 -2
- package/lib/install-skills.sh +5 -2
- package/lib/pre-scan.sh +13 -1
- package/lib/project-init.sh +28 -9
- package/package.json +1 -1
- package/skills/agent-audit/SKILL.md +180 -0
- package/skills/ai-project-architecture/SKILL.md +33 -534
- package/skills/ai-project-architecture/rules/architecture-validation.md +200 -0
- package/skills/ai-project-architecture/rules/compliance-check.md +83 -0
- package/skills/ai-project-architecture/rules/iron-laws.md +188 -0
- package/skills/ai-project-architecture/rules/migration.md +94 -0
- package/skills/ai-project-architecture/rules/refactoring.md +91 -0
- package/skills/ai-project-architecture/rules/testing.md +249 -0
- package/skills/ai-project-architecture/rules/verification.md +111 -0
- package/skills/architecture-guard/SKILL.md +164 -0
- package/skills/architecture-guard/rules/violation-detection.md +90 -0
- package/skills/atool-init/SKILL.md +24 -4
- package/skills/ci-feedback/SKILL.md +165 -0
- package/skills/project-analyze/SKILL.md +129 -19
- package/skills/project-analyze/phases/phase1-setup.md +76 -5
- package/skills/project-analyze/phases/phase2-understand.md +137 -26
- package/skills/project-analyze/phases/phase2.5-refine.md +32 -23
- package/skills/project-analyze/phases/phase3-graph.md +39 -5
- package/skills/project-analyze/phases/phase4-synthesize.md +17 -1
- package/skills/project-analyze/phases/phase5-export.md +42 -4
- package/skills/project-analyze/prompts/understand-agent.md +156 -298
- package/skills/project-analyze/rules/java.md +69 -1
- package/skills/project-query/SKILL.md +91 -200
- package/skills/project-query/rules/aggregate-stats.md +301 -0
- package/skills/project-query/rules/data-lineage.md +228 -0
- package/skills/project-query/rules/impact-analysis.md +218 -0
- package/skills/project-query/rules/neighborhood.md +234 -0
- package/skills/project-query/rules/node-lookup.md +97 -0
- package/skills/project-query/rules/path-query.md +135 -0
- package/skills/software-architecture/SKILL.md +39 -501
- package/skills/software-architecture/rules/concurrency-ha.md +346 -0
- package/skills/software-architecture/rules/ddd.md +450 -0
- package/skills/software-architecture/rules/decision-workflow.md +155 -0
- package/skills/software-architecture/rules/deployment.md +508 -0
- package/skills/software-architecture/rules/styles.md +232 -0
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# aTool - hooks/task-state-tracker
|
|
3
|
+
# PostToolUse hook: tracks session-level task state for feedback loop
|
|
4
|
+
# Records: modified files, doc staleness, verification status, pending actions
|
|
5
|
+
# State file: .claude/task-state.json (session-scoped, gitignored)
|
|
6
|
+
|
|
7
|
+
set -euo pipefail
|
|
8
|
+
|
|
9
|
+
# Escape a string so it can be embedded inside a JSON string literal.
# Covers backslash, double quote, and the common control characters
# (tab, carriage return, newline). Writes the escaped result to stdout.
escape_for_json() {
  local text=$1
  # Backslashes must be doubled FIRST, before any escape sequences are added.
  text=${text//\\/\\\\}
  text=${text//\"/\\\"}
  text=${text//$'\t'/\\t}
  text=${text//$'\r'/\\r}
  text=${text//$'\n'/\\n}
  printf '%s' "$text"
}
|
|
19
|
+
|
|
20
|
+
# ── Read tool input ───────────────────────────────────────────────────────

# PostToolUse hook payload arrives on stdin as a single JSON object.
# Only read stdin when it is not a terminal (i.e. we were invoked as a hook).
INPUT=""
if [[ ! -t 0 ]]; then
  INPUT=$(cat)
fi

TOOL_NAME=""
FILE_PATH=""
if command -v jq &>/dev/null && [[ -n "$INPUT" ]]; then
  TOOL_NAME=$(printf '%s' "$INPUT" | jq -r '.tool_name // empty' 2>/dev/null || echo "")
  FILE_PATH=$(printf '%s' "$INPUT" | jq -r '.tool_input.file_path // empty' 2>/dev/null || echo "")
fi

# Only respond to Write/Edit tool invocations that carry a file path.
if [[ "$TOOL_NAME" != "Write" && "$TOOL_NAME" != "Edit" ]]; then
  exit 0
fi
if [[ -z "$FILE_PATH" ]]; then
  exit 0
fi

# ── Source file detection ────────────────────────────────────────────────

FILE_EXT="${FILE_PATH##*.}"
FILE_BASE=$(basename "$FILE_PATH")

# Skip non-source files (same logic as doc-sync-reminder)
case "$FILE_EXT" in
  md|markdown|txt|rst) exit 0 ;;
  json|yaml|yml|toml|xml) exit 0 ;;
  css|scss|less|sass|styl) exit 0 ;;
  lock|map|log) exit 0 ;;
  svg|png|jpg|jpeg|gif|ico|webp|ttf|woff|woff2|eot) exit 0 ;;
esac
# Skip dotfiles and tool config files (e.g. vite.config.ts).
case "$FILE_BASE" in
  .*|*.config.*) exit 0 ;;
esac
# Allow-list of source-code extensions we actually track.
case "$FILE_EXT" in
  ts|tsx|js|jsx|vue|svelte|html|rs|py|go|java|kt|kts|swift|dart|ets|sh|bash) ;;
  *) exit 0 ;;
esac

# ── State management ──────────────────────────────────────────────────────

# jq is required for all state manipulation; silently no-op without it.
if ! command -v jq &>/dev/null; then
  exit 0
fi

NOW=$(date +%s 2>/dev/null || echo "0")
STATE_FILE=".claude/task-state.json"
MAX_TRACKED_FILES=50

# Initialize state file if needed
if [[ ! -f "$STATE_FILE" ]]; then
  mkdir -p ".claude" 2>/dev/null || true
  printf '{"session_id":"","modified_files":[],"docs_stale":false,"verification_passed":false,"pending_actions":[],"architecture_violations":0,"created_at":%s,"updated_at":%s}\n' "$NOW" "$NOW" > "$STATE_FILE"
fi

# Read current state
STATE=$(cat "$STATE_FILE" 2>/dev/null || echo "{}")

# Update: add file, set docs_stale=true, reset verification, update timestamp.
# BUGFIX: the tracked-file cap was hard-coded to 50 inside the jq program
# while MAX_TRACKED_FILES sat unused; the cap is now passed via --argjson.
# BUGFIX: the jq call is now guarded — under `set -e`, a corrupt state file
# previously aborted the hook with a non-zero exit; we now fall back to the
# unmodified state and exit cleanly.
if ! NEW_STATE=$(printf '%s' "$STATE" | jq \
  --arg fp "$FILE_PATH" \
  --argjson now "$NOW" \
  --argjson cap "$MAX_TRACKED_FILES" \
  '
  # Add file if not already tracked; otherwise refresh its timestamp
  if (.modified_files | map(select(.path == $fp)) | length) == 0 then
    .modified_files += [{"path": $fp, "modified_at": $now}]
  else
    .modified_files = [.modified_files[] | if .path == $fp then .modified_at = $now else . end]
  end
  # Keep only the most recently modified files, up to the configured cap
  | .modified_files = (.modified_files | sort_by(-.modified_at) | .[0:$cap])
  # Mark docs as stale
  | .docs_stale = true
  # Reset verification since we made changes
  | .verification_passed = false
  # Ensure pending_actions includes doc update
  | if (.pending_actions | index("update_docs") | not) then
      .pending_actions += ["update_docs"]
    else . end
  | .updated_at = $now
  '); then
  NEW_STATE="$STATE"
fi

# BUGFIX: write via a temp file — the previous direct redirection truncated
# the state file even when the jq pretty-print failed, losing session state.
if printf '%s' "$NEW_STATE" | jq '.' > "${STATE_FILE}.tmp" 2>/dev/null; then
  mv -f "${STATE_FILE}.tmp" "$STATE_FILE" 2>/dev/null || true
else
  rm -f "${STATE_FILE}.tmp" 2>/dev/null || true
fi

# ── Output feedback at milestones ─────────────────────────────────────────

FILE_COUNT=$(jq '.modified_files | length' "$STATE_FILE" 2>/dev/null || echo "0")

_MILESTONE=""
# Milestone check: first nudge at 5 files, then at every multiple of 5
# starting from 10 (10, 15, 20, ...).
if (( FILE_COUNT >= 10 && FILE_COUNT % 5 == 0 )); then
  if (( FILE_COUNT == 10 )); then
    _MILESTONE="<ATOOL-TASK-PROGRESS>
Progress check: You have modified ${FILE_COUNT} source files. This is substantial.
- Consider breaking your work into smaller, verifiable units
- Docs are STALE and need updating
- Run /verification-before-completion before claiming done
</ATOOL-TASK-PROGRESS>"
  else
    _MILESTONE="<ATOOL-TASK-PROGRESS>
Progress check: You have modified ${FILE_COUNT} source files.
- This is a large refactoring. Consider intermediate commits/verification
- Docs are STALE and need updating
- Run /verification-before-completion to validate progress
</ATOOL-TASK-PROGRESS>"
  fi
elif (( FILE_COUNT == 5 )); then
  _MILESTONE="<ATOOL-TASK-PROGRESS>
Progress check: You have modified ${FILE_COUNT} source files.
- Docs are STALE and need updating
- Verification has NOT been run since last changes
Remember: Run /verification-before-completion before claiming done.
</ATOOL-TASK-PROGRESS>"
fi

# Emit the milestone as PostToolUse additionalContext (JSON on stdout).
if [[ -n "$_MILESTONE" ]]; then
  _ESCAPED=$(escape_for_json "$_MILESTONE")
  printf '{"hookSpecificOutput":{"hookEventName":"PostToolUse","additionalContext":"%s"}}\n' "$_ESCAPED"
fi

exit 0
|
package/install.sh
CHANGED
|
@@ -77,12 +77,22 @@ parse_args() {
|
|
|
77
77
|
shift
|
|
78
78
|
;;
|
|
79
79
|
--project)
|
|
80
|
-
|
|
81
|
-
|
|
80
|
+
if [[ $# -ge 2 && ! "$2" =~ ^-- ]]; then
|
|
81
|
+
PROJECT_PATH="$2"
|
|
82
|
+
shift 2
|
|
83
|
+
else
|
|
84
|
+
PROJECT_PATH="."
|
|
85
|
+
shift
|
|
86
|
+
fi
|
|
82
87
|
;;
|
|
83
88
|
--analyze)
|
|
84
|
-
|
|
85
|
-
|
|
89
|
+
if [[ $# -ge 2 && ! "$2" =~ ^-- ]]; then
|
|
90
|
+
ANALYZE_PROJECT="$2"
|
|
91
|
+
shift 2
|
|
92
|
+
else
|
|
93
|
+
ANALYZE_PROJECT="."
|
|
94
|
+
shift
|
|
95
|
+
fi
|
|
86
96
|
;;
|
|
87
97
|
--uninstall)
|
|
88
98
|
do_uninstall
|
package/lib/common.sh
CHANGED
|
@@ -140,11 +140,9 @@ ensure_dir() {
|
|
|
140
140
|
# ── IDE Config Directory Helpers (cross-platform) ─────────────────────────────
|
|
141
141
|
|
|
142
142
|
get_claude_config_dir() {
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
echo "$HOME/.claude"
|
|
147
|
-
fi
|
|
143
|
+
# Claude Code CLI always uses ~/.claude/ on all platforms
|
|
144
|
+
# (APPDATA/claude is the Claude Desktop app, not the CLI)
|
|
145
|
+
echo "$HOME/.claude"
|
|
148
146
|
}
|
|
149
147
|
|
|
150
148
|
get_cursor_config_dir() {
|
|
@@ -861,15 +859,20 @@ scan_skills_catalog() {
|
|
|
861
859
|
;;
|
|
862
860
|
pdf|docx|pptx|xlsx)
|
|
863
861
|
office="${office}- ${skill_name}: ${desc}
|
|
862
|
+
"
|
|
863
|
+
;;
|
|
864
|
+
# Core initialization
|
|
865
|
+
atool-init)
|
|
866
|
+
planning="${planning}- ${skill_name}: ${desc}
|
|
864
867
|
"
|
|
865
868
|
;;
|
|
866
869
|
# Architecture & Design
|
|
867
|
-
software-architecture|ai-project-architecture|ui-ux-pro)
|
|
870
|
+
software-architecture|ai-project-architecture|ui-ux-pro|architecture-guard)
|
|
868
871
|
arch_design="${arch_design}- ${skill_name}: ${desc}
|
|
869
872
|
"
|
|
870
873
|
;;
|
|
871
874
|
# Quality & Review
|
|
872
|
-
code-review|project-analyze|project-query|verification-before-completion)
|
|
875
|
+
code-review|project-analyze|project-query|verification-before-completion|agent-audit)
|
|
873
876
|
quality_review="${quality_review}- ${skill_name}: ${desc}
|
|
874
877
|
"
|
|
875
878
|
;;
|
|
@@ -879,8 +882,13 @@ scan_skills_catalog() {
|
|
|
879
882
|
"
|
|
880
883
|
;;
|
|
881
884
|
# Planning & Dispatch
|
|
882
|
-
writing-plans|smart-dispatch|brainstorming|clarify-before-build)
|
|
885
|
+
writing-plans|smart-dispatch|brainstorming|clarify-before-build|using-git-worktrees|find-skills)
|
|
883
886
|
planning="${planning}- ${skill_name}: ${desc}
|
|
887
|
+
"
|
|
888
|
+
;;
|
|
889
|
+
# Testing & Automation
|
|
890
|
+
webapp-testing|ci-feedback)
|
|
891
|
+
quality_review="${quality_review}- ${skill_name}: ${desc}
|
|
884
892
|
"
|
|
885
893
|
;;
|
|
886
894
|
*)
|
|
@@ -890,26 +898,31 @@ scan_skills_catalog() {
|
|
|
890
898
|
esac
|
|
891
899
|
done
|
|
892
900
|
|
|
893
|
-
# Check for Superpowers skills
|
|
901
|
+
# Check for Superpowers skills (only if Superpowers directory exists and is separate from skills_dir)
|
|
894
902
|
local sp_dir="$skills_dir"
|
|
895
903
|
if [[ -d "$skills_dir/_superpowers/skills" ]]; then
|
|
896
904
|
sp_dir="$skills_dir/_superpowers/skills"
|
|
897
905
|
fi
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
906
|
+
|
|
907
|
+
# Only scan Superpowers directory if it exists and is NOT the same as skills_dir
|
|
908
|
+
# (prevents double-scanning skills already categorized in first loop)
|
|
909
|
+
if [[ "$sp_dir" != "$skills_dir" ]] && [[ -d "$sp_dir" ]]; then
|
|
910
|
+
for skill_dir in "$sp_dir"/*/; do
|
|
911
|
+
[[ ! -d "$skill_dir" ]] && continue
|
|
912
|
+
local skill_name
|
|
913
|
+
skill_name=$(basename "$skill_dir")
|
|
914
|
+
[[ -L "$skill_dir" ]] && continue
|
|
915
|
+
local sp_md="$skill_dir/SKILL.md"
|
|
916
|
+
[[ ! -f "$sp_md" ]] && continue
|
|
917
|
+
local desc=""
|
|
918
|
+
desc=$(head -10 "$sp_md" | sed -n '/^---$/,/^---$/p' | grep '^description:' | head -1 | sed 's/^description:[[:space:]]*//')
|
|
919
|
+
if [[ -z "$desc" ]]; then
|
|
920
|
+
desc=$(head -10 "$sp_md" | grep -i '^#' | head -1 | sed 's/^#*\s*//' || echo "$skill_name")
|
|
921
|
+
fi
|
|
922
|
+
superpowers_list="${superpowers_list}- ${skill_name}: ${desc}
|
|
911
923
|
"
|
|
912
|
-
|
|
924
|
+
done
|
|
925
|
+
fi
|
|
913
926
|
|
|
914
927
|
# Output catalog
|
|
915
928
|
if [[ -n "$conventions" ]]; then
|
|
@@ -591,6 +591,79 @@ compute_importance() {
|
|
|
591
591
|
rm -rf "$tmpdir"
|
|
592
592
|
}
|
|
593
593
|
|
|
594
|
+
# === Batch Processing (for pre-scan manifest.json) ===

# Update manifest.json modules array with importance scores.
# Args: MANIFEST_PATH - path to manifest.json (default: .atool-docs/pre-scan/manifest.json)
#       PROJECT_ROOT  - project root directory, parent of .atool-docs (default: .)
# Updates: manifest.json modules[].importance field in-place (atomic replace).
# Returns: 1 if the manifest is missing, 0 otherwise (missing modules only warn).
compute_importance_batch() {
  local manifest_path="${1:-.atool-docs/pre-scan/manifest.json}"
  local project_root="${2:-.}"

  if [[ ! -f "$manifest_path" ]]; then
    log_error "Manifest not found: $manifest_path"
    return 1
  fi

  log_info "Computing importance scores for modules in manifest..."

  # Inventory directory derived from project_root
  local inventory_dir="${project_root}/.atool-docs/inventory"

  # Sanity check: the manifest must actually list modules.
  local module_slugs
  module_slugs=$(jq -r '.modules[] | .slug' "$manifest_path" 2>/dev/null)

  if [[ -z "$module_slugs" ]]; then
    log_warn "No modules found in manifest"
    return 0
  fi

  # Scratch space for the slug->score map and the rewritten manifest.
  local tmpdir
  tmpdir=$(mktemp -d)

  # Discover all modules in project and compute importance
  local all_modules
  all_modules=$(discover_modules "$project_root")

  if [[ -z "$all_modules" ]]; then
    log_warn "No source modules found in project"
    # BUGFIX: write an EMPTY map file, not the literal text "{}" — the map is
    # tab-delimited "slug<TAB>score" lines, and a stray "{}" line made the
    # downstream `tonumber` conversion error out.
    : > "$tmpdir/importance_map"
  else
    # Compute importance for all discovered modules
    log_info "Computing importance factors for discovered modules..."
    local importance_output
    importance_output=$(compute_importance "$project_root" --inventory-dir "$inventory_dir" 2>/dev/null || true)

    # Build slug-to-importance map from output (module_path<TAB>score<TAB>tier);
    # the slug is the last path component of the module path.
    {
      while IFS=$'\t' read -r module_path score _tier; do
        [[ -z "$module_path" ]] && continue
        local slug
        slug=$(basename "$module_path")
        printf '%s\t%s\n' "$slug" "$score"
      done <<< "$importance_output"
    } > "$tmpdir/importance_map"
  fi

  # Update manifest.json with importance scores (slugs absent from the map
  # default to 0 via `// 0`).
  log_info "Updating manifest with importance scores..."
  jq --slurpfile importance_data <(jq -R 'split("\t") | {key: .[0], value: (.[1] | tonumber)}' "$tmpdir/importance_map" | jq -s 'from_entries') \
    '.modules |= map(.importance = ($importance_data[0][.slug] // 0))' \
    "$manifest_path" > "$tmpdir/manifest_updated.json"

  # Atomically replace manifest
  mv "$tmpdir/manifest_updated.json" "$manifest_path"

  # Cleanup
  rm -rf "$tmpdir"

  log_success "Importance scores computed and manifest updated"
}
|
|
666
|
+
|
|
594
667
|
# === Standalone Execution ===
|
|
595
668
|
# When run directly (not sourced), execute compute_importance with CLI args
|
|
596
669
|
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
package/lib/install-cursor.sh
CHANGED
|
@@ -79,8 +79,8 @@ should_always_apply() {
|
|
|
79
79
|
project-analyze) return 0 ;;
|
|
80
80
|
# Workflow skill with no good glob match - must be always-on
|
|
81
81
|
using-git-worktrees) return 0 ;;
|
|
82
|
-
# Conventions skills -
|
|
83
|
-
*-conventions) return
|
|
82
|
+
# Conventions skills - globs handle triggering by file type (no need for alwaysApply)
|
|
83
|
+
*-conventions) return 1 ;;
|
|
84
84
|
# All other skills: use globs or on-demand (reduces context token consumption)
|
|
85
85
|
*) return 1 ;;
|
|
86
86
|
esac
|
|
@@ -140,6 +140,28 @@ skill_to_mdc() {
|
|
|
140
140
|
|
|
141
141
|
# Append skill content, stripping YAML front matter from source
|
|
142
142
|
awk '/^---$/{n++;next} n<2{next} {print}' "$skill_md" >> "$output_file"
|
|
143
|
+
|
|
144
|
+
# Append content from subdirectories (rules/, phases/, references/)
|
|
145
|
+
local subdirs=("rules" "phases" "references")
|
|
146
|
+
for subdir in "${subdirs[@]}"; do
|
|
147
|
+
local subdir_path="$skill_dir/$subdir"
|
|
148
|
+
if [[ -d "$subdir_path" ]]; then
|
|
149
|
+
# Sort files by name and append them
|
|
150
|
+
while IFS= read -r -d '' file; do
|
|
151
|
+
# Add a header for the file
|
|
152
|
+
echo "" >> "$output_file"
|
|
153
|
+
echo "# $(basename "$file")" >> "$output_file"
|
|
154
|
+
echo "" >> "$output_file"
|
|
155
|
+
|
|
156
|
+
# Append file content, only stripping YAML front matter if present
|
|
157
|
+
if head -n 1 "$file" | grep -q '^---$'; then
|
|
158
|
+
awk '/^---$/{n++;next} n<2{next} {print}' "$file" >> "$output_file"
|
|
159
|
+
else
|
|
160
|
+
cat "$file" >> "$output_file"
|
|
161
|
+
fi
|
|
162
|
+
done < <(find "$subdir_path" -name "*.md" -type f -print0 | sort -z)
|
|
163
|
+
fi
|
|
164
|
+
done
|
|
143
165
|
}
|
|
144
166
|
|
|
145
167
|
# Generate .mdc rules from skills into a target directory
|
package/lib/install-hooks.sh
CHANGED
|
@@ -132,6 +132,58 @@ install_hooks() {
|
|
|
132
132
|
fi
|
|
133
133
|
fi
|
|
134
134
|
|
|
135
|
+
# Install pre-commit hook (PreToolUse - checks before git commit)
|
|
136
|
+
local pre_commit_src="$atool_root/hooks/pre-commit"
|
|
137
|
+
if [[ -f "$pre_commit_src" ]]; then
|
|
138
|
+
local pre_commit_name="atool-pre-commit"
|
|
139
|
+
local dest_pre_commit="$hooks_dir/$pre_commit_name"
|
|
140
|
+
|
|
141
|
+
if [[ -f "$dest_pre_commit" ]] && [[ "${ATOOL_FORCE:-0}" != "1" ]]; then
|
|
142
|
+
local pc_existing pc_source
|
|
143
|
+
pc_existing=$(file_checksum "$dest_pre_commit")
|
|
144
|
+
pc_source=$(file_checksum "$pre_commit_src")
|
|
145
|
+
if [[ "$pc_existing" == "$pc_source" ]]; then
|
|
146
|
+
log_info "Hook '$pre_commit_name' already up to date, skipping"
|
|
147
|
+
else
|
|
148
|
+
log_info "Hook '$pre_commit_name' exists but differs - backing up"
|
|
149
|
+
backup_file "$dest_pre_commit"
|
|
150
|
+
run_cmd cp "$pre_commit_src" "$dest_pre_commit"
|
|
151
|
+
run_cmd chmod +x "$dest_pre_commit"
|
|
152
|
+
log_success "Hook installed: $dest_pre_commit"
|
|
153
|
+
fi
|
|
154
|
+
else
|
|
155
|
+
run_cmd cp "$pre_commit_src" "$dest_pre_commit"
|
|
156
|
+
run_cmd chmod +x "$dest_pre_commit"
|
|
157
|
+
log_success "Hook installed: $dest_pre_commit"
|
|
158
|
+
fi
|
|
159
|
+
fi
|
|
160
|
+
|
|
161
|
+
# Install task-state-tracker hook (PostToolUse - tracks session state)
|
|
162
|
+
local task_state_src="$atool_root/hooks/task-state-tracker"
|
|
163
|
+
if [[ -f "$task_state_src" ]]; then
|
|
164
|
+
local task_state_name="atool-task-state-tracker"
|
|
165
|
+
local dest_task_state="$hooks_dir/$task_state_name"
|
|
166
|
+
|
|
167
|
+
if [[ -f "$dest_task_state" ]] && [[ "${ATOOL_FORCE:-0}" != "1" ]]; then
|
|
168
|
+
local ts_existing ts_source
|
|
169
|
+
ts_existing=$(file_checksum "$dest_task_state")
|
|
170
|
+
ts_source=$(file_checksum "$task_state_src")
|
|
171
|
+
if [[ "$ts_existing" == "$ts_source" ]]; then
|
|
172
|
+
log_info "Hook '$task_state_name' already up to date, skipping"
|
|
173
|
+
else
|
|
174
|
+
log_info "Hook '$task_state_name' exists but differs - backing up"
|
|
175
|
+
backup_file "$dest_task_state"
|
|
176
|
+
run_cmd cp "$task_state_src" "$dest_task_state"
|
|
177
|
+
run_cmd chmod +x "$dest_task_state"
|
|
178
|
+
log_success "Hook installed: $dest_task_state"
|
|
179
|
+
fi
|
|
180
|
+
else
|
|
181
|
+
run_cmd cp "$task_state_src" "$dest_task_state"
|
|
182
|
+
run_cmd chmod +x "$dest_task_state"
|
|
183
|
+
log_success "Hook installed: $dest_task_state"
|
|
184
|
+
fi
|
|
185
|
+
fi
|
|
186
|
+
|
|
135
187
|
# Install hooks config JSON
|
|
136
188
|
local config_file
|
|
137
189
|
case "$ide_type" in
|
|
@@ -224,6 +276,18 @@ install_hooks_global() {
|
|
|
224
276
|
run_cmd chmod +x "$claude_hooks_dir/atool-doc-sync-reminder"
|
|
225
277
|
fi
|
|
226
278
|
|
|
279
|
+
# Copy pre-commit hook (PreToolUse)
|
|
280
|
+
if [[ -f "$atool_root/hooks/pre-commit" ]]; then
|
|
281
|
+
run_cmd cp "$atool_root/hooks/pre-commit" "$claude_hooks_dir/atool-pre-commit"
|
|
282
|
+
run_cmd chmod +x "$claude_hooks_dir/atool-pre-commit"
|
|
283
|
+
fi
|
|
284
|
+
|
|
285
|
+
# Copy task-state-tracker hook (PostToolUse)
|
|
286
|
+
if [[ -f "$atool_root/hooks/task-state-tracker" ]]; then
|
|
287
|
+
run_cmd cp "$atool_root/hooks/task-state-tracker" "$claude_hooks_dir/atool-task-state-tracker"
|
|
288
|
+
run_cmd chmod +x "$claude_hooks_dir/atool-task-state-tracker"
|
|
289
|
+
fi
|
|
290
|
+
|
|
227
291
|
# 2. Register hooks config in settings.json
|
|
228
292
|
# Claude Code reads hooks from settings.json, NOT from files on disk
|
|
229
293
|
local settings_file="$claude_config_dir/settings.json"
|
package/lib/install-kiro.sh
CHANGED
|
@@ -112,7 +112,10 @@ get_superpowers_kiro_category() {
|
|
|
112
112
|
# Args: SKILL_MD_PATH
|
|
113
113
|
# Outputs: condensed content to stdout
|
|
114
114
|
extract_condensed_skill() {
|
|
115
|
-
local
|
|
115
|
+
local skill_dir="$1"
|
|
116
|
+
local skill_md="$skill_dir/SKILL.md"
|
|
117
|
+
|
|
118
|
+
# First, extract the main SKILL.md content (with YAML frontmatter stripped)
|
|
116
119
|
awk '
|
|
117
120
|
BEGIN { fm = 0 }
|
|
118
121
|
/^---$/ { fm++; next }
|
|
@@ -129,6 +132,27 @@ extract_condensed_skill() {
|
|
|
129
132
|
}
|
|
130
133
|
{ print }
|
|
131
134
|
' "$skill_md"
|
|
135
|
+
|
|
136
|
+
# Then append content from subdirectories (rules/, phases/, references/)
|
|
137
|
+
local subdirs=("rules" "phases" "references")
|
|
138
|
+
for subdir in "${subdirs[@]}"; do
|
|
139
|
+
local subdir_path="$skill_dir/$subdir"
|
|
140
|
+
if [[ -d "$subdir_path" ]]; then
|
|
141
|
+
# Sort files by name and append them
|
|
142
|
+
while IFS= read -r -d '' file; do
|
|
143
|
+
echo ""
|
|
144
|
+
echo "### $(basename "$file")"
|
|
145
|
+
echo ""
|
|
146
|
+
|
|
147
|
+
# Append file content, only stripping YAML frontmatter if present
|
|
148
|
+
if head -n 1 "$file" | grep -q '^---$'; then
|
|
149
|
+
awk '/^---$/{n++;next} n<2{next} {print}' "$file"
|
|
150
|
+
else
|
|
151
|
+
cat "$file"
|
|
152
|
+
fi
|
|
153
|
+
done < <(find "$subdir_path" -name "*.md" -type f -print0 | sort -z)
|
|
154
|
+
fi
|
|
155
|
+
done
|
|
132
156
|
}
|
|
133
157
|
|
|
134
158
|
# Extract ultra-condensed instructions for always-in-context steering
|
|
@@ -232,7 +256,7 @@ generate_consolidated_steering() {
|
|
|
232
256
|
echo ""
|
|
233
257
|
echo "## Skill: $skill_name"
|
|
234
258
|
echo ""
|
|
235
|
-
$extract_func "$
|
|
259
|
+
$extract_func "$skill_dir"
|
|
236
260
|
} >> "$tmpdir/${category}.txt"
|
|
237
261
|
done
|
|
238
262
|
done
|
package/lib/install-skills.sh
CHANGED
|
@@ -122,8 +122,11 @@ install_skills() {
|
|
|
122
122
|
# Skills reference scripts via "source lib/X.sh" relative to the skill dir
|
|
123
123
|
local skill_lib_dir="$target_skill/lib"
|
|
124
124
|
local referenced_libs
|
|
125
|
-
|
|
126
|
-
|
|
125
|
+
local _find_paths=()
|
|
126
|
+
[[ -d "$target_skill/phases" ]] && _find_paths+=("$target_skill/phases")
|
|
127
|
+
_find_paths+=("$target_skill")
|
|
128
|
+
referenced_libs=$(find "${_find_paths[@]}" -maxdepth 2 -name '*.md' -exec grep -roh 'source lib/[^ ]*\.sh' {} + 2>/dev/null \
|
|
129
|
+
| sed 's/source lib\///' | sort -u || true)
|
|
127
130
|
if [[ -n "$referenced_libs" ]]; then
|
|
128
131
|
run_cmd mkdir -p "$skill_lib_dir"
|
|
129
132
|
for lib_name in $referenced_libs; do
|
package/lib/pre-scan.sh
CHANGED
|
@@ -1098,7 +1098,19 @@ pre_scan_project() {
|
|
|
1098
1098
|
generated_at: (now | todate),
|
|
1099
1099
|
total_modules: length,
|
|
1100
1100
|
scan_duration_seconds: null,
|
|
1101
|
-
modules: [.[]
|
|
1101
|
+
modules: [.[] | {
|
|
1102
|
+
slug: .module,
|
|
1103
|
+
name: .module,
|
|
1104
|
+
importance: 0
|
|
1105
|
+
} + (.module_summary // {} | {
|
|
1106
|
+
total_files,
|
|
1107
|
+
total_classes,
|
|
1108
|
+
total_functions,
|
|
1109
|
+
total_api_endpoints,
|
|
1110
|
+
total_data_models
|
|
1111
|
+
})],
|
|
1112
|
+
aggregated_modules: null,
|
|
1113
|
+
aggregated_count: null
|
|
1102
1114
|
}' "$output_dir"/*.json 2>/dev/null || echo '{}')
|
|
1103
1115
|
|
|
1104
1116
|
# Add duration to manifest
|
package/lib/project-init.sh
CHANGED
|
@@ -120,7 +120,14 @@ project_init_single() {
|
|
|
120
120
|
local atool_root="$2"
|
|
121
121
|
|
|
122
122
|
local stack
|
|
123
|
-
stack
|
|
123
|
+
# Check if stack is specified via environment variable
|
|
124
|
+
if [[ -n "${ATOOL_STACK:-}" ]]; then
|
|
125
|
+
stack="$ATOOL_STACK"
|
|
126
|
+
log_info "Using specified stack: ${BOLD}$stack${NC}"
|
|
127
|
+
else
|
|
128
|
+
stack=$(detect_stack "$project_dir")
|
|
129
|
+
fi
|
|
130
|
+
|
|
124
131
|
local template_name
|
|
125
132
|
template_name=$(get_template_name "$stack")
|
|
126
133
|
local description
|
|
@@ -130,22 +137,34 @@ project_init_single() {
|
|
|
130
137
|
|
|
131
138
|
echo ""
|
|
132
139
|
log_info "Detected stack: ${BOLD}$description${NC} (${stack})"
|
|
133
|
-
|
|
140
|
+
if [[ -n "${ATOOL_STACK:-}" ]]; then
|
|
141
|
+
log_info "Stack specified via --stack flag"
|
|
142
|
+
fi
|
|
143
|
+
if [[ "$stack" != "generic" ]]; then
|
|
144
|
+
log_info "Confidence: ${confidence}%"
|
|
145
|
+
fi
|
|
134
146
|
log_info "Template: ${template_name}"
|
|
135
147
|
echo ""
|
|
136
148
|
|
|
137
149
|
if [[ "$stack" == "generic" ]]; then
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
150
|
+
if [[ -n "${ATOOL_STACK:-}" ]]; then
|
|
151
|
+
log_error "Specified stack '$ATOOL_STACK' is not supported."
|
|
152
|
+
log_info "Available stacks: $(ls "$atool_root/templates" | grep "CLAUDE\." | sed 's/CLAUDE\.//' | sed 's/\.md$//' | tr '\n' ',' | sed 's/,$//')"
|
|
153
|
+
return 1
|
|
154
|
+
else
|
|
155
|
+
log_warn "Could not detect a specific technology stack."
|
|
156
|
+
log_warn "A generic template will be used."
|
|
157
|
+
if ! confirm "Continue with generic template?" "Y"; then
|
|
158
|
+
log_info "Cancelled. You can specify a stack manually with --stack <stack-name>"
|
|
159
|
+
return 0
|
|
160
|
+
fi
|
|
143
161
|
fi
|
|
144
162
|
else
|
|
145
|
-
confirm "Use '$description' template?" "Y"
|
|
163
|
+
if ! confirm "Use '$description' template?" "Y"; then
|
|
146
164
|
log_info "Cancelled."
|
|
165
|
+
log_info "You can specify a different stack with --stack <stack-name>"
|
|
147
166
|
return 0
|
|
148
|
-
|
|
167
|
+
fi
|
|
149
168
|
fi
|
|
150
169
|
|
|
151
170
|
# Generate CLAUDE.md
|