claude-dev-kit 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/angelic-workshop-energy-clearing.md +113 -0
- package/.claude/agents/angelic-workshop-intake.md +84 -0
- package/.claude/agents/angelic-workshop-integration.md +140 -0
- package/.claude/agents/angelic-workshop-invocation.md +92 -0
- package/.claude/agents/angelic-workshop-lead.md +225 -0
- package/.claude/agents/angelic-workshop-transmission.md +108 -0
- package/.claude/agents/deep-think-partner.md +41 -0
- package/.claude/agents/dev-backend.md +74 -0
- package/.claude/agents/dev-e2e.md +101 -0
- package/.claude/agents/dev-frontend.md +82 -0
- package/.claude/agents/dev-lead.md +144 -0
- package/.claude/agents/dev-reviewer.md +122 -0
- package/.claude/agents/dev-test.md +88 -0
- package/.claude/agents/documentation-manager.md +73 -0
- package/.claude/agents/haiku-executor.md +8 -0
- package/.claude/agents/pm-groomer.md +98 -0
- package/.claude/agents/pm-prp-writer.md +144 -0
- package/.claude/agents/pm-sizer.md +84 -0
- package/.claude/agents/project-manager.md +91 -0
- package/.claude/agents/system-architect.md +98 -0
- package/.claude/agents/validation-gates.md +121 -0
- package/.claude/agents/workflow-builder.md +416 -0
- package/.claude/commands/ai/detect.md +117 -0
- package/.claude/commands/ai/route.md +128 -0
- package/.claude/commands/ai/switch.md +121 -0
- package/.claude/commands/bs/brainstorm_full.md +149 -0
- package/.claude/commands/bs/claude.md +37 -0
- package/.claude/commands/bs/codex.md +37 -0
- package/.claude/commands/bs/gemini.md +37 -0
- package/.claude/commands/bs/glm.md +37 -0
- package/.claude/commands/bs/grok.md +37 -0
- package/.claude/commands/bs/kimi.md +37 -0
- package/.claude/commands/bs/minimax.md +37 -0
- package/.claude/commands/bs/ollama.md +71 -0
- package/.claude/commands/code/build-and-fix.md +80 -0
- package/.claude/commands/code/simplify.md +77 -0
- package/.claude/commands/dev/backend.md +47 -0
- package/.claude/commands/dev/e2e.md +49 -0
- package/.claude/commands/dev/frontend.md +45 -0
- package/.claude/commands/dev/review.md +48 -0
- package/.claude/commands/dev/test.md +54 -0
- package/.claude/commands/dev-epic.md +121 -0
- package/.claude/commands/dev-issue.md +79 -0
- package/.claude/commands/dev.md +134 -0
- package/.claude/commands/execute-prp.md +113 -0
- package/.claude/commands/fix-github-issue.md +14 -0
- package/.claude/commands/generate-prp.md +73 -0
- package/.claude/commands/git/status.md +14 -0
- package/.claude/commands/haiku.md +13 -0
- package/.claude/commands/improve.md +178 -0
- package/.claude/commands/init.md +311 -0
- package/.claude/commands/pm/groom.md +58 -0
- package/.claude/commands/pm/plan-epic.md +74 -0
- package/.claude/commands/pm/size.md +46 -0
- package/.claude/commands/pm.md +47 -0
- package/.claude/commands/primer.md +16 -0
- package/.claude/commands/self-improve.md +243 -0
- package/.claude/commands/think.md +68 -0
- package/.claude/commands/workflow/angelic-workshop.md +89 -0
- package/.claude/commands/workflow/build.md +91 -0
- package/.claude/hooks/pre-tool-use/block-dangerous-commands.js +196 -0
- package/.claude/hooks/skill-activation-prompt/package-lock.json +560 -0
- package/.claude/hooks/skill-activation-prompt/package.json +16 -0
- package/.claude/hooks/skill-activation-prompt/skill-activation-prompt.ts +135 -0
- package/.claude/hooks/skill-activation-prompt/skill-rules.json +50 -0
- package/.claude/hooks/stop/context_monitor.py +155 -0
- package/.claude/hooks/stop/learning_logger.py +218 -0
- package/.claude/skills/ai-router/SKILL.md +119 -0
- package/.claude/skills/build-and-fix/SKILL.md +271 -0
- package/.claude/skills/build-and-fix/examples/javascript-lint-fix.md +37 -0
- package/.claude/skills/build-and-fix/language-configs/javascript.yaml +139 -0
- package/.claude/skills/build-and-fix/references/config-schema.md +120 -0
- package/.claude/skills/build-and-fix/references/error-patterns.md +273 -0
- package/.claude/skills/code-investigator/SKILL.md +299 -0
- package/.claude/skills/code-investigator/references/investigation-workflows.md +542 -0
- package/.claude/skills/code-investigator/references/language-specific.md +761 -0
- package/.claude/skills/code-investigator/references/search-patterns.md +258 -0
- package/.claude/skills/code-investigator/references/serena-patterns.md +328 -0
- package/.claude/skills/stack-detector/SKILL.md +153 -0
- package/.claude/skills/verification-before-completion/SKILL.md +143 -0
- package/.claude/templates/claude-md-template.md +56 -0
- package/.claude/templates/stacks/express-node.md +134 -0
- package/.claude/templates/stacks/fastapi.md +152 -0
- package/.claude/templates/stacks/generic.md +101 -0
- package/.claude/templates/stacks/nextjs-prisma.md +235 -0
- package/README.md +499 -0
- package/bin/claude-dev-kit.js +11 -0
- package/package.json +31 -0
- package/scripts/install.sh +448 -0
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
#!/usr/bin/env tsx
|
|
2
|
+
import * as fs from "fs";
|
|
3
|
+
import * as path from "path";
|
|
4
|
+
|
|
5
|
+
// Shape of the JSON payload Claude Code pipes to this hook on stdin.
// All fields are optional — the hook must tolerate partial input.
interface HookInput {
  session_id?: string;
  prompt?: string;
  transcript_path?: string;
  hook_event_name?: string;
}

// One entry from skill-rules.json: maps trigger substrings to a
// suggestion message for a skill. `alwaysActive` rules fire on every
// prompt regardless of triggers.
interface SkillRule {
  skill: string;
  priority: "critical" | "high" | "medium" | "low";
  alwaysActive?: boolean;
  triggers?: string[];
  message: string;
}

// Per-session record of skills already suggested, so the same
// suggestion is not repeated within one session.
interface SessionState {
  [sessionId: string]: {
    skills: string[];
    timestamp: number;
  };
}

// On-disk location for cross-invocation state (each hook run is a
// short-lived process, so state must be persisted between runs).
const SESSION_STATE_DIR = "/tmp/.claude-session-state";
const STATE_FILE = path.join(SESSION_STATE_DIR, "suggested-skills.json");
// Session entries older than this are considered stale.
const MAX_AGE_MS = 7 * 24 * 60 * 60 * 1000; // 7 days
|
|
30
|
+
|
|
31
|
+
function getSessionState(sessionId: string): Set<string> {
|
|
32
|
+
try {
|
|
33
|
+
if (!fs.existsSync(STATE_FILE)) return new Set();
|
|
34
|
+
const raw = fs.readFileSync(STATE_FILE, "utf-8");
|
|
35
|
+
const data: SessionState = JSON.parse(raw);
|
|
36
|
+
const now = Date.now();
|
|
37
|
+
// Prune stale sessions
|
|
38
|
+
for (const key of Object.keys(data)) {
|
|
39
|
+
if (data[key].timestamp < now - MAX_AGE_MS) {
|
|
40
|
+
delete data[key];
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
return new Set(data[sessionId]?.skills ?? []);
|
|
44
|
+
} catch {
|
|
45
|
+
return new Set();
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
function saveSessionState(sessionId: string, suggested: Set<string>): void {
|
|
50
|
+
try {
|
|
51
|
+
fs.mkdirSync(SESSION_STATE_DIR, { recursive: true });
|
|
52
|
+
let data: SessionState = {};
|
|
53
|
+
try {
|
|
54
|
+
if (fs.existsSync(STATE_FILE)) {
|
|
55
|
+
data = JSON.parse(fs.readFileSync(STATE_FILE, "utf-8"));
|
|
56
|
+
}
|
|
57
|
+
} catch {
|
|
58
|
+
// start fresh
|
|
59
|
+
}
|
|
60
|
+
data[sessionId] = { skills: [...suggested], timestamp: Date.now() };
|
|
61
|
+
fs.writeFileSync(STATE_FILE, JSON.stringify(data, null, 2));
|
|
62
|
+
} catch {
|
|
63
|
+
// non-fatal
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
function main(): void {
|
|
68
|
+
let input: HookInput = {};
|
|
69
|
+
try {
|
|
70
|
+
const stdin = fs.readFileSync("/dev/stdin", "utf-8").trim();
|
|
71
|
+
if (stdin) input = JSON.parse(stdin);
|
|
72
|
+
} catch {
|
|
73
|
+
process.exit(0);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
const prompt = (input.prompt ?? "").toLowerCase();
|
|
77
|
+
const sessionId = input.session_id ?? "default";
|
|
78
|
+
|
|
79
|
+
const rulesPath = path.join(__dirname, "skill-rules.json");
|
|
80
|
+
let rules: SkillRule[] = [];
|
|
81
|
+
try {
|
|
82
|
+
rules = JSON.parse(fs.readFileSync(rulesPath, "utf-8"));
|
|
83
|
+
} catch {
|
|
84
|
+
process.exit(0);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
const alreadySuggested = getSessionState(sessionId);
|
|
88
|
+
|
|
89
|
+
const priorityOrder: Record<string, number> = {
|
|
90
|
+
critical: 0,
|
|
91
|
+
high: 1,
|
|
92
|
+
medium: 2,
|
|
93
|
+
low: 3,
|
|
94
|
+
};
|
|
95
|
+
|
|
96
|
+
const toSuggest: SkillRule[] = [];
|
|
97
|
+
for (const rule of rules) {
|
|
98
|
+
if (alreadySuggested.has(rule.skill)) continue;
|
|
99
|
+
|
|
100
|
+
if (rule.alwaysActive) {
|
|
101
|
+
toSuggest.push(rule);
|
|
102
|
+
continue;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
if (rule.triggers) {
|
|
106
|
+
for (const trigger of rule.triggers) {
|
|
107
|
+
if (prompt.includes(trigger.toLowerCase())) {
|
|
108
|
+
toSuggest.push(rule);
|
|
109
|
+
break;
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
if (toSuggest.length === 0) {
|
|
116
|
+
process.exit(0);
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
toSuggest.sort(
|
|
120
|
+
(a, b) => (priorityOrder[a.priority] ?? 9) - (priorityOrder[b.priority] ?? 9)
|
|
121
|
+
);
|
|
122
|
+
|
|
123
|
+
const lines = toSuggest.map((r) => `- [${r.priority.toUpperCase()}] ${r.message}`);
|
|
124
|
+
const reminder = `SKILL ACTIVATION SUGGESTIONS:\n${lines.join("\n")}`;
|
|
125
|
+
|
|
126
|
+
for (const r of toSuggest) {
|
|
127
|
+
alreadySuggested.add(r.skill);
|
|
128
|
+
}
|
|
129
|
+
saveSessionState(sessionId, alreadySuggested);
|
|
130
|
+
|
|
131
|
+
process.stdout.write(reminder);
|
|
132
|
+
process.exit(0);
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
main();
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
[
|
|
2
|
+
{
|
|
3
|
+
"skill": "verification-before-completion",
|
|
4
|
+
"priority": "critical",
|
|
5
|
+
"triggers": ["done", "complete", "finished", "implemented", "fixed", "ready", "working", "resolved"],
|
|
6
|
+
"message": "Use /verification-before-completion before claiming work is done or creating PRs — run verification commands and confirm output first."
|
|
7
|
+
},
|
|
8
|
+
{
|
|
9
|
+
"skill": "code-investigator",
|
|
10
|
+
"priority": "high",
|
|
11
|
+
"triggers": ["investigate", "how does", "how do", "trace", "debug", "understand", "look into", "explore", "find the", "why is", "what is", "where is", "call hierarchy", "who calls", "find all", "go to definition", "find references", "refactor", "reorganize", "understand the flow", "walk me through"],
|
|
12
|
+
"message": "Use /code-investigator for efficient targeted code investigation — saves tokens via precise searches."
|
|
13
|
+
},
|
|
14
|
+
{
|
|
15
|
+
"skill": "build-and-fix",
|
|
16
|
+
"priority": "high",
|
|
17
|
+
"triggers": ["build", "lint", "compile", "type error", "type check", "tsc", "bun run build", "fix errors", "format"],
|
|
18
|
+
"message": "Use /build-and-fix to auto-detect project type, run build, and fix simple errors (formatting/linting)."
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
"skill": "ai-router",
|
|
22
|
+
"priority": "high",
|
|
23
|
+
"triggers": ["use gemini", "use codex", "use grok", "use kimi", "use ollama", "ask grok", "ask gemini", "ask ollama", "route to", "which ai", "best ai for", "delegate to", "run with", "send to gemini", "let codex", "opencode", "other ai", "different model", "another model", "local model", "local llm"],
|
|
24
|
+
"message": "Use /ai:route to route this task to the best available AI CLI tool. Run /ai:detect first to see what's installed."
|
|
25
|
+
},
|
|
26
|
+
{
|
|
27
|
+
"skill": "ai-router",
|
|
28
|
+
"priority": "high",
|
|
29
|
+
"triggers": ["private", "confidential", "sensitive data", "don't send to cloud", "local only", "air-gapped", "offline mode", "no cloud", "keep it local"],
|
|
30
|
+
"message": "Use /ai:route for privacy-sensitive tasks — it will route to Ollama (local LLM) if available, keeping data on your machine."
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
"skill": "ai-router",
|
|
34
|
+
"priority": "medium",
|
|
35
|
+
"triggers": ["entire codebase", "full project scan", "scan everything", "whole repo", "all files in"],
|
|
36
|
+
"message": "For large codebase scans, use /ai:route — Gemini's 1M token context may be better suited than Claude's 200k."
|
|
37
|
+
},
|
|
38
|
+
{
|
|
39
|
+
"skill": "improve",
|
|
40
|
+
"priority": "medium",
|
|
41
|
+
"triggers": ["improve the kit", "make the kit better", "kit is slow", "skill not triggering", "agent keeps failing", "optimize agents", "feedback", "what can be improved", "kit improvements"],
|
|
42
|
+
"message": "Use /improve to analyze session learning data and propose concrete improvements to agents, skills, and commands."
|
|
43
|
+
},
|
|
44
|
+
{
|
|
45
|
+
"skill": "self-improve",
|
|
46
|
+
"priority": "low",
|
|
47
|
+
"triggers": ["self-improve", "critique the kit", "improve itself", "meta-improve", "kit self-improvement", "have ais review"],
|
|
48
|
+
"message": "Use /self-improve to have multiple AI models critique and improve the kit's own prompts and configurations."
|
|
49
|
+
}
|
|
50
|
+
]
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Context monitor - warns when context usage is high."""
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import sys
|
|
8
|
+
import time
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
# Context-usage thresholds, as a percentage of CONTEXT_LIMIT.
THRESHOLD_WARN = 65   # print a warning to stderr
THRESHOLD_STOP = 85   # exit 2 (blocking) and ask the user to /clear
CONTEXT_LIMIT = 200000  # assumed model context window, in tokens

# Token counts are cached per session so the transcript is not re-read
# on every Stop event.
CACHE_DIR = Path("/tmp/.claude_context_cache")
CACHE_TTL = 30  # seconds a cached value stays fresh

# ANSI color codes for terminal output.
RED = "\033[0;31m"
YELLOW = "\033[0;33m"
NC = "\033[0m"
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def get_current_session_id() -> str:
    """Return the most recent session ID from ~/.claude/history.jsonl.

    Returns an empty string when the history file is missing, empty,
    or its last line cannot be read/parsed.
    """
    history_path = Path.home() / ".claude" / "history.jsonl"
    if not history_path.exists():
        return ""
    try:
        entries = history_path.read_text().splitlines()
        if not entries:
            return ""
        return json.loads(entries[-1]).get("sessionId", "")
    except (json.JSONDecodeError, OSError):
        return ""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def find_session_file(session_id: str) -> Path | None:
    """Locate ``<session_id>.jsonl`` under any ~/.claude/projects/* dir.

    Returns None when no project directory contains the transcript.
    """
    root = Path.home() / ".claude" / "projects"
    if not root.exists():
        return None
    for entry in root.iterdir():
        if not entry.is_dir():
            continue
        candidate = entry / f"{session_id}.jsonl"
        if candidate.exists():
            return candidate
    return None
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def get_actual_token_count(session_file: Path) -> int | None:
    """Return the context size implied by the newest assistant usage record.

    Scans the transcript line by line, keeping the last ``usage`` dict
    seen on an assistant message. The context size is input tokens plus
    cache-creation plus cache-read tokens. Returns None when the file is
    unreadable or contains no usage data.
    """
    newest = None
    try:
        with session_file.open() as fh:
            for raw_line in fh:
                try:
                    record = json.loads(raw_line)
                except json.JSONDecodeError:
                    continue
                if record.get("type") != "assistant":
                    continue
                inner = record.get("message", {})
                # Only dict-shaped messages with a truthy usage count.
                if isinstance(inner, dict) and inner.get("usage"):
                    newest = inner["usage"]
    except OSError:
        return None

    if not newest:
        return None

    return (
        newest.get("input_tokens", 0)
        + newest.get("cache_creation_input_tokens", 0)
        + newest.get("cache_read_input_tokens", 0)
    )
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def get_cache_file(session_id: str) -> Path:
    """Return the per-session cache file path under CACHE_DIR."""
    return CACHE_DIR / (session_id + ".json")
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def get_cached_context(session_id: str) -> tuple[int, bool]:
    """Return ``(tokens, hit)`` — the cached count if still within CACHE_TTL.

    On any miss (no file, stale entry, or unreadable/corrupt cache) the
    result is ``(0, False)``.
    """
    cache_path = get_cache_file(session_id)
    if not cache_path.exists():
        return 0, False
    try:
        payload = json.loads(cache_path.read_text())
    except (json.JSONDecodeError, OSError):
        return 0, False
    if time.time() - payload.get("timestamp", 0) < CACHE_TTL:
        return payload.get("tokens", 0), True
    return 0, False
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def save_cache(session_id: str, tokens: int) -> None:
    """Persist the computed token count for this session.

    Best-effort: any filesystem error is swallowed so caching can never
    break the hook.
    """
    try:
        # parents=True: without it, a missing parent directory raised
        # FileNotFoundError and silently disabled caching forever.
        CACHE_DIR.mkdir(parents=True, exist_ok=True)
        cache_file = get_cache_file(session_id)
        with cache_file.open("w") as f:
            json.dump({"tokens": tokens, "timestamp": time.time()}, f)
    except OSError:
        pass
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def run_context_monitor() -> int:
    """Check the current session's context usage and warn or block.

    Exit codes: 0 = ok (including the soft warning), 2 = usage at or
    above THRESHOLD_STOP, which blocks and asks the user to /clear.
    """
    session_id = get_current_session_id()
    if not session_id:
        return 0

    tokens, hit = get_cached_context(session_id)
    if not hit:
        transcript = find_session_file(session_id)
        if not transcript:
            return 0

        counted = get_actual_token_count(transcript)
        if counted is None:
            return 0

        tokens = counted
        save_cache(session_id, tokens)

    pct = (tokens / CONTEXT_LIMIT) * 100

    if pct >= THRESHOLD_STOP:
        print("", file=sys.stderr)
        print(f"{RED}CONTEXT LIMIT: {pct:.0f}% ({tokens:,}/{CONTEXT_LIMIT//1000}k){NC}", file=sys.stderr)
        print(f"{RED}Ask user to run /clear to reset context.{NC}", file=sys.stderr)
        return 2

    if pct >= THRESHOLD_WARN:
        print("", file=sys.stderr)
        print(f"{YELLOW}Context: {pct:.0f}% ({tokens:,}/{CONTEXT_LIMIT//1000}k){NC}", file=sys.stderr)
        print(f"{YELLOW} Complete current task, wrap up at {THRESHOLD_STOP}% maximum{NC}", file=sys.stderr)
        # Warning is informational only: exit 0 so it is not labelled an
        # error; only THRESHOLD_STOP returns the blocking exit code 2.

    return 0


if __name__ == "__main__":
    sys.exit(run_context_monitor())
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Learning Logger - Stop hook that captures session data for self-improvement.
|
|
4
|
+
|
|
5
|
+
After each session ends, this hook:
|
|
6
|
+
1. Reads the session transcript from ~/.claude/projects/
|
|
7
|
+
2. Extracts: commands used, agents spawned, tools called, token usage, errors
|
|
8
|
+
3. Appends a structured entry to .claude/learning/sessions/YYYY-MM-DD.jsonl
|
|
9
|
+
|
|
10
|
+
This data feeds the /improve command for pattern analysis and kit evolution.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import os
|
|
17
|
+
import sys
|
|
18
|
+
import time
|
|
19
|
+
from collections import Counter
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Per-day learning logs, relative to the project working directory.
LEARNING_DIR = Path(".claude/learning/sessions")
# NOTE(review): not referenced anywhere in this file — presumably a cap
# enforced elsewhere or reserved for future use; confirm before removing.
MAX_SESSIONS_PER_FILE = 50
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def get_current_session_id() -> str:
    """Return the sessionId from the last line of ~/.claude/history.jsonl.

    Returns an empty string on a missing file, read error, empty file,
    or unparsable last line.
    """
    history = Path.home() / ".claude" / "history.jsonl"
    if not history.exists():
        return ""
    try:
        last_line = ""
        with history.open() as fh:
            for last_line in fh:
                pass
        if not last_line:
            return ""
        return json.loads(last_line).get("sessionId", "")
    except (json.JSONDecodeError, OSError):
        return ""
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def find_session_file(session_id: str) -> Path | None:
    """Search every ~/.claude/projects/* dir for this session's transcript."""
    projects = Path.home() / ".claude" / "projects"
    if not projects.exists():
        return None
    target_name = f"{session_id}.jsonl"
    for entry in projects.iterdir():
        if not entry.is_dir():
            continue
        candidate = entry / target_name
        if candidate.exists():
            return candidate
    return None
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def extract_session_data(session_file: Path) -> dict:
    """Extract structured learning data from a session transcript.

    Walks the JSONL transcript once and collects:
    - turn_count: number of user messages
    - user prompts (truncated to 200 chars for privacy)
    - per-tool call counts, spawned sub-agents, and a coarse bucketing
      of Bash commands (git / gh / gemini / opencode)
    - error samples from tool results and token usage totals

    Returns {} if the transcript cannot be read at all; individual
    malformed lines are skipped.
    """
    commands_used: list[str] = []
    agents_spawned: list[str] = []
    tools_called: Counter = Counter()
    errors: list[str] = []
    total_input_tokens = 0
    total_output_tokens = 0
    turn_count = 0
    user_prompts: list[str] = []

    try:
        with session_file.open() as f:
            for line in f:
                try:
                    msg = json.loads(line)
                    msg_type = msg.get("type", "")

                    # Count turns (one per user message)
                    if msg_type == "user":
                        turn_count += 1
                        # Capture user prompt text (truncated for privacy).
                        # Very short string prompts (<= 10 chars) are skipped.
                        content = msg.get("message", {}).get("content", "")
                        if isinstance(content, str) and len(content) > 10:
                            user_prompts.append(content[:200])
                        elif isinstance(content, list):
                            for block in content:
                                if isinstance(block, dict) and block.get("type") == "text":
                                    text = block.get("text", "")
                                    if text:
                                        user_prompts.append(text[:200])
                                    # Only the first text block is taken.
                                    break

                    # Track assistant token usage and tool calls
                    if msg_type == "assistant":
                        message = msg.get("message", {})
                        usage = message.get("usage", {})
                        total_input_tokens += usage.get("input_tokens", 0)
                        total_output_tokens += usage.get("output_tokens", 0)

                        content = message.get("content", [])
                        if isinstance(content, list):
                            for block in content:
                                if isinstance(block, dict) and block.get("type") == "tool_use":
                                    tool_name = block.get("name", "unknown")
                                    tools_called[tool_name] += 1

                                    # Tool arguments drive the detections below
                                    tool_input = block.get("input", {})

                                    # Detect agent spawning
                                    if tool_name == "Task":
                                        agent = tool_input.get("subagent_type", "unknown")
                                        agents_spawned.append(agent)

                                    # Bucket bash commands for pattern learning
                                    if tool_name == "Bash":
                                        cmd = tool_input.get("command", "")
                                        if cmd.startswith("git "):
                                            commands_used.append("git")
                                        elif cmd.startswith("gh "):
                                            commands_used.append("gh")
                                        elif "gemini" in cmd:
                                            commands_used.append("gemini")
                                        elif "opencode" in cmd:
                                            commands_used.append("opencode")

                    # Capture tool result errors (crude substring match)
                    if msg_type == "tool":
                        result = msg.get("content", "")
                        if isinstance(result, str) and ("error" in result.lower() or "failed" in result.lower()):
                            errors.append(result[:300])

                # Skip individual malformed lines, keep scanning
                except (json.JSONDecodeError, KeyError, TypeError):
                    continue

    except OSError:
        return {}

    # Detect slash commands from user prompts
    slash_commands = []
    for prompt in user_prompts:
        # Look for /command tokens ("//" is excluded, e.g. URLs)
        words = prompt.split()
        for word in words:
            if word.startswith("/") and len(word) > 1 and not word.startswith("//"):
                slash_commands.append(word.lower())

    return {
        "turn_count": turn_count,
        "tools_called": dict(tools_called.most_common(20)),
        "agents_spawned": agents_spawned,
        "commands_used": list(set(commands_used)),
        "slash_commands": slash_commands,
        "errors": errors[:5],  # Cap at 5 error samples
        "tokens": {
            "input": total_input_tokens,
            "output": total_output_tokens,
            "total": total_input_tokens + total_output_tokens,
        },
        "user_prompts_preview": [p[:100] for p in user_prompts[:3]],
    }
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def write_learning_entry(session_id: str, data: dict) -> None:
    """Append one structured learning record to today's JSONL log.

    Best-effort: filesystem errors are swallowed so the session can
    always end cleanly.
    """
    try:
        LEARNING_DIR.mkdir(parents=True, exist_ok=True)
        day = datetime.now().strftime("%Y-%m-%d")

        record = {
            "ts": int(time.time()),
            "session_id": session_id[:16],  # Truncate for privacy
            "date": day,
            **data,
        }

        with (LEARNING_DIR / f"{day}.jsonl").open("a") as out:
            out.write(json.dumps(record) + "\n")
    except OSError:
        pass
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def check_and_prune_old_logs() -> None:
    """Delete learning logs older than 90 days (by mtime).

    Best-effort: every filesystem error is ignored.
    """
    cutoff = time.time() - 90 * 24 * 3600
    try:
        if not LEARNING_DIR.exists():
            return
        for candidate in LEARNING_DIR.glob("*.jsonl"):
            try:
                if candidate.stat().st_mtime < cutoff:
                    candidate.unlink()
            except OSError:
                continue
    except OSError:
        pass
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def run_learning_logger() -> int:
    """Stop-hook entry point: extract session stats and append a log entry.

    Always returns 0 so this hook never blocks the session from ending.
    """
    sid = get_current_session_id()
    if not sid:
        return 0

    transcript = find_session_file(sid)
    if not transcript:
        return 0

    stats = extract_session_data(transcript)
    # Skip failed extractions and trivially short (< 2 turn) sessions.
    if not stats or stats.get("turn_count", 0) < 2:
        return 0

    write_learning_entry(sid, stats)
    check_and_prune_old_logs()

    return 0


if __name__ == "__main__":
    sys.exit(run_learning_logger())
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
---
|
|
2
|
+
version: 1.0.0
|
|
3
|
+
name: ai-router
|
|
4
|
+
description: Routes tasks to the best available AI CLI tool based on task type and provider strengths. Triggered when user asks to "use gemini for", "run with codex", "ask grok", "route to best AI", "which AI should", or "delegate to".
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# AI Router
|
|
8
|
+
|
|
9
|
+
**Notification:** At skill start, output: "Using ai-router to select optimal AI provider..."
|
|
10
|
+
|
|
11
|
+
## When to Use
|
|
12
|
+
|
|
13
|
+
- User wants to run a task with a specific non-Claude AI
|
|
14
|
+
- Task requires large context (> 100k tokens) → route to Gemini
|
|
15
|
+
- Task requires speed over depth → route to Grok
|
|
16
|
+
- Task is a coding challenge → consider Codex or Kimi
|
|
17
|
+
- User wants multi-AI perspective without full brainstorm
|
|
18
|
+
- Routing delegation for specific sub-tasks in a pipeline
|
|
19
|
+
|
|
20
|
+
## When NOT to Use
|
|
21
|
+
|
|
22
|
+
- Standard Claude Code tasks (just let Claude handle normally)
|
|
23
|
+
- When `/bs:brainstorm_full` is more appropriate (full multi-AI synthesis)
|
|
24
|
+
- When the user hasn't asked for a different AI
|
|
25
|
+
|
|
26
|
+
## Routing Logic
|
|
27
|
+
|
|
28
|
+
### Decision Tree
|
|
29
|
+
|
|
30
|
+
```
|
|
31
|
+
Is context > 100k tokens OR task = "scan entire codebase"?
|
|
32
|
+
→ YES: Use gemini (1M context window)
|
|
33
|
+
→ NO: Continue...
|
|
34
|
+
|
|
35
|
+
Is task = "private", "confidential", "local only", "don't send to cloud", "offline", "sensitive"?
|
|
36
|
+
→ YES: Use ollama if available (stays on machine), otherwise warn + use claude
|
|
37
|
+
→ NO: Continue...
|
|
38
|
+
|
|
39
|
+
Is task = "quick answer", "fast check", "brief analysis"?
|
|
40
|
+
→ YES: Use grok (fastest)
|
|
41
|
+
→ NO: Continue...
|
|
42
|
+
|
|
43
|
+
Is task = "code generation", "implement function", "write code"?
|
|
44
|
+
→ YES: Use claude (default) OR codex if available
|
|
45
|
+
→ NO: Continue...
|
|
46
|
+
|
|
47
|
+
Is task = "multilingual", "translate", "non-English"?
|
|
48
|
+
→ YES: Try glm or minimax
|
|
49
|
+
→ NO: Default to claude
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### Task → Provider Mapping
|
|
53
|
+
|
|
54
|
+
| Task Type | Preferred Provider | Fallback |
|
|
55
|
+
|-----------|-------------------|---------|
|
|
56
|
+
| Large codebase scan | gemini | claude |
|
|
57
|
+
| Web search + coding | gemini | claude |
|
|
58
|
+
| Privacy / offline / no-cloud | **ollama** (local) | claude (warn) |
|
|
59
|
+
| Quick analysis | grok | claude |
|
|
60
|
+
| Code generation | claude | codex |
|
|
61
|
+
| Brainstorming | multi | claude |
|
|
62
|
+
| Math/algorithms | kimi | claude |
|
|
63
|
+
| Multilingual | glm | claude |
|
|
64
|
+
| Architecture design | claude (opus) | claude |
|
|
65
|
+
|
|
66
|
+
## How to Route
|
|
67
|
+
|
|
68
|
+
### Step 1: Read providers.json
|
|
69
|
+
```bash
|
|
70
|
+
cat .claude/providers.json
|
|
71
|
+
```
|
|
72
|
+
Identify which providers have `"available": true`.
|
|
73
|
+
|
|
74
|
+
### Step 2: Determine task type from user prompt
|
|
75
|
+
|
|
76
|
+
Look for keywords:
|
|
77
|
+
- **large context**: "entire codebase", "all files", "full scan", "whole project"
|
|
78
|
+
- **speed**: "quick", "fast", "brief", "tldr", "summary"
|
|
79
|
+
- **code**: "implement", "write", "function", "class", "code"
|
|
80
|
+
- **math**: "algorithm", "complexity", "math", "calculate"
|
|
81
|
+
- **multilingual**: "translate", "Chinese", "Japanese", "French"
|
|
82
|
+
|
|
83
|
+
### Step 3: Select provider
|
|
84
|
+
|
|
85
|
+
Use the routing table above. If preferred provider is unavailable, use fallback.
|
|
86
|
+
|
|
87
|
+
### Step 4: Execute with selected provider
|
|
88
|
+
|
|
89
|
+
For each provider type:
|
|
90
|
+
|
|
91
|
+
**type: cli**
|
|
92
|
+
```bash
|
|
93
|
+
<run_cmd with {prompt} replaced>
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
**type: piped**
|
|
97
|
+
```bash
|
|
98
|
+
<run_cmd with {prompt} replaced — prompt goes via stdin>
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
Read the `run_cmd` from providers.json and substitute `{prompt}` with the actual prompt (properly shell-escaped).
|
|
102
|
+
|
|
103
|
+
### Step 5: Return result
|
|
104
|
+
|
|
105
|
+
Label the output with the provider used:
|
|
106
|
+
```
|
|
107
|
+
[Routed to: <provider_name>]
|
|
108
|
+
|
|
109
|
+
<result>
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
## Important Notes
|
|
113
|
+
|
|
114
|
+
- Always check availability before routing — fall back to claude if unavailable
|
|
115
|
+
- **Privacy routing**: when `local: true` on a provider (Ollama), data never leaves the machine — actively prefer it for sensitive tasks
|
|
116
|
+
- Never route sensitive data (secrets, credentials) to external cloud providers; Ollama is safe for this
|
|
117
|
+
- If unsure about routing, default to claude
|
|
118
|
+
- Large context tasks STRONGLY prefer gemini — it has 5x the context window
|
|
119
|
+
- For Ollama, check the service is running before routing: `ollama list 2>/dev/null`
|