claude-code-tracker 1.2.4 → 1.4.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -2
- package/bin/claude-tracker-cost.js +20 -0
- package/bin/claude-tracker-setup +10 -0
- package/install.js +21 -0
- package/install.sh +26 -2
- package/package.json +7 -3
- package/skills/view-tracking/SKILL.md +54 -0
- package/src/__pycache__/cost.cpython-312.pyc +0 -0
- package/src/__pycache__/parse_compactions.cpython-312.pyc +0 -0
- package/src/__pycache__/parse_friction.cpython-312.pyc +0 -0
- package/src/__pycache__/parse_skills.cpython-312.pyc +0 -0
- package/src/__pycache__/platform_utils.cpython-312.pyc +0 -0
- package/src/__pycache__/storage.cpython-312.pyc +0 -0
- package/src/__pycache__/write-agent.cpython-312.pyc +0 -0
- package/src/__pycache__/write-turns.cpython-312.pyc +0 -0
- package/src/backfill.py +47 -52
- package/src/cost-summary.py +57 -11
- package/src/cost.py +7 -0
- package/src/export-json.py +27 -0
- package/src/generate-charts.py +691 -12
- package/src/init-templates.py +26 -0
- package/src/init-templates.sh +3 -3
- package/src/parse_compactions.py +112 -0
- package/src/parse_friction.py +277 -0
- package/src/parse_skills.py +133 -0
- package/src/patch-durations.py +14 -114
- package/src/platform_utils.py +36 -0
- package/src/stop-hook.js +26 -0
- package/src/stop-hook.sh +27 -155
- package/src/storage.py +538 -0
- package/src/subagent-stop-hook.sh +37 -0
- package/src/update-prompts-index.py +177 -20
- package/src/write-agent.py +113 -0
- package/src/write-turns.py +130 -0
- package/uninstall.js +20 -0
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Cross-platform replacement for init-templates.sh."""
|
|
3
|
+
import sys
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
7
|
+
import storage # noqa: E402
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def main():
    """Create the tracking directory layout and initialize the SQLite DB.

    Expects one CLI argument (the tracking directory). Prints a usage
    message to stderr and exits with status 1 when it is missing.
    """
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <tracking_dir>", file=sys.stderr)
        sys.exit(1)

    tracking_dir = sys.argv[1]
    os.makedirs(tracking_dir, exist_ok=True)

    # SQLite database replaces the legacy tokens.json / agents.json files.
    storage.init_db(tracking_dir)

    # Directory that holds saved key prompts.
    os.makedirs(os.path.join(tracking_dir, 'key-prompts'), exist_ok=True)
|
|
24
|
+
|
|
25
|
+
# Script entry point: delegate to main() when run directly.
if __name__ == '__main__':
    main()
|
package/src/init-templates.sh
CHANGED
|
@@ -1,12 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env bash
|
|
2
2
|
set -euo pipefail
|
|
3
|
+
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
3
4
|
TRACKING_DIR="$1"
|
|
4
5
|
mkdir -p "$TRACKING_DIR"
|
|
5
6
|
mkdir -p "$TRACKING_DIR/key-prompts"
|
|
6
7
|
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
EOF
|
|
8
|
+
# Initialize SQLite database (replaces tokens.json / agents.json)
|
|
9
|
+
python3 "$SCRIPT_DIR/storage.py" --init "$TRACKING_DIR"
|
|
10
10
|
|
|
11
11
|
cat > "$TRACKING_DIR/key-prompts.md" <<'EOF'
|
|
12
12
|
# Prompt Journal
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Parse context compaction events from Claude Code JSONL transcripts.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 parse_compactions.py <transcript_path> <tracking_dir> <session_id> <project>
|
|
7
|
+
"""
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import sys
|
|
11
|
+
from datetime import date, datetime
|
|
12
|
+
|
|
13
|
+
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
|
14
|
+
sys.path.insert(0, SCRIPT_DIR)
|
|
15
|
+
import storage
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def make_date(timestamp):
    """Convert an ISO-8601 timestamp (optionally 'Z'-suffixed) to 'YYYY-MM-DD'.

    Any unparseable input (including None or empty string) falls back to
    today's date in ISO format.
    """
    try:
        parsed = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
        return parsed.strftime('%Y-%m-%d')
    except Exception:
        return date.today().isoformat()
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def parse_compactions(transcript_path, session_id, project):
    """Parse JSONL transcript for compact_boundary system events.

    Derives turn_index by counting user/assistant pairs before each event.
    Returns list of dicts ready for storage.replace_session_compactions().
    """
    # First pass: load every parseable JSON object. Malformed lines are
    # skipped silently — transcripts can contain partial/interrupted writes.
    lines = []
    with open(transcript_path, encoding='utf-8') as f:
        for raw in f:
            try:
                obj = json.loads(raw)
                lines.append(obj)
            except Exception:
                pass

    # Build turn boundaries for turn_index calculation.
    # Sidechain user messages are excluded; messages without a timestamp
    # are ignored since boundaries are compared by timestamp below.
    msg_list = []
    for obj in lines:
        ts = obj.get('timestamp', '')
        t = obj.get('type')
        if t == 'user' and not obj.get('isSidechain') and ts:
            msg_list.append(('user', ts))
        elif t == 'assistant' and ts:
            msg_list.append(('assistant', ts))

    # Pair user->assistant for turn boundaries: each user message is matched
    # with the next assistant message that follows it; unmatched users are
    # skipped.
    turn_boundaries = []
    idx = 0
    while idx < len(msg_list):
        if msg_list[idx][0] == 'user':
            j = idx + 1
            while j < len(msg_list) and msg_list[j][0] != 'assistant':
                j += 1
            if j < len(msg_list):
                turn_boundaries.append((msg_list[idx][1], msg_list[j][1]))
                idx = j + 1
            else:
                idx += 1
        else:
            idx += 1

    def get_turn_index(timestamp):
        # Map an event timestamp onto a 0-based turn index. Relies on
        # lexicographic comparison of ISO-8601 strings (valid because all
        # timestamps come from the same transcript format).
        if not timestamp:
            return 0
        for ti, (user_ts, asst_ts) in enumerate(turn_boundaries):
            if timestamp <= asst_ts:
                return ti
            # Events that land between turns are attributed to the turn
            # that just finished.
            if ti + 1 < len(turn_boundaries):
                next_user_ts = turn_boundaries[ti + 1][0]
                if asst_ts < timestamp < next_user_ts:
                    return ti
        # Past the last boundary (or no boundaries at all): clamp to the
        # final turn, or 0 when the transcript had no complete turns.
        return max(0, len(turn_boundaries) - 1)

    # Second pass: collect compact_boundary system events.
    entries = []
    for obj in lines:
        if obj.get('type') != 'system':
            continue
        if obj.get('subtype') != 'compact_boundary':
            continue

        ts = obj.get('timestamp', '')
        meta = obj.get('compactMetadata', {})
        if not isinstance(meta, dict):
            meta = {}

        entries.append({
            'session_id': session_id,
            'date': make_date(ts),
            'project': project,
            'timestamp': ts,
            # 'auto' is assumed when the metadata omits the trigger —
            # presumably manual compactions always carry it; confirm.
            'trigger': meta.get('trigger', 'auto'),
            'pre_tokens': meta.get('preTokens', 0),
            'turn_index': get_turn_index(ts),
        })

    return entries
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
# CLI entry point: argv = transcript_path, tracking_dir, session_id, project.
if __name__ == '__main__':
    if len(sys.argv) != 5:
        print(f"Usage: {sys.argv[0]} <transcript_path> <tracking_dir> <session_id> <project>",
              file=sys.stderr)
        sys.exit(1)

    transcript_path, tracking_dir, session_id, project = sys.argv[1:5]
    entries = parse_compactions(transcript_path, session_id, project)
    # Replaces this session's previously stored rows — presumably to keep
    # re-runs idempotent; confirm against storage.replace_session_compactions.
    storage.replace_session_compactions(tracking_dir, session_id, entries)
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Parse friction events from Claude Code JSONL transcripts.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 parse_friction.py <transcript_path> <tracking_dir> <session_id> <project> <source> \
|
|
7
|
+
[--agent-type TYPE] [--agent-id ID]
|
|
8
|
+
|
|
9
|
+
Friction categories (priority order, first match wins):
|
|
10
|
+
permission_denied, hook_blocked, cascade_error, command_failed, tool_error, correction, retry
|
|
11
|
+
"""
|
|
12
|
+
import sys, json, os, argparse
|
|
13
|
+
|
|
14
|
+
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
|
15
|
+
sys.path.insert(0, SCRIPT_DIR)
|
|
16
|
+
import storage
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def parse_friction(transcript_path, session_id, project, source,
                   agent_type=None, agent_id=None):
    """Parse a JSONL transcript and return a list of friction event dicts.

    Friction categories (priority order, first match wins):
    permission_denied, hook_blocked, cascade_error, command_failed,
    tool_error, plus 'correction' (human pushback) and 'retry'
    (re-invocation of a tool that previously errored).

    Args:
        transcript_path: path to the Claude Code JSONL transcript.
        session_id: session identifier stamped onto every event.
        project: project name stamped onto every event.
        source: origin label stamped onto every event.
        agent_type: optional subagent type stamped onto every event.
        agent_id: optional subagent id stamped onto every event.
    """
    events = []
    pending_tools = {}  # tool_use_id -> {name, turn_index, timestamp}
    last_error_by_tool = {}  # tool_name -> tool_use_id of last errored call
    skill_stack = []  # [(tool_use_id, skill_name)] — innermost active Skill last

    msgs = []  # (role, timestamp, is_sidechain, user_type)
    lines = []  # raw parsed objects
    model = "unknown"

    # First pass: load objects, collect message timeline, and remember the
    # most recently seen assistant model name. Malformed lines are skipped.
    with open(transcript_path, encoding='utf-8') as f:
        for line in f:
            try:
                obj = json.loads(line)
                lines.append(obj)
                t = obj.get('type')
                ts = obj.get('timestamp')
                if t == 'user' and ts:
                    msgs.append(('user', ts, obj.get('isSidechain', False),
                                 obj.get('userType')))
                elif t == 'assistant' and ts:
                    msgs.append(('assistant', ts, False, None))
                    msg = obj.get('message', {})
                    if isinstance(msg, dict) and msg.get('role') == 'assistant':
                        m = msg.get('model', '')
                        if m:
                            model = m
            except Exception:
                pass

    # Build turn boundaries: user (non-sidechain) -> next assistant
    turn_boundaries = []  # [(user_msg_idx, asst_msg_idx)]
    i = 0
    while i < len(msgs):
        if msgs[i][0] == 'user' and not msgs[i][2]:
            j = i + 1
            while j < len(msgs) and msgs[j][0] != 'assistant':
                j += 1
            if j < len(msgs):
                turn_boundaries.append((i, j))
                i = j + 1
            else:
                i += 1
        else:
            i += 1

    def get_turn_index(timestamp):
        # Map a timestamp to its 0-based turn. ISO-8601 strings compare
        # correctly lexicographically, so no datetime parsing is needed here.
        if not timestamp:
            return 0
        for idx, (ui, ai) in enumerate(turn_boundaries):
            user_ts = msgs[ui][1]
            asst_ts = msgs[ai][1]
            if user_ts <= timestamp <= asst_ts:
                return idx
            # Between-turn events are attributed to the turn just finished.
            if idx + 1 < len(turn_boundaries):
                next_user_ts = msgs[turn_boundaries[idx + 1][0]][1]
                if asst_ts <= timestamp < next_user_ts:
                    return idx
        return max(0, len(turn_boundaries) - 1)

    def make_date(timestamp):
        # Best-effort 'YYYY-MM-DD' from an ISO timestamp; falls back to today.
        try:
            from datetime import datetime
            return datetime.fromisoformat(
                timestamp.replace('Z', '+00:00')).strftime('%Y-%m-%d')
        except Exception:
            from datetime import date
            return date.today().isoformat()

    def current_skill():
        # Name of the innermost Skill currently executing, if any.
        return skill_stack[-1][1] if skill_stack else None

    def make_event(timestamp, turn_index, category, tool_name, detail, resolved=None):
        # Assemble one friction-event record; detail is truncated to 500 chars.
        return {
            'timestamp': timestamp or '',
            'date': make_date(timestamp),
            'session_id': session_id,
            'turn_index': turn_index,
            'source': source,
            'agent_type': agent_type,
            'agent_id': agent_id,
            'project': project,
            'category': category,
            'tool_name': tool_name,
            'skill': current_skill(),
            'model': model,
            'detail': (detail or '')[:500],
            'resolved': resolved,
        }

    # Second pass: detect friction
    for obj in lines:
        ts = obj.get('timestamp', '')
        turn_idx = get_turn_index(ts)
        msg = obj.get('message', {})
        if not isinstance(msg, dict):
            continue
        content_blocks = msg.get('content', [])

        if isinstance(content_blocks, list):
            for block in content_blocks:
                if not isinstance(block, dict):
                    continue
                btype = block.get('type')

                if btype == 'tool_use':
                    # Remember the call so its result can be attributed later.
                    tool_id = block.get('id', '')
                    tool_name = block.get('name', '')
                    pending_tools[tool_id] = {
                        'name': tool_name,
                        'turn_index': turn_idx,
                        'timestamp': ts,
                    }
                    if tool_name == 'Skill':
                        skill_name = block.get('input', {}).get('skill', '')
                        if skill_name:
                            skill_stack.append((tool_id, skill_name))

                elif btype == 'tool_result':
                    tool_id = block.get('tool_use_id', '')
                    is_error = block.get('is_error', False)
                    # Normalize result content to a single string (results
                    # may be a plain string or a list of text blocks).
                    content = ''
                    raw_content = block.get('content', '')
                    if isinstance(raw_content, str):
                        content = raw_content
                    elif isinstance(raw_content, list):
                        content = ' '.join(
                            c.get('text', '') for c in raw_content
                            if isinstance(c, dict) and c.get('type') == 'text')

                    tool_info = pending_tools.get(tool_id, {})
                    tool_name = tool_info.get('name', '')

                    # Pop skill stack if this is a Skill tool result
                    if skill_stack and skill_stack[-1][0] == tool_id:
                        skill_stack.pop()

                    content_lower = content.lower()

                    # Check if this is a retry of a previously errored tool.
                    # NOTE(review): a failed retry keeps the tool flagged, so
                    # every later call of that tool is also classified 'retry'
                    # rather than re-categorized — presumably intended.
                    if tool_name and tool_name in last_error_by_tool:
                        if not is_error:
                            events.append(make_event(ts, turn_idx, 'retry',
                                                     tool_name, 'Retry succeeded', True))
                            del last_error_by_tool[tool_name]
                            continue
                        else:
                            events.append(make_event(ts, turn_idx, 'retry',
                                                     tool_name, 'Retry failed', False))
                            continue

                    if not is_error:
                        continue

                    # Priority 1: permission_denied
                    if ("user doesn't want to proceed" in content_lower or
                            "tool use was rejected" in content_lower):
                        events.append(make_event(ts, turn_idx, 'permission_denied',
                                                 tool_name, content))
                        last_error_by_tool[tool_name] = tool_id
                        continue

                    # Priority 2: hook_blocked
                    if 'pretooluse:' in content_lower and 'blocked' in content_lower:
                        events.append(make_event(ts, turn_idx, 'hook_blocked',
                                                 tool_name, content))
                        last_error_by_tool[tool_name] = tool_id
                        continue

                    # Priority 3: cascade_error — collateral failures are not
                    # added to last_error_by_tool, so they never count as the
                    # origin of a retry.
                    if 'sibling tool call errored' in content_lower:
                        events.append(make_event(ts, turn_idx, 'cascade_error',
                                                 tool_name, content))
                        continue

                    # Priority 4: command_failed
                    if tool_name == 'Bash' and content.startswith('Exit code '):
                        events.append(make_event(ts, turn_idx, 'command_failed',
                                                 tool_name, content))
                        last_error_by_tool[tool_name] = tool_id
                        continue

                    # Priority 5: tool_error (catch-all for is_error=true)
                    events.append(make_event(ts, turn_idx, 'tool_error',
                                             tool_name, content))
                    last_error_by_tool[tool_name] = tool_id

        # Check for corrections: user messages (human-typed, non-sidechain).
        obj_type = obj.get('type')
        user_type = obj.get('userType')
        is_sidechain = obj.get('isSidechain', False)
        if obj_type == 'user' and user_type == 'human' and not is_sidechain:
            text = ''
            if isinstance(content_blocks, list):
                texts = [c.get('text', '') for c in content_blocks
                         if isinstance(c, dict) and c.get('type') == 'text']
                text = ' '.join(texts).strip()
            elif isinstance(msg.get('content'), str):
                text = msg['content'].strip()

            if text:
                text_lower = text.lower()
                first_100 = text_lower[:100]
                is_correction = False

                # Heuristic: message starts with a pushback word...
                for prefix in ('no,', 'no ', 'wrong', 'stop', 'wait', 'actually,'):
                    if text_lower.startswith(prefix):
                        is_correction = True
                        break

                # ...or contains a correction phrase near the beginning.
                if not is_correction:
                    for phrase in ("that's wrong", "not what i", "i said", "i meant"):
                        if phrase in first_100:
                            is_correction = True
                            break

                if is_correction:
                    events.append(make_event(ts, turn_idx, 'correction',
                                             None, text[:200]))

    return events
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def upsert_friction(tracking_dir, session_id, new_events):
    """Persist a session's friction events through the storage module.

    Backward compatible with callers that still pass a friction.json file
    path: when *tracking_dir* ends in '.json', its parent directory is used
    as the tracking dir instead.

    Returns the events that were written.
    """
    target = tracking_dir
    if target.endswith('.json'):
        # Legacy call style — derive the tracking dir from the JSON path.
        target = os.path.dirname(os.path.abspath(target))

    storage.replace_session_friction(target, session_id, new_events)
    return new_events
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def main():
    """CLI entry point: parse a transcript and record its friction events."""
    parser = argparse.ArgumentParser(description='Parse friction events from JSONL transcript')
    for positional in ('transcript_path', 'tracking_dir', 'session_id',
                       'project', 'source'):
        parser.add_argument(positional)
    parser.add_argument('--agent-type', default=None)
    parser.add_argument('--agent-id', default=None)
    args = parser.parse_args()

    events = parse_friction(args.transcript_path, args.session_id,
                            args.project, args.source,
                            args.agent_type, args.agent_id)
    upsert_friction(args.tracking_dir, args.session_id, events)

    if events:
        print(f"{len(events)} friction event(s) recorded.")
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
# Script entry point: delegate to main() when run directly.
if __name__ == '__main__':
    main()
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Parse Skill tool invocations from Claude Code JSONL transcripts.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 parse_skills.py <transcript_path> <tracking_dir> <session_id> <project>
|
|
7
|
+
"""
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import sys
|
|
11
|
+
from datetime import date, datetime
|
|
12
|
+
|
|
13
|
+
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
|
14
|
+
sys.path.insert(0, SCRIPT_DIR)
|
|
15
|
+
import storage
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def make_date(timestamp):
    """Return 'YYYY-MM-DD' for an ISO-8601 timestamp ('Z' suffix allowed).

    Unparseable input of any kind (including None) yields today's date.
    """
    try:
        when = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
    except Exception:
        return date.today().isoformat()
    return when.strftime('%Y-%m-%d')
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def parse_skills(transcript_path, session_id, project):
    """Parse JSONL transcript for Skill tool_use blocks.

    Pairs each Skill tool_use with its tool_result (by tool_use_id) to
    compute duration and success; invocations that never receive a result
    are still emitted (see flush at the end).
    Returns list of dicts ready for storage.replace_session_skills()."""
    # Load every parseable JSON object; malformed lines are skipped.
    lines = []
    with open(transcript_path, encoding='utf-8') as f:
        for raw in f:
            try:
                obj = json.loads(raw)
                lines.append(obj)
            except Exception:
                pass

    pending = {}  # tool_use_id -> {skill_name, args, tool_use_id, timestamp}
    entries = []

    for obj in lines:
        ts = obj.get('timestamp', '')
        msg = obj.get('message', {})
        if not isinstance(msg, dict):
            continue
        content_blocks = msg.get('content', [])
        if not isinstance(content_blocks, list):
            continue

        for block in content_blocks:
            if not isinstance(block, dict):
                continue
            btype = block.get('type')

            if btype == 'tool_use' and block.get('name') == 'Skill':
                # Record the invocation; the matching result arrives later.
                tool_use_id = block.get('id', '')
                inp = block.get('input', {})
                pending[tool_use_id] = {
                    'skill_name': inp.get('skill', 'unknown'),
                    'args': inp.get('args'),
                    'tool_use_id': tool_use_id,
                    'timestamp': ts,
                }

            elif btype == 'tool_result':
                tool_use_id = block.get('tool_use_id', '')
                # Only results for Skill invocations we saw are relevant.
                if tool_use_id not in pending:
                    continue

                info = pending.pop(tool_use_id)
                is_error = block.get('is_error', False)

                # Normalize error content to a string (results may be a
                # plain string or a list of text blocks).
                error_content = None
                if is_error:
                    raw_content = block.get('content', '')
                    if isinstance(raw_content, str):
                        error_content = raw_content
                    elif isinstance(raw_content, list):
                        error_content = ' '.join(
                            c.get('text', '') for c in raw_content
                            if isinstance(c, dict) and c.get('type') == 'text')

                # Wall-clock duration between invocation and result;
                # clamped to >= 0 and left at 0 when timestamps are bad.
                duration = 0
                if info['timestamp'] and ts:
                    try:
                        use_ts = datetime.fromisoformat(
                            info['timestamp'].replace('Z', '+00:00'))
                        result_ts = datetime.fromisoformat(
                            ts.replace('Z', '+00:00'))
                        duration = max(0, int((result_ts - use_ts).total_seconds()))
                    except Exception:
                        pass

                entries.append({
                    'session_id': session_id,
                    'date': make_date(info['timestamp']),
                    'project': project,
                    'skill_name': info['skill_name'],
                    'args': info['args'],
                    'tool_use_id': info['tool_use_id'],
                    'timestamp': info['timestamp'],
                    'duration_seconds': duration,
                    'success': 0 if is_error else 1,
                    'error_message': error_content if is_error else None,
                })

    # Flush unmatched pending skills.
    # NOTE(review): invocations with no result are recorded as success with
    # zero duration — confirm this optimistic default is intended.
    for tool_use_id, info in pending.items():
        entries.append({
            'session_id': session_id,
            'date': make_date(info['timestamp']),
            'project': project,
            'skill_name': info['skill_name'],
            'args': info['args'],
            'tool_use_id': info['tool_use_id'],
            'timestamp': info['timestamp'],
            'duration_seconds': 0,
            'success': 1,
            'error_message': None,
        })

    return entries
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
# CLI entry point: argv = transcript_path, tracking_dir, session_id, project.
if __name__ == '__main__':
    if len(sys.argv) != 5:
        print(f"Usage: {sys.argv[0]} <transcript_path> <tracking_dir> <session_id> <project>",
              file=sys.stderr)
        sys.exit(1)

    transcript_path, tracking_dir, session_id, project = sys.argv[1:5]
    entries = parse_skills(transcript_path, session_id, project)
    # Replaces this session's previously stored rows — presumably to keep
    # re-runs idempotent; confirm against storage.replace_session_skills.
    storage.replace_session_skills(tracking_dir, session_id, entries)
|