claude-attribution 1.2.4 → 1.2.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/hooks/post-tool-use.ts +11 -2
- package/src/metrics/collect.ts +130 -37
- package/src/metrics/transcript.ts +67 -28
- package/src/setup/branch-protection.ts +432 -0
- package/src/setup/install.ts +5 -1
package/package.json
CHANGED
|
@@ -45,12 +45,21 @@ async function main() {
|
|
|
45
45
|
const { session_id, tool_name, tool_input } = payload;
|
|
46
46
|
const repoRoot = resolve(process.cwd());
|
|
47
47
|
|
|
48
|
-
// Log every tool call
|
|
49
|
-
|
|
48
|
+
// Log every tool call. For Skill invocations, also capture the skill name
|
|
49
|
+
// so metrics can show "/pr" instead of the generic "Skill ×1".
|
|
50
|
+
const logEntry: {
|
|
51
|
+
timestamp: string;
|
|
52
|
+
session: string;
|
|
53
|
+
tool: string;
|
|
54
|
+
skill?: string;
|
|
55
|
+
} = {
|
|
50
56
|
timestamp: new Date().toISOString(),
|
|
51
57
|
session: session_id,
|
|
52
58
|
tool: tool_name,
|
|
53
59
|
};
|
|
60
|
+
if (tool_name === "Skill" && typeof tool_input["skill"] === "string") {
|
|
61
|
+
logEntry.skill = tool_input["skill"];
|
|
62
|
+
}
|
|
54
63
|
|
|
55
64
|
try {
|
|
56
65
|
const logDir = join(repoRoot, ".claude", "logs");
|
package/src/metrics/collect.ts
CHANGED
|
@@ -29,8 +29,11 @@ const execFileAsync = promisify(execFile);
|
|
|
29
29
|
export interface MetricsData {
|
|
30
30
|
repoRoot: string;
|
|
31
31
|
sessionId: string;
|
|
32
|
+
/** Notable non-file-op tools used (Bash, Read, Write, Edit etc. excluded). */
|
|
32
33
|
toolCounts: Map<string, number>;
|
|
33
34
|
agentCounts: Map<string, number>;
|
|
35
|
+
/** Slash command skills invoked (e.g. ["pr", "metrics"]). */
|
|
36
|
+
skillNames: string[];
|
|
34
37
|
transcript: TranscriptResult | null;
|
|
35
38
|
attributions: AttributionResult[];
|
|
36
39
|
lastSeenByFile: Map<string, FileAttribution>;
|
|
@@ -43,6 +46,44 @@ export interface MetricsData {
|
|
|
43
46
|
} | null;
|
|
44
47
|
}
|
|
45
48
|
|
|
49
|
+
/**
|
|
50
|
+
* Tools that are routine file/task operations — excluded from the metrics
|
|
51
|
+
* display because they add noise without conveying meaningful intent.
|
|
52
|
+
* Skill invocations are tracked separately by skill name.
|
|
53
|
+
*/
|
|
54
|
+
const BORING_TOOLS = new Set([
|
|
55
|
+
// File operations
|
|
56
|
+
"Read",
|
|
57
|
+
"Write",
|
|
58
|
+
"Edit",
|
|
59
|
+
"Glob",
|
|
60
|
+
"Grep",
|
|
61
|
+
"NotebookEdit",
|
|
62
|
+
"MultiEdit",
|
|
63
|
+
// Task/todo management
|
|
64
|
+
"TaskCreate",
|
|
65
|
+
"TaskUpdate",
|
|
66
|
+
"TaskGet",
|
|
67
|
+
"TaskList",
|
|
68
|
+
"TaskOutput",
|
|
69
|
+
"TaskStop",
|
|
70
|
+
// Planning & worktree lifecycle
|
|
71
|
+
"EnterPlanMode",
|
|
72
|
+
"ExitPlanMode",
|
|
73
|
+
"EnterWorktree",
|
|
74
|
+
"ExitWorktree",
|
|
75
|
+
// Cron
|
|
76
|
+
"CronCreate",
|
|
77
|
+
"CronDelete",
|
|
78
|
+
"CronList",
|
|
79
|
+
// Shell (too generic — any meaningful external calls are implicit in the PR)
|
|
80
|
+
"Bash",
|
|
81
|
+
// Skill is tracked by name in skillNames, not toolCounts
|
|
82
|
+
"Skill",
|
|
83
|
+
// Agent is tracked in agentCounts via agent-activity.jsonl
|
|
84
|
+
"Agent",
|
|
85
|
+
]);
|
|
86
|
+
|
|
46
87
|
async function readSessionStart(repoRoot: string): Promise<Date | null> {
|
|
47
88
|
const markerPath = join(
|
|
48
89
|
repoRoot,
|
|
@@ -201,7 +242,7 @@ export async function collectMetrics(
|
|
|
201
242
|
join(logDir, "tool-usage.jsonl"),
|
|
202
243
|
sessionId,
|
|
203
244
|
sessionStart ?? undefined,
|
|
204
|
-
) as Promise<{ tool?: string }[]>,
|
|
245
|
+
) as Promise<{ tool?: string; skill?: string }[]>,
|
|
205
246
|
readJsonlForSession(
|
|
206
247
|
join(logDir, "agent-activity.jsonl"),
|
|
207
248
|
sessionId,
|
|
@@ -212,10 +253,18 @@ export async function collectMetrics(
|
|
|
212
253
|
getMinimapTotals(root),
|
|
213
254
|
]);
|
|
214
255
|
|
|
215
|
-
// Tool counts
|
|
256
|
+
// Tool counts — skip boring file ops and infrastructure tools.
|
|
257
|
+
// Skill invocations are tracked separately by name.
|
|
216
258
|
const toolCounts = new Map<string, number>();
|
|
259
|
+
const skillNames: string[] = [];
|
|
217
260
|
for (const e of toolEntries) {
|
|
218
|
-
if (e.tool)
|
|
261
|
+
if (!e.tool) continue;
|
|
262
|
+
if (e.tool === "Skill") {
|
|
263
|
+
if (e.skill) skillNames.push(e.skill);
|
|
264
|
+
continue;
|
|
265
|
+
}
|
|
266
|
+
if (BORING_TOOLS.has(e.tool)) continue;
|
|
267
|
+
toolCounts.set(e.tool, (toolCounts.get(e.tool) ?? 0) + 1);
|
|
219
268
|
}
|
|
220
269
|
|
|
221
270
|
// Agent counts (SubagentStart events only)
|
|
@@ -261,6 +310,7 @@ export async function collectMetrics(
|
|
|
261
310
|
sessionId,
|
|
262
311
|
toolCounts,
|
|
263
312
|
agentCounts,
|
|
313
|
+
skillNames,
|
|
264
314
|
transcript,
|
|
265
315
|
attributions,
|
|
266
316
|
lastSeenByFile,
|
|
@@ -269,11 +319,36 @@ export async function collectMetrics(
|
|
|
269
319
|
};
|
|
270
320
|
}
|
|
271
321
|
|
|
322
|
+
/** Format a session time summary. Returns empty string when no time data. */
|
|
323
|
+
function formatSessionLine(transcript: TranscriptResult): string {
|
|
324
|
+
const parts: string[] = [];
|
|
325
|
+
|
|
326
|
+
parts.push(
|
|
327
|
+
`${transcript.humanPromptCount} prompt${transcript.humanPromptCount === 1 ? "" : "s"}`,
|
|
328
|
+
);
|
|
329
|
+
|
|
330
|
+
const {
|
|
331
|
+
activeMinutes: total,
|
|
332
|
+
aiMinutes: ai,
|
|
333
|
+
humanMinutes: human,
|
|
334
|
+
} = transcript;
|
|
335
|
+
if (total > 0) {
|
|
336
|
+
if (human > 0) {
|
|
337
|
+
parts.push(`${total}m total (${ai}m AI · ${human}m human)`);
|
|
338
|
+
} else {
|
|
339
|
+
parts.push(`${total}m`);
|
|
340
|
+
}
|
|
341
|
+
}
|
|
342
|
+
|
|
343
|
+
return parts.join(" · ");
|
|
344
|
+
}
|
|
345
|
+
|
|
272
346
|
export function renderMetrics(data: MetricsData): string {
|
|
273
347
|
const {
|
|
274
348
|
repoRoot,
|
|
275
349
|
toolCounts,
|
|
276
350
|
agentCounts,
|
|
351
|
+
skillNames,
|
|
277
352
|
transcript,
|
|
278
353
|
lastSeenByFile,
|
|
279
354
|
allTranscripts,
|
|
@@ -286,7 +361,7 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
286
361
|
out("## Claude Code Metrics");
|
|
287
362
|
out();
|
|
288
363
|
|
|
289
|
-
// Headline: AI%
|
|
364
|
+
// Headline: AI% (most important stat, shown first)
|
|
290
365
|
const allFileStats = [...lastSeenByFile.values()];
|
|
291
366
|
const hasAttribution = allFileStats.length > 0;
|
|
292
367
|
if (minimapTotals && minimapTotals.total > 0) {
|
|
@@ -304,30 +379,26 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
304
379
|
minimapTotals.total > 0
|
|
305
380
|
? Math.round((prTotal / minimapTotals.total) * 100)
|
|
306
381
|
: 0;
|
|
307
|
-
const activePart =
|
|
308
|
-
transcript && transcript.activeMinutes > 0
|
|
309
|
-
? ` · Active: ${transcript.activeMinutes}m`
|
|
310
|
-
: "";
|
|
311
382
|
out(
|
|
312
|
-
`**This PR:** ${prTotal} lines changed (${codebasePct}% of codebase) · ${prPctAi}% Claude edits · ${prAi} AI lines
|
|
383
|
+
`**This PR:** ${prTotal} lines changed (${codebasePct}% of codebase) · ${prPctAi}% Claude edits · ${prAi} AI lines`,
|
|
313
384
|
);
|
|
314
385
|
}
|
|
315
386
|
out();
|
|
316
387
|
} else if (hasAttribution) {
|
|
317
388
|
const { ai, total, pctAi } = aggregateTotals(allFileStats);
|
|
318
|
-
|
|
319
|
-
transcript && transcript.activeMinutes > 0
|
|
320
|
-
? ` · Active: ${transcript.activeMinutes}m`
|
|
321
|
-
: "";
|
|
322
|
-
out(
|
|
323
|
-
`**AI contribution: ~${pctAi}%** (${ai} of ${total} committed lines)${activePart}`,
|
|
324
|
-
);
|
|
325
|
-
out();
|
|
326
|
-
} else if (transcript && transcript.activeMinutes > 0) {
|
|
327
|
-
out(`**Active session time:** ${transcript.activeMinutes}m`);
|
|
389
|
+
out(`**AI contribution: ~${pctAi}%** (${ai} of ${total} committed lines)`);
|
|
328
390
|
out();
|
|
329
391
|
}
|
|
330
392
|
|
|
393
|
+
// Session: prompts + time breakdown
|
|
394
|
+
if (transcript) {
|
|
395
|
+
const sessionLine = formatSessionLine(transcript);
|
|
396
|
+
if (sessionLine) {
|
|
397
|
+
out(`**Session:** ${sessionLine}`);
|
|
398
|
+
out();
|
|
399
|
+
}
|
|
400
|
+
}
|
|
401
|
+
|
|
331
402
|
// Model usage table
|
|
332
403
|
if (transcript) {
|
|
333
404
|
out("| Model | Calls | Input | Output | Cache |");
|
|
@@ -344,8 +415,6 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
344
415
|
`| **Total** | ${t.totalCalls} | ${kFormat(t.totalInputTokens)} | ${kFormat(t.totalOutputTokens)} | ${kFormat(totalCache)} |`,
|
|
345
416
|
);
|
|
346
417
|
out();
|
|
347
|
-
out(`**Human prompts (steering effort):** ${transcript.humanPromptCount}`);
|
|
348
|
-
out();
|
|
349
418
|
}
|
|
350
419
|
|
|
351
420
|
// Multi-session rollup (shown when multiple Claude sessions contributed)
|
|
@@ -362,6 +431,8 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
362
431
|
t.totals.totalCacheCreationTokens +
|
|
363
432
|
t.totals.totalCacheReadTokens,
|
|
364
433
|
humanPromptCount: acc.humanPromptCount + t.humanPromptCount,
|
|
434
|
+
aiMinutes: acc.aiMinutes + t.aiMinutes,
|
|
435
|
+
humanMinutes: acc.humanMinutes + t.humanMinutes,
|
|
365
436
|
activeMinutes: acc.activeMinutes + t.activeMinutes,
|
|
366
437
|
}),
|
|
367
438
|
{
|
|
@@ -370,6 +441,8 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
370
441
|
totalOutputTokens: 0,
|
|
371
442
|
totalCacheTokens: 0,
|
|
372
443
|
humanPromptCount: 0,
|
|
444
|
+
aiMinutes: 0,
|
|
445
|
+
humanMinutes: 0,
|
|
373
446
|
activeMinutes: 0,
|
|
374
447
|
},
|
|
375
448
|
);
|
|
@@ -379,24 +452,35 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
379
452
|
`| ${agg.totalCalls} | ${kFormat(agg.totalInputTokens)} | ${kFormat(agg.totalOutputTokens)} | ${kFormat(agg.totalCacheTokens)} |`,
|
|
380
453
|
);
|
|
381
454
|
out();
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
455
|
+
const aggSessionLine = [
|
|
456
|
+
`${agg.humanPromptCount} prompt${agg.humanPromptCount === 1 ? "" : "s"}`,
|
|
457
|
+
...(agg.activeMinutes > 0
|
|
458
|
+
? agg.humanMinutes > 0
|
|
459
|
+
? [
|
|
460
|
+
`${agg.activeMinutes}m total (${agg.aiMinutes}m AI · ${agg.humanMinutes}m human)`,
|
|
461
|
+
]
|
|
462
|
+
: [`${agg.activeMinutes}m`]
|
|
463
|
+
: []),
|
|
464
|
+
].join(" · ");
|
|
465
|
+
if (aggSessionLine) {
|
|
466
|
+
out(`**Total session:** ${aggSessionLine}`);
|
|
385
467
|
}
|
|
386
468
|
out();
|
|
387
469
|
}
|
|
388
470
|
|
|
389
|
-
// <details> block —
|
|
471
|
+
// <details> block — skills, agents, notable tools, per-file breakdown
|
|
390
472
|
const claudeFiles = [...lastSeenByFile.entries()].filter(
|
|
391
473
|
([, stats]) => stats.ai > 0 || stats.mixed > 0,
|
|
392
474
|
);
|
|
393
|
-
const
|
|
475
|
+
const hasSkills = skillNames.length > 0;
|
|
394
476
|
const hasAgents = agentCounts.size > 0;
|
|
477
|
+
const hasNotableTools = toolCounts.size > 0;
|
|
395
478
|
const hasFiles = claudeFiles.length > 0;
|
|
396
479
|
|
|
397
480
|
const summaryParts: string[] = [];
|
|
398
|
-
if (
|
|
481
|
+
if (hasSkills) summaryParts.push("Skills");
|
|
399
482
|
if (hasAgents) summaryParts.push("Agents");
|
|
483
|
+
if (hasNotableTools) summaryParts.push("Tools");
|
|
400
484
|
if (hasFiles) summaryParts.push("Files");
|
|
401
485
|
if (summaryParts.length === 0) summaryParts.push("Details");
|
|
402
486
|
|
|
@@ -404,27 +488,36 @@ export function renderMetrics(data: MetricsData): string {
|
|
|
404
488
|
out(`<summary>${summaryParts.join(" · ")}</summary>`);
|
|
405
489
|
out();
|
|
406
490
|
|
|
407
|
-
if (
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
.join(", ");
|
|
412
|
-
out(`**Tools:** ${toolLine}`);
|
|
413
|
-
out();
|
|
414
|
-
} else {
|
|
415
|
-
out("_No tool usage logs found_");
|
|
491
|
+
if (hasSkills) {
|
|
492
|
+
// Deduplicate and show slash command names
|
|
493
|
+
const unique = [...new Set(skillNames)];
|
|
494
|
+
out(`**Skills:** ${unique.map((s) => `/${s}`).join(", ")}`);
|
|
416
495
|
out();
|
|
417
496
|
}
|
|
418
497
|
|
|
419
498
|
if (hasAgents) {
|
|
420
499
|
const agentLine = [...agentCounts.entries()]
|
|
421
500
|
.sort((a, b) => b[1] - a[1])
|
|
422
|
-
.map(([agent, count]) => `${agent} ×${count}`)
|
|
501
|
+
.map(([agent, count]) => (count > 1 ? `${agent} ×${count}` : agent))
|
|
423
502
|
.join(", ");
|
|
424
503
|
out(`**Agents:** ${agentLine}`);
|
|
425
504
|
out();
|
|
426
505
|
}
|
|
427
506
|
|
|
507
|
+
if (hasNotableTools) {
|
|
508
|
+
const toolLine = [...toolCounts.entries()]
|
|
509
|
+
.sort((a, b) => b[1] - a[1])
|
|
510
|
+
.map(([tool, count]) => (count > 1 ? `${tool} ×${count}` : tool))
|
|
511
|
+
.join(", ");
|
|
512
|
+
out(`**External tools:** ${toolLine}`);
|
|
513
|
+
out();
|
|
514
|
+
}
|
|
515
|
+
|
|
516
|
+
if (!hasSkills && !hasAgents && !hasNotableTools) {
|
|
517
|
+
out("_No tool usage logs found_");
|
|
518
|
+
out();
|
|
519
|
+
}
|
|
520
|
+
|
|
428
521
|
if (hasFiles) {
|
|
429
522
|
out("#### Files");
|
|
430
523
|
out();
|
|
@@ -12,7 +12,7 @@
|
|
|
12
12
|
* ~/.claude/projects/<project-key>/<session-id>/subagents/<agent-id>.jsonl
|
|
13
13
|
*
|
|
14
14
|
* This module reads both main and subagent transcripts, merges them by model,
|
|
15
|
-
* and returns aggregated token/model usage + human prompt count.
|
|
15
|
+
* and returns aggregated token/model usage + human prompt count + time breakdown.
|
|
16
16
|
*/
|
|
17
17
|
import { readFile, readdir } from "fs/promises";
|
|
18
18
|
import { existsSync } from "fs";
|
|
@@ -40,8 +40,12 @@ export interface TranscriptResult {
|
|
|
40
40
|
totalCacheReadTokens: number;
|
|
41
41
|
};
|
|
42
42
|
humanPromptCount: number;
|
|
43
|
-
/**
|
|
43
|
+
/** Total active session time in minutes (idle gaps >15 min excluded). */
|
|
44
44
|
activeMinutes: number;
|
|
45
|
+
/** Minutes Claude was actively processing (human→assistant gaps). */
|
|
46
|
+
aiMinutes: number;
|
|
47
|
+
/** Minutes the human was active between Claude responses (>30s gaps, <15m). */
|
|
48
|
+
humanMinutes: number;
|
|
45
49
|
}
|
|
46
50
|
|
|
47
51
|
interface TranscriptEntry {
|
|
@@ -58,6 +62,11 @@ interface TranscriptEntry {
|
|
|
58
62
|
};
|
|
59
63
|
}
|
|
60
64
|
|
|
65
|
+
interface TimedMessage {
|
|
66
|
+
type: string;
|
|
67
|
+
ts: number;
|
|
68
|
+
}
|
|
69
|
+
|
|
61
70
|
function modelShort(full: string): ModelUsage["modelShort"] {
|
|
62
71
|
if (/opus/i.test(full)) return "Opus";
|
|
63
72
|
if (/sonnet/i.test(full)) return "Sonnet";
|
|
@@ -82,12 +91,12 @@ function projectKey(repoRoot: string): string {
|
|
|
82
91
|
async function parseTranscriptFile(filePath: string): Promise<{
|
|
83
92
|
entries: TranscriptEntry[];
|
|
84
93
|
humanCount: number;
|
|
85
|
-
|
|
94
|
+
timedMessages: TimedMessage[];
|
|
86
95
|
}> {
|
|
87
96
|
const raw = await readFile(filePath, "utf8");
|
|
88
97
|
const entries: TranscriptEntry[] = [];
|
|
89
98
|
let humanCount = 0;
|
|
90
|
-
const
|
|
99
|
+
const timedMessages: TimedMessage[] = [];
|
|
91
100
|
|
|
92
101
|
for (const line of raw.split("\n")) {
|
|
93
102
|
const trimmed = line.trim();
|
|
@@ -96,34 +105,62 @@ async function parseTranscriptFile(filePath: string): Promise<{
|
|
|
96
105
|
const entry = JSON.parse(trimmed) as TranscriptEntry;
|
|
97
106
|
entries.push(entry);
|
|
98
107
|
if (entry.type === "human") humanCount++;
|
|
99
|
-
if (entry.timestamp) {
|
|
108
|
+
if (entry.type && entry.timestamp) {
|
|
100
109
|
const ms = new Date(entry.timestamp).getTime();
|
|
101
|
-
if (!isNaN(ms))
|
|
110
|
+
if (!isNaN(ms)) timedMessages.push({ type: entry.type, ts: ms });
|
|
102
111
|
}
|
|
103
112
|
} catch {
|
|
104
113
|
// Skip malformed lines
|
|
105
114
|
}
|
|
106
115
|
}
|
|
107
116
|
|
|
108
|
-
return { entries, humanCount,
|
|
117
|
+
return { entries, humanCount, timedMessages };
|
|
109
118
|
}
|
|
110
119
|
|
|
111
120
|
/**
|
|
112
|
-
* Compute
|
|
121
|
+
* Compute AI vs human time breakdown from a sequence of timed messages.
|
|
113
122
|
*
|
|
114
|
-
*
|
|
115
|
-
*
|
|
116
|
-
*
|
|
123
|
+
* - human→* gap: Claude is processing (AI time)
|
|
124
|
+
* - assistant→* gap <30s: automated tool result turnaround (AI time)
|
|
125
|
+
* - assistant→* gap 30s–15m: human reviewing/thinking (human time)
|
|
126
|
+
* - Any gap ≥15m: idle, excluded
|
|
117
127
|
*/
|
|
118
|
-
function
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
128
|
+
function computeTimeBreakdown(messages: TimedMessage[]): {
|
|
129
|
+
totalMinutes: number;
|
|
130
|
+
aiMinutes: number;
|
|
131
|
+
humanMinutes: number;
|
|
132
|
+
} {
|
|
133
|
+
const sorted = [...messages].sort((a, b) => a.ts - b.ts);
|
|
134
|
+
const IDLE_MS = 900_000; // 15 min
|
|
135
|
+
const AUTO_MS = 30_000; // 30 sec — automated tool turnaround
|
|
136
|
+
|
|
137
|
+
let aiMs = 0;
|
|
138
|
+
let humanMs = 0;
|
|
139
|
+
|
|
122
140
|
for (let i = 1; i < sorted.length; i++) {
|
|
123
|
-
const
|
|
124
|
-
|
|
141
|
+
const prev = sorted[i - 1]!;
|
|
142
|
+
const curr = sorted[i]!;
|
|
143
|
+
const gap = curr.ts - prev.ts;
|
|
144
|
+
if (gap >= IDLE_MS) continue; // idle gap, skip
|
|
145
|
+
|
|
146
|
+
if (prev.type === "human") {
|
|
147
|
+
// Claude is processing a message
|
|
148
|
+
aiMs += gap;
|
|
149
|
+
} else {
|
|
150
|
+
// Gap after an assistant message
|
|
151
|
+
if (gap < AUTO_MS) {
|
|
152
|
+
aiMs += gap; // automated tool result
|
|
153
|
+
} else {
|
|
154
|
+
humanMs += gap; // human reviewing / typing next message
|
|
155
|
+
}
|
|
156
|
+
}
|
|
125
157
|
}
|
|
126
|
-
|
|
158
|
+
|
|
159
|
+
return {
|
|
160
|
+
aiMinutes: Math.round(aiMs / 60_000),
|
|
161
|
+
humanMinutes: Math.round(humanMs / 60_000),
|
|
162
|
+
totalMinutes: Math.round((aiMs + humanMs) / 60_000),
|
|
163
|
+
};
|
|
127
164
|
}
|
|
128
165
|
|
|
129
166
|
function aggregateEntries(entries: TranscriptEntry[]): Map<string, ModelUsage> {
|
|
@@ -164,8 +201,9 @@ function aggregateEntries(entries: TranscriptEntry[]): Map<string, ModelUsage> {
|
|
|
164
201
|
* ~/.claude/projects/<key>/ directory structure. Aggregates token counts by model
|
|
165
202
|
* (Opus / Sonnet / Haiku / Unknown) and counts human prompt turns.
|
|
166
203
|
*
|
|
167
|
-
*
|
|
168
|
-
*
|
|
204
|
+
* Time breakdown uses only the main transcript's message sequence (human vs
|
|
205
|
+
* assistant), since subagent messages are automated orchestration — not human
|
|
206
|
+
* interaction. Human→assistant gaps = AI time; assistant→human gaps >30s = human time.
|
|
169
207
|
*
|
|
170
208
|
* Returns null if the session transcript file doesn't exist (session not found).
|
|
171
209
|
*/
|
|
@@ -183,21 +221,17 @@ export async function parseTranscript(
|
|
|
183
221
|
const {
|
|
184
222
|
entries: mainEntries,
|
|
185
223
|
humanCount,
|
|
186
|
-
|
|
224
|
+
timedMessages,
|
|
187
225
|
} = await parseTranscriptFile(mainFile);
|
|
188
226
|
const combined = aggregateEntries(mainEntries);
|
|
189
|
-
const allTimestamps: number[] = [...mainTimestamps];
|
|
190
227
|
|
|
191
|
-
// Merge subagent transcripts
|
|
228
|
+
// Merge subagent transcripts (token counts only — exclude from time breakdown)
|
|
192
229
|
const subagentDir = join(transcriptDir, sessionId, "subagents");
|
|
193
230
|
if (existsSync(subagentDir)) {
|
|
194
231
|
for (const file of (await readdir(subagentDir)).filter((f) =>
|
|
195
232
|
f.endsWith(".jsonl"),
|
|
196
233
|
)) {
|
|
197
|
-
const { entries
|
|
198
|
-
join(subagentDir, file),
|
|
199
|
-
);
|
|
200
|
-
allTimestamps.push(...timestamps);
|
|
234
|
+
const { entries } = await parseTranscriptFile(join(subagentDir, file));
|
|
201
235
|
for (const [model, usage] of aggregateEntries(entries)) {
|
|
202
236
|
const existing = combined.get(model);
|
|
203
237
|
if (!existing) {
|
|
@@ -235,11 +269,16 @@ export async function parseTranscript(
|
|
|
235
269
|
},
|
|
236
270
|
);
|
|
237
271
|
|
|
272
|
+
const { totalMinutes, aiMinutes, humanMinutes } =
|
|
273
|
+
computeTimeBreakdown(timedMessages);
|
|
274
|
+
|
|
238
275
|
return {
|
|
239
276
|
sessionId,
|
|
240
277
|
byModel,
|
|
241
278
|
totals,
|
|
242
279
|
humanPromptCount: humanCount,
|
|
243
|
-
activeMinutes:
|
|
280
|
+
activeMinutes: totalMinutes,
|
|
281
|
+
aiMinutes,
|
|
282
|
+
humanMinutes,
|
|
244
283
|
};
|
|
245
284
|
}
|
|
@@ -0,0 +1,432 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Branch protection utilities for claude-attribution install.
|
|
3
|
+
*
|
|
4
|
+
* Handles both classic branch protection rules and GitHub rulesets.
|
|
5
|
+
* After installing the workflow, detects what protection is active on the
|
|
6
|
+
* default branch and interactively offers to add our workflow job as a
|
|
7
|
+
* required status check.
|
|
8
|
+
*
|
|
9
|
+
* Design:
|
|
10
|
+
* - Classic protection: PATCH .../required_status_checks (preserves existing)
|
|
11
|
+
* - Rulesets: PUT .../rulesets/{id} with updated rules (preserves all other rules)
|
|
12
|
+
* - Both present: numbered prompt so user chooses where to add
|
|
13
|
+
* - Already configured: silent skip for that protection type
|
|
14
|
+
* - Any failure: graceful fallback to informational note, never breaks install
|
|
15
|
+
*/
|
|
16
|
+
import { execFile } from "child_process";
|
|
17
|
+
import { promisify } from "util";
|
|
18
|
+
import { writeFile, unlink, rmdir, mkdtemp } from "fs/promises";
|
|
19
|
+
import { tmpdir } from "os";
|
|
20
|
+
import { join } from "path";
|
|
21
|
+
import { createInterface } from "readline";
|
|
22
|
+
|
|
23
|
+
const execFileAsync = promisify(execFile);
|
|
24
|
+
|
|
25
|
+
/** The GitHub Actions job name — must match `jobs.metrics.name` in the workflow template. */
|
|
26
|
+
export const WORKFLOW_CHECK_NAME = "Claude Code Attribution Metrics";
|
|
27
|
+
|
|
28
|
+
/** GitHub Actions app ID — used so the check shows "GitHub Actions" rather than "any source". */
|
|
29
|
+
const GITHUB_ACTIONS_APP_ID = 15368;
|
|
30
|
+
|
|
31
|
+
type Check = { context: string; app_id: number };
|
|
32
|
+
|
|
33
|
+
interface ClassicStatus {
|
|
34
|
+
branch: string;
|
|
35
|
+
strict: boolean;
|
|
36
|
+
checks: Check[];
|
|
37
|
+
hasOurCheck: boolean;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
interface RulesetStatus {
|
|
41
|
+
id: number;
|
|
42
|
+
name: string;
|
|
43
|
+
hasOurCheck: boolean;
|
|
44
|
+
/** Full raw ruleset object — re-submitted on PUT to preserve all fields. */
|
|
45
|
+
raw: RawRuleset;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
interface RawRuleset {
|
|
49
|
+
name: string;
|
|
50
|
+
target?: string;
|
|
51
|
+
enforcement?: string;
|
|
52
|
+
conditions?: unknown;
|
|
53
|
+
bypass_actors?: unknown[];
|
|
54
|
+
rules?: RawRule[];
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
interface RawRule {
|
|
58
|
+
type: string;
|
|
59
|
+
parameters?: {
|
|
60
|
+
strict_required_status_checks_policy?: boolean;
|
|
61
|
+
required_status_checks?: Array<{
|
|
62
|
+
context: string;
|
|
63
|
+
integration_id?: number;
|
|
64
|
+
}>;
|
|
65
|
+
[key: string]: unknown;
|
|
66
|
+
};
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// ─── GitHub API helpers ───────────────────────────────────────────────────────
|
|
70
|
+
|
|
71
|
+
async function ghGet(path: string): Promise<unknown> {
|
|
72
|
+
try {
|
|
73
|
+
const { stdout } = (await execFileAsync("gh", [
|
|
74
|
+
"api",
|
|
75
|
+
path,
|
|
76
|
+
])) as unknown as { stdout: string };
|
|
77
|
+
return JSON.parse(stdout) as unknown;
|
|
78
|
+
} catch {
|
|
79
|
+
return null;
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
async function ghPut(path: string, body: unknown): Promise<void> {
|
|
84
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "claude-attribution-api-"));
|
|
85
|
+
const tmpFile = join(tmpDir, "body.json");
|
|
86
|
+
try {
|
|
87
|
+
await writeFile(tmpFile, JSON.stringify(body), { flag: "wx" });
|
|
88
|
+
await execFileAsync("gh", [
|
|
89
|
+
"api",
|
|
90
|
+
path,
|
|
91
|
+
"--method",
|
|
92
|
+
"PUT",
|
|
93
|
+
"--input",
|
|
94
|
+
tmpFile,
|
|
95
|
+
]);
|
|
96
|
+
} finally {
|
|
97
|
+
await unlink(tmpFile).catch(() => {});
|
|
98
|
+
await rmdir(tmpDir).catch(() => {});
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
async function ghPatch(path: string, body: unknown): Promise<void> {
|
|
103
|
+
const tmpDir = await mkdtemp(join(tmpdir(), "claude-attribution-api-"));
|
|
104
|
+
const tmpFile = join(tmpDir, "body.json");
|
|
105
|
+
try {
|
|
106
|
+
await writeFile(tmpFile, JSON.stringify(body), { flag: "wx" });
|
|
107
|
+
await execFileAsync("gh", [
|
|
108
|
+
"api",
|
|
109
|
+
path,
|
|
110
|
+
"--method",
|
|
111
|
+
"PATCH",
|
|
112
|
+
"--input",
|
|
113
|
+
tmpFile,
|
|
114
|
+
]);
|
|
115
|
+
} finally {
|
|
116
|
+
await unlink(tmpFile).catch(() => {});
|
|
117
|
+
await rmdir(tmpDir).catch(() => {});
|
|
118
|
+
}
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
// ─── Detection ────────────────────────────────────────────────────────────────
|
|
122
|
+
|
|
123
|
+
async function getClassicStatus(
|
|
124
|
+
slug: string,
|
|
125
|
+
branch: string,
|
|
126
|
+
): Promise<ClassicStatus | null> {
|
|
127
|
+
const data = await ghGet(`repos/${slug}/branches/${branch}/protection`);
|
|
128
|
+
if (!data) return null;
|
|
129
|
+
|
|
130
|
+
const prot = data as {
|
|
131
|
+
required_status_checks?: {
|
|
132
|
+
strict: boolean;
|
|
133
|
+
contexts?: string[];
|
|
134
|
+
checks?: Check[];
|
|
135
|
+
};
|
|
136
|
+
};
|
|
137
|
+
|
|
138
|
+
const rsc = prot.required_status_checks;
|
|
139
|
+
if (!rsc) {
|
|
140
|
+
// Protection exists but no required status checks configured yet
|
|
141
|
+
return {
|
|
142
|
+
branch,
|
|
143
|
+
strict: false,
|
|
144
|
+
checks: [],
|
|
145
|
+
hasOurCheck: false,
|
|
146
|
+
};
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
const checks: Check[] =
|
|
150
|
+
rsc.checks && rsc.checks.length > 0
|
|
151
|
+
? rsc.checks
|
|
152
|
+
: (rsc.contexts ?? []).map((c) => ({ context: c, app_id: -1 }));
|
|
153
|
+
|
|
154
|
+
return {
|
|
155
|
+
branch,
|
|
156
|
+
strict: rsc.strict,
|
|
157
|
+
checks,
|
|
158
|
+
hasOurCheck: checks.some((c) => c.context === WORKFLOW_CHECK_NAME),
|
|
159
|
+
};
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
async function getRulesetStatuses(slug: string): Promise<RulesetStatus[]> {
|
|
163
|
+
const list = await ghGet(`repos/${slug}/rulesets`);
|
|
164
|
+
if (!Array.isArray(list) || list.length === 0) return [];
|
|
165
|
+
|
|
166
|
+
// Fetch full details for each ruleset in parallel (rules not in list response)
|
|
167
|
+
const results = await Promise.all(
|
|
168
|
+
(list as Array<{ id: number }>).map(async (rs) => {
|
|
169
|
+
const full = (await ghGet(
|
|
170
|
+
`repos/${slug}/rulesets/${rs.id}`,
|
|
171
|
+
)) as RawRuleset | null;
|
|
172
|
+
if (!full) return null;
|
|
173
|
+
|
|
174
|
+
const statusCheckRule = full.rules?.find(
|
|
175
|
+
(r) => r.type === "required_status_checks",
|
|
176
|
+
);
|
|
177
|
+
const hasOurCheck =
|
|
178
|
+
statusCheckRule?.parameters?.required_status_checks?.some(
|
|
179
|
+
(c) => c.context === WORKFLOW_CHECK_NAME,
|
|
180
|
+
) ?? false;
|
|
181
|
+
|
|
182
|
+
return {
|
|
183
|
+
id: rs.id,
|
|
184
|
+
name: full.name,
|
|
185
|
+
hasOurCheck,
|
|
186
|
+
raw: full,
|
|
187
|
+
} satisfies RulesetStatus;
|
|
188
|
+
}),
|
|
189
|
+
);
|
|
190
|
+
|
|
191
|
+
return results.filter((r): r is RulesetStatus => r !== null);
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
/** Extract "owner/repo" from SSH or HTTPS origin remote URL. */
|
|
195
|
+
export function remoteUrlToSlug(url: string): string | null {
|
|
196
|
+
const m =
|
|
197
|
+
url.match(/github\.com[:/]([^/]+\/[^/]+?)(?:\.git)?$/) ??
|
|
198
|
+
url.match(/github\.com\/([^/]+\/[^/]+?)(?:\.git)?$/);
|
|
199
|
+
return m?.[1] ?? null;
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// ─── Modification ─────────────────────────────────────────────────────────────
|
|
203
|
+
|
|
204
|
+
async function addToClassic(
|
|
205
|
+
classic: ClassicStatus,
|
|
206
|
+
slug: string,
|
|
207
|
+
): Promise<void> {
|
|
208
|
+
await ghPatch(
|
|
209
|
+
`repos/${slug}/branches/${classic.branch}/protection/required_status_checks`,
|
|
210
|
+
{
|
|
211
|
+
strict: classic.strict,
|
|
212
|
+
checks: [
|
|
213
|
+
...classic.checks,
|
|
214
|
+
{ context: WORKFLOW_CHECK_NAME, app_id: GITHUB_ACTIONS_APP_ID },
|
|
215
|
+
],
|
|
216
|
+
},
|
|
217
|
+
);
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
async function addToRuleset(rs: RulesetStatus, slug: string): Promise<void> {
|
|
221
|
+
const rules = rs.raw.rules ?? [];
|
|
222
|
+
const existingRule = rules.find((r) => r.type === "required_status_checks");
|
|
223
|
+
|
|
224
|
+
let updatedRules: RawRule[];
|
|
225
|
+
if (existingRule) {
|
|
226
|
+
const existingChecks =
|
|
227
|
+
existingRule.parameters?.required_status_checks ?? [];
|
|
228
|
+
updatedRules = rules.map((r) =>
|
|
229
|
+
r.type === "required_status_checks"
|
|
230
|
+
? {
|
|
231
|
+
...r,
|
|
232
|
+
parameters: {
|
|
233
|
+
...r.parameters,
|
|
234
|
+
required_status_checks: [
|
|
235
|
+
...existingChecks,
|
|
236
|
+
{
|
|
237
|
+
context: WORKFLOW_CHECK_NAME,
|
|
238
|
+
integration_id: GITHUB_ACTIONS_APP_ID,
|
|
239
|
+
},
|
|
240
|
+
],
|
|
241
|
+
},
|
|
242
|
+
}
|
|
243
|
+
: r,
|
|
244
|
+
);
|
|
245
|
+
} else {
|
|
246
|
+
// No required_status_checks rule yet — add one
|
|
247
|
+
updatedRules = [
|
|
248
|
+
...rules,
|
|
249
|
+
{
|
|
250
|
+
type: "required_status_checks",
|
|
251
|
+
parameters: {
|
|
252
|
+
strict_required_status_checks_policy: false,
|
|
253
|
+
required_status_checks: [
|
|
254
|
+
{
|
|
255
|
+
context: WORKFLOW_CHECK_NAME,
|
|
256
|
+
integration_id: GITHUB_ACTIONS_APP_ID,
|
|
257
|
+
},
|
|
258
|
+
],
|
|
259
|
+
},
|
|
260
|
+
},
|
|
261
|
+
];
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
await ghPut(`repos/${slug}/rulesets/${rs.id}`, {
|
|
265
|
+
...rs.raw,
|
|
266
|
+
rules: updatedRules,
|
|
267
|
+
});
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
// ─── Prompts ──────────────────────────────────────────────────────────────────
|
|
271
|
+
|
|
272
|
+
async function promptYesNo(question: string): Promise<boolean> {
|
|
273
|
+
if (!process.stdin.isTTY) return false;
|
|
274
|
+
const rl = createInterface({ input: process.stdin, output: process.stdout });
|
|
275
|
+
return new Promise((resolve) => {
|
|
276
|
+
rl.question(question, (answer) => {
|
|
277
|
+
rl.close();
|
|
278
|
+
resolve(answer.trim().toLowerCase() === "y");
|
|
279
|
+
});
|
|
280
|
+
});
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
async function promptChoice(
|
|
284
|
+
question: string,
|
|
285
|
+
options: string[],
|
|
286
|
+
): Promise<number> {
|
|
287
|
+
if (!process.stdin.isTTY) return -1;
|
|
288
|
+
const rl = createInterface({ input: process.stdin, output: process.stdout });
|
|
289
|
+
return new Promise((resolve) => {
|
|
290
|
+
const numbered = options.map((o, i) => ` [${i + 1}] ${o}`).join("\n");
|
|
291
|
+
rl.question(`${question}\n${numbered}\n Choice [1]: `, (answer) => {
|
|
292
|
+
rl.close();
|
|
293
|
+
const n = parseInt(answer.trim() || "1", 10);
|
|
294
|
+
resolve(n >= 1 && n <= options.length ? n - 1 : -1);
|
|
295
|
+
});
|
|
296
|
+
});
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
function printNote(branch: string): void {
|
|
300
|
+
console.log(
|
|
301
|
+
`\n ℹ️ To block merges when this workflow fails, add '${WORKFLOW_CHECK_NAME}'`,
|
|
302
|
+
);
|
|
303
|
+
console.log(
|
|
304
|
+
` to required status checks for '${branch}' in Settings → Branches or Rules.`,
|
|
305
|
+
);
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
// ─── Main entry point ─────────────────────────────────────────────────────────
|
|
309
|
+
|
|
310
|
+
/**
 * Detect branch protection on the repo's default branch and offer to add
 * the workflow job as a required status check. Called from install.ts after
 * the workflow file is written. Never throws — all errors fall back to a note.
 */
export async function configureRequiredCheck(repoRoot: string): Promise<void> {
  try {
    // Resolve the GitHub "owner/repo" slug from the 'origin' remote URL.
    const { stdout: remoteOut } = (await execFileAsync(
      "git",
      ["remote", "get-url", "origin"],
      { cwd: repoRoot },
    )) as unknown as { stdout: string };
    const slug = remoteUrlToSlug(remoteOut.trim());
    // No origin remote, or a URL remoteUrlToSlug can't parse — silently bail.
    if (!slug) return;

    // Look up the default branch via the GitHub API.
    // NOTE(review): ghGet presumably returns null/undefined on API failure —
    // the optional chain below relies on that; confirm against its definition.
    const repoData = await ghGet(`repos/${slug}`);
    const branch = (repoData as { default_branch?: string } | null)
      ?.default_branch;
    if (!branch) return;

    // Detect both protection types in parallel
    const [classic, rulesets] = await Promise.all([
      getClassicStatus(slug, branch),
      getRulesetStatuses(slug),
    ]);

    // Determine what needs to be added.
    // classic === null means no classic branch-protection rule exists at all.
    const classicNeeded = classic !== null && !classic.hasOurCheck;
    const rulesetsNeeded = rulesets.filter((rs) => !rs.hasOurCheck);

    // Already fully configured
    if (!classicNeeded && rulesetsNeeded.length === 0) {
      // Only log the ✓ when protection exists AND already includes our check;
      // with no protection at all, stay quiet and fall through to return.
      if (classic?.hasOurCheck || rulesets.some((rs) => rs.hasOurCheck)) {
        console.log(
          `✓ '${WORKFLOW_CHECK_NAME}' already a required status check`,
        );
      }
      return;
    }

    // Build the list of targets that need our check
    const targets: Array<
      | { kind: "classic"; classic: ClassicStatus }
      | { kind: "ruleset"; rs: RulesetStatus }
    > = [];
    // classicNeeded narrows `classic` to non-null here (aliased condition).
    if (classicNeeded) targets.push({ kind: "classic", classic });
    for (const rs of rulesetsNeeded) targets.push({ kind: "ruleset", rs });

    let chosen: typeof targets;

    if (targets.length === 1) {
      // Single target — simple yes/no
      const target = targets[0]!;
      const label =
        target.kind === "classic"
          ? `branch protection rule on '${branch}'`
          : `ruleset '${target.rs.name}'`;
      console.log(`\n Branch protection is active on '${branch}'.`);
      const yes = await promptYesNo(
        ` Add '${WORKFLOW_CHECK_NAME}' to ${label}? [y/N] `,
      );
      // Declined (or non-TTY, where promptYesNo returns false) — print the
      // manual-setup note instead of changing anything.
      if (!yes) {
        printNote(branch);
        return;
      }
      chosen = targets;
    } else {
      // Multiple targets — numbered choice
      // Menu layout: one entry per target, then "Both", then "Skip" last.
      const options = [
        ...targets.map((t) =>
          t.kind === "classic"
            ? `Branch protection rule on '${branch}'`
            : `Ruleset: '${t.rs.name}'`,
        ),
        "Both",
        "Skip",
      ];
      console.log(
        `\n Multiple branch protection rules active on '${branch}'.`,
      );
      console.log(
        ` Where should '${WORKFLOW_CHECK_NAME}' be added as a required check?`,
      );
      const idx = await promptChoice("", options);
      // -1 covers both an invalid answer and a non-TTY stdin.
      if (idx === -1 || idx === options.length - 1) {
        // Skip
        printNote(branch);
        return;
      }
      if (idx === options.length - 2) {
        // Both
        chosen = targets;
      } else {
        chosen = [targets[idx]!];
      }
    }

    // Apply changes
    for (const target of chosen) {
      if (target.kind === "classic") {
        await addToClassic(target.classic, slug);
        console.log(
          `✓ Added '${WORKFLOW_CHECK_NAME}' to branch protection on '${branch}'`,
        );
      } else {
        await addToRuleset(target.rs, slug);
        console.log(
          `✓ Added '${WORKFLOW_CHECK_NAME}' to ruleset '${target.rs.name}'`,
        );
      }
    }
  } catch {
    // Any failure (git, network, API permissions) degrades to instructions
    // the user can follow by hand — per the contract above, never throw.
    console.log(
      `\n ℹ️ Could not configure required status checks automatically.`,
    );
    console.log(
      ` To block merges on workflow failure, add '${WORKFLOW_CHECK_NAME}'`,
    );
    console.log(
      ` to required status checks in your branch protection settings.`,
    );
  }
}
|
package/src/setup/install.ts
CHANGED
|
@@ -19,6 +19,7 @@ import {
|
|
|
19
19
|
detectHookManager,
|
|
20
20
|
type HooksConfig,
|
|
21
21
|
} from "./shared.ts";
|
|
22
|
+
import { configureRequiredCheck } from "./branch-protection.ts";
|
|
22
23
|
|
|
23
24
|
const execFileAsync = promisify(execFile);
|
|
24
25
|
|
|
@@ -174,7 +175,10 @@ async function main() {
|
|
|
174
175
|
`✓ Installed .github/workflows/claude-attribution-pr.yml — runner: ${runsOn}${detectedNote}`,
|
|
175
176
|
);
|
|
176
177
|
|
|
177
|
-
// 5.
|
|
178
|
+
// 5. Check branch protection and offer to add required status check
|
|
179
|
+
await configureRequiredCheck(targetRepo);
|
|
180
|
+
|
|
181
|
+
// 6. Record installed version for auto-upgrade tracking
|
|
178
182
|
const pkg = JSON.parse(
|
|
179
183
|
await readFile(join(ATTRIBUTION_ROOT, "package.json"), "utf8"),
|
|
180
184
|
) as { version: string };
|