@mesadev/agentblame 0.2.11 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agentblame.js +3500 -0
- package/dist/blame.d.ts +4 -1
- package/dist/blame.js +293 -78
- package/dist/blame.js.map +1 -1
- package/dist/capture.d.ts +4 -7
- package/dist/capture.js +464 -486
- package/dist/capture.js.map +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +248 -85
- package/dist/index.js.map +1 -1
- package/dist/lib/analytics.d.ts +179 -0
- package/dist/lib/analytics.js +833 -0
- package/dist/lib/analytics.js.map +1 -0
- package/dist/lib/attribution.d.ts +54 -0
- package/dist/lib/attribution.js +266 -0
- package/dist/lib/attribution.js.map +1 -0
- package/dist/lib/checkpoint.d.ts +97 -0
- package/dist/lib/checkpoint.js +441 -0
- package/dist/lib/checkpoint.js.map +1 -0
- package/dist/lib/config.d.ts +46 -0
- package/dist/lib/config.js +123 -0
- package/dist/lib/config.js.map +1 -0
- package/dist/lib/database.d.ts +115 -85
- package/dist/lib/database.js +305 -325
- package/dist/lib/database.js.map +1 -1
- package/dist/lib/delta.d.ts +78 -0
- package/dist/lib/delta.js +309 -0
- package/dist/lib/delta.js.map +1 -0
- package/dist/lib/git/gitBlame.js +9 -4
- package/dist/lib/git/gitBlame.js.map +1 -1
- package/dist/lib/git/gitConfig.d.ts +5 -3
- package/dist/lib/git/gitConfig.js +41 -6
- package/dist/lib/git/gitConfig.js.map +1 -1
- package/dist/lib/git/gitDiff.d.ts +13 -1
- package/dist/lib/git/gitDiff.js +39 -7
- package/dist/lib/git/gitDiff.js.map +1 -1
- package/dist/lib/git/gitNotes.d.ts +30 -4
- package/dist/lib/git/gitNotes.js +140 -24
- package/dist/lib/git/gitNotes.js.map +1 -1
- package/dist/lib/hooks.d.ts +1 -0
- package/dist/lib/hooks.js +148 -27
- package/dist/lib/hooks.js.map +1 -1
- package/dist/lib/index.d.ts +7 -0
- package/dist/lib/index.js +13 -0
- package/dist/lib/index.js.map +1 -1
- package/dist/lib/storage.d.ts +163 -0
- package/dist/lib/storage.js +823 -0
- package/dist/lib/storage.js.map +1 -0
- package/dist/lib/trace.d.ts +118 -0
- package/dist/lib/trace.js +499 -0
- package/dist/lib/trace.js.map +1 -0
- package/dist/lib/types.d.ts +322 -114
- package/dist/lib/types.js +2 -1
- package/dist/lib/types.js.map +1 -1
- package/dist/lib/util.d.ts +8 -8
- package/dist/lib/util.js +18 -22
- package/dist/lib/util.js.map +1 -1
- package/dist/lib/watcher.d.ts +104 -0
- package/dist/lib/watcher.js +398 -0
- package/dist/lib/watcher.js.map +1 -0
- package/dist/post-merge.js +460 -421
- package/dist/post-merge.js.map +1 -1
- package/dist/process.d.ts +6 -5
- package/dist/process.js +233 -152
- package/dist/process.js.map +1 -1
- package/dist/sync.js +172 -131
- package/dist/sync.js.map +1 -1
- package/package.json +3 -2
package/dist/post-merge.js
CHANGED
@@ -16,6 +16,7 @@
 */
 Object.defineProperty(exports, "__esModule", { value: true });
 const node_child_process_1 = require("node:child_process");
+const gitNotes_1 = require("./lib/git/gitNotes");
 // Get environment variables
 const PR_NUMBER = process.env.PR_NUMBER || "";
 const PR_TITLE = process.env.PR_TITLE || "";
@@ -23,9 +24,9 @@ const BASE_SHA = process.env.BASE_SHA || "";
 const HEAD_SHA = process.env.HEAD_SHA || "";
 const MERGE_SHA = process.env.MERGE_SHA || "";
 const PR_AUTHOR = process.env.PR_AUTHOR || "unknown";
-//
+// Notes refs
+const NOTES_REF = "refs/notes/agentblame";
 const ANALYTICS_REF = "refs/notes/agentblame-analytics";
-// We store analytics on the repo's first commit (root)
 const ANALYTICS_ANCHOR = "agentblame-analytics-anchor";
 function run(cmd) {
 try {
@@ -42,7 +43,6 @@ function log(msg) {
 * Detect what type of merge was performed
 */
 function detectMergeType() {
-// Get the merge commit
 const mergeCommit = MERGE_SHA;
 if (!mergeCommit) {
 log("No merge commit SHA, assuming rebase");
@@ -50,14 +50,12 @@ function detectMergeType() {
 }
 // Check number of parents
 const parents = run(`git rev-list --parents -n 1 ${mergeCommit}`).split(" ");
-const parentCount = parents.length - 1;
+const parentCount = parents.length - 1;
 if (parentCount > 1) {
-// Multiple parents = merge commit
 log("Detected: Merge commit (multiple parents)");
 return "merge_commit";
 }
 // Single parent - could be squash or rebase
-// Check if commit message contains PR number (squash pattern)
 const commitMsg = run(`git log -1 --format=%s ${mergeCommit}`);
 if (commitMsg.includes(`#${PR_NUMBER}`) || commitMsg.includes(PR_TITLE)) {
 log("Detected: Squash merge (single commit with PR reference)");
@@ -67,37 +65,30 @@ function detectMergeType() {
 return "rebase";
 }
 /**
-* Get all commits that were in the PR
+* Get all commits that were in the PR
 */
 function getPRCommits() {
-// Get commits that were in the feature branch but not in base
 const output = run(`git rev-list ${BASE_SHA}..${HEAD_SHA}`);
 if (!output)
 return [];
 return output.split("\n").filter(Boolean);
 }
 /**
-* Read
+* Read attribution note from a commit (v3 format)
 */
 function readNote(sha) {
-const note = run(`git notes --ref
+const note = run(`git notes --ref=${NOTES_REF} show ${sha} 2>/dev/null`);
 if (!note)
 return null;
-
-return JSON.parse(note);
-}
-catch {
-return null;
-}
+return (0, gitNotes_1.parseNote)(note);
 }
 /**
-* Write
+* Write attribution note to a commit (v3 format)
 */
-function writeNote(sha, attribution) {
-const
+function writeNote(sha, attribution, sessions) {
+const noteContent = (0, gitNotes_1.buildNote)(attribution, sessions);
 try {
-
-const result = (0, node_child_process_1.spawnSync)("git", ["notes", "--ref=refs/notes/agentblame", "add", "-f", "-m", noteJson, sha], { encoding: "utf8" });
+const result = (0, node_child_process_1.spawnSync)("git", ["notes", `--ref=${NOTES_REF}`, "add", "-f", "-m", noteContent, sha], { encoding: "utf8" });
 if (result.status !== 0) {
 log(`Failed to write note to ${sha}: ${result.stderr}`);
 return false;
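Note handling is now delegated to lib/git/gitNotes: readNote shells out to `git notes --ref=refs/notes/agentblame show <sha>` and parses the result with `parseNote`, while writeNote serializes with `buildNote` before `git notes add -f`. Below is a minimal standalone sketch of the read side only; the `showRawNote` helper name is illustrative and not part of the package.

```js
// Illustrative sketch only: fetch the raw agentblame note attached to a commit.
const { spawnSync } = require("node:child_process");

const NOTES_REF = "refs/notes/agentblame";

function showRawNote(sha) {
  const res = spawnSync("git", ["notes", `--ref=${NOTES_REF}`, "show", sha], {
    encoding: "utf8",
  });
  // git exits non-zero when the commit has no note under this ref.
  return res.status === 0 ? res.stdout.trim() : null;
}
```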
@@ -110,66 +101,7 @@ function writeNote(sha, attribution) {
 }
 }
 /**
-*
-*
-* The contentHash in attributions is the hash of the FIRST line in the range.
-* We need to find that line in the commit's diff to extract the full content.
-*/
-function collectPRAttributions(prCommits) {
-const byHash = new Map();
-const withContent = [];
-for (const sha of prCommits) {
-const note = readNote(sha);
-if (!note?.attributions)
-continue;
-// Get the commit's diff with per-line hashes
-const hunks = getCommitHunks(sha);
-// Build a map from per-line contentHash to line data
-// Also build a map from path+lineNumber to content for range extraction
-const linesByHash = new Map();
-const linesByLocation = new Map();
-for (const hunk of hunks) {
-for (const line of hunk.lines) {
-linesByHash.set(line.contentHash, {
-path: hunk.path,
-lineNumber: line.lineNumber,
-content: line.content,
-});
-linesByLocation.set(`${hunk.path}:${line.lineNumber}`, line.content);
-}
-}
-for (const attr of note.attributions) {
-const hash = attr.contentHash;
-if (!byHash.has(hash)) {
-byHash.set(hash, []);
-}
-byHash.get(hash)?.push(attr);
-// Extract the full content for this attribution range
-// The contentHash is for the first line; we need to get all lines in the range
-const rangeLines = [];
-for (let lineNum = attr.startLine; lineNum <= attr.endLine; lineNum++) {
-const lineContent = linesByLocation.get(`${attr.path}:${lineNum}`);
-if (lineContent !== undefined) {
-rangeLines.push(lineContent);
-}
-}
-if (rangeLines.length > 0) {
-withContent.push({ ...attr, originalContent: rangeLines.join("\n") });
-}
-else {
-// Fallback: try to find by hash (first line)
-const lineData = linesByHash.get(hash);
-if (lineData) {
-withContent.push({ ...attr, originalContent: lineData.content });
-}
-}
-}
-}
-return { byHash, withContent };
-}
-/**
-* Get the diff of a commit and extract content with per-line hashes
-* This matches the behavior of lib/git/gitDiff.ts parseDiff()
+* Get diff hunks from a commit
 */
 function getCommitHunks(sha) {
 const diff = run(`git diff-tree -p ${sha}`);
@@ -181,17 +113,12 @@ function getCommitHunks(sha) {
 let hunkLines = [];
 let startLine = 0;
 for (const line of diff.split("\n")) {
-// New file header
 if (line.startsWith("+++ b/")) {
-// Save previous hunk
 if (hunkLines.length > 0 && currentFile) {
-const content = hunkLines.map((l) => l.content).join("\n");
 hunks.push({
 path: currentFile,
 startLine,
 endLine: startLine + hunkLines.length - 1,
-content,
-contentHash: computeHash(content),
 lines: hunkLines,
 });
 hunkLines = [];
@@ -199,22 +126,16 @@ function getCommitHunks(sha) {
 currentFile = line.slice(6);
 continue;
 }
-// Hunk header
 if (line.startsWith("@@")) {
-// Save previous hunk
 if (hunkLines.length > 0 && currentFile) {
-const content = hunkLines.map((l) => l.content).join("\n");
 hunks.push({
 path: currentFile,
 startLine,
 endLine: startLine + hunkLines.length - 1,
-content,
-contentHash: computeHash(content),
 lines: hunkLines,
 });
 hunkLines = [];
 }
-// Parse line number: @@ -old,count +new,count @@
 const match = line.match(/@@ -\d+(?:,\d+)? \+(\d+)/);
 if (match) {
 lineNumber = parseInt(match[1], 10);
@@ -222,31 +143,23 @@ function getCommitHunks(sha) {
 }
 continue;
 }
-// Added line
 if (line.startsWith("+") && !line.startsWith("+++")) {
 if (hunkLines.length === 0) {
 startLine = lineNumber;
 }
-const content = line.slice(1);
 hunkLines.push({
 lineNumber,
-content,
-contentHash: computeHash(content),
+content: line.slice(1),
 });
 lineNumber++;
 continue;
 }
-// Context or removed line
 if (!line.startsWith("-")) {
-// Save previous hunk if we hit a non-added line
 if (hunkLines.length > 0 && currentFile) {
-const content = hunkLines.map((l) => l.content).join("\n");
 hunks.push({
 path: currentFile,
 startLine,
 endLine: startLine + hunkLines.length - 1,
-content,
-contentHash: computeHash(content),
 lines: hunkLines,
 });
 hunkLines = [];
@@ -254,182 +167,184 @@ function getCommitHunks(sha) {
 lineNumber++;
 }
 }
-// Save last hunk
 if (hunkLines.length > 0 && currentFile) {
-const content = hunkLines.map((l) => l.content).join("\n");
 hunks.push({
 path: currentFile,
 startLine,
 endLine: startLine + hunkLines.length - 1,
-content,
-contentHash: computeHash(content),
 lines: hunkLines,
 });
 }
 return hunks;
 }
 /**
-*
+* Collect attributions from PR commits
 */
-function
-const
-
+function collectPRAttributions(prCommits) {
+const allSessions = {};
+const fileRanges = new Map();
+for (const sha of prCommits) {
+const note = readNote(sha);
+if (!note)
+continue;
+// Collect sessions
+for (const [sessionId, session] of Object.entries(note.sessions)) {
+if (!allSessions[sessionId]) {
+allSessions[sessionId] = session;
+}
+}
+// Get content from the commit diff
+const hunks = getCommitHunks(sha);
+const hunksByPath = new Map();
+for (const hunk of hunks) {
+if (!hunksByPath.has(hunk.path)) {
+hunksByPath.set(hunk.path, []);
+}
+hunksByPath.get(hunk.path).push(hunk);
+}
+// Collect file ranges with content
+for (const [filePath, fileAttr] of Object.entries(note.files)) {
+if (!fileRanges.has(filePath)) {
+fileRanges.set(filePath, new Map());
+}
+const sessionMap = fileRanges.get(filePath);
+for (const range of fileAttr.aiRanges) {
+if (!sessionMap.has(range.sessionId)) {
+sessionMap.set(range.sessionId, []);
+}
+// Find content for this range from hunks
+const fileHunks = hunksByPath.get(filePath) || [];
+const content = [];
+for (const hunk of fileHunks) {
+for (const line of hunk.lines) {
+if (line.lineNumber >= range.startLine && line.lineNumber <= range.endLine) {
+content.push(line.content);
+}
+}
+}
+sessionMap.get(range.sessionId).push({
+startLine: range.startLine,
+endLine: range.endLine,
+content,
+});
+}
+}
+}
+return { sessions: allSessions, fileRanges };
 }
 /**
-* Find
-* Returns attributions with calculated precise line numbers
+* Find where content appears in a merge commit
 */
-function
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if (offset > 0) {
-const contentBeforeAI = hunkContent.slice(0, offset);
-const linesBeforeAI = contentBeforeAI.split("\n").length - 1;
-startLine = hunk.startLine + linesBeforeAI;
-}
-const aiLineCount = aiContent.split("\n").length;
-const endLine = startLine + aiLineCount - 1;
-// Create clean attribution without originalContent
-const { originalContent: _, ...cleanAttr } = attr;
-results.push({
-...cleanAttr,
-path: hunk.path,
-startLine: startLine,
-endLine: endLine,
-});
-log(` Contained match: ${hunk.path}:${startLine}-${endLine} (${attr.provider})`);
+function findContentMatch(lineMap, content) {
+if (content.length === 0)
+return null;
+const firstLine = content[0];
+const lineNumbers = Array.from(lineMap.entries())
+.filter(([_, c]) => c === firstLine)
+.map(([n]) => n);
+for (const startLine of lineNumbers) {
+let matches = true;
+for (let i = 0; i < content.length; i++) {
+const lineContent = lineMap.get(startLine + i);
+if (lineContent !== content[i]) {
+matches = false;
+break;
+}
+}
+if (matches) {
+return { start: startLine, end: startLine + content.length - 1 };
+}
 }
-return
+return null;
 }
 /**
-*
+* Match content and build attribution for a merge commit
 */
-function
-
-//
-const
-
-
-
-
-
-// Get hunks from the squash commit (with per-line hashes)
-const hunks = getCommitHunks(MERGE_SHA);
-log(`Squash commit has ${hunks.length} hunks`);
-// Build a map of per-line hashes in the squash commit
-const squashLinesByHash = new Map();
-for (const hunk of hunks) {
+function buildMergeAttribution(mergeSha, prData) {
+const mergeHunks = getCommitHunks(mergeSha);
+// Build content index from merge commit
+const mergeContentIndex = new Map();
+for (const hunk of mergeHunks) {
+if (!mergeContentIndex.has(hunk.path)) {
+mergeContentIndex.set(hunk.path, new Map());
+}
+const lineMap = mergeContentIndex.get(hunk.path);
 for (const line of hunk.lines) {
-
-path: hunk.path,
-lineNumber: line.lineNumber,
-content: line.content,
-});
+lineMap.set(line.lineNumber, line.content);
 }
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-});
-matchedContentHashes.add(hash);
-log(` Line hash match: ${squashLine.path}:${squashLine.lineNumber} (${attr.provider})`);
+const attribution = {
+version: 3,
+timestamp: new Date().toISOString(),
+sessions: prData.sessions,
+files: {},
+};
+let matchCount = 0;
+for (const [filePath, sessionRanges] of prData.fileRanges) {
+// Try to find matching path
+let targetPath = filePath;
+if (!mergeContentIndex.has(filePath)) {
+const matchingPath = Array.from(mergeContentIndex.keys()).find((p) => p.endsWith(filePath) || filePath.endsWith(p));
+if (!matchingPath)
+continue;
+targetPath = matchingPath;
 }
-
-
-
-
-
-
-for (const
-
-
+const targetContent = mergeContentIndex.get(targetPath);
+if (!attribution.files[targetPath]) {
+attribution.files[targetPath] = {
+aiRanges: [],
+humanRanges: [],
+};
+}
+for (const [sessionId, ranges] of sessionRanges) {
+for (const range of ranges) {
+if (range.content.length === 0)
+continue;
+const matchedLines = findContentMatch(targetContent, range.content);
+if (matchedLines) {
+attribution.files[targetPath].aiRanges.push({
+sessionId,
+startLine: matchedLines.start,
+endLine: matchedLines.end,
+});
+matchCount++;
+log(` Matched: ${targetPath}:${matchedLines.start}-${matchedLines.end}`);
+}
+}
 }
 }
-if (
-
-return;
-}
-// Merge consecutive attributions with same provider
-const mergedAttributions = mergeConsecutiveAttributions(newAttributions);
-// Write note to squash commit
-const note = {
-version: 2,
-timestamp: new Date().toISOString(),
-attributions: mergedAttributions,
-};
-if (writeNote(MERGE_SHA, note)) {
-log(`✓ Attached ${mergedAttributions.length} attribution(s) to squash commit`);
+if (matchCount === 0) {
+return null;
 }
+return { attribution, matchCount };
 }
 /**
-*
+* Handle squash merge
 */
-function
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-current.endLine >= next.startLine - 1 &&
-current.provider === next.provider) {
-// Merge: extend the range
-current.endLine = Math.max(current.endLine, next.endLine);
-current.confidence = Math.min(current.confidence, next.confidence);
-}
-else {
-merged.push(current);
-current = { ...next };
-}
+function handleSquashMerge(prCommits) {
+log(`Transferring notes from ${prCommits.length} PR commits to squash commit ${MERGE_SHA}`);
+const prData = collectPRAttributions(prCommits);
+if (Object.keys(prData.sessions).length === 0) {
+log("No attributions found in PR commits");
+return;
+}
+log(`Found ${Object.keys(prData.sessions).length} sessions`);
+const result = buildMergeAttribution(MERGE_SHA, prData);
+if (!result) {
+log("No attributions matched to squash commit");
+return;
+}
+if (writeNote(MERGE_SHA, result.attribution, prData.sessions)) {
+log(`✓ Attached ${result.matchCount} attribution range(s) to squash commit`);
 }
-merged.push(current);
-return merged;
 }
 /**
-*
+* Handle rebase merge
 */
 function handleRebaseMerge(prCommits) {
 log(`Handling rebase merge: ${prCommits.length} original commits`);
-
-
-if (byHash.size === 0) {
+const prData = collectPRAttributions(prCommits);
+if (Object.keys(prData.sessions).length === 0) {
 log("No attributions found in PR commits");
 return;
 }
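The per-line contentHash matching from v2 is replaced by whole-range content matching: `collectPRAttributions` captures the literal lines of each AI range from the PR commits, and `buildMergeAttribution` relocates them in the squash or rebased commit via `findContentMatch`. A toy illustration of the matching idea follows; the data is made up, not taken from the package.

```js
// Illustrative sketch only: locate a captured range in a merge commit's
// line map (line number -> content), mirroring the findContentMatch logic.
const lineMap = new Map([
  [41, "export function add(a, b) {"],
  [42, "  return a + b;"],
  [43, "}"],
]);
const rangeContent = ["export function add(a, b) {", "  return a + b;", "}"];

const start = [...lineMap.entries()]
  .filter(([, text]) => text === rangeContent[0])
  .map(([n]) => n)
  .find((n) => rangeContent.every((text, i) => lineMap.get(n + i) === text));

// start === 41, so the range is re-attributed to lines 41-43.
console.log(start, start + rangeContent.length - 1);
```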
@@ -440,86 +355,41 @@ function handleRebaseMerge(prCommits) {
 log(`Found ${newCommits.length} new commits after rebase`);
 let totalTransferred = 0;
 for (const newSha of newCommits) {
-const
-
-
-
-
-for (const hunk of hunks) {
-for (const line of hunk.lines) {
-linesByHash.set(line.contentHash, {
-path: hunk.path,
-lineNumber: line.lineNumber,
-});
-}
-}
-// First pass: exact line hash matches
-for (const [hash, attrs] of byHash) {
-const lineInfo = linesByHash.get(hash);
-if (lineInfo && attrs.length > 0) {
-const attr = attrs[0];
-newAttributions.push({
-...attr,
-path: lineInfo.path,
-startLine: lineInfo.lineNumber,
-endLine: lineInfo.lineNumber,
-});
-matchedContentHashes.add(hash);
-}
-}
-// Second pass: containment matching
-for (const hunk of hunks) {
-const unmatchedAttrs = withContent.filter((a) => !matchedContentHashes.has(a.contentHash));
-if (unmatchedAttrs.length === 0)
-continue;
-const containedMatches = findContainedAttributions(hunk, unmatchedAttrs);
-for (const match of containedMatches) {
-newAttributions.push(match);
-matchedContentHashes.add(match.contentHash);
-}
-}
-if (newAttributions.length > 0) {
-// Merge consecutive attributions
-const merged = mergeConsecutiveAttributions(newAttributions);
-const note = {
-version: 2,
-timestamp: new Date().toISOString(),
-attributions: merged,
-};
-if (writeNote(newSha, note)) {
-log(` ✓ ${newSha.slice(0, 7)}: ${merged.length} attribution(s)`);
-totalTransferred += merged.length;
+const result = buildMergeAttribution(newSha, prData);
+if (result && result.matchCount > 0) {
+if (writeNote(newSha, result.attribution, prData.sessions)) {
+log(` ✓ ${newSha.slice(0, 7)}: ${result.matchCount} range(s)`);
+totalTransferred += result.matchCount;
 }
 }
 }
-log(`✓ Transferred ${totalTransferred}
+log(`✓ Transferred ${totalTransferred} range(s) across ${newCommits.length} commits`);
 }
 // =============================================================================
-// Analytics
+// Analytics
 // =============================================================================
 /**
-* Get the root commit SHA
+* Get the root commit SHA
 */
 function getRootCommit() {
 return run("git rev-list --max-parents=0 HEAD").split("\n")[0] || "";
 }
 /**
 * Get or create the analytics anchor tag
-* Returns the SHA the tag points to (root commit)
 */
 function getOrCreateAnalyticsAnchor() {
-// Check if tag exists
 const existingTag = run(`git rev-parse ${ANALYTICS_ANCHOR} 2>/dev/null`);
 if (existingTag) {
 return existingTag;
 }
-// Create tag on root commit
 const rootSha = getRootCommit();
 if (!rootSha) {
 log("Warning: Could not find root commit for analytics anchor");
 return "";
 }
-const result = (0, node_child_process_1.spawnSync)("git", ["tag", ANALYTICS_ANCHOR, rootSha], {
+const result = (0, node_child_process_1.spawnSync)("git", ["tag", ANALYTICS_ANCHOR, rootSha], {
+encoding: "utf8",
+});
 if (result.status !== 0) {
 log(`Warning: Could not create analytics anchor tag: ${result.stderr}`);
 return "";
@@ -538,11 +408,7 @@ function readAnalyticsNote() {
 if (!note)
 return null;
 try {
-
-if (parsed.version === 2) {
-return parsed;
-}
-return null;
+return JSON.parse(note);
 }
 catch {
 return null;
@@ -564,8 +430,7 @@ function writeAnalyticsNote(analytics) {
 return true;
 }
 /**
-* Get PR diff stats
-* Only counts non-empty lines to match how attributions are counted
+* Get PR diff stats
 */
 function getPRDiffStats() {
 const diff = run(`git diff ${BASE_SHA}..${MERGE_SHA || "HEAD"}`);
@@ -574,47 +439,59 @@ function getPRDiffStats() {
 let additions = 0;
 let deletions = 0;
 for (const line of diff.split("\n")) {
-// Added line (but not diff header)
 if (line.startsWith("+") && !line.startsWith("+++")) {
 const content = line.slice(1).trim();
-if (content !== "")
+if (content !== "")
 additions++;
-}
 }
-// Deleted line (but not diff header)
 else if (line.startsWith("-") && !line.startsWith("---")) {
 const content = line.slice(1).trim();
-if (content !== "")
+if (content !== "")
 deletions++;
-}
 }
 }
 return { additions, deletions };
 }
 /**
-*
+* Compute stats from collected attributions
 */
-function
+function computePRStats(prData) {
 let aiLines = 0;
-
+let prompts = 0;
+const byAgent = {};
 const byModel = {};
-
-
-
-
-
-
-
-
-
-
-
-
+const countedSessions = new Set();
+for (const [_, sessionRanges] of prData.fileRanges) {
+for (const [sessionId, ranges] of sessionRanges) {
+const session = prData.sessions[sessionId];
+if (!session)
+continue;
+for (const range of ranges) {
+const lineCount = range.endLine - range.startLine + 1;
+aiLines += lineCount;
+// Agent breakdown
+const agent = session.agent;
+if (agent === "cursor" || agent === "claude" || agent === "opencode") {
+byAgent[agent] = (byAgent[agent] || 0) + lineCount;
+}
+// Model breakdown
+if (session.model) {
+byModel[session.model] = (byModel[session.model] || 0) + lineCount;
+}
+}
+// Count prompts from session metadata (only once per session)
+if (!countedSessions.has(sessionId) && session.prompts) {
+prompts += session.prompts.length;
+countedSessions.add(sessionId);
+}
+}
+}
+return { aiLines, prompts, byAgent, byModel };
 }
 /**
-* Merge
+* Merge agent breakdowns
 */
-function
+function mergeAgents(a, b) {
 const result = { ...a };
 for (const [key, value] of Object.entries(b)) {
 const k = key;
@@ -633,145 +510,308 @@ function mergeModels(a, b) {
 return result;
 }
 /**
-* Update analytics with
+* Update analytics with PR data
 */
-function updateAnalytics(existing,
-const prStats =
+function updateAnalytics(existing, prData) {
+const prStats = computePRStats(prData);
 const diffStats = getPRDiffStats();
 const now = new Date().toISOString();
-
-
-
-
-
-
+// Determine if this PR was tracked (has session data)
+const isTracked = Object.keys(prData.sessions).length > 0;
+// If tracked: we know AI vs Human
+// If untracked: all lines are unknown
+const aiLines = prStats.aiLines;
+const humanLines = isTracked ? diffStats.additions - prStats.aiLines : 0;
+const unknownLines = isTracked ? 0 : diffStats.additions;
+// Create PR entry
+const prEntry = {
+number: parseInt(PR_NUMBER, 10) || 0,
+title: PR_TITLE.slice(0, 100),
 author: PR_AUTHOR,
-
-
-
-
-
+aiLines,
+humanLines,
+unknownLines,
+prompts: prStats.prompts,
+mergedAt: now,
 };
 if (existing) {
-// Update existing
-
-
-
-
-
-
-
-
-
-
-
-
-
-totalLines: 0,
+// Update existing
+existing.summary.aiLines += aiLines;
+existing.summary.humanLines += humanLines;
+existing.summary.unknownLines = (existing.summary.unknownLines || 0) + unknownLines;
+existing.summary.totalLines = existing.summary.aiLines + existing.summary.humanLines + existing.summary.unknownLines;
+existing.summary.prompts = (existing.summary.prompts || 0) + prStats.prompts;
+existing.summary.byAgent = mergeAgents(existing.summary.byAgent, prStats.byAgent);
+existing.summary.byModel = mergeModels(existing.summary.byModel, prStats.byModel);
+// Update contributor
+if (!existing.contributors[PR_AUTHOR]) {
+existing.contributors[PR_AUTHOR] = {
+commits: 0,
+prs: 0,
+prompts: 0,
 aiLines: 0,
-
-
-
+humanLines: 0,
+unknownLines: 0,
+topModels: [],
 };
 }
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+const contributor = existing.contributors[PR_AUTHOR];
+contributor.commits += 1;
+contributor.prs = (contributor.prs || 0) + 1;
+contributor.prompts = (contributor.prompts || 0) + prStats.prompts;
+contributor.aiLines += aiLines;
+contributor.humanLines += humanLines;
+contributor.unknownLines = (contributor.unknownLines || 0) + unknownLines;
+// Update top models
+const modelCounts = new Map();
+for (const model of contributor.topModels) {
+modelCounts.set(model, (modelCounts.get(model) || 0) + 1);
+}
+for (const [model, count] of Object.entries(prStats.byModel)) {
+modelCounts.set(model, (modelCounts.get(model) || 0) + count);
+}
+contributor.topModels = Array.from(modelCounts.entries())
+.sort((a, b) => b[1] - a[1])
+.slice(0, 3)
+.map(([model]) => model);
+// Add PR to recent list
+if (!existing.recentPRs) {
+existing.recentPRs = [];
+}
+existing.recentPRs.unshift(prEntry);
+if (existing.recentPRs.length > 100) {
+existing.recentPRs = existing.recentPRs.slice(0, 100);
+}
+// Update time series
+const hour = now.slice(0, 13); // YYYY-MM-DDTHH
+const date = now.slice(0, 10); // YYYY-MM-DD
+if (!existing.timeSeries) {
+existing.timeSeries = { hourly: [], daily: [] };
+}
+// Update or add hourly data point
+const existingHourly = existing.timeSeries.hourly.find(h => h.hour === hour);
+if (existingHourly) {
+existingHourly.aiLines += aiLines;
+existingHourly.humanLines += humanLines;
+existingHourly.unknownLines = (existingHourly.unknownLines || 0) + unknownLines;
+existingHourly.prompts = (existingHourly.prompts || 0) + prStats.prompts;
+existingHourly.commits += 1;
+for (const [agent, count] of Object.entries(prStats.byAgent)) {
+if (count !== undefined) {
+existingHourly.byAgent[agent] = (existingHourly.byAgent[agent] || 0) + count;
+}
+}
+for (const [model, count] of Object.entries(prStats.byModel)) {
+if (count !== undefined) {
+existingHourly.byModel[model] = (existingHourly.byModel[model] || 0) + count;
+}
+}
+}
+else {
+existing.timeSeries.hourly.unshift({
+hour,
+aiLines,
+humanLines,
+unknownLines,
+prompts: prStats.prompts,
+byAgent: { ...prStats.byAgent },
+byModel: { ...prStats.byModel },
+commits: 1,
+});
+// Keep only last 72 hours
+if (existing.timeSeries.hourly.length > 72) {
+existing.timeSeries.hourly = existing.timeSeries.hourly.slice(0, 72);
+}
+}
+// Update or add daily data point
+const existingDaily = existing.timeSeries.daily.find(d => d.date === date);
+if (existingDaily) {
+existingDaily.aiLines += aiLines;
+existingDaily.humanLines += humanLines;
+existingDaily.unknownLines = (existingDaily.unknownLines || 0) + unknownLines;
+existingDaily.prompts = (existingDaily.prompts || 0) + prStats.prompts;
+existingDaily.commits += 1;
+for (const [agent, count] of Object.entries(prStats.byAgent)) {
+if (count !== undefined) {
+existingDaily.byAgent[agent] = (existingDaily.byAgent[agent] || 0) + count;
+}
+}
+for (const [model, count] of Object.entries(prStats.byModel)) {
+if (count !== undefined) {
+existingDaily.byModel[model] = (existingDaily.byModel[model] || 0) + count;
+}
+}
+}
+else {
+existing.timeSeries.daily.unshift({
+date,
+aiLines,
+humanLines,
+unknownLines,
+prompts: prStats.prompts,
+byAgent: { ...prStats.byAgent },
+byModel: { ...prStats.byModel },
+commits: 1,
+});
+// Keep only last 30 days
+if (existing.timeSeries.daily.length > 30) {
+existing.timeSeries.daily = existing.timeSeries.daily.slice(0, 30);
+}
+}
+existing.updated = now;
+return existing;
+}
+// Create time series data point
+const hour = now.slice(0, 13); // YYYY-MM-DDTHH
+const date = now.slice(0, 10); // YYYY-MM-DD
+const hourlyDataPoint = {
+hour,
+aiLines,
+humanLines,
+unknownLines,
+prompts: prStats.prompts,
+byAgent: prStats.byAgent,
+byModel: prStats.byModel,
+commits: 1,
+};
+const dailyDataPoint = {
+date,
+aiLines,
+humanLines,
+unknownLines,
+prompts: prStats.prompts,
+byAgent: prStats.byAgent,
+byModel: prStats.byModel,
+commits: 1,
 };
+// Create new analytics
 return {
-
+v: 3,
+updated: now,
 summary: {
 totalLines: diffStats.additions,
-aiLines
-humanLines
-
-
-
+aiLines,
+humanLines,
+unknownLines,
+prompts: prStats.prompts,
+byAgent: prStats.byAgent,
+byModel: prStats.byModel,
+},
+contributors: {
+[PR_AUTHOR]: {
+commits: 1,
+prs: 1,
+prompts: prStats.prompts,
+aiLines,
+humanLines,
+unknownLines,
+topModels: Object.keys(prStats.byModel).slice(0, 3),
+},
+},
+recentPRs: [prEntry],
+timeSeries: {
+hourly: [hourlyDataPoint],
+daily: [dailyDataPoint],
 },
-contributors,
-history: [historyEntry],
 };
 }
 /**
-* Collect
+* Collect attributions from merge result
 */
 function collectMergeAttributions(mergeType) {
 if (mergeType === "merge_commit") {
 // For merge commits, notes survive on original commits
-// Collect from all PR commits
 const prCommits = getPRCommits();
-
-for (const sha of prCommits) {
-const note = readNote(sha);
-if (note?.attributions) {
-allAttributions.push(...note.attributions);
-}
-}
-return allAttributions;
+return collectPRAttributions(prCommits);
 }
-// For squash/rebase, read from the merge commit(s)
 if (mergeType === "squash" && MERGE_SHA) {
 const note = readNote(MERGE_SHA);
-
+if (!note) {
+return { sessions: {}, fileRanges: new Map() };
+}
+// Convert note back to PRAttributionData format
+const fileRanges = new Map();
+for (const [filePath, fileAttr] of Object.entries(note.files)) {
+const sessionMap = new Map();
+for (const range of fileAttr.aiRanges) {
+if (!sessionMap.has(range.sessionId)) {
+sessionMap.set(range.sessionId, []);
+}
+sessionMap.get(range.sessionId).push({
+startLine: range.startLine,
+endLine: range.endLine,
+content: [], // Content not needed for analytics
+});
+}
+fileRanges.set(filePath, sessionMap);
+}
+return { sessions: note.sessions, fileRanges };
 }
 if (mergeType === "rebase") {
 // Collect from all new commits after rebase
 const newCommits = run(`git rev-list ${BASE_SHA}..HEAD`)
 .split("\n")
 .filter(Boolean);
-const
+const allSessions = {};
+const fileRanges = new Map();
 for (const sha of newCommits) {
 const note = readNote(sha);
-if (note
-
+if (!note)
+continue;
+// Merge sessions
+for (const [sessionId, session] of Object.entries(note.sessions)) {
+if (!allSessions[sessionId]) {
+allSessions[sessionId] = session;
+}
+}
+// Merge file ranges
+for (const [filePath, fileAttr] of Object.entries(note.files)) {
+if (!fileRanges.has(filePath)) {
+fileRanges.set(filePath, new Map());
+}
+const sessionMap = fileRanges.get(filePath);
+for (const range of fileAttr.aiRanges) {
+if (!sessionMap.has(range.sessionId)) {
+sessionMap.set(range.sessionId, []);
+}
+sessionMap.get(range.sessionId).push({
+startLine: range.startLine,
+endLine: range.endLine,
+content: [],
+});
+}
 }
 }
-return
+return { sessions: allSessions, fileRanges };
 }
-return
+return { sessions: {}, fileRanges: new Map() };
 }
 /**
 * Update repository analytics after PR merge
 */
 function updateRepositoryAnalytics(mergeType) {
 log("Updating repository analytics...");
-
-
-
-//
-
+const prData = collectMergeAttributions(mergeType);
+log(`Collected ${Object.keys(prData.sessions).length} sessions from PR`);
+let existing = readAnalyticsNote();
+// Check if existing analytics is v2 format (incompatible) - if so, start fresh
+if (existing && (!existing.v || existing.v < 3)) {
+log("Found v2 analytics, migrating to v3 format...");
+existing = null; // Start fresh with v3
+}
 if (existing) {
-
+const prCount = existing.recentPRs?.length || 0;
+log(`Found existing analytics: ${prCount} PRs, ${existing.summary?.totalLines || 0} total lines`);
 }
 else {
 log("No existing analytics found, creating new");
 }
-
-const updated = updateAnalytics(existing, attributions);
-// Write updated analytics
+const updated = updateAnalytics(existing, prData);
 if (writeAnalyticsNote(updated)) {
-
+const pct = updated.summary.totalLines > 0
+? Math.round((updated.summary.aiLines / updated.summary.totalLines) * 100)
+: 0;
+log(`✓ Updated analytics: ${updated.summary.aiLines}/${updated.summary.totalLines} AI lines (${pct}%)`);
 }
 }
 /**
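`updateAnalytics` now maintains a v3 analytics note: a cumulative summary, per-contributor totals, a `recentPRs` list capped at 100 entries, and hourly (72-point) / daily (30-point) time series; any pre-v3 note found by `updateRepositoryAnalytics` is discarded and rebuilt. The sketch below approximates the object's shape with placeholder values; field names come from the diff above, values are illustrative only.

```js
// Illustrative sketch only: approximate shape of the v3 analytics note.
const exampleAnalytics = {
  v: 3,
  updated: "2025-01-01T12:00:00.000Z",
  summary: {
    totalLines: 120,
    aiLines: 80,
    humanLines: 40,
    unknownLines: 0,
    prompts: 6,
    byAgent: { claude: 80 },
    byModel: { "example-model": 80 },
  },
  contributors: {
    "example-author": {
      commits: 1,
      prs: 1,
      prompts: 6,
      aiLines: 80,
      humanLines: 40,
      unknownLines: 0,
      topModels: ["example-model"],
    },
  },
  recentPRs: [], // capped at 100 entries, newest first
  timeSeries: { hourly: [], daily: [] }, // capped at 72 / 30 points
};
```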
@@ -785,7 +825,6 @@ async function main() {
 const mergeType = detectMergeType();
 if (mergeType === "merge_commit") {
 log("Merge commit detected - notes survive automatically on original commits");
-// Still update analytics for merge commits
 updateRepositoryAnalytics(mergeType);
 log("Done");
 return;
@@ -803,7 +842,7 @@ async function main() {
 else if (mergeType === "rebase") {
 handleRebaseMerge(prCommits);
 }
-// Update repository analytics
+// Update repository analytics
 updateRepositoryAnalytics(mergeType);
 log("Done");
 }