claude-git-hooks 2.12.0 → 2.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -1
- package/README.md +36 -4
- package/bin/claude-hooks +8 -0
- package/lib/commands/analyze.js +217 -0
- package/lib/commands/bump-version.js +20 -5
- package/lib/hooks/pre-commit.js +26 -265
- package/lib/utils/analysis-engine.js +469 -0
- package/lib/utils/git-operations.js +130 -1
- package/lib/utils/git-tag-manager.js +58 -8
- package/lib/utils/interactive-ui.js +86 -1
- package/lib/utils/resolution-prompt.js +57 -34
- package/lib/utils/version-manager.js +219 -52
- package/package.json +1 -1
|
@@ -0,0 +1,469 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* File: analysis-engine.js
|
|
3
|
+
* Purpose: Shared code analysis logic for pre-commit hook and analyze command
|
|
4
|
+
*
|
|
5
|
+
* Why this exists: Both pre-commit.js and analyze.js need to:
|
|
6
|
+
* - Build file data (diff/content) for analysis
|
|
7
|
+
* - Orchestrate parallel vs sequential analysis
|
|
8
|
+
* - Consolidate results from multiple batches
|
|
9
|
+
* - Check for blocking/non-blocking issues
|
|
10
|
+
* - Display results to user
|
|
11
|
+
*
|
|
12
|
+
* By extracting this logic, we:
|
|
13
|
+
* - Eliminate code duplication
|
|
14
|
+
* - Ensure consistent behavior
|
|
15
|
+
* - Make testing easier
|
|
16
|
+
* - Simplify maintenance
|
|
17
|
+
*
|
|
18
|
+
* Dependencies:
|
|
19
|
+
* - git-operations: File diff and content retrieval
|
|
20
|
+
* - claude-client: Claude CLI execution
|
|
21
|
+
* - prompt-builder: Analysis prompt construction
|
|
22
|
+
* - logger: Debug and error logging
|
|
23
|
+
*/
|
|
24
|
+
|
|
25
|
+
import fs from 'fs';
|
|
26
|
+
import {
|
|
27
|
+
getFileDiff,
|
|
28
|
+
getFileContentFromStaging,
|
|
29
|
+
isNewFile,
|
|
30
|
+
getRepoName,
|
|
31
|
+
getCurrentBranch
|
|
32
|
+
} from './git-operations.js';
|
|
33
|
+
import { analyzeCode, analyzeCodeParallel, chunkArray } from './claude-client.js';
|
|
34
|
+
import { buildAnalysisPrompt } from './prompt-builder.js';
|
|
35
|
+
import logger from './logger.js';
|
|
36
|
+
|
|
37
|
+
/**
|
|
38
|
+
* Standard file data schema used throughout the analysis pipeline
|
|
39
|
+
* @typedef {Object} FileData
|
|
40
|
+
* @property {string} path - File path (relative to repo root)
|
|
41
|
+
* @property {string|null} diff - Git diff content (for modified files)
|
|
42
|
+
* @property {string|null} content - Full content (for new files)
|
|
43
|
+
* @property {boolean} isNew - Whether file is newly added
|
|
44
|
+
*/
|
|
45
|
+
|
|
46
|
+
/**
|
|
47
|
+
* Builds file data for analysis (diff or full content)
|
|
48
|
+
* Why: Unified file data extraction used by both pre-commit and analyze command
|
|
49
|
+
*
|
|
50
|
+
* @param {string} filePath - Path to file
|
|
51
|
+
* @param {Object} options - Build options
|
|
52
|
+
* @param {boolean} options.staged - Use staged content (default: true)
|
|
53
|
+
* @returns {FileData|null} File data object or null on error
|
|
54
|
+
*/
|
|
55
|
+
export const buildFileData = (filePath, options = {}) => {
|
|
56
|
+
const { staged = true } = options;
|
|
57
|
+
|
|
58
|
+
try {
|
|
59
|
+
// Check if new file
|
|
60
|
+
const isNew = isNewFile(filePath);
|
|
61
|
+
|
|
62
|
+
if (isNew) {
|
|
63
|
+
// For new files, use full content
|
|
64
|
+
const content = staged
|
|
65
|
+
? getFileContentFromStaging(filePath)
|
|
66
|
+
: fs.readFileSync(filePath, 'utf8');
|
|
67
|
+
|
|
68
|
+
return {
|
|
69
|
+
path: filePath,
|
|
70
|
+
diff: null,
|
|
71
|
+
content,
|
|
72
|
+
isNew: true
|
|
73
|
+
};
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// For modified files, use diff
|
|
77
|
+
const diff = getFileDiff(filePath, { cached: staged });
|
|
78
|
+
|
|
79
|
+
return {
|
|
80
|
+
path: filePath,
|
|
81
|
+
diff,
|
|
82
|
+
content: null,
|
|
83
|
+
isNew: false
|
|
84
|
+
};
|
|
85
|
+
|
|
86
|
+
} catch (err) {
|
|
87
|
+
logger.error('analysis-engine - buildFileData', `Failed to build file data: ${filePath}`, err);
|
|
88
|
+
return null;
|
|
89
|
+
}
|
|
90
|
+
};
|
|
91
|
+
|
|
92
|
+
/**
 * Builds file data for multiple files
 * Why: Batch processing with error handling — files that fail to build are
 * silently dropped (buildFileData already logged the error).
 *
 * @param {Array<string|{path: string}>} files - Array of file paths or file objects
 * @param {Object} options - Build options
 * @param {boolean} options.staged - Use staged content (default: true)
 * @returns {Array<FileData>} Array of file data objects (excludes failures)
 */
export const buildFilesData = (files, options = {}) => {
  // Accept both plain path strings and objects carrying a `path` property.
  const results = files
    .map((entry) => buildFileData(typeof entry === 'string' ? entry : entry.path, options))
    .filter((data) => Boolean(data));

  logger.debug('analysis-engine - buildFilesData', 'Built file data', {
    inputCount: files.length,
    outputCount: results.length
  });

  return results;
};
|
|
121
|
+
|
|
122
|
+
/**
 * Consolidates multiple analysis results into one
 * Why: Parallel analysis produces one result per batch; merging uses
 * worst-case semantics for gate/score/grades and sums for issue counts.
 *
 * @param {Array<Object>} results - Array of analysis results
 * @returns {Object} Consolidated result
 */
export const consolidateResults = (results) => {
  // Letter-grade ranking: A (best) → E (worst). Unknown grades rank as A.
  const GRADE_RANK = { 'A': 5, 'B': 4, 'C': 3, 'D': 2, 'E': 1 };
  const GRADED_METRICS = ['reliability', 'security', 'maintainability'];

  // Start from the best possible outcome and degrade as results come in.
  const consolidated = {
    QUALITY_GATE: 'PASSED',
    approved: true,
    score: 10,
    metrics: {
      reliability: 'A',
      security: 'A',
      maintainability: 'A',
      coverage: 100,
      duplications: 0,
      complexity: 0
    },
    issues: { blocker: 0, critical: 0, major: 0, minor: 0, info: 0 },
    details: [],
    blockingIssues: [],
    securityHotspots: 0
  };

  for (const result of results) {
    // Worst-case quality gate, approval, and score.
    if (result.QUALITY_GATE === 'FAILED') {
      consolidated.QUALITY_GATE = 'FAILED';
    }
    if (result.approved === false) {
      consolidated.approved = false;
    }
    if (result.score < consolidated.score) {
      consolidated.score = result.score;
    }

    if (result.metrics) {
      // Keep the worst letter grade per graded metric.
      for (const metric of GRADED_METRICS) {
        const currentRank = GRADE_RANK[consolidated.metrics[metric]] || 5;
        const incomingRank = GRADE_RANK[result.metrics[metric]] || 5;
        if (incomingRank < currentRank) {
          consolidated.metrics[metric] = result.metrics[metric];
        }
      }

      // Coverage: lowest wins; duplications/complexity: highest wins.
      if (result.metrics.coverage !== undefined) {
        consolidated.metrics.coverage = Math.min(consolidated.metrics.coverage, result.metrics.coverage);
      }
      if (result.metrics.duplications !== undefined) {
        consolidated.metrics.duplications = Math.max(consolidated.metrics.duplications, result.metrics.duplications);
      }
      if (result.metrics.complexity !== undefined) {
        consolidated.metrics.complexity = Math.max(consolidated.metrics.complexity, result.metrics.complexity);
      }
    }

    // Severity counts are additive across batches.
    if (result.issues) {
      for (const severity of Object.keys(consolidated.issues)) {
        consolidated.issues[severity] += (result.issues[severity] || 0);
      }
    }

    // Detail/blocking lists are concatenated; hotspot counts are summed.
    if (Array.isArray(result.details)) {
      consolidated.details.push(...result.details);
    }
    if (Array.isArray(result.blockingIssues)) {
      consolidated.blockingIssues.push(...result.blockingIssues);
    }
    if (result.securityHotspots) {
      consolidated.securityHotspots += result.securityHotspots;
    }
  }

  logger.debug('analysis-engine - consolidateResults', 'Results consolidated', {
    inputCount: results.length,
    totalIssues: Object.values(consolidated.issues).reduce((a, b) => a + b, 0),
    qualityGate: consolidated.QUALITY_GATE
  });

  return consolidated;
};
|
|
218
|
+
|
|
219
|
+
/**
 * Checks if result has any blocking issues (CRITICAL or BLOCKER)
 * Why: Determines if commit should be blocked
 *
 * @param {Object} result - Analysis result
 * @returns {boolean} True if has blocking issues
 */
export const hasBlockingIssues = (result) => {
  const counts = result.issues || {};
  const blockerCount = counts.blocker === undefined ? 0 : counts.blocker;
  const criticalCount = counts.critical === undefined ? 0 : counts.critical;
  return blockerCount + criticalCount > 0;
};
|
|
230
|
+
|
|
231
|
+
/**
 * Checks if result has any issues at any severity level
 * Why: Determines if user should be prompted for confirmation
 *
 * @param {Object} result - Analysis result
 * @returns {boolean} True if has any issues
 */
export const hasAnyIssues = (result) => {
  const { blocker = 0, critical = 0, major = 0, minor = 0, info = 0 } = result.issues || {};
  const total = [blocker, critical, major, minor, info].reduce((sum, count) => sum + count, 0);
  return total > 0;
};
|
|
242
|
+
|
|
243
|
+
/**
 * Gets total issue count
 * Why: Convenience function for display
 *
 * @param {Object} result - Analysis result
 * @returns {number} Total issue count
 */
export const getTotalIssueCount = (result) => {
  const counts = result.issues || {};
  return ['blocker', 'critical', 'major', 'minor', 'info'].reduce(
    (total, severity) => total + (counts[severity] === undefined ? 0 : counts[severity]),
    0
  );
};
|
|
254
|
+
|
|
255
|
+
/**
 * Creates empty/passed analysis result
 * Why: Standard result when no files to analyze — callers can treat it
 * exactly like a real analysis result with zero findings.
 *
 * @returns {Object} Empty result with PASSED quality gate
 */
export const createEmptyResult = () => {
  // Key order matches the shape produced by consolidateResults.
  return {
    QUALITY_GATE: 'PASSED',
    approved: true,
    issues: { blocker: 0, critical: 0, major: 0, minor: 0, info: 0 },
    details: [],
    blockingIssues: [],
    securityHotspots: 0
  };
};
|
|
269
|
+
|
|
270
|
+
/**
 * Displays compact issue summary
 * Why: Quick overview of issues found — total first, then one line per
 * severity that has a non-zero count.
 *
 * @param {Object} result - Analysis result
 */
export const displayIssueSummary = (result) => {
  const { blocker = 0, critical = 0, major = 0, minor = 0, info = 0 } = result.issues || {};

  console.log(`📊 ${blocker + critical + major + minor + info} issue(s) found`);

  // Severity rows, worst first; zero-count rows are suppressed.
  const rows = [
    [blocker, `  🔴 Blocker: ${blocker}`],
    [critical, `  🟠 Critical: ${critical}`],
    [major, `  🟡 Major: ${major}`],
    [minor, `  🔵 Minor: ${minor}`],
    [info, `  ⚪ Info: ${info}`]
  ];
  for (const [count, line] of rows) {
    if (count > 0) console.log(line);
  }
};
|
|
287
|
+
|
|
288
|
+
/**
 * Displays detailed analysis results
 * Why: Full structured output for code review — banner, quality-gate verdict,
 * headline count, severity breakdown, per-issue details, security hotspots.
 *
 * @param {Object} result - Analysis result from Claude
 */
export const displayResults = (result) => {
  // Banner
  console.log();
  console.log('╔════════════════════════════════════════════════════════════════════╗');
  console.log('║ CODE QUALITY ANALYSIS ║');
  console.log('╚════════════════════════════════════════════════════════════════════╝');
  console.log();

  // Quality gate verdict (missing gate is treated as not-PASSED).
  if ((result.QUALITY_GATE || 'UNKNOWN') === 'PASSED') {
    logger.success('Quality Gate: PASSED');
  } else {
    logger.error('analysis-engine - displayResults', 'Quality Gate: FAILED');
  }
  console.log();

  const details = Array.isArray(result.details) ? result.details : [];

  // Headline: issue count across distinct files.
  if (details.length > 0) {
    const fileCount = new Set(details.map((issue) => issue.file)).size;
    console.log(`📊 ${details.length} issue(s) found across ${fileCount} file(s)`);
  } else {
    console.log('✅ No issues found!');
  }
  console.log();

  // Severity breakdown (zero-count rows suppressed).
  if (result.issues && typeof result.issues === 'object') {
    console.log('📋 ISSUES SUMMARY');

    const { blocker = 0, critical = 0, major = 0, minor = 0, info = 0 } = result.issues;

    console.log(`Total: ${blocker + critical + major + minor + info} issues found`);
    if (blocker > 0) console.log(`  🔴 Blocker: ${blocker}`);
    if (critical > 0) console.log(`  🟠 Critical: ${critical}`);
    if (major > 0) console.log(`  🟡 Major: ${major}`);
    if (minor > 0) console.log(`  🔵 Minor: ${minor}`);
    if (info > 0) console.log(`  ⚪ Info: ${info}`);
    console.log();
  }

  // Per-issue detail lines.
  if (details.length > 0) {
    console.log('🔍 DETAILED ISSUES');
    for (const detail of details) {
      console.log(`[${detail.severity}] ${detail.type} in ${detail.file}:${detail.line || '?'}`);
      console.log(`  ${detail.message}`);
      console.log();
    }
  }

  // Security hotspots reminder.
  if (result.securityHotspots && result.securityHotspots > 0) {
    console.log(`🔥 SECURITY HOTSPOTS: ${result.securityHotspots} found`);
    console.log('  Review security-sensitive code carefully');
    console.log();
  }
};
|
|
352
|
+
|
|
353
|
+
/**
 * Runs code analysis on files
 * Why: Unified analysis orchestration for both pre-commit and analyze command.
 * Chooses parallel (batched) execution when subagents are enabled and there
 * are at least 3 files; otherwise runs a single sequential analysis.
 *
 * @param {Array<FileData>} filesData - Array of file data objects
 * @param {Object} config - Configuration object
 * @param {Object} options - Analysis options
 * @param {boolean} options.saveDebug - Save debug output (default: from config)
 * @param {string} options.hook - Hook name for telemetry (default: 'analysis')
 * @returns {Promise<Object>} Analysis result
 */
export const runAnalysis = async (filesData, config, options = {}) => {
  const { saveDebug = config.system?.debug, hook = 'analysis' } = options;

  if (filesData.length === 0) {
    logger.debug('analysis-engine - runAnalysis', 'No files to analyze');
    return createEmptyResult();
  }

  // Parallel mode: subagents enabled (default) and enough files to batch.
  const subagentsEnabled = config.subagents?.enabled !== false;
  const batchSize = config.subagents?.batchSize || 3;
  const useParallel = subagentsEnabled && filesData.length >= 3;

  logger.debug('analysis-engine - runAnalysis', 'Starting analysis', {
    fileCount: filesData.length,
    useParallel,
    batchSize
  });

  // Builds one analysis prompt for a set of files.
  // Metadata is gathered per prompt, matching pre-existing behavior.
  const promptFor = (files, subagentConfig) => buildAnalysisPrompt({
    templateName: config.templates?.analysis,
    guidelinesName: config.templates?.guidelines,
    files,
    metadata: {
      REPO_NAME: getRepoName(),
      BRANCH_NAME: getCurrentBranch()
    },
    subagentConfig
  });

  let result;

  if (useParallel) {
    const fileBatches = chunkArray(filesData, batchSize);
    logger.debug('analysis-engine - runAnalysis', `Split into ${fileBatches.length} batches`);

    // One prompt per batch; no subagent instruction — batches already run in parallel.
    const prompts = await Promise.all(fileBatches.map((batch) => promptFor(batch, null)));

    const batchResults = await analyzeCodeParallel(prompts, {
      timeout: config.analysis?.timeout,
      saveDebug: false, // per-batch debug is skipped; consolidated output saved below
      telemetryContext: {
        fileCount: filesData.length,
        batchSize,
        model: config.subagents?.model || 'haiku',
        hook
      }
    });

    result = consolidateResults(batchResults);

    // Persist the consolidated result when debug output is requested.
    if (saveDebug) {
      const { saveDebugResponse } = await import('./claude-client.js');
      await saveDebugResponse(
        `PARALLEL ANALYSIS: ${fileBatches.length} batches`,
        JSON.stringify(result, null, 2)
      );
    }
  } else {
    logger.debug('analysis-engine - runAnalysis', 'Using sequential analysis');

    const prompt = await promptFor(filesData, config.subagents);

    result = await analyzeCode(prompt, {
      timeout: config.analysis?.timeout,
      saveDebug,
      telemetryContext: {
        fileCount: filesData.length,
        batchSize: filesData.length,
        totalBatches: 1,
        model: config.subagents?.model || 'haiku',
        hook
      }
    });
  }

  logger.debug('analysis-engine - runAnalysis', 'Analysis complete', {
    qualityGate: result.QUALITY_GATE,
    totalIssues: getTotalIssueCount(result)
  });

  return result;
};
|
|
@@ -130,6 +130,92 @@ const getStagedFiles = ({ extensions = [], includeDeleted = false } = {}) => {
|
|
|
130
130
|
return files;
|
|
131
131
|
};
|
|
132
132
|
|
|
133
|
+
/**
 * Gets list of unstaged files (modified but not staged)
 * Why: For on-demand analysis of working tree changes
 *
 * @param {Object} options - Filter options
 * @param {Array<string>} options.extensions - File extensions to filter (e.g., ['.java', '.xml'])
 * @param {boolean} options.includeDeleted - Include deleted files (default: false)
 * @returns {Array<string>} Array of unstaged file paths
 */
const getUnstagedFiles = ({ extensions = [], includeDeleted = false } = {}) => {
  logger.debug(
    'git-operations - getUnstagedFiles',
    'Getting unstaged files',
    { extensions, includeDeleted }
  );

  // Why: without --cached, `git diff` reports working-tree changes.
  // NOTE(review): includeDeleted adds 'R' (renamed) rather than 'D' (deleted)
  // to the filter — mirrors getStagedFiles, but confirm the intent.
  const diffFilter = includeDeleted ? 'ACMR' : 'ACM';
  const output = execGitCommand(`diff --name-only --diff-filter=${diffFilter}`);

  if (!output) {
    logger.debug('git-operations - getUnstagedFiles', 'No unstaged files found');
    return [];
  }

  const files = output.split(/\r?\n/).filter((line) => line.length > 0);

  if (extensions.length === 0) {
    return files;
  }

  const filtered = files.filter((file) => extensions.some((ext) => file.endsWith(ext)));

  logger.debug(
    'git-operations - getUnstagedFiles',
    'Filtered files by extension',
    { totalFiles: files.length, filteredFiles: filtered.length, extensions }
  );

  return filtered;
};
|
|
176
|
+
|
|
177
|
+
/**
 * Gets all tracked files in repository
 * Why: For comprehensive analysis of entire codebase
 *
 * @param {Object} options - Filter options
 * @param {Array<string>} options.extensions - File extensions to filter (e.g., ['.java', '.xml'])
 * @returns {Array<string>} Array of all tracked file paths
 */
const getAllTrackedFiles = ({ extensions = [] } = {}) => {
  logger.debug(
    'git-operations - getAllTrackedFiles',
    'Getting all tracked files',
    { extensions }
  );

  // Why: `git ls-files` lists every file tracked in the repository.
  const output = execGitCommand('ls-files');

  if (!output) {
    logger.debug('git-operations - getAllTrackedFiles', 'No tracked files found');
    return [];
  }

  const files = output.split(/\r?\n/).filter((line) => line.length > 0);

  if (extensions.length === 0) {
    return files;
  }

  const filtered = files.filter((file) => extensions.some((ext) => file.endsWith(ext)));

  logger.debug(
    'git-operations - getAllTrackedFiles',
    'Filtered files by extension',
    { totalFiles: files.length, filteredFiles: filtered.length, extensions }
  );

  return filtered;
};
|
|
218
|
+
|
|
133
219
|
/**
|
|
134
220
|
* Gets the diff for a specific file
|
|
135
221
|
* Why: Shows what changed in a file, essential for code review
|
|
@@ -490,6 +576,46 @@ const getBranchPushStatus = (branchName) => {
|
|
|
490
576
|
return status;
|
|
491
577
|
};
|
|
492
578
|
|
|
579
|
+
/**
 * Creates a git commit with the specified message
 * Why: Allows programmatic commit creation after analysis approval
 *
 * @param {string} message - Commit message (use "auto" for auto-generation)
 * @param {Object} options - Commit options
 * @param {boolean} options.noVerify - Skip pre-commit and commit-msg hooks (default: false)
 * @returns {Object} Result object with:
 *   - success: boolean
 *   - output: string (commit output including hash)
 *   - error: string (error message if failed)
 */
const createCommit = (message, { noVerify = false } = {}) => {
  logger.debug('git-operations - createCommit', 'Creating commit', { message, noVerify });

  try {
    const flags = noVerify ? '--no-verify ' : '';
    // Escape every character the shell still interprets inside double quotes:
    // backslash, double quote, backtick, and dollar sign.
    // Why: escaping only `"` left `` `...` `` and `$(...)` executable and a
    // trailing `\` able to break the quoting — i.e. command injection via
    // the commit message. A single-pass replace avoids double-escaping.
    const escapedMessage = message.replace(/[\\"`$]/g, (ch) => `\\${ch}`);
    const output = execGitCommand(`commit ${flags}-m "${escapedMessage}"`);

    logger.debug('git-operations - createCommit', 'Commit successful', { output });

    return {
      success: true,
      output,
      error: ''
    };

  } catch (error) {
    logger.error('git-operations - createCommit', 'Commit failed', error);

    return {
      success: false,
      output: '',
      // Prefer the richest error detail available, falling back progressively.
      error: error.output || error.cause?.message || error.message || 'Unknown commit error'
    };
  }
};
|
|
618
|
+
|
|
493
619
|
/**
|
|
494
620
|
* Pushes branch to remote
|
|
495
621
|
* Why: Publishes local branch to remote before creating PR
|
|
@@ -540,6 +666,8 @@ const pushBranch = (branchName, { setUpstream = false } = {}) => {
|
|
|
540
666
|
export {
|
|
541
667
|
GitError,
|
|
542
668
|
getStagedFiles,
|
|
669
|
+
getUnstagedFiles,
|
|
670
|
+
getAllTrackedFiles,
|
|
543
671
|
getFileDiff,
|
|
544
672
|
getFileContentFromStaging,
|
|
545
673
|
isNewFile,
|
|
@@ -550,5 +678,6 @@ export {
|
|
|
550
678
|
getRemoteName,
|
|
551
679
|
verifyRemoteExists,
|
|
552
680
|
getBranchPushStatus,
|
|
553
|
-
pushBranch
|
|
681
|
+
pushBranch,
|
|
682
|
+
createCommit
|
|
554
683
|
};
|