@eldrforge/commands-git 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +190 -0
- package/README.md +62 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +2346 -0
- package/dist/index.js.map +1 -0
- package/dist/src/commands/clean.d.ts +3 -0
- package/dist/src/commands/clean.d.ts.map +1 -0
- package/dist/src/commands/commit.d.ts +3 -0
- package/dist/src/commands/commit.d.ts.map +1 -0
- package/dist/src/commands/precommit.d.ts +8 -0
- package/dist/src/commands/precommit.d.ts.map +1 -0
- package/dist/src/commands/review.d.ts +3 -0
- package/dist/src/commands/review.d.ts.map +1 -0
- package/dist/src/index.d.ts +7 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/util/performance.d.ts +25 -0
- package/dist/src/util/performance.d.ts.map +1 -0
- package/dist/src/util/precommitOptimizations.d.ts +37 -0
- package/dist/src/util/precommitOptimizations.d.ts.map +1 -0
- package/guide/index.md +48 -0
- package/package.json +73 -0
package/dist/index.js
ADDED
@@ -0,0 +1,2346 @@
import { Formatter } from '@riotprompt/riotprompt';
import 'dotenv/config';
import shellescape from 'shell-escape';
import { getDryRunLogger, DEFAULT_MAX_DIFF_BYTES, DEFAULT_EXCLUDED_PATTERNS, Diff, Files, Log, DEFAULT_OUTPUT_DIRECTORY, sanitizeDirection, toAIConfig, createStorageAdapter, createLoggerAdapter, getOutputPath, getTimestampedResponseFilename, getTimestampedRequestFilename, filterContent, getTimestampedCommitFilename, improveContentWithLLM, getLogger, getTimestampedReviewNotesFilename, getTimestampedReviewFilename } from '@eldrforge/core';
import { ValidationError, ExternalDependencyError, CommandError, createStorage, checkForFileDependencies as checkForFileDependencies$1, logFileDependencyWarning, logFileDependencySuggestions, FileOperationError } from '@eldrforge/shared';
import { run, validateString, safeJsonParse, validatePackageJson, unstageAll, stageFiles, verifyStagedFiles, runSecure } from '@eldrforge/git-tools';
import { getRecentClosedIssuesForCommit, handleIssueCreation, getReleaseNotesContent, getIssuesContent } from '@eldrforge/github-tools';
import { runAgenticCommit, requireTTY, generateReflectionReport, getUserChoice, STANDARD_CHOICES, getLLMFeedbackInEditor, editContentInEditor, createCompletionWithRetry, createCommitPrompt, createReviewPrompt, createCompletion } from '@eldrforge/ai-service';
import path from 'path';
import fs from 'fs/promises';
import os from 'os';
import { spawn } from 'child_process';
// Helper function to read context files
async function readContextFiles(contextFiles, logger) {
    if (!contextFiles || contextFiles.length === 0) {
        return '';
    }
    const storage = createStorage();
    const contextParts = [];
    for (const filePath of contextFiles){
        try {
            const content = await storage.readFile(filePath, 'utf8');
            contextParts.push(`## Context from ${filePath}\n\n${content}\n`);
            logger.debug(`Read context from file: ${filePath}`);
        } catch (error) {
            logger.warn(`Failed to read context file ${filePath}: ${error.message}`);
        }
    }
    return contextParts.join('\n---\n\n');
}
// Helper function to generate self-reflection output using observability module
async function generateSelfReflection(agenticResult, outputDirectory, storage, logger) {
    try {
        const timestamp = new Date().toISOString().replace(/[:.]/g, '-').split('.')[0];
        const reflectionPath = getOutputPath(outputDirectory, `agentic-reflection-commit-${timestamp}.md`);
        // Use new observability reflection generator
        const report = await generateReflectionReport({
            iterations: agenticResult.iterations || 0,
            toolCallsExecuted: agenticResult.toolCallsExecuted || 0,
            maxIterations: agenticResult.maxIterations || 10,
            toolMetrics: agenticResult.toolMetrics || [],
            conversationHistory: agenticResult.conversationHistory || [],
            commitMessage: agenticResult.commitMessage,
            suggestedSplits: agenticResult.suggestedSplits || [],
            logger
        });
        // Save the report to output directory
        await storage.writeFile(reflectionPath, report, 'utf8');
        logger.info('');
        logger.info('ā'.repeat(80));
        logger.info('š SELF-REFLECTION REPORT GENERATED');
        logger.info('ā'.repeat(80));
        logger.info('');
        logger.info('š Location: %s', reflectionPath);
        logger.info('');
        logger.info('š Report Summary:');
        const iterations = agenticResult.iterations || 0;
        const toolCalls = agenticResult.toolCallsExecuted || 0;
        const uniqueTools = new Set((agenticResult.toolMetrics || []).map((m)=>m.name)).size;
        logger.info(` ⢠${iterations} iterations completed`);
        logger.info(` ⢠${toolCalls} tool calls executed`);
        logger.info(` ⢠${uniqueTools} unique tools used`);
        logger.info('');
        logger.info('š” Use this report to:');
        logger.info(' ⢠Understand which tools were most effective');
        logger.info(' ⢠Identify performance bottlenecks');
        logger.info(' ⢠Review the complete agentic conversation');
        logger.info(' ⢠Improve tool implementation based on metrics');
        logger.info('');
        logger.info('ā'.repeat(80));
    } catch (error) {
        logger.warn('Failed to generate self-reflection output: %s', error.message);
        logger.debug('Self-reflection error details:', error);
    }
}
// Helper function to get current version from package.json
async function getCurrentVersion(storage) {
    try {
        const packageJsonContents = await storage.readFile('package.json', 'utf-8');
        const packageJson = safeJsonParse(packageJsonContents, 'package.json');
        const validated = validatePackageJson(packageJson, 'package.json');
        return validated.version;
    } catch {
        // Return undefined if we can't read the version (not a critical failure)
        return undefined;
    }
}
// Helper function to edit commit message using editor
async function editCommitMessageInteractively(commitMessage) {
    const templateLines = [
        '# Edit your commit message below. Lines starting with "#" will be ignored.',
        '# Save and close the editor when you are done.'
    ];
    const result = await editContentInEditor(commitMessage, templateLines, '.txt');
    return result.content;
}
// Helper function to improve commit message using LLM
async function improveCommitMessageWithLLM(commitMessage, runConfig, promptConfig, promptContext, outputDirectory, diffContent) {
    // Get user feedback on what to improve using the editor
    const userFeedback = await getLLMFeedbackInEditor('commit message', commitMessage);
    // Create AI config from kodrdriv config
    const aiConfig = toAIConfig(runConfig);
    const aiStorageAdapter = createStorageAdapter(outputDirectory);
    const aiLogger = createLoggerAdapter(false);
    const improvementConfig = {
        contentType: 'commit message',
        createImprovedPrompt: async (promptConfig, currentMessage, promptContext)=>{
            var _aiConfig_commands_commit, _aiConfig_commands;
            const improvementPromptContent = {
                diffContent: diffContent,
                userDirection: `Please improve this commit message based on the user's feedback: "${userFeedback}".

Current commit message: "${currentMessage}"

Please revise the commit message according to the user's feedback while maintaining accuracy and following conventional commit standards if appropriate.`
            };
            const prompt = await createCommitPrompt(promptConfig, improvementPromptContent, promptContext);
            // Format the prompt into a proper request with messages
            const modelToUse = ((_aiConfig_commands = aiConfig.commands) === null || _aiConfig_commands === void 0 ? void 0 : (_aiConfig_commands_commit = _aiConfig_commands.commit) === null || _aiConfig_commands_commit === void 0 ? void 0 : _aiConfig_commands_commit.model) || aiConfig.model || 'gpt-4o-mini';
            return Formatter.create({
                logger: getDryRunLogger(false)
            }).formatPrompt(modelToUse, prompt);
        },
        callLLM: async (request, runConfig, outputDirectory)=>{
            var _aiConfig_commands_commit, _aiConfig_commands, _aiConfig_commands_commit1, _aiConfig_commands1;
            return await createCompletionWithRetry(request.messages, {
                model: ((_aiConfig_commands = aiConfig.commands) === null || _aiConfig_commands === void 0 ? void 0 : (_aiConfig_commands_commit = _aiConfig_commands.commit) === null || _aiConfig_commands_commit === void 0 ? void 0 : _aiConfig_commands_commit.model) || aiConfig.model,
                openaiReasoning: ((_aiConfig_commands1 = aiConfig.commands) === null || _aiConfig_commands1 === void 0 ? void 0 : (_aiConfig_commands_commit1 = _aiConfig_commands1.commit) === null || _aiConfig_commands_commit1 === void 0 ? void 0 : _aiConfig_commands_commit1.reasoning) || aiConfig.reasoning,
                debug: runConfig.debug,
                debugRequestFile: getOutputPath(outputDirectory, getTimestampedRequestFilename('commit-improve')),
                debugResponseFile: getOutputPath(outputDirectory, getTimestampedResponseFilename('commit-improve')),
                storage: aiStorageAdapter,
                logger: aiLogger
            });
        }
    };
    return await improveContentWithLLM(commitMessage, runConfig, promptConfig, promptContext, outputDirectory, improvementConfig);
}
// Interactive feedback loop for commit message
async function handleInteractiveCommitFeedback(commitMessage, runConfig, promptConfig, promptContext, outputDirectory, storage, diffContent, hasActualChanges, cached) {
    var _runConfig_commit, _runConfig_commit1;
    const logger = getDryRunLogger(false);
    let currentMessage = commitMessage;
    // Determine what the confirm action will do based on configuration
    const senditEnabled = (_runConfig_commit = runConfig.commit) === null || _runConfig_commit === void 0 ? void 0 : _runConfig_commit.sendit;
    const willActuallyCommit = senditEnabled && hasActualChanges && cached;
    // Create dynamic confirm choice based on configuration
    const isAmendMode = (_runConfig_commit1 = runConfig.commit) === null || _runConfig_commit1 === void 0 ? void 0 : _runConfig_commit1.amend;
    const confirmChoice = willActuallyCommit ? {
        key: 'c',
        label: isAmendMode ? 'Amend last commit with this message (sendit enabled)' : 'Commit changes with this message (sendit enabled)'
    } : {
        key: 'c',
        label: 'Accept message (you will need to commit manually)'
    };
    while(true){
        // Display the current commit message
        logger.info('\nš Generated Commit Message:');
        logger.info('ā'.repeat(50));
        logger.info(currentMessage);
        logger.info('ā'.repeat(50));
        // Show configuration status
        if (senditEnabled) {
            if (willActuallyCommit) {
                logger.info('\nSENDIT_MODE_ACTIVE: SendIt mode enabled | Action: Commit choice will execute git commit automatically | Staged Changes: Available');
            } else {
                logger.info('\nSENDIT_MODE_NO_CHANGES: SendIt mode configured but no staged changes | Action: Only message save available | Staged Changes: None');
            }
        } else {
            logger.info('\nSENDIT_MODE_INACTIVE: SendIt mode not active | Action: Accept choice will only save message | Commit: Manual');
        }
        // Get user choice
        const userChoice = await getUserChoice('\nWhat would you like to do with this commit message?', [
            confirmChoice,
            STANDARD_CHOICES.EDIT,
            STANDARD_CHOICES.SKIP,
            STANDARD_CHOICES.IMPROVE
        ], {
            nonTtyErrorSuggestions: [
                'Use --sendit flag to auto-commit without review'
            ]
        });
        switch(userChoice){
            case 'c':
                return {
                    action: 'commit',
                    finalMessage: currentMessage
                };
            case 'e':
                try {
                    currentMessage = await editCommitMessageInteractively(currentMessage);
                } catch (error) {
                    logger.error(`Failed to edit commit message: ${error.message}`);
                    // Continue the loop to show options again
                }
                break;
            case 's':
                return {
                    action: 'skip',
                    finalMessage: currentMessage
                };
            case 'i':
                try {
                    currentMessage = await improveCommitMessageWithLLM(currentMessage, runConfig, promptConfig, promptContext, outputDirectory, diffContent);
                } catch (error) {
                    logger.error(`Failed to improve commit message: ${error.message}`);
                    // Continue the loop to show options again
                }
                break;
        }
    }
}
// Helper function to check if there are any commits in the repository
const hasCommits = async ()=>{
    try {
        await run('git rev-parse HEAD');
        return true;
    } catch {
        // No commits found or not a git repository
        return false;
    }
};
// Helper function to push the commit
const pushCommit = async (pushConfig, logger, isDryRun)=>{
    if (!pushConfig) {
        return; // No push requested
    }
    // Determine the remote to push to
    let remote = 'origin';
    if (typeof pushConfig === 'string') {
        remote = pushConfig;
    }
    const pushCommand = `git push ${remote}`;
    if (isDryRun) {
        logger.info('Would push to %s with: %s', remote, pushCommand);
    } else {
        logger.info('š Pushing to %s...', remote);
        try {
            await run(pushCommand);
            logger.info('ā Push successful!');
        } catch (error) {
            logger.error('Failed to push to %s: %s', remote, error.message);
            throw new ExternalDependencyError(`Failed to push to ${remote}`, 'git', error);
        }
    }
};
// Simplified cached determination with single check
const determineCachedState = async (config)=>{
    var _config_commit, _config_commit1, _config_commit2;
    // If amend is used, we use staged changes (since we're amending the last commit)
    if ((_config_commit = config.commit) === null || _config_commit === void 0 ? void 0 : _config_commit.amend) {
        // For amend mode, check that there's a previous commit to amend
        const hasAnyCommits = await hasCommits();
        if (!hasAnyCommits) {
            throw new ValidationError('Cannot use --amend: no commits found in repository. Create an initial commit first.');
        }
        return true;
    }
    // If add is used, we always look at staged changes after add
    if ((_config_commit1 = config.commit) === null || _config_commit1 === void 0 ? void 0 : _config_commit1.add) {
        return true;
    }
    // If explicitly set, use that value
    if (((_config_commit2 = config.commit) === null || _config_commit2 === void 0 ? void 0 : _config_commit2.cached) !== undefined) {
        return config.commit.cached;
    }
    // Otherwise, check if there are staged changes
    return await Diff.hasStagedChanges();
};
// Single validation of sendit + cached state
const validateSenditState = (config, cached, isDryRun, logger)=>{
    var _config_commit;
    if (((_config_commit = config.commit) === null || _config_commit === void 0 ? void 0 : _config_commit.sendit) && !cached && !isDryRun) {
        const message = 'SendIt mode enabled, but no changes to commit.';
        logger.warn(message);
        return false; // Return false to indicate no changes to commit
    }
    return true; // Return true to indicate we can proceed
};
// Better file save handling with fallbacks
const saveCommitMessage = async (outputDirectory, summary, storage, logger)=>{
    const timestampedFilename = getTimestampedCommitFilename();
    const primaryPath = getOutputPath(outputDirectory, timestampedFilename);
    try {
        await storage.writeFile(primaryPath, summary, 'utf-8');
        logger.debug('Saved timestamped commit message: %s', primaryPath);
        return; // Success, no fallback needed
    } catch (error) {
        logger.warn('Failed to save commit message to primary location (%s): %s', primaryPath, error.message);
        logger.debug('Primary save error details:', error);
        // First fallback: try output directory root (in case subdirectory has issues)
        try {
            const outputRootPath = getOutputPath('output', timestampedFilename);
            await storage.writeFile(outputRootPath, summary, 'utf-8');
            logger.info('COMMIT_MESSAGE_SAVED_FALLBACK: Saved commit message to fallback location | Path: %s | Purpose: Preserve message for later use', outputRootPath);
            return;
        } catch (outputError) {
            logger.warn('Failed to save to output directory fallback: %s', outputError.message);
        }
        // Last resort fallback: save to current directory (this creates the clutter!)
        try {
            const fallbackPath = `commit-message-${Date.now()}.txt`;
            await storage.writeFile(fallbackPath, summary, 'utf-8');
            logger.warn('ā ļø Saved commit message to current directory as last resort: %s', fallbackPath);
            logger.warn('ā ļø This file should be moved to the output directory and may clutter your workspace');
        } catch (fallbackError) {
            logger.error('Failed to save commit message anywhere: %s', fallbackError.message);
            logger.error('Commit message will only be available in console output');
            // Continue execution - commit message is still returned
        }
    }
};
/**
 * Interactive review of a single split before committing
 */ async function reviewSplitInteractively(split, index, total, logger) {
    logger.info('');
    logger.info('ā'.repeat(80));
    logger.info(`š Commit ${index + 1} of ${total}`);
    logger.info('ā'.repeat(80));
    logger.info('');
    logger.info('Files (%d):', split.files.length);
    split.files.forEach((f)=>logger.info(` - ${f}`));
    logger.info('');
    logger.info('Rationale:');
    logger.info(` ${split.rationale}`);
    logger.info('');
    logger.info('Proposed Message:');
    logger.info('ā'.repeat(50));
    logger.info(split.message);
    logger.info('ā'.repeat(50));
    logger.info('');
    const choices = [
        {
            key: 'c',
            label: 'Commit with this message'
        },
        {
            key: 'e',
            label: 'Edit message before committing'
        },
        {
            key: 's',
            label: 'Skip this commit'
        },
        {
            key: 't',
            label: 'Stop - no more commits'
        }
    ];
    const choice = await getUserChoice('What would you like to do?', choices, {
        nonTtyErrorSuggestions: [
            'Use --sendit to auto-commit without review'
        ]
    });
    if (choice === 'e') {
        // Edit the message
        const edited = await editCommitMessageInteractively(split.message);
        return {
            action: 'commit',
            modifiedMessage: edited
        };
    } else if (choice === 'c') {
        return {
            action: 'commit'
        };
    } else if (choice === 's') {
        return {
            action: 'skip'
        };
    } else {
        return {
            action: 'stop'
        };
    }
}
/**
 * Create a single commit from a split
 */ async function createSingleSplitCommit(split, commitMessage, isDryRun, logger) {
    // Stage the files for this split
    if (isDryRun) {
        logger.debug(`[DRY RUN] Would stage: ${split.files.join(', ')}`);
    } else {
        await stageFiles(split.files);
        // Verify files were staged correctly
        const verification = await verifyStagedFiles(split.files);
        if (!verification.allPresent) {
            throw new ValidationError(`Stage verification failed. Missing: ${verification.missing.join(', ')}. ` + `Unexpected: ${verification.unexpected.join(', ')}`);
        }
    }
    // Create the commit
    if (isDryRun) {
        logger.debug(`[DRY RUN] Would commit with message: ${commitMessage}`);
        return undefined;
    } else {
        const validatedMessage = validateString(commitMessage, 'commit message');
        const escapedMessage = shellescape([
            validatedMessage
        ]);
        await run(`git commit -m ${escapedMessage}`);
        // Get the SHA of the commit we just created
        const result = await run('git rev-parse HEAD');
        const sha = (typeof result === 'string' ? result : result.stdout).trim();
        logger.debug(`Created commit: ${sha}`);
        return sha;
    }
}
/**
 * Execute a series of split commits
 */ async function executeSplitCommits(options) {
    const { splits, isDryRun, interactive, logger } = options;
    const result = {
        success: false,
        commitsCreated: 0,
        commits: [],
        skipped: 0
    };
    try {
        logger.debug('Preparing to create split commits...');
        logger.info('');
        logger.info('ā'.repeat(80));
        logger.info(`š Creating ${splits.length} commits from staged changes`);
        logger.info('ā'.repeat(80));
        // Process each split
        for(let i = 0; i < splits.length; i++){
            const split = splits[i];
            logger.info('');
            logger.info(`Processing commit ${i + 1} of ${splits.length}...`);
            // Interactive review if enabled
            let commitMessage = split.message;
            if (interactive && !isDryRun) {
                const review = await reviewSplitInteractively(split, i, splits.length, logger);
                if (review.action === 'stop') {
                    logger.info('User stopped split commit process');
                    logger.info(`Created ${result.commitsCreated} commits before stopping`);
                    result.success = false;
                    return result;
                } else if (review.action === 'skip') {
                    logger.info(`Skipped commit ${i + 1}`);
                    result.skipped++;
                    continue;
                } else if (review.action === 'edit') {
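                    // Note: reviewSplitInteractively reports an edited message as action 'commit' with modifiedMessage set, so as written this 'edit' branch appears unreachable and the edited message is not applied here.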
                    commitMessage = review.modifiedMessage;
                }
            }
            try {
                // Unstage everything first
                if (!isDryRun) {
                    await unstageAll();
                }
                // Create this split's commit
                const sha = await createSingleSplitCommit(split, commitMessage, isDryRun, logger);
                result.commits.push({
                    message: commitMessage,
                    files: split.files,
                    sha
                });
                result.commitsCreated++;
                if (isDryRun) {
                    logger.info(`[DRY RUN] Would create commit ${i + 1}: ${commitMessage.split('\n')[0]}`);
                } else {
                    logger.info(`ā Created commit ${i + 1}: ${sha === null || sha === void 0 ? void 0 : sha.substring(0, 7)} - ${commitMessage.split('\n')[0]}`);
                }
            } catch (error) {
                logger.error(`Failed to create commit ${i + 1}: ${error.message}`);
                logger.info(`Successfully created ${result.commitsCreated} commits before error`);
                // Re-stage remaining files for user
                if (!isDryRun) {
                    const remainingFiles = splits.slice(i).flatMap((s)=>s.files);
                    try {
                        await stageFiles(remainingFiles);
                        logger.info(`Remaining ${remainingFiles.length} files are staged for manual commit`);
                    } catch (restageError) {
                        logger.error(`Failed to re-stage remaining files: ${restageError.message}`);
                    }
                }
                result.success = false;
                result.error = error;
                return result;
            }
        }
        result.success = true;
        return result;
    } catch (error) {
        logger.error(`Split commit process failed: ${error.message}`);
        result.success = false;
        result.error = error;
        return result;
    }
}
/**
 * Format a summary message for split commits
 */ function formatSplitCommitSummary(result) {
    const lines = [];
    lines.push('');
    lines.push('ā'.repeat(80));
    lines.push('ā COMMIT SPLITTING COMPLETE');
    lines.push('ā'.repeat(80));
    lines.push('');
    lines.push(`Total commits created: ${result.commitsCreated}`);
    if (result.skipped > 0) {
        lines.push(`Commits skipped: ${result.skipped}`);
    }
    lines.push('');
    if (result.commits.length > 0) {
        lines.push('Commits:');
        lines.push('');
        result.commits.forEach((commit, idx)=>{
            const sha = commit.sha ? `${commit.sha.substring(0, 7)} ` : '';
            const firstLine = commit.message.split('\n')[0];
            lines.push(` ${idx + 1}. ${sha}${firstLine}`);
            lines.push(` Files: ${commit.files.length}`);
        });
    }
    lines.push('');
    lines.push('ā'.repeat(80));
    return lines.join('\n');
}
const executeInternal$2 = async (runConfig)=>{
    var _ref, _runConfig_excludedPatterns;
    var _runConfig_commit, _runConfig_commit1, _runConfig_commit2, _runConfig_commit3, _runConfig_commit4, _runConfig_commit5, _runConfig_commit6, _aiConfig_commands_commit, _aiConfig_commands, _runConfig_commit7, _aiConfig_commands_commit1, _aiConfig_commands1, _runConfig_commit8, _runConfig_commit9, _runConfig_commit10, _runConfig_commit11, _runConfig_commit12, _runConfig_commit13, _runConfig_commit14;
    const isDryRun = runConfig.dryRun || false;
    const logger = getDryRunLogger(isDryRun);
    // Track if user explicitly chose to skip in interactive mode
    let userSkippedCommit = false;
    if ((_runConfig_commit = runConfig.commit) === null || _runConfig_commit === void 0 ? void 0 : _runConfig_commit.add) {
        if (isDryRun) {
            logger.info('GIT_ADD_DRY_RUN: Would stage all changes | Mode: dry-run | Command: git add -A');
        } else {
            logger.info('GIT_ADD_STAGING: Adding all changes to index | Command: git add -A | Scope: all files | Purpose: Stage for commit');
            await run('git add -A');
            logger.info('GIT_ADD_SUCCESS: Successfully staged all changes | Command: git add -A | Status: completed');
        }
    }
    // Determine cached state with single, clear logic
    const cached = await determineCachedState(runConfig);
    // Validate sendit state early - now returns boolean instead of throwing
    validateSenditState(runConfig, cached, isDryRun, logger);
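    // Note: the boolean returned by validateSenditState is discarded here; the no-staged-changes case is handled again by the hasActualChanges/cached checks further below.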
    let diffContent = '';
    const maxDiffBytes = (_ref = (_runConfig_commit1 = runConfig.commit) === null || _runConfig_commit1 === void 0 ? void 0 : _runConfig_commit1.maxDiffBytes) !== null && _ref !== void 0 ? _ref : DEFAULT_MAX_DIFF_BYTES;
    const options = {
        cached,
        excludedPatterns: (_runConfig_excludedPatterns = runConfig.excludedPatterns) !== null && _runConfig_excludedPatterns !== void 0 ? _runConfig_excludedPatterns : DEFAULT_EXCLUDED_PATTERNS,
        maxDiffBytes
    };
    const diff = await Diff.create(options);
    diffContent = await diff.get();
    // Check if there are actually any changes in the diff
    let hasActualChanges = diffContent.trim().length > 0;
    // If no changes found with current patterns, check for critical excluded files
    if (!hasActualChanges) {
        const criticalChanges = await Diff.hasCriticalExcludedChanges();
        if (criticalChanges.hasChanges) {
            var _runConfig_commit15;
            logger.info('CRITICAL_FILES_DETECTED: No changes with exclusion patterns, but critical files modified | Files: %s | Action: May need to include critical files', criticalChanges.files.join(', '));
            if (((_runConfig_commit15 = runConfig.commit) === null || _runConfig_commit15 === void 0 ? void 0 : _runConfig_commit15.sendit) && !isDryRun) {
                var _runConfig_excludedPatterns1;
                // In sendit mode, automatically include critical files
                logger.info('SENDIT_INCLUDING_CRITICAL: SendIt mode including critical files in diff | Purpose: Ensure all important changes are captured');
                const minimalPatterns = Diff.getMinimalExcludedPatterns((_runConfig_excludedPatterns1 = runConfig.excludedPatterns) !== null && _runConfig_excludedPatterns1 !== void 0 ? _runConfig_excludedPatterns1 : DEFAULT_EXCLUDED_PATTERNS);
                const updatedOptions = {
                    ...options,
                    excludedPatterns: minimalPatterns
                };
                const updatedDiff = await Diff.create(updatedOptions);
                diffContent = await updatedDiff.get();
                if (diffContent.trim().length > 0) {
                    logger.info('CRITICAL_FILES_INCLUDED: Successfully added critical files to diff | Status: ready for commit message generation');
                    // Update hasActualChanges since we now have content after including critical files
                    hasActualChanges = true;
                } else {
                    logger.warn('No changes detected even after including critical files.');
                    return 'No changes to commit.';
                }
            } else {
                var _runConfig_excludedPatterns2;
                // In non-sendit mode, suggest including the files
                logger.warn('Consider including these files by using:');
                logger.warn(' kodrdriv commit --excluded-paths %s', ((_runConfig_excludedPatterns2 = runConfig.excludedPatterns) !== null && _runConfig_excludedPatterns2 !== void 0 ? _runConfig_excludedPatterns2 : DEFAULT_EXCLUDED_PATTERNS).filter((p)=>!criticalChanges.files.some((f)=>p.includes(f.split('/').pop() || ''))).map((p)=>`"${p}"`).join(' '));
                logger.warn('Or run with --sendit to automatically include critical files.');
                if (!isDryRun) {
                    return 'No changes to commit. Use suggestions above to include critical files.';
                } else {
                    logger.info('Generating commit message template for future use...');
                }
            }
        } else {
            var _runConfig_commit16;
            // No changes at all - try fallback to file content for new repositories
            logger.info('NO_CHANGES_DETECTED: No changes found in working directory | Status: clean | Action: Nothing to commit');
            if (((_runConfig_commit16 = runConfig.commit) === null || _runConfig_commit16 === void 0 ? void 0 : _runConfig_commit16.sendit) && !isDryRun) {
                logger.warn('No changes detected to commit. Skipping commit operation.');
                return 'No changes to commit.';
            } else {
                var _runConfig_excludedPatterns3;
                logger.info('NO_DIFF_FALLBACK: No diff content available | Action: Attempting to generate commit message from file content | Strategy: fallback');
                // Create file content collector as fallback
                const fileOptions = {
                    excludedPatterns: (_runConfig_excludedPatterns3 = runConfig.excludedPatterns) !== null && _runConfig_excludedPatterns3 !== void 0 ? _runConfig_excludedPatterns3 : DEFAULT_EXCLUDED_PATTERNS,
                    maxTotalBytes: maxDiffBytes * 5,
                    workingDirectory: process.cwd()
                };
                const files = await Files.create(fileOptions);
                const fileContent = await files.get();
                if (fileContent && fileContent.trim().length > 0) {
                    logger.info('FILE_CONTENT_USING: Using file content for commit message generation | Content Length: %d characters | Source: file content', fileContent.length);
                    diffContent = fileContent;
                    hasActualChanges = true; // We have content to work with
                } else {
                    var _runConfig_commit17;
                    if ((_runConfig_commit17 = runConfig.commit) === null || _runConfig_commit17 === void 0 ? void 0 : _runConfig_commit17.sendit) {
                        logger.info('COMMIT_SKIPPED: Skipping commit operation | Reason: No changes detected | Action: None');
                        return 'No changes to commit.';
                    } else {
                        logger.info('COMMIT_TEMPLATE_GENERATING: Creating commit message template for future use | Reason: No changes | Purpose: Provide template');
                    }
                }
            }
        }
    }
    const logOptions = {
        limit: (_runConfig_commit2 = runConfig.commit) === null || _runConfig_commit2 === void 0 ? void 0 : _runConfig_commit2.messageLimit
    };
    const log = await Log.create(logOptions);
    const logContext = await log.get();
    // Always ensure output directory exists for request/response files and GitHub issues lookup
    const outputDirectory = runConfig.outputDirectory || DEFAULT_OUTPUT_DIRECTORY;
    const storage = createStorage();
    await storage.ensureDirectory(outputDirectory);
    // Get GitHub issues context for large commits [[memory:5887795]]
    let githubIssuesContext = '';
    try {
        const currentVersion = await getCurrentVersion(storage);
        if (currentVersion) {
            logger.debug(`Found current version: ${currentVersion}, fetching related GitHub issues...`);
            githubIssuesContext = await getRecentClosedIssuesForCommit(currentVersion, 10);
            if (githubIssuesContext) {
                logger.debug(`Fetched GitHub issues context (${githubIssuesContext.length} characters)`);
            } else {
                logger.debug('No relevant GitHub issues found for commit context');
            }
        } else {
            logger.debug('Could not determine current version, fetching recent issues without milestone filtering...');
            githubIssuesContext = await getRecentClosedIssuesForCommit(undefined, 10);
            if (githubIssuesContext) {
                logger.debug(`Fetched general GitHub issues context (${githubIssuesContext.length} characters)`);
            }
        }
    } catch (error) {
        logger.debug(`Failed to fetch GitHub issues for commit context: ${error.message}`);
        // Continue without GitHub context - this shouldn't block commit generation
    }
    const promptConfig = {
        overridePaths: runConfig.discoveredConfigDirs || [],
        overrides: runConfig.overrides || false
    };
    const userDirection = sanitizeDirection((_runConfig_commit3 = runConfig.commit) === null || _runConfig_commit3 === void 0 ? void 0 : _runConfig_commit3.direction);
    if (userDirection) {
        logger.debug('Using user direction: %s', userDirection);
    }
    // Create adapters for ai-service
    const aiConfig = toAIConfig(runConfig);
    const aiStorageAdapter = createStorageAdapter(outputDirectory);
    const aiLogger = createLoggerAdapter(isDryRun);
    // Read context from files if provided
    const contextFromFiles = await readContextFiles((_runConfig_commit4 = runConfig.commit) === null || _runConfig_commit4 === void 0 ? void 0 : _runConfig_commit4.contextFiles, logger);
    // Combine file context with existing context
    const combinedContext = [
        (_runConfig_commit5 = runConfig.commit) === null || _runConfig_commit5 === void 0 ? void 0 : _runConfig_commit5.context,
        contextFromFiles
    ].filter(Boolean).join('\n\n---\n\n');
    // Define promptContext for use in interactive improvements
    const promptContext = {
        logContext,
        context: combinedContext || undefined,
        directories: runConfig.contextDirectories
    };
    // Announce self-reflection if enabled
    if ((_runConfig_commit6 = runConfig.commit) === null || _runConfig_commit6 === void 0 ? void 0 : _runConfig_commit6.selfReflection) {
        logger.info('š Self-reflection enabled - detailed analysis will be generated');
    }
    // Get list of changed files
    const changedFilesResult = await run(`git diff --name-only ${cached ? '--cached' : ''}`);
    const changedFilesOutput = typeof changedFilesResult === 'string' ? changedFilesResult : changedFilesResult.stdout;
    const changedFiles = changedFilesOutput.split('\n').filter((f)=>f.trim().length > 0);
    logger.debug('Changed files for analysis: %d files', changedFiles.length);
    // Run agentic commit generation
    const agenticResult = await runAgenticCommit({
        changedFiles,
        diffContent,
        userDirection,
        logContext,
        model: ((_aiConfig_commands = aiConfig.commands) === null || _aiConfig_commands === void 0 ? void 0 : (_aiConfig_commands_commit = _aiConfig_commands.commit) === null || _aiConfig_commands_commit === void 0 ? void 0 : _aiConfig_commands_commit.model) || aiConfig.model,
        maxIterations: ((_runConfig_commit7 = runConfig.commit) === null || _runConfig_commit7 === void 0 ? void 0 : _runConfig_commit7.maxAgenticIterations) || 10,
        debug: runConfig.debug,
        debugRequestFile: getOutputPath(outputDirectory, getTimestampedRequestFilename('commit')),
        debugResponseFile: getOutputPath(outputDirectory, getTimestampedResponseFilename('commit')),
        storage: aiStorageAdapter,
        logger: aiLogger,
        openaiReasoning: ((_aiConfig_commands1 = aiConfig.commands) === null || _aiConfig_commands1 === void 0 ? void 0 : (_aiConfig_commands_commit1 = _aiConfig_commands1.commit) === null || _aiConfig_commands_commit1 === void 0 ? void 0 : _aiConfig_commands_commit1.reasoning) || aiConfig.reasoning
    });
    const iterations = agenticResult.iterations || 0;
    const toolCalls = agenticResult.toolCallsExecuted || 0;
    logger.info(`š Analysis complete: ${iterations} iterations, ${toolCalls} tool calls`);
    // Generate self-reflection output if enabled
    if ((_runConfig_commit8 = runConfig.commit) === null || _runConfig_commit8 === void 0 ? void 0 : _runConfig_commit8.selfReflection) {
        await generateSelfReflection(agenticResult, outputDirectory, storage, logger);
    }
    // Check for suggested splits
    if (agenticResult.suggestedSplits.length > 1 && ((_runConfig_commit9 = runConfig.commit) === null || _runConfig_commit9 === void 0 ? void 0 : _runConfig_commit9.allowCommitSplitting)) {
        var _runConfig_commit18;
        logger.info('\nš AI suggests splitting this into %d commits:', agenticResult.suggestedSplits.length);
        for(let i = 0; i < agenticResult.suggestedSplits.length; i++){
            const split = agenticResult.suggestedSplits[i];
            logger.info('\nCommit %d (%d files):', i + 1, split.files.length);
            logger.info(' Files: %s', split.files.join(', '));
            logger.info(' Rationale: %s', split.rationale);
            logger.info(' Message: %s', split.message);
        }
        // NEW: Check if auto-split is enabled (defaults to true if not specified)
        const autoSplitEnabled = ((_runConfig_commit18 = runConfig.commit) === null || _runConfig_commit18 === void 0 ? void 0 : _runConfig_commit18.autoSplit) !== false; // Default to true
        if (autoSplitEnabled) {
            var _runConfig_commit19, _runConfig_commit20;
            logger.info('\nš Auto-split enabled - creating separate commits...\n');
            const splitResult = await executeSplitCommits({
                splits: agenticResult.suggestedSplits,
                isDryRun,
                interactive: !!(((_runConfig_commit19 = runConfig.commit) === null || _runConfig_commit19 === void 0 ? void 0 : _runConfig_commit19.interactive) && !((_runConfig_commit20 = runConfig.commit) === null || _runConfig_commit20 === void 0 ? void 0 : _runConfig_commit20.sendit)),
                logger
            });
            if (splitResult.success) {
                var _runConfig_commit21;
                // Push if requested (all commits)
                if (((_runConfig_commit21 = runConfig.commit) === null || _runConfig_commit21 === void 0 ? void 0 : _runConfig_commit21.push) && !isDryRun) {
                    await pushCommit(runConfig.commit.push, logger, isDryRun);
                }
                return formatSplitCommitSummary(splitResult);
            } else {
                var _splitResult_error;
                const errorMessage = ((_splitResult_error = splitResult.error) === null || _splitResult_error === void 0 ? void 0 : _splitResult_error.message) || 'Unknown error';
                throw new CommandError(`Failed to create split commits: ${errorMessage}`, 'SPLIT_COMMIT_FAILED', false, splitResult.error);
            }
        } else {
            logger.info('\nā ļø Commit splitting is not automated. Please stage and commit files separately.');
            logger.info('Using combined message for now...\n');
            logger.info('š” To enable automatic splitting, add autoSplit: true to your commit configuration');
        }
    } else if (agenticResult.suggestedSplits.length > 1) {
        logger.debug('AI suggested %d splits but commit splitting is not enabled', agenticResult.suggestedSplits.length);
    }
    const rawSummary = agenticResult.commitMessage;
    // Apply stop-context filtering to commit message
    const filterResult = filterContent(rawSummary, runConfig.stopContext);
    const summary = filterResult.filtered;
    // Save timestamped copy of commit message with better error handling
    await saveCommitMessage(outputDirectory, summary, storage, logger);
    // š”ļø Universal Safety Check: Run before ANY commit operation
    // This protects both direct commits (--sendit) and automated commits (publish, etc.)
    const willCreateCommit = ((_runConfig_commit10 = runConfig.commit) === null || _runConfig_commit10 === void 0 ? void 0 : _runConfig_commit10.sendit) && hasActualChanges && cached;
    if (willCreateCommit && !((_runConfig_commit11 = runConfig.commit) === null || _runConfig_commit11 === void 0 ? void 0 : _runConfig_commit11.skipFileCheck) && !isDryRun) {
        logger.debug('Checking for file: dependencies before commit operation...');
        try {
            const fileDependencyIssues = await checkForFileDependencies$1(storage, process.cwd());
            if (fileDependencyIssues.length > 0) {
                var _runConfig_commit22;
                logger.error('š« COMMIT BLOCKED: Found file: dependencies that should not be committed!');
                logger.error('');
                logFileDependencyWarning(fileDependencyIssues, 'commit');
                logFileDependencySuggestions(true);
                logger.error('Generated commit message was:');
                logger.error('%s', summary);
                logger.error('');
                if ((_runConfig_commit22 = runConfig.commit) === null || _runConfig_commit22 === void 0 ? void 0 : _runConfig_commit22.sendit) {
                    logger.error('To bypass this check, use: kodrdriv commit --skip-file-check --sendit');
                } else {
                    logger.error('To bypass this check, add skipFileCheck: true to your commit configuration');
                }
                throw new ValidationError('Found file: dependencies that should not be committed. Use --skip-file-check to bypass.');
            }
            logger.debug('ā No file: dependencies found, proceeding with commit');
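            // Note: the ValidationError thrown above is raised inside this try block, so as written it is swallowed by the catch below and the commit proceeds with only a warning.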
        } catch (error) {
            logger.warn('Warning: Could not check for file: dependencies: %s', error.message);
            logger.warn('Proceeding with commit...');
        }
    } else if (((_runConfig_commit12 = runConfig.commit) === null || _runConfig_commit12 === void 0 ? void 0 : _runConfig_commit12.skipFileCheck) && willCreateCommit) {
        logger.warn('ā ļø Skipping file: dependency check as requested');
    }
    // Handle interactive mode
    if (((_runConfig_commit13 = runConfig.commit) === null || _runConfig_commit13 === void 0 ? void 0 : _runConfig_commit13.interactive) && !isDryRun) {
        var _runConfig_commit23;
        requireTTY('Interactive mode requires a terminal. Use --sendit or --dry-run instead.');
        const interactiveResult = await handleInteractiveCommitFeedback(summary, runConfig, promptConfig, promptContext, outputDirectory, storage, diffContent, hasActualChanges, cached);
        if (interactiveResult.action === 'skip') {
            logger.info('COMMIT_ABORTED: User aborted commit operation | Reason: User choice | Action: No commit performed');
            logger.info('COMMIT_NO_ACTION: No commit will be performed | Status: aborted | Next: User can retry or modify changes');
            userSkippedCommit = true;
            return interactiveResult.finalMessage;
        }
        // User chose to commit - check if sendit is enabled to determine what action to take
        const senditEnabled = (_runConfig_commit23 = runConfig.commit) === null || _runConfig_commit23 === void 0 ? void 0 : _runConfig_commit23.sendit;
        const willActuallyCommit = senditEnabled && hasActualChanges && cached;
        if (willActuallyCommit) {
            var _runConfig_commit24;
            const commitAction = ((_runConfig_commit24 = runConfig.commit) === null || _runConfig_commit24 === void 0 ? void 0 : _runConfig_commit24.amend) ? 'amending last commit' : 'committing';
            logger.info('SENDIT_EXECUTING: SendIt enabled, executing commit action | Action: %s | Message Length: %d | Final Message: \n\n%s\n\n', commitAction.charAt(0).toUpperCase() + commitAction.slice(1), interactiveResult.finalMessage.length, interactiveResult.finalMessage);
            try {
                var _runConfig_commit25, _runConfig_commit26;
                const validatedSummary = validateString(interactiveResult.finalMessage, 'commit summary');
                const escapedSummary = shellescape([
                    validatedSummary
                ]);
                const commitCommand = ((_runConfig_commit25 = runConfig.commit) === null || _runConfig_commit25 === void 0 ? void 0 : _runConfig_commit25.amend) ? `git commit --amend -m ${escapedSummary}` : `git commit -m ${escapedSummary}`;
                await run(commitCommand);
                logger.info('COMMIT_SUCCESS: Commit operation completed successfully | Status: committed | Action: Changes saved to repository');
                // Push if requested
                await pushCommit((_runConfig_commit26 = runConfig.commit) === null || _runConfig_commit26 === void 0 ? void 0 : _runConfig_commit26.push, logger, isDryRun);
            } catch (error) {
                logger.error('Failed to commit:', error);
                throw new ExternalDependencyError('Failed to create commit', 'git', error);
            }
        } else if (senditEnabled && (!hasActualChanges || !cached)) {
            logger.info('š SendIt enabled but no staged changes available. Final message saved: \n\n%s\n\n', interactiveResult.finalMessage);
            if (!hasActualChanges) {
                logger.info('š” No changes detected to commit');
            } else if (!cached) {
                logger.info('š” No staged changes found. Use "git add" to stage changes or configure add: true in commit settings');
            }
        } else {
            logger.info('š Message accepted (SendIt not enabled). Use this commit message manually: \n\n%s\n\n', interactiveResult.finalMessage);
            logger.info('š” To automatically commit, add sendit: true to your commit configuration');
        }
        return interactiveResult.finalMessage;
    }
    // Safety check: Never commit if user explicitly skipped in interactive mode
    if (userSkippedCommit) {
        logger.debug('Skipping sendit logic because user chose to skip in interactive mode');
        return summary;
    }
    if ((_runConfig_commit14 = runConfig.commit) === null || _runConfig_commit14 === void 0 ? void 0 : _runConfig_commit14.sendit) {
        if (isDryRun) {
            var _runConfig_commit27, _runConfig_commit28;
            logger.info('Would commit with message: \n\n%s\n\n', summary);
            const commitAction = ((_runConfig_commit27 = runConfig.commit) === null || _runConfig_commit27 === void 0 ? void 0 : _runConfig_commit27.amend) ? 'git commit --amend -m <generated-message>' : 'git commit -m <generated-message>';
            logger.info('Would execute: %s', commitAction);
            // Show push command in dry run if requested
            if ((_runConfig_commit28 = runConfig.commit) === null || _runConfig_commit28 === void 0 ? void 0 : _runConfig_commit28.push) {
                const remote = typeof runConfig.commit.push === 'string' ? runConfig.commit.push : 'origin';
                logger.info('Would push to %s with: git push %s', remote, remote);
            }
        } else if (hasActualChanges && cached) {
            var _runConfig_commit29;
            const commitAction = ((_runConfig_commit29 = runConfig.commit) === null || _runConfig_commit29 === void 0 ? void 0 : _runConfig_commit29.amend) ? 'amending commit' : 'committing';
            logger.info('SendIt mode enabled. %s with message: \n\n%s\n\n', commitAction.charAt(0).toUpperCase() + commitAction.slice(1), summary);
            try {
                var _runConfig_commit30, _runConfig_commit31;
                const validatedSummary = validateString(summary, 'commit summary');
                const escapedSummary = shellescape([
                    validatedSummary
                ]);
                const commitCommand = ((_runConfig_commit30 = runConfig.commit) === null || _runConfig_commit30 === void 0 ? void 0 : _runConfig_commit30.amend) ? `git commit --amend -m ${escapedSummary}` : `git commit -m ${escapedSummary}`;
                await run(commitCommand);
                logger.info('Commit successful!');
                // Push if requested
                await pushCommit((_runConfig_commit31 = runConfig.commit) === null || _runConfig_commit31 === void 0 ? void 0 : _runConfig_commit31.push, logger, isDryRun);
            } catch (error) {
                logger.error('Failed to commit:', error);
                throw new ExternalDependencyError('Failed to create commit', 'git', error);
            }
        } else {
            logger.info('SendIt mode enabled, but no changes to commit. Generated message: \n\n%s\n\n', summary);
        }
    } else if (isDryRun) {
        logger.info('Generated commit message: \n\n%s\n\n', summary);
    } else {
        // Default behavior when neither --interactive nor --sendit is specified
        logger.info('Generated commit message: \n\n%s\n\n', summary);
    }
    return summary;
};
const execute$3 = async (runConfig)=>{
    try {
        return await executeInternal$2(runConfig);
    } catch (error) {
        // Import getLogger for error handling
        const { getLogger } = await import('@eldrforge/core');
        const standardLogger = getLogger();
        if (error instanceof ValidationError || error instanceof ExternalDependencyError || error instanceof CommandError) {
            standardLogger.error(`commit failed: ${error.message}`);
            if (error.cause && typeof error.cause === 'object' && 'message' in error.cause) {
                standardLogger.debug(`Caused by: ${error.cause.message}`);
            } else if (error.cause) {
                standardLogger.debug(`Caused by: ${error.cause}`);
            }
            throw error;
        }
        // Unexpected errors
        standardLogger.error(`commit encountered unexpected error: ${error.message}`);
        throw error;
    }
};

901
|
+
const logger = getLogger();
|
|
902
|
+
// Cache file to store test run timestamps per package
|
|
903
|
+
const TEST_CACHE_FILE = '.kodrdriv-test-cache.json';
|
|
904
|
+
/**
|
|
905
|
+
* Load test cache from disk
|
|
906
|
+
*/ async function loadTestCache(packageDir) {
|
|
907
|
+
const cachePath = path.join(packageDir, TEST_CACHE_FILE);
|
|
908
|
+
try {
|
|
909
|
+
const content = await fs.readFile(cachePath, 'utf-8');
|
|
910
|
+
return JSON.parse(content);
|
|
911
|
+
} catch {
|
|
912
|
+
return {};
|
|
913
|
+
}
|
|
914
|
+
}
|
|
915
|
+
/**
|
|
916
|
+
* Save test cache to disk
|
|
917
|
+
*/ async function saveTestCache(packageDir, cache) {
|
|
918
|
+
const cachePath = path.join(packageDir, TEST_CACHE_FILE);
|
|
919
|
+
try {
|
|
920
|
+
await fs.writeFile(cachePath, JSON.stringify(cache, null, 2), 'utf-8');
|
|
921
|
+
} catch (error) {
|
|
922
|
+
logger.debug(`Failed to save test cache: ${error.message}`);
|
|
923
|
+
}
|
|
924
|
+
}
|
|
925
|
+
/**
|
|
926
|
+
* Get the current git commit hash
|
|
927
|
+
*/ async function getCurrentCommitHash(packageDir) {
|
|
928
|
+
try {
|
|
929
|
+
const { stdout } = await runSecure('git', [
|
|
930
|
+
'rev-parse',
|
|
931
|
+
'HEAD'
|
|
932
|
+
], {
|
|
933
|
+
cwd: packageDir
|
|
934
|
+
});
|
|
935
|
+
return stdout.trim();
|
|
936
|
+
} catch {
|
|
937
|
+
return null;
|
|
938
|
+
}
|
|
939
|
+
}
/**
 * Check if source files have changed since the last test run
 */ async function hasSourceFilesChanged(packageDir, lastCommitHash) {
    if (!lastCommitHash) {
        return { changed: true, reason: 'No previous test run recorded' };
    }
    try {
        // Get current commit hash
        const currentCommitHash = await getCurrentCommitHash(packageDir);
        if (!currentCommitHash) {
            return { changed: true, reason: 'Not in a git repository' };
        }
        // If commit hash changed, files definitely changed
        if (currentCommitHash !== lastCommitHash) {
            return {
                changed: true,
                reason: `Commit hash changed: ${lastCommitHash.substring(0, 7)} -> ${currentCommitHash.substring(0, 7)}`
            };
        }
        // Check if there are any uncommitted changes to source files
        const { stdout } = await runSecure('git', ['status', '--porcelain'], { cwd: packageDir });
        const changedFiles = stdout.split('\n').filter((line)=>line.trim()).map((line)=>line.substring(3).trim()).filter((file)=>{
            // Only consider source files, not build artifacts or config files
            const ext = path.extname(file);
            return (
                // TypeScript/JavaScript source files
                ['.ts', '.tsx', '.js', '.jsx'].includes(ext) ||
                // Test files
                file.includes('.test.') || file.includes('.spec.') ||
                // Config files that affect build/test
                ['tsconfig.json', 'vite.config.ts', 'vitest.config.ts', 'package.json'].includes(path.basename(file))
            );
        });
        if (changedFiles.length > 0) {
            return {
                changed: true,
                reason: `Uncommitted changes in: ${changedFiles.slice(0, 3).join(', ')}${changedFiles.length > 3 ? '...' : ''}`
            };
        }
        return { changed: false, reason: 'No source file changes detected' };
    } catch (error) {
        logger.debug(`Error checking for source file changes: ${error.message}`);
        // Conservative: assume changed if we can't verify
        return { changed: true, reason: `Could not verify changes: ${error.message}` };
    }
}
/**
 * Check if dist directory needs to be cleaned (is outdated compared to source files)
 */ async function isCleanNeeded(packageDir) {
    const storage = createStorage();
    const distPath = path.join(packageDir, 'dist');
    try {
        // Check if dist directory exists
        const distExists = await storage.exists('dist');
        if (!distExists) {
            return { needed: false, reason: 'dist directory does not exist' };
        }
        // Get dist directory modification time
        const distStats = await fs.stat(distPath);
        const distMtime = distStats.mtimeMs;
        // Use git to find source files that are newer than dist
        try {
            // Get all tracked source files
            const { stdout: trackedFiles } = await runSecure('git', ['ls-files'], { cwd: packageDir });
            const files = trackedFiles.split('\n').filter(Boolean);
            // Check if any source files are newer than dist
            for (const file of files){
                const ext = path.extname(file);
                if (!['.ts', '.tsx', '.js', '.jsx', '.json'].includes(ext)) {
                    continue;
                }
                // Skip dist files
                if (file.startsWith('dist/')) {
                    continue;
                }
                try {
                    const filePath = path.join(packageDir, file);
                    const fileStats = await fs.stat(filePath);
                    if (fileStats.mtimeMs > distMtime) {
                        return { needed: true, reason: `${file} is newer than dist directory` };
                    }
                } catch {
                    continue;
                }
            }
            return { needed: false, reason: 'dist directory is up to date with source files' };
        } catch (error) {
            // If git check fails, fall back to checking common source directories
            logger.debug(`Git-based check failed, using fallback: ${error.message}`);
            const sourceDirs = ['src', 'tests'];
            for (const dir of sourceDirs){
                const dirPath = path.join(packageDir, dir);
                try {
                    const dirStats = await fs.stat(dirPath);
                    if (dirStats.mtimeMs > distMtime) {
                        return { needed: true, reason: `${dir} directory is newer than dist` };
                    }
                } catch {
                    continue;
                }
            }
            // Conservative: if we can't verify, assume clean is needed
            return { needed: true, reason: 'Could not verify dist freshness, cleaning to be safe' };
        }
    } catch (error) {
        logger.debug(`Error checking if clean is needed: ${error.message}`);
        // Conservative: assume clean is needed if we can't check
        return { needed: true, reason: `Could not verify: ${error.message}` };
    }
}
/**
 * Check if tests need to be run (source files changed since last test run)
 */ async function isTestNeeded(packageDir) {
    try {
        // Load test cache
        const cache = await loadTestCache(packageDir);
        const cacheKey = packageDir;
        // Check if we have a cached test run for this package
        const cached = cache[cacheKey];
        if (!cached) {
            return { needed: true, reason: 'No previous test run recorded' };
        }
        // Check if source files have changed since last test run
        const changeCheck = await hasSourceFilesChanged(packageDir, cached.lastCommitHash);
        if (changeCheck.changed) {
            return { needed: true, reason: changeCheck.reason };
        }
        return { needed: false, reason: 'No source file changes since last test run' };
    } catch (error) {
        logger.debug(`Error checking if test is needed: ${error.message}`);
        // Conservative: assume test is needed if we can't check
        return { needed: true, reason: `Could not verify: ${error.message}` };
    }
}
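// Minimal usage sketch (hypothetical caller; the real call sites are in
// optimizePrecommitCommand and execute$2 below):
//   const testCheck = await isTestNeeded(process.cwd());
//   if (!testCheck.needed) console.log(`Skipping tests: ${testCheck.reason}`);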
/**
 * Record that tests were run for this package
 */ async function recordTestRun(packageDir) {
    try {
        const cache = await loadTestCache(packageDir);
        const cacheKey = packageDir;
        const commitHash = await getCurrentCommitHash(packageDir);
        cache[cacheKey] = {
            lastTestRun: Date.now(),
            lastCommitHash: commitHash || 'unknown'
        };
        await saveTestCache(packageDir, cache);
    } catch (error) {
        logger.debug(`Failed to record test run: ${error.message}`);
    }
}
/**
 * Optimize a precommit command by skipping unnecessary steps
 * Returns the optimized command and information about what was skipped
 */ async function optimizePrecommitCommand(packageDir, originalCommand, options = {}) {
    const { skipClean = true, skipTest = true } = options;
    // Parse the original command to extract individual scripts
    // Common patterns: "npm run precommit", "npm run clean && npm run build && npm run lint && npm run test"
    const isPrecommitScript = originalCommand.includes('precommit') || originalCommand.includes('pre-commit');
    let optimizedCommand = originalCommand;
    const skipped = { clean: false, test: false };
    const reasons = {};
    // If it's a precommit script, we need to check what it actually runs
    // For now, we'll optimize the common pattern: clean && build && lint && test
    if (isPrecommitScript || originalCommand.includes('clean')) {
        if (skipClean) {
            const cleanCheck = await isCleanNeeded(packageDir);
            if (!cleanCheck.needed) {
                // Remove clean from the command
                optimizedCommand = optimizedCommand.replace(/npm\s+run\s+clean\s+&&\s*/g, '').replace(/npm\s+run\s+clean\s+/g, '').replace(/\s*&&\s*npm\s+run\s+clean/g, '').trim();
                skipped.clean = true;
                reasons.clean = cleanCheck.reason;
            }
        }
    }
    if (isPrecommitScript || originalCommand.includes('test')) {
        if (skipTest) {
            const testCheck = await isTestNeeded(packageDir);
            if (!testCheck.needed) {
                // Remove test from the command
                optimizedCommand = optimizedCommand.replace(/\s*&&\s*npm\s+run\s+test\s*/g, '').replace(/\s*&&\s*npm\s+run\s+test$/g, '').replace(/npm\s+run\s+test\s+&&\s*/g, '').trim();
                skipped.test = true;
                reasons.test = testCheck.reason;
            }
        }
    }
    // Clean up any double && or trailing &&
    optimizedCommand = optimizedCommand.replace(/\s*&&\s*&&/g, ' && ').replace(/&&\s*$/, '').trim();
    return { optimizedCommand, skipped, reasons };
}
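// Worked example (illustrative values): given
//   "npm run clean && npm run build && npm run lint && npm run test"
// with a fresh dist/ and a cached test run, the replaces above yield
//   "npm run build && npm run lint"
// with skipped = { clean: true, test: true } and the reasons recorded per step.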

function _define_property(obj, key, value) {
    if (key in obj) {
        Object.defineProperty(obj, key, {
            value: value,
            enumerable: true,
            configurable: true,
            writable: true
        });
    } else {
        obj[key] = value;
    }
    return obj;
}
// Performance timing helper
class PerformanceTimer {
    static start(logger, operation) {
        logger.verbose(`⏱️ Starting: ${operation}`);
        return new PerformanceTimer(logger);
    }
    end(operation) {
        const duration = Date.now() - this.startTime;
        this.logger.verbose(`⏱️ Completed: ${operation} (${duration}ms)`);
        return duration;
    }
    constructor(logger){
        _define_property(this, "startTime", void 0);
        _define_property(this, "logger", void 0);
        this.logger = logger;
        this.startTime = Date.now();
    }
}
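// Usage sketch (mirrors the call sites below):
//   const timer = PerformanceTimer.start(logger, 'Some operation');
//   ... do work ...
//   const ms = timer.end('Some operation'); // logs and returns elapsed milliseconds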
const EXCLUDED_DIRECTORIES = [
    'node_modules', 'dist', 'build', 'coverage', '.git',
    '.next', '.nuxt', 'out', 'public', 'static', 'assets'
];
// Batch read multiple package.json files in parallel
const batchReadPackageJsonFiles = async (packageJsonPaths, storage, rootDir)=>{
    const logger = getLogger();
    const timer = PerformanceTimer.start(logger, `Batch reading ${packageJsonPaths.length} package.json files`);
    const readPromises = packageJsonPaths.map(async (packageJsonPath)=>{
        try {
            const packageJsonContent = await storage.readFile(packageJsonPath, 'utf-8');
            const parsed = safeJsonParse(packageJsonContent, packageJsonPath);
            const packageJson = validatePackageJson(parsed, packageJsonPath, false);
            const relativePath = path.relative(rootDir, path.dirname(packageJsonPath));
            return {
                path: packageJsonPath,
                packageJson,
                relativePath: relativePath || '.'
            };
        } catch (error) {
            logger.debug(`Skipped invalid package.json at ${packageJsonPath}: ${error.message}`);
            return null;
        }
    });
    const results = await Promise.all(readPromises);
    const validResults = results.filter((result)=>result !== null);
    timer.end(`Successfully read ${validResults.length}/${packageJsonPaths.length} package.json files`);
    return validResults;
};
// Optimized recursive package.json finder with parallel processing
const findAllPackageJsonFiles = async (rootDir, storage)=>{
    const logger = getLogger();
    const timer = PerformanceTimer.start(logger, 'Optimized scanning for package.json files');
    const scanForPaths = async (currentDir, depth = 0)=>{
        // Prevent infinite recursion and overly deep scanning
        if (depth > 5) {
            return [];
        }
        try {
            if (!await storage.exists(currentDir) || !await storage.isDirectory(currentDir)) {
                return [];
            }
            const items = await storage.listFiles(currentDir);
            const foundPaths = [];
            // Check for package.json in current directory
            if (items.includes('package.json')) {
                const packageJsonPath = path.join(currentDir, 'package.json');
                foundPaths.push(packageJsonPath);
            }
            // Process subdirectories in parallel
            const subdirPromises = [];
            for (const item of items){
                if (EXCLUDED_DIRECTORIES.includes(item)) {
                    continue;
                }
                const itemPath = path.join(currentDir, item);
                subdirPromises.push((async ()=>{
                    try {
                        if (await storage.isDirectory(itemPath)) {
                            return await scanForPaths(itemPath, depth + 1);
                        }
                    } catch (error) {
                        logger.debug(`Skipped directory ${itemPath}: ${error.message}`);
                    }
                    return [];
                })());
            }
            if (subdirPromises.length > 0) {
                const subdirResults = await Promise.all(subdirPromises);
                for (const subdirPaths of subdirResults){
                    foundPaths.push(...subdirPaths);
                }
            }
            return foundPaths;
        } catch (error) {
            logger.debug(`Failed to scan directory ${currentDir}: ${error.message}`);
            return [];
        }
    };
    const pathsTimer = PerformanceTimer.start(logger, 'Finding all package.json paths');
    const allPaths = await scanForPaths(rootDir);
    pathsTimer.end(`Found ${allPaths.length} package.json file paths`);
    // Phase 2: Batch read all package.json files in parallel
    const packageJsonFiles = await batchReadPackageJsonFiles(allPaths, storage, rootDir);
    timer.end(`Found ${packageJsonFiles.length} valid package.json files`);
    return packageJsonFiles;
};
// Optimized package scanning with parallel processing
const scanDirectoryForPackages = async (rootDir, storage)=>{
    const logger = getLogger();
    const timer = PerformanceTimer.start(logger, `Optimized package scanning: ${rootDir}`);
    const packageMap = new Map(); // packageName -> relativePath
    const absoluteRootDir = path.resolve(process.cwd(), rootDir);
    logger.verbose(`Scanning directory for packages: ${absoluteRootDir}`);
    try {
        // Quick existence and directory check
        const existsTimer = PerformanceTimer.start(logger, `Checking directory: ${absoluteRootDir}`);
        if (!await storage.exists(absoluteRootDir) || !await storage.isDirectory(absoluteRootDir)) {
            existsTimer.end(`Directory not found or not a directory: ${absoluteRootDir}`);
            timer.end(`Directory invalid: ${rootDir}`);
            return packageMap;
        }
        existsTimer.end(`Directory verified: ${absoluteRootDir}`);
        // Get all items and process in parallel
        const listTimer = PerformanceTimer.start(logger, `Listing contents: ${absoluteRootDir}`);
        const items = await storage.listFiles(absoluteRootDir);
        listTimer.end(`Listed ${items.length} items`);
        // Create batched promises for better performance
        const BATCH_SIZE = 10; // Process directories in batches to avoid overwhelming filesystem
        const batches = [];
        for(let i = 0; i < items.length; i += BATCH_SIZE){
            const batch = items.slice(i, i + BATCH_SIZE);
            batches.push(batch);
        }
        const processTimer = PerformanceTimer.start(logger, `Processing ${batches.length} batches of directories`);
        for (const batch of batches){
            const batchPromises = batch.map(async (item)=>{
                const itemPath = path.join(absoluteRootDir, item);
                try {
                    if (await storage.isDirectory(itemPath)) {
                        const packageJsonPath = path.join(itemPath, 'package.json');
                        if (await storage.exists(packageJsonPath)) {
                            const packageJsonContent = await storage.readFile(packageJsonPath, 'utf-8');
                            const parsed = safeJsonParse(packageJsonContent, packageJsonPath);
                            const packageJson = validatePackageJson(parsed, packageJsonPath);
                            if (packageJson.name) {
                                const relativePath = path.relative(process.cwd(), itemPath);
                                return {
                                    name: packageJson.name,
                                    path: relativePath
                                };
                            }
                        }
                    }
                } catch (error) {
                    logger.debug(`Skipped ${itemPath}: ${error.message || error}`);
                }
                return null;
            });
            const batchResults = await Promise.all(batchPromises);
            for (const result of batchResults){
                if (result) {
                    packageMap.set(result.name, result.path);
                    logger.debug(`Found package: ${result.name} at ${result.path}`);
                }
            }
        }
        processTimer.end(`Processed ${items.length} directories in ${batches.length} batches`);
        logger.verbose(`Found ${packageMap.size} packages in ${items.length} subdirectories`);
    } catch (error) {
        logger.warn(`PERFORMANCE_DIR_READ_FAILED: Unable to read directory | Directory: ${absoluteRootDir} | Error: ${error}`);
    }
    timer.end(`Found ${packageMap.size} packages in: ${rootDir}`);
    return packageMap;
};
// Parallel scope processing for better performance
const findPackagesByScope = async (dependencies, scopeRoots, storage)=>{
    const logger = getLogger();
    const timer = PerformanceTimer.start(logger, 'Finding packages by scope (optimized)');
    const workspacePackages = new Map();
    logger.silly(`Checking dependencies against scope roots: ${JSON.stringify(scopeRoots)}`);
    // Process all scopes in parallel for maximum performance
    const scopeTimer = PerformanceTimer.start(logger, 'Parallel scope scanning');
    const scopePromises = Object.entries(scopeRoots).map(async ([scope, rootDir])=>{
        logger.verbose(`Scanning scope ${scope} at root directory: ${rootDir}`);
        const scopePackages = await scanDirectoryForPackages(rootDir, storage);
        // Filter packages that match the scope
        const matchingPackages = [];
        for (const [packageName, packagePath] of scopePackages){
            if (packageName.startsWith(scope)) {
                matchingPackages.push([packageName, packagePath]);
                logger.debug(`Registered package: ${packageName} -> ${packagePath}`);
            }
        }
        return { scope, packages: matchingPackages };
    });
    const allScopeResults = await Promise.all(scopePromises);
    // Aggregate all packages from all scopes
    const allPackages = new Map();
    for (const { scope, packages } of allScopeResults){
        for (const [packageName, packagePath] of packages){
            allPackages.set(packageName, packagePath);
        }
    }
    scopeTimer.end(`Scanned ${Object.keys(scopeRoots).length} scope roots, found ${allPackages.size} packages`);
    // Match dependencies to available packages
    const matchTimer = PerformanceTimer.start(logger, 'Matching dependencies to packages');
    for (const [depName, depVersion] of Object.entries(dependencies)){
        logger.debug(`Processing dependency: ${depName}@${depVersion}`);
        if (allPackages.has(depName)) {
            const packagePath = allPackages.get(depName);
            workspacePackages.set(depName, packagePath);
            logger.verbose(`Found sibling package: ${depName} at ${packagePath}`);
        }
    }
    matchTimer.end(`Matched ${workspacePackages.size} dependencies to workspace packages`);
    timer.end(`Found ${workspacePackages.size} packages to link`);
    return workspacePackages;
};
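// Illustrative input (hypothetical paths): with
//   scopeRoots = { "@eldrforge": "../" }
// every sibling directory under ../ whose package.json name starts with
// "@eldrforge" is indexed, and any dependency with a matching name is
// resolved to that local checkout's relative path.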
// Utility to collect all dependencies from package.json files efficiently
const collectAllDependencies = (packageJsonFiles)=>{
    const logger = getLogger();
    const timer = PerformanceTimer.start(logger, 'Collecting all dependencies');
    const allDependencies = {};
    for (const { packageJson } of packageJsonFiles){
        Object.assign(allDependencies, packageJson.dependencies);
        Object.assign(allDependencies, packageJson.devDependencies);
        Object.assign(allDependencies, packageJson.peerDependencies);
    }
    timer.end(`Collected ${Object.keys(allDependencies).length} unique dependencies`);
    return allDependencies;
};
// Utility to check for file: dependencies
const checkForFileDependencies = (packageJsonFiles)=>{
    const logger = getLogger();
    const timer = PerformanceTimer.start(logger, 'Checking for file: dependencies');
    const filesWithFileDependencies = [];
    for (const { path: packagePath, packageJson, relativePath } of packageJsonFiles){
        const fileDeps = [];
        // Check all dependency types for file: paths
        const allDeps = {
            ...packageJson.dependencies,
            ...packageJson.devDependencies,
            ...packageJson.peerDependencies
        };
        for (const [name, version] of Object.entries(allDeps)){
            if (version.startsWith('file:')) {
                fileDeps.push(`${name}: ${version}`);
            }
        }
        if (fileDeps.length > 0) {
            filesWithFileDependencies.push({
                path: relativePath,
                dependencies: fileDeps
            });
        }
    }
    if (filesWithFileDependencies.length > 0) {
        logger.warn('FILE_DEPS_WARNING: Found file: dependencies that should not be committed | Count: ' + filesWithFileDependencies.length + ' | Impact: May cause build issues');
        for (const file of filesWithFileDependencies){
            logger.warn(`FILE_DEPS_PACKAGE: Package with file dependencies | Path: ${file.path}`);
            for (const dep of file.dependencies){
                logger.warn(`FILE_DEPS_DETAIL: File dependency detected | Dependency: ${dep}`);
            }
        }
        logger.warn('');
        logger.warn('FILE_DEPS_RESOLUTION: Action required before committing | Command: kodrdriv unlink | Purpose: Restore registry versions');
        logger.warn('FILE_DEPS_PREVENTION: Alternative option | Action: Add pre-commit hook | Purpose: Prevent accidental commits of linked dependencies');
    }
    timer.end(`Checked ${packageJsonFiles.length} files, found ${filesWithFileDependencies.length} with file: dependencies`);
};
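// Example of what this flags (hypothetical entry): a package.json containing
//   "dependencies": { "@eldrforge/core": "file:../core" }
// would be reported, since "file:" versions only resolve on the machine that
// created the link and break installs from the registry.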

/**
 * Execute precommit checks: lint -> build -> test
 * Skips clean step (clean should be run separately if needed)
 * Uses optimization to skip steps when unchanged
 */ const execute$2 = async (runConfig)=>{
    const logger = getLogger();
    const isDryRun = runConfig.dryRun || false;
    const packageDir = process.cwd();
    // Default command: lint -> build -> test (no clean)
    const defaultCommand = 'npm run lint && npm run build && npm run test';
    // Check if package.json has a precommit script
    let commandToRun = defaultCommand;
    try {
        var _packageJson_scripts;
        const fs = await import('fs/promises');
        const packageJsonPath = path.join(packageDir, 'package.json');
        const packageJsonContent = await fs.readFile(packageJsonPath, 'utf-8');
        const packageJson = JSON.parse(packageJsonContent);
        // If there's a precommit script, check what it does
        if ((_packageJson_scripts = packageJson.scripts) === null || _packageJson_scripts === void 0 ? void 0 : _packageJson_scripts.precommit) {
            const precommitScript = packageJson.scripts.precommit;
            // If it includes clean, we'll optimize it out
            // Otherwise, use the precommit script directly
            if (!precommitScript.includes('clean')) {
                commandToRun = `npm run precommit`;
            } else {
                // Use default command (lint -> build -> test) if precommit includes clean
                commandToRun = defaultCommand;
            }
        }
    } catch (error) {
        logger.debug(`Could not read package.json, using default command: ${error.message}`);
    }
    if (isDryRun) {
        logger.info(`DRY RUN: Would execute: ${commandToRun}`);
        return `DRY RUN: Would run precommit checks: ${commandToRun}`;
    }
    // Optimize the command (skip clean/test if unchanged)
    let optimizedCommand = commandToRun;
    let optimizationInfo = null;
    try {
        const optimization = await optimizePrecommitCommand(packageDir, commandToRun);
        optimizedCommand = optimization.optimizedCommand;
        optimizationInfo = {
            skipped: optimization.skipped,
            reasons: optimization.reasons
        };
        if (optimization.skipped.clean || optimization.skipped.test) {
            const skippedParts = [];
            if (optimization.skipped.clean) {
                skippedParts.push(`clean (${optimization.reasons.clean})`);
            }
            if (optimization.skipped.test) {
                skippedParts.push(`test (${optimization.reasons.test})`);
            }
            logger.info(`⚡ Optimized: Skipped ${skippedParts.join(', ')}`);
            if (runConfig.verbose || runConfig.debug) {
                logger.info(` Original: ${commandToRun}`);
                logger.info(` Optimized: ${optimizedCommand}`);
            }
        }
    } catch (error) {
        logger.debug(`Precommit optimization failed: ${error.message}`);
    }
    // Execute the optimized command
    const timer = PerformanceTimer.start(logger, 'Precommit checks');
    try {
        logger.info(`🔧 Running precommit checks: ${optimizedCommand}`);
        await run(optimizedCommand, { cwd: packageDir });
        const duration = timer.end('Precommit checks');
        const seconds = (duration / 1000).toFixed(1);
        logger.info(`✅ Precommit checks passed (${seconds}s)`);
        // Record test run if tests were executed (not skipped)
        if (optimizedCommand.includes('test') && (!optimizationInfo || !optimizationInfo.skipped.test)) {
            try {
                await recordTestRun(packageDir);
            } catch (error) {
                logger.debug(`Failed to record test run: ${error.message}`);
            }
        }
        return `Precommit checks completed successfully in ${seconds}s`;
    } catch (error) {
        timer.end('Precommit checks');
        logger.error(`❌ Precommit checks failed: ${error.message}`);
        throw error;
    }
};

const executeInternal$1 = async (runConfig)=>{
    const isDryRun = runConfig.dryRun || false;
    const logger = getDryRunLogger(isDryRun);
    const storage = createStorage();
    const outputDirectory = runConfig.outputDirectory || DEFAULT_OUTPUT_DIRECTORY;
    if (isDryRun) {
        logger.info(`CLEAN_DRY_RUN: Would remove output directory | Mode: dry-run | Directory: ${outputDirectory} | Action: Would delete if exists`);
        logger.info(`CLEAN_CHECK_DRY_RUN: Would check directory existence | Mode: dry-run | Directory: ${outputDirectory}`);
        logger.info('CLEAN_REMOVE_DRY_RUN: Would remove directory if present | Mode: dry-run | Action: Delete');
        return;
    }
    logger.info(`CLEAN_STARTING: Removing output directory | Directory: ${outputDirectory} | Action: Delete | Purpose: Clean generated files`);
    try {
        if (await storage.exists(outputDirectory)) {
            await storage.removeDirectory(outputDirectory);
            logger.info(`CLEAN_SUCCESS: Successfully removed output directory | Directory: ${outputDirectory} | Status: deleted`);
        } else {
            logger.info(`CLEAN_NOT_EXISTS: Output directory does not exist | Directory: ${outputDirectory} | Status: nothing-to-clean`);
        }
    } catch (error) {
        logger.error(`CLEAN_FAILED: Failed to clean output directory | Directory: ${outputDirectory} | Error: ${error.message}`);
        throw new FileOperationError('Failed to remove output directory', outputDirectory, error);
    }
};
const execute$1 = async (runConfig)=>{
    try {
        await executeInternal$1(runConfig);
    } catch (error) {
        const logger = getLogger();
        if (error instanceof FileOperationError) {
            logger.error(`CLEAN_COMMAND_FAILED: Clean command failed | Error: ${error.message}`);
            if (error.cause && typeof error.cause === 'object' && 'message' in error.cause) {
                logger.debug(`Caused by: ${error.cause.message}`);
            }
            throw error;
        }
        // Unexpected errors
        logger.error(`CLEAN_UNEXPECTED_ERROR: Clean encountered unexpected error | Error: ${error.message} | Type: unexpected`);
        throw error;
    }
};

// Utility function to read a review note from a file
const readReviewNoteFromFile = async (filePath)=>{
    const logger = getLogger();
    try {
        logger.debug(`Reading review note from file: ${filePath}`);
        const content = await fs.readFile(filePath, 'utf8');
        if (!content.trim()) {
            throw new ValidationError(`Review file is empty: ${filePath}`);
        }
        logger.debug(`Successfully read review note from file: ${filePath} (${content.length} characters)`);
        return content.trim();
    } catch (error) {
        if (error.code === 'ENOENT') {
            throw new FileOperationError(`Review file not found: ${filePath}`, filePath, error);
        }
        if (error instanceof ValidationError) {
            throw error;
        }
        throw new FileOperationError(`Failed to read review file: ${error.message}`, filePath, error);
    }
};
// Utility function to get all review files in a directory
const getReviewFilesInDirectory = async (directoryPath)=>{
    const logger = getLogger();
    try {
        logger.debug(`Scanning directory for review files: ${directoryPath}`);
        const entries = await fs.readdir(directoryPath, { withFileTypes: true });
        // Filter for regular files (not directories) and get full paths
        const files = entries.filter((entry)=>entry.isFile()).map((entry)=>path.join(directoryPath, entry.name)).sort(); // Sort alphabetically
        logger.debug(`Found ${files.length} files in directory: ${directoryPath}`);
        return files;
    } catch (error) {
        if (error.code === 'ENOENT') {
            throw new FileOperationError(`Directory not found: ${directoryPath}`, directoryPath, error);
        }
        throw new FileOperationError(`Failed to read directory: ${directoryPath}`, directoryPath, error);
    }
};
// New function for file selection phase
const selectFilesForProcessing = async (reviewFiles, senditMode)=>{
    const logger = getLogger();
    if (senditMode) {
        logger.info(`REVIEW_AUTO_SELECT: Auto-selecting all files for processing | Mode: sendit | File Count: ${reviewFiles.length} | Confirmation: automatic`);
        return reviewFiles;
    }
    // Check if we're in an interactive environment
    if (!isTTYSafe()) {
        logger.warn(`REVIEW_NON_INTERACTIVE_SELECT: Non-interactive environment detected | Action: Selecting all files | Mode: non-interactive`);
        return reviewFiles;
    }
    logger.info(`\nREVIEW_SELECTION_PHASE: Starting file selection phase | File Count: ${reviewFiles.length} | Purpose: Choose files to process`);
    logger.info(`REVIEW_SELECTION_FILES: Found files to review | Count: ${reviewFiles.length} | Action: Select files for processing`);
    logger.info(`REVIEW_SELECTION_OPTIONS: File selection options available | [c]=Confirm | [s]=Skip | [a]=Abort`);
    logger.info(``);
    const selectedFiles = [];
    let shouldAbort = false;
    for(let i = 0; i < reviewFiles.length; i++){
        const filePath = reviewFiles[i];
        logger.info(`REVIEW_SELECTION_FILE: File for review | Progress: ${i + 1}/${reviewFiles.length} | File: ${filePath}`);
        const choice = await getUserChoice(`Select action for this file:`, [
            { key: 'c', label: 'Confirm and process' },
            { key: 's', label: 'Skip this file' },
            { key: 'a', label: 'Abort entire review' }
        ]);
        if (choice === 'a') {
            logger.info(`REVIEW_ABORTED: User aborted review process | Action: Aborting | Reason: User request`);
            shouldAbort = true;
            break;
        } else if (choice === 'c') {
            selectedFiles.push(filePath);
            logger.info(`REVIEW_FILE_SELECTED: File selected for processing | File: ${filePath} | Action: Will be processed`);
        } else if (choice === 's') {
            logger.info(`REVIEW_FILE_SKIPPED: File skipped during selection | File: ${filePath} | Action: Will not be processed`);
        }
    }
    if (shouldAbort) {
        throw new Error('Review process aborted by user');
    }
    if (selectedFiles.length === 0) {
        throw new Error('No files were selected for processing');
    }
    logger.info(`\n📋 File selection complete. ${selectedFiles.length} files selected for processing:`);
    selectedFiles.forEach((file, index)=>{
        logger.info(` ${index + 1}. ${file}`);
    });
    logger.info(``);
    return selectedFiles;
};
// Safe temp file handling with proper permissions and validation
const createSecureTempFile = async ()=>{
    const logger = getLogger();
    const tmpDir = os.tmpdir();
    // Ensure temp directory exists and is writable
    try {
        // Use constant value directly to avoid import restrictions
        const W_OK = 2; // fs.constants.W_OK value
        await fs.access(tmpDir, W_OK);
    } catch (error) {
        logger.error(`TEMP_DIR_NOT_WRITABLE: Temporary directory is not writable | Directory: ${tmpDir} | Impact: Cannot create temp files`);
        throw new FileOperationError(`Temp directory not writable: ${error.message}`, tmpDir, error);
    }
    const tmpFilePath = path.join(tmpDir, `kodrdriv_review_${Date.now()}_${Math.random().toString(36).substring(7)}.md`);
    // Create file with restrictive permissions (owner read/write only)
    try {
        const fd = await fs.open(tmpFilePath, 'w', 0o600);
        await fd.close();
        logger.debug(`Created secure temp file: ${tmpFilePath}`);
        return tmpFilePath;
    } catch (error) {
        logger.error(`TEMP_FILE_CREATE_FAILED: Unable to create temporary file | Error: ${error.message} | Impact: Cannot proceed with review`);
        throw new FileOperationError(`Failed to create temp file: ${error.message}`, 'temporary file', error);
    }
};
// Safe file cleanup with proper error handling
const cleanupTempFile = async (filePath)=>{
    const logger = getLogger();
    try {
        await fs.unlink(filePath);
        logger.debug(`Cleaned up temp file: ${filePath}`);
    } catch (error) {
        // Only ignore ENOENT (file not found) errors, log others
        if (error.code !== 'ENOENT') {
            logger.warn(`TEMP_FILE_CLEANUP_FAILED: Unable to cleanup temporary file | File: ${filePath} | Error: ${error.message} | Impact: File may remain`);
            // Don't throw here to avoid masking the main operation
        }
    }
};
// Editor with optional timeout and proper error handling
const openEditorWithTimeout = async (editorCmd, filePath, timeoutMs)=>{
    const logger = getLogger();
    return new Promise((resolve, reject)=>{
        if (timeoutMs) {
            logger.debug(`Opening editor: ${editorCmd} ${filePath} (timeout: ${timeoutMs}ms)`);
        } else {
            logger.debug(`Opening editor: ${editorCmd} ${filePath} (no timeout)`);
        }
        const child = spawn(editorCmd, [filePath], {
            stdio: 'inherit',
            shell: false // Prevent shell injection
        });
        let timeout;
        let timeoutCleared = false;
        const clearTimeoutSafely = ()=>{
            if (timeout && !timeoutCleared) {
                clearTimeout(timeout);
                timeoutCleared = true;
            }
        };
        if (timeoutMs) {
            timeout = setTimeout(()=>{
                clearTimeoutSafely(); // Clear the timeout immediately when it fires
                logger.warn(`Editor timed out after ${timeoutMs}ms, terminating...`);
                child.kill('SIGTERM');
                // Give it a moment to terminate gracefully, then force kill
                setTimeout(()=>{
                    if (!child.killed) {
                        logger.warn('Editor did not terminate gracefully, force killing...');
                        child.kill('SIGKILL');
                    }
                }, 5000);
                reject(new Error(`Editor '${editorCmd}' timed out after ${timeoutMs}ms. Consider using a different editor or increasing the timeout.`));
            }, timeoutMs);
        }
        child.on('exit', (code, signal)=>{
            clearTimeoutSafely();
            logger.debug(`Editor exited with code ${code}, signal ${signal}`);
            if (signal === 'SIGTERM' || signal === 'SIGKILL') {
                reject(new Error(`Editor was terminated (${signal})`));
            } else if (code === 0) {
                resolve();
            } else {
                reject(new Error(`Editor exited with non-zero code: ${code}`));
            }
        });
        child.on('error', (error)=>{
            clearTimeoutSafely();
            logger.error(`Editor error: ${error.message}`);
            reject(new Error(`Failed to launch editor '${editorCmd}': ${error.message}`));
        });
    });
};
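// Usage sketch (hypothetical values; callers choose the editor and timeout):
//   await openEditorWithTimeout(process.env.EDITOR || 'vi', tmpFilePath, 5 * 60 * 1000);
// Resolves when the editor exits cleanly; rejects on timeout, termination
// signal, or a non-zero exit code.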
// Validate API response format before use
const validateReviewResult = (data)=>{
    if (!data || typeof data !== 'object') {
        throw new Error('Invalid API response: expected object, got ' + typeof data);
    }
    if (typeof data.summary !== 'string') {
        throw new Error('Invalid API response: missing or invalid summary field');
    }
    if (typeof data.totalIssues !== 'number' || data.totalIssues < 0) {
        throw new Error('Invalid API response: missing or invalid totalIssues field');
    }
    if (data.issues && !Array.isArray(data.issues)) {
        throw new Error('Invalid API response: issues field must be an array');
    }
    // Validate each issue if present
    if (data.issues) {
        for(let i = 0; i < data.issues.length; i++){
            const issue = data.issues[i];
            if (!issue || typeof issue !== 'object') {
                throw new Error(`Invalid API response: issue ${i} is not an object`);
            }
            if (typeof issue.title !== 'string') {
                throw new Error(`Invalid API response: issue ${i} missing title`);
            }
            if (typeof issue.priority !== 'string') {
                throw new Error(`Invalid API response: issue ${i} missing priority`);
            }
        }
    }
    return data;
};
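// Minimal payload that passes the checks above (illustrative values only):
//   { summary: 'One bug found', totalIssues: 1,
//     issues: [{ key assumptions: none beyond the checks — e.g.
//                title: 'Crash on empty input', priority: 'high' }] }
// issues may be omitted entirely; only summary and totalIssues are required.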
// Enhanced TTY detection with fallback handling
const isTTYSafe = ()=>{
    try {
        // Primary check
        if (process.stdin.isTTY === false) {
            return false;
        }
        // Additional checks for edge cases
        if (process.stdin.isTTY === true) {
            return true;
        }
        // Handle undefined case (some environments)
        if (process.stdin.isTTY === undefined) {
            // Check if we can reasonably assume interactive mode
            return process.stdout.isTTY === true && process.stderr.isTTY === true;
        }
        return false;
    } catch (error) {
        // If TTY detection fails entirely, assume non-interactive
        getLogger().debug(`TTY detection failed: ${error}, assuming non-interactive`);
        return false;
    }
};
// Safe file write with disk space and permission validation
const safeWriteFile = async (filePath, content, encoding = 'utf-8')=>{
    const logger = getLogger();
    try {
        // Check if parent directory exists and is writable
        const parentDir = path.dirname(filePath);
        const W_OK = 2; // fs.constants.W_OK value
        await fs.access(parentDir, W_OK);
        // Check available disk space (basic check by writing a small test)
        const testFile = `${filePath}.test`;
        try {
            await fs.writeFile(testFile, 'test', encoding);
            await fs.unlink(testFile);
        } catch (error) {
            if (error.code === 'ENOSPC') {
                throw new Error(`Insufficient disk space to write file: ${filePath}`);
            }
            throw error;
        }
        // Write the actual file
        await fs.writeFile(filePath, content, encoding);
        logger.debug(`Successfully wrote file: ${filePath} (${content.length} characters)`);
    } catch (error) {
        logger.error(`Failed to write file ${filePath}: ${error.message}`);
        throw new Error(`Failed to write file ${filePath}: ${error.message}`);
    }
};
// Helper function to process a single review note
const processSingleReview = async (reviewNote, runConfig, outputDirectory)=>{
    var _runConfig_review, _runConfig_review1, _runConfig_review2, _runConfig_review3, _runConfig_review_context, _runConfig_review4, _runConfig_review5, _aiConfig_commands_review, _aiConfig_commands, _analysisResult_issues;
    const logger = getLogger();
    // Gather additional context based on configuration with improved error handling
    let logContext = '';
    let diffContext = '';
    let releaseNotesContext = '';
    let issuesContext = '';
    const contextErrors = [];
    // Fetch commit history if enabled
    if ((_runConfig_review = runConfig.review) === null || _runConfig_review === void 0 ? void 0 : _runConfig_review.includeCommitHistory) {
        try {
            logger.debug('Fetching recent commit history...');
            const log = await Log.create({
                limit: runConfig.review.commitHistoryLimit
            });
            const logContent = await log.get();
            if (logContent.trim()) {
                logContext += `\n\n[Recent Commit History]\n${logContent}`;
                logger.debug('Added commit history to context (%d characters)', logContent.length);
            }
        } catch (error) {
            const errorMsg = `Failed to fetch commit history: ${error.message}`;
            logger.warn(errorMsg);
            contextErrors.push(errorMsg);
        }
    }
    // Fetch recent diffs if enabled
    if ((_runConfig_review1 = runConfig.review) === null || _runConfig_review1 === void 0 ? void 0 : _runConfig_review1.includeRecentDiffs) {
        try {
            var _runConfig_excludedPatterns;
            logger.debug('Fetching recent commit diffs...');
            const basePatterns = (_runConfig_excludedPatterns = runConfig.excludedPatterns) !== null && _runConfig_excludedPatterns !== void 0 ? _runConfig_excludedPatterns : DEFAULT_EXCLUDED_PATTERNS;
            const recentDiffs = await Diff.getRecentDiffsForReview({
                limit: runConfig.review.diffHistoryLimit,
                baseExcludedPatterns: basePatterns
            });
            diffContext += recentDiffs;
            if (recentDiffs.trim()) {
                logger.debug('Added recent diffs to context (%d characters)', recentDiffs.length);
            }
        } catch (error) {
            const errorMsg = `Failed to fetch recent diffs: ${error.message}`;
            logger.warn(errorMsg);
            contextErrors.push(errorMsg);
        }
    }
    // Fetch release notes if enabled
    if ((_runConfig_review2 = runConfig.review) === null || _runConfig_review2 === void 0 ? void 0 : _runConfig_review2.includeReleaseNotes) {
        try {
            logger.debug('Fetching recent release notes from GitHub...');
            const releaseNotesContent = await getReleaseNotesContent({
                limit: runConfig.review.releaseNotesLimit || 3
            });
            if (releaseNotesContent.trim()) {
                releaseNotesContext += `\n\n[Recent Release Notes]\n${releaseNotesContent}`;
                logger.debug('Added release notes to context (%d characters)', releaseNotesContent.length);
            }
        } catch (error) {
            const errorMsg = `Failed to fetch release notes: ${error.message}`;
            logger.warn(errorMsg);
            contextErrors.push(errorMsg);
        }
    }
    // Fetch GitHub issues if enabled
    if ((_runConfig_review3 = runConfig.review) === null || _runConfig_review3 === void 0 ? void 0 : _runConfig_review3.includeGithubIssues) {
        try {
            logger.debug('Fetching open GitHub issues...');
            issuesContext = await getIssuesContent({
                limit: runConfig.review.githubIssuesLimit || 20
            });
            if (issuesContext.trim()) {
                logger.debug('Added GitHub issues to context (%d characters)', issuesContext.length);
            }
        } catch (error) {
            const errorMsg = `Failed to fetch GitHub issues: ${error.message}`;
            logger.warn(errorMsg);
            contextErrors.push(errorMsg);
        }
    }
    // Report context gathering results
    if (contextErrors.length > 0) {
        var _runConfig_review6;
        logger.warn(`Context gathering completed with ${contextErrors.length} error(s):`);
        contextErrors.forEach((error)=>logger.warn(` - ${error}`));
        // For critical operations, consider failing if too many context sources fail
        const maxContextErrors = ((_runConfig_review6 = runConfig.review) === null || _runConfig_review6 === void 0 ? void 0 : _runConfig_review6.maxContextErrors) || contextErrors.length; // Default: allow all errors
        if (contextErrors.length > maxContextErrors) {
            throw new Error(`Too many context gathering errors (${contextErrors.length}), aborting review. Consider checking your configuration and network connectivity.`);
        }
    }
    // Analyze review note for issues using OpenAI
    logger.info('REVIEW_ANALYSIS_STARTING: Analyzing review note for project issues | Source: review note | Purpose: Identify actionable issues');
    logger.debug('Context summary:');
    logger.debug(' - Review note: %d chars', reviewNote.length);
    logger.debug(' - Log context: %d chars', logContext.length);
    logger.debug(' - Diff context: %d chars', diffContext.length);
    logger.debug(' - Release notes context: %d chars', releaseNotesContext.length);
    logger.debug(' - Issues context: %d chars', issuesContext.length);
    logger.debug(' - User context: %d chars', ((_runConfig_review4 = runConfig.review) === null || _runConfig_review4 === void 0 ? void 0 : (_runConfig_review_context = _runConfig_review4.context) === null || _runConfig_review_context === void 0 ? void 0 : _runConfig_review_context.length) || 0);
    const promptConfig = {
        overridePaths: runConfig.discoveredConfigDirs || [],
        overrides: runConfig.overrides || false
    };
    // Create adapters for ai-service
    const aiConfig = toAIConfig(runConfig);
    const aiStorageAdapter = createStorageAdapter(outputDirectory);
    const aiLogger = createLoggerAdapter(runConfig.dryRun || false);
    const promptContent = {
        notes: reviewNote
    };
    const promptContext = {
        context: (_runConfig_review5 = runConfig.review) === null || _runConfig_review5 === void 0 ? void 0 : _runConfig_review5.context,
        logContext,
        diffContext,
        releaseNotesContext,
        issuesContext
    };
    const prompt = await createReviewPrompt(promptConfig, promptContent, promptContext);
    const modelToUse = ((_aiConfig_commands = aiConfig.commands) === null || _aiConfig_commands === void 0 ? void 0 : (_aiConfig_commands_review = _aiConfig_commands.review) === null || _aiConfig_commands_review === void 0 ? void 0 : _aiConfig_commands_review.model) || aiConfig.model || 'gpt-4o-mini';
    const request = Formatter.create({ logger }).formatPrompt(modelToUse, prompt);
    let analysisResult;
    try {
        var _aiConfig_commands_review1, _aiConfig_commands1, _rawAnalysisResult_issues;
        const rawResult = await createCompletion(request.messages, {
            model: modelToUse,
            openaiReasoning: ((_aiConfig_commands1 = aiConfig.commands) === null || _aiConfig_commands1 === void 0 ? void 0 : (_aiConfig_commands_review1 = _aiConfig_commands1.review) === null || _aiConfig_commands_review1 === void 0 ? void 0 : _aiConfig_commands_review1.reasoning) || aiConfig.reasoning,
            responseFormat: { type: 'json_object' },
            debug: runConfig.debug,
            debugRequestFile: getOutputPath(outputDirectory, getTimestampedRequestFilename('review-analysis')),
            debugResponseFile: getOutputPath(outputDirectory, getTimestampedResponseFilename('review-analysis')),
            storage: aiStorageAdapter,
            logger: aiLogger
        });
        // Validate the API response before using it
        const rawAnalysisResult = validateReviewResult(rawResult);
        // Apply stop-context filtering to issues
        analysisResult = {
            ...rawAnalysisResult,
            summary: filterContent(rawAnalysisResult.summary, runConfig.stopContext).filtered,
            issues: (_rawAnalysisResult_issues = rawAnalysisResult.issues) === null || _rawAnalysisResult_issues === void 0 ? void 0 : _rawAnalysisResult_issues.map((issue)=>({
                ...issue,
                title: filterContent(issue.title, runConfig.stopContext).filtered,
                description: filterContent(issue.description || '', runConfig.stopContext).filtered
            }))
        };
    } catch (error) {
        logger.error(`REVIEW_ANALYSIS_FAILED: Unable to analyze review note | Error: ${error.message} | Impact: Cannot identify issues`);
        throw new Error(`Review analysis failed: ${error.message}`);
    }
    logger.info('REVIEW_ANALYSIS_COMPLETE: Review note analysis completed successfully | Status: completed | Next: Issue creation if enabled');
    logger.debug('Analysis result summary: %s', analysisResult.summary);
    logger.debug('Total issues found: %d', analysisResult.totalIssues);
    logger.debug('Issues array length: %d', ((_analysisResult_issues = analysisResult.issues) === null || _analysisResult_issues === void 0 ? void 0 : _analysisResult_issues.length) || 0);
    if (analysisResult.issues && analysisResult.issues.length > 0) {
        analysisResult.issues.forEach((issue, index)=>{
            logger.debug(' Issue %d: [%s] %s', index + 1, issue.priority, issue.title);
        });
    }
    // Save timestamped copy of analysis result to output directory
    try {
        const reviewFilename = getTimestampedReviewFilename();
        const reviewPath = getOutputPath(outputDirectory, reviewFilename);
        // Format the analysis result as markdown
        const reviewContent = `# Review Analysis Result\n\n` + `## Summary\n${analysisResult.summary}\n\n` + `## Total Issues Found\n${analysisResult.totalIssues}\n\n` + `## Issues\n\n${JSON.stringify(analysisResult.issues, null, 2)}\n\n` + `---\n\n*Analysis completed at ${new Date().toISOString()}*`;
        await safeWriteFile(reviewPath, reviewContent);
        logger.debug('Saved timestamped review analysis: %s', reviewPath);
    } catch (error) {
        logger.warn('Failed to save timestamped review analysis: %s', error.message);
        // Don't fail the entire operation for this
    }
    return analysisResult;
};
const executeInternal = async (runConfig)=>{
    var _runConfig_review, _runConfig_review1, _runConfig_review2, _runConfig_review3, _runConfig_review4, _runConfig_review5, _runConfig_review6, _runConfig_review7, _runConfig_review8, _runConfig_review9, _runConfig_review10, _runConfig_review11, _runConfig_review12, _runConfig_review13, _runConfig_review14, _runConfig_review15, _runConfig_review16, _runConfig_review17, _runConfig_review18;
    const logger = getLogger();
    const isDryRun = runConfig.dryRun || false;
    // Show configuration even in dry-run mode
    logger.debug('Review context configuration:');
    logger.debug(' Include commit history: %s', (_runConfig_review = runConfig.review) === null || _runConfig_review === void 0 ? void 0 : _runConfig_review.includeCommitHistory);
    logger.debug(' Include recent diffs: %s', (_runConfig_review1 = runConfig.review) === null || _runConfig_review1 === void 0 ? void 0 : _runConfig_review1.includeRecentDiffs);
    logger.debug(' Include release notes: %s', (_runConfig_review2 = runConfig.review) === null || _runConfig_review2 === void 0 ? void 0 : _runConfig_review2.includeReleaseNotes);
    logger.debug(' Include GitHub issues: %s', (_runConfig_review3 = runConfig.review) === null || _runConfig_review3 === void 0 ? void 0 : _runConfig_review3.includeGithubIssues);
    logger.debug(' Commit history limit: %d', (_runConfig_review4 = runConfig.review) === null || _runConfig_review4 === void 0 ? void 0 : _runConfig_review4.commitHistoryLimit);
    logger.debug(' Diff history limit: %d', (_runConfig_review5 = runConfig.review) === null || _runConfig_review5 === void 0 ? void 0 : _runConfig_review5.diffHistoryLimit);
    logger.debug(' Release notes limit: %d', (_runConfig_review6 = runConfig.review) === null || _runConfig_review6 === void 0 ? void 0 : _runConfig_review6.releaseNotesLimit);
    logger.debug(' GitHub issues limit: %d', (_runConfig_review7 = runConfig.review) === null || _runConfig_review7 === void 0 ? void 0 : _runConfig_review7.githubIssuesLimit);
    logger.debug(' Sendit mode (auto-create issues): %s', (_runConfig_review8 = runConfig.review) === null || _runConfig_review8 === void 0 ? void 0 : _runConfig_review8.sendit);
    logger.debug(' File: %s', ((_runConfig_review9 = runConfig.review) === null || _runConfig_review9 === void 0 ? void 0 : _runConfig_review9.file) || 'not specified');
    logger.debug(' Directory: %s', ((_runConfig_review10 = runConfig.review) === null || _runConfig_review10 === void 0 ? void 0 : _runConfig_review10.directory) || 'not specified');
    if (isDryRun) {
        var _runConfig_review19, _runConfig_review20, _runConfig_review21, _runConfig_review22, _runConfig_review23;
        if ((_runConfig_review19 = runConfig.review) === null || _runConfig_review19 === void 0 ? void 0 : _runConfig_review19.file) {
            logger.info('DRY RUN: Would read review note from file: %s', runConfig.review.file);
        } else if ((_runConfig_review20 = runConfig.review) === null || _runConfig_review20 === void 0 ? void 0 : _runConfig_review20.directory) {
            logger.info('DRY RUN: Would process review files in directory: %s', runConfig.review.directory);
            logger.info('DRY RUN: Would first select which files to process, then analyze selected files');
        } else if ((_runConfig_review21 = runConfig.review) === null || _runConfig_review21 === void 0 ? void 0 : _runConfig_review21.note) {
            logger.info('DRY RUN: Would analyze provided note for review');
        } else {
            logger.info('DRY RUN: Would open editor to capture review note');
        }
        logger.info('DRY RUN: Would gather additional context based on configuration above');
        logger.info('DRY RUN: Would analyze note and identify issues');
        if ((_runConfig_review22 = runConfig.review) === null || _runConfig_review22 === void 0 ? void 0 : _runConfig_review22.sendit) {
            logger.info('DRY RUN: Would automatically create GitHub issues (sendit mode enabled)');
        } else {
            logger.info('DRY RUN: Would prompt for confirmation before creating GitHub issues');
        }
        // Show what exclusion patterns would be used in dry-run mode
        if ((_runConfig_review23 = runConfig.review) === null || _runConfig_review23 === void 0 ? void 0 : _runConfig_review23.includeRecentDiffs) {
            var _runConfig_excludedPatterns;
            const basePatterns = (_runConfig_excludedPatterns = runConfig.excludedPatterns) !== null && _runConfig_excludedPatterns !== void 0 ? _runConfig_excludedPatterns : DEFAULT_EXCLUDED_PATTERNS;
            const reviewExcluded = Diff.getReviewExcludedPatterns(basePatterns);
            logger.info('DRY RUN: Would use %d exclusion patterns for diff context', reviewExcluded.length);
            logger.debug('DRY RUN: Sample exclusions: %s', reviewExcluded.slice(0, 15).join(', ') + (reviewExcluded.length > 15 ? '...' : ''));
        }
        return 'DRY RUN: Review command would analyze note, gather context, and create GitHub issues';
    }
    // Enhanced TTY check with proper error handling
    const isInteractive = isTTYSafe();
    if (!isInteractive && !((_runConfig_review11 = runConfig.review) === null || _runConfig_review11 === void 0 ? void 0 : _runConfig_review11.sendit)) {
        logger.error('STDIN is piped but --sendit flag is not enabled');
        logger.error(' Interactive prompts cannot be used when input is piped');
        logger.error(' Solutions:');
        logger.error(' • Add --sendit flag to auto-create all issues');
        logger.error(' • Use terminal input instead of piping');
        logger.error(' • Example: echo "note" | kodrdriv review --sendit');
        throw new ValidationError('Piped input requires --sendit flag for non-interactive operation');
    }
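    // Non-interactive invocations must therefore opt in to automatic issue creation, e.g.
    // (mirroring the example given in the error output above):
    //   echo "note" | kodrdriv review --sendit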
    // Get the review note from configuration
    let reviewNote = (_runConfig_review12 = runConfig.review) === null || _runConfig_review12 === void 0 ? void 0 : _runConfig_review12.note;
    let reviewFiles = [];
    // Check if we should process a single file
    if ((_runConfig_review13 = runConfig.review) === null || _runConfig_review13 === void 0 ? void 0 : _runConfig_review13.file) {
        logger.info(`Reading review note from file: ${runConfig.review.file}`);
        reviewNote = await readReviewNoteFromFile(runConfig.review.file);
        reviewFiles = [
            runConfig.review.file
        ];
    } else if ((_runConfig_review14 = runConfig.review) === null || _runConfig_review14 === void 0 ? void 0 : _runConfig_review14.directory) {
        var _runConfig_review24;
        logger.info(`Processing review files in directory: ${runConfig.review.directory}`);
        reviewFiles = await getReviewFilesInDirectory(runConfig.review.directory);
        if (reviewFiles.length === 0) {
            throw new ValidationError(`No review files found in directory: ${runConfig.review.directory}`);
        }
        logger.info(`Found ${reviewFiles.length} files to process`);
        // Set a dummy reviewNote for directory mode to satisfy validation
        // The actual review notes will be read from each file during processing
        reviewNote = `Processing ${reviewFiles.length} files from directory`;
        // If not in sendit mode, explain the two-phase process
        if (!((_runConfig_review24 = runConfig.review) === null || _runConfig_review24 === void 0 ? void 0 : _runConfig_review24.sendit)) {
            logger.info(`Interactive mode: You will first select which files to process, then they will be analyzed in order.`);
            logger.info(`Use --sendit to process all files automatically without confirmation.`);
        }
    } else if ((_runConfig_review15 = runConfig.review) === null || _runConfig_review15 === void 0 ? void 0 : _runConfig_review15.note) {
        reviewNote = runConfig.review.note;
        reviewFiles = [
            'provided note'
        ];
    } else {
        // Open editor to capture review note
        const editor = process.env.EDITOR || process.env.VISUAL || 'vi';
        let tmpFilePath = null;
        try {
            var _runConfig_review25;
            // Create secure temporary file
            tmpFilePath = await createSecureTempFile();
            // Pre-populate the file with a helpful header so users know what to do.
            const templateContent = [
                '# Kodrdriv Review Note',
                '',
                '# Please enter your review note below. Lines starting with "#" will be ignored.',
                '# Save and close the editor when you are done.',
                '',
                ''
            ].join('\n');
            await safeWriteFile(tmpFilePath, templateContent);
            logger.info(`No review note provided - opening ${editor} to capture input...`);
            // Open the editor with optional timeout protection
            const editorTimeout = (_runConfig_review25 = runConfig.review) === null || _runConfig_review25 === void 0 ? void 0 : _runConfig_review25.editorTimeout; // No default timeout - let user take their time
            await openEditorWithTimeout(editor, tmpFilePath, editorTimeout);
            // Read the file back in, stripping comment lines and whitespace.
            const fileContent = (await fs.readFile(tmpFilePath, 'utf8')).split('\n').filter((line)=>!line.trim().startsWith('#')).join('\n').trim();
            if (!fileContent) {
                throw new ValidationError('Review note is empty - aborting. Provide a note as an argument, via STDIN, or through the editor.');
            }
            reviewNote = fileContent;
            // If the original runConfig.review object exists, update it so downstream code has the note.
            if (runConfig.review) {
                runConfig.review.note = reviewNote;
            }
        } catch (error) {
            logger.error(`Failed to capture review note via editor: ${error.message}`);
            throw error;
        } finally {
            // Always clean up the temp file
            if (tmpFilePath) {
                await cleanupTempFile(tmpFilePath);
            }
        }
        reviewFiles = [
            'editor input'
        ];
    }
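    // At this point reviewNote is populated from one of four sources, tried in order:
    // --file, --directory (placeholder note; real notes are read per file below), --note,
    // or an interactive $EDITOR/$VISUAL session falling back to vi.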
    if (!reviewNote || !reviewNote.trim()) {
        throw new ValidationError('No review note provided or captured');
    }
    logger.info('Starting review analysis...');
    logger.debug('Review note: %s', reviewNote);
    logger.debug('Review note length: %d characters', reviewNote.length);
    const outputDirectory = runConfig.outputDirectory || DEFAULT_OUTPUT_DIRECTORY;
    const storage = createStorage();
    await storage.ensureDirectory(outputDirectory);
    // Save timestamped copy of review notes to output directory
    try {
        const reviewNotesFilename = getTimestampedReviewNotesFilename();
        const reviewNotesPath = getOutputPath(outputDirectory, reviewNotesFilename);
        const reviewNotesContent = `# Review Notes\n\n${reviewNote}\n\n`;
        await safeWriteFile(reviewNotesPath, reviewNotesContent);
        logger.debug('Saved timestamped review notes: %s', reviewNotesPath);
    } catch (error) {
        logger.warn('Failed to save review notes: %s', error.message);
    }
    // Phase 1: File selection (only for directory mode)
    let selectedFiles;
    if ((_runConfig_review16 = runConfig.review) === null || _runConfig_review16 === void 0 ? void 0 : _runConfig_review16.directory) {
        var _runConfig_review26;
        selectedFiles = await selectFilesForProcessing(reviewFiles, ((_runConfig_review26 = runConfig.review) === null || _runConfig_review26 === void 0 ? void 0 : _runConfig_review26.sendit) || false);
    } else {
        // For single note mode, just use the note directly
        selectedFiles = [
            'single note'
        ];
    }
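    // Phase 1 runs only in directory mode so the user can trim the file list once, up front,
    // before any analysis work starts; single-note mode has nothing to select.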
    // Phase 2: Process selected files in order
    logger.info(`\nStarting analysis phase...`);
    const results = [];
    const processedFiles = [];
    if ((_runConfig_review17 = runConfig.review) === null || _runConfig_review17 === void 0 ? void 0 : _runConfig_review17.directory) {
        // Directory mode: process each selected file
        for (let i = 0; i < selectedFiles.length; i++) {
            const filePath = selectedFiles[i];
            try {
                logger.info(`Processing file ${i + 1}/${selectedFiles.length}: ${filePath}`);
                const fileNote = await readReviewNoteFromFile(filePath);
                const fileResult = await processSingleReview(fileNote, runConfig, outputDirectory);
                results.push(fileResult);
                processedFiles.push(filePath);
            } catch (error) {
                // Check if this is a critical error that should be propagated
                if (error.message.includes('Too many context gathering errors')) {
                    throw error; // Propagate critical context errors
                }
                logger.warn(`Failed to process file ${filePath}: ${error.message}`);
                // Continue with other files for non-critical errors
            }
        }
    } else {
        // Single note mode: process the note directly
        try {
            logger.info(`Processing single review note`);
            const fileResult = await processSingleReview(reviewNote, runConfig, outputDirectory);
            results.push(fileResult);
            processedFiles.push('single note');
        } catch (error) {
            logger.warn(`Failed to process review note: ${error.message}`);
            throw error; // Re-throw for single note mode since there's only one item
        }
    }
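    // Failure policy: directory mode logs and skips per-file failures so one bad note does
    // not abort the batch, except for 'Too many context gathering errors', which is treated
    // as critical and rethrown; single-note mode always rethrows since there is no other work.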
    if (results.length === 0) {
        throw new ValidationError('No files were processed successfully');
    }
    // Combine results if we processed multiple files
    let analysisResult;
    if (results.length === 1) {
        analysisResult = results[0];
    } else {
        logger.info(`Successfully processed ${results.length} review files`);
        // Create a combined summary
        const totalIssues = results.reduce((sum, result)=>sum + result.totalIssues, 0);
        const allIssues = results.flatMap((result)=>result.issues || []);
        analysisResult = {
            summary: `Combined analysis of ${results.length} review files. Total issues found: ${totalIssues}`,
            totalIssues,
            issues: allIssues
        };
        // Save combined results
        try {
            const combinedFilename = getTimestampedReviewFilename();
            const combinedPath = getOutputPath(outputDirectory, combinedFilename);
            const combinedContent = `# Combined Review Analysis Result\n\n` + `## Summary\n${analysisResult.summary}\n\n` + `## Total Issues Found\n${totalIssues}\n\n` + `## Files Processed\n${processedFiles.join('\n')}\n\n` + `## Issues\n\n${JSON.stringify(allIssues, null, 2)}\n\n` + `---\n\n*Combined analysis completed at ${new Date().toISOString()}*`;
            await safeWriteFile(combinedPath, combinedContent);
            logger.debug('Saved combined review analysis: %s', combinedPath);
        } catch (error) {
            logger.warn('Failed to save combined review analysis: %s', error.message);
        }
    }
    // Handle GitHub issue creation using the issues module
    const senditMode = ((_runConfig_review18 = runConfig.review) === null || _runConfig_review18 === void 0 ? void 0 : _runConfig_review18.sendit) || false;
    return await handleIssueCreation(analysisResult, senditMode);
};
const execute = async (runConfig)=>{
    try {
        return await executeInternal(runConfig);
    } catch (error) {
        const logger = getLogger();
        if (error instanceof ValidationError) {
            logger.error(`review failed: ${error.message}`);
            throw error;
        }
        if (error instanceof FileOperationError) {
            logger.error(`review failed: ${error.message}`);
            if (error.cause && typeof error.cause === 'object' && 'message' in error.cause) {
                logger.debug(`Caused by: ${error.cause.message}`);
            }
            throw error;
        }
        if (error instanceof CommandError) {
            logger.error(`review failed: ${error.message}`);
            if (error.cause && typeof error.cause === 'object' && 'message' in error.cause) {
                logger.debug(`Caused by: ${error.cause.message}`);
            }
            throw error;
        }
        // Unexpected errors
        logger.error(`review encountered unexpected error: ${error.message}`);
        throw error;
    }
};
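// Error handling summary: known error classes (ValidationError, FileOperationError and
// CommandError from @eldrforge/shared) are logged, their `cause` surfaced at debug level
// where present, and then rethrown unchanged so callers can match on the error class;
// anything else is reported as unexpected before being rethrown.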

export { PerformanceTimer, batchReadPackageJsonFiles, checkForFileDependencies, execute$1 as clean, collectAllDependencies, execute$3 as commit, findAllPackageJsonFiles, findPackagesByScope, isCleanNeeded, isTestNeeded, optimizePrecommitCommand, execute$2 as precommit, recordTestRun, execute as review, scanDirectoryForPackages };
//# sourceMappingURL=index.js.map