@entro314labs/ai-changelog-generator 3.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +801 -0
- package/LICENSE +21 -0
- package/README.md +393 -0
- package/ai-changelog-mcp.sh +93 -0
- package/ai-changelog.sh +103 -0
- package/bin/ai-changelog-dxt.js +35 -0
- package/bin/ai-changelog-mcp.js +34 -0
- package/bin/ai-changelog.js +18 -0
- package/package.json +135 -0
- package/src/ai-changelog-generator.js +258 -0
- package/src/application/orchestrators/changelog.orchestrator.js +730 -0
- package/src/application/services/application.service.js +301 -0
- package/src/cli.js +157 -0
- package/src/domains/ai/ai-analysis.service.js +486 -0
- package/src/domains/analysis/analysis.engine.js +445 -0
- package/src/domains/changelog/changelog.service.js +1761 -0
- package/src/domains/changelog/workspace-changelog.service.js +505 -0
- package/src/domains/git/git-repository.analyzer.js +588 -0
- package/src/domains/git/git.service.js +302 -0
- package/src/infrastructure/cli/cli.controller.js +517 -0
- package/src/infrastructure/config/configuration.manager.js +538 -0
- package/src/infrastructure/interactive/interactive-workflow.service.js +444 -0
- package/src/infrastructure/mcp/mcp-server.service.js +540 -0
- package/src/infrastructure/metrics/metrics.collector.js +362 -0
- package/src/infrastructure/providers/core/base-provider.js +184 -0
- package/src/infrastructure/providers/implementations/anthropic.js +329 -0
- package/src/infrastructure/providers/implementations/azure.js +296 -0
- package/src/infrastructure/providers/implementations/bedrock.js +393 -0
- package/src/infrastructure/providers/implementations/dummy.js +112 -0
- package/src/infrastructure/providers/implementations/google.js +320 -0
- package/src/infrastructure/providers/implementations/huggingface.js +301 -0
- package/src/infrastructure/providers/implementations/lmstudio.js +189 -0
- package/src/infrastructure/providers/implementations/mock.js +275 -0
- package/src/infrastructure/providers/implementations/ollama.js +151 -0
- package/src/infrastructure/providers/implementations/openai.js +273 -0
- package/src/infrastructure/providers/implementations/vertex.js +438 -0
- package/src/infrastructure/providers/provider-management.service.js +415 -0
- package/src/infrastructure/providers/provider-manager.service.js +363 -0
- package/src/infrastructure/providers/utils/base-provider-helpers.js +660 -0
- package/src/infrastructure/providers/utils/model-config.js +610 -0
- package/src/infrastructure/providers/utils/provider-utils.js +286 -0
- package/src/shared/constants/colors.js +370 -0
- package/src/shared/utils/cli-entry-utils.js +525 -0
- package/src/shared/utils/error-classes.js +423 -0
- package/src/shared/utils/json-utils.js +318 -0
- package/src/shared/utils/utils.js +1997 -0
- package/types/index.d.ts +464 -0
|
@@ -0,0 +1,486 @@
|
|
|
1
|
+
import { buildEnhancedPrompt, parseAIResponse, summarizeFileChanges } from '../../shared/utils/utils.js';
|
|
2
|
+
import colors from '../../shared/constants/colors.js';
|
|
3
|
+
|
|
4
|
+
export class AIAnalysisService {
  /**
   * Coordinates AI-backed analysis of commits, working-tree changes, branches,
   * and repository health, with rule-based fallbacks when no AI provider is
   * available or a call fails.
   *
   * @param {object|null} aiProvider - Provider adapter exposing isAvailable(),
   *   generateCompletion(), generateText(), selectOptimalModel(), and
   *   (optionally) getName()/getCapabilities().
   * @param {object} promptEngine - Supplies `systemPrompts`,
   *   buildRepositoryHealthPrompt(), and optimizeForProvider().
   * @param {object} tagger - Rule-based commit tagger exposing analyzeCommit().
   * @param {string} [analysisMode='standard'] - Key selecting a mode-specific
   *   system prompt (falls back to 'standard' when the key is unknown).
   */
  constructor(aiProvider, promptEngine, tagger, analysisMode = 'standard') {
    this.aiProvider = aiProvider;
    this.promptEngine = promptEngine;
    this.tagger = tagger;
    this.analysisMode = analysisMode;
    // Coerce to a real boolean; the original stored null/undefined when no
    // provider was supplied. Only ever read in truthy contexts, so compatible.
    this.hasAI = Boolean(aiProvider && aiProvider.isAvailable());
    this.metrics = {
      apiCalls: 0,
      ruleBasedFallbacks: 0,
      totalTokens: 0,
      // FIX: 'errors' was never initialized, so `this.metrics.errors++` in the
      // catch blocks below produced NaN on the first failure.
      errors: 0
    };
  }

  /**
   * Pick the best model for a commit, honoring an explicit override first.
   *
   * @param {object} commitAnalysis - Parsed commit (files, diffStats, subject,
   *   breaking, semanticAnalysis, ...).
   * @returns {Promise<string|null>} Model identifier, or null when AI is
   *   unavailable or selection fails (caller then uses the provider default).
   */
  async selectOptimalModel(commitAnalysis) {
    if (!this.hasAI) return null;

    // Check for model override first
    if (this.modelOverride) {
      console.log(`🎯 Using model override: ${this.modelOverride}`);
      return this.modelOverride;
    }

    const { files, diffStats, breaking, semanticAnalysis } = commitAnalysis;
    const filesCount = files?.length || 0;
    const linesChanged = (diffStats?.insertions || 0) + (diffStats?.deletions || 0);

    // Detect complex patterns that warrant a stronger (reasoning) model.
    const hasArchitecturalChanges = semanticAnalysis?.patterns?.includes('refactor') ||
      semanticAnalysis?.patterns?.includes('architecture') ||
      semanticAnalysis?.frameworks?.length > 2;

    try {
      const commitInfo = {
        files: filesCount,
        lines: linesChanged,
        additions: diffStats?.insertions || 0,
        deletions: diffStats?.deletions || 0,
        message: commitAnalysis.subject,
        breaking,
        complex: hasArchitecturalChanges
      };

      const optimalModel = await this.aiProvider.selectOptimalModel(commitInfo);

      if (optimalModel?.model) {
        if (optimalModel.capabilities?.reasoning) {
          console.log(colors.aiMessage('Using reasoning model for complex analysis'));
        }
        return optimalModel.model;
      }
    } catch (error) {
      // Selection is best-effort; a failure just means "use the default".
      console.warn(colors.warningMessage('Model selection failed, using default'));
    }

    return null;
  }

  /**
   * Generate an AI summary for a single commit; degrades to the rule-based
   * summary when AI is unavailable or the provider call fails.
   *
   * @param {object} commitAnalysis - Parsed commit data.
   * @param {string|null} [preSelectedModel=null] - Skip model selection when
   *   the caller already picked one.
   * @returns {Promise<object>} Parsed summary (summary/category/impact/...).
   */
  async generateAISummary(commitAnalysis, preSelectedModel = null) {
    if (!this.hasAI) {
      return this.generateRuleBasedSummary(commitAnalysis);
    }

    const selectedModel = preSelectedModel || await this.selectOptimalModel(commitAnalysis);

    try {
      const modelToUse = selectedModel || this.aiProvider?.modelConfig?.default || 'unknown';
      const filesCount = commitAnalysis.files?.length || 0;
      const linesChanged = (commitAnalysis.diffStats?.insertions || 0) + (commitAnalysis.diffStats?.deletions || 0);

      console.log(colors.infoMessage(`Selected model: ${colors.highlight(modelToUse)} for commit (${colors.number(filesCount)} files, ${colors.number(linesChanged)} lines)`));

      // NOTE: model availability validation (aiProvider.validateModelAvailability)
      // is intentionally skipped here to avoid token-limit issues; a failing
      // model surfaces through the catch below and falls back to rules.

      const prompt = buildEnhancedPrompt(commitAnalysis, this.analysisMode);
      const systemPrompt = this.promptEngine.systemPrompts.master;
      const modeSpecificPrompt = this.promptEngine.systemPrompts[this.analysisMode] || this.promptEngine.systemPrompts.standard;

      const optimizedPrompt = this.promptEngine.optimizeForProvider(
        prompt,
        this.aiProvider.getName ? this.aiProvider.getName() : 'unknown',
        this.aiProvider.getCapabilities ? this.aiProvider.getCapabilities() : {}
      );

      const messages = [
        {
          role: "system",
          content: `${systemPrompt}\n\n${modeSpecificPrompt}`
        },
        {
          role: "user",
          content: optimizedPrompt
        }
      ];

      this.metrics.apiCalls++;
      const response = await this.aiProvider.generateCompletion(messages, {
        model: modelToUse,
        max_tokens: 2000,
        temperature: 0.3
      });

      // Token accounting assumes OpenAI-style usage field names
      // (prompt_tokens/completion_tokens) — TODO confirm for all providers.
      if (response?.usage) {
        this.metrics.totalTokens += (response.usage.prompt_tokens || 0) + (response.usage.completion_tokens || 0);
      }

      const parsedResponse = parseAIResponse(response.content || response.text, commitAnalysis);
      return parsedResponse;

    } catch (error) {
      // Provide helpful error messages and guidance, then fall back to rules.
      const errorContext = this.getErrorContext(error);

      if (errorContext.isConnectionError) {
        console.warn(colors.warningMessage(`⚠️ AI provider connection failed: ${errorContext.message}`));
        if (errorContext.suggestions.length > 0) {
          console.warn(colors.infoMessage(`💡 Suggestions: ${errorContext.suggestions.join(', ')}`));
        }
      } else if (errorContext.isConfigurationError) {
        console.warn(colors.warningMessage(`⚠️ Configuration issue: ${errorContext.message}`));
        if (errorContext.suggestions.length > 0) {
          console.warn(colors.infoMessage(`💡 Try: ${errorContext.suggestions.join(', ')}`));
        }
      } else {
        console.warn(colors.warningMessage(`⚠️ AI analysis failed: ${error.message}`));
        console.warn(colors.infoMessage('💡 Falling back to pattern-based analysis'));
      }

      this.metrics.ruleBasedFallbacks++;
      return this.generateRuleBasedSummary(commitAnalysis);
    }
  }

  /**
   * Analyze a set of uncommitted/working-tree file changes for a changelog
   * entry. Falls back to rule-based categorization when AI is unavailable or
   * the call fails.
   *
   * @param {Array<{status: string, path: string}>} changes - Changed files.
   * @param {string} type - Change type label (e.g. 'staged', 'unstaged').
   * @param {string} [outputMode='console'] - Reserved; not used here.
   * @returns {Promise<{summary: string, category: string, impact: string, userFacing: boolean}>}
   */
  async analyzeChanges(changes, type, outputMode = 'console') {
    try {
      const changesSummary = summarizeFileChanges(changes);
      // (An unused `changesData` aggregate was removed here — only the prompt
      // below consumes the summary.)

      const basePrompt = `Analyze these git changes and provide a summary suitable for a changelog entry.
**CHANGE TYPE:** ${type}
**FILES:** ${changes.length} files changed
**CATEGORIES:** ${Object.keys(changesSummary.categories).join(', ')}
**CHANGES BY CATEGORY:**
${Object.entries(changesSummary.categories).map(([cat, files]) =>
    `${cat}: ${files.map(f => `${f.status} ${f.path}`).join(', ')}`
  ).join('\n')}
**ANALYSIS REQUIREMENTS:**
1. What is the primary purpose of these changes?
2. What category do they fall into (feature, fix, improvement, etc.)?
3. How would you describe the impact (critical, high, medium, low)?
4. Are these user-facing changes?`;

      if (!this.hasAI) {
        return this.analyzeChangesRuleBased(changes, type);
      }

      const messages = [
        {
          role: "system",
          content: this.promptEngine.systemPrompts.changesAnalysis || "You are an expert at analyzing code changes."
        },
        {
          role: "user",
          content: basePrompt
        }
      ];

      const response = await this.aiProvider.generateText(messages);
      return {
        summary: response.text,
        category: this.extractCategory(response.text),
        impact: this.extractImpact(response.text),
        userFacing: this.extractUserFacing(response.text)
      };

    } catch (error) {
      console.error(colors.errorMessage('Changes analysis failed:'), error.message);
      return this.analyzeChangesRuleBased(changes, type);
    }
  }

  /**
   * Rule-based commit summary used when AI is unavailable or fails.
   * Delegates classification to the intelligent tagger.
   *
   * @param {object} commitAnalysis - Needs subject, files, diffStats, importance.
   * @returns {{summary: string, category: string, impact: string, tags: string[], userFacing: boolean}}
   */
  generateRuleBasedSummary(commitAnalysis) {
    const { subject, files, diffStats, categories, importance } = commitAnalysis;

    // Use intelligent tagging for better rule-based analysis
    const analysis = this.tagger.analyzeCommit({
      message: subject,
      files: files.map(f => ({ path: f.filePath })),
      stats: diffStats
    });

    // FIX: guard `analysis.tags` before .includes() — the `tags:` line already
    // treats it as optional, but the original crashed when it was undefined.
    const tags = analysis.tags || [];

    return {
      summary: `${subject} (${files.length} files changed)`,
      category: analysis.categories[0] || 'other',
      impact: importance || 'medium',
      tags,
      userFacing: tags.includes('ui') || tags.includes('feature')
    };
  }

  /**
   * Rule-based fallback for analyzeChanges(): categorize by file path and
   * size of the change set.
   *
   * @param {Array<{path: string}>} changes
   * @param {string} type
   * @returns {{summary: string, category: string, impact: string, userFacing: boolean}}
   */
  analyzeChangesRuleBased(changes, type) {
    const categories = this.categorizeChanges(changes);
    // First key of the category map is treated as the primary category.
    const primaryCategory = Object.keys(categories)[0] || 'other';

    return {
      summary: `${type}: ${changes.length} files modified in ${primaryCategory}`,
      category: primaryCategory,
      impact: this.assessImpact(changes),
      userFacing: this.isUserFacing(changes)
    };
  }

  /**
   * Group changes by coarse file category (tests/documentation/configuration/
   * source/other).
   *
   * @param {Array<{path: string}>} changes
   * @returns {Object<string, Array>} category -> changes in that category.
   */
  categorizeChanges(changes) {
    const categories = {};
    changes.forEach(change => {
      const category = this.getFileCategory(change.path);
      if (!categories[category]) categories[category] = [];
      categories[category].push(change);
    });
    return categories;
  }

  /**
   * Classify a single file path. Order matters: tests and docs are detected
   * before the generic .js/source match.
   *
   * @param {string} filePath
   * @returns {string} One of 'tests'|'documentation'|'configuration'|'source'|'other'.
   */
  getFileCategory(filePath) {
    if (!filePath || typeof filePath !== 'string') return 'other';
    if (filePath.includes('/test/') || filePath.endsWith('.test.js')) return 'tests';
    if (filePath.includes('/doc/') || filePath.endsWith('.md')) return 'documentation';
    if (filePath.includes('/config/') || filePath.endsWith('.json')) return 'configuration';
    if (filePath.includes('/src/') || filePath.endsWith('.js')) return 'source';
    return 'other';
  }

  /**
   * Heuristic impact level based purely on the number of changed files.
   *
   * @param {Array} changes
   * @returns {string} 'high' (>20), 'medium' (>5), else 'low'.
   */
  assessImpact(changes) {
    if (changes.length > 20) return 'high';
    if (changes.length > 5) return 'medium';
    return 'low';
  }

  /**
   * True when any change touches a UI/component/page path.
   *
   * @param {Array<{path: string}>} changes
   * @returns {boolean}
   */
  isUserFacing(changes) {
    return changes.some(change =>
      change.path && typeof change.path === 'string' && (
        change.path.includes('/ui/') ||
        change.path.includes('/component/') ||
        change.path.includes('/page/')
      )
    );
  }

  /**
   * Extract the first known category keyword mentioned in free text.
   *
   * @param {string} text - AI response text.
   * @returns {string} Matched category or 'other'.
   */
  extractCategory(text) {
    if (!text || typeof text !== 'string') return 'other';
    const categories = ['feature', 'fix', 'improvement', 'refactor', 'docs', 'test'];
    const lowerText = text.toLowerCase();
    for (const category of categories) {
      if (lowerText.includes(category)) return category;
    }
    return 'other';
  }

  /**
   * Extract the first impact keyword mentioned in free text.
   *
   * @param {string} text - AI response text.
   * @returns {string} Matched impact or 'medium'.
   */
  extractImpact(text) {
    if (!text || typeof text !== 'string') return 'medium';
    const impacts = ['critical', 'high', 'medium', 'low'];
    const lowerText = text.toLowerCase();
    for (const impact of impacts) {
      if (lowerText.includes(impact)) return impact;
    }
    return 'medium';
  }

  /**
   * True when the text mentions users or UI (substring match, so e.g. "guide"
   * also matches 'ui' — kept for compatibility).
   *
   * @param {string} text - AI response text.
   * @returns {boolean}
   */
  extractUserFacing(text) {
    if (!text || typeof text !== 'string') return false;
    const lowerText = text.toLowerCase();
    return lowerText.includes('user') || lowerText.includes('ui');
  }

  /**
   * AI analysis of branch state (unmerged/dangling commits). Returns the raw
   * provider text, or an error string on failure (never throws).
   *
   * @param {Array} branches
   * @param {Array} unmergedCommits
   * @param {Array} danglingCommits
   * @returns {Promise<string>}
   */
  async getBranchesAIAnalysis(branches, unmergedCommits, danglingCommits) {
    try {
      // Use enhanced branch analysis prompt
      const basePrompt = this.promptEngine.buildRepositoryHealthPrompt({
        branches,
        unmerged: unmergedCommits,
        danglingCommits,
        analysisType: 'branches'
      }, this.analysisMode);

      const systemPrompt = this.promptEngine.systemPrompts.master;
      const modeSpecificPrompt = this.promptEngine.systemPrompts[this.analysisMode] || this.promptEngine.systemPrompts.standard;

      const optimizedPrompt = this.promptEngine.optimizeForProvider(
        basePrompt,
        this.aiProvider.getName ? this.aiProvider.getName() : 'unknown',
        this.aiProvider.getCapabilities ? this.aiProvider.getCapabilities() : {}
      );

      const response = await this.aiProvider.generateCompletion([
        { role: 'system', content: `${systemPrompt}\n\n${modeSpecificPrompt}` },
        { role: 'user', content: optimizedPrompt }
      ], { max_tokens: 400 });

      this.metrics.apiCalls++;
      return response.content;
    } catch (error) {
      this.metrics.errors++;
      return `AI analysis failed: ${error.message}`;
    }
  }

  /**
   * AI analysis of overall repository health. Returns the raw provider text,
   * or an error string on failure (never throws).
   *
   * @param {object} comprehensiveData - Aggregated repository statistics.
   * @returns {Promise<string>}
   */
  async getRepositoryAIAnalysis(comprehensiveData) {
    try {
      // Use enhanced repository health analysis prompt
      const healthData = {
        statistics: comprehensiveData.statistics,
        branches: comprehensiveData.branches,
        workingDirectory: comprehensiveData.workingDirectory,
        unmergedCommits: comprehensiveData.unmergedCommits,
        danglingCommits: comprehensiveData.danglingCommits,
        commitQuality: comprehensiveData.commitQuality || {},
        security: comprehensiveData.security || {}
      };

      const basePrompt = this.promptEngine.buildRepositoryHealthPrompt(healthData, this.analysisMode);

      const systemPrompt = this.promptEngine.systemPrompts.master;
      const modeSpecificPrompt = this.promptEngine.systemPrompts[this.analysisMode] || this.promptEngine.systemPrompts.standard;

      const optimizedPrompt = this.promptEngine.optimizeForProvider(
        basePrompt,
        this.aiProvider.getName ? this.aiProvider.getName() : 'unknown',
        this.aiProvider.getCapabilities ? this.aiProvider.getCapabilities() : {}
      );

      const response = await this.aiProvider.generateCompletion([
        { role: 'system', content: `${systemPrompt}\n\n${modeSpecificPrompt}` },
        { role: 'user', content: optimizedPrompt }
      ], { max_tokens: 500 });

      this.metrics.apiCalls++;
      return response.content;
    } catch (error) {
      this.metrics.errors++;
      return `AI analysis failed: ${error.message}`;
    }
  }

  /**
   * AI recommendations for untracked files (track vs .gitignore, security
   * concerns). Returns the raw provider text, or an error string on failure.
   *
   * @param {Object<string, string[]>} categories - category -> file names.
   * @returns {Promise<string>}
   */
  async getUntrackedFilesAIAnalysis(categories) {
    try {
      const prompt = `Analyze these untracked files and provide recommendations:

Files by category:
${Object.entries(categories).map(([cat, files]) =>
    `${cat}: ${files.length} files (${files.slice(0, 5).join(', ')}${files.length > 5 ? '...' : ''})`
  ).join('\n')}

Provide analysis on:
1. Which files should be tracked in git?
2. Which files should be added to .gitignore?
3. Any security concerns (config files, secrets)?
4. Organizational recommendations?

Be concise and actionable.`;

      const response = await this.aiProvider.generateCompletion([{
        role: 'user',
        content: prompt
      }], { max_tokens: 400 });

      this.metrics.apiCalls++;
      return response.content;
    } catch (error) {
      this.metrics.errors++;
      return `AI analysis failed: ${error.message}`;
    }
  }

  /**
   * Classify a provider error into connection/configuration/generic buckets
   * with actionable suggestions, by substring-matching the message.
   *
   * @param {Error} error
   * @returns {{isConnectionError?: boolean, isConfigurationError?: boolean, message: string, suggestions: string[]}}
   */
  getErrorContext(error) {
    const errorMessage = error.message.toLowerCase();

    // Connection errors
    if (errorMessage.includes('fetch failed') ||
        errorMessage.includes('connection refused') ||
        errorMessage.includes('unreachable') ||
        errorMessage.includes('timeout')) {
      return {
        isConnectionError: true,
        message: 'Cannot connect to AI provider',
        suggestions: [
          'Check internet connection',
          'Verify provider service is running',
          'Check firewall settings'
        ]
      };
    }

    // Authentication errors
    if (errorMessage.includes('api key') ||
        errorMessage.includes('401') ||
        errorMessage.includes('unauthorized') ||
        errorMessage.includes('invalid key')) {
      return {
        isConfigurationError: true,
        message: 'Invalid or missing API key',
        suggestions: [
          'Check API key configuration in .env.local',
          'Verify API key is valid and active',
          'Run `ai-changelog init` to reconfigure'
        ]
      };
    }

    // Model availability errors
    if (errorMessage.includes('model') &&
        (errorMessage.includes('not found') || errorMessage.includes('unavailable'))) {
      return {
        isConfigurationError: true,
        message: 'Model not available',
        suggestions: [
          'Try a different model',
          'Check provider model list',
          'Update configuration'
        ]
      };
    }

    // Rate limiting (treated as a connection-class error: retryable)
    if (errorMessage.includes('rate limit') || errorMessage.includes('429')) {
      return {
        isConnectionError: true,
        message: 'Rate limit exceeded',
        suggestions: [
          'Wait before retrying',
          'Upgrade API plan',
          'Use a different provider'
        ]
      };
    }

    // Generic error
    return {
      isConnectionError: false,
      isConfigurationError: false,
      message: error.message,
      suggestions: ['Check provider configuration', 'Try again later']
    };
  }

  /**
   * Force a specific model for all subsequent analyses.
   * @param {string} model
   */
  setModelOverride(model) {
    this.modelOverride = model;
  }

  /** @returns {{apiCalls: number, ruleBasedFallbacks: number, totalTokens: number, errors: number}} */
  getMetrics() {
    return this.metrics;
  }

  /** Reset all counters to zero (shape must match the constructor's). */
  resetMetrics() {
    this.metrics = {
      apiCalls: 0,
      ruleBasedFallbacks: 0,
      totalTokens: 0,
      // FIX: keep 'errors' in the reset shape too (see constructor).
      errors: 0
    };
  }
}
|