@entro314labs/ai-changelog-generator 3.0.5 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +383 -785
  2. package/README.md +30 -3
  3. package/ai-changelog-mcp.sh +0 -0
  4. package/ai-changelog.sh +0 -0
  5. package/bin/ai-changelog-dxt.js +9 -9
  6. package/bin/ai-changelog-mcp.js +19 -17
  7. package/bin/ai-changelog.js +6 -6
  8. package/package.json +84 -52
  9. package/src/ai-changelog-generator.js +83 -81
  10. package/src/application/orchestrators/changelog.orchestrator.js +1040 -296
  11. package/src/application/services/application.service.js +145 -123
  12. package/src/cli.js +76 -57
  13. package/src/domains/ai/ai-analysis.service.js +289 -209
  14. package/src/domains/analysis/analysis.engine.js +253 -193
  15. package/src/domains/changelog/changelog.service.js +1062 -784
  16. package/src/domains/changelog/workspace-changelog.service.js +420 -249
  17. package/src/domains/git/git-repository.analyzer.js +348 -258
  18. package/src/domains/git/git.service.js +132 -112
  19. package/src/infrastructure/cli/cli.controller.js +415 -247
  20. package/src/infrastructure/config/configuration.manager.js +220 -190
  21. package/src/infrastructure/interactive/interactive-staging.service.js +332 -0
  22. package/src/infrastructure/interactive/interactive-workflow.service.js +200 -159
  23. package/src/infrastructure/mcp/mcp-server.service.js +208 -207
  24. package/src/infrastructure/metrics/metrics.collector.js +140 -123
  25. package/src/infrastructure/providers/core/base-provider.js +87 -40
  26. package/src/infrastructure/providers/implementations/anthropic.js +101 -99
  27. package/src/infrastructure/providers/implementations/azure.js +124 -101
  28. package/src/infrastructure/providers/implementations/bedrock.js +136 -126
  29. package/src/infrastructure/providers/implementations/dummy.js +23 -23
  30. package/src/infrastructure/providers/implementations/google.js +123 -114
  31. package/src/infrastructure/providers/implementations/huggingface.js +94 -87
  32. package/src/infrastructure/providers/implementations/lmstudio.js +75 -60
  33. package/src/infrastructure/providers/implementations/mock.js +69 -73
  34. package/src/infrastructure/providers/implementations/ollama.js +89 -66
  35. package/src/infrastructure/providers/implementations/openai.js +88 -89
  36. package/src/infrastructure/providers/implementations/vertex.js +227 -197
  37. package/src/infrastructure/providers/provider-management.service.js +245 -207
  38. package/src/infrastructure/providers/provider-manager.service.js +145 -125
  39. package/src/infrastructure/providers/utils/base-provider-helpers.js +308 -302
  40. package/src/infrastructure/providers/utils/model-config.js +220 -195
  41. package/src/infrastructure/providers/utils/provider-utils.js +105 -100
  42. package/src/infrastructure/validation/commit-message-validation.service.js +556 -0
  43. package/src/shared/constants/colors.js +467 -172
  44. package/src/shared/utils/cli-demo.js +285 -0
  45. package/src/shared/utils/cli-entry-utils.js +257 -249
  46. package/src/shared/utils/cli-ui.js +447 -0
  47. package/src/shared/utils/diff-processor.js +513 -0
  48. package/src/shared/utils/error-classes.js +125 -156
  49. package/src/shared/utils/json-utils.js +93 -89
  50. package/src/shared/utils/utils.js +1299 -775
  51. package/types/index.d.ts +353 -344
@@ -1,37 +1,44 @@
- import { buildEnhancedPrompt, parseAIResponse, summarizeFileChanges } from '../../shared/utils/utils.js';
- import colors from '../../shared/constants/colors.js';
+ import colors from '../../shared/constants/colors.js'
+ import {
+ buildEnhancedPrompt,
+ parseAIResponse,
+ summarizeFileChanges,
+ } from '../../shared/utils/utils.js'

  export class AIAnalysisService {
  constructor(aiProvider, promptEngine, tagger, analysisMode = 'standard') {
- this.aiProvider = aiProvider;
- this.promptEngine = promptEngine;
- this.tagger = tagger;
- this.analysisMode = analysisMode;
- this.hasAI = aiProvider && aiProvider.isAvailable();
+ this.aiProvider = aiProvider
+ this.promptEngine = promptEngine
+ this.tagger = tagger
+ this.analysisMode = analysisMode
+ this.hasAI = aiProvider?.isAvailable()
  this.metrics = {
  apiCalls: 0,
  ruleBasedFallbacks: 0,
- totalTokens: 0
- };
+ totalTokens: 0,
+ }
  }

  async selectOptimalModel(commitAnalysis) {
- if (!this.hasAI) return null;
+ if (!this.hasAI) {
+ return null
+ }

  // Check for model override first
  if (this.modelOverride) {
- console.log(`🎯 Using model override: ${this.modelOverride}`);
- return this.modelOverride;
+ console.log(`🎯 Using model override: ${this.modelOverride}`)
+ return this.modelOverride
  }

- const { files, diffStats, breaking, semanticAnalysis } = commitAnalysis;
- const filesCount = files?.length || 0;
- const linesChanged = (diffStats?.insertions || 0) + (diffStats?.deletions || 0);
+ const { files, diffStats, breaking, semanticAnalysis } = commitAnalysis
+ const filesCount = files?.length || 0
+ const linesChanged = (diffStats?.insertions || 0) + (diffStats?.deletions || 0)

  // Detect complex patterns
- const hasArchitecturalChanges = semanticAnalysis?.patterns?.includes('refactor') ||
- semanticAnalysis?.patterns?.includes('architecture') ||
- semanticAnalysis?.frameworks?.length > 2;
+ const hasArchitecturalChanges =
+ semanticAnalysis?.patterns?.includes('refactor') ||
+ semanticAnalysis?.patterns?.includes('architecture') ||
+ semanticAnalysis?.frameworks?.length > 2

  try {
  const commitInfo = {
@@ -41,37 +48,42 @@ export class AIAnalysisService {
  deletions: diffStats?.deletions || 0,
  message: commitAnalysis.subject,
  breaking,
- complex: hasArchitecturalChanges
- };
+ complex: hasArchitecturalChanges,
+ }

- const optimalModel = await this.aiProvider.selectOptimalModel(commitInfo);
+ const optimalModel = await this.aiProvider.selectOptimalModel(commitInfo)

  if (optimalModel?.model) {
  if (optimalModel.capabilities?.reasoning) {
- console.log(colors.aiMessage('Using reasoning model for complex analysis'));
+ console.log(colors.aiMessage('Using reasoning model for complex analysis'))
  }
- return optimalModel.model;
+ return optimalModel.model
  }
- } catch (error) {
- console.warn(colors.warningMessage('Model selection failed, using default'));
+ } catch (_error) {
+ console.warn(colors.warningMessage('Model selection failed, using default'))
  }

- return null;
+ return null
  }

  async generateAISummary(commitAnalysis, preSelectedModel = null) {
  if (!this.hasAI) {
- return this.generateRuleBasedSummary(commitAnalysis);
+ return this.generateRuleBasedSummary(commitAnalysis)
  }

- const selectedModel = preSelectedModel || await this.selectOptimalModel(commitAnalysis);
+ const selectedModel = preSelectedModel || (await this.selectOptimalModel(commitAnalysis))

  try {
- const modelToUse = selectedModel || this.aiProvider?.modelConfig?.default || 'unknown';
- const filesCount = commitAnalysis.files?.length || 0;
- const linesChanged = (commitAnalysis.diffStats?.insertions || 0) + (commitAnalysis.diffStats?.deletions || 0);
-
- console.log(colors.infoMessage(`Selected model: ${colors.highlight(modelToUse)} for commit (${colors.number(filesCount)} files, ${colors.number(linesChanged)} lines)`));
+ const modelToUse = selectedModel || this.aiProvider?.modelConfig?.default || 'unknown'
+ const filesCount = commitAnalysis.files?.length || 0
+ const linesChanged =
+ (commitAnalysis.diffStats?.insertions || 0) + (commitAnalysis.diffStats?.deletions || 0)
+
+ console.log(
+ colors.infoMessage(
+ `Selected model: ${colors.highlight(modelToUse)} for commit (${colors.number(filesCount)} files, ${colors.number(linesChanged)} lines)`
+ )
+ )

  // Skip model validation for now to avoid token limit issues
  // const modelCheck = await this.aiProvider.validateModelAvailability(modelToUse);
@@ -87,246 +99,300 @@ export class AIAnalysisService {
  // return this.generateRuleBasedSummary(commitAnalysis);
  // }

- const prompt = buildEnhancedPrompt(commitAnalysis, this.analysisMode);
- const systemPrompt = this.promptEngine.systemPrompts.master;
- const modeSpecificPrompt = this.promptEngine.systemPrompts[this.analysisMode] || this.promptEngine.systemPrompts.standard;
+ const prompt = buildEnhancedPrompt(commitAnalysis, this.analysisMode)
+ const systemPrompt = this.promptEngine.systemPrompts.master
+ const modeSpecificPrompt =
+ this.promptEngine.systemPrompts[this.analysisMode] ||
+ this.promptEngine.systemPrompts.standard

  const optimizedPrompt = this.promptEngine.optimizeForProvider(
  prompt,
  this.aiProvider.getName ? this.aiProvider.getName() : 'unknown',
  this.aiProvider.getCapabilities ? this.aiProvider.getCapabilities() : {}
- );
+ )

  const messages = [
  {
- role: "system",
- content: `${systemPrompt}\n\n${modeSpecificPrompt}`
+ role: 'system',
+ content: `${systemPrompt}\n\n${modeSpecificPrompt}`,
  },
  {
- role: "user",
- content: optimizedPrompt
- }
- ];
+ role: 'user',
+ content: optimizedPrompt,
+ },
+ ]
+
+ this.metrics.apiCalls++
+
+ // Set token limits based on analysis mode and commit complexity
+ let maxTokens = 2000 // Default
+ if (this.analysisMode === 'enterprise') {
+ maxTokens = 4000
+ } else if (this.analysisMode === 'detailed') {
+ maxTokens = 3000
+ }
+
+ // Increase token limit for very large commits
+ if (filesCount > 50 || linesChanged > 10000) {
+ maxTokens = Math.min(maxTokens + 2000, 8000)
+ }

- this.metrics.apiCalls++;
  const response = await this.aiProvider.generateCompletion(messages, {
  model: modelToUse,
- max_tokens: 2000,
- temperature: 0.3
- });
+ max_tokens: maxTokens,
+ temperature: 0.3,
+ })

  if (response?.usage) {
- this.metrics.totalTokens += (response.usage.prompt_tokens || 0) + (response.usage.completion_tokens || 0);
+ this.metrics.totalTokens +=
+ (response.usage.prompt_tokens || 0) + (response.usage.completion_tokens || 0)
  }

- const parsedResponse = parseAIResponse(response.content || response.text, commitAnalysis);
- return parsedResponse;
-
+ const parsedResponse = parseAIResponse(response.content || response.text, commitAnalysis)
+ return parsedResponse
  } catch (error) {
  // Provide helpful error messages and guidance
- const errorContext = this.getErrorContext(error);
+ const errorContext = this.getErrorContext(error)

  if (errorContext.isConnectionError) {
- console.warn(colors.warningMessage(`⚠️ AI provider connection failed: ${errorContext.message}`));
+ console.warn(
+ colors.warningMessage(`⚠️ AI provider connection failed: ${errorContext.message}`)
+ )
  if (errorContext.suggestions.length > 0) {
- console.warn(colors.infoMessage(`💡 Suggestions: ${errorContext.suggestions.join(', ')}`));
+ console.warn(colors.infoMessage(`💡 Suggestions: ${errorContext.suggestions.join(', ')}`))
  }
  } else if (errorContext.isConfigurationError) {
- console.warn(colors.warningMessage(`⚠️ Configuration issue: ${errorContext.message}`));
+ console.warn(colors.warningMessage(`⚠️ Configuration issue: ${errorContext.message}`))
  if (errorContext.suggestions.length > 0) {
- console.warn(colors.infoMessage(`💡 Try: ${errorContext.suggestions.join(', ')}`));
+ console.warn(colors.infoMessage(`💡 Try: ${errorContext.suggestions.join(', ')}`))
  }
  } else {
- console.warn(colors.warningMessage(`⚠️ AI analysis failed: ${error.message}`));
- console.warn(colors.infoMessage('💡 Falling back to pattern-based analysis'));
+ console.warn(colors.warningMessage(`⚠️ AI analysis failed: ${error.message}`))
+ console.warn(colors.infoMessage('💡 Falling back to pattern-based analysis'))
  }

- this.metrics.ruleBasedFallbacks++;
- return this.generateRuleBasedSummary(commitAnalysis);
+ this.metrics.ruleBasedFallbacks++
+ return this.generateRuleBasedSummary(commitAnalysis)
  }
  }

- async analyzeChanges(changes, type, outputMode = 'console') {
+ async analyzeChanges(changes, type, _outputMode = 'console') {
  try {
- const changesSummary = summarizeFileChanges(changes);
+ const changesSummary = summarizeFileChanges(changes)

- const changesData = {
+ const _changesData = {
  changeType: type,
  totalFiles: changes.length,
  categories: changesSummary.categories,
  changesByCategory: Object.entries(changesSummary.categories).map(([cat, files]) => ({
  category: cat,
- files: files.map(f => ({ status: f.status, path: f.path }))
- }))
- };
+ files: files.map((f) => ({ status: f.status, path: f.path })),
+ })),
+ }

  const basePrompt = `Analyze these git changes and provide a summary suitable for a changelog entry.
  **CHANGE TYPE:** ${type}
  **FILES:** ${changes.length} files changed
  **CATEGORIES:** ${Object.keys(changesSummary.categories).join(', ')}
  **CHANGES BY CATEGORY:**
- ${Object.entries(changesSummary.categories).map(([cat, files]) =>
- `${cat}: ${files.map(f => `${f.status} ${f.path}`).join(', ')}`
- ).join('\n')}
+ ${Object.entries(changesSummary.categories)
+ .map(([cat, files]) => `${cat}: ${files.map((f) => `${f.status} ${f.path}`).join(', ')}`)
+ .join('\n')}
  **ANALYSIS REQUIREMENTS:**
  1. What is the primary purpose of these changes?
  2. What category do they fall into (feature, fix, improvement, etc.)?
  3. How would you describe the impact (critical, high, medium, low)?
- 4. Are these user-facing changes?`;
+ 4. Are these user-facing changes?`

  if (!this.hasAI) {
- return this.analyzeChangesRuleBased(changes, type);
+ return this.analyzeChangesRuleBased(changes, type)
  }

  const messages = [
  {
- role: "system",
- content: this.promptEngine.systemPrompts.changesAnalysis || "You are an expert at analyzing code changes."
+ role: 'system',
+ content:
+ this.promptEngine.systemPrompts.changesAnalysis ||
+ 'You are an expert at analyzing code changes.',
  },
  {
- role: "user",
- content: basePrompt
- }
- ];
+ role: 'user',
+ content: basePrompt,
+ },
+ ]

- const response = await this.aiProvider.generateText(messages);
+ const response = await this.aiProvider.generateText(messages)
  return {
  summary: response.text,
  category: this.extractCategory(response.text),
  impact: this.extractImpact(response.text),
- userFacing: this.extractUserFacing(response.text)
- };
-
+ userFacing: this.extractUserFacing(response.text),
+ }
  } catch (error) {
- console.error(colors.errorMessage('Changes analysis failed:'), error.message);
- return this.analyzeChangesRuleBased(changes, type);
+ console.error(colors.errorMessage('Changes analysis failed:'), error.message)
+ return this.analyzeChangesRuleBased(changes, type)
  }
  }

  generateRuleBasedSummary(commitAnalysis) {
- const { subject, files, diffStats, categories, importance } = commitAnalysis;
+ const { subject, files, diffStats, importance } = commitAnalysis

  // Use intelligent tagging for better rule-based analysis
  const analysis = this.tagger.analyzeCommit({
  message: subject,
- files: files.map(f => ({ path: f.filePath })),
- stats: diffStats
- });
+ files: files.map((f) => ({ path: f.filePath })),
+ stats: diffStats,
+ })

  return {
  summary: `${subject} (${files.length} files changed)`,
  category: analysis.categories[0] || 'other',
  impact: importance || 'medium',
  tags: analysis.tags || [],
- userFacing: analysis.tags.includes('ui') || analysis.tags.includes('feature')
- };
+ userFacing: analysis.tags.includes('ui') || analysis.tags.includes('feature'),
+ }
  }

  analyzeChangesRuleBased(changes, type) {
- const categories = this.categorizeChanges(changes);
- const primaryCategory = Object.keys(categories)[0] || 'other';
+ const categories = this.categorizeChanges(changes)
+ const primaryCategory = Object.keys(categories)[0] || 'other'

  return {
  summary: `${type}: ${changes.length} files modified in ${primaryCategory}`,
  category: primaryCategory,
  impact: this.assessImpact(changes),
- userFacing: this.isUserFacing(changes)
- };
+ userFacing: this.isUserFacing(changes),
+ }
  }

  categorizeChanges(changes) {
- const categories = {};
- changes.forEach(change => {
- const category = this.getFileCategory(change.path);
- if (!categories[category]) categories[category] = [];
- categories[category].push(change);
- });
- return categories;
+ const categories = {}
+ changes.forEach((change) => {
+ const category = this.getFileCategory(change.path)
+ if (!categories[category]) {
+ categories[category] = []
+ }
+ categories[category].push(change)
+ })
+ return categories
  }

  getFileCategory(filePath) {
- if (!filePath || typeof filePath !== 'string') return 'other';
- if (filePath.includes('/test/') || filePath.endsWith('.test.js')) return 'tests';
- if (filePath.includes('/doc/') || filePath.endsWith('.md')) return 'documentation';
- if (filePath.includes('/config/') || filePath.endsWith('.json')) return 'configuration';
- if (filePath.includes('/src/') || filePath.endsWith('.js')) return 'source';
- return 'other';
+ if (!filePath || typeof filePath !== 'string') {
+ return 'other'
+ }
+ if (filePath.includes('/test/') || filePath.endsWith('.test.js')) {
+ return 'tests'
+ }
+ if (filePath.includes('/doc/') || filePath.endsWith('.md')) {
+ return 'documentation'
+ }
+ if (filePath.includes('/config/') || filePath.endsWith('.json')) {
+ return 'configuration'
+ }
+ if (filePath.includes('/src/') || filePath.endsWith('.js')) {
+ return 'source'
+ }
+ return 'other'
  }

  assessImpact(changes) {
- if (changes.length > 20) return 'high';
- if (changes.length > 5) return 'medium';
- return 'low';
+ if (changes.length > 20) {
+ return 'high'
+ }
+ if (changes.length > 5) {
+ return 'medium'
+ }
+ return 'low'
  }

  isUserFacing(changes) {
- return changes.some(change =>
- change.path && typeof change.path === 'string' && (
- change.path.includes('/ui/') ||
- change.path.includes('/component/') ||
- change.path.includes('/page/')
- )
- );
+ return changes.some(
+ (change) =>
+ change.path &&
+ typeof change.path === 'string' &&
+ (change.path.includes('/ui/') ||
+ change.path.includes('/component/') ||
+ change.path.includes('/page/'))
+ )
  }

  extractCategory(text) {
- if (!text || typeof text !== 'string') return 'other';
- const categories = ['feature', 'fix', 'improvement', 'refactor', 'docs', 'test'];
- const lowerText = text.toLowerCase();
+ if (!text || typeof text !== 'string') {
+ return 'other'
+ }
+ const categories = ['feature', 'fix', 'improvement', 'refactor', 'docs', 'test']
+ const lowerText = text.toLowerCase()
  for (const category of categories) {
- if (lowerText.includes(category)) return category;
+ if (lowerText.includes(category)) {
+ return category
+ }
  }
- return 'other';
+ return 'other'
  }

  extractImpact(text) {
- if (!text || typeof text !== 'string') return 'medium';
- const impacts = ['critical', 'high', 'medium', 'low'];
- const lowerText = text.toLowerCase();
+ if (!text || typeof text !== 'string') {
+ return 'medium'
+ }
+ const impacts = ['critical', 'high', 'medium', 'low']
+ const lowerText = text.toLowerCase()
  for (const impact of impacts) {
- if (lowerText.includes(impact)) return impact;
+ if (lowerText.includes(impact)) {
+ return impact
+ }
  }
- return 'medium';
+ return 'medium'
  }

  extractUserFacing(text) {
- if (!text || typeof text !== 'string') return false;
- const lowerText = text.toLowerCase();
- return lowerText.includes('user') || lowerText.includes('ui');
+ if (!text || typeof text !== 'string') {
+ return false
+ }
+ const lowerText = text.toLowerCase()
+ return lowerText.includes('user') || lowerText.includes('ui')
  }

  // Missing AI analysis methods from original class
  async getBranchesAIAnalysis(branches, unmergedCommits, danglingCommits) {
  try {
  // Use enhanced branch analysis prompt
- const branchData = { branches, unmergedCommits, danglingCommits };
+ const _branchData = { branches, unmergedCommits, danglingCommits }

- const basePrompt = this.promptEngine.buildRepositoryHealthPrompt({
- branches,
- unmerged: unmergedCommits,
- danglingCommits,
- analysisType: 'branches'
- }, this.analysisMode);
+ const basePrompt = this.promptEngine.buildRepositoryHealthPrompt(
+ {
+ branches,
+ unmerged: unmergedCommits,
+ danglingCommits,
+ analysisType: 'branches',
+ },
+ this.analysisMode
+ )

- const systemPrompt = this.promptEngine.systemPrompts.master;
- const modeSpecificPrompt = this.promptEngine.systemPrompts[this.analysisMode] || this.promptEngine.systemPrompts.standard;
+ const systemPrompt = this.promptEngine.systemPrompts.master
+ const modeSpecificPrompt =
+ this.promptEngine.systemPrompts[this.analysisMode] ||
+ this.promptEngine.systemPrompts.standard

  const optimizedPrompt = this.promptEngine.optimizeForProvider(
  basePrompt,
  this.aiProvider.getName ? this.aiProvider.getName() : 'unknown',
  this.aiProvider.getCapabilities ? this.aiProvider.getCapabilities() : {}
- );
+ )

- const response = await this.aiProvider.generateCompletion([
- { role: 'system', content: `${systemPrompt}\n\n${modeSpecificPrompt}` },
- { role: 'user', content: optimizedPrompt }
- ], { max_tokens: 400 });
+ const response = await this.aiProvider.generateCompletion(
+ [
+ { role: 'system', content: `${systemPrompt}\n\n${modeSpecificPrompt}` },
+ { role: 'user', content: optimizedPrompt },
+ ],
+ { max_tokens: 400 }
+ )

- this.metrics.apiCalls++;
- return response.content;
+ this.metrics.apiCalls++
+ return response.content
  } catch (error) {
- this.metrics.errors++;
- return `AI analysis failed: ${error.message}`;
+ this.metrics.errors++
+ return `AI analysis failed: ${error.message}`
  }
  }

@@ -340,30 +406,38 @@ ${Object.entries(changesSummary.categories).map(([cat, files]) =>
  unmergedCommits: comprehensiveData.unmergedCommits,
  danglingCommits: comprehensiveData.danglingCommits,
  commitQuality: comprehensiveData.commitQuality || {},
- security: comprehensiveData.security || {}
- };
+ security: comprehensiveData.security || {},
+ }

- const basePrompt = this.promptEngine.buildRepositoryHealthPrompt(healthData, this.analysisMode);
+ const basePrompt = this.promptEngine.buildRepositoryHealthPrompt(
+ healthData,
+ this.analysisMode
+ )

- const systemPrompt = this.promptEngine.systemPrompts.master;
- const modeSpecificPrompt = this.promptEngine.systemPrompts[this.analysisMode] || this.promptEngine.systemPrompts.standard;
+ const systemPrompt = this.promptEngine.systemPrompts.master
+ const modeSpecificPrompt =
+ this.promptEngine.systemPrompts[this.analysisMode] ||
+ this.promptEngine.systemPrompts.standard

  const optimizedPrompt = this.promptEngine.optimizeForProvider(
  basePrompt,
  this.aiProvider.getName ? this.aiProvider.getName() : 'unknown',
  this.aiProvider.getCapabilities ? this.aiProvider.getCapabilities() : {}
- );
+ )

- const response = await this.aiProvider.generateCompletion([
- { role: 'system', content: `${systemPrompt}\n\n${modeSpecificPrompt}` },
- { role: 'user', content: optimizedPrompt }
- ], { max_tokens: 500 });
+ const response = await this.aiProvider.generateCompletion(
+ [
+ { role: 'system', content: `${systemPrompt}\n\n${modeSpecificPrompt}` },
+ { role: 'user', content: optimizedPrompt },
+ ],
+ { max_tokens: 500 }
+ )

- this.metrics.apiCalls++;
- return response.content;
+ this.metrics.apiCalls++
+ return response.content
  } catch (error) {
- this.metrics.errors++;
- return `AI analysis failed: ${error.message}`;
+ this.metrics.errors++
+ return `AI analysis failed: ${error.message}`
  }
  }

@@ -372,9 +446,12 @@ ${Object.entries(changesSummary.categories).map(([cat, files]) =>
  const prompt = `Analyze these untracked files and provide recommendations:

  Files by category:
- ${Object.entries(categories).map(([cat, files]) =>
- `${cat}: ${files.length} files (${files.slice(0, 5).join(', ')}${files.length > 5 ? '...' : ''})`
- ).join('\n')}
+ ${Object.entries(categories)
+ .map(
+ ([cat, files]) =>
+ `${cat}: ${files.length} files (${files.slice(0, 5).join(', ')}${files.length > 5 ? '...' : ''})`
+ )
+ .join('\n')}

  Provide analysis on:
  1. Which files should be tracked in git?
@@ -382,68 +459,75 @@ Provide analysis on:
  3. Any security concerns (config files, secrets)?
  4. Organizational recommendations?

- Be concise and actionable.`;
+ Be concise and actionable.`

- const response = await this.aiProvider.generateCompletion([{
- role: 'user',
- content: prompt
- }], { max_tokens: 400 });
+ const response = await this.aiProvider.generateCompletion(
+ [
+ {
+ role: 'user',
+ content: prompt,
+ },
+ ],
+ { max_tokens: 400 }
+ )

- this.metrics.apiCalls++;
- return response.content;
+ this.metrics.apiCalls++
+ return response.content
  } catch (error) {
- this.metrics.errors++;
- return `AI analysis failed: ${error.message}`;
+ this.metrics.errors++
+ return `AI analysis failed: ${error.message}`
  }
  }

  getErrorContext(error) {
- const errorMessage = error.message.toLowerCase();
+ const errorMessage = error.message.toLowerCase()

  // Connection errors
- if (errorMessage.includes('fetch failed') ||
- errorMessage.includes('connection refused') ||
- errorMessage.includes('unreachable') ||
- errorMessage.includes('timeout')) {
+ if (
+ errorMessage.includes('fetch failed') ||
+ errorMessage.includes('connection refused') ||
+ errorMessage.includes('unreachable') ||
+ errorMessage.includes('timeout')
+ ) {
  return {
  isConnectionError: true,
  message: 'Cannot connect to AI provider',
  suggestions: [
  'Check internet connection',
  'Verify provider service is running',
- 'Check firewall settings'
- ]
- };
+ 'Check firewall settings',
+ ],
+ }
  }

  // Authentication errors
- if (errorMessage.includes('api key') ||
- errorMessage.includes('401') ||
- errorMessage.includes('unauthorized') ||
- errorMessage.includes('invalid key')) {
+ if (
+ errorMessage.includes('api key') ||
+ errorMessage.includes('401') ||
+ errorMessage.includes('unauthorized') ||
+ errorMessage.includes('invalid key')
+ ) {
  return {
  isConfigurationError: true,
  message: 'Invalid or missing API key',
  suggestions: [
  'Check API key configuration in .env.local',
  'Verify API key is valid and active',
- 'Run `ai-changelog init` to reconfigure'
- ]
- };
+ 'Run `ai-changelog init` to reconfigure',
+ ],
+ }
  }

  // Model availability errors
- if (errorMessage.includes('model') &&
- (errorMessage.includes('not found') || errorMessage.includes('unavailable'))) {
+ if (
+ errorMessage.includes('model') &&
+ (errorMessage.includes('not found') || errorMessage.includes('unavailable'))
+ ) {
  return {
  isConfigurationError: true,
  message: 'Model not available',
- suggestions: [
- 'Try a different model',
- 'Check provider model list',
- 'Update configuration'
- ]
- };
+ suggestions: ['Try a different model', 'Check provider model list', 'Update configuration'],
+ }
  }

  // Rate limiting
@@ -451,12 +535,8 @@ Be concise and actionable.`;
  return {
  isConnectionError: true,
  message: 'Rate limit exceeded',
- suggestions: [
- 'Wait before retrying',
- 'Upgrade API plan',
- 'Use a different provider'
- ]
- };
+ suggestions: ['Wait before retrying', 'Upgrade API plan', 'Use a different provider'],
+ }
  }

  // Generic error
@@ -464,23 +544,23 @@ Be concise and actionable.`;
  isConnectionError: false,
  isConfigurationError: false,
  message: error.message,
- suggestions: ['Check provider configuration', 'Try again later']
- };
+ suggestions: ['Check provider configuration', 'Try again later'],
+ }
  }

  setModelOverride(model) {
- this.modelOverride = model;
+ this.modelOverride = model
  }

  getMetrics() {
- return this.metrics;
+ return this.metrics
  }

  resetMetrics() {
  this.metrics = {
  apiCalls: 0,
  ruleBasedFallbacks: 0,
- totalTokens: 0
- };
+ totalTokens: 0,
+ }
  }
- }
+ }