@entro314labs/ai-changelog-generator 3.2.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +41 -10
  2. package/ai-changelog-mcp.sh +0 -0
  3. package/ai-changelog.sh +0 -0
  4. package/bin/ai-changelog-dxt.js +0 -0
  5. package/package.json +72 -80
  6. package/src/ai-changelog-generator.js +11 -2
  7. package/src/application/orchestrators/changelog.orchestrator.js +12 -202
  8. package/src/cli.js +4 -5
  9. package/src/domains/ai/ai-analysis.service.js +2 -0
  10. package/src/domains/analysis/analysis.engine.js +758 -5
  11. package/src/domains/changelog/changelog.service.js +711 -13
  12. package/src/domains/changelog/workspace-changelog.service.js +429 -571
  13. package/src/domains/git/commit-tagger.js +552 -0
  14. package/src/domains/git/git-manager.js +357 -0
  15. package/src/domains/git/git.service.js +865 -16
  16. package/src/infrastructure/cli/cli.controller.js +14 -9
  17. package/src/infrastructure/config/configuration.manager.js +24 -2
  18. package/src/infrastructure/interactive/interactive-workflow.service.js +8 -1
  19. package/src/infrastructure/mcp/mcp-server.service.js +35 -11
  20. package/src/infrastructure/providers/core/base-provider.js +1 -1
  21. package/src/infrastructure/providers/implementations/anthropic.js +16 -173
  22. package/src/infrastructure/providers/implementations/azure.js +16 -63
  23. package/src/infrastructure/providers/implementations/dummy.js +13 -16
  24. package/src/infrastructure/providers/implementations/mock.js +13 -26
  25. package/src/infrastructure/providers/implementations/ollama.js +12 -4
  26. package/src/infrastructure/providers/implementations/openai.js +13 -165
  27. package/src/infrastructure/providers/provider-management.service.js +126 -412
  28. package/src/infrastructure/providers/utils/base-provider-helpers.js +11 -0
  29. package/src/shared/utils/cli-ui.js +1 -1
  30. package/src/shared/utils/diff-processor.js +21 -19
  31. package/src/shared/utils/error-classes.js +33 -0
  32. package/src/shared/utils/utils.js +65 -60
  33. package/src/domains/git/git-repository.analyzer.js +0 -678
package/src/infrastructure/providers/implementations/mock.js

@@ -3,8 +3,9 @@
  * Used for testing without real API credentials
  */
 
-import { ProviderError } from '../../../shared/utils/utils.js'
+import { ProviderError } from '../../../shared/utils/error-classes.js'
 import { BaseProvider } from '../core/base-provider.js'
+import { applyMixins } from '../utils/base-provider-helpers.js'
 
 class MockProvider extends BaseProvider {
   constructor(config = {}) {
@@ -17,22 +18,22 @@ class MockProvider extends BaseProvider {
     this.models = ['mock-basic', 'mock-standard', 'mock-advanced']
   }
 
-  /**
-   * Get provider name
-   * @returns {string} Provider name
-   */
   getName() {
     return this.name
   }
 
-  /**
-   * Check if provider is available
-   * @returns {boolean} Always true for mock provider
-   */
   isAvailable() {
     return true
   }
 
+  getRequiredEnvVars() {
+    return []
+  }
+
+  getDefaultModel() {
+    return 'mock-standard'
+  }
+
   /**
    * Simulate network latency
    * @param {number} ms - Milliseconds to wait
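The two methods added above, getRequiredEnvVars() and getDefaultModel(), bring MockProvider in line with the provider contract the other implementations adopt in this release: ollama.js below gains the same pair, and openai.js reworks its getDefaultModel() along the same lines. A minimal sketch of how a caller could use that contract, assuming an array of BaseProvider instances; the checkProviderReadiness helper is illustrative, not part of the package:

    import process from 'node:process'

    // Hypothetical helper: report which providers are usable in the current
    // environment, using only the interface methods shown in this diff.
    function checkProviderReadiness(providers, env = process.env) {
      return providers.map((provider) => {
        const missing = provider.getRequiredEnvVars().filter((name) => !env[name])
        return {
          provider: provider.getName(),
          defaultModel: provider.getDefaultModel(),
          ready: provider.isAvailable() && missing.length === 0,
          missingEnvVars: missing,
        }
      })
    }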
@@ -232,40 +233,26 @@
     }
   }
 
-  getAvailableModels() {
+  async getAvailableModels() {
     return [
       {
         id: 'mock-basic',
         name: 'Mock Basic Model',
-        contextWindow: 2048,
-        maxOutput: 1000,
-        inputCost: 0,
-        outputCost: 0,
-        features: ['text', 'testing'],
         description: 'Basic mock model for simple testing',
       },
       {
         id: 'mock-standard',
         name: 'Mock Standard Model',
-        contextWindow: 4096,
-        maxOutput: 2000,
-        inputCost: 0,
-        outputCost: 0,
-        features: ['text', 'tools', 'testing'],
         description: 'Standard mock model for moderate testing',
       },
       {
         id: 'mock-advanced',
         name: 'Mock Advanced Model',
-        contextWindow: 8192,
-        maxOutput: 4000,
-        inputCost: 0,
-        outputCost: 0,
-        features: ['text', 'tools', 'json', 'testing'],
         description: 'Advanced mock model for complex testing scenarios',
       },
     ]
   }
 }
 
-export default MockProvider
+// Apply mixins for enhanced functionality
+export default applyMixins ? applyMixins(MockProvider, 'mock') : MockProvider
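The guarded export above only matters if base-provider-helpers.js exports an undefined applyMixins (a missing named export would fail at module load), so it reads as a defensive fallback. That helper is not shown in this diff (+11 -0 in the file list), so the following is only a sketch of what it might do, assuming it copies shared methods onto the class prototype without overriding provider-specific ones. The testConnection body is borrowed from the code removed from openai.js later in this diff, and the mixin name follows the comment block there:

    // Hypothetical sketch of applyMixins; the real implementation lives in
    // base-provider-helpers.js and is not shown in this diff.
    const sharedMethods = {
      // ConnectionTestMixin: issue a tiny completion to verify connectivity
      async testConnection() {
        try {
          const response = await this.generateCompletion(
            [{ role: 'user', content: 'Hello' }],
            { max_tokens: 5 }
          )
          return { success: true, model: response.model, message: 'Connection successful' }
        } catch (error) {
          return { success: false, error: error.message }
        }
      },
    }

    function applyMixins(ProviderClass, providerName) {
      for (const [name, fn] of Object.entries(sharedMethods)) {
        // Keep any method the provider (or its base class) already defines
        if (!ProviderClass.prototype[name]) {
          ProviderClass.prototype[name] = fn
        }
      }
      ProviderClass.providerName = providerName
      return ProviderClass
    }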
package/src/infrastructure/providers/implementations/ollama.js

@@ -2,10 +2,9 @@ import process from 'node:process'
 
 import { Ollama } from 'ollama'
 
-import { ProviderError } from '../../../shared/utils/utils.js'
+import { ProviderError } from '../../../shared/utils/error-classes.js'
 import { BaseProvider } from '../core/base-provider.js'
 import { applyMixins } from '../utils/base-provider-helpers.js'
-import { buildClientOptions } from '../utils/provider-utils.js'
 
 class OllamaProvider extends BaseProvider {
   constructor(config) {
@@ -17,12 +16,12 @@ class OllamaProvider extends BaseProvider {
   }
 
   initializeClient() {
-    const clientOptions = buildClientOptions(this.getProviderConfig(), {
+    const clientOptions = this.buildClientOptions({
       host: 'http://localhost:11434',
     })
 
     this.client = new Ollama({
-      host: clientOptions.host,
+      host: clientOptions.OLLAMA_HOST || clientOptions.host,
     })
   }
 
@@ -34,6 +33,15 @@ class OllamaProvider extends BaseProvider {
     return !!this.config.OLLAMA_HOST
   }
 
+  getRequiredEnvVars() {
+    return ['OLLAMA_HOST']
+  }
+
+  getDefaultModel() {
+    const modelConfig = this.getProviderModelConfig()
+    return modelConfig.standardModel
+  }
+
   async generateCompletion(messages, options = {}) {
     if (!this.isAvailable()) {
       return this.handleProviderError(
package/src/infrastructure/providers/implementations/openai.js

@@ -2,7 +2,6 @@ import { OpenAI } from 'openai'
 
 import { BaseProvider } from '../core/base-provider.js'
 import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
-import { buildClientOptions } from '../utils/provider-utils.js'
 
 export class OpenAIProvider extends BaseProvider {
   constructor(config) {
@@ -14,15 +13,15 @@ export class OpenAIProvider extends BaseProvider {
   }
 
   initializeClient() {
-    const clientOptions = buildClientOptions(this.getProviderConfig(), {
+    const clientOptions = this.buildClientOptions({
       timeout: 60000,
       maxRetries: 2,
     })
 
     this.openai = new OpenAI({
-      apiKey: clientOptions.apiKey,
-      organization: clientOptions.organization,
-      project: clientOptions.project,
+      apiKey: clientOptions.OPENAI_API_KEY,
+      organization: clientOptions.OPENAI_ORGANIZATION,
+      project: clientOptions.OPENAI_PROJECT,
       timeout: clientOptions.timeout,
       maxRetries: clientOptions.maxRetries,
     })
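Note the pattern shared by this hunk and the Ollama one above: the standalone buildClientOptions import is gone, replaced by an inherited this.buildClientOptions(), and the merged options are read back under their raw env-var keys (OPENAI_API_KEY, OLLAMA_HOST) rather than normalized names. That implies the method merges the provider's configuration over the supplied transport defaults without renaming keys. A sketch under that assumption; the real method presumably lives on BaseProvider or its helpers and is not shown in this diff:

    // Hypothetical sketch of the inherited helper as it might appear on
    // BaseProvider: config keys keep their env-var-style names and take
    // precedence over the transport defaults passed in by each provider.
    buildClientOptions(defaults = {}) {
      return { ...defaults, ...this.getProviderConfig() }
    }

This would also explain the Ollama fallback: clientOptions.OLLAMA_HOST || clientOptions.host resolves to the configured host when one is set, else to the http://localhost:11434 default.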
@@ -42,7 +41,8 @@ export class OpenAIProvider extends BaseProvider {
   }
 
   getDefaultModel() {
-    return 'gpt-4o'
+    const modelConfig = this.getProviderModelConfig()
+    return modelConfig.standardModel
   }
 
   async generateCompletion(messages, options = {}) {
@@ -101,171 +101,19 @@
         name: m.id,
         id: m.id,
         description: `OpenAI ${m.id}`,
-        contextWindow: this.getModelContextWindow(m.id),
-        capabilities: this.getModelCapabilities(m.id),
       }))
     } catch (_error) {
       return []
     }
   }
 
-  getModelContextWindow(modelName) {
-    const contextWindows = {
-      // Latest 2025 models
-      'gpt-4o': 128000,
-      'gpt-4o-mini': 128000,
-      o1: 200000,
-      'o1-mini': 128000,
-      'gpt-4.1-nano': 200000,
-      'gpt-4.1-mini': 200000,
-      'gpt-4.1': 200000,
-      // Note: o3/o4 are Azure-only models
-      // Legacy models
-      'gpt-4': 8192,
-      'gpt-4-32k': 32768,
-      'gpt-4-turbo': 128000,
-      'gpt-4-turbo-preview': 128000,
-      'gpt-3.5-turbo': 4096,
-      'gpt-3.5-turbo-16k': 16384,
-    }
-    return contextWindows[modelName] || 128000
-  }
-
-  getModelCapabilities(modelName) {
-    return {
-      reasoning: modelName.includes('o1') || modelName.includes('gpt-4'),
-      function_calling: !modelName.includes('o1'), // o1 models don't support function calling
-      json_mode: true,
-      multimodal: modelName.includes('gpt-4o') || modelName.includes('gpt-4.1'),
-      largeContext:
-        modelName.includes('4.1') || modelName.includes('o1') || modelName.includes('4o'),
-      promptCaching: modelName.includes('4.1'),
-    }
-  }
-
-  async validateModelAvailability(modelName) {
-    try {
-      const models = await this.getAvailableModels()
-      const model = models.find((m) => m.name === modelName)
-
-      if (model) {
-        return {
-          available: true,
-          model: modelName,
-          capabilities: model.capabilities,
-          contextWindow: model.contextWindow,
-        }
-      }
-      const availableModels = models.map((m) => m.name)
-      return {
-        available: false,
-        error: `Model '${modelName}' not available`,
-        alternatives: availableModels.slice(0, 5),
-      }
-    } catch (error) {
-      return {
-        available: false,
-        error: error.message,
-        alternatives: ['gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1', 'gpt-4'],
-      }
-    }
-  }
-
-  async testConnection() {
-    try {
-      const response = await this.generateCompletion([{ role: 'user', content: 'Hello' }], {
-        max_tokens: 5,
-      })
-
-      return {
-        success: true,
-        model: response.model,
-        message: 'Connection successful',
-      }
-    } catch (error) {
-      return {
-        success: false,
-        error: error.message,
-      }
-    }
-  }
-
-  getCapabilities(modelName) {
-    return {
-      completion: true,
-      streaming: true,
-      function_calling: true,
-      json_mode: true,
-      reasoning: modelName ? modelName.includes('gpt-4') : true,
-      multimodal: false,
-    }
-  }
-
-  getModelRecommendation(commitDetails) {
-    const { files = 0, lines = 0, breaking = false, complex = false } = commitDetails
-
-    // Use o1 for highly complex reasoning tasks
-    if (breaking || complex || files > 50 || lines > 5000) {
-      return {
-        model: 'o1-mini',
-        reason: 'Highly complex change requiring advanced reasoning',
-      }
-    }
-
-    // Use GPT-4o for complex changes
-    if (files > 20 || lines > 1000) {
-      return {
-        model: 'gpt-4o',
-        reason: 'Complex change requiring advanced analysis',
-      }
-    }
-
-    // Use GPT-4o mini for medium changes
-    if (files > 5 || lines > 200) {
-      return {
-        model: 'gpt-4o-mini',
-        reason: 'Medium-sized change requiring good analysis',
-      }
-    }
-
-    // Use GPT-4o mini for small changes (more capable than 3.5-turbo)
-    return {
-      model: 'gpt-4o-mini',
-      reason: 'Small change, using efficient modern model',
-    }
-  }
-
-  async selectOptimalModel(commitInfo) {
-    const recommendation = this.getModelRecommendation(commitInfo)
-    const validation = await this.validateModelAvailability(recommendation.model)
-
-    if (validation.available) {
-      return {
-        model: recommendation.model,
-        reason: recommendation.reason,
-        capabilities: validation.capabilities,
-      }
-    }
-    // Fallback to default model
-    return {
-      model: this.getDefaultModel(),
-      reason: 'Fallback to default model',
-      capabilities: this.getCapabilities(this.getDefaultModel()),
-    }
-  }
-
-  getProviderModelConfig() {
-    return {
-      smallModel: 'gpt-4o-mini',
-      mediumModel: 'gpt-4o',
-      standardModel: 'gpt-4o',
-      complexModel: 'o1-mini',
-      reasoningModel: 'o1',
-      default: 'gpt-4o',
-      temperature: 0.3,
-      maxTokens: 1000,
-    }
-  }
+  // All common methods now provided by mixins:
+  // - validateModelAvailability() from ModelValidationMixin
+  // - testConnection() from ConnectionTestMixin
+  // - getCapabilities() from CapabilitiesMixin
+  // - getModelRecommendation() from ModelRecommendationMixin
+  // - selectOptimalModel() from ModelRecommendationMixin
+  // - getProviderModelConfig() from ConfigurationMixin
 }
 
 // Apply mixins to add standard provider functionality
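Net effect of this final hunk: roughly 150 lines of OpenAI-specific plumbing (model validation, connection test, capability and recommendation tables, provider model config) collapse into shared mixins, mirroring the same cleanup visible in the file list for anthropic.js (+16 -173) and azure.js (+16 -63). A short usage sketch, assuming the mixin-supplied methods keep the removed methods' signatures and that openai.js, like mock.js above, default-exports the mixin-applied class; the import path and run context are illustrative:

    import process from 'node:process'
    // Assumed default export of the mixin-applied class, as in mock.js above.
    import OpenAIProvider from './src/infrastructure/providers/implementations/openai.js'

    const provider = new OpenAIProvider({ OPENAI_API_KEY: process.env.OPENAI_API_KEY })

    // ConnectionTestMixin: one tiny completion to verify credentials
    const health = await provider.testConnection()

    // ModelRecommendationMixin: same commit-shape argument the removed
    // selectOptimalModel(commitInfo) took ({ files, lines, breaking, complex })
    const choice = await provider.selectOptimalModel({ files: 30, lines: 1500 })

    console.log(health.success, choice.model, choice.reason)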