@entro314labs/ai-changelog-generator 3.2.1 → 3.6.0

This diff represents the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (36)
  1. package/CHANGELOG.md +42 -2
  2. package/README.md +21 -1
  3. package/ai-changelog-mcp.sh +0 -0
  4. package/ai-changelog.sh +0 -0
  5. package/bin/ai-changelog-dxt.js +6 -3
  6. package/manifest.json +177 -0
  7. package/package.json +76 -81
  8. package/src/ai-changelog-generator.js +5 -4
  9. package/src/application/orchestrators/changelog.orchestrator.js +19 -203
  10. package/src/cli.js +16 -5
  11. package/src/domains/ai/ai-analysis.service.js +2 -0
  12. package/src/domains/analysis/analysis.engine.js +714 -37
  13. package/src/domains/changelog/changelog.service.js +623 -32
  14. package/src/domains/changelog/workspace-changelog.service.js +445 -622
  15. package/src/domains/git/commit-tagger.js +552 -0
  16. package/src/domains/git/git-manager.js +357 -0
  17. package/src/domains/git/git.service.js +865 -16
  18. package/src/infrastructure/cli/cli.controller.js +14 -9
  19. package/src/infrastructure/config/configuration.manager.js +25 -11
  20. package/src/infrastructure/interactive/interactive-workflow.service.js +8 -1
  21. package/src/infrastructure/mcp/mcp-server.service.js +105 -32
  22. package/src/infrastructure/providers/core/base-provider.js +1 -1
  23. package/src/infrastructure/providers/implementations/anthropic.js +16 -173
  24. package/src/infrastructure/providers/implementations/azure.js +16 -63
  25. package/src/infrastructure/providers/implementations/dummy.js +13 -16
  26. package/src/infrastructure/providers/implementations/mock.js +13 -26
  27. package/src/infrastructure/providers/implementations/ollama.js +12 -4
  28. package/src/infrastructure/providers/implementations/openai.js +13 -165
  29. package/src/infrastructure/providers/provider-management.service.js +126 -412
  30. package/src/infrastructure/providers/utils/base-provider-helpers.js +11 -0
  31. package/src/shared/utils/cli-ui.js +8 -10
  32. package/src/shared/utils/diff-processor.js +21 -19
  33. package/src/shared/utils/error-classes.js +33 -0
  34. package/src/shared/utils/utils.js +83 -63
  35. package/types/index.d.ts +61 -68
  36. package/src/domains/git/git-repository.analyzer.js +0 -678
package/src/infrastructure/providers/implementations/azure.js

@@ -3,6 +3,7 @@ import { AzureOpenAI } from 'openai'
 
 import { BaseProvider } from '../core/base-provider.js'
 import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
+import { getProviderModelConfig } from '../utils/model-config.js'
 
 class AzureOpenAIProvider extends BaseProvider {
   constructor(config) {
@@ -168,59 +169,11 @@ class AzureOpenAIProvider extends BaseProvider {
 
   // Azure-specific helper methods
   getDeploymentName() {
-    return this.config.AZURE_OPENAI_DEPLOYMENT_NAME || this.getProviderModelConfig().standardModel
+    const modelConfig = this.getProviderModelConfig()
+    return this.config.AZURE_OPENAI_DEPLOYMENT_NAME || modelConfig.standardModel
   }
 
-  getModelContextWindow(modelName) {
-    const contextWindows = {
-      // Latest 2025 models (Azure exclusive)
-      o4: 500000,
-      'o4-mini': 200000,
-      o3: 300000,
-      'o3-mini': 150000,
-      // Standard 2025 models
-      'gpt-4o': 128000,
-      'gpt-4o-mini': 128000,
-      o1: 200000,
-      'o1-mini': 128000,
-      'gpt-4.1': 200000,
-      'gpt-4.1-mini': 200000,
-      'gpt-4.1-nano': 200000,
-      // Legacy models
-      'gpt-4': 8192,
-      'gpt-4-32k': 32768,
-      'gpt-4-turbo': 128000,
-      'gpt-35-turbo': 4096,
-      'gpt-35-turbo-16k': 16384,
-    }
-    return contextWindows[modelName] || 128000
-  }
-
-  getModelCapabilities(modelName) {
-    return {
-      reasoning:
-        modelName.includes('o1') ||
-        modelName.includes('o3') ||
-        modelName.includes('o4') ||
-        modelName.includes('gpt-4'),
-      function_calling: !(
-        modelName.includes('o1') ||
-        modelName.includes('o3') ||
-        modelName.includes('o4')
-      ), // o-series models don't support function calling
-      json_mode: true,
-      multimodal: modelName.includes('gpt-4o') || modelName.includes('gpt-4.1'),
-      largeContext:
-        modelName.includes('4.1') ||
-        modelName.includes('o1') ||
-        modelName.includes('o3') ||
-        modelName.includes('o4') ||
-        modelName.includes('4o'),
-      promptCaching: modelName.includes('4.1'),
-      advancedReasoning: modelName.includes('o3') || modelName.includes('o4'),
-      azureExclusive: modelName.includes('o3') || modelName.includes('o4'),
-    }
-  }
+  // Model capabilities now handled by centralized CapabilitiesMixin
 
   // Azure-specific method for testing deployment availability
   async testDeployment(deploymentName) {
@@ -257,19 +210,19 @@ class AzureOpenAIProvider extends BaseProvider {
       return this._cachedDeployments
     }
 
-    // Get base config directly to avoid recursion
-    const baseConfig = {
-      commonDeployments: ['o4', 'o3', 'gpt-4.1', 'gpt-4o', 'gpt-35-turbo', 'o1'],
-      fallbacks: ['gpt-4.1', 'gpt-4o', 'o1', 'gpt-35-turbo'],
-    }
+    // Get model config from centralized system (avoiding circular dependency)
+    const modelConfig = getProviderModelConfig('azure', this.config)
 
     const potentialDeployments = [
       // User configured deployment
      this.config.AZURE_OPENAI_DEPLOYMENT_NAME,
-      // Common deployment names
-      ...baseConfig.commonDeployments,
+      // Models from config
+      modelConfig.complexModel,
+      modelConfig.standardModel,
+      modelConfig.mediumModel,
+      modelConfig.smallModel,
       // Fallback models
-      ...baseConfig.fallbacks,
+      ...modelConfig.fallbacks,
     ]
       .filter(Boolean)
       .filter((v, i, a) => a.indexOf(v) === i) // Remove duplicates
@@ -294,16 +247,16 @@ class AzureOpenAIProvider extends BaseProvider {
 
       if (availableDeployments.length === 0) {
         console.warn('⚠️ No Azure deployments found. Using configured deployment name as fallback.')
-        // Fallback to configured deployment even if untested, preferring latest models
-        const fallback = this.config.AZURE_OPENAI_DEPLOYMENT_NAME || 'o4' || 'gpt-4.1'
+        // Fallback to configured deployment even if untested
+        const fallback = this.config.AZURE_OPENAI_DEPLOYMENT_NAME || modelConfig.standardModel
         return [fallback]
       }
 
       return availableDeployments
     } catch (error) {
       console.warn('⚠️ Failed to detect Azure deployments:', error.message)
-      // Return common deployments as fallback
-      return baseConfig.commonDeployments
+      // Return fallback models from config
+      return modelConfig.fallbacks
     }
   }
 
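Across these azure.js hunks, hard-coded model tables and fallback lists give way to one lookup, getProviderModelConfig(providerName, config). The shipped model-config.js is not part of this diff; the sketch below is a hypothetical reconstruction of its shape, inferred from the fields the providers consume (standardModel, mediumModel, smallModel, complexModel, fallbacks), from the baseConfig.fallbacks deleted above, and from the per-provider defaults deleted from openai.js later in this diff. The openai fallbacks list and the override key are assumptions.

// model-config.js — hypothetical sketch; the real module is not shown in this diff.
// The openai entry reuses the defaults deleted from openai.js below; the azure
// fallbacks reuse the deleted baseConfig.fallbacks. Other values are assumed.
const MODEL_CONFIGS = {
  openai: {
    smallModel: 'gpt-4o-mini',
    mediumModel: 'gpt-4o',
    standardModel: 'gpt-4o',
    complexModel: 'o1-mini',
    fallbacks: ['gpt-4o', 'gpt-4o-mini', 'o1-mini'], // assumed
  },
  azure: {
    smallModel: 'gpt-4o-mini',
    mediumModel: 'gpt-4o',
    standardModel: 'gpt-4.1', // assumed
    complexModel: 'o4', // assumed
    fallbacks: ['gpt-4.1', 'gpt-4o', 'o1', 'gpt-35-turbo'],
  },
}

export function getProviderModelConfig(providerName, config = {}) {
  const defaults = MODEL_CONFIGS[providerName]
  if (!defaults) {
    throw new Error(`No model config for provider '${providerName}'`)
  }
  // Assumed precedence: explicit user config wins over static defaults.
  return { ...defaults, ...(config.models?.[providerName] ?? {}) }
}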
package/src/infrastructure/providers/implementations/dummy.js

@@ -3,8 +3,9 @@
  * Fallback provider when no other providers are available
  */
 
-import { ProviderError } from '../../../shared/utils/utils.js'
+import { ProviderError } from '../../../shared/utils/error-classes.js'
 import { BaseProvider } from '../core/base-provider.js'
+import { applyMixins } from '../utils/base-provider-helpers.js'
 
 class DummyProvider extends BaseProvider {
   constructor(config = {}) {
@@ -12,22 +13,22 @@ class DummyProvider extends BaseProvider {
     this.name = 'dummy'
   }
 
-  /**
-   * Get provider name
-   * @returns {string} Provider name
-   */
   getName() {
     return this.name
   }
 
-  /**
-   * Check if provider is available
-   * @returns {boolean} Always true for dummy provider
-   */
   isAvailable() {
     return true
   }
 
+  getRequiredEnvVars() {
+    return []
+  }
+
+  getDefaultModel() {
+    return 'rule-based'
+  }
+
   /**
    * Generate completion (always fails with informative error)
    * @param {Array} messages - Messages for completion
@@ -93,20 +94,16 @@ class DummyProvider extends BaseProvider {
     }
   }
 
-  getAvailableModels() {
+  async getAvailableModels() {
     return [
       {
         id: 'rule-based',
         name: 'Rule-based Fallback',
-        contextWindow: 0,
-        maxOutput: 0,
-        inputCost: 0,
-        outputCost: 0,
-        features: [],
         description: 'No AI model available - configure a provider',
       },
     ]
   }
 }
 
-export default DummyProvider
+// Apply minimal mixins (error handling only - dummy provider doesn't need full functionality)
+export default applyMixins ? applyMixins(DummyProvider, 'dummy', []) : DummyProvider
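Both test providers now export through the same guard, export default applyMixins ? applyMixins(Class, name, mixins) : Class. The third argument reads as an opt-in list of mixins: empty for the dummy provider, omitted (meaning "all") for the mock provider below. base-provider-helpers.js is not included in this diff; here is a minimal hypothetical sketch of an applier consistent with that call shape, under the assumption that mixins are plain objects of methods:

// Hypothetical sketch of applyMixins; the shipped base-provider-helpers.js may differ.
// Copies mixin methods onto the class prototype without clobbering methods the
// provider already defines, so provider-specific overrides always win.
const ALL_MIXINS = {
  // e.g. capabilities: CapabilitiesMixin, connectionTest: ConnectionTestMixin, ...
}

export function applyMixins(ProviderClass, providerName, mixinNames) {
  const names = mixinNames ?? Object.keys(ALL_MIXINS) // omitted argument = all mixins
  for (const name of names) {
    for (const [method, fn] of Object.entries(ALL_MIXINS[name] ?? {})) {
      if (!(method in ProviderClass.prototype)) {
        ProviderClass.prototype[method] = fn
      }
    }
  }
  return ProviderClass
}

The ternary guard keeps the module importable even if the helper resolves to undefined — a sensible precaution for a fallback provider whose job is to load when everything else is broken.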
package/src/infrastructure/providers/implementations/mock.js

@@ -3,8 +3,9 @@
  * Used for testing without real API credentials
  */
 
-import { ProviderError } from '../../../shared/utils/utils.js'
+import { ProviderError } from '../../../shared/utils/error-classes.js'
 import { BaseProvider } from '../core/base-provider.js'
+import { applyMixins } from '../utils/base-provider-helpers.js'
 
 class MockProvider extends BaseProvider {
   constructor(config = {}) {
@@ -17,22 +18,22 @@ class MockProvider extends BaseProvider {
     this.models = ['mock-basic', 'mock-standard', 'mock-advanced']
   }
 
-  /**
-   * Get provider name
-   * @returns {string} Provider name
-   */
   getName() {
     return this.name
   }
 
-  /**
-   * Check if provider is available
-   * @returns {boolean} Always true for mock provider
-   */
   isAvailable() {
     return true
   }
 
+  getRequiredEnvVars() {
+    return []
+  }
+
+  getDefaultModel() {
+    return 'mock-standard'
+  }
+
   /**
    * Simulate network latency
    * @param {number} ms - Milliseconds to wait
@@ -232,40 +233,26 @@ class MockProvider extends BaseProvider {
     }
   }
 
-  getAvailableModels() {
+  async getAvailableModels() {
     return [
       {
         id: 'mock-basic',
         name: 'Mock Basic Model',
-        contextWindow: 2048,
-        maxOutput: 1000,
-        inputCost: 0,
-        outputCost: 0,
-        features: ['text', 'testing'],
         description: 'Basic mock model for simple testing',
       },
       {
         id: 'mock-standard',
         name: 'Mock Standard Model',
-        contextWindow: 4096,
-        maxOutput: 2000,
-        inputCost: 0,
-        outputCost: 0,
-        features: ['text', 'tools', 'testing'],
         description: 'Standard mock model for moderate testing',
       },
       {
         id: 'mock-advanced',
         name: 'Mock Advanced Model',
-        contextWindow: 8192,
-        maxOutput: 4000,
-        inputCost: 0,
-        outputCost: 0,
-        features: ['text', 'tools', 'json', 'testing'],
         description: 'Advanced mock model for complex testing scenarios',
       },
     ]
   }
 }
 
-export default MockProvider
+// Apply mixins for enhanced functionality
+export default applyMixins ? applyMixins(MockProvider, 'mock') : MockProvider
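Note that getAvailableModels() became async in both test providers, matching the real providers, whose implementations fetch model lists over the network. Any caller that previously read the array synchronously now receives a Promise and must await it. A hypothetical call site after the change (the real consumers live in provider-management.service.js):

import MockProvider from './implementations/mock.js' // path assumed

const provider = new MockProvider()
const models = await provider.getAvailableModels() // was synchronous before 3.6.0
console.log(models.map((m) => m.id)) // ['mock-basic', 'mock-standard', 'mock-advanced']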
package/src/infrastructure/providers/implementations/ollama.js

@@ -2,10 +2,9 @@ import process from 'node:process'
 
 import { Ollama } from 'ollama'
 
-import { ProviderError } from '../../../shared/utils/utils.js'
+import { ProviderError } from '../../../shared/utils/error-classes.js'
 import { BaseProvider } from '../core/base-provider.js'
 import { applyMixins } from '../utils/base-provider-helpers.js'
-import { buildClientOptions } from '../utils/provider-utils.js'
 
 class OllamaProvider extends BaseProvider {
   constructor(config) {
@@ -17,12 +16,12 @@ class OllamaProvider extends BaseProvider {
   }
 
   initializeClient() {
-    const clientOptions = buildClientOptions(this.getProviderConfig(), {
+    const clientOptions = this.buildClientOptions({
       host: 'http://localhost:11434',
     })
 
     this.client = new Ollama({
-      host: clientOptions.host,
+      host: clientOptions.OLLAMA_HOST || clientOptions.host,
     })
   }
 
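Here, and in openai.js below, the standalone buildClientOptions(...) util is replaced by an inherited this.buildClientOptions(defaults) whose return value keeps raw environment-variable names as keys — hence clientOptions.OLLAMA_HOST, with clientOptions.host as the built-in default. The base-class helper is not shown in this diff; a hypothetical sketch consistent with that behavior:

// Hypothetical sketch of the inherited helper; the real BaseProvider may differ.
class BaseProviderSketch {
  constructor(config = {}) {
    this.config = config
  }

  getRequiredEnvVars() {
    return [] // overridden per provider, e.g. ['OLLAMA_HOST']
  }

  // Merge configured env vars over caller-supplied defaults, preserving the
  // env-var names as keys so providers read e.g. clientOptions.OLLAMA_HOST.
  buildClientOptions(defaults = {}) {
    const fromEnv = {}
    for (const key of this.getRequiredEnvVars()) {
      if (this.config[key] !== undefined) {
        fromEnv[key] = this.config[key]
      }
    }
    return { ...defaults, ...fromEnv }
  }
}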
@@ -34,6 +33,15 @@ class OllamaProvider extends BaseProvider {
     return !!this.config.OLLAMA_HOST
   }
 
+  getRequiredEnvVars() {
+    return ['OLLAMA_HOST']
+  }
+
+  getDefaultModel() {
+    const modelConfig = this.getProviderModelConfig()
+    return modelConfig.standardModel
+  }
+
   async generateCompletion(messages, options = {}) {
     if (!this.isAvailable()) {
       return this.handleProviderError(
package/src/infrastructure/providers/implementations/openai.js

@@ -2,7 +2,6 @@ import { OpenAI } from 'openai'
 
 import { BaseProvider } from '../core/base-provider.js'
 import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
-import { buildClientOptions } from '../utils/provider-utils.js'
 
 export class OpenAIProvider extends BaseProvider {
   constructor(config) {
@@ -14,15 +13,15 @@ export class OpenAIProvider extends BaseProvider {
   }
 
   initializeClient() {
-    const clientOptions = buildClientOptions(this.getProviderConfig(), {
+    const clientOptions = this.buildClientOptions({
       timeout: 60000,
       maxRetries: 2,
     })
 
     this.openai = new OpenAI({
-      apiKey: clientOptions.apiKey,
-      organization: clientOptions.organization,
-      project: clientOptions.project,
+      apiKey: clientOptions.OPENAI_API_KEY,
+      organization: clientOptions.OPENAI_ORGANIZATION,
+      project: clientOptions.OPENAI_PROJECT,
       timeout: clientOptions.timeout,
       maxRetries: clientOptions.maxRetries,
     })
@@ -42,7 +41,8 @@ export class OpenAIProvider extends BaseProvider {
   }
 
   getDefaultModel() {
-    return 'gpt-4o'
+    const modelConfig = this.getProviderModelConfig()
+    return modelConfig.standardModel
   }
 
   async generateCompletion(messages, options = {}) {
@@ -101,171 +101,19 @@ export class OpenAIProvider extends BaseProvider {
         name: m.id,
         id: m.id,
         description: `OpenAI ${m.id}`,
-        contextWindow: this.getModelContextWindow(m.id),
-        capabilities: this.getModelCapabilities(m.id),
       }))
     } catch (_error) {
       return []
     }
   }
 
-  getModelContextWindow(modelName) {
-    const contextWindows = {
-      // Latest 2025 models
-      'gpt-4o': 128000,
-      'gpt-4o-mini': 128000,
-      o1: 200000,
-      'o1-mini': 128000,
-      'gpt-4.1-nano': 200000,
-      'gpt-4.1-mini': 200000,
-      'gpt-4.1': 200000,
-      // Note: o3/o4 are Azure-only models
-      // Legacy models
-      'gpt-4': 8192,
-      'gpt-4-32k': 32768,
-      'gpt-4-turbo': 128000,
-      'gpt-4-turbo-preview': 128000,
-      'gpt-3.5-turbo': 4096,
-      'gpt-3.5-turbo-16k': 16384,
-    }
-    return contextWindows[modelName] || 128000
-  }
-
-  getModelCapabilities(modelName) {
-    return {
-      reasoning: modelName.includes('o1') || modelName.includes('gpt-4'),
-      function_calling: !modelName.includes('o1'), // o1 models don't support function calling
-      json_mode: true,
-      multimodal: modelName.includes('gpt-4o') || modelName.includes('gpt-4.1'),
-      largeContext:
-        modelName.includes('4.1') || modelName.includes('o1') || modelName.includes('4o'),
-      promptCaching: modelName.includes('4.1'),
-    }
-  }
-
-  async validateModelAvailability(modelName) {
-    try {
-      const models = await this.getAvailableModels()
-      const model = models.find((m) => m.name === modelName)
-
-      if (model) {
-        return {
-          available: true,
-          model: modelName,
-          capabilities: model.capabilities,
-          contextWindow: model.contextWindow,
-        }
-      }
-      const availableModels = models.map((m) => m.name)
-      return {
-        available: false,
-        error: `Model '${modelName}' not available`,
-        alternatives: availableModels.slice(0, 5),
-      }
-    } catch (error) {
-      return {
-        available: false,
-        error: error.message,
-        alternatives: ['gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1', 'gpt-4'],
-      }
-    }
-  }
-
-  async testConnection() {
-    try {
-      const response = await this.generateCompletion([{ role: 'user', content: 'Hello' }], {
-        max_tokens: 5,
-      })
-
-      return {
-        success: true,
-        model: response.model,
-        message: 'Connection successful',
-      }
-    } catch (error) {
-      return {
-        success: false,
-        error: error.message,
-      }
-    }
-  }
-
-  getCapabilities(modelName) {
-    return {
-      completion: true,
-      streaming: true,
-      function_calling: true,
-      json_mode: true,
-      reasoning: modelName ? modelName.includes('gpt-4') : true,
-      multimodal: false,
-    }
-  }
-
-  getModelRecommendation(commitDetails) {
-    const { files = 0, lines = 0, breaking = false, complex = false } = commitDetails
-
-    // Use o1 for highly complex reasoning tasks
-    if (breaking || complex || files > 50 || lines > 5000) {
-      return {
-        model: 'o1-mini',
-        reason: 'Highly complex change requiring advanced reasoning',
-      }
-    }
-
-    // Use GPT-4o for complex changes
-    if (files > 20 || lines > 1000) {
-      return {
-        model: 'gpt-4o',
-        reason: 'Complex change requiring advanced analysis',
-      }
-    }
-
-    // Use GPT-4o mini for medium changes
-    if (files > 5 || lines > 200) {
-      return {
-        model: 'gpt-4o-mini',
-        reason: 'Medium-sized change requiring good analysis',
-      }
-    }
-
-    // Use GPT-4o mini for small changes (more capable than 3.5-turbo)
-    return {
-      model: 'gpt-4o-mini',
-      reason: 'Small change, using efficient modern model',
-    }
-  }
-
-  async selectOptimalModel(commitInfo) {
-    const recommendation = this.getModelRecommendation(commitInfo)
-    const validation = await this.validateModelAvailability(recommendation.model)
-
-    if (validation.available) {
-      return {
-        model: recommendation.model,
-        reason: recommendation.reason,
-        capabilities: validation.capabilities,
-      }
-    }
-    // Fallback to default model
-    return {
-      model: this.getDefaultModel(),
-      reason: 'Fallback to default model',
-      capabilities: this.getCapabilities(this.getDefaultModel()),
-    }
-  }
-
-  getProviderModelConfig() {
-    return {
-      smallModel: 'gpt-4o-mini',
-      mediumModel: 'gpt-4o',
-      standardModel: 'gpt-4o',
-      complexModel: 'o1-mini',
-      reasoningModel: 'o1',
-      default: 'gpt-4o',
-      temperature: 0.3,
-      maxTokens: 1000,
-    }
-  }
+  // All common methods now provided by mixins:
+  // - validateModelAvailability() from ModelValidationMixin
+  // - testConnection() from ConnectionTestMixin
+  // - getCapabilities() from CapabilitiesMixin
+  // - getModelRecommendation() from ModelRecommendationMixin
+  // - selectOptimalModel() from ModelRecommendationMixin
+  // - getProviderModelConfig() from ConfigurationMixin
 }
 
 // Apply mixins to add standard provider functionality
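The closing comment names the six mixins that absorbed the deleted methods. As one grounded example, the testConnection() body removed from openai.js in this hunk maps almost verbatim onto a shareable mixin — sketched here under the same assumption as the applyMixins sketch earlier, that mixins are plain objects of methods:

// Hypothetical ConnectionTestMixin, reconstructed from the testConnection()
// implementation deleted above. Any provider exposing a compatible
// generateCompletion(messages, options) can share it unchanged.
export const ConnectionTestMixin = {
  async testConnection() {
    try {
      const response = await this.generateCompletion([{ role: 'user', content: 'Hello' }], {
        max_tokens: 5,
      })
      return { success: true, model: response.model, message: 'Connection successful' }
    } catch (error) {
      return { success: false, error: error.message }
    }
  },
}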