@entro314labs/ai-changelog-generator 3.0.5 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +383 -785
  2. package/README.md +30 -3
  3. package/ai-changelog-mcp.sh +0 -0
  4. package/ai-changelog.sh +0 -0
  5. package/bin/ai-changelog-dxt.js +9 -9
  6. package/bin/ai-changelog-mcp.js +19 -17
  7. package/bin/ai-changelog.js +6 -6
  8. package/package.json +84 -52
  9. package/src/ai-changelog-generator.js +83 -81
  10. package/src/application/orchestrators/changelog.orchestrator.js +1040 -296
  11. package/src/application/services/application.service.js +145 -123
  12. package/src/cli.js +76 -57
  13. package/src/domains/ai/ai-analysis.service.js +289 -209
  14. package/src/domains/analysis/analysis.engine.js +253 -193
  15. package/src/domains/changelog/changelog.service.js +1062 -784
  16. package/src/domains/changelog/workspace-changelog.service.js +420 -249
  17. package/src/domains/git/git-repository.analyzer.js +348 -258
  18. package/src/domains/git/git.service.js +132 -112
  19. package/src/infrastructure/cli/cli.controller.js +415 -247
  20. package/src/infrastructure/config/configuration.manager.js +220 -190
  21. package/src/infrastructure/interactive/interactive-staging.service.js +332 -0
  22. package/src/infrastructure/interactive/interactive-workflow.service.js +200 -159
  23. package/src/infrastructure/mcp/mcp-server.service.js +208 -207
  24. package/src/infrastructure/metrics/metrics.collector.js +140 -123
  25. package/src/infrastructure/providers/core/base-provider.js +87 -40
  26. package/src/infrastructure/providers/implementations/anthropic.js +101 -99
  27. package/src/infrastructure/providers/implementations/azure.js +124 -101
  28. package/src/infrastructure/providers/implementations/bedrock.js +136 -126
  29. package/src/infrastructure/providers/implementations/dummy.js +23 -23
  30. package/src/infrastructure/providers/implementations/google.js +123 -114
  31. package/src/infrastructure/providers/implementations/huggingface.js +94 -87
  32. package/src/infrastructure/providers/implementations/lmstudio.js +75 -60
  33. package/src/infrastructure/providers/implementations/mock.js +69 -73
  34. package/src/infrastructure/providers/implementations/ollama.js +89 -66
  35. package/src/infrastructure/providers/implementations/openai.js +88 -89
  36. package/src/infrastructure/providers/implementations/vertex.js +227 -197
  37. package/src/infrastructure/providers/provider-management.service.js +245 -207
  38. package/src/infrastructure/providers/provider-manager.service.js +145 -125
  39. package/src/infrastructure/providers/utils/base-provider-helpers.js +308 -302
  40. package/src/infrastructure/providers/utils/model-config.js +220 -195
  41. package/src/infrastructure/providers/utils/provider-utils.js +105 -100
  42. package/src/infrastructure/validation/commit-message-validation.service.js +556 -0
  43. package/src/shared/constants/colors.js +467 -172
  44. package/src/shared/utils/cli-demo.js +285 -0
  45. package/src/shared/utils/cli-entry-utils.js +257 -249
  46. package/src/shared/utils/cli-ui.js +447 -0
  47. package/src/shared/utils/diff-processor.js +513 -0
  48. package/src/shared/utils/error-classes.js +125 -156
  49. package/src/shared/utils/json-utils.js +93 -89
  50. package/src/shared/utils/utils.js +1299 -775
  51. package/types/index.d.ts +353 -344
@@ -1,23 +1,22 @@
1
- import { AzureOpenAI } from 'openai';
2
- import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity';
3
- import { BaseProvider } from '../core/base-provider.js';
4
- import { ProviderError } from '../../../shared/utils/utils.js';
5
- import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js';
6
- import { buildClientOptions } from '../utils/provider-utils.js';
1
+ import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity'
2
+ import { AzureOpenAI } from 'openai'
3
+
4
+ import { BaseProvider } from '../core/base-provider.js'
5
+ import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
7
6
 
8
7
  class AzureOpenAIProvider extends BaseProvider {
9
8
  constructor(config) {
10
- super(config);
11
- this.azureClient = null;
12
- this._cachedDeployments = null;
13
- this._deploymentsCacheTime = 0;
9
+ super(config)
10
+ this.azureClient = null
11
+ this._cachedDeployments = null
12
+ this._deploymentsCacheTime = 0
14
13
  // Only initialize if properly configured
15
14
  if (this.isAvailable()) {
16
15
  try {
17
- this.initializeClient();
16
+ this.initializeClient()
18
17
  } catch (error) {
19
18
  // Don't throw during construction, just log
20
- console.warn(`Azure provider initialization warning: ${error.message}`);
19
+ console.warn(`Azure provider initialization warning: ${error.message}`)
21
20
  }
22
21
  }
23
22
  }
@@ -26,27 +25,27 @@ class AzureOpenAIProvider extends BaseProvider {
26
25
  // Check if using API key or Azure AD authentication
27
26
  if (this.config.AZURE_USE_AD_AUTH === 'true') {
28
27
  try {
29
- const credential = new DefaultAzureCredential();
28
+ const credential = new DefaultAzureCredential()
30
29
  const azureADTokenProvider = getBearerTokenProvider(
31
30
  credential,
32
31
  'https://cognitiveservices.azure.com/.default'
33
- );
32
+ )
34
33
 
35
34
  this.azureClient = new AzureOpenAI({
36
35
  azureADTokenProvider,
37
36
  apiVersion: this.config.AZURE_OPENAI_API_VERSION || '2025-04-01-preview',
38
37
  endpoint: this.config.AZURE_OPENAI_ENDPOINT,
39
38
  timeout: 30000, // Reduced timeout for individual requests
40
- maxRetries: 2, // Add retry logic
41
- defaultQuery: { 'api-version': '2025-04-01-preview' }
42
- });
39
+ maxRetries: 2, // Add retry logic
40
+ defaultQuery: { 'api-version': '2025-04-01-preview' },
41
+ })
43
42
  } catch (error) {
44
- console.error('Failed to initialize Azure AD authentication:', error.message);
43
+ console.error('Failed to initialize Azure AD authentication:', error.message)
45
44
  // Fallback to API key if AD auth fails
46
- this.initializeWithApiKey();
45
+ this.initializeWithApiKey()
47
46
  }
48
47
  } else {
49
- this.initializeWithApiKey();
48
+ this.initializeWithApiKey()
50
49
  }
51
50
  }
52
51
 
@@ -56,22 +55,22 @@ class AzureOpenAIProvider extends BaseProvider {
56
55
  apiVersion: this.config.AZURE_OPENAI_API_VERSION || '2025-04-01-preview',
57
56
  endpoint: this.config.AZURE_OPENAI_ENDPOINT,
58
57
  timeout: 30000, // Reduced timeout for individual requests
59
- maxRetries: 2, // Add retry logic
60
- defaultQuery: { 'api-version': '2025-04-01-preview' }
61
- });
58
+ maxRetries: 2, // Add retry logic
59
+ defaultQuery: { 'api-version': '2025-04-01-preview' },
60
+ })
62
61
  }
63
62
 
64
63
  getName() {
65
- return 'azure';
64
+ return 'azure'
66
65
  }
67
66
 
68
67
  isAvailable() {
69
- const { AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_KEY, AZURE_USE_AD_AUTH } = this.config;
68
+ const { AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_KEY, AZURE_USE_AD_AUTH } = this.config
70
69
  // Must have endpoint, and either API key or AD auth enabled
71
- const hasEndpoint = AZURE_OPENAI_ENDPOINT && AZURE_OPENAI_ENDPOINT.trim() !== '';
72
- const hasAuth = (AZURE_OPENAI_KEY && AZURE_OPENAI_KEY.trim() !== '') ||
73
- AZURE_USE_AD_AUTH === 'true';
74
- return hasEndpoint && hasAuth;
70
+ const hasEndpoint = AZURE_OPENAI_ENDPOINT && AZURE_OPENAI_ENDPOINT.trim() !== ''
71
+ const hasAuth =
72
+ (AZURE_OPENAI_KEY && AZURE_OPENAI_KEY.trim() !== '') || AZURE_USE_AD_AUTH === 'true'
73
+ return hasEndpoint && hasAuth
75
74
  }
76
75
 
77
76
  async generateCompletion(messages, options = {}) {
@@ -79,12 +78,15 @@ class AzureOpenAIProvider extends BaseProvider {
79
78
  this,
80
79
  'generate_completion',
81
80
  async () => {
82
- const modelConfig = this.getProviderModelConfig();
81
+ const modelConfig = this.getProviderModelConfig()
83
82
 
84
83
  // In Azure, the model is the deployment name.
85
- const deploymentName = options.model || this.config.AZURE_OPENAI_DEPLOYMENT_NAME || modelConfig.standardModel;
84
+ const deploymentName =
85
+ options.model || this.config.AZURE_OPENAI_DEPLOYMENT_NAME || modelConfig.standardModel
86
86
  if (!deploymentName) {
87
- throw new Error('Azure deployment name is not configured. Please set AZURE_OPENAI_DEPLOYMENT_NAME or pass a model (deployment name) in the options');
87
+ throw new Error(
88
+ 'Azure deployment name is not configured. Please set AZURE_OPENAI_DEPLOYMENT_NAME or pass a model (deployment name) in the options'
89
+ )
88
90
  }
89
91
 
90
92
  const params = {
@@ -93,24 +95,24 @@ class AzureOpenAIProvider extends BaseProvider {
93
95
  max_tokens: options.max_tokens || 2000, // Reduced for faster responses
94
96
  temperature: options.temperature || 0.3,
95
97
  user: options.user || this.config.AZURE_USER_ID,
96
- };
98
+ }
97
99
 
98
100
  // Add tool calling if provided
99
101
  if (options.tools) {
100
- params.tools = options.tools;
101
- params.tool_choice = options.tool_choice || 'auto';
102
+ params.tools = options.tools
103
+ params.tool_choice = options.tool_choice || 'auto'
102
104
  }
103
105
 
104
106
  // Add data sources for Azure-specific features like "On Your Data"
105
107
  if (options.dataSources) {
106
- params.data_sources = options.dataSources;
108
+ params.data_sources = options.dataSources
107
109
  }
108
110
 
109
111
  // Add streaming if requested
110
112
  if (options.stream) {
111
- params.stream = true;
112
- const stream = await this.azureClient.chat.completions.create(params);
113
- return { stream, model: deploymentName };
113
+ params.stream = true
114
+ const stream = await this.azureClient.chat.completions.create(params)
115
+ return { stream, model: deploymentName }
114
116
  }
115
117
 
116
118
  // Add additional timeout wrapper for better control
@@ -118,65 +120,68 @@ class AzureOpenAIProvider extends BaseProvider {
118
120
  this.azureClient.chat.completions.create(params),
119
121
  new Promise((_, reject) =>
120
122
  setTimeout(() => reject(new Error('Request timeout after 25 seconds')), 25000)
121
- )
122
- ]);
123
-
124
- if (!completion.choices?.length || !completion.choices[0]?.message?.content) {
125
- const finishReason = completion.choices?.[0]?.finish_reason;
126
- const errorMsg = finishReason === 'length'
127
- ? `Response truncated due to token limit (max_tokens: ${params.max_tokens}). Consider increasing max_tokens or reducing prompt size.`
128
- : 'Empty response from Azure API';
129
- throw new Error(errorMsg);
123
+ ),
124
+ ])
125
+
126
+ if (!(completion.choices?.length > 0 && completion.choices[0]?.message?.content)) {
127
+ const finishReason = completion.choices?.[0]?.finish_reason
128
+ const errorMsg =
129
+ finishReason === 'length'
130
+ ? `Response truncated due to token limit (max_tokens: ${params.max_tokens}). Consider increasing max_tokens or reducing prompt size.`
131
+ : 'Empty response from Azure API'
132
+ throw new Error(errorMsg)
130
133
  }
131
134
 
132
- const content = completion.choices[0].message.content;
133
- const finishReason = completion.choices[0].finish_reason;
135
+ const content = completion.choices[0].message.content
136
+ const finishReason = completion.choices[0].finish_reason
134
137
 
135
138
  // Warn if response was truncated but still return the partial content
136
139
  if (finishReason === 'length') {
137
- console.warn(`⚠️ Azure response truncated due to token limit (${params.max_tokens}). Response may be incomplete.`);
140
+ console.warn(
141
+ `⚠️ Azure response truncated due to token limit (${params.max_tokens}). Response may be incomplete.`
142
+ )
138
143
  }
139
144
 
140
145
  // Extract Azure-specific content filter results if present
141
- let contentFilters = null;
146
+ let contentFilters = null
142
147
  if (completion.choices[0].content_filter_results) {
143
- contentFilters = completion.choices[0].content_filter_results;
148
+ contentFilters = completion.choices[0].content_filter_results
144
149
  }
145
150
 
146
151
  return {
147
- content: content,
152
+ content,
148
153
  model: completion.model,
149
154
  tokens: completion.usage.total_tokens,
150
155
  usage: {
151
156
  prompt_tokens: completion.usage.prompt_tokens,
152
157
  completion_tokens: completion.usage.completion_tokens,
153
- total_tokens: completion.usage.total_tokens
158
+ total_tokens: completion.usage.total_tokens,
154
159
  },
155
160
  finish_reason: finishReason,
156
161
  tool_calls: completion.choices[0].message.tool_calls,
157
- content_filters: contentFilters
158
- };
162
+ content_filters: contentFilters,
163
+ }
159
164
  },
160
165
  { model: options.model }
161
- );
166
+ )
162
167
  }
163
168
 
164
169
  // Azure-specific helper methods
165
170
  getDeploymentName() {
166
- return this.config.AZURE_OPENAI_DEPLOYMENT_NAME || this.getProviderModelConfig().standardModel;
171
+ return this.config.AZURE_OPENAI_DEPLOYMENT_NAME || this.getProviderModelConfig().standardModel
167
172
  }
168
173
 
169
174
  getModelContextWindow(modelName) {
170
175
  const contextWindows = {
171
176
  // Latest 2025 models (Azure exclusive)
172
- 'o4': 500000,
177
+ o4: 500000,
173
178
  'o4-mini': 200000,
174
- 'o3': 300000,
179
+ o3: 300000,
175
180
  'o3-mini': 150000,
176
181
  // Standard 2025 models
177
182
  'gpt-4o': 128000,
178
183
  'gpt-4o-mini': 128000,
179
- 'o1': 200000,
184
+ o1: 200000,
180
185
  'o1-mini': 128000,
181
186
  'gpt-4.1': 200000,
182
187
  'gpt-4.1-mini': 200000,
@@ -186,22 +191,35 @@ class AzureOpenAIProvider extends BaseProvider {
186
191
  'gpt-4-32k': 32768,
187
192
  'gpt-4-turbo': 128000,
188
193
  'gpt-35-turbo': 4096,
189
- 'gpt-35-turbo-16k': 16384
190
- };
191
- return contextWindows[modelName] || 128000;
194
+ 'gpt-35-turbo-16k': 16384,
195
+ }
196
+ return contextWindows[modelName] || 128000
192
197
  }
193
198
 
194
199
  getModelCapabilities(modelName) {
195
200
  return {
196
- reasoning: modelName.includes('o1') || modelName.includes('o3') || modelName.includes('o4') || modelName.includes('gpt-4'),
197
- function_calling: !modelName.includes('o1') && !modelName.includes('o3') && !modelName.includes('o4'), // o-series models don't support function calling
201
+ reasoning:
202
+ modelName.includes('o1') ||
203
+ modelName.includes('o3') ||
204
+ modelName.includes('o4') ||
205
+ modelName.includes('gpt-4'),
206
+ function_calling: !(
207
+ modelName.includes('o1') ||
208
+ modelName.includes('o3') ||
209
+ modelName.includes('o4')
210
+ ), // o-series models don't support function calling
198
211
  json_mode: true,
199
212
  multimodal: modelName.includes('gpt-4o') || modelName.includes('gpt-4.1'),
200
- largeContext: modelName.includes('4.1') || modelName.includes('o1') || modelName.includes('o3') || modelName.includes('o4') || modelName.includes('4o'),
213
+ largeContext:
214
+ modelName.includes('4.1') ||
215
+ modelName.includes('o1') ||
216
+ modelName.includes('o3') ||
217
+ modelName.includes('o4') ||
218
+ modelName.includes('4o'),
201
219
  promptCaching: modelName.includes('4.1'),
202
220
  advancedReasoning: modelName.includes('o3') || modelName.includes('o4'),
203
- azureExclusive: modelName.includes('o3') || modelName.includes('o4')
204
- };
221
+ azureExclusive: modelName.includes('o3') || modelName.includes('o4'),
222
+ }
205
223
  }
206
224
 
207
225
  // Azure-specific method for testing deployment availability
@@ -210,37 +228,40 @@ class AzureOpenAIProvider extends BaseProvider {
210
228
  const response = await this.azureClient.chat.completions.create({
211
229
  model: deploymentName,
212
230
  messages: [{ role: 'user', content: 'Test' }],
213
- max_tokens: 1
214
- });
231
+ max_tokens: 1,
232
+ })
215
233
 
216
234
  return {
217
235
  success: true,
218
236
  deployment: deploymentName,
219
- model: response.model
220
- };
237
+ model: response.model,
238
+ }
221
239
  } catch (error) {
222
240
  return {
223
241
  success: false,
224
242
  error: error.message,
225
- deployment: deploymentName
226
- };
243
+ deployment: deploymentName,
244
+ }
227
245
  }
228
246
  }
229
247
 
230
248
  // Get available deployments by testing common model names
231
249
  async getAvailableModels() {
232
- if (!this.isAvailable()) return [];
250
+ if (!this.isAvailable()) {
251
+ return []
252
+ }
233
253
 
234
254
  // Cache the result to avoid repeated API calls
235
- if (this._cachedDeployments && Date.now() - this._deploymentsCacheTime < 300000) { // 5 min cache
236
- return this._cachedDeployments;
255
+ if (this._cachedDeployments && Date.now() - this._deploymentsCacheTime < 300000) {
256
+ // 5 min cache
257
+ return this._cachedDeployments
237
258
  }
238
259
 
239
260
  // Get base config directly to avoid recursion
240
261
  const baseConfig = {
241
262
  commonDeployments: ['o4', 'o3', 'gpt-4.1', 'gpt-4o', 'gpt-35-turbo', 'o1'],
242
- fallbacks: ['gpt-4.1', 'gpt-4o', 'o1', 'gpt-35-turbo']
243
- };
263
+ fallbacks: ['gpt-4.1', 'gpt-4o', 'o1', 'gpt-35-turbo'],
264
+ }
244
265
 
245
266
  const potentialDeployments = [
246
267
  // User configured deployment
@@ -248,49 +269,51 @@ class AzureOpenAIProvider extends BaseProvider {
248
269
  // Common deployment names
249
270
  ...baseConfig.commonDeployments,
250
271
  // Fallback models
251
- ...baseConfig.fallbacks
252
- ].filter(Boolean).filter((v, i, a) => a.indexOf(v) === i); // Remove duplicates
272
+ ...baseConfig.fallbacks,
273
+ ]
274
+ .filter(Boolean)
275
+ .filter((v, i, a) => a.indexOf(v) === i) // Remove duplicates
253
276
 
254
- const availableDeployments = [];
277
+ const availableDeployments = []
255
278
 
256
279
  // Test each potential deployment in parallel (but limit concurrency)
257
280
  const testPromises = potentialDeployments.slice(0, 8).map(async (deployment) => {
258
- const result = await this.testDeployment(deployment);
281
+ const result = await this.testDeployment(deployment)
259
282
  if (result.success) {
260
- availableDeployments.push(deployment);
283
+ availableDeployments.push(deployment)
261
284
  }
262
- return result;
263
- });
285
+ return result
286
+ })
264
287
 
265
288
  try {
266
- await Promise.allSettled(testPromises);
289
+ await Promise.allSettled(testPromises)
267
290
 
268
291
  // Cache the result
269
- this._cachedDeployments = availableDeployments;
270
- this._deploymentsCacheTime = Date.now();
292
+ this._cachedDeployments = availableDeployments
293
+ this._deploymentsCacheTime = Date.now()
271
294
 
272
295
  if (availableDeployments.length === 0) {
273
- console.warn('⚠️ No Azure deployments found. Using configured deployment name as fallback.');
296
+ console.warn('⚠️ No Azure deployments found. Using configured deployment name as fallback.')
274
297
  // Fallback to configured deployment even if untested, preferring latest models
275
- const fallback = this.config.AZURE_OPENAI_DEPLOYMENT_NAME || 'o4' || 'gpt-4.1';
276
- return [fallback];
298
+ const fallback = this.config.AZURE_OPENAI_DEPLOYMENT_NAME || 'o4' || 'gpt-4.1'
299
+ return [fallback]
277
300
  }
278
301
 
279
- return availableDeployments;
302
+ return availableDeployments
280
303
  } catch (error) {
281
- console.warn('⚠️ Failed to detect Azure deployments:', error.message);
304
+ console.warn('⚠️ Failed to detect Azure deployments:', error.message)
282
305
  // Return common deployments as fallback
283
- return baseConfig.commonDeployments;
306
+ return baseConfig.commonDeployments
284
307
  }
285
308
  }
286
309
 
287
310
  // Force refresh of available deployments
288
311
  async refreshAvailableModels() {
289
- this._cachedDeployments = null;
290
- this._deploymentsCacheTime = 0;
291
- return await this.getAvailableModels();
312
+ this._cachedDeployments = null
313
+ this._deploymentsCacheTime = 0
314
+ return await this.getAvailableModels()
292
315
  }
293
316
  }
294
317
 
295
318
  // Apply mixins to add standard provider functionality
296
- export default applyMixins(AzureOpenAIProvider, 'azure');
319
+ export default applyMixins(AzureOpenAIProvider, 'azure')