@entro314labs/ai-changelog-generator 3.0.5

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (47)
  1. package/CHANGELOG.md +801 -0
  2. package/LICENSE +21 -0
  3. package/README.md +393 -0
  4. package/ai-changelog-mcp.sh +93 -0
  5. package/ai-changelog.sh +103 -0
  6. package/bin/ai-changelog-dxt.js +35 -0
  7. package/bin/ai-changelog-mcp.js +34 -0
  8. package/bin/ai-changelog.js +18 -0
  9. package/package.json +135 -0
  10. package/src/ai-changelog-generator.js +258 -0
  11. package/src/application/orchestrators/changelog.orchestrator.js +730 -0
  12. package/src/application/services/application.service.js +301 -0
  13. package/src/cli.js +157 -0
  14. package/src/domains/ai/ai-analysis.service.js +486 -0
  15. package/src/domains/analysis/analysis.engine.js +445 -0
  16. package/src/domains/changelog/changelog.service.js +1761 -0
  17. package/src/domains/changelog/workspace-changelog.service.js +505 -0
  18. package/src/domains/git/git-repository.analyzer.js +588 -0
  19. package/src/domains/git/git.service.js +302 -0
  20. package/src/infrastructure/cli/cli.controller.js +517 -0
  21. package/src/infrastructure/config/configuration.manager.js +538 -0
  22. package/src/infrastructure/interactive/interactive-workflow.service.js +444 -0
  23. package/src/infrastructure/mcp/mcp-server.service.js +540 -0
  24. package/src/infrastructure/metrics/metrics.collector.js +362 -0
  25. package/src/infrastructure/providers/core/base-provider.js +184 -0
  26. package/src/infrastructure/providers/implementations/anthropic.js +329 -0
  27. package/src/infrastructure/providers/implementations/azure.js +296 -0
  28. package/src/infrastructure/providers/implementations/bedrock.js +393 -0
  29. package/src/infrastructure/providers/implementations/dummy.js +112 -0
  30. package/src/infrastructure/providers/implementations/google.js +320 -0
  31. package/src/infrastructure/providers/implementations/huggingface.js +301 -0
  32. package/src/infrastructure/providers/implementations/lmstudio.js +189 -0
  33. package/src/infrastructure/providers/implementations/mock.js +275 -0
  34. package/src/infrastructure/providers/implementations/ollama.js +151 -0
  35. package/src/infrastructure/providers/implementations/openai.js +273 -0
  36. package/src/infrastructure/providers/implementations/vertex.js +438 -0
  37. package/src/infrastructure/providers/provider-management.service.js +415 -0
  38. package/src/infrastructure/providers/provider-manager.service.js +363 -0
  39. package/src/infrastructure/providers/utils/base-provider-helpers.js +660 -0
  40. package/src/infrastructure/providers/utils/model-config.js +610 -0
  41. package/src/infrastructure/providers/utils/provider-utils.js +286 -0
  42. package/src/shared/constants/colors.js +370 -0
  43. package/src/shared/utils/cli-entry-utils.js +525 -0
  44. package/src/shared/utils/error-classes.js +423 -0
  45. package/src/shared/utils/json-utils.js +318 -0
  46. package/src/shared/utils/utils.js +1997 -0
  47. package/types/index.d.ts +464 -0
package/src/infrastructure/providers/implementations/lmstudio.js
@@ -0,0 +1,189 @@
+ import { OpenAI } from 'openai';
+ import { BaseProvider } from '../core/base-provider.js';
+ import { applyMixins } from '../utils/base-provider-helpers.js';
+ import { buildClientOptions } from '../utils/provider-utils.js';
+
+ class LMStudioProvider extends BaseProvider {
+   constructor(config) {
+     super(config);
+     this.client = null;
+     if (this.isAvailable()) {
+       this.initializeClient();
+     }
+   }
+
+   initializeClient() {
+     const clientOptions = buildClientOptions(this.getProviderConfig(), {
+       baseURL: 'http://localhost:1234/v1',
+       apiKey: 'lm-studio', // Not required by LM Studio
+       timeout: 120000,
+       maxRetries: 2
+     });
+
+     // LM Studio uses an OpenAI-compatible API
+     this.client = new OpenAI({
+       baseURL: clientOptions.baseURL,
+       apiKey: clientOptions.apiKey,
+       timeout: clientOptions.timeout,
+       maxRetries: clientOptions.maxRetries,
+     });
+   }
+
+   getName() {
+     return 'lmstudio';
+   }
+
+   isAvailable() {
+     return !!this.config.LMSTUDIO_API_BASE;
+   }
+
+   async generateCompletion(messages, options = {}) {
+     if (!this.isAvailable()) {
+       return this.handleProviderError(
+         new Error('LM Studio provider is not configured'),
+         'generate_completion'
+       );
+     }
+
+     // Prepare parameters for the API call
+     const params = {
+       model: options.model || this.config.AI_MODEL || this.config.LMSTUDIO_MODEL || this.config.LMSTUDIO_DEFAULT_MODEL || 'local-model',
+       messages: messages,
+       max_tokens: options.max_tokens || 2048,
+       temperature: options.temperature || 0.7,
+       top_p: options.top_p || 0.95,
+       frequency_penalty: options.frequency_penalty || 0,
+       presence_penalty: options.presence_penalty || 0,
+       user: options.user || this.config.LMSTUDIO_USER_ID
+     };
+
+     // Add function calling if provided and the model supports it
+     if (options.tools && this.getCapabilities(params.model).tool_use) {
+       params.tools = options.tools;
+       params.tool_choice = options.tool_choice || 'auto';
+     }
+
+     // Add JSON mode if requested and the model supports it
+     if (options.response_format?.type === 'json_object' && this.getCapabilities(params.model).json_mode) {
+       params.response_format = { type: 'json_object' };
+     }
+
+     // Handle streaming
+     if (options.stream) {
+       params.stream = true;
+       const stream = await this.client.chat.completions.create(params);
+       return { stream, model: params.model };
+     }
+
+     // Make the non-streaming API call
+     const response = await this.client.chat.completions.create(params);
+
+     // Extract tool calls if present
+     let toolCalls = null;
+     if (response.choices[0]?.message?.tool_calls?.length > 0) {
+       toolCalls = response.choices[0].message.tool_calls;
+     }
+
+     return {
+       content: response.choices[0].message.content,
+       model: response.model,
+       tokens: response.usage?.total_tokens || 0,
+       finish_reason: response.choices[0].finish_reason,
+       tool_calls: toolCalls
+     };
+   }
+
+
+   getCapabilities(modelName) {
+     const model = modelName || this.config.AI_MODEL || this.config.LMSTUDIO_MODEL || this.config.LMSTUDIO_DEFAULT_MODEL || 'local-model';
+
+     // Base capabilities - all models are local
+     const capabilities = {
+       vision: false,
+       tool_use: false,
+       json_mode: false,
+       reasoning: false,
+       local: true
+     };
+
+     // Capabilities depend on the specific model loaded in LM Studio.
+     // These are general capabilities based on model family naming conventions.
+
+     // Llama models
+     if (model.toLowerCase().includes('llama')) {
+       capabilities.json_mode = true;
+
+       // Llama 3 models likely support function calling
+       if (model.includes('3')) {
+         capabilities.tool_use = true;
+       }
+     }
+
+     // Mistral models
+     else if (model.toLowerCase().includes('mistral')) {
+       capabilities.json_mode = true;
+
+       // Mixtral models likely have better reasoning
+       if (model.toLowerCase().includes('mixtral')) {
+         capabilities.reasoning = true;
+       }
+     }
+
+     // Vision models
+     if (model.toLowerCase().includes('vision') ||
+         model.toLowerCase().includes('llava') ||
+         model.toLowerCase().includes('bakllava')) {
+       capabilities.vision = true;
+     }
+
+     return capabilities;
+   }
+
+   getAvailableModels() {
+     return [
+       {
+         id: 'local-model',
+         name: 'Local Model (LM Studio)',
+         contextWindow: 4096,
+         maxOutput: 2048,
+         inputCost: 0, // Local models are free
+         outputCost: 0,
+         features: ['text', 'local'],
+         description: 'Local model running in LM Studio'
+       },
+       {
+         id: 'llama-3-8b-instruct',
+         name: 'Llama 3 8B Instruct',
+         contextWindow: 8192,
+         maxOutput: 4096,
+         inputCost: 0,
+         outputCost: 0,
+         features: ['text', 'tools', 'local'],
+         description: 'Local Llama 3 8B instruction-tuned model'
+       },
+       {
+         id: 'mistral-7b-instruct',
+         name: 'Mistral 7B Instruct',
+         contextWindow: 8192,
+         maxOutput: 4096,
+         inputCost: 0,
+         outputCost: 0,
+         features: ['text', 'reasoning', 'local'],
+         description: 'Local Mistral 7B instruction-tuned model'
+       },
+       {
+         id: 'llava-1.5-7b',
+         name: 'LLaVA 1.5 7B',
+         contextWindow: 4096,
+         maxOutput: 2048,
+         inputCost: 0,
+         outputCost: 0,
+         features: ['text', 'vision', 'local'],
+         description: 'Local vision-language model'
+       }
+     ];
+   }
+ }
+
+ // Apply mixins to add standard provider functionality
+ export default applyMixins(LMStudioProvider, 'lmstudio');
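
A minimal usage sketch of the provider above, assuming the mixin-applied default export is imported with a relative path and that a config object carrying LMSTUDIO_API_BASE is enough to satisfy isAvailable(); the real config shape comes from configuration.manager.js, which is not shown in this hunk:

import LMStudioProvider from './src/infrastructure/providers/implementations/lmstudio.js';

// Hypothetical config: isAvailable() only checks that LMSTUDIO_API_BASE is set.
const provider = new LMStudioProvider({ LMSTUDIO_API_BASE: 'http://localhost:1234/v1' });

const result = await provider.generateCompletion(
  [{ role: 'user', content: 'Summarize the latest commits.' }],
  { max_tokens: 256, temperature: 0.3 }
);
console.log(result.content, result.tokens);

Because the client defaults to http://localhost:1234/v1, this only works against a locally running LM Studio server with a model loaded.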
package/src/infrastructure/providers/implementations/mock.js
@@ -0,0 +1,275 @@
+ /**
+  * Mock Provider
+  * Used for testing without real API credentials
+  */
+
+ import { BaseProvider } from '../core/base-provider.js';
+ import { ProviderError } from '../../../shared/utils/utils.js';
+
+ class MockProvider extends BaseProvider {
+   constructor(config = {}) {
+     super(config);
+     this.name = 'mock';
+     this.mockResponses = config.MOCK_RESPONSES || {};
+     this.shouldFail = config.MOCK_SHOULD_FAIL === 'true';
+     this.failureRate = parseFloat(config.MOCK_FAILURE_RATE || '0.1');
+     this.latency = parseInt(config.MOCK_LATENCY || '500', 10);
+     this.models = [
+       'mock-basic',
+       'mock-standard',
+       'mock-advanced'
+     ];
+   }
+
+   /**
+    * Get provider name
+    * @returns {string} Provider name
+    */
+   getName() {
+     return this.name;
+   }
+
+   /**
+    * Check if provider is available
+    * @returns {boolean} Always true for mock provider
+    */
+   isAvailable() {
+     return true;
+   }
+
+   /**
+    * Simulate network latency
+    * Uses the configured latency (this.latency) in milliseconds
+    * @returns {Promise} - Promise that resolves after the delay
+    */
+   async _simulateLatency() {
+     return new Promise(resolve => setTimeout(resolve, this.latency));
+   }
+
+   /**
+    * Determine if operation should fail based on failure rate
+    * @returns {boolean} - Whether operation should fail
+    */
+   _shouldFailOperation() {
+     if (this.shouldFail) {
+       return true;
+     }
+     return Math.random() < this.failureRate;
+   }
+
+   /**
+    * Generate completion with mock data
+    * @param {Array} messages - Messages for completion
+    * @param {Object} options - Generation options
+    * @returns {Promise} - Promise that resolves with mock completion
+    */
+   async generateCompletion(messages, options = {}) {
+     await this._simulateLatency();
+
+     if (this._shouldFailOperation()) {
+       throw new ProviderError(
+         'Mock provider simulated failure',
+         'mock_error',
+         this.getName(),
+         options.model || 'mock-standard'
+       );
+     }
+
+     const model = options.model || 'mock-standard';
+     const lastMessage = messages[messages.length - 1];
+     const prompt = typeof lastMessage === 'string'
+       ? lastMessage
+       : (lastMessage.content || '');
+
+     // Check for predefined responses
+     if (this.mockResponses[prompt]) {
+       return {
+         content: this.mockResponses[prompt],
+         tokens: this.mockResponses[prompt].length / 4, // Rough estimate
+         model: model
+       };
+     }
+
+     // Generate mock response based on commit convention if it looks like a commit message
+     if (prompt.match(/^(feat|fix|docs|style|refactor|perf|test|build|ci|chore)(\(.+\))?:/)) {
+       return {
+         content: this._generateMockChangelog(prompt),
+         tokens: 150,
+         model: model
+       };
+     }
+
+     // Default mock response
+     return {
+       content: `This is a mock response from the ${model} model.`,
+       tokens: 10,
+       model: model
+     };
+   }
+
+   /**
+    * Generate mock changelog based on commit message
+    * @param {string} commitMessage - Commit message
+    * @returns {string} - Mock changelog entry
+    */
+   _generateMockChangelog(commitMessage) {
+     const typeMatch = commitMessage.match(/^(feat|fix|docs|style|refactor|perf|test|build|ci|chore)(\(.+\))?:/);
+
+     if (!typeMatch) {
+       return 'Mock changelog entry for conventional commit';
+     }
+
+     const type = typeMatch[1];
+     const scope = typeMatch[2] ? typeMatch[2].replace(/[()]/g, '') : '';
+     const description = commitMessage.split(':')[1]?.trim() || '';
+
+     const typeMap = {
+       feat: 'Feature',
+       fix: 'Bug Fix',
+       docs: 'Documentation',
+       style: 'Style',
+       refactor: 'Code Refactoring',
+       perf: 'Performance',
+       test: 'Tests',
+       build: 'Build',
+       ci: 'CI',
+       chore: 'Chore'
+     };
+
+     const title = `${typeMap[type]}${scope ? ` (${scope})` : ''}`;
+     return `### ${title}\n\n- ${description}\n`;
+   }
+
+   /**
+    * Get model recommendation based on mock rules
+    * @param {Object} commitInfo - Commit information
+    * @returns {Object} Model recommendation
+    */
+   getModelRecommendation(commitInfo = {}) {
+     if (!commitInfo.message) {
+       return {
+         model: 'mock-standard',
+         reason: 'Default model selected due to insufficient commit information'
+       };
+     }
+
+     // Simple logic based on commit complexity
+     const filesChanged = commitInfo.files?.length || 0;
+     const linesChanged = (commitInfo.additions || 0) + (commitInfo.deletions || 0);
+
+     if (filesChanged > 10 || linesChanged > 500 || commitInfo.breaking) {
+       return {
+         model: 'mock-advanced',
+         reason: 'Complex commit detected, using advanced model'
+       };
+     } else if (filesChanged > 3 || linesChanged > 100) {
+       return {
+         model: 'mock-standard',
+         reason: 'Moderate commit detected, using standard model'
+       };
+     } else {
+       return {
+         model: 'mock-basic',
+         reason: 'Simple commit detected, using basic model'
+       };
+     }
+   }
+
+   /**
+    * Validate model availability
+    * @param {string} model - Model name
+    * @returns {Promise} - Promise that resolves with validation result
+    */
+   async validateModelAvailability(model) {
+     await this._simulateLatency();
+
+     if (this._shouldFailOperation()) {
+       return {
+         available: false,
+         error: 'Mock validation failure',
+         alternatives: this.models
+       };
+     }
+
+     const isAvailable = this.models.includes(model);
+
+     return {
+       available: isAvailable,
+       model: isAvailable ? model : null,
+       alternatives: isAvailable ? [] : this.models
+     };
+   }
+
+   /**
+    * Test connection to mock provider
+    * @returns {Promise} - Promise that resolves with connection test result
+    */
+   async testConnection() {
+     await this._simulateLatency();
+
+     if (this._shouldFailOperation()) {
+       return {
+         success: false,
+         error: 'Mock connection failure',
+         provider: this.getName()
+       };
+     }
+
+     return {
+       success: true,
+       provider: this.getName(),
+       model: 'mock-standard',
+       response: 'Mock connection successful'
+     };
+   }
+
+   /**
+    * Get provider capabilities
+    * @returns {Object} - Capabilities object
+    */
+   getCapabilities() {
+     return {
+       streaming: false,
+       tool_use: true,
+       vision: false,
+       json_mode: true
+     };
+   }
+
+   getAvailableModels() {
+     return [
+       {
+         id: 'mock-basic',
+         name: 'Mock Basic Model',
+         contextWindow: 2048,
+         maxOutput: 1000,
+         inputCost: 0,
+         outputCost: 0,
+         features: ['text', 'testing'],
+         description: 'Basic mock model for simple testing'
+       },
+       {
+         id: 'mock-standard',
+         name: 'Mock Standard Model',
+         contextWindow: 4096,
+         maxOutput: 2000,
+         inputCost: 0,
+         outputCost: 0,
+         features: ['text', 'tools', 'testing'],
+         description: 'Standard mock model for moderate testing'
+       },
+       {
+         id: 'mock-advanced',
+         name: 'Mock Advanced Model',
+         contextWindow: 8192,
+         maxOutput: 4000,
+         inputCost: 0,
+         outputCost: 0,
+         features: ['text', 'tools', 'json', 'testing'],
+         description: 'Advanced mock model for complex testing scenarios'
+       }
+     ];
+   }
+ }
+
+ export default MockProvider;
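
A short sketch of how the mock provider's conventional-commit path behaves, assuming a config that disables the simulated latency and failures (the MOCK_* keys are read as strings, as in the constructor above):

import MockProvider from './src/infrastructure/providers/implementations/mock.js';

const mock = new MockProvider({ MOCK_LATENCY: '0', MOCK_FAILURE_RATE: '0' });

const res = await mock.generateCompletion([
  { role: 'user', content: 'feat(cli): add --dry-run flag' }
]);
// The message matches the conventional-commit regex, so _generateMockChangelog() returns:
// "### Feature (cli)\n\n- add --dry-run flag\n"
console.log(res.content);

Any prompt that does not match a predefined response or the commit regex falls through to the generic "This is a mock response..." string, which keeps tests deterministic without API credentials.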
package/src/infrastructure/providers/implementations/ollama.js
@@ -0,0 +1,151 @@
+ import { Ollama } from 'ollama';
+ import { BaseProvider } from '../core/base-provider.js';
+ import { ProviderError } from '../../../shared/utils/utils.js';
+ import { applyMixins } from '../utils/base-provider-helpers.js';
+ import { buildClientOptions } from '../utils/provider-utils.js';
+
+ class OllamaProvider extends BaseProvider {
+   constructor(config) {
+     super(config);
+     this.client = null;
+     if (this.isAvailable()) {
+       this.initializeClient();
+     }
+   }
+
+   initializeClient() {
+     const clientOptions = buildClientOptions(this.getProviderConfig(), {
+       host: 'http://localhost:11434'
+     });
+
+     this.client = new Ollama({
+       host: clientOptions.host
+     });
+   }
+
+   getName() {
+     return 'ollama';
+   }
+
+   isAvailable() {
+     return !!this.config.OLLAMA_HOST;
+   }
+
+   async generateCompletion(messages, options = {}) {
+     if (!this.isAvailable()) {
+       return this.handleProviderError(
+         new Error('Ollama provider is not configured'),
+         'generate_completion'
+       );
+     }
+
+     try {
+       // Test connection the first time if not already done
+       if (!this._connectionTested) {
+         try {
+           await this.client.list();
+           this._connectionTested = true;
+         } catch (connectionError) {
+           return this.handleProviderError(
+             new Error(`Ollama server unreachable: ${connectionError.message}. Please run 'ollama serve' first.`),
+             'generate_completion'
+           );
+         }
+       }
+
+       const modelConfig = this.getProviderModelConfig();
+       const modelName = options.model || modelConfig.standardModel;
+
+       const params = {
+         model: modelName,
+         messages: messages,
+         stream: !!options.stream,
+         options: {
+           temperature: options.temperature || 0.7,
+           top_p: options.top_p || 0.9,
+           num_predict: options.max_tokens || 1024,
+           stop: options.stop || [],
+         },
+       };
+
+       if (options.tools && this.getCapabilities(modelName).tool_use) {
+         params.tools = options.tools;
+       }
+
+       if (options.response_format?.type === 'json_object' && this.getCapabilities(modelName).json_mode) {
+         params.format = 'json';
+       }
+
+       if (params.stream) {
+         const stream = await this.client.chat(params);
+         return { stream, model: modelName };
+       }
+
+       const response = await this.client.chat(params);
+       return {
+         content: response.message.content,
+         model: response.model,
+         tokens: response.eval_count,
+         finish_reason: response.done ? 'stop' : 'incomplete',
+         tool_calls: response.message.tool_calls,
+       };
+     } catch (error) {
+       return this.handleProviderError(error, 'generate_completion', { model: options.model });
+     }
+   }
+
+   async generateEmbedding(text, options = {}) {
+     if (!this.isAvailable()) {
+       throw new ProviderError('Ollama provider is not configured', 'ollama', 'isAvailable');
+     }
+
+     const modelName = options.model || this.config.OLLAMA_EMBEDDING_MODEL || this.config.AI_MODEL_EMBEDDING || 'nomic-embed-text';
+
+     const response = await this.client.embeddings({
+       model: modelName,
+       prompt: text,
+       options: {
+         temperature: options.temperature || 0.0
+       }
+     });
+
+     return {
+       embedding: response.embedding,
+       model: modelName,
+       tokens: response.token_count || 0
+     };
+   }
+
+   // Ollama-specific helper methods
+   async getAvailableModels() {
+     if (!this.isAvailable()) return [];
+     try {
+       const response = await this.client.list();
+       return response.models.map(m => m.name);
+     } catch (error) {
+       // Only log connection errors in development mode or when explicitly requested
+       if (!this._connectionErrorLogged && (process.env.NODE_ENV === 'development' || process.env.DEBUG)) {
+         console.warn(`⚠️ Ollama connection failed: ${error.message}`);
+         console.warn(`💡 Make sure Ollama is running: ollama serve`);
+         this._connectionErrorLogged = true;
+       }
+       return [];
+     }
+   }
+
+   async pullModel(modelName) {
+     if (!this.isAvailable()) {
+       throw new ProviderError('Ollama provider is not configured', 'ollama', 'isAvailable');
+     }
+
+     try {
+       const pullStream = await this.client.pull({ model: modelName, stream: true });
+       return { stream: pullStream, model: modelName };
+     } catch (error) {
+       throw new ProviderError(`Failed to pull model ${modelName}: ${error.message}`, 'ollama', 'pullModel', error, { modelName });
+     }
+   }
+ }
+
+ // Apply mixins to add standard provider functionality
+ export default applyMixins(OllamaProvider, 'ollama');
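
A minimal usage sketch for the Ollama provider, assuming OLLAMA_HOST is the only config key needed for isAvailable(), that a local daemon is running ('ollama serve'), and that the default model otherwise comes from getProviderModelConfig() supplied by the mixins (not shown in this hunk):

import OllamaProvider from './src/infrastructure/providers/implementations/ollama.js';

// Hypothetical config: isAvailable() only checks OLLAMA_HOST.
const ollama = new OllamaProvider({ OLLAMA_HOST: 'http://localhost:11434' });

const models = await ollama.getAvailableModels(); // returns [] if the daemon is unreachable
const reply = await ollama.generateCompletion(
  [{ role: 'user', content: 'Write a changelog entry for a bug fix.' }],
  { model: models[0], max_tokens: 200 }
);
console.log(reply.content);

Unlike the LM Studio provider, this one also exposes generateEmbedding() and pullModel(), both of which require the same running daemon.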