@entro314labs/ai-changelog-generator 3.1.1 → 3.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +412 -875
- package/README.md +8 -3
- package/ai-changelog-mcp.sh +0 -0
- package/ai-changelog.sh +0 -0
- package/bin/ai-changelog-dxt.js +9 -9
- package/bin/ai-changelog-mcp.js +19 -17
- package/bin/ai-changelog.js +6 -6
- package/package.json +80 -48
- package/src/ai-changelog-generator.js +91 -81
- package/src/application/orchestrators/changelog.orchestrator.js +791 -516
- package/src/application/services/application.service.js +137 -128
- package/src/cli.js +76 -57
- package/src/domains/ai/ai-analysis.service.js +289 -209
- package/src/domains/analysis/analysis.engine.js +328 -192
- package/src/domains/changelog/changelog.service.js +1174 -783
- package/src/domains/changelog/workspace-changelog.service.js +487 -249
- package/src/domains/git/git-repository.analyzer.js +348 -258
- package/src/domains/git/git.service.js +132 -112
- package/src/infrastructure/cli/cli.controller.js +390 -274
- package/src/infrastructure/config/configuration.manager.js +220 -190
- package/src/infrastructure/interactive/interactive-staging.service.js +154 -135
- package/src/infrastructure/interactive/interactive-workflow.service.js +200 -159
- package/src/infrastructure/mcp/mcp-server.service.js +208 -207
- package/src/infrastructure/metrics/metrics.collector.js +140 -123
- package/src/infrastructure/providers/core/base-provider.js +87 -40
- package/src/infrastructure/providers/implementations/anthropic.js +101 -99
- package/src/infrastructure/providers/implementations/azure.js +124 -101
- package/src/infrastructure/providers/implementations/bedrock.js +136 -126
- package/src/infrastructure/providers/implementations/dummy.js +23 -23
- package/src/infrastructure/providers/implementations/google.js +123 -114
- package/src/infrastructure/providers/implementations/huggingface.js +94 -87
- package/src/infrastructure/providers/implementations/lmstudio.js +75 -60
- package/src/infrastructure/providers/implementations/mock.js +69 -73
- package/src/infrastructure/providers/implementations/ollama.js +89 -66
- package/src/infrastructure/providers/implementations/openai.js +88 -89
- package/src/infrastructure/providers/implementations/vertex.js +227 -197
- package/src/infrastructure/providers/provider-management.service.js +245 -207
- package/src/infrastructure/providers/provider-manager.service.js +145 -125
- package/src/infrastructure/providers/utils/base-provider-helpers.js +308 -302
- package/src/infrastructure/providers/utils/model-config.js +220 -195
- package/src/infrastructure/providers/utils/provider-utils.js +105 -100
- package/src/infrastructure/validation/commit-message-validation.service.js +259 -161
- package/src/shared/constants/colors.js +453 -180
- package/src/shared/utils/cli-demo.js +285 -0
- package/src/shared/utils/cli-entry-utils.js +257 -249
- package/src/shared/utils/cli-ui.js +447 -0
- package/src/shared/utils/diff-processor.js +513 -0
- package/src/shared/utils/error-classes.js +125 -156
- package/src/shared/utils/json-utils.js +93 -89
- package/src/shared/utils/utils.js +1117 -945
- package/types/index.d.ts +353 -344
@@ -1,48 +1,48 @@
-import { OpenAI } from 'openai'
-
-import { BaseProvider } from '../core/base-provider.js'
-import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
-import { buildClientOptions } from '../utils/provider-utils.js'
+import { OpenAI } from 'openai'
+
+import { BaseProvider } from '../core/base-provider.js'
+import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
+import { buildClientOptions } from '../utils/provider-utils.js'
 
 export class OpenAIProvider extends BaseProvider {
   constructor(config) {
-    super(config)
-    this.openai = null
+    super(config)
+    this.openai = null
     if (this.isAvailable()) {
-      this.initializeClient()
+      this.initializeClient()
     }
   }
 
   initializeClient() {
     const clientOptions = buildClientOptions(this.getProviderConfig(), {
       timeout: 60000,
-      maxRetries: 2
-    })
+      maxRetries: 2,
+    })
 
     this.openai = new OpenAI({
       apiKey: clientOptions.apiKey,
       organization: clientOptions.organization,
       project: clientOptions.project,
       timeout: clientOptions.timeout,
-      maxRetries: clientOptions.maxRetries
-    })
+      maxRetries: clientOptions.maxRetries,
+    })
   }
 
   getName() {
-    return 'openai'
+    return 'openai'
   }
 
   isAvailable() {
-    const apiKey = this.config.OPENAI_API_KEY
-    return apiKey && apiKey.trim() !== '' && apiKey.startsWith('sk-')
+    const apiKey = this.config.OPENAI_API_KEY
+    return apiKey && apiKey.trim() !== '' && apiKey.startsWith('sk-')
   }
 
   getRequiredEnvVars() {
-    return ['OPENAI_API_KEY']
+    return ['OPENAI_API_KEY']
   }
 
   getDefaultModel() {
-    return 'gpt-4o'
+    return 'gpt-4o'
   }
 
   async generateCompletion(messages, options = {}) {
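Note: the changes in this hunk are mechanical formatting (trailing commas on multiline literals; the paired removed/added lines differ only in whitespace or punctuation). For orientation, a minimal usage sketch of the constructor/availability contract shown above; it assumes BaseProvider's constructor assigns the config object to this.config, which this diff does not show:

// Hypothetical direct instantiation; the config shape is inferred from the
// reads above (only OPENAI_API_KEY is consulted by this provider).
import OpenAIProvider from './src/infrastructure/providers/implementations/openai.js'

const provider = new OpenAIProvider({ OPENAI_API_KEY: process.env.OPENAI_API_KEY })

// isAvailable() accepts only non-empty keys that start with 'sk-'.
if (provider.isAvailable()) {
  console.log(provider.getName()) // 'openai'
  console.log(provider.getDefaultModel()) // 'gpt-4o'
}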
@@ -50,28 +50,28 @@ export class OpenAIProvider extends BaseProvider {
       this,
       'generate_completion',
       async () => {
-        const modelConfig = this.getProviderModelConfig()
+        const modelConfig = this.getProviderModelConfig()
         const params = {
           model: options.model || modelConfig.standardModel || this.getDefaultModel(),
           messages,
           max_tokens: options.max_tokens || 1000,
           temperature: options.temperature || 0.3,
-        }
+        }
 
         // Add tool calling if provided
         if (options.tools) {
-          params.tools = options.tools
-          params.tool_choice = options.tool_choice || 'auto'
+          params.tools = options.tools
+          params.tool_choice = options.tool_choice || 'auto'
         }
 
         // Add streaming if requested
         if (options.stream) {
-          params.stream = true
-          const stream = await this.openai.chat.completions.create(params)
-          return { stream, model: params.model }
+          params.stream = true
+          const stream = await this.openai.chat.completions.create(params)
+          return { stream, model: params.model }
         }
 
-        const completion = await this.openai.chat.completions.create(params)
+        const completion = await this.openai.chat.completions.create(params)
 
         return {
           text: completion.choices[0].message.content,
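The params assembly above is unchanged in behavior. A short call sketch, assuming a provider instance as in the earlier sketch; note that `temperature: options.temperature || 0.3` means an explicit temperature of 0 still falls back to 0.3:

// Non-streaming call: resolves to the summary object built above.
const result = await provider.generateCompletion(
  [{ role: 'user', content: 'Summarize this change' }],
  { model: 'gpt-4o-mini', max_tokens: 500, temperature: 0.2 }
)
console.log(result.text, result.tokens, result.finish_reason)

// Streaming call: returns the raw OpenAI SDK stream instead of a summary.
const { stream } = await provider.generateCompletion(
  [{ role: 'user', content: 'Summarize this change' }],
  { stream: true }
)
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '')
}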
@@ -80,32 +80,32 @@ export class OpenAIProvider extends BaseProvider {
           usage: completion.usage,
           tokens: completion.usage.total_tokens,
           finish_reason: completion.choices[0].finish_reason,
-          tool_calls: completion.choices[0].message.tool_calls
-        }
+          tool_calls: completion.choices[0].message.tool_calls,
+        }
       },
       { model: options.model }
-    )
+    )
   }
 
   async generateText(messages, model = null) {
-    const response = await this.generateCompletion(messages, { model })
-    return response
+    const response = await this.generateCompletion(messages, { model })
+    return response
   }
 
   async getAvailableModels() {
     try {
-      const models = await this.openai.models.list()
+      const models = await this.openai.models.list()
       return models.data
-        .filter(m => m.id.includes('gpt') || m.id.includes('o1'))
-        .map(m => ({
+        .filter((m) => m.id.includes('gpt') || m.id.includes('o1'))
+        .map((m) => ({
           name: m.id,
           id: m.id,
           description: `OpenAI ${m.id}`,
           contextWindow: this.getModelContextWindow(m.id),
-          capabilities: this.getModelCapabilities(m.id)
-        }))
-    } catch (error) {
-      return []
+          capabilities: this.getModelCapabilities(m.id),
+        }))
+    } catch (_error) {
+      return []
     }
   }
 
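Behavior note for the hunk above: the catch parameter rename to `_error` is a lint-style fix; the error is intentionally swallowed and an empty list returned. A sketch of the mapped entry shape (values illustrative; the capabilities keys come from getModelCapabilities further down):

const models = await provider.getAvailableModels()
// models[0] → {
//   name: 'gpt-4o',
//   id: 'gpt-4o',
//   description: 'OpenAI gpt-4o',
//   contextWindow: 128000,
//   capabilities: { function_calling: true, json_mode: true, multimodal: true, ... }
// }
// On any API failure the method returns [] rather than throwing.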
@@ -114,7 +114,7 @@ export class OpenAIProvider extends BaseProvider {
       // Latest 2025 models
       'gpt-4o': 128000,
       'gpt-4o-mini': 128000,
-      'o1': 200000,
+      o1: 200000,
       'o1-mini': 128000,
       'gpt-4.1-nano': 200000,
       'gpt-4.1-mini': 200000,
@@ -126,9 +126,9 @@ export class OpenAIProvider extends BaseProvider {
       'gpt-4-turbo': 128000,
       'gpt-4-turbo-preview': 128000,
       'gpt-3.5-turbo': 4096,
-      'gpt-3.5-turbo-16k': 16384
-    }
-    return contextWindows[modelName] || 128000
+      'gpt-3.5-turbo-16k': 16384,
+    }
+    return contextWindows[modelName] || 128000
   }
 
   getModelCapabilities(modelName) {
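In the two hunks above the context-window table only changes shape: the o1 key loses its now-unnecessary quotes and the literal gains trailing commas. Lookup behavior, per the table:

provider.getModelContextWindow('o1') // 200000
provider.getModelContextWindow('gpt-3.5-turbo') // 4096
provider.getModelContextWindow('gpt-3.5-turbo-16k') // 16384
provider.getModelContextWindow('some-unlisted-model') // 128000 (fallback)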
@@ -137,56 +137,56 @@ export class OpenAIProvider extends BaseProvider {
       function_calling: !modelName.includes('o1'), // o1 models don't support function calling
       json_mode: true,
       multimodal: modelName.includes('gpt-4o') || modelName.includes('gpt-4.1'),
-      largeContext: modelName.includes('4.1') || modelName.includes('o1') || modelName.includes('4o'),
-      promptCaching: modelName.includes('4.1')
-    }
+      largeContext:
+        modelName.includes('4.1') || modelName.includes('o1') || modelName.includes('4o'),
+      promptCaching: modelName.includes('4.1'),
+    }
   }
 
   async validateModelAvailability(modelName) {
     try {
-      const models = await this.getAvailableModels()
-      const model = models.find(m => m.name === modelName)
+      const models = await this.getAvailableModels()
+      const model = models.find((m) => m.name === modelName)
 
       if (model) {
         return {
           available: true,
           model: modelName,
           capabilities: model.capabilities,
-          contextWindow: model.contextWindow
-        }
-      } else {
-        const availableModels = models.map(m => m.name)
-        return {
-          available: false,
-          error: `Model '${modelName}' not available`,
-          alternatives: availableModels.slice(0, 5)
-        };
+          contextWindow: model.contextWindow,
+        }
+      }
+      const availableModels = models.map((m) => m.name)
+      return {
+        available: false,
+        error: `Model '${modelName}' not available`,
+        alternatives: availableModels.slice(0, 5),
       }
     } catch (error) {
       return {
         available: false,
         error: error.message,
-        alternatives: ['gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1', 'gpt-4']
-      }
+        alternatives: ['gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1', 'gpt-4'],
+      }
     }
   }
 
   async testConnection() {
     try {
-      const response = await this.generateCompletion([
-        { role: 'user', content: 'Hello' }
-      ], { max_tokens: 5 })
+      const response = await this.generateCompletion([{ role: 'user', content: 'Hello' }], {
+        max_tokens: 5,
+      })
 
       return {
         success: true,
         model: response.model,
-        message: 'Connection successful'
-      }
+        message: 'Connection successful',
+      }
     } catch (error) {
       return {
         success: false,
-        error: error.message
-      }
+        error: error.message,
+      }
     }
   }
 
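Beyond formatting, the hunk above appears to flatten validateModelAvailability's not-found branch into early-return style. A call sketch for the two probes, with illustrative return values (the model names here are examples, not fixtures from the package):

const check = await provider.validateModelAvailability('not-a-real-model')
// → { available: false, error: "Model 'not-a-real-model' not available",
//     alternatives: ['gpt-4o', 'gpt-4o-mini', ...] } // first 5 live models

const ping = await provider.testConnection() // sends a 5-token 'Hello' completion
// → { success: true, model: 'gpt-4o', message: 'Connection successful' }
// → { success: false, error: '...' } on failure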
@@ -197,61 +197,60 @@ export class OpenAIProvider extends BaseProvider {
       function_calling: true,
       json_mode: true,
       reasoning: modelName ? modelName.includes('gpt-4') : true,
-      multimodal: false
-    }
+      multimodal: false,
+    }
   }
 
   getModelRecommendation(commitDetails) {
-    const { files = 0, lines = 0, breaking = false, complex = false } = commitDetails
+    const { files = 0, lines = 0, breaking = false, complex = false } = commitDetails
 
     // Use o1 for highly complex reasoning tasks
     if (breaking || complex || files > 50 || lines > 5000) {
       return {
         model: 'o1-mini',
-        reason: 'Highly complex change requiring advanced reasoning'
-      }
+        reason: 'Highly complex change requiring advanced reasoning',
+      }
     }
 
     // Use GPT-4o for complex changes
     if (files > 20 || lines > 1000) {
       return {
         model: 'gpt-4o',
-        reason: 'Complex change requiring advanced analysis'
-      }
+        reason: 'Complex change requiring advanced analysis',
+      }
     }
 
     // Use GPT-4o mini for medium changes
     if (files > 5 || lines > 200) {
       return {
         model: 'gpt-4o-mini',
-        reason: 'Medium-sized change requiring good analysis'
-      }
+        reason: 'Medium-sized change requiring good analysis',
+      }
     }
 
     // Use GPT-4o mini for small changes (more capable than 3.5-turbo)
     return {
       model: 'gpt-4o-mini',
-      reason: 'Small change, using efficient modern model'
-    }
+      reason: 'Small change, using efficient modern model',
+    }
   }
 
   async selectOptimalModel(commitInfo) {
-    const recommendation = this.getModelRecommendation(commitInfo)
-    const validation = await this.validateModelAvailability(recommendation.model)
+    const recommendation = this.getModelRecommendation(commitInfo)
+    const validation = await this.validateModelAvailability(recommendation.model)
 
     if (validation.available) {
       return {
         model: recommendation.model,
         reason: recommendation.reason,
-        capabilities: validation.capabilities
-      }
-    } else {
-      // Fallback to default model
-      return {
-        model: this.getDefaultModel(),
-        reason: 'Fallback to default model',
-        capabilities: this.getCapabilities(this.getDefaultModel())
-      };
+        capabilities: validation.capabilities,
+      }
+    }
+    // Fallback to default model
+    return {
+      model: this.getDefaultModel(),
+      reason: 'Fallback to default model',
+      capabilities: this.getCapabilities(this.getDefaultModel()),
     }
   }
 
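The recommendation thresholds are untouched context in the hunk above; only trailing commas and the early-return fallback in selectOptimalModel change. How the tiers fall out, per the conditions shown:

provider.getModelRecommendation({ files: 60, lines: 8000, breaking: true }) // → 'o1-mini'
provider.getModelRecommendation({ files: 25, lines: 1500 }) // → 'gpt-4o'
provider.getModelRecommendation({ files: 8, lines: 400 }) // → 'gpt-4o-mini'
provider.getModelRecommendation({ files: 2, lines: 50 }) // → 'gpt-4o-mini' (default tier)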
@@ -264,10 +263,10 @@ export class OpenAIProvider extends BaseProvider {
       reasoningModel: 'o1',
       default: 'gpt-4o',
       temperature: 0.3,
-      maxTokens: 1000
-    }
+      maxTokens: 1000,
+    }
   }
 }
 
 // Apply mixins to add standard provider functionality
-export default applyMixins ? applyMixins(OpenAIProvider, 'openai') : OpenAIProvider
+export default applyMixins ? applyMixins(OpenAIProvider, 'openai') : OpenAIProvider
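Both import styles remain valid after this release, since the class itself is a named export and the (possibly mixin-wrapped) class is the default export; the path below is the file shown in this diff, imported directly rather than through the package entry point:

// Named export: the bare class.
import { OpenAIProvider } from './src/infrastructure/providers/implementations/openai.js'
// Default export: applyMixins(OpenAIProvider, 'openai') when applyMixins is defined.
import OpenAIProviderWithMixins from './src/infrastructure/providers/implementations/openai.js'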