@entro314labs/ai-changelog-generator 3.1.1 → 3.2.0

This diff shows the changes between the two publicly released package versions, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (51)
  1. package/CHANGELOG.md +383 -877
  2. package/README.md +8 -3
  3. package/ai-changelog-mcp.sh +0 -0
  4. package/ai-changelog.sh +0 -0
  5. package/bin/ai-changelog-dxt.js +9 -9
  6. package/bin/ai-changelog-mcp.js +19 -17
  7. package/bin/ai-changelog.js +6 -6
  8. package/package.json +80 -48
  9. package/src/ai-changelog-generator.js +83 -81
  10. package/src/application/orchestrators/changelog.orchestrator.js +791 -516
  11. package/src/application/services/application.service.js +137 -128
  12. package/src/cli.js +76 -57
  13. package/src/domains/ai/ai-analysis.service.js +289 -209
  14. package/src/domains/analysis/analysis.engine.js +253 -193
  15. package/src/domains/changelog/changelog.service.js +1062 -784
  16. package/src/domains/changelog/workspace-changelog.service.js +420 -249
  17. package/src/domains/git/git-repository.analyzer.js +348 -258
  18. package/src/domains/git/git.service.js +132 -112
  19. package/src/infrastructure/cli/cli.controller.js +390 -274
  20. package/src/infrastructure/config/configuration.manager.js +220 -190
  21. package/src/infrastructure/interactive/interactive-staging.service.js +154 -135
  22. package/src/infrastructure/interactive/interactive-workflow.service.js +200 -159
  23. package/src/infrastructure/mcp/mcp-server.service.js +208 -207
  24. package/src/infrastructure/metrics/metrics.collector.js +140 -123
  25. package/src/infrastructure/providers/core/base-provider.js +87 -40
  26. package/src/infrastructure/providers/implementations/anthropic.js +101 -99
  27. package/src/infrastructure/providers/implementations/azure.js +124 -101
  28. package/src/infrastructure/providers/implementations/bedrock.js +136 -126
  29. package/src/infrastructure/providers/implementations/dummy.js +23 -23
  30. package/src/infrastructure/providers/implementations/google.js +123 -114
  31. package/src/infrastructure/providers/implementations/huggingface.js +94 -87
  32. package/src/infrastructure/providers/implementations/lmstudio.js +75 -60
  33. package/src/infrastructure/providers/implementations/mock.js +69 -73
  34. package/src/infrastructure/providers/implementations/ollama.js +89 -66
  35. package/src/infrastructure/providers/implementations/openai.js +88 -89
  36. package/src/infrastructure/providers/implementations/vertex.js +227 -197
  37. package/src/infrastructure/providers/provider-management.service.js +245 -207
  38. package/src/infrastructure/providers/provider-manager.service.js +145 -125
  39. package/src/infrastructure/providers/utils/base-provider-helpers.js +308 -302
  40. package/src/infrastructure/providers/utils/model-config.js +220 -195
  41. package/src/infrastructure/providers/utils/provider-utils.js +105 -100
  42. package/src/infrastructure/validation/commit-message-validation.service.js +259 -161
  43. package/src/shared/constants/colors.js +453 -180
  44. package/src/shared/utils/cli-demo.js +285 -0
  45. package/src/shared/utils/cli-entry-utils.js +257 -249
  46. package/src/shared/utils/cli-ui.js +447 -0
  47. package/src/shared/utils/diff-processor.js +513 -0
  48. package/src/shared/utils/error-classes.js +125 -156
  49. package/src/shared/utils/json-utils.js +93 -89
  50. package/src/shared/utils/utils.js +1117 -945
  51. package/types/index.d.ts +353 -344
package/src/infrastructure/providers/utils/model-config.js

@@ -4,19 +4,19 @@
  */

  // Cache for warnings to prevent spam (with size limit to prevent memory leaks)
- const warningCache = new Set();
- const MAX_WARNING_CACHE_SIZE = 100;
+ const warningCache = new Set()
+ const MAX_WARNING_CACHE_SIZE = 100

  function addWarningToCache(key) {
  if (warningCache.size >= MAX_WARNING_CACHE_SIZE) {
  // Clear oldest entries by recreating the set
- const entries = Array.from(warningCache);
- warningCache.clear();
+ const entries = Array.from(warningCache)
+ warningCache.clear()
  // Keep last 80% of entries
- const keepCount = Math.floor(MAX_WARNING_CACHE_SIZE * 0.8);
- entries.slice(-keepCount).forEach(entry => warningCache.add(entry));
+ const keepCount = Math.floor(MAX_WARNING_CACHE_SIZE * 0.8)
+ entries.slice(-keepCount).forEach((entry) => warningCache.add(entry))
  }
- warningCache.add(key);
+ warningCache.add(key)
  }

  /**
@@ -27,27 +27,31 @@ export const MODEL_CONFIGS = {
  openai: {
  complexModel: 'gpt-4o',
  standardModel: 'gpt-4.1',
- mediumModel: 'gpt-4.1-mini',
+ mediumModel: 'gpt-4.1-mini',
  smallModel: 'gpt-4.1-nano',
- fallbacks: ['gpt-4.1', 'gpt-4o', 'o1']
+ fallbacks: ['gpt-4.1', 'gpt-4o', 'o1'],
  },
-
+
  anthropic: {
  complexModel: 'claude-opus-4-20250617',
  standardModel: 'claude-sonnet-4-20250514',
  mediumModel: 'claude-3.7-sonnet-20250114',
  smallModel: 'claude-3.5-haiku-20241022',
- fallbacks: ['claude-sonnet-4-20250514', 'claude-3.5-sonnet-20241022', 'claude-3.5-haiku-20241022']
+ fallbacks: [
+ 'claude-sonnet-4-20250514',
+ 'claude-3.5-sonnet-20241022',
+ 'claude-3.5-haiku-20241022',
+ ],
  },
-
+
  google: {
  complexModel: 'gemini-2.5-pro',
  standardModel: 'gemini-2.5-flash',
  mediumModel: 'gemini-2.0-flash',
  smallModel: 'gemini-2.0-flash-001',
- fallbacks: ['gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-1.5-pro']
+ fallbacks: ['gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-1.5-pro'],
  },
-
+
  vertex: {
  complexModel: 'gemini-2.5-pro',
  standardModel: 'gemini-2.5-flash',
@@ -56,13 +60,13 @@ export const MODEL_CONFIGS = {
  fallbacks: ['gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-1.5-pro'],
  // Vertex AI configuration
  requiresAuth: true,
- location: 'us-central1'
+ location: 'us-central1',
  },
-
+
  azure: {
  // Azure uses deployment names - should detect from actual deployments
  complexModel: 'gpt-4.1',
- standardModel: 'gpt-4.1',
+ standardModel: 'gpt-4.1',
  mediumModel: 'gpt-4.1',
  smallModel: 'gpt-4.1',
  fallbacks: ['gpt-4.1', 'gpt-4o', 'gpt-35-turbo'],
@@ -76,17 +80,21 @@ export const MODEL_CONFIGS = {
  openai: ['gpt-4o', 'gpt-4.1', 'gpt-4.1-mini', 'gpt-4.1-nano', 'o3', 'o4'],
  microsoft: ['phi-3', 'phi-3.5'],
  meta: ['llama-3.1', 'llama-3.2'],
- anthropic: ['claude-3.5-sonnet']
- }
+ anthropic: ['claude-3.5-sonnet'],
+ },
  },
-
+
  bedrock: {
  // Amazon Bedrock - Multi-provider AI hub
  complexModel: 'anthropic.claude-opus-4-v1:0',
  standardModel: 'anthropic.claude-sonnet-4-v1:0',
  mediumModel: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
  smallModel: 'anthropic.claude-3-5-haiku-20241022-v1:0',
- fallbacks: ['anthropic.claude-sonnet-4-v1:0', 'anthropic.claude-3-5-sonnet-20241022-v2:0', 'meta.llama3-1-70b-instruct-v1:0'],
+ fallbacks: [
+ 'anthropic.claude-sonnet-4-v1:0',
+ 'anthropic.claude-3-5-sonnet-20241022-v2:0',
+ 'meta.llama3-1-70b-instruct-v1:0',
+ ],
  // Hub-specific configuration
  isHub: true,
  detectModels: true,
@@ -97,54 +105,49 @@ export const MODEL_CONFIGS = {
  hubModels: {
  anthropic: [
  'anthropic.claude-opus-4-v1:0',
- 'anthropic.claude-sonnet-4-v1:0',
+ 'anthropic.claude-sonnet-4-v1:0',
  'anthropic.claude-3-5-sonnet-20241022-v2:0',
- 'anthropic.claude-3-5-haiku-20241022-v1:0'
+ 'anthropic.claude-3-5-haiku-20241022-v1:0',
  ],
  meta: [
  'meta.llama3-1-405b-instruct-v1:0',
  'meta.llama3-1-70b-instruct-v1:0',
- 'meta.llama3-1-8b-instruct-v1:0'
- ],
- amazon: [
- 'amazon.titan-text-premier-v1:0',
- 'amazon.titan-text-express-v1'
+ 'meta.llama3-1-8b-instruct-v1:0',
  ],
- ai21: [
- 'ai21.jamba-1-5-large-v1:0',
- 'ai21.jamba-1-5-mini-v1:0'
- ],
- cohere: [
- 'cohere.command-r-plus-v1:0',
- 'cohere.command-r-v1:0'
- ]
- }
+ amazon: ['amazon.titan-text-premier-v1:0', 'amazon.titan-text-express-v1'],
+ ai21: ['ai21.jamba-1-5-large-v1:0', 'ai21.jamba-1-5-mini-v1:0'],
+ cohere: ['cohere.command-r-plus-v1:0', 'cohere.command-r-v1:0'],
+ },
  },
-
+
  huggingface: {
  complexModel: 'meta-llama/Llama-3.1-70B-Instruct',
  standardModel: 'meta-llama/Llama-3.1-8B-Instruct',
  mediumModel: 'meta-llama/Llama-3.2-3B-Instruct',
  smallModel: 'meta-llama/Llama-3.2-1B-Instruct',
- fallbacks: ['meta-llama/Llama-3.1-8B-Instruct', 'microsoft/DialoGPT-medium', 'google/flan-t5-base']
+ fallbacks: [
+ 'meta-llama/Llama-3.1-8B-Instruct',
+ 'microsoft/DialoGPT-medium',
+ 'google/flan-t5-base',
+ ],
  },
-
+
  ollama: {
  complexModel: 'llama3.1:70b',
  standardModel: 'llama3.1',
  mediumModel: 'llama3.1:8b',
  smallModel: 'llama3.2:3b',
- fallbacks: ['llama3.1', 'llama3.2', 'mistral']
+ fallbacks: ['llama3.1', 'llama3.2', 'mistral'],
  },
-
+
  lmstudio: {
  complexModel: 'llama-3.2-1b-instruct',
  standardModel: 'llama-3.2-1b-instruct',
  mediumModel: 'llama-3.2-1b-instruct',
  smallModel: 'llama-3.2-1b-instruct',
- fallbacks: ['local-model']
- }
- };
+ fallbacks: ['local-model'],
+ },
+ }

  /**
  * Get model configuration for a provider with config overrides
@@ -153,76 +156,92 @@ export const MODEL_CONFIGS = {
  * @returns {Object} Model configuration with overrides applied
  */
  export function getProviderModelConfig(providerName, config = {}, availableModels = []) {
- const baseConfig = MODEL_CONFIGS[providerName] || MODEL_CONFIGS.openai;
-
+ const baseConfig = MODEL_CONFIGS[providerName] || MODEL_CONFIGS.openai
+
  const modelConfig = {
- complexModel: config.AI_MODEL_COMPLEX || config[`${providerName.toUpperCase()}_MODEL_COMPLEX`] || baseConfig.complexModel,
- standardModel: config.AI_MODEL || config[`${providerName.toUpperCase()}_MODEL`] || baseConfig.standardModel,
- mediumModel: config.AI_MODEL_SIMPLE || config[`${providerName.toUpperCase()}_MODEL_SIMPLE`] || baseConfig.mediumModel,
- smallModel: config.AI_MODEL_NANO || config[`${providerName.toUpperCase()}_MODEL_NANO`] || baseConfig.smallModel,
- fallbacks: baseConfig.fallbacks
- };
-
+ complexModel:
+ config.AI_MODEL_COMPLEX ||
+ config[`${providerName.toUpperCase()}_MODEL_COMPLEX`] ||
+ baseConfig.complexModel,
+ standardModel:
+ config.AI_MODEL || config[`${providerName.toUpperCase()}_MODEL`] || baseConfig.standardModel,
+ mediumModel:
+ config.AI_MODEL_SIMPLE ||
+ config[`${providerName.toUpperCase()}_MODEL_SIMPLE`] ||
+ baseConfig.mediumModel,
+ smallModel:
+ config.AI_MODEL_NANO ||
+ config[`${providerName.toUpperCase()}_MODEL_NANO`] ||
+ baseConfig.smallModel,
+ fallbacks: baseConfig.fallbacks,
+ }
+
  // For hub providers, validate models against available deployments
  if (baseConfig.isHub) {
  if (availableModels.length > 0) {
  // Use actual deployed models
  const validateModel = (model) => {
- if (availableModels.includes(model)) return model;
-
+ if (availableModels.includes(model)) {
+ return model
+ }
+
  // Try fallbacks in order
  for (const fallback of baseConfig.fallbacks) {
- if (availableModels.includes(fallback)) return fallback;
+ if (availableModels.includes(fallback)) {
+ return fallback
+ }
  }
-
+
  // Try common deployments for Azure
  if (providerName === 'azure' && baseConfig.commonDeployments) {
  for (const common of baseConfig.commonDeployments) {
- if (availableModels.includes(common)) return common;
+ if (availableModels.includes(common)) {
+ return common
+ }
  }
  }
-
- return availableModels[0] || model; // Use first available or original
- };
-
- modelConfig.complexModel = validateModel(modelConfig.complexModel);
- modelConfig.standardModel = validateModel(modelConfig.standardModel);
- modelConfig.mediumModel = validateModel(modelConfig.mediumModel);
- modelConfig.smallModel = validateModel(modelConfig.smallModel);
-
+
+ return availableModels[0] || model // Use first available or original
+ }
+
+ modelConfig.complexModel = validateModel(modelConfig.complexModel)
+ modelConfig.standardModel = validateModel(modelConfig.standardModel)
+ modelConfig.mediumModel = validateModel(modelConfig.mediumModel)
+ modelConfig.smallModel = validateModel(modelConfig.smallModel)
+
  // Update fallbacks to only include available models
- modelConfig.fallbacks = baseConfig.fallbacks.filter(model =>
+ modelConfig.fallbacks = baseConfig.fallbacks.filter((model) =>
  availableModels.includes(model)
- );
-
- modelConfig.availableModels = availableModels;
+ )
+
+ modelConfig.availableModels = availableModels
  } else {
  // No deployment info available - use safer defaults for Azure
  if (providerName === 'azure') {
- const warningKey = 'azure-no-deployment-info';
+ const warningKey = 'azure-no-deployment-info'
  if (!warningCache.has(warningKey)) {
- console.log('ℹ️ Using default Azure deployment names (deployment detection runs async)');
- addWarningToCache(warningKey);
+ console.log('ℹ️ Using default Azure deployment names (deployment detection runs async)')
+ addWarningToCache(warningKey)
  }
- modelConfig.complexModel = 'gpt-4.1';
- modelConfig.standardModel = 'gpt-4.1';
- modelConfig.mediumModel = 'gpt-4.1';
- modelConfig.smallModel = 'gpt-4.1';
- modelConfig.availableModels = baseConfig.commonDeployments || [];
+ modelConfig.complexModel = 'gpt-4.1'
+ modelConfig.standardModel = 'gpt-4.1'
+ modelConfig.mediumModel = 'gpt-4.1'
+ modelConfig.smallModel = 'gpt-4.1'
+ modelConfig.availableModels = baseConfig.commonDeployments || []
  }
  }
-
+
  // Add hub-specific info
- modelConfig.isHub = true;
+ modelConfig.isHub = true
  modelConfig.hubInfo = {
  supportedProviders: baseConfig.supportedProviders,
  defaultProvider: baseConfig.defaultProvider,
  hubModels: baseConfig.hubModels,
- commonDeployments: baseConfig.commonDeployments
- };
+ commonDeployments: baseConfig.commonDeployments,
+ }
  }
-
- return modelConfig;
+
+ return modelConfig
  }

  /**
@@ -238,9 +257,9 @@ export const MODEL_CAPABILITIES = {
  json_mode: true,
  reasoning: true,
  large_context: true,
- multimodal: true
+ multimodal: true,
  },
-
+
  'gpt-4.1': {
  prompt_caching: true,
  tool_use: true,
@@ -248,33 +267,33 @@ export const MODEL_CAPABILITIES = {
  vision: true,
  large_context: true,
  coding_optimized: true,
- cost_reduction: 0.75 // 75% cost reduction with caching
+ cost_reduction: 0.75, // 75% cost reduction with caching
  },
-
- 'o1': {
+
+ o1: {
  reasoning: true,
  tool_use: true,
  large_context: true,
- advanced_reasoning: true
+ advanced_reasoning: true,
  },
-
- 'o3': {
+
+ o3: {
  reasoning: true,
  advanced_reasoning: true,
  tool_use: true,
  large_context: true,
- azure_only: true
+ azure_only: true,
  },
-
- 'o4': {
+
+ o4: {
  reasoning: true,
  advanced_reasoning: true,
  tool_use: true,
  large_context: true,
  azure_only: true,
- next_generation: true
+ next_generation: true,
  },
-
+
  // Anthropic Models
  'claude-sonnet-4': {
  vision: true,
@@ -283,9 +302,9 @@ export const MODEL_CAPABILITIES = {
  reasoning: true,
  large_context: true,
  balanced_performance: true,
- coding_optimized: true
+ coding_optimized: true,
  },
-
+
  'claude-opus-4': {
  vision: true,
  tool_use: true,
@@ -296,32 +315,32 @@ export const MODEL_CAPABILITIES = {
  extended_thinking: true,
  coding_optimized: true,
  parallel_tool_use: true,
- most_capable: true
+ most_capable: true,
  },
-
+
  'claude-3.7': {
  vision: true,
  tool_use: true,
  json_mode: true,
  reasoning: true,
  large_context: true,
- previous_generation: true
+ previous_generation: true,
  },
-
+
  'claude-3.5': {
  vision: true,
  tool_use: true,
  json_mode: true,
  reasoning: true,
- large_context: true
+ large_context: true,
  },
-
+
  'claude-3': {
  vision: true,
  tool_use: true, // Only Opus and Sonnet
- json_mode: true // Only Opus and Sonnet
+ json_mode: true, // Only Opus and Sonnet
  },
-
+
  // Google Models (Gemini & Vertex AI)
  'gemini-2.5': {
  vision: true,
@@ -331,9 +350,9 @@ export const MODEL_CAPABILITIES = {
  large_context: true,
  multimodal: true,
  thinking_mode: true,
- most_capable: true
+ most_capable: true,
  },
-
+
  'gemini-2.0': {
  vision: true,
  tool_use: true,
@@ -341,35 +360,35 @@ export const MODEL_CAPABILITIES = {
  reasoning: true,
  large_context: true,
  multimodal: true,
- fast_processing: true
+ fast_processing: true,
  },
-
+
  'gemini-1.5': {
  vision: true,
  tool_use: true,
  json_mode: true,
  large_context: true,
  multimodal: true,
- deprecated: true
+ deprecated: true,
  },
-
+
  // Hugging Face Models
  'llama-3.1': {
  tool_use: true,
  json_mode: true,
  reasoning: true,
  large_context: true,
- open_source: true
+ open_source: true,
  },
-
+
  'llama-3.2': {
  tool_use: true,
  json_mode: true,
  reasoning: true,
  open_source: true,
- lightweight: true
+ lightweight: true,
  },
-
+
  // Amazon Bedrock Models
  'anthropic.claude': {
  vision: true,
@@ -377,27 +396,27 @@ export const MODEL_CAPABILITIES = {
  json_mode: true,
  reasoning: true,
  large_context: true,
- bedrock_hosted: true
+ bedrock_hosted: true,
  },
-
+
  'meta.llama': {
  tool_use: true,
  json_mode: true,
  reasoning: true,
  large_context: true,
  open_source: true,
- bedrock_hosted: true
+ bedrock_hosted: true,
  },
-
+
  'amazon.titan': {
  tool_use: false,
  json_mode: true,
  reasoning: false,
  large_context: true,
  bedrock_hosted: true,
- aws_native: true
+ aws_native: true,
  },
-
+
  // Local Models (Ollama/LM Studio)
  'local-model': {
  tool_use: false,
@@ -405,9 +424,9 @@ export const MODEL_CAPABILITIES = {
  reasoning: false,
  large_context: false,
  offline: true,
- privacy_focused: true
- }
- };
+ privacy_focused: true,
+ },
+ }

  /**
  * Get capabilities for a specific model
@@ -415,8 +434,10 @@
  * @returns {Object} Model capabilities
  */
  export function getModelCapabilities(modelName) {
- if (!modelName) return {};
-
+ if (!modelName) {
+ return {}
+ }
+
  // Find the closest match in our capabilities database
  for (const [pattern, capabilities] of Object.entries(MODEL_CAPABILITIES)) {
  if (modelName.includes(pattern)) {
@@ -428,13 +449,13 @@ export function getModelCapabilities(modelName) {
  large_context: false,
  streaming: true,
  temperature_control: true,
- max_tokens_control: true
- };
-
- return { ...baseCapabilities, ...capabilities };
+ max_tokens_control: true,
+ }
+
+ return { ...baseCapabilities, ...capabilities }
  }
  }
-
+
  // Return basic capabilities for unknown models
  return {
  vision: false,
@@ -444,8 +465,8 @@ export function getModelCapabilities(modelName) {
  large_context: false,
  streaming: true,
  temperature_control: true,
- max_tokens_control: true
- };
+ max_tokens_control: true,
+ }
  }

  /**
@@ -455,11 +476,13 @@ export function getModelCapabilities(modelName) {
  * @returns {Array<string>} Suggested alternative models
  */
  export function getSuggestedModels(providerName, unavailableModel) {
- const config = MODEL_CONFIGS[providerName];
- if (!config) return [];
-
+ const config = MODEL_CONFIGS[providerName]
+ if (!config) {
+ return []
+ }
+
  // Return fallbacks, but exclude the unavailable model
- return config.fallbacks.filter(model => model !== unavailableModel);
+ return config.fallbacks.filter((model) => model !== unavailableModel)
  }

  /**
@@ -469,8 +492,8 @@ export function getSuggestedModels(providerName, unavailableModel) {
  * @returns {boolean} Whether the model supports the capability
  */
  export function modelSupports(modelName, capability) {
- const capabilities = getModelCapabilities(modelName);
- return !!capabilities[capability];
+ const capabilities = getModelCapabilities(modelName)
+ return !!capabilities[capability]
  }

  /**
@@ -481,26 +504,26 @@ export function modelSupports(modelName, capability) {
  * @returns {string} Best model name for the use case
  */
  export function getBestModelForCapabilities(providerName, requiredCapabilities = [], config = {}) {
- const modelConfig = getProviderModelConfig(providerName, config);
+ const modelConfig = getProviderModelConfig(providerName, config)
  const modelsToCheck = [
  modelConfig.complexModel,
  modelConfig.standardModel,
  modelConfig.mediumModel,
- modelConfig.smallModel
- ];
-
+ modelConfig.smallModel,
+ ]
+
  // Find the first model that supports all required capabilities
  for (const model of modelsToCheck) {
- const capabilities = getModelCapabilities(model);
- const supportsAll = requiredCapabilities.every(capability => capabilities[capability]);
-
+ const capabilities = getModelCapabilities(model)
+ const supportsAll = requiredCapabilities.every((capability) => capabilities[capability])
+
  if (supportsAll) {
- return model;
+ return model
  }
  }
-
+
  // If no model supports all capabilities, return the most capable one
- return modelConfig.complexModel;
+ return modelConfig.complexModel
  }

  /**
@@ -509,7 +532,7 @@ export function getBestModelForCapabilities(providerName, requiredCapabilities =
  */
  export function getAllHubProviders() {
  return Object.entries(MODEL_CONFIGS)
- .filter(([name, config]) => config.isHub)
+ .filter(([_name, config]) => config.isHub)
  .map(([name, config]) => ({
  name,
  defaultProvider: config.defaultProvider,
@@ -517,9 +540,9 @@ export function getAllHubProviders() {
  modelCount: config.hubModels ? Object.values(config.hubModels).flat().length : 0,
  region: config.region,
  location: config.location,
- detectDeployments: config.detectDeployments || false,
- detectModels: config.detectModels || false
- }));
+ detectDeployments: config.detectDeployments,
+ detectModels: config.detectModels,
+ }))
  }

  /**
@@ -529,42 +552,42 @@ export function getAllHubProviders() {
  * @returns {Object} Model recommendation with reasoning
  */
  export function analyzeCommitComplexity(commitInfo, providerName) {
- const { files = [], additions = 0, deletions = 0, complex = false } = commitInfo;
- const totalChanges = additions + deletions;
- const fileCount = files.length;
-
- let complexity = 'simple';
- let reasoning = [];
-
+ const { files = [], additions = 0, deletions = 0, complex = false } = commitInfo
+ const totalChanges = additions + deletions
+ const fileCount = files.length
+
+ let complexity = 'simple'
+ const reasoning = []
+
  // Analyze complexity factors
  if (complex) {
- complexity = 'complex';
- reasoning.push('Commit marked as complex');
+ complexity = 'complex'
+ reasoning.push('Commit marked as complex')
  } else if (fileCount > 20) {
- complexity = 'complex';
- reasoning.push(`High file count: ${fileCount} files`);
+ complexity = 'complex'
+ reasoning.push(`High file count: ${fileCount} files`)
  } else if (totalChanges > 1000) {
- complexity = 'complex';
- reasoning.push(`Large change set: ${totalChanges} lines`);
+ complexity = 'complex'
+ reasoning.push(`Large change set: ${totalChanges} lines`)
  } else if (fileCount > 10 || totalChanges > 500) {
- complexity = 'medium';
- reasoning.push(`Moderate changes: ${fileCount} files, ${totalChanges} lines`);
+ complexity = 'medium'
+ reasoning.push(`Moderate changes: ${fileCount} files, ${totalChanges} lines`)
  } else if (totalChanges > 100) {
- complexity = 'standard';
- reasoning.push(`Standard changes: ${totalChanges} lines`);
+ complexity = 'standard'
+ reasoning.push(`Standard changes: ${totalChanges} lines`)
  } else {
- complexity = 'simple';
- reasoning.push(`Simple changes: ${totalChanges} lines in ${fileCount} files`);
+ complexity = 'simple'
+ reasoning.push(`Simple changes: ${totalChanges} lines in ${fileCount} files`)
  }
-
- const modelConfig = MODEL_CONFIGS[providerName];
+
+ const modelConfig = MODEL_CONFIGS[providerName]
  const modelTiers = {
  simple: modelConfig?.smallModel,
  standard: modelConfig?.standardModel,
  medium: modelConfig?.mediumModel,
- complex: modelConfig?.complexModel
- };
-
+ complex: modelConfig?.complexModel,
+ }
+
  return {
  complexity,
  recommendedModel: modelTiers[complexity] || modelConfig?.standardModel,
@@ -574,9 +597,9 @@ export function analyzeCommitComplexity(commitInfo, providerName) {
  totalChanges,
  additions,
  deletions,
- isComplex: complex
- }
- };
+ isComplex: complex,
+ },
+ }
  }

  /**
@@ -586,25 +609,27 @@ export function analyzeCommitComplexity(commitInfo, providerName) {
  * @returns {string} Normalized model name
  */
  export function normalizeModelName(providerName, modelName) {
- if (!modelName) return null;
-
+ if (!modelName) {
+ return null
+ }
+
  switch (providerName) {
  case 'azure':
  // Azure uses deployment names, so return as-is
- return modelName;
-
+ return modelName
+
  case 'ollama':
  // Ollama models often have version tags
- return modelName.includes(':') ? modelName : `${modelName}:latest`;
-
+ return modelName.includes(':') ? modelName : `${modelName}:latest`
+
  case 'anthropic':
  // Anthropic models need full version strings
  if (modelName.includes('claude') && !modelName.includes('-')) {
- return `${modelName}-20250514`; // Add default date if missing
+ return `${modelName}-20250514` // Add default date if missing
  }
- return modelName;
-
+ return modelName
+
  default:
- return modelName;
+ return modelName
  }
- }
+ }
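
The hunks above touch the model-config utilities as a formatting change only (semicolons dropped, trailing commas added, long literals split across lines); the exported API is unchanged. For orientation, a minimal usage sketch of those exports in the new style, assuming a package-relative import path and made-up commit stats for illustration:

// Illustrative only: the import path and sample values are assumptions, not from the package docs
import {
  analyzeCommitComplexity,
  getModelCapabilities,
  getProviderModelConfig,
  modelSupports,
} from './src/infrastructure/providers/utils/model-config.js'

// Resolve model tiers for a provider; keys like AI_MODEL or OPENAI_MODEL override the defaults
const models = getProviderModelConfig('openai', { AI_MODEL: 'gpt-4.1' })

// Gate a feature on a capability flag looked up in MODEL_CAPABILITIES
if (modelSupports(models.standardModel, 'tool_use')) {
  console.log(`${models.standardModel} supports tool use`)
}

// Pick a model tier from the size of a commit (hypothetical stats)
const { complexity, recommendedModel } = analyzeCommitComplexity(
  { files: ['a.js', 'b.js'], additions: 120, deletions: 15 },
  'openai'
)
console.log(complexity, recommendedModel, getModelCapabilities(recommendedModel))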