@entro314labs/ai-changelog-generator 3.0.5 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +383 -785
  2. package/README.md +30 -3
  3. package/ai-changelog-mcp.sh +0 -0
  4. package/ai-changelog.sh +0 -0
  5. package/bin/ai-changelog-dxt.js +9 -9
  6. package/bin/ai-changelog-mcp.js +19 -17
  7. package/bin/ai-changelog.js +6 -6
  8. package/package.json +84 -52
  9. package/src/ai-changelog-generator.js +83 -81
  10. package/src/application/orchestrators/changelog.orchestrator.js +1040 -296
  11. package/src/application/services/application.service.js +145 -123
  12. package/src/cli.js +76 -57
  13. package/src/domains/ai/ai-analysis.service.js +289 -209
  14. package/src/domains/analysis/analysis.engine.js +253 -193
  15. package/src/domains/changelog/changelog.service.js +1062 -784
  16. package/src/domains/changelog/workspace-changelog.service.js +420 -249
  17. package/src/domains/git/git-repository.analyzer.js +348 -258
  18. package/src/domains/git/git.service.js +132 -112
  19. package/src/infrastructure/cli/cli.controller.js +415 -247
  20. package/src/infrastructure/config/configuration.manager.js +220 -190
  21. package/src/infrastructure/interactive/interactive-staging.service.js +332 -0
  22. package/src/infrastructure/interactive/interactive-workflow.service.js +200 -159
  23. package/src/infrastructure/mcp/mcp-server.service.js +208 -207
  24. package/src/infrastructure/metrics/metrics.collector.js +140 -123
  25. package/src/infrastructure/providers/core/base-provider.js +87 -40
  26. package/src/infrastructure/providers/implementations/anthropic.js +101 -99
  27. package/src/infrastructure/providers/implementations/azure.js +124 -101
  28. package/src/infrastructure/providers/implementations/bedrock.js +136 -126
  29. package/src/infrastructure/providers/implementations/dummy.js +23 -23
  30. package/src/infrastructure/providers/implementations/google.js +123 -114
  31. package/src/infrastructure/providers/implementations/huggingface.js +94 -87
  32. package/src/infrastructure/providers/implementations/lmstudio.js +75 -60
  33. package/src/infrastructure/providers/implementations/mock.js +69 -73
  34. package/src/infrastructure/providers/implementations/ollama.js +89 -66
  35. package/src/infrastructure/providers/implementations/openai.js +88 -89
  36. package/src/infrastructure/providers/implementations/vertex.js +227 -197
  37. package/src/infrastructure/providers/provider-management.service.js +245 -207
  38. package/src/infrastructure/providers/provider-manager.service.js +145 -125
  39. package/src/infrastructure/providers/utils/base-provider-helpers.js +308 -302
  40. package/src/infrastructure/providers/utils/model-config.js +220 -195
  41. package/src/infrastructure/providers/utils/provider-utils.js +105 -100
  42. package/src/infrastructure/validation/commit-message-validation.service.js +556 -0
  43. package/src/shared/constants/colors.js +467 -172
  44. package/src/shared/utils/cli-demo.js +285 -0
  45. package/src/shared/utils/cli-entry-utils.js +257 -249
  46. package/src/shared/utils/cli-ui.js +447 -0
  47. package/src/shared/utils/diff-processor.js +513 -0
  48. package/src/shared/utils/error-classes.js +125 -156
  49. package/src/shared/utils/json-utils.js +93 -89
  50. package/src/shared/utils/utils.js +1299 -775
  51. package/types/index.d.ts +353 -344
package/src/infrastructure/providers/implementations/bedrock.js

@@ -4,18 +4,24 @@
   * Supports Claude, Llama, and other Bedrock models
   */

- import { BedrockRuntimeClient, InvokeModelCommand, ConverseCommand } from "@aws-sdk/client-bedrock-runtime";
- import { BaseProvider } from '../core/base-provider.js';
- import { ProviderError } from '../../../shared/utils/utils.js';
- import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js';
- import { buildClientOptions } from '../utils/provider-utils.js';
+ import process from 'node:process'
+
+ import {
+   BedrockRuntimeClient,
+   ConverseCommand,
+   InvokeModelCommand,
+ } from '@aws-sdk/client-bedrock-runtime'
+
+ import { BaseProvider } from '../core/base-provider.js'
+ import { applyMixins, ProviderResponseHandler } from '../utils/base-provider-helpers.js'
+ import { buildClientOptions } from '../utils/provider-utils.js'

  export class BedrockProvider extends BaseProvider {
    constructor(config) {
-     super(config);
-     this.bedrockClient = null;
+     super(config)
+     this.bedrockClient = null
      if (this.isAvailable()) {
-       this.initializeClient();
+       this.initializeClient()
      }
    }

@@ -23,44 +29,48 @@ export class BedrockProvider extends BaseProvider {
      const clientOptions = buildClientOptions(this.getProviderConfig(), {
        region: 'us-east-1',
        timeout: 60000,
-       maxRetries: 2
-     });
+       maxRetries: 2,
+     })

      this.bedrockClient = new BedrockRuntimeClient({
        region: clientOptions.region || this.config.AWS_REGION || 'us-east-1',
-       credentials: this.config.AWS_ACCESS_KEY_ID ? {
-         accessKeyId: this.config.AWS_ACCESS_KEY_ID,
-         secretAccessKey: this.config.AWS_SECRET_ACCESS_KEY,
-         sessionToken: this.config.AWS_SESSION_TOKEN
-       } : undefined, // Use default credential chain if not provided
+       credentials: this.config.AWS_ACCESS_KEY_ID
+         ? {
+             accessKeyId: this.config.AWS_ACCESS_KEY_ID,
+             secretAccessKey: this.config.AWS_SECRET_ACCESS_KEY,
+             sessionToken: this.config.AWS_SESSION_TOKEN,
+           }
+         : undefined, // Use default credential chain if not provided
        maxAttempts: clientOptions.maxRetries,
-       requestTimeout: clientOptions.timeout
-     });
+       requestTimeout: clientOptions.timeout,
+     })
    }

    getName() {
-     return 'bedrock';
+     return 'bedrock'
    }

    isAvailable() {
      // Can use default AWS credential chain or explicit credentials
      return !!(
        // Explicit credentials
-       (this.config.AWS_ACCESS_KEY_ID && this.config.AWS_SECRET_ACCESS_KEY) ||
-       // Or AWS profile/role-based auth (detected at runtime)
-       this.config.AWS_REGION ||
-       // Or default region
-       process.env.AWS_REGION ||
-       process.env.AWS_DEFAULT_REGION
-     );
+       (
+         (this.config.AWS_ACCESS_KEY_ID && this.config.AWS_SECRET_ACCESS_KEY) ||
+         // Or AWS profile/role-based auth (detected at runtime)
+         this.config.AWS_REGION ||
+         // Or default region
+         process.env.AWS_REGION ||
+         process.env.AWS_DEFAULT_REGION
+       )
+     )
    }

    getRequiredEnvVars() {
-     return []; // Can work with default AWS credential chain
+     return [] // Can work with default AWS credential chain
    }

    getDefaultModel() {
-     return 'anthropic.claude-sonnet-4-20250514-v1:0';
+     return 'anthropic.claude-sonnet-4-20250514-v1:0'
    }

    async generateCompletion(messages, options = {}) {
@@ -68,33 +78,32 @@ export class BedrockProvider extends BaseProvider {
        this,
        'generate_completion',
        async () => {
-         const modelConfig = this.getProviderModelConfig();
-         const modelId = options.model || modelConfig.standardModel || this.getDefaultModel();
+         const modelConfig = this.getProviderModelConfig()
+         const modelId = options.model || modelConfig.standardModel || this.getDefaultModel()

          // Use Converse API for modern interface
          if (this.supportsConverseAPI(modelId)) {
-           return await this.generateWithConverseAPI(messages, options, modelId);
-         } else {
-           return await this.generateWithInvokeModel(messages, options, modelId);
+           return await this.generateWithConverseAPI(messages, options, modelId)
          }
+         return await this.generateWithInvokeModel(messages, options, modelId)
        },
        { model: options.model }
-     );
+     )
    }

    supportsConverseAPI(modelId) {
      // Converse API supports Claude, Llama, and other modern models
-     return modelId.includes('claude') || modelId.includes('llama') || modelId.includes('titan');
+     return modelId.includes('claude') || modelId.includes('llama') || modelId.includes('titan')
    }

    async generateWithConverseAPI(messages, options, modelId) {
-     const systemMessage = messages.find(m => m.role === 'system');
+     const systemMessage = messages.find((m) => m.role === 'system')
      const conversationMessages = messages
-       .filter(m => m.role !== 'system')
-       .map(m => ({
+       .filter((m) => m.role !== 'system')
+       .map((m) => ({
          role: m.role === 'assistant' ? 'assistant' : 'user',
-         content: [{ text: m.content }]
-       }));
+         content: [{ text: m.content }],
+       }))

      const converseParams = {
        modelId,
@@ -102,30 +111,30 @@ export class BedrockProvider extends BaseProvider {
        inferenceConfig: {
          maxTokens: options.max_tokens || 2000,
          temperature: options.temperature || 0.3,
-         topP: options.top_p || 0.9
-       }
-     };
+         topP: options.top_p || 0.9,
+       },
+     }

      if (systemMessage) {
-       converseParams.system = [{ text: systemMessage.content }];
+       converseParams.system = [{ text: systemMessage.content }]
      }

      if (options.tools && options.tools.length > 0) {
        converseParams.toolConfig = {
-         tools: options.tools.map(tool => ({
+         tools: options.tools.map((tool) => ({
            toolSpec: {
              name: tool.function.name,
              description: tool.function.description,
              inputSchema: {
-               json: tool.function.parameters
-             }
-           }
-         }))
-       };
+               json: tool.function.parameters,
+             },
+           },
+         })),
+       }
      }

-     const command = new ConverseCommand(converseParams);
-     const response = await this.bedrockClient.send(command);
+     const command = new ConverseCommand(converseParams)
+     const response = await this.bedrockClient.send(command)

      return {
        content: response.output.message.content[0].text,
@@ -133,65 +142,65 @@ export class BedrockProvider extends BaseProvider {
        usage: {
          prompt_tokens: response.usage.inputTokens,
          completion_tokens: response.usage.outputTokens,
-         total_tokens: response.usage.inputTokens + response.usage.outputTokens
+         total_tokens: response.usage.inputTokens + response.usage.outputTokens,
        },
        finish_reason: response.stopReason,
        tool_calls: response.output.message.content
-         .filter(c => c.toolUse)
-         .map(c => ({
+         .filter((c) => c.toolUse)
+         .map((c) => ({
            id: c.toolUse.toolUseId,
            type: 'function',
            function: {
              name: c.toolUse.name,
-             arguments: JSON.stringify(c.toolUse.input)
-           }
-         }))
-     };
+             arguments: JSON.stringify(c.toolUse.input),
+           },
+         })),
+     }
    }

    async generateWithInvokeModel(messages, options, modelId) {
      // Format for specific model types
-     let body;
+     let body

      if (modelId.includes('claude')) {
        // Anthropic Claude format
-       const systemMessage = messages.find(m => m.role === 'system')?.content;
+       const systemMessage = messages.find((m) => m.role === 'system')?.content
        const conversationMessages = messages
-         .filter(m => m.role !== 'system')
-         .map(m => ({
+         .filter((m) => m.role !== 'system')
+         .map((m) => ({
            role: m.role,
-           content: m.content
-         }));
+           content: m.content,
+         }))

        body = JSON.stringify({
-         anthropic_version: "bedrock-2023-05-31",
+         anthropic_version: 'bedrock-2023-05-31',
          max_tokens: options.max_tokens || 2000,
          temperature: options.temperature || 0.3,
          system: systemMessage,
-         messages: conversationMessages
-       });
+         messages: conversationMessages,
+       })
      } else if (modelId.includes('llama')) {
        // Meta Llama format
-       const prompt = messages.map(m => `${m.role}: ${m.content}`).join('\n');
+       const prompt = messages.map((m) => `${m.role}: ${m.content}`).join('\n')
        body = JSON.stringify({
          prompt,
          max_gen_len: options.max_tokens || 2000,
          temperature: options.temperature || 0.3,
-         top_p: options.top_p || 0.9
-       });
+         top_p: options.top_p || 0.9,
+       })
      } else {
-       throw new Error(`Unsupported model format: ${modelId}`);
+       throw new Error(`Unsupported model format: ${modelId}`)
      }

      const command = new InvokeModelCommand({
        modelId,
        contentType: 'application/json',
        accept: 'application/json',
-       body
-     });
+       body,
+     })

-     const response = await this.bedrockClient.send(command);
-     const responseBody = JSON.parse(new TextDecoder().decode(response.body));
+     const response = await this.bedrockClient.send(command)
+     const responseBody = JSON.parse(new TextDecoder().decode(response.body))

      if (modelId.includes('claude')) {
        return {
@@ -200,21 +209,22 @@ export class BedrockProvider extends BaseProvider {
          usage: {
            prompt_tokens: responseBody.usage.input_tokens,
            completion_tokens: responseBody.usage.output_tokens,
-           total_tokens: responseBody.usage.input_tokens + responseBody.usage.output_tokens
+           total_tokens: responseBody.usage.input_tokens + responseBody.usage.output_tokens,
          },
-         finish_reason: responseBody.stop_reason
-       };
-     } else if (modelId.includes('llama')) {
+         finish_reason: responseBody.stop_reason,
+       }
+     }
+     if (modelId.includes('llama')) {
        return {
          content: responseBody.generation,
          model: modelId,
          usage: {
            prompt_tokens: responseBody.prompt_token_count,
            completion_tokens: responseBody.generation_token_count,
-           total_tokens: responseBody.prompt_token_count + responseBody.generation_token_count
+           total_tokens: responseBody.prompt_token_count + responseBody.generation_token_count,
          },
-         finish_reason: responseBody.stop_reason
-       };
+         finish_reason: responseBody.stop_reason,
+       }
      }
    }

@@ -231,8 +241,8 @@
            function_calling: true,
            json_mode: true,
            multimodal: true,
-           advancedReasoning: true
-         }
+           advancedReasoning: true,
+         },
        },
        {
          name: 'anthropic.claude-sonnet-4-20250514-v1:0',
@@ -243,8 +253,8 @@
            reasoning: true,
            function_calling: true,
            json_mode: true,
-           multimodal: true
-         }
+           multimodal: true,
+         },
        },
        {
          name: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
@@ -255,8 +265,8 @@
            reasoning: true,
            function_calling: true,
            json_mode: true,
-           multimodal: true
-         }
+           multimodal: true,
+         },
        },
        // Meta Llama models
        {
@@ -268,8 +278,8 @@
            reasoning: true,
            function_calling: false,
            json_mode: true,
-           multimodal: false
-         }
+           multimodal: false,
+         },
        },
        {
          name: 'meta.llama3-2-90b-instruct-v1:0',
@@ -280,8 +290,8 @@
            reasoning: true,
            function_calling: false,
            json_mode: true,
-           multimodal: true
-         }
+           multimodal: true,
+         },
        },
        // Amazon Titan models
        {
@@ -293,10 +303,10 @@
            reasoning: true,
            function_calling: false,
            json_mode: true,
-           multimodal: false
-         }
-       }
-     ];
+           multimodal: false,
+         },
+       },
+     ]
    }

    getProviderModelConfig() {
@@ -308,41 +318,41 @@
        reasoningModel: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
        default: 'anthropic.claude-sonnet-4-20250514-v1:0',
        temperature: 0.3,
-       maxTokens: 2000
-     };
+       maxTokens: 2000,
+     }
    }

    getModelCapabilities(modelName) {
      return {
-       reasoning: modelName.includes('claude') || modelName.includes('llama') || modelName.includes('titan'),
+       reasoning:
+         modelName.includes('claude') || modelName.includes('llama') || modelName.includes('titan'),
        function_calling: modelName.includes('claude'),
        json_mode: true,
        multimodal: modelName.includes('claude') || modelName.includes('llama3-2'),
        largeContext: modelName.includes('claude') || modelName.includes('llama'),
        advancedReasoning: modelName.includes('opus-4') || modelName.includes('3-7-sonnet'),
-       awsManaged: true
-     };
+       awsManaged: true,
+     }
    }

    async validateModelAvailability(modelName) {
      try {
-       const models = await this.getAvailableModels();
-       const model = models.find(m => m.name === modelName);
+       const models = await this.getAvailableModels()
+       const model = models.find((m) => m.name === modelName)

        if (model) {
          return {
            available: true,
            model: modelName,
            capabilities: model.capabilities,
-           contextWindow: model.contextWindow
-         };
-       } else {
-         const availableModels = models.map(m => m.name);
-         return {
-           available: false,
-           error: `Model '${modelName}' not available in Bedrock`,
-           alternatives: availableModels.slice(0, 5)
-         };
+           contextWindow: model.contextWindow,
+         }
+       }
+       const availableModels = models.map((m) => m.name)
+       return {
+         available: false,
+         error: `Model '${modelName}' not available in Bedrock`,
+         alternatives: availableModels.slice(0, 5),
        }
      } catch (error) {
        return {
@@ -351,28 +361,28 @@
          alternatives: [
            'anthropic.claude-sonnet-4-20250514-v1:0',
            'anthropic.claude-opus-4-20250514-v1:0',
-           'us.anthropic.claude-3-7-sonnet-20250219-v1:0'
-         ]
-       };
+           'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
+         ],
+       }
      }
    }

    async testConnection() {
      try {
-       const response = await this.generateCompletion([
-         { role: 'user', content: 'Hello' }
-       ], { max_tokens: 5 });
+       const response = await this.generateCompletion([{ role: 'user', content: 'Hello' }], {
+         max_tokens: 5,
+       })

        return {
          success: true,
          model: response.model,
-         message: 'Bedrock connection successful'
-       };
+         message: 'Bedrock connection successful',
+       }
      } catch (error) {
        return {
          success: false,
-         error: error.message
-       };
+         error: error.message,
+       }
      }
    }

@@ -383,11 +393,11 @@
      function_calling: modelName ? modelName.includes('claude') : true,
      json_mode: true,
      reasoning: true,
-     multimodal: modelName ? (modelName.includes('claude') || modelName.includes('llama3-2')) : true,
-     awsManaged: true
-   };
+     multimodal: modelName ? modelName.includes('claude') || modelName.includes('llama3-2') : true,
+     awsManaged: true,
+   }
    }
  }

  // Apply mixins to add standard provider functionality
- export default applyMixins ? applyMixins(BedrockProvider, 'bedrock') : BedrockProvider;
+ export default applyMixins ? applyMixins(BedrockProvider, 'bedrock') : BedrockProvider
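
For orientation, the surface touched by this diff can be sketched as follows. The constructor config keys (AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY), the generateCompletion(messages, options) signature, and the response shape are taken from the code above; the import path and the surrounding setup are assumptions for illustration only, not a documented entry point of the package.

```js
// Illustrative sketch only: the relative path is assumed from the file listing above.
import BedrockProvider from './src/infrastructure/providers/implementations/bedrock.js'

const provider = new BedrockProvider({
  AWS_REGION: 'us-east-1', // with no explicit keys, the default AWS credential chain is used
  AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
  AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,
})

if (provider.isAvailable()) {
  // Claude, Llama, and Titan model ids route through the Converse API; other ids fall back to InvokeModel.
  const result = await provider.generateCompletion(
    [
      { role: 'system', content: 'You write concise changelog entries.' },
      { role: 'user', content: 'Summarize: retry handling fixed in the Bedrock client.' },
    ],
    { max_tokens: 200, temperature: 0.3 }
  )
  console.log(result.content, result.usage.total_tokens)
}
```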
package/src/infrastructure/providers/implementations/dummy.js

@@ -3,13 +3,13 @@
   * Fallback provider when no other providers are available
   */

- import { BaseProvider } from '../core/base-provider.js';
- import { ProviderError } from '../../../shared/utils/utils.js';
+ import { ProviderError } from '../../../shared/utils/utils.js'
+ import { BaseProvider } from '../core/base-provider.js'

  class DummyProvider extends BaseProvider {
    constructor(config = {}) {
-     super(config);
-     this.name = 'dummy';
+     super(config)
+     this.name = 'dummy'
    }

    /**
@@ -17,7 +17,7 @@ class DummyProvider extends BaseProvider {
     * @returns {string} Provider name
     */
    getName() {
-     return this.name;
+     return this.name
    }

    /**
@@ -25,7 +25,7 @@ class DummyProvider extends BaseProvider {
     * @returns {boolean} Always true for dummy provider
     */
    isAvailable() {
-     return true;
+     return true
    }

    /**
@@ -34,13 +34,13 @@ class DummyProvider extends BaseProvider {
     * @param {Object} options - Generation options
     * @returns {Promise} - Promise that rejects with error
     */
-   async generateCompletion(messages, options = {}) {
+   async generateCompletion(_messages, _options = {}) {
      throw new ProviderError(
        'No AI provider is available. Please configure at least one provider in your .env.local file. ' +
-       'Run "node config-wizard.js" to set up your providers.',
+         'Run "node config-wizard.js" to set up your providers.',
        'dummy',
        'generateCompletion'
-     );
+     )
    }

    /**
@@ -48,11 +48,11 @@ class DummyProvider extends BaseProvider {
     * @param {Object} commitInfo - Commit information
     * @returns {Object} Model recommendation
     */
-   getModelRecommendation(commitInfo = {}) {
+   getModelRecommendation(_commitInfo = {}) {
      return {
        model: 'rule-based',
-       reason: 'No AI provider configured, using rule-based fallback.'
-     };
+       reason: 'No AI provider configured, using rule-based fallback.',
+     }
    }

    /**
@@ -60,12 +60,12 @@ class DummyProvider extends BaseProvider {
     * @param {string} model - Model name
     * @returns {Promise} - Promise that resolves with validation result
     */
-   async validateModelAvailability(model) {
+   async validateModelAvailability(_model) {
      return {
        available: false,
        error: 'No AI provider configured',
-       alternatives: []
-     };
+       alternatives: [],
+     }
    }

    /**
@@ -76,8 +76,8 @@ class DummyProvider extends BaseProvider {
      return {
        success: false,
        error: 'No AI provider configured',
-       provider: this.getName()
-     };
+       provider: this.getName(),
+     }
    }

    /**
@@ -89,8 +89,8 @@ class DummyProvider extends BaseProvider {
        streaming: false,
        tool_use: false,
        vision: false,
-       json_mode: false
-     };
+       json_mode: false,
+     }
    }

    getAvailableModels() {
@@ -103,10 +103,10 @@ class DummyProvider extends BaseProvider {
        inputCost: 0,
        outputCost: 0,
        features: [],
-       description: 'No AI model available - configure a provider'
-     }
-   ];
+       description: 'No AI model available - configure a provider',
+     },
+   ]
    }
  }

- export default DummyProvider;
+ export default DummyProvider
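
The dummy provider is the deliberate dead end of the provider chain: every generation call rejects with a ProviderError pointing the user at provider configuration, while getModelRecommendation steers callers toward the rule-based fallback. A minimal sketch of that behavior, with the import path assumed from the file listing above:

```js
// Illustrative only; the relative path is assumed from the file listing above.
import DummyProvider from './src/infrastructure/providers/implementations/dummy.js'

const fallback = new DummyProvider()

console.log(fallback.getName()) // 'dummy'
console.log(fallback.getModelRecommendation()) // { model: 'rule-based', reason: '...' }

try {
  await fallback.generateCompletion([{ role: 'user', content: 'hi' }])
} catch (error) {
  // ProviderError: 'No AI provider is available. Please configure at least one provider ...'
  console.error(error.message)
}
```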