@entro314labs/ai-changelog-generator 3.1.1 → 3.2.0

This diff compares the contents of publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Files changed (51)
  1. package/CHANGELOG.md +383 -877
  2. package/README.md +8 -3
  3. package/ai-changelog-mcp.sh +0 -0
  4. package/ai-changelog.sh +0 -0
  5. package/bin/ai-changelog-dxt.js +9 -9
  6. package/bin/ai-changelog-mcp.js +19 -17
  7. package/bin/ai-changelog.js +6 -6
  8. package/package.json +80 -48
  9. package/src/ai-changelog-generator.js +83 -81
  10. package/src/application/orchestrators/changelog.orchestrator.js +791 -516
  11. package/src/application/services/application.service.js +137 -128
  12. package/src/cli.js +76 -57
  13. package/src/domains/ai/ai-analysis.service.js +289 -209
  14. package/src/domains/analysis/analysis.engine.js +253 -193
  15. package/src/domains/changelog/changelog.service.js +1062 -784
  16. package/src/domains/changelog/workspace-changelog.service.js +420 -249
  17. package/src/domains/git/git-repository.analyzer.js +348 -258
  18. package/src/domains/git/git.service.js +132 -112
  19. package/src/infrastructure/cli/cli.controller.js +390 -274
  20. package/src/infrastructure/config/configuration.manager.js +220 -190
  21. package/src/infrastructure/interactive/interactive-staging.service.js +154 -135
  22. package/src/infrastructure/interactive/interactive-workflow.service.js +200 -159
  23. package/src/infrastructure/mcp/mcp-server.service.js +208 -207
  24. package/src/infrastructure/metrics/metrics.collector.js +140 -123
  25. package/src/infrastructure/providers/core/base-provider.js +87 -40
  26. package/src/infrastructure/providers/implementations/anthropic.js +101 -99
  27. package/src/infrastructure/providers/implementations/azure.js +124 -101
  28. package/src/infrastructure/providers/implementations/bedrock.js +136 -126
  29. package/src/infrastructure/providers/implementations/dummy.js +23 -23
  30. package/src/infrastructure/providers/implementations/google.js +123 -114
  31. package/src/infrastructure/providers/implementations/huggingface.js +94 -87
  32. package/src/infrastructure/providers/implementations/lmstudio.js +75 -60
  33. package/src/infrastructure/providers/implementations/mock.js +69 -73
  34. package/src/infrastructure/providers/implementations/ollama.js +89 -66
  35. package/src/infrastructure/providers/implementations/openai.js +88 -89
  36. package/src/infrastructure/providers/implementations/vertex.js +227 -197
  37. package/src/infrastructure/providers/provider-management.service.js +245 -207
  38. package/src/infrastructure/providers/provider-manager.service.js +145 -125
  39. package/src/infrastructure/providers/utils/base-provider-helpers.js +308 -302
  40. package/src/infrastructure/providers/utils/model-config.js +220 -195
  41. package/src/infrastructure/providers/utils/provider-utils.js +105 -100
  42. package/src/infrastructure/validation/commit-message-validation.service.js +259 -161
  43. package/src/shared/constants/colors.js +453 -180
  44. package/src/shared/utils/cli-demo.js +285 -0
  45. package/src/shared/utils/cli-entry-utils.js +257 -249
  46. package/src/shared/utils/cli-ui.js +447 -0
  47. package/src/shared/utils/diff-processor.js +513 -0
  48. package/src/shared/utils/error-classes.js +125 -156
  49. package/src/shared/utils/json-utils.js +93 -89
  50. package/src/shared/utils/utils.js +1117 -945
  51. package/types/index.d.ts +353 -344
package/src/infrastructure/providers/implementations/vertex.js

@@ -1,32 +1,35 @@
-import { GoogleGenAI } from '@google/genai';
-import { GoogleAuth } from 'google-auth-library';
-import { BaseProvider } from '../core/base-provider.js';
-import { applyMixins } from '../utils/base-provider-helpers.js';
-import { buildClientOptions } from '../utils/provider-utils.js';
+import process from 'node:process'
+
+import { GoogleGenAI } from '@google/genai'
+import { GoogleAuth } from 'google-auth-library'
+
+import { BaseProvider } from '../core/base-provider.js'
+import { applyMixins } from '../utils/base-provider-helpers.js'
+import { buildClientOptions } from '../utils/provider-utils.js'
 
 // Cache for model instances to avoid recreating them (with size limit to prevent memory leaks)
-const modelCache = new Map();
-const MAX_MODEL_CACHE_SIZE = 50;
+const modelCache = new Map()
+const MAX_MODEL_CACHE_SIZE = 50
 
 function addToModelCache(key, value) {
   if (modelCache.size >= MAX_MODEL_CACHE_SIZE) {
     // Remove oldest entries (first added)
-    const firstKey = modelCache.keys().next().value;
-    modelCache.delete(firstKey);
+    const firstKey = modelCache.keys().next().value
+    modelCache.delete(firstKey)
   }
-  modelCache.set(key, value);
+  modelCache.set(key, value)
 }
 
 class VertexAIProvider extends BaseProvider {
   constructor(config) {
-    super(config);
-    this.client = null;
-    this.generativeModel = null;
-    this.auth = null;
-    this.authClient = null;
-
+    super(config)
+    this.client = null
+    this.generativeModel = null
+    this.auth = null
+    this.authClient = null
+
     if (this.isAvailable()) {
-      this.initializeClient();
+      this.initializeClient()
     }
   }
 
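
Aside: the hunk above bounds the module-level model cache at 50 entries, evicting the oldest entry first; this works because a JavaScript Map iterates its keys in insertion order. A minimal standalone sketch of the same pattern (illustrative only, not the package's API):

const cache = new Map()
const MAX_SIZE = 50

function addToCache(key, value) {
  if (cache.size >= MAX_SIZE) {
    // keys() yields in insertion order, so the first key is the oldest entry
    cache.delete(cache.keys().next().value)
  }
  cache.set(key, value)
}

// Repeated inserts never grow the cache past MAX_SIZE:
for (let i = 0; i < 100; i++) addToCache(`model-${i}`, { id: i })
console.log(cache.size) // 50

Note this is FIFO rather than true LRU: re-setting an existing key keeps its original insertion position, so frequently used entries are not refreshed.
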
@@ -34,159 +37,164 @@ class VertexAIProvider extends BaseProvider {
     try {
       const clientOptions = buildClientOptions(this.getProviderConfig(), {
         location: 'us-central1',
-        apiVersion: 'v1'
-      });
-
+        apiVersion: 'v1',
+      })
+
       // Initialize Google Auth if needed
       if (clientOptions.keyFile || clientOptions.credentials) {
         this.auth = new GoogleAuth({
           scopes: ['https://www.googleapis.com/auth/cloud-platform'],
           keyFile: clientOptions.keyFile,
-          credentials: clientOptions.credentials ? JSON.parse(clientOptions.credentials) : undefined
-        });
-        this.authClient = await this.auth.getClient();
+          credentials: clientOptions.credentials
+            ? JSON.parse(clientOptions.credentials)
+            : undefined,
+        })
+        this.authClient = await this.auth.getClient()
       }
-
+
       // Initialize the GoogleGenAI client with Vertex AI configuration
       const vertexOptions = {
         vertexai: true,
         project: clientOptions.projectId,
         location: clientOptions.location,
-        apiVersion: clientOptions.apiVersion
-      };
-
+        apiVersion: clientOptions.apiVersion,
+      }
+
       if (clientOptions.apiEndpoint) {
-        vertexOptions.apiEndpoint = clientOptions.apiEndpoint;
+        vertexOptions.apiEndpoint = clientOptions.apiEndpoint
       }
-
+
       if (this.authClient) {
-        const accessToken = await this.authClient.getAccessToken();
-        vertexOptions.apiKey = accessToken.token;
+        const accessToken = await this.authClient.getAccessToken()
+        vertexOptions.apiKey = accessToken.token
       }
-
-      this.client = new GoogleGenAI(vertexOptions);
 
-      const modelConfig = this.getProviderModelConfig();
-      this.generativeModel = this.getModelInstance(modelConfig.standardModel);
-
-      return true;
+      this.client = new GoogleGenAI(vertexOptions)
+
+      const modelConfig = this.getProviderModelConfig()
+      this.generativeModel = this.getModelInstance(modelConfig.standardModel)
+
+      return true
     } catch (error) {
-      console.error('Failed to initialize Vertex AI client:', error);
-      this.client = null;
-      this.generativeModel = null;
-      return false;
+      console.error('Failed to initialize Vertex AI client:', error)
+      this.client = null
+      this.generativeModel = null
+      return false
     }
   }
 
   getModelInstance(modelName, options = {}) {
     if (!this.client) {
-      throw new Error('Vertex AI client not initialized');
+      throw new Error('Vertex AI client not initialized')
     }
-
+
     // Create a cache key based on model name and options
-    const cacheKey = `${modelName}-${JSON.stringify(options)}`;
-
+    const cacheKey = `${modelName}-${JSON.stringify(options)}`
+
     // Check if we already have this model instance cached
     if (modelCache.has(cacheKey)) {
-      return modelCache.get(cacheKey);
+      return modelCache.get(cacheKey)
     }
-
+
     // Default generation config
     const generationConfig = {
       temperature: options.temperature || this.config.VERTEX_TEMPERATURE || 0.7,
       topP: options.top_p || this.config.VERTEX_TOP_P || 0.95,
       topK: options.top_k || this.config.VERTEX_TOP_K || 40,
       maxOutputTokens: options.max_tokens || this.config.VERTEX_MAX_TOKENS || 8192,
-    };
-
+    }
+
     // Add stop sequences if provided
     if (options.stop && options.stop.length > 0) {
-      generationConfig.stopSequences = options.stop;
+      generationConfig.stopSequences = options.stop
     }
-
+
     // Create model instance
     const modelInstance = this.client.getGenerativeModel({
       model: modelName,
       generationConfig,
-      safetySettings: this.getSafetySettings()
-    });
-
+      safetySettings: this.getSafetySettings(),
+    })
+
     // Cache the model instance with size limit
-    addToModelCache(cacheKey, modelInstance);
-
-    return modelInstance;
+    addToModelCache(cacheKey, modelInstance)
+
+    return modelInstance
   }
-
+
   getSafetySettings() {
     // Configure safety settings based on environment variables or defaults
     const safetySettings = [
       {
         category: 'HARM_CATEGORY_HATE_SPEECH',
-        threshold: this.config.VERTEX_SAFETY_HATE_SPEECH || 'BLOCK_MEDIUM_AND_ABOVE'
+        threshold: this.config.VERTEX_SAFETY_HATE_SPEECH || 'BLOCK_MEDIUM_AND_ABOVE',
       },
       {
         category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
-        threshold: this.config.VERTEX_SAFETY_DANGEROUS || 'BLOCK_MEDIUM_AND_ABOVE'
+        threshold: this.config.VERTEX_SAFETY_DANGEROUS || 'BLOCK_MEDIUM_AND_ABOVE',
       },
       {
         category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
-        threshold: this.config.VERTEX_SAFETY_SEXUALLY_EXPLICIT || 'BLOCK_MEDIUM_AND_ABOVE'
+        threshold: this.config.VERTEX_SAFETY_SEXUALLY_EXPLICIT || 'BLOCK_MEDIUM_AND_ABOVE',
       },
       {
         category: 'HARM_CATEGORY_HARASSMENT',
-        threshold: this.config.VERTEX_SAFETY_HARASSMENT || 'BLOCK_MEDIUM_AND_ABOVE'
-      }
-    ];
-
-    return safetySettings;
+        threshold: this.config.VERTEX_SAFETY_HARASSMENT || 'BLOCK_MEDIUM_AND_ABOVE',
+      },
+    ]
+
+    return safetySettings
   }
 
   getName() {
-    return 'vertex';
+    return 'vertex'
   }
 
   isAvailable() {
     // Check if we have the required configuration for Vertex AI
-    return !!(this.config.VERTEX_PROJECT_ID &&
-      (this.config.VERTEX_KEY_FILE || this.config.VERTEX_CREDENTIALS ||
-        process.env.GOOGLE_APPLICATION_CREDENTIALS));
+    return !!(
+      this.config.VERTEX_PROJECT_ID &&
+      (this.config.VERTEX_KEY_FILE ||
+        this.config.VERTEX_CREDENTIALS ||
+        process.env.GOOGLE_APPLICATION_CREDENTIALS)
+    )
   }
 
   async generateCompletion(messages, options = {}) {
     if (!this.isAvailable()) {
       return this.handleProviderError(
-        new Error('Vertex AI provider is not configured'),
+        new Error('Vertex AI provider is not configured'),
         'generate_completion'
-      );
+      )
     }
-
+
     // Initialize client if not already done
     if (!this.client) {
-      await this.initializeClient();
+      await this.initializeClient()
       if (!this.client) {
         return this.handleProviderError(
-          new Error('Failed to initialize Vertex AI client'),
+          new Error('Failed to initialize Vertex AI client'),
           'generate_completion'
-        );
+        )
       }
     }
 
     try {
-      const modelConfig = this.getProviderModelConfig();
-      const modelName = options.model || modelConfig.standardModel;
-
+      const modelConfig = this.getProviderModelConfig()
+      const modelName = options.model || modelConfig.standardModel
+
       // Get or create model instance with the specified options
-      const model = this.getModelInstance(modelName, options);
+      const model = this.getModelInstance(modelName, options)
 
       // Convert messages to Vertex AI format
-      const formattedMessages = this.formatMessages(messages);
+      const formattedMessages = await this.formatMessages(messages)
 
       // Add function calling if provided and the model supports it
-      let tools = null;
+      let tools = null
       if (options.tools && Array.isArray(options.tools) && options.tools.length > 0) {
-        const capabilities = this.getCapabilities(modelName);
+        const capabilities = this.getCapabilities(modelName)
         if (capabilities.function_calling) {
-          tools = this.formatTools(options.tools);
+          tools = this.formatTools(options.tools)
         }
       }
 
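
Aside: getModelInstance above applies a three-level precedence for generation settings: a per-call option wins, then a VERTEX_* config value, then a hard-coded default. A sketch of that rule using the same names as the diff (the standalone helper itself is hypothetical):

function buildGenerationConfig(options = {}, config = {}) {
  // Per-call option > VERTEX_* config > default
  return {
    temperature: options.temperature || config.VERTEX_TEMPERATURE || 0.7,
    topP: options.top_p || config.VERTEX_TOP_P || 0.95,
    topK: options.top_k || config.VERTEX_TOP_K || 40,
    maxOutputTokens: options.max_tokens || config.VERTEX_MAX_TOKENS || 8192,
  }
}

console.log(buildGenerationConfig({ temperature: 0.2 }, { VERTEX_TOP_K: 20 }))
// { temperature: 0.2, topP: 0.95, topK: 20, maxOutputTokens: 8192 }

One caveat of the || fallbacks: a deliberate falsy value such as temperature: 0 is treated as unset and falls through to the default.
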
@@ -195,206 +203,228 @@ class VertexAIProvider extends BaseProvider {
         // For streaming, we'll use the generateContentStream method
         const streamResult = await model.generateContentStream({
           contents: formattedMessages,
-          tools: tools
-        });
-
-        let fullContent = '';
+          tools,
+        })
+
+        let fullContent = ''
         for await (const chunk of streamResult.stream) {
-          const chunkContent = chunk.text();
-          fullContent += chunkContent;
+          const chunkContent = chunk.text()
+          fullContent += chunkContent
           options.onUpdate({
             content: chunkContent,
-            done: false
-          });
+            done: false,
+          })
         }
-
+
         // Signal completion
         options.onUpdate({
           content: '',
-          done: true
-        });
-
+          done: true,
+        })
+
         // Return the full result
         return {
           content: fullContent,
-          model: modelName
-        };
-      } else {
-        // Non-streaming request
-        const result = await model.generateContent({
-          contents: formattedMessages,
-          tools: tools
-        });
-
-        // Extract the response text
-        const responseText = result.response?.text() || '';
-
-        // Handle function calls if present
-        let functionCalls = null;
-        if (result.response?.functionCalls && result.response.functionCalls.length > 0) {
-          functionCalls = result.response.functionCalls.map(call => ({
-            name: call.name,
-            arguments: JSON.parse(call.args)
-          }));
-        }
-
-        return {
-          content: responseText,
           model: modelName,
-          function_call: functionCalls ? functionCalls[0] : undefined,
-          function_calls: functionCalls
-        };
+        }
+      }
+      // Non-streaming request
+      const result = await model.generateContent({
+        contents: formattedMessages,
+        tools,
+      })
+
+      // Extract the response text
+      const responseText = result.response?.text() || ''
+
+      // Handle function calls if present
+      let functionCalls = null
+      if (result.response?.functionCalls && result.response.functionCalls.length > 0) {
+        functionCalls = result.response.functionCalls.map((call) => ({
+          name: call.name,
+          arguments: JSON.parse(call.args),
+        }))
+      }
+
+      return {
+        content: responseText,
+        model: modelName,
+        function_call: functionCalls ? functionCalls[0] : undefined,
+        function_calls: functionCalls,
       }
     } catch (error) {
       // Handle rate limiting with exponential backoff
-      if (error.message && (error.message.includes('quota') || error.message.includes('rate') || error.message.includes('limit'))) {
-        const retryCount = options.retryCount || 0;
+      if (
+        error.message &&
+        (error.message.includes('quota') ||
+          error.message.includes('rate') ||
+          error.message.includes('limit'))
+      ) {
+        const retryCount = options.retryCount || 0
         if (retryCount < 3) {
-          const delay = Math.pow(2, retryCount) * 1000; // Exponential backoff: 1s, 2s, 4s
-          console.warn(`Rate limit hit, retrying in ${delay}ms...`);
-
+          const delay = 2 ** retryCount * 1000 // Exponential backoff: 1s, 2s, 4s
+          console.warn(`Rate limit hit, retrying in ${delay}ms...`)
+
           return new Promise((resolve) => {
             setTimeout(async () => {
-              const retryOptions = { ...options, retryCount: retryCount + 1 };
-              const result = await this.generateCompletion(messages, retryOptions);
-              resolve(result);
-            }, delay);
-          });
+              const retryOptions = { ...options, retryCount: retryCount + 1 }
+              const result = await this.generateCompletion(messages, retryOptions)
+              resolve(result)
+            }, delay)
+          })
         }
       }
-
+
       // If model not found, try with a fallback model
-      if (error.message && (error.message.includes('not found') || error.message.includes('invalid model'))) {
-        const fallbackModels = this.getSuggestedModels(modelName);
+      if (
+        error.message &&
+        (error.message.includes('not found') || error.message.includes('invalid model'))
+      ) {
+        const fallbackModels = this.getSuggestedModels(modelName)
         if (fallbackModels.length > 0 && !options.triedFallback) {
-          console.warn(`Model ${modelName} not found, trying fallback model: ${fallbackModels[0]}`);
-          return this.generateCompletion(messages, {
-            ...options,
+          console.warn(`Model ${modelName} not found, trying fallback model: ${fallbackModels[0]}`)
+          return this.generateCompletion(messages, {
+            ...options,
             model: fallbackModels[0],
-            triedFallback: true
-          });
+            triedFallback: true,
+          })
         }
       }
-
-      return this.handleProviderError(error, 'generate_completion', { model: options.model });
+
+      return this.handleProviderError(error, 'generate_completion', { model: options.model })
     }
   }
 
-  formatMessages(messages) {
+  async formatMessages(messages) {
     // Convert messages to Vertex AI format for the new SDK
-    const formattedMessages = [];
-
+    const formattedMessages = []
+
     for (const message of messages) {
-      const role = message.role === 'assistant' ? 'model' : message.role;
-
+      const role = message.role === 'assistant' ? 'model' : message.role
+
       // Handle different content formats
-      let parts = [];
-
+      const parts = []
+
       // If content is a string, convert to text part
       if (typeof message.content === 'string') {
-        parts.push({ text: message.content });
+        parts.push({ text: message.content })
       }
       // If content is an array (multimodal), convert each part
       else if (Array.isArray(message.content)) {
         for (const part of message.content) {
           if (part.type === 'text') {
-            parts.push({ text: part.text });
-          }
-          else if (part.type === 'image_url') {
+            parts.push({ text: part.text })
+          } else if (part.type === 'image_url') {
             // Handle inline image data
             if (part.image_url.url.startsWith('data:image/')) {
-              const imageData = this.getImageData(part.image_url.url);
-              parts.push({ inlineData: { data: imageData, mimeType: 'image/jpeg' } });
+              const imageData = await this.getImageData(part.image_url.url)
+              parts.push({ inlineData: { data: imageData, mimeType: 'image/jpeg' } })
             }
-            // Handle remote image URL
+            // Handle remote image URL - for remote URLs, we now fetch and convert
             else {
-              parts.push({ fileData: { mimeType: 'image/jpeg', fileUri: part.image_url.url } });
+              try {
+                const imageData = await this.getImageData(part.image_url.url)
+                parts.push({ inlineData: { data: imageData, mimeType: 'image/jpeg' } })
+              } catch (_error) {
+                // Fallback to fileData if remote fetch fails
+                parts.push({ fileData: { mimeType: 'image/jpeg', fileUri: part.image_url.url } })
+              }
             }
           }
         }
       }
-
+
       formattedMessages.push({
         role,
-        parts
-      });
+        parts,
+      })
     }
-
-    return formattedMessages;
+
+    return formattedMessages
   }
 
-  getImageData(imageUrl) {
+  async getImageData(imageUrl) {
     // For base64 data URLs
     if (imageUrl.startsWith('data:')) {
-      return imageUrl.split(',')[1];
+      return imageUrl.split(',')[1]
+    }
+
+    // For regular URLs, fetch the image and convert to base64
+    try {
+      const response = await fetch(imageUrl)
+      if (!response.ok) {
+        throw new Error(`Failed to fetch image: ${response.statusText}`)
+      }
+
+      const arrayBuffer = await response.arrayBuffer()
+      const buffer = Buffer.from(arrayBuffer)
+      return buffer.toString('base64')
+    } catch (error) {
+      throw new Error(`Remote image fetching failed: ${error.message}`)
     }
-
-    // For regular URLs, we'd need to fetch the image and convert to base64
-    // Remote image fetching requires additional implementation
-    throw new Error('Remote image fetching not implemented');
   }
 
   formatTools(tools) {
     // Convert OpenAI-style function definitions to Google GenAI format
-    const functionDeclarations = [];
-
+    const functionDeclarations = []
+
     for (const tool of tools) {
       if (tool.type === 'function' && tool.function) {
         functionDeclarations.push({
           name: tool.function.name,
           description: tool.function.description || '',
-          parameters: tool.function.parameters || {}
-        });
+          parameters: tool.function.parameters || {},
+        })
       } else if (Array.isArray(tool.functions)) {
         // Handle legacy format
         for (const fn of tool.functions) {
           functionDeclarations.push({
             name: fn.name,
             description: fn.description || '',
-            parameters: fn.parameters || {}
-          });
+            parameters: fn.parameters || {},
+          })
         }
       }
     }
-
-    return { functionDeclarations };
+
+    return { functionDeclarations }
   }
 
   estimateTokenCount(text) {
     // Simple estimation: ~4 characters per token
-    return Math.ceil(text.length / 4);
+    return Math.ceil(text.length / 4)
   }
 
   // Vertex AI-specific helper methods
   getRetryDelay(attempt) {
-    return Math.pow(2, attempt) * 1000; // Exponential backoff
+    return 2 ** attempt * 1000 // Exponential backoff
   }
 
   // Vertex AI-specific method for testing model availability
   async testModel(modelName) {
     try {
-      const model = this.getModelInstance(modelName, { maxOutputTokens: 10 });
-
+      const model = this.getModelInstance(modelName, { maxOutputTokens: 10 })
+
       await model.generateContent({
         contents: [{ role: 'user', parts: [{ text: 'Test' }] }],
         generationConfig: {
           maxOutputTokens: 10,
-          temperature: 0
-        }
-      });
-
+          temperature: 0,
+        },
+      })
+
       return {
         success: true,
         model: modelName,
         project: this.config.VERTEX_PROJECT_ID,
-        location: this.config.VERTEX_LOCATION || 'us-central1'
-      };
+        location: this.config.VERTEX_LOCATION || 'us-central1',
+      }
     } catch (error) {
       return {
         success: false,
         error: error.message,
-        model: modelName
-      };
+        model: modelName,
+      }
     }
   }
 
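
Aside: two retry behaviors appear in this hunk. Quota/rate-limit errors are retried up to three times with delays of 2 ** n * 1000 ms (1s, 2s, 4s), and unknown-model errors are retried once against a suggested fallback model. A self-contained sketch of the backoff schedule (the helper names are hypothetical, not the package's API):

const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms))

async function withBackoff(fn, isRetryable, maxRetries = 3) {
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn()
    } catch (error) {
      // Give up once retries are exhausted or the error is not retryable
      if (attempt >= maxRetries || !isRetryable(error)) throw error
      const delay = 2 ** attempt * 1000 // 1s, 2s, 4s
      console.warn(`Rate limit hit, retrying in ${delay}ms...`)
      await sleep(delay)
    }
  }
}

// Usage mirroring the provider's check for quota/rate/limit wording:
// await withBackoff(
//   () => model.generateContent(request),
//   (e) => /quota|rate|limit/.test(e.message ?? '')
// )
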
@@ -406,9 +436,9 @@ class VertexAIProvider extends BaseProvider {
         contextWindow: 1048576,
         maxOutput: 8192,
         inputCost: 0.00000125,
-        outputCost: 0.00000500,
+        outputCost: 0.000005,
         features: ['text', 'vision', 'tools', 'reasoning'],
-        description: 'Latest Gemini model via Vertex AI'
+        description: 'Latest Gemini model via Vertex AI',
       },
       {
         id: 'gemini-1.5-pro',
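
Aside: this hunk and the two that follow only normalize the numeric literals (0.00000500 is the same value as 0.000005) and add trailing commas. Assuming the inputCost/outputCost fields are USD per token, the per-million-token rates work out as:

const perMillion = (perToken) => perToken * 1_000_000
console.log(perMillion(0.00000125)) // $1.25 per 1M input tokens
console.log(perMillion(0.000005)) // $5.00 per 1M output tokens
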
@@ -416,9 +446,9 @@ class VertexAIProvider extends BaseProvider {
         contextWindow: 2097152,
         maxOutput: 8192,
         inputCost: 0.00000125,
-        outputCost: 0.00000500,
+        outputCost: 0.000005,
         features: ['text', 'vision', 'tools'],
-        description: 'Production-ready Gemini model via Vertex AI'
+        description: 'Production-ready Gemini model via Vertex AI',
       },
       {
         id: 'gemini-1.5-flash',
@@ -426,13 +456,13 @@ class VertexAIProvider extends BaseProvider {
         contextWindow: 1048576,
         maxOutput: 8192,
         inputCost: 0.00000075,
-        outputCost: 0.00000300,
+        outputCost: 0.000003,
         features: ['text', 'vision', 'tools'],
-        description: 'Fast Gemini model via Vertex AI'
-      }
-    ];
+        description: 'Fast Gemini model via Vertex AI',
+      },
+    ]
   }
 }
 
 // Apply mixins to add standard provider functionality
-export default applyMixins(VertexAIProvider, 'vertex');
+export default applyMixins(VertexAIProvider, 'vertex')
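
The most significant behavioral change in this file is the new remote-image path: getImageData is now async and fetches http(s) URLs, so formatMessages can send remote images as base64 inlineData parts, falling back to a fileData reference only if the fetch fails. A standalone approximation of the new helper (a sketch assuming Node 18+, where global fetch is available):

import { Buffer } from 'node:buffer'

async function getImageData(imageUrl) {
  // Data URLs already carry base64 after the comma
  if (imageUrl.startsWith('data:')) {
    return imageUrl.split(',')[1]
  }
  // Remote URLs are fetched and re-encoded as base64
  const response = await fetch(imageUrl)
  if (!response.ok) {
    throw new Error(`Failed to fetch image: ${response.statusText}`)
  }
  return Buffer.from(await response.arrayBuffer()).toString('base64')
}

// formatMessages then wraps the result as an inline part:
// parts.push({ inlineData: { data: await getImageData(url), mimeType: 'image/jpeg' } })
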