ak-gemini 1.0.9 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/index.cjs +89 -9
  2. package/index.js +129 -10
  3. package/package.json +1 -1
  4. package/types.d.ts +36 -5
package/index.cjs CHANGED
@@ -134,6 +134,8 @@ var AITransformer = class {
  this.transformWithValidation = prepareAndValidateMessage.bind(this);
  this.estimate = estimateTokenUsage.bind(this);
  this.estimateTokenUsage = estimateTokenUsage.bind(this);
+ this.updateSystemInstructions = updateSystemInstructions.bind(this);
+ this.estimateCost = estimateCost.bind(this);
  }
  };
  var index_default = AITransformer;
@@ -163,8 +165,17 @@ function AITransformFactory(options = {}) {
  this.logLevel = "info";
  logger_default.level = "info";
  }
+ this.vertexai = options.vertexai || false;
+ this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
+ this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || "us-central1";
+ this.googleAuthOptions = options.googleAuthOptions || null;
  this.apiKey = options.apiKey !== void 0 && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
- if (!this.apiKey) throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var.");
+ if (!this.vertexai && !this.apiKey) {
+ throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
+ }
+ if (this.vertexai && !this.project) {
+ throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
+ }
  this.chatConfig = {
  ...DEFAULT_CHAT_CONFIG,
  ...options.chatConfig,
@@ -225,6 +236,10 @@ function AITransformFactory(options = {}) {
  this.onlyJSON = options.onlyJSON !== void 0 ? options.onlyJSON : true;
  this.enableGrounding = options.enableGrounding || false;
  this.groundingConfig = options.groundingConfig || {};
+ this.labels = options.labels || {};
+ if (Object.keys(this.labels).length > 0 && logger_default.level !== "silent") {
+ logger_default.debug(`Billing labels configured: ${JSON.stringify(this.labels)}`);
+ }
  if (this.promptKey === this.answerKey) {
  throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
  }
@@ -232,10 +247,27 @@ function AITransformFactory(options = {}) {
  logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
  logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
  logger_default.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
- logger_default.debug(`Using API key: ${this.apiKey.substring(0, 10)}...`);
+ if (this.vertexai) {
+ logger_default.debug(`Using Vertex AI - Project: ${this.project}, Location: ${this.location}`);
+ if (this.googleAuthOptions?.keyFilename) {
+ logger_default.debug(`Auth: Service account key file: ${this.googleAuthOptions.keyFilename}`);
+ } else if (this.googleAuthOptions?.credentials) {
+ logger_default.debug(`Auth: Inline credentials provided`);
+ } else {
+ logger_default.debug(`Auth: Application Default Credentials (ADC)`);
+ }
+ } else {
+ logger_default.debug(`Using Gemini API with key: ${this.apiKey.substring(0, 10)}...`);
+ }
  logger_default.debug(`Grounding ${this.enableGrounding ? "ENABLED" : "DISABLED"} (costs $35/1k queries)`);
  }
- const ai = new import_genai.GoogleGenAI({ apiKey: this.apiKey });
+ const clientOptions = this.vertexai ? {
+ vertexai: true,
+ project: this.project,
+ location: this.location,
+ ...this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions }
+ } : { apiKey: this.apiKey };
+ const ai = new import_genai.GoogleGenAI(clientOptions);
  this.genAIClient = ai;
  this.chat = null;
  }
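
The rewired constructor above passes either { apiKey } or { vertexai, project, location, googleAuthOptions } straight through to the GoogleGenAI client. A minimal usage sketch for the Vertex AI path, assuming Application Default Credentials are already configured (project ID, model, and payload are placeholders):

    import AITransformer from 'ak-gemini';

    // Vertex AI mode: no API key; project/location identify the Vertex endpoint.
    const transformer = new AITransformer({
      vertexai: true,
      project: 'my-gcp-project',     // falls back to GOOGLE_CLOUD_PROJECT
      location: 'us-central1',       // falls back to GOOGLE_CLOUD_LOCATION, default us-central1
      modelName: 'gemini-2.5-flash',
    });

    await transformer.init();
    const out = await transformer.message({ name: 'Lou Reed' }); // illustrative payload
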
@@ -245,7 +277,10 @@ async function initChat(force = false) {
  const chatOptions = {
  model: this.modelName,
  // @ts-ignore
- config: this.chatConfig,
+ config: {
+ ...this.chatConfig,
+ ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+ },
  history: []
  };
  if (this.enableGrounding) {
@@ -325,20 +360,29 @@ ${contextText}
  this.chat = await this.genAIClient.chats.create({
  model: this.modelName,
  // @ts-ignore
- config: this.chatConfig,
+ config: {
+ ...this.chatConfig,
+ ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+ },
  history: [...currentHistory, ...historyToAdd]
  });
  const newHistory = this.chat.getHistory();
  logger_default.debug(`Created new chat session with ${newHistory.length} examples.`);
  return newHistory;
  }
- async function rawMessage(sourcePayload) {
+ async function rawMessage(sourcePayload, messageOptions = {}) {
  if (!this.chat) {
  throw new Error("Chat session not initialized.");
  }
  const actualPayload = typeof sourcePayload === "string" ? sourcePayload : JSON.stringify(sourcePayload, null, 2);
+ const mergedLabels = { ...this.labels, ...messageOptions.labels || {} };
+ const hasLabels = Object.keys(mergedLabels).length > 0;
  try {
- const result = await this.chat.sendMessage({ message: actualPayload });
+ const sendParams = { message: actualPayload };
+ if (hasLabels) {
+ sendParams.config = { labels: mergedLabels };
+ }
+ const result = await this.chat.sendMessage(sendParams);
  const modelResponse = result.text;
  const extractedJSON = extractJSON(modelResponse);
  if (extractedJSON?.data) {
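
rawMessage now merges constructor-level labels with per-call labels, the per-call values winning on key collisions, and forwards the merged set as sendMessage config. A short sketch (label names and payload are illustrative; the billing-label limits noted in types.d.ts apply: lowercase keys, 63-character maximums):

    // Instance labels tag every request; per-call labels override them.
    const transformer = new AITransformer({
      apiKey: process.env.GEMINI_API_KEY,
      labels: { env: 'prod', team: 'data' },
    });
    await transformer.init();

    // Billed with { env: 'prod', team: 'growth' } -- the per-call team value wins.
    const out = await transformer.rawMessage(
      { event: 'signup' },
      { labels: { team: 'growth' } }
    );
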
@@ -397,9 +441,13 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
  } else {
  throw new Error("Invalid source payload. Must be a JSON object or string.");
  }
+ const messageOptions = {};
+ if (options.labels) {
+ messageOptions.labels = options.labels;
+ }
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
  try {
- const transformedPayload = attempt === 0 ? await this.rawMessage(lastPayload) : await this.rebuild(lastPayload, lastError.message);
+ const transformedPayload = attempt === 0 ? await this.rawMessage(lastPayload, messageOptions) : await this.rebuild(lastPayload, lastError.message);
  lastPayload = transformedPayload;
  if (validatorFn) {
  await validatorFn(transformedPayload);
@@ -472,13 +520,36 @@ async function estimateTokenUsage(nextPayload) {
  });
  return resp;
  }
+ var MODEL_PRICING = {
+ "gemini-2.5-flash": { input: 0.15, output: 0.6 },
+ "gemini-2.5-flash-lite": { input: 0.02, output: 0.1 },
+ "gemini-2.5-pro": { input: 2.5, output: 10 },
+ "gemini-3-pro": { input: 2, output: 12 },
+ "gemini-3-pro-preview": { input: 2, output: 12 },
+ "gemini-2.0-flash": { input: 0.1, output: 0.4 },
+ "gemini-2.0-flash-lite": { input: 0.02, output: 0.1 }
+ };
+ async function estimateCost(nextPayload) {
+ const tokenInfo = await this.estimateTokenUsage(nextPayload);
+ const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };
+ return {
+ totalTokens: tokenInfo.totalTokens,
+ model: this.modelName,
+ pricing,
+ estimatedInputCost: tokenInfo.totalTokens / 1e6 * pricing.input,
+ note: "Cost is for input tokens only; output cost depends on response length"
+ };
+ }
  async function resetChat() {
  if (this.chat) {
  logger_default.debug("Resetting Gemini chat session...");
  const chatOptions = {
  model: this.modelName,
  // @ts-ignore
- config: this.chatConfig,
+ config: {
+ ...this.chatConfig,
+ ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+ },
  history: []
  };
  if (this.enableGrounding) {
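
estimateCost layers the static MODEL_PRICING table on top of the existing token counter: it prices input tokens only and falls back to { input: 0, output: 0 } for models missing from the table. A usage sketch (payload and numbers are illustrative):

    const quote = await transformer.estimateCost({ event: 'signup', plan: 'pro' });
    // quote.totalTokens        -> e.g. 1200
    // quote.estimatedInputCost -> totalTokens / 1_000_000 * pricing.input
    // quote.note               -> reminds you output tokens are not included
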
@@ -500,6 +571,15 @@ function getChatHistory() {
  }
  return this.chat.getHistory();
  }
+ async function updateSystemInstructions(newInstructions) {
+ if (!newInstructions || typeof newInstructions !== "string") {
+ throw new Error("System instructions must be a non-empty string");
+ }
+ this.systemInstructions = newInstructions.trim();
+ this.chatConfig.systemInstruction = this.systemInstructions;
+ logger_default.debug("Updating system instructions and reinitializing chat...");
+ await this.init(true);
+ }
  function attemptJSONRecovery(text, maxAttempts = 100) {
  if (!text || typeof text !== "string") return null;
  try {
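
Because updateSystemInstructions ends with this.init(true), the chat session is rebuilt with an empty history, so seeded examples have to be re-applied afterward. A short sketch (the instruction text is illustrative):

    await transformer.updateSystemInstructions(
      'You convert raw analytics events into Mixpanel import records.'
    );
    // The forced re-init cleared history; re-seed if your transform depends on examples.
    await transformer.seed();
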
package/index.js CHANGED
@@ -134,6 +134,8 @@ class AITransformer {
  this.transformWithValidation = prepareAndValidateMessage.bind(this);
  this.estimate = estimateTokenUsage.bind(this);
  this.estimateTokenUsage = estimateTokenUsage.bind(this);
+ this.updateSystemInstructions = updateSystemInstructions.bind(this);
+ this.estimateCost = estimateCost.bind(this);
  }
  }

@@ -179,8 +181,22 @@ function AITransformFactory(options = {}) {
  log.level = 'info';
  }

+ // Vertex AI configuration
+ this.vertexai = options.vertexai || false;
+ this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
+ this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || 'us-central1';
+ this.googleAuthOptions = options.googleAuthOptions || null;
+
+ // API Key (for Gemini API, not Vertex AI)
  this.apiKey = options.apiKey !== undefined && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
- if (!this.apiKey) throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var.");
+
+ // Validate authentication - need either API key (for Gemini API) or Vertex AI config
+ if (!this.vertexai && !this.apiKey) {
+ throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
+ }
+ if (this.vertexai && !this.project) {
+ throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
+ }

  // Build chat config, making sure systemInstruction uses the custom instructions
  this.chatConfig = {
@@ -269,6 +285,12 @@ function AITransformFactory(options = {}) {
  this.enableGrounding = options.enableGrounding || false;
  this.groundingConfig = options.groundingConfig || {};

+ // Billing labels for cost segmentation
+ this.labels = options.labels || {};
+ if (Object.keys(this.labels).length > 0 && log.level !== 'silent') {
+ log.debug(`Billing labels configured: ${JSON.stringify(this.labels)}`);
+ }
+
  if (this.promptKey === this.answerKey) {
  throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
  }
@@ -277,12 +299,33 @@ function AITransformFactory(options = {}) {
  log.debug(`Creating AI Transformer with model: ${this.modelName}`);
  log.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
  log.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
- // Log API key prefix for tracking (first 10 chars only for security)
- log.debug(`Using API key: ${this.apiKey.substring(0, 10)}...`);
+ // Log authentication method
+ if (this.vertexai) {
+ log.debug(`Using Vertex AI - Project: ${this.project}, Location: ${this.location}`);
+ if (this.googleAuthOptions?.keyFilename) {
+ log.debug(`Auth: Service account key file: ${this.googleAuthOptions.keyFilename}`);
+ } else if (this.googleAuthOptions?.credentials) {
+ log.debug(`Auth: Inline credentials provided`);
+ } else {
+ log.debug(`Auth: Application Default Credentials (ADC)`);
+ }
+ } else {
+ log.debug(`Using Gemini API with key: ${this.apiKey.substring(0, 10)}...`);
+ }
  log.debug(`Grounding ${this.enableGrounding ? 'ENABLED' : 'DISABLED'} (costs $35/1k queries)`);
  }

- const ai = new GoogleGenAI({ apiKey: this.apiKey });
+ // Initialize Google GenAI client with appropriate configuration
+ const clientOptions = this.vertexai
+ ? {
+ vertexai: true,
+ project: this.project,
+ location: this.location,
+ ...(this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions })
+ }
+ : { apiKey: this.apiKey };
+
+ const ai = new GoogleGenAI(clientOptions);
  this.genAIClient = ai;
  this.chat = null;
  }
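
When Application Default Credentials are not an option, the googleAuthOptions object is forwarded to the client unchanged, so a service-account key file or inline credentials work too. A sketch under that assumption (path and email are placeholders):

    const transformer = new AITransformer({
      vertexai: true,
      project: 'my-gcp-project',
      location: 'us-central1',
      googleAuthOptions: {
        keyFilename: './service-account.json',
        // ...or inline credentials instead of a key file:
        // credentials: { client_email: 'sa@my-gcp-project.iam.gserviceaccount.com', private_key: '...' },
      },
    });
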
@@ -302,7 +345,10 @@ async function initChat(force = false) {
  const chatOptions = {
  model: this.modelName,
  // @ts-ignore
- config: this.chatConfig,
+ config: {
+ ...this.chatConfig,
+ ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+ },
  history: [],
  };

@@ -414,7 +460,10 @@ async function seedWithExamples(examples) {
  this.chat = await this.genAIClient.chats.create({
  model: this.modelName,
  // @ts-ignore
- config: this.chatConfig,
+ config: {
+ ...this.chatConfig,
+ ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+ },
  history: [...currentHistory, ...historyToAdd],
  });

@@ -435,9 +484,10 @@ async function seedWithExamples(examples) {
  * No validation or retry logic.
  * @this {ExportedAPI}
  * @param {Object|string} sourcePayload - The source payload.
+ * @param {Object} [messageOptions] - Optional per-message options (e.g., labels).
  * @returns {Promise<Object>} - The transformed payload.
  */
- async function rawMessage(sourcePayload) {
+ async function rawMessage(sourcePayload, messageOptions = {}) {
  if (!this.chat) {
  throw new Error("Chat session not initialized.");
  }
@@ -446,8 +496,19 @@ async function rawMessage(sourcePayload) {
  ? sourcePayload
  : JSON.stringify(sourcePayload, null, 2);

+ // Merge instance labels with per-message labels (per-message takes precedence)
+ const mergedLabels = { ...this.labels, ...(messageOptions.labels || {}) };
+ const hasLabels = Object.keys(mergedLabels).length > 0;
+
  try {
- const result = await this.chat.sendMessage({ message: actualPayload });
+ const sendParams = { message: actualPayload };
+
+ // Add config with labels if we have any
+ if (hasLabels) {
+ sendParams.config = { labels: mergedLabels };
+ }
+
+ const result = await this.chat.sendMessage(sendParams);
  const modelResponse = result.text;
  const extractedJSON = extractJSON(modelResponse); // Assuming extractJSON is defined

@@ -541,11 +602,17 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
  throw new Error("Invalid source payload. Must be a JSON object or string.");
  }

+ // Extract per-message labels for passing to rawMessage
+ const messageOptions = {};
+ if (options.labels) {
+ messageOptions.labels = options.labels;
+ }
+
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
  try {
  // Step 1: Get the transformed payload
  const transformedPayload = (attempt === 0)
- ? await this.rawMessage(lastPayload) // Use the new raw method
+ ? await this.rawMessage(lastPayload, messageOptions) // Use the new raw method with per-message options
  : await this.rebuild(lastPayload, lastError.message);

  lastPayload = transformedPayload; // Always update lastPayload *before* validation
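
Since message and transformWithValidation route through prepareAndValidateMessage, options.labels survives the whole validate-and-retry loop, not just the first attempt. A sketch (validator and payload are illustrative):

    // Validated transform with a per-call billing label.
    const validator = async (payload) => {
      if (!payload.event) throw new Error('missing event name'); // failure triggers rebuild/retry
    };

    const transformed = await transformer.message(
      { userId: 42, action: 'signup' },        // source payload
      { labels: { job: 'nightly-backfill' } }, // per-message billing label
      validator
    );
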
@@ -669,6 +736,37 @@ async function estimateTokenUsage(nextPayload) {
  return resp; // includes totalTokens, possibly breakdown
  }

+ // Model pricing per million tokens (as of Dec 2025)
+ // https://ai.google.dev/gemini-api/docs/pricing
+ const MODEL_PRICING = {
+ 'gemini-2.5-flash': { input: 0.15, output: 0.60 },
+ 'gemini-2.5-flash-lite': { input: 0.02, output: 0.10 },
+ 'gemini-2.5-pro': { input: 2.50, output: 10.00 },
+ 'gemini-3-pro': { input: 2.00, output: 12.00 },
+ 'gemini-3-pro-preview': { input: 2.00, output: 12.00 },
+ 'gemini-2.0-flash': { input: 0.10, output: 0.40 },
+ 'gemini-2.0-flash-lite': { input: 0.02, output: 0.10 }
+ };
+
+ /**
+ * Estimates the cost of sending a payload based on token count and model pricing.
+ * @this {ExportedAPI}
+ * @param {object|string} nextPayload - The next user message to be sent (object or string)
+ * @returns {Promise<Object>} - Cost estimation including tokens, model, pricing, and estimated input cost
+ */
+ async function estimateCost(nextPayload) {
+ const tokenInfo = await this.estimateTokenUsage(nextPayload);
+ const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };
+
+ return {
+ totalTokens: tokenInfo.totalTokens,
+ model: this.modelName,
+ pricing: pricing,
+ estimatedInputCost: (tokenInfo.totalTokens / 1_000_000) * pricing.input,
+ note: 'Cost is for input tokens only; output cost depends on response length'
+ };
+ }
+

  /**
  * Resets the current chat session, clearing all history and examples
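
For a concrete feel for the arithmetic: at gemini-2.5-pro's $2.50 per million input tokens, a 40,000-token prompt estimates to $0.10 of input cost; the output side is deliberately left out because response length is unknown ahead of time. The same calculation estimateCost performs, spelled out (token count is illustrative):

    const totalTokens = 40_000;                      // from estimateTokenUsage
    const pricing = { input: 2.50, output: 10.00 };  // gemini-2.5-pro, per 1M tokens
    const estimatedInputCost = (totalTokens / 1_000_000) * pricing.input; // 0.10
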
@@ -683,7 +781,10 @@ async function resetChat() {
  const chatOptions = {
  model: this.modelName,
  // @ts-ignore
- config: this.chatConfig,
+ config: {
+ ...this.chatConfig,
+ ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+ },
  history: [],
  };

@@ -714,6 +815,24 @@ function getChatHistory() {
  return this.chat.getHistory();
  }

+ /**
+ * Updates system instructions and reinitializes the chat session
+ * @this {ExportedAPI}
+ * @param {string} newInstructions - The new system instructions
+ * @returns {Promise<void>}
+ */
+ async function updateSystemInstructions(newInstructions) {
+ if (!newInstructions || typeof newInstructions !== 'string') {
+ throw new Error('System instructions must be a non-empty string');
+ }
+
+ this.systemInstructions = newInstructions.trim();
+ this.chatConfig.systemInstruction = this.systemInstructions;
+
+ log.debug('Updating system instructions and reinitializing chat...');
+ await this.init(true); // Force reinitialize with new instructions
+ }
+

  /*
  ----
package/package.json CHANGED
@@ -2,7 +2,7 @@
  "name": "ak-gemini",
  "author": "ak@mixpanel.com",
  "description": "AK's Generative AI Helper for doing... transforms",
- "version": "1.0.9",
+ "version": "1.0.11",
  "main": "index.js",
  "files": [
  "index.js",
package/types.d.ts CHANGED
@@ -26,6 +26,8 @@ export interface ChatConfig {
  safetySettings?: SafetySetting[]; // Safety settings array
  responseSchema?: Object; // Schema for validating model responses
  thinkingConfig?: ThinkingConfig; // Thinking features configuration
+ labels?: Record<string, string>; // Labels for billing segmentation
+ tools?: any[]; // Tools configuration (e.g., grounding)
  [key: string]: any; // Additional properties for flexibility
  }

@@ -48,11 +50,13 @@ export interface AITransformerContext {
  seed?: () => Promise<void>; // Function to seed the transformer with examples
  message?: (payload: Record<string, unknown>) => Promise<Record<string, unknown>>; // Function to send messages to the model
  rebuild?: (lastPayload: Record<string, unknown>, serverError: string) => Promise<Record<string, unknown>>; // Function to rebuild the transformer
- rawMessage?: (payload: Record<string, unknown> | string) => Promise<Record<string, unknown>>; // Function to send raw messages to the model
+ rawMessage?: (payload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }) => Promise<Record<string, unknown>>; // Function to send raw messages to the model
  genAIClient?: GoogleGenAI; // Google GenAI client instance
  onlyJSON?: boolean; // If true, only JSON responses are allowed
  enableGrounding?: boolean; // Enable Google Search grounding (default: false, WARNING: costs $35/1k queries)
  groundingConfig?: Record<string, any>; // Additional grounding configuration options
+ labels?: Record<string, string>; // Custom labels for billing segmentation (keys: 1-63 chars lowercase, values: max 63 chars)
+ estimateTokenUsage?: (nextPayload: Record<string, unknown> | string) => Promise<{ totalTokens: number; breakdown?: any }>;

  }

@@ -71,6 +75,17 @@ export interface ExampleFileContent {
  examples: TransformationExample[];
  }

+ // Google Auth options for Vertex AI authentication
+ // See: https://github.com/googleapis/google-auth-library-nodejs/blob/main/src/auth/googleauth.ts
+ export interface GoogleAuthOptions {
+ keyFilename?: string; // Path to a .json, .pem, or .p12 key file
+ keyFile?: string; // Alias for keyFilename
+ credentials?: { client_email?: string; private_key?: string; [key: string]: any }; // Object containing client_email and private_key
+ scopes?: string | string[]; // Required scopes for the API request
+ projectId?: string; // Your project ID (alias for project)
+ universeDomain?: string; // The default service domain for a Cloud universe
+ }
+
  export interface AITransformerOptions {
  // ? https://ai.google.dev/gemini-api/docs/models
  modelName?: string; // The Gemini model to use
@@ -91,12 +106,20 @@ export interface AITransformerOptions {
  retryDelay?: number; // Initial retry delay in milliseconds
  // ? https://ai.google.dev/gemini-api/docs/structured-output
  responseSchema?: Object; // Schema for validating model responses
- apiKey?: string; // API key for Google GenAI
+ apiKey?: string; // API key for Google GenAI (Gemini API)
  onlyJSON?: boolean; // If true, only JSON responses are allowed
  asyncValidator?: AsyncValidatorFunction; // Optional async validator function for response validation
  logLevel?: 'trace' | 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'none'; // Log level for the logger (defaults to 'info', 'none' disables logging)
  enableGrounding?: boolean; // Enable Google Search grounding (default: false, WARNING: costs $35/1k queries)
  groundingConfig?: Record<string, any>; // Additional grounding configuration options
+ labels?: Record<string, string>; // Custom labels for billing segmentation
+
+ // Vertex AI Authentication Options
+ // Use these instead of apiKey for Vertex AI with service account authentication
+ vertexai?: boolean; // Set to true to use Vertex AI instead of Gemini API
+ project?: string; // Google Cloud project ID (required for Vertex AI)
+ location?: string; // Google Cloud location/region (e.g., 'us-central1') - required for Vertex AI
+ googleAuthOptions?: GoogleAuthOptions; // Authentication options for Vertex AI (keyFilename, credentials, etc.)
  }

  // Async validator function type
@@ -126,12 +149,13 @@ export declare class AITransformer {
  logLevel: 'trace' | 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'none';
  enableGrounding: boolean;
  groundingConfig: Record<string, any>;
+ labels: Record<string, string>;

  // Methods
  init(force?: boolean): Promise<void>;
  seed(examples?: TransformationExample[]): Promise<any>;
  message(payload: Record<string, unknown>, opts?: object, validatorFn?: AsyncValidatorFunction | null): Promise<Record<string, unknown>>;
- rawMessage(sourcePayload: Record<string, unknown> | string): Promise<Record<string, unknown> | any>;
+ rawMessage(sourcePayload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }): Promise<Record<string, unknown> | any>;
  transformWithValidation(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?: object): Promise<Record<string, unknown>>;
  messageAndValidate(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?: object): Promise<Record<string, unknown>>;
  rebuild(lastPayload: Record<string, unknown>, serverError: string): Promise<Record<string, unknown>>;
@@ -139,8 +163,15 @@ export declare class AITransformer {
  getHistory(): Array<any>;
  estimateTokenUsage(nextPayload: Record<string, unknown> | string): Promise<{ totalTokens: number; breakdown?: any }>;
  estimate(nextPayload: Record<string, unknown> | string): Promise<{ totalTokens: number; breakdown?: any }>;
+ updateSystemInstructions(newInstructions: string): Promise<void>;
+ estimateCost(nextPayload: Record<string, unknown> | string): Promise<{
+ totalTokens: number;
+ model: string;
+ pricing: { input: number; output: number };
+ estimatedInputCost: number;
+ note: string;
+ }>;
  }

  // Default export
- declare const _default: typeof AITransformer;
- export default _default;
+ export default AITransformer;