ak-gemini 1.0.10 → 1.0.11
- package/index.cjs +79 -9
- package/index.js +110 -10
- package/package.json +1 -1
- package/types.d.ts +34 -3
package/index.cjs
CHANGED
@@ -135,6 +135,7 @@ var AITransformer = class {
     this.estimate = estimateTokenUsage.bind(this);
     this.estimateTokenUsage = estimateTokenUsage.bind(this);
     this.updateSystemInstructions = updateSystemInstructions.bind(this);
+    this.estimateCost = estimateCost.bind(this);
   }
 };
 var index_default = AITransformer;
@@ -164,8 +165,17 @@ function AITransformFactory(options = {}) {
     this.logLevel = "info";
     logger_default.level = "info";
   }
+  this.vertexai = options.vertexai || false;
+  this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
+  this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || "us-central1";
+  this.googleAuthOptions = options.googleAuthOptions || null;
   this.apiKey = options.apiKey !== void 0 && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
-  if (!this.
+  if (!this.vertexai && !this.apiKey) {
+    throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
+  }
+  if (this.vertexai && !this.project) {
+    throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
+  }
   this.chatConfig = {
     ...DEFAULT_CHAT_CONFIG,
     ...options.chatConfig,
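Together, these options let the factory authenticate against Vertex AI instead of the public Gemini API, falling back to the GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables. A minimal consumer sketch, assuming the package's default export is the transformer constructor; the project ID and key-file path are placeholders:

```js
// Hypothetical usage based on the option names in the hunk above; all values are placeholders.
const AITransformer = require("ak-gemini");

const transformer = new AITransformer({
  vertexai: true,                          // route calls through Vertex AI
  project: "my-gcp-project",               // or set GOOGLE_CLOUD_PROJECT
  location: "us-central1",                 // default when omitted
  googleAuthOptions: { keyFilename: "./service-account.json" } // omit to use ADC
});
```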
@@ -226,6 +236,10 @@ function AITransformFactory(options = {}) {
   this.onlyJSON = options.onlyJSON !== void 0 ? options.onlyJSON : true;
   this.enableGrounding = options.enableGrounding || false;
   this.groundingConfig = options.groundingConfig || {};
+  this.labels = options.labels || {};
+  if (Object.keys(this.labels).length > 0 && logger_default.level !== "silent") {
+    logger_default.debug(`Billing labels configured: ${JSON.stringify(this.labels)}`);
+  }
   if (this.promptKey === this.answerKey) {
     throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
   }
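The new labels option is stored on the instance and, as the later hunks show, merged into every chat config so the labels reach Cloud Billing. A hedged sketch; the label keys and values are invented:

```js
// Hypothetical: label keys/values are examples only (lowercase, <= 63 chars per the type comments).
const transformer = new AITransformer({
  apiKey: process.env.GEMINI_API_KEY,
  labels: { team: "data-eng", env: "staging" } // surfaced as billing labels for cost segmentation
});
```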
@@ -233,10 +247,27 @@ function AITransformFactory(options = {}) {
     logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
     logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
     logger_default.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
-
+    if (this.vertexai) {
+      logger_default.debug(`Using Vertex AI - Project: ${this.project}, Location: ${this.location}`);
+      if (this.googleAuthOptions?.keyFilename) {
+        logger_default.debug(`Auth: Service account key file: ${this.googleAuthOptions.keyFilename}`);
+      } else if (this.googleAuthOptions?.credentials) {
+        logger_default.debug(`Auth: Inline credentials provided`);
+      } else {
+        logger_default.debug(`Auth: Application Default Credentials (ADC)`);
+      }
+    } else {
+      logger_default.debug(`Using Gemini API with key: ${this.apiKey.substring(0, 10)}...`);
+    }
     logger_default.debug(`Grounding ${this.enableGrounding ? "ENABLED" : "DISABLED"} (costs $35/1k queries)`);
   }
-  const
+  const clientOptions = this.vertexai ? {
+    vertexai: true,
+    project: this.project,
+    location: this.location,
+    ...this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions }
+  } : { apiKey: this.apiKey };
+  const ai = new import_genai.GoogleGenAI(clientOptions);
   this.genAIClient = ai;
   this.chat = null;
 }
@@ -246,7 +277,10 @@ async function initChat(force = false) {
   const chatOptions = {
     model: this.modelName,
     // @ts-ignore
-    config:
+    config: {
+      ...this.chatConfig,
+      ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+    },
     history: []
   };
   if (this.enableGrounding) {
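The `...Object.keys(this.labels).length > 0 && { labels: this.labels }` pattern used here (and in the other chat-creation hunks) only adds a `labels` key when at least one label exists, because spreading a falsy value into an object literal is a no-op. An isolated illustration of the idiom, with made-up values:

```js
// Illustration only: spreading `false` adds nothing; spreading an object adds its keys.
const labels = { team: "data-eng" };
const config = {
  maxOutputTokens: 1024,
  ...(Object.keys(labels).length > 0 && { labels })
};
// -> { maxOutputTokens: 1024, labels: { team: "data-eng" } }
```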
@@ -326,20 +360,29 @@ ${contextText}
   this.chat = await this.genAIClient.chats.create({
     model: this.modelName,
     // @ts-ignore
-    config:
+    config: {
+      ...this.chatConfig,
+      ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+    },
     history: [...currentHistory, ...historyToAdd]
   });
   const newHistory = this.chat.getHistory();
   logger_default.debug(`Created new chat session with ${newHistory.length} examples.`);
   return newHistory;
 }
-async function rawMessage(sourcePayload) {
+async function rawMessage(sourcePayload, messageOptions = {}) {
   if (!this.chat) {
     throw new Error("Chat session not initialized.");
   }
   const actualPayload = typeof sourcePayload === "string" ? sourcePayload : JSON.stringify(sourcePayload, null, 2);
+  const mergedLabels = { ...this.labels, ...messageOptions.labels || {} };
+  const hasLabels = Object.keys(mergedLabels).length > 0;
   try {
-    const
+    const sendParams = { message: actualPayload };
+    if (hasLabels) {
+      sendParams.config = { labels: mergedLabels };
+    }
+    const result = await this.chat.sendMessage(sendParams);
     const modelResponse = result.text;
     const extractedJSON = extractJSON(modelResponse);
     if (extractedJSON?.data) {
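rawMessage now takes an optional second argument whose labels are merged over the instance labels, so a single request can be tagged differently. A hedged sketch of a direct call, reusing the transformer from the earlier sketches; the payload and label values are placeholders:

```js
// Hypothetical call; per-message labels override instance labels on key collisions.
await transformer.init();
const transformed = await transformer.rawMessage(
  { name: "Ada", plan: "pro" },
  { labels: { job: "nightly-backfill" } }
);
```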
@@ -398,9 +441,13 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
   } else {
     throw new Error("Invalid source payload. Must be a JSON object or string.");
   }
+  const messageOptions = {};
+  if (options.labels) {
+    messageOptions.labels = options.labels;
+  }
   for (let attempt = 0; attempt <= maxRetries; attempt++) {
     try {
-      const transformedPayload = attempt === 0 ? await this.rawMessage(lastPayload) : await this.rebuild(lastPayload, lastError.message);
+      const transformedPayload = attempt === 0 ? await this.rawMessage(lastPayload, messageOptions) : await this.rebuild(lastPayload, lastError.message);
       lastPayload = transformedPayload;
       if (validatorFn) {
         await validatorFn(transformedPayload);
@@ -473,13 +520,36 @@ async function estimateTokenUsage(nextPayload) {
   });
   return resp;
 }
+var MODEL_PRICING = {
+  "gemini-2.5-flash": { input: 0.15, output: 0.6 },
+  "gemini-2.5-flash-lite": { input: 0.02, output: 0.1 },
+  "gemini-2.5-pro": { input: 2.5, output: 10 },
+  "gemini-3-pro": { input: 2, output: 12 },
+  "gemini-3-pro-preview": { input: 2, output: 12 },
+  "gemini-2.0-flash": { input: 0.1, output: 0.4 },
+  "gemini-2.0-flash-lite": { input: 0.02, output: 0.1 }
+};
+async function estimateCost(nextPayload) {
+  const tokenInfo = await this.estimateTokenUsage(nextPayload);
+  const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };
+  return {
+    totalTokens: tokenInfo.totalTokens,
+    model: this.modelName,
+    pricing,
+    estimatedInputCost: tokenInfo.totalTokens / 1e6 * pricing.input,
+    note: "Cost is for input tokens only; output cost depends on response length"
+  };
+}
 async function resetChat() {
   if (this.chat) {
     logger_default.debug("Resetting Gemini chat session...");
     const chatOptions = {
       model: this.modelName,
       // @ts-ignore
-      config:
+      config: {
+        ...this.chatConfig,
+        ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+      },
       history: []
     };
     if (this.enableGrounding) {
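estimateCost builds on the existing estimateTokenUsage and the MODEL_PRICING table above; it prices input tokens only, and unknown models fall back to zero pricing. A hedged usage sketch with an illustrative payload:

```js
// Hypothetical usage; the numbers depend on the payload and the configured model.
const estimate = await transformer.estimateCost({ name: "Ada", plan: "pro" });
console.log(estimate.totalTokens);        // token count from estimateTokenUsage
console.log(estimate.estimatedInputCost); // totalTokens / 1e6 * pricing.input
console.log(estimate.note);               // reminder that output cost is not included
```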
package/index.js
CHANGED
@@ -135,6 +135,7 @@ class AITransformer {
     this.estimate = estimateTokenUsage.bind(this);
     this.estimateTokenUsage = estimateTokenUsage.bind(this);
     this.updateSystemInstructions = updateSystemInstructions.bind(this);
+    this.estimateCost = estimateCost.bind(this);
   }
 }
 
@@ -180,8 +181,22 @@ function AITransformFactory(options = {}) {
     log.level = 'info';
   }
 
+  // Vertex AI configuration
+  this.vertexai = options.vertexai || false;
+  this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
+  this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || 'us-central1';
+  this.googleAuthOptions = options.googleAuthOptions || null;
+
+  // API Key (for Gemini API, not Vertex AI)
   this.apiKey = options.apiKey !== undefined && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
-
+
+  // Validate authentication - need either API key (for Gemini API) or Vertex AI config
+  if (!this.vertexai && !this.apiKey) {
+    throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
+  }
+  if (this.vertexai && !this.project) {
+    throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
+  }
 
   // Build chat config, making sure systemInstruction uses the custom instructions
   this.chatConfig = {
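Because project and location fall back to environment variables and authentication can fall back to Application Default Credentials, Vertex AI can be selected with no explicit credentials in code. A hedged sketch; the values are placeholders and would normally be set in the shell (e.g. after `gcloud auth application-default login`):

```js
// Hypothetical: assumes GOOGLE_CLOUD_PROJECT / GOOGLE_CLOUD_LOCATION are set and ADC is configured.
process.env.GOOGLE_CLOUD_PROJECT ||= "my-gcp-project";
process.env.GOOGLE_CLOUD_LOCATION ||= "europe-west1";

const transformer = new AITransformer({ vertexai: true }); // no apiKey required in this mode
```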
@@ -270,6 +285,12 @@ function AITransformFactory(options = {}) {
   this.enableGrounding = options.enableGrounding || false;
   this.groundingConfig = options.groundingConfig || {};
 
+  // Billing labels for cost segmentation
+  this.labels = options.labels || {};
+  if (Object.keys(this.labels).length > 0 && log.level !== 'silent') {
+    log.debug(`Billing labels configured: ${JSON.stringify(this.labels)}`);
+  }
+
   if (this.promptKey === this.answerKey) {
     throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
   }
@@ -278,12 +299,33 @@ function AITransformFactory(options = {}) {
     log.debug(`Creating AI Transformer with model: ${this.modelName}`);
     log.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
     log.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
-    // Log
-
+    // Log authentication method
+    if (this.vertexai) {
+      log.debug(`Using Vertex AI - Project: ${this.project}, Location: ${this.location}`);
+      if (this.googleAuthOptions?.keyFilename) {
+        log.debug(`Auth: Service account key file: ${this.googleAuthOptions.keyFilename}`);
+      } else if (this.googleAuthOptions?.credentials) {
+        log.debug(`Auth: Inline credentials provided`);
+      } else {
+        log.debug(`Auth: Application Default Credentials (ADC)`);
+      }
+    } else {
+      log.debug(`Using Gemini API with key: ${this.apiKey.substring(0, 10)}...`);
+    }
     log.debug(`Grounding ${this.enableGrounding ? 'ENABLED' : 'DISABLED'} (costs $35/1k queries)`);
   }
 
-
+  // Initialize Google GenAI client with appropriate configuration
+  const clientOptions = this.vertexai
+    ? {
+        vertexai: true,
+        project: this.project,
+        location: this.location,
+        ...(this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions })
+      }
+    : { apiKey: this.apiKey };
+
+  const ai = new GoogleGenAI(clientOptions);
   this.genAIClient = ai;
   this.chat = null;
 }
@@ -303,7 +345,10 @@ async function initChat(force = false) {
   const chatOptions = {
     model: this.modelName,
     // @ts-ignore
-    config:
+    config: {
+      ...this.chatConfig,
+      ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+    },
     history: [],
   };
 
@@ -415,7 +460,10 @@ async function seedWithExamples(examples) {
   this.chat = await this.genAIClient.chats.create({
     model: this.modelName,
     // @ts-ignore
-    config:
+    config: {
+      ...this.chatConfig,
+      ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+    },
     history: [...currentHistory, ...historyToAdd],
   });
 
@@ -436,9 +484,10 @@ async function seedWithExamples(examples) {
  * No validation or retry logic.
  * @this {ExportedAPI}
  * @param {Object|string} sourcePayload - The source payload.
+ * @param {Object} [messageOptions] - Optional per-message options (e.g., labels).
  * @returns {Promise<Object>} - The transformed payload.
  */
-async function rawMessage(sourcePayload) {
+async function rawMessage(sourcePayload, messageOptions = {}) {
   if (!this.chat) {
     throw new Error("Chat session not initialized.");
   }
@@ -447,8 +496,19 @@ async function rawMessage(sourcePayload) {
     ? sourcePayload
     : JSON.stringify(sourcePayload, null, 2);
 
+  // Merge instance labels with per-message labels (per-message takes precedence)
+  const mergedLabels = { ...this.labels, ...(messageOptions.labels || {}) };
+  const hasLabels = Object.keys(mergedLabels).length > 0;
+
   try {
-    const
+    const sendParams = { message: actualPayload };
+
+    // Add config with labels if we have any
+    if (hasLabels) {
+      sendParams.config = { labels: mergedLabels };
+    }
+
+    const result = await this.chat.sendMessage(sendParams);
     const modelResponse = result.text;
     const extractedJSON = extractJSON(modelResponse); // Assuming extractJSON is defined
 
@@ -542,11 +602,17 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
     throw new Error("Invalid source payload. Must be a JSON object or string.");
   }
 
+  // Extract per-message labels for passing to rawMessage
+  const messageOptions = {};
+  if (options.labels) {
+    messageOptions.labels = options.labels;
+  }
+
   for (let attempt = 0; attempt <= maxRetries; attempt++) {
     try {
       // Step 1: Get the transformed payload
       const transformedPayload = (attempt === 0)
-        ? await this.rawMessage(lastPayload) // Use the new raw method
+        ? await this.rawMessage(lastPayload, messageOptions) // Use the new raw method with per-message options
         : await this.rebuild(lastPayload, lastError.message);
 
       lastPayload = transformedPayload; // Always update lastPayload *before* validation
@@ -670,6 +736,37 @@ async function estimateTokenUsage(nextPayload) {
   return resp; // includes totalTokens, possibly breakdown
 }
 
+// Model pricing per million tokens (as of Dec 2025)
+// https://ai.google.dev/gemini-api/docs/pricing
+const MODEL_PRICING = {
+  'gemini-2.5-flash': { input: 0.15, output: 0.60 },
+  'gemini-2.5-flash-lite': { input: 0.02, output: 0.10 },
+  'gemini-2.5-pro': { input: 2.50, output: 10.00 },
+  'gemini-3-pro': { input: 2.00, output: 12.00 },
+  'gemini-3-pro-preview': { input: 2.00, output: 12.00 },
+  'gemini-2.0-flash': { input: 0.10, output: 0.40 },
+  'gemini-2.0-flash-lite': { input: 0.02, output: 0.10 }
+};
+
+/**
+ * Estimates the cost of sending a payload based on token count and model pricing.
+ * @this {ExportedAPI}
+ * @param {object|string} nextPayload - The next user message to be sent (object or string)
+ * @returns {Promise<Object>} - Cost estimation including tokens, model, pricing, and estimated input cost
+ */
+async function estimateCost(nextPayload) {
+  const tokenInfo = await this.estimateTokenUsage(nextPayload);
+  const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };
+
+  return {
+    totalTokens: tokenInfo.totalTokens,
+    model: this.modelName,
+    pricing: pricing,
+    estimatedInputCost: (tokenInfo.totalTokens / 1_000_000) * pricing.input,
+    note: 'Cost is for input tokens only; output cost depends on response length'
+  };
+}
+
 
 /**
  * Resets the current chat session, clearing all history and examples
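The cost formula is simply tokens divided by one million, times the model's input rate from MODEL_PRICING. A quick worked check against the gemini-2.5-flash entry, with an illustrative token count:

```js
// Illustrative arithmetic only: 50,000 input tokens on gemini-2.5-flash.
const pricing = { input: 0.15, output: 0.60 };            // $ per 1M tokens, from MODEL_PRICING
const estimatedInputCost = (50_000 / 1_000_000) * pricing.input;
console.log(estimatedInputCost);                           // 0.0075 (about three-quarters of a cent)
```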
@@ -684,7 +781,10 @@ async function resetChat() {
     const chatOptions = {
       model: this.modelName,
       // @ts-ignore
-      config:
+      config: {
+        ...this.chatConfig,
+        ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+      },
       history: [],
     };
 
package/package.json
CHANGED
package/types.d.ts
CHANGED
@@ -26,6 +26,8 @@ export interface ChatConfig {
   safetySettings?: SafetySetting[]; // Safety settings array
   responseSchema?: Object; // Schema for validating model responses
   thinkingConfig?: ThinkingConfig; // Thinking features configuration
+  labels?: Record<string, string>; // Labels for billing segmentation
+  tools?: any[]; // Tools configuration (e.g., grounding)
   [key: string]: any; // Additional properties for flexibility
 }
 
@@ -48,11 +50,13 @@ export interface AITransformerContext {
   seed?: () => Promise<void>; // Function to seed the transformer with examples
   message?: (payload: Record<string, unknown>) => Promise<Record<string, unknown>>; // Function to send messages to the model
   rebuild?: (lastPayload: Record<string, unknown>, serverError: string) => Promise<Record<string, unknown>>; // Function to rebuild the transformer
-  rawMessage?: (payload: Record<string, unknown> | string) => Promise<Record<string, unknown>>; // Function to send raw messages to the model
+  rawMessage?: (payload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }) => Promise<Record<string, unknown>>; // Function to send raw messages to the model
   genAIClient?: GoogleGenAI; // Google GenAI client instance
   onlyJSON?: boolean; // If true, only JSON responses are allowed
   enableGrounding?: boolean; // Enable Google Search grounding (default: false, WARNING: costs $35/1k queries)
   groundingConfig?: Record<string, any>; // Additional grounding configuration options
+  labels?: Record<string, string>; // Custom labels for billing segmentation (keys: 1-63 chars lowercase, values: max 63 chars)
+  estimateTokenUsage?: (nextPayload: Record<string, unknown> | string) => Promise<{ totalTokens: number; breakdown?: any }>;
 
 }
 
@@ -71,6 +75,17 @@ export interface ExampleFileContent {
   examples: TransformationExample[];
 }
 
+// Google Auth options for Vertex AI authentication
+// See: https://github.com/googleapis/google-auth-library-nodejs/blob/main/src/auth/googleauth.ts
+export interface GoogleAuthOptions {
+  keyFilename?: string; // Path to a .json, .pem, or .p12 key file
+  keyFile?: string; // Alias for keyFilename
+  credentials?: { client_email?: string; private_key?: string; [key: string]: any }; // Object containing client_email and private_key
+  scopes?: string | string[]; // Required scopes for the API request
+  projectId?: string; // Your project ID (alias for project)
+  universeDomain?: string; // The default service domain for a Cloud universe
+}
+
 export interface AITransformerOptions {
   // ? https://ai.google.dev/gemini-api/docs/models
   modelName?: string; // The Gemini model to use
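For reference, the two GoogleAuthOptions shapes that the constructor's debug logging distinguishes are a key-file path and inline credentials. A hedged sketch of each; the email and key material are placeholders:

```js
// Hypothetical values satisfying the GoogleAuthOptions interface above.
const viaKeyFile = { keyFilename: "./service-account.json" };
const viaInlineCredentials = {
  credentials: {
    client_email: "bot@my-gcp-project.iam.gserviceaccount.com",
    private_key: "-----BEGIN PRIVATE KEY-----\n..."
  }
};
// Either object can be passed as googleAuthOptions alongside vertexai: true and project.
```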
@@ -91,12 +106,20 @@ export interface AITransformerOptions {
   retryDelay?: number; // Initial retry delay in milliseconds
   // ? https://ai.google.dev/gemini-api/docs/structured-output
   responseSchema?: Object; // Schema for validating model responses
-  apiKey?: string; // API key for Google GenAI
+  apiKey?: string; // API key for Google GenAI (Gemini API)
   onlyJSON?: boolean; // If true, only JSON responses are allowed
   asyncValidator?: AsyncValidatorFunction; // Optional async validator function for response validation
   logLevel?: 'trace' | 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'none'; // Log level for the logger (defaults to 'info', 'none' disables logging)
   enableGrounding?: boolean; // Enable Google Search grounding (default: false, WARNING: costs $35/1k queries)
   groundingConfig?: Record<string, any>; // Additional grounding configuration options
+  labels?: Record<string, string>; // Custom labels for billing segmentation
+
+  // Vertex AI Authentication Options
+  // Use these instead of apiKey for Vertex AI with service account authentication
+  vertexai?: boolean; // Set to true to use Vertex AI instead of Gemini API
+  project?: string; // Google Cloud project ID (required for Vertex AI)
+  location?: string; // Google Cloud location/region (e.g., 'us-central1') - required for Vertex AI
+  googleAuthOptions?: GoogleAuthOptions; // Authentication options for Vertex AI (keyFilename, credentials, etc.)
 }
 
 // Async validator function type
@@ -126,12 +149,13 @@ export declare class AITransformer {
   logLevel: 'trace' | 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'none';
   enableGrounding: boolean;
   groundingConfig: Record<string, any>;
+  labels: Record<string, string>;
 
   // Methods
   init(force?: boolean): Promise<void>;
   seed(examples?: TransformationExample[]): Promise<any>;
   message(payload: Record<string, unknown>, opts?: object, validatorFn?: AsyncValidatorFunction | null): Promise<Record<string, unknown>>;
-  rawMessage(sourcePayload: Record<string, unknown> | string): Promise<Record<string, unknown> | any>;
+  rawMessage(sourcePayload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }): Promise<Record<string, unknown> | any>;
   transformWithValidation(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?: object): Promise<Record<string, unknown>>;
   messageAndValidate(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?: object): Promise<Record<string, unknown>>;
   rebuild(lastPayload: Record<string, unknown>, serverError: string): Promise<Record<string, unknown>>;
@@ -140,6 +164,13 @@ export declare class AITransformer {
   estimateTokenUsage(nextPayload: Record<string, unknown> | string): Promise<{ totalTokens: number; breakdown?: any }>;
   estimate(nextPayload: Record<string, unknown> | string): Promise<{ totalTokens: number; breakdown?: any }>;
   updateSystemInstructions(newInstructions: string): Promise<void>;
+  estimateCost(nextPayload: Record<string, unknown> | string): Promise<{
+    totalTokens: number;
+    model: string;
+    pricing: { input: number; output: number };
+    estimatedInputCost: number;
+    note: string;
+  }>;
 }
 
 // Default export