ak-gemini 1.2.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -1,1504 +1,41 @@
1
1
  /**
2
- * @fileoverview
3
- * Generic AI transformation module that can be configured for different use cases.
4
- * Supports various models, system instructions, chat configurations, and example datasets.
5
- */
6
-
7
- /**
8
- * @typedef {import('./types').SafetySetting} SafetySetting
9
- * @typedef {import('./types').ChatConfig} ChatConfig
10
- * @typedef {import('./types').TransformationExample} TransformationExample
11
- * @typedef {import('./types').ExampleFileContent} ExampleFileContent
12
- * @typedef {import('./types').AITransformerOptions} AITransformerOptions
13
- * @typedef {import('./types').AsyncValidatorFunction} AsyncValidatorFunction
14
- * @typedef {import('./types').AITransformerContext} ExportedAPI
15
- *
16
- */
17
-
18
- //env
19
- import dotenv from 'dotenv';
20
- dotenv.config();
21
- const { NODE_ENV = "unknown", GEMINI_API_KEY, LOG_LEVEL = "" } = process.env;
22
-
23
-
24
-
25
- //deps
26
- import { GoogleGenAI, HarmCategory, HarmBlockThreshold, ThinkingLevel } from '@google/genai';
27
- import u from 'ak-tools';
28
- import path from 'path';
29
- import log from './logger.js';
30
- export { log };
31
- export { ThinkingLevel, HarmCategory, HarmBlockThreshold };
32
-
33
-
34
-
35
- // defaults
36
- const DEFAULT_SAFETY_SETTINGS = [
37
- { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
38
- { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE }
39
- ];
40
-
41
- const DEFAULT_SYSTEM_INSTRUCTIONS = `
42
- You are an expert JSON transformation engine. Your task is to accurately convert data payloads from one format to another.
43
-
44
- You will be provided with example transformations (Source JSON -> Target JSON).
45
-
46
- Learn the mapping rules from these examples.
47
-
48
- When presented with new Source JSON, apply the learned transformation rules to produce a new Target JSON payload.
49
-
50
- Always respond ONLY with a valid JSON object that strictly adheres to the expected output format.
51
-
52
- Do not include any additional text, explanations, or formatting before or after the JSON object.
53
- `;
54
-
55
- const DEFAULT_THINKING_CONFIG = {
56
- thinkingBudget: 0
57
- };
58
-
59
- const DEFAULT_MAX_OUTPUT_TOKENS = 50_000; // Default ceiling for output tokens
60
-
61
- // Models that support thinking features (as of Dec 2024)
62
- // Using regex patterns for more precise matching
63
- const THINKING_SUPPORTED_MODELS = [
64
- /^gemini-3-flash(-preview)?$/,
65
- /^gemini-3-pro(-preview|-image-preview)?$/,
66
- /^gemini-2\.5-pro/,
67
- /^gemini-2\.5-flash(-preview)?$/,
68
- /^gemini-2\.5-flash-lite(-preview)?$/,
69
- /^gemini-2\.0-flash$/ // Experimental support, exact match only
70
- ];
71
-
72
- const DEFAULT_CHAT_CONFIG = {
73
- responseMimeType: 'application/json',
74
- temperature: 0.2,
75
- topP: 0.95,
76
- topK: 64,
77
- systemInstruction: DEFAULT_SYSTEM_INSTRUCTIONS,
78
- safetySettings: DEFAULT_SAFETY_SETTINGS
79
- };
80
-
81
- /**
82
- * @typedef {import('./types').AITransformer} AITransformerUtility
83
- */
84
-
85
-
86
-
87
- /**
88
- * main export class for AI Transformer
89
- * @class AITransformer
90
- * @type {AITransformerUtility}
91
- * @description A class that provides methods to initialize, seed, transform, and manage AI-based transformations using Google Gemini API.
92
- * @implements {ExportedAPI}
93
- */
94
- class AITransformer {
95
- /**
96
- * @param {AITransformerOptions} [options={}] - Configuration options for the transformer
97
- *
98
- */
99
- constructor(options = {}) {
100
- this.modelName = "";
101
- this.promptKey = "";
102
- this.answerKey = "";
103
- this.contextKey = "";
104
- this.explanationKey = "";
105
- this.systemInstructionKey = "";
106
- this.maxRetries = 3;
107
- this.retryDelay = 1000;
108
- // this.systemInstructions = "";
109
- this.chatConfig = {};
110
- this.apiKey = GEMINI_API_KEY;
111
- this.onlyJSON = true; // always return JSON
112
- this.asyncValidator = null; // for transformWithValidation
113
- this.logLevel = 'info'; // default log level
114
- this.lastResponseMetadata = null; // stores metadata from last API response
115
- this.exampleCount = 0; // tracks number of example history items from seed()
116
- // Cumulative usage tracking across retry attempts
117
- this._cumulativeUsage = {
118
- promptTokens: 0,
119
- responseTokens: 0,
120
- totalTokens: 0,
121
- attempts: 0
122
- };
123
- AITransformFactory.call(this, options);
124
-
125
- //external API
126
- this.init = initChat.bind(this);
127
- this.seed = seedWithExamples.bind(this);
128
-
129
- // Internal "raw" message sender
130
- this.rawMessage = rawMessage.bind(this);
131
-
132
- // The public `.message()` method uses the GLOBAL validator
133
- this.message = (payload, opts = {}, validatorFn = null) => {
134
-
135
- return prepareAndValidateMessage.call(this, payload, opts, validatorFn || this.asyncValidator);
136
- };
137
-
138
- this.rebuild = rebuildPayload.bind(this);
139
- this.reset = resetChat.bind(this);
140
- this.getHistory = getChatHistory.bind(this);
141
- this.messageAndValidate = prepareAndValidateMessage.bind(this);
142
- this.transformWithValidation = prepareAndValidateMessage.bind(this);
143
- this.estimate = estimateInputTokens.bind(this);
144
- this.updateSystemInstructions = updateSystemInstructions.bind(this);
145
- this.estimateCost = estimateCost.bind(this);
146
- this.clearConversation = clearConversation.bind(this);
147
- this.getLastUsage = getLastUsage.bind(this);
148
- }
149
- }
150
-
151
- export default AITransformer;
152
- export { attemptJSONRecovery }; // Export for testing
153
- export { default as AIAgent } from './agent.js';
154
-
155
- /**
156
- * factory function to create an AI Transformer instance
157
- * @param {AITransformerOptions} [options={}] - Configuration options for the transformer
158
- * @returns {void} - An instance of AITransformer with initialized properties and methods
159
- */
160
- function AITransformFactory(options = {}) {
161
- // ? https://ai.google.dev/gemini-api/docs/models
162
- this.modelName = options.modelName || 'gemini-2.5-flash';
163
-
164
- // Only use default if systemInstructions was not provided at all
165
- if (options.systemInstructions === undefined) {
166
- this.systemInstructions = DEFAULT_SYSTEM_INSTRUCTIONS;
167
- } else {
168
- // Use the provided value (could be null, false, or a custom string)
169
- this.systemInstructions = options.systemInstructions;
170
- }
171
-
172
- // Configure log level - priority: options.logLevel > LOG_LEVEL env > NODE_ENV based defaults > 'info'
173
- if (options.logLevel) {
174
- this.logLevel = options.logLevel;
175
- if (this.logLevel === 'none') {
176
- // Set to silent to disable all logging
177
- log.level = 'silent';
178
- } else {
179
- // Set the log level as specified
180
- log.level = this.logLevel;
181
- }
182
- } else if (LOG_LEVEL) {
183
- // Use environment variable if no option specified
184
- this.logLevel = LOG_LEVEL;
185
- log.level = LOG_LEVEL;
186
- } else if (NODE_ENV === 'dev') {
187
- this.logLevel = 'debug';
188
- log.level = 'debug';
189
- } else if (NODE_ENV === 'test') {
190
- this.logLevel = 'warn';
191
- log.level = 'warn';
192
- } else if (NODE_ENV.startsWith('prod')) {
193
- this.logLevel = 'error';
194
- log.level = 'error';
195
- } else {
196
- // Default to info
197
- this.logLevel = 'info';
198
- log.level = 'info';
199
- }
200
-
201
- // Vertex AI configuration
202
- this.vertexai = options.vertexai || false;
203
- this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
204
- this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || undefined;
205
- this.googleAuthOptions = options.googleAuthOptions || null;
206
-
207
- // API Key (for Gemini API, not Vertex AI)
208
- this.apiKey = options.apiKey !== undefined && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
209
-
210
- // Validate authentication - need either API key (for Gemini API) or Vertex AI config
211
- if (!this.vertexai && !this.apiKey) {
212
- throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
213
- }
214
- if (this.vertexai && !this.project) {
215
- throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
216
- }
217
-
218
- // Build chat config, making sure systemInstruction uses the custom instructions
219
- this.chatConfig = {
220
- ...DEFAULT_CHAT_CONFIG,
221
- ...options.chatConfig
222
- };
223
-
224
- // Handle systemInstructions: use custom if provided, otherwise keep default from DEFAULT_CHAT_CONFIG
225
- // If explicitly set to null/false, remove it entirely
226
- if (this.systemInstructions) {
227
- this.chatConfig.systemInstruction = this.systemInstructions;
228
- } else if (options.systemInstructions !== undefined) {
229
- // Explicitly set to null/false/empty - remove system instruction
230
- delete this.chatConfig.systemInstruction;
231
- }
232
-
233
- // Handle maxOutputTokens with explicit null check
234
- // Priority: options.maxOutputTokens > options.chatConfig.maxOutputTokens > DEFAULT
235
- // Setting to null explicitly removes the limit
236
- if (options.maxOutputTokens !== undefined) {
237
- if (options.maxOutputTokens === null) {
238
- delete this.chatConfig.maxOutputTokens;
239
- } else {
240
- this.chatConfig.maxOutputTokens = options.maxOutputTokens;
241
- }
242
- } else if (options.chatConfig?.maxOutputTokens !== undefined) {
243
- if (options.chatConfig.maxOutputTokens === null) {
244
- delete this.chatConfig.maxOutputTokens;
245
- } else {
246
- this.chatConfig.maxOutputTokens = options.chatConfig.maxOutputTokens;
247
- }
248
- } else {
249
- this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
250
- }
251
-
252
- // Only add thinkingConfig if the model supports it
253
- const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(pattern =>
254
- pattern.test(this.modelName)
255
- );
256
-
257
- // Handle thinkingConfig - null explicitly removes it, undefined means not specified
258
- if (options.thinkingConfig !== undefined) {
259
- if (options.thinkingConfig === null) {
260
- // Explicitly remove thinkingConfig if set to null
261
- delete this.chatConfig.thinkingConfig;
262
- if (log.level !== 'silent') {
263
- log.debug(`thinkingConfig set to null - removed from configuration`);
264
- }
265
- } else if (modelSupportsThinking) {
266
- // Handle thinkingConfig - merge with defaults
267
- const thinkingConfig = {
268
- ...DEFAULT_THINKING_CONFIG,
269
- ...options.thinkingConfig
270
- };
271
-
272
- // Gemini API does not allow both thinkingBudget and thinkingLevel together.
273
- // If user specified thinkingLevel, remove thinkingBudget (user preference wins)
274
- if (options.thinkingConfig?.thinkingLevel !== undefined) {
275
- delete thinkingConfig.thinkingBudget;
276
- }
277
-
278
- this.chatConfig.thinkingConfig = thinkingConfig;
279
-
280
- if (log.level !== 'silent') {
281
- log.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig: ${JSON.stringify(thinkingConfig)}`);
282
- }
283
- } else {
284
- if (log.level !== 'silent') {
285
- log.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
286
- }
287
- }
288
- }
289
-
290
- // response schema is optional, but if provided, it should be a valid JSON schema
291
- if (options.responseSchema) {
292
- this.chatConfig.responseSchema = options.responseSchema;
293
- }
294
-
295
- // examples file is optional, but if provided, it should contain valid PROMPT and ANSWER keys
296
- this.examplesFile = options.examplesFile || null;
297
- this.exampleData = options.exampleData || null; // can be used instead of examplesFile
298
-
299
- // Use configurable keys with fallbacks
300
- this.promptKey = options.promptKey || options.sourceKey || 'PROMPT';
301
- this.answerKey = options.answerKey || options.targetKey || 'ANSWER';
302
- this.contextKey = options.contextKey || 'CONTEXT'; // Optional key for context
303
- this.explanationKey = options.explanationKey || 'EXPLANATION'; // Optional key for explanations
304
- this.systemInstructionsKey = options.systemInstructionsKey || 'SYSTEM'; // Optional key for system instructions
305
-
306
- // Retry configuration
307
- this.maxRetries = options.maxRetries || 3;
308
- this.retryDelay = options.retryDelay || 1000;
309
-
310
- //allow async validation function
311
- this.asyncValidator = options.asyncValidator || null; // Function to validate transformed payloads
312
-
313
- //are we forcing json responses only?
314
- this.onlyJSON = options.onlyJSON !== undefined ? options.onlyJSON : true; // If true, only return JSON responses
315
-
316
- // Grounding configuration (disabled by default to avoid costs)
317
- this.enableGrounding = options.enableGrounding || false;
318
- this.groundingConfig = options.groundingConfig || {};
319
-
320
- // Billing labels for cost segmentation (Vertex AI only)
321
- this.labels = options.labels || {};
322
- if (Object.keys(this.labels).length > 0 && log.level !== 'silent') {
323
- if (!this.vertexai) {
324
- log.warn(`Billing labels are only supported with Vertex AI. Labels will be ignored.`);
325
- } else {
326
- log.debug(`Billing labels configured: ${JSON.stringify(this.labels)}`);
327
- }
328
- }
329
-
330
- if (this.promptKey === this.answerKey) {
331
- throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
332
- }
333
-
334
- if (log.level !== 'silent') {
335
- log.debug(`Creating AI Transformer with model: ${this.modelName}`);
336
- log.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
337
- log.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
338
- // Log authentication method
339
- if (this.vertexai) {
340
- log.debug(`Using Vertex AI - Project: ${this.project}, Location: ${this.location || 'global (default)'}`);
341
- if (this.googleAuthOptions?.keyFilename) {
342
- log.debug(`Auth: Service account key file: ${this.googleAuthOptions.keyFilename}`);
343
- } else if (this.googleAuthOptions?.credentials) {
344
- log.debug(`Auth: Inline credentials provided`);
345
- } else {
346
- log.debug(`Auth: Application Default Credentials (ADC)`);
347
- }
348
- } else {
349
- log.debug(`Using Gemini API with key: ${this.apiKey.substring(0, 10)}...`);
350
- }
351
- log.debug(`Grounding ${this.enableGrounding ? 'ENABLED' : 'DISABLED'} (costs $35/1k queries)`);
352
- }
353
-
354
- // Initialize Google GenAI client with appropriate configuration
355
- const clientOptions = this.vertexai
356
- ? {
357
- vertexai: true,
358
- project: this.project,
359
- ...(this.location && { location: this.location }),
360
- ...(this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions })
361
- }
362
- : { apiKey: this.apiKey };
363
-
364
- const ai = new GoogleGenAI(clientOptions);
365
- this.genAIClient = ai;
366
- this.chat = null;
367
- }
368
-
369
- /**
370
- * Initializes the chat session with the specified model and configurations.
371
- * @param {boolean} [force=false] - If true, forces reinitialization of the chat session.
372
- * @this {ExportedAPI}
373
- * @returns {Promise<void>}
374
- */
375
- async function initChat(force = false) {
376
- if (this.chat && !force) return;
377
-
378
- log.debug(`Initializing Gemini chat session with model: ${this.modelName}...`);
379
-
380
- // Add grounding tools if enabled
381
- const chatOptions = {
382
- model: this.modelName,
383
- // @ts-ignore
384
- config: {
385
- ...this.chatConfig,
386
- ...(this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels })
387
- },
388
- history: [],
389
- };
390
-
391
- // Only add tools if grounding is explicitly enabled
392
- if (this.enableGrounding) {
393
- chatOptions.config.tools = [{
394
- googleSearch: this.groundingConfig
395
- }];
396
- log.debug(`Search grounding ENABLED for this session (WARNING: costs $35/1k queries)`);
397
- }
398
-
399
- this.chat = await this.genAIClient.chats.create(chatOptions);
400
-
401
- try {
402
- await this.genAIClient.models.list();
403
- log.debug("Gemini API connection successful.");
404
- } catch (e) {
405
- throw new Error(`Gemini chat initialization failed: ${e.message}`);
406
- }
407
-
408
-
409
-
410
- log.debug("Gemini chat session initialized.");
411
- }
412
-
413
- /**
414
- * Seeds the chat session with example transformations.
415
- * @this {ExportedAPI}
416
- * @param {TransformationExample[]} [examples] - An array of transformation examples.
417
- * @this {ExportedAPI}
418
- * @returns {Promise<void>}
419
- */
420
- async function seedWithExamples(examples) {
421
- await this.init();
422
-
423
- if (!examples || !Array.isArray(examples) || examples.length === 0) {
424
- if (this.examplesFile) {
425
- log.debug(`No examples provided, loading from file: ${this.examplesFile}`);
426
- try {
427
- // @ts-ignore
428
- examples = await u.load(path.resolve(this.examplesFile), true);
429
- }
430
- catch (err) {
431
- throw new Error(`Could not load examples from file: ${this.examplesFile}. Please check the file path and format.`);
432
- }
433
- }
434
-
435
- else if (this.exampleData) {
436
- log.debug(`Using example data provided in options.`);
437
- if (Array.isArray(this.exampleData)) {
438
- examples = this.exampleData;
439
- } else {
440
- throw new Error(`Invalid example data provided. Expected an array of examples.`);
441
- }
442
- }
443
-
444
- else {
445
- log.debug("No examples provided and no examples file specified. Skipping seeding.");
446
- return;
447
- }
448
- }
449
-
450
- const instructionExample = examples.find(ex => ex[this.systemInstructionsKey]);
451
- if (instructionExample) {
452
- log.debug(`Found system instructions in examples; reinitializing chat with new instructions.`);
453
- this.systemInstructions = instructionExample[this.systemInstructionsKey];
454
- this.chatConfig.systemInstruction = this.systemInstructions;
455
- await this.init(true); // Reinitialize chat with new system instructions
456
- }
457
-
458
- log.debug(`Seeding chat with ${examples.length} transformation examples...`);
459
- const historyToAdd = [];
460
-
461
- for (const example of examples) {
462
- // Use the configurable keys from constructor
463
- const contextValue = example[this.contextKey] || "";
464
- const promptValue = example[this.promptKey] || "";
465
- const answerValue = example[this.answerKey] || "";
466
- const explanationValue = example[this.explanationKey] || "";
467
- let userText = "";
468
- let modelResponse = {};
469
-
470
- // Add context as user message with special formatting to make it part of the example flow
471
- if (contextValue) {
472
- let contextText = isJSON(contextValue) ? JSON.stringify(contextValue, null, 2) : contextValue;
473
- // Prefix context to make it clear it's contextual information
474
- userText += `CONTEXT:\n${contextText}\n\n`;
475
- }
476
-
477
- if (promptValue) {
478
- let promptText = isJSON(promptValue) ? JSON.stringify(promptValue, null, 2) : promptValue;
479
- userText += promptText;
480
- }
481
-
482
- if (answerValue) modelResponse.data = answerValue;
483
- if (explanationValue) modelResponse.explanation = explanationValue;
484
- const modelText = JSON.stringify(modelResponse, null, 2);
485
-
486
- if (userText.trim().length && modelText.trim().length > 0) {
487
- historyToAdd.push({ role: 'user', parts: [{ text: userText.trim() }] });
488
- historyToAdd.push({ role: 'model', parts: [{ text: modelText.trim() }] });
489
- }
490
-
491
- }
492
-
493
-
494
- const currentHistory = this?.chat?.getHistory() || [];
495
- log.debug(`Adding ${historyToAdd.length} examples to chat history (${currentHistory.length} current examples)...`);
496
- this.chat = await this.genAIClient.chats.create({
497
- model: this.modelName,
498
- // @ts-ignore
499
- config: {
500
- ...this.chatConfig,
501
- ...(this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels })
502
- },
503
- history: [...currentHistory, ...historyToAdd],
504
- });
505
-
506
- // Track example count for clearConversation() and stateless messages
507
- this.exampleCount = currentHistory.length + historyToAdd.length;
508
-
509
- const newHistory = this.chat.getHistory();
510
- log.debug(`Created new chat session with ${newHistory.length} examples.`);
511
- return newHistory;
512
- }
513
-
514
- /**
515
- * Transforms a source JSON payload into a target JSON payload
516
- * @param {Object} sourcePayload - The source payload (as a JavaScript object).
517
- * @returns {Promise<Object>} - The transformed target payload (as a JavaScript object).
518
- * @throws {Error} If the transformation fails or returns invalid JSON.
519
- */
520
- /**
521
- * (Internal) Sends a single prompt to the model and parses the response.
522
- * No validation or retry logic.
523
- * @this {ExportedAPI}
524
- * @param {Object|string} sourcePayload - The source payload.
525
- * @param {Object} [messageOptions] - Optional per-message options (e.g., labels).
526
- * @returns {Promise<Object>} - The transformed payload.
527
- */
528
- async function rawMessage(sourcePayload, messageOptions = {}) {
529
- if (!this.chat) {
530
- throw new Error("Chat session not initialized.");
531
- }
532
-
533
- const actualPayload = typeof sourcePayload === 'string'
534
- ? sourcePayload
535
- : JSON.stringify(sourcePayload, null, 2);
536
-
537
- // Merge instance labels with per-message labels (per-message takes precedence)
538
- // Labels only supported with Vertex AI
539
- const mergedLabels = { ...this.labels, ...(messageOptions.labels || {}) };
540
- const hasLabels = this.vertexai && Object.keys(mergedLabels).length > 0;
541
-
542
- try {
543
- const sendParams = { message: actualPayload };
544
-
545
- // Add config with labels if we have any (Vertex AI only)
546
- if (hasLabels) {
547
- sendParams.config = { labels: mergedLabels };
548
- }
549
-
550
- const result = await this.chat.sendMessage(sendParams);
551
-
552
- // Capture and log response metadata for model verification and debugging
553
- this.lastResponseMetadata = {
554
- modelVersion: result.modelVersion || null,
555
- requestedModel: this.modelName,
556
- promptTokens: result.usageMetadata?.promptTokenCount || 0,
557
- responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
558
- totalTokens: result.usageMetadata?.totalTokenCount || 0,
559
- timestamp: Date.now()
560
- };
561
-
562
- if (result.usageMetadata && log.level !== 'silent') {
563
- log.debug(`API response metadata: ${JSON.stringify({
564
- modelVersion: result.modelVersion || 'not-provided',
565
- requestedModel: this.modelName,
566
- promptTokens: result.usageMetadata.promptTokenCount,
567
- responseTokens: result.usageMetadata.candidatesTokenCount,
568
- totalTokens: result.usageMetadata.totalTokenCount
569
- })}`);
570
- }
571
-
572
- const modelResponse = result.text;
573
- const extractedJSON = extractJSON(modelResponse); // Assuming extractJSON is defined
574
-
575
- // Unwrap the 'data' property if it exists
576
- if (extractedJSON?.data) {
577
- return extractedJSON.data;
578
- }
579
- return extractedJSON;
580
-
581
- } catch (error) {
582
- if (this.onlyJSON && error.message.includes("Could not extract valid JSON")) {
583
- throw new Error(`Invalid JSON response from Gemini: ${error.message}`);
584
- }
585
- // For other API errors, just re-throw
586
- throw new Error(`Transformation failed: ${error.message}`);
587
- }
588
- }
589
-
590
- /**
591
- * (Engine) Transforms a payload with validation and automatic retry logic.
592
- * @this {ExportedAPI}
593
- * @param {Object} sourcePayload - The source payload to transform.
594
- * @param {Object} [options] - Options for the validation process.
595
- * @param {AsyncValidatorFunction | null} validatorFn - The specific validator to use for this run.
596
- * @returns {Promise<Object>} - The validated transformed payload.
597
- */
598
- async function prepareAndValidateMessage(sourcePayload, options = {}, validatorFn = null) {
599
- if (!this.chat) {
600
- throw new Error("Chat session not initialized. Please call init() first.");
601
- }
602
-
603
- // Handle stateless messages separately - they don't add to chat history
604
- if (options.stateless) {
605
- return await statelessMessage.call(this, sourcePayload, options, validatorFn);
606
- }
607
-
608
- const maxRetries = options.maxRetries ?? this.maxRetries;
609
- const retryDelay = options.retryDelay ?? this.retryDelay;
610
-
611
- // Check if grounding should be enabled for this specific message
612
- const enableGroundingForMessage = options.enableGrounding ?? this.enableGrounding;
613
- const groundingConfigForMessage = options.groundingConfig ?? this.groundingConfig;
614
-
615
- // Reinitialize chat if grounding settings changed for this message
616
- if (enableGroundingForMessage !== this.enableGrounding) {
617
- const originalGrounding = this.enableGrounding;
618
- const originalConfig = this.groundingConfig;
619
-
620
- try {
621
- // Temporarily change grounding settings
622
- this.enableGrounding = enableGroundingForMessage;
623
- this.groundingConfig = groundingConfigForMessage;
624
-
625
- // Force reinit with new settings
626
- await this.init(true);
627
-
628
- // Log the change
629
- if (enableGroundingForMessage) {
630
- log.warn(`Search grounding ENABLED for this message (WARNING: costs $35/1k queries)`);
631
- } else {
632
- log.debug(`Search grounding DISABLED for this message`);
633
- }
634
- } catch (error) {
635
- // Restore original settings on error
636
- this.enableGrounding = originalGrounding;
637
- this.groundingConfig = originalConfig;
638
- throw error;
639
- }
640
-
641
- // Schedule restoration after message completes
642
- const restoreGrounding = async () => {
643
- this.enableGrounding = originalGrounding;
644
- this.groundingConfig = originalConfig;
645
- await this.init(true);
646
- };
647
-
648
- // Store restoration function to call after message completes
649
- options._restoreGrounding = restoreGrounding;
650
- }
651
-
652
- let lastError = null;
653
- let lastPayload = null; // Store the payload that caused the validation error
654
-
655
- // Prepare the payload
656
- if (sourcePayload && isJSON(sourcePayload)) {
657
- lastPayload = JSON.stringify(sourcePayload, null, 2);
658
- } else if (typeof sourcePayload === 'string') {
659
- lastPayload = sourcePayload;
660
- }
661
- else if (typeof sourcePayload === 'boolean' || typeof sourcePayload === 'number') {
662
- lastPayload = sourcePayload.toString();
663
- }
664
- else if (sourcePayload === null || sourcePayload === undefined) {
665
- lastPayload = JSON.stringify({}); // Convert null/undefined to empty object
666
- }
667
- else {
668
- throw new Error("Invalid source payload. Must be a JSON object or string.");
669
- }
670
-
671
- // Extract per-message labels for passing to rawMessage
672
- const messageOptions = {};
673
- if (options.labels) {
674
- messageOptions.labels = options.labels;
675
- }
676
-
677
- // Reset cumulative usage tracking for this message call
678
- this._cumulativeUsage = {
679
- promptTokens: 0,
680
- responseTokens: 0,
681
- totalTokens: 0,
682
- attempts: 0
683
- };
684
-
685
- for (let attempt = 0; attempt <= maxRetries; attempt++) {
686
- try {
687
- // Step 1: Get the transformed payload
688
- const transformedPayload = (attempt === 0)
689
- ? await this.rawMessage(lastPayload, messageOptions) // Use the new raw method with per-message options
690
- : await this.rebuild(lastPayload, lastError.message);
691
-
692
- // Accumulate token usage from this attempt
693
- if (this.lastResponseMetadata) {
694
- this._cumulativeUsage.promptTokens += this.lastResponseMetadata.promptTokens || 0;
695
- this._cumulativeUsage.responseTokens += this.lastResponseMetadata.responseTokens || 0;
696
- this._cumulativeUsage.totalTokens += this.lastResponseMetadata.totalTokens || 0;
697
- this._cumulativeUsage.attempts = attempt + 1;
698
- }
699
-
700
- lastPayload = transformedPayload; // Always update lastPayload *before* validation
701
-
702
- // Step 2: Validate if a validator is provided
703
- if (validatorFn) {
704
- await validatorFn(transformedPayload); // Validator throws on failure
705
- }
706
-
707
- // Step 3: Success!
708
- log.debug(`Transformation succeeded on attempt ${attempt + 1}`);
709
-
710
- // Restore original grounding settings if they were changed
711
- if (options._restoreGrounding) {
712
- await options._restoreGrounding();
713
- }
714
-
715
- return transformedPayload;
716
-
717
- } catch (error) {
718
- lastError = error;
719
- log.warn(`Attempt ${attempt + 1} failed: ${error.message}`);
720
-
721
- if (attempt >= maxRetries) {
722
- log.error(`All ${maxRetries + 1} attempts failed.`)
723
- ;
724
- // Restore original grounding settings even on failure
725
- if (options._restoreGrounding) {
726
- await options._restoreGrounding();
727
- }
728
-
729
- throw new Error(`Transformation failed after ${maxRetries + 1} attempts. Last error: ${error.message}`);
730
- }
731
-
732
- // Wait before retrying
733
- const delay = retryDelay * Math.pow(2, attempt);
734
- await new Promise(res => setTimeout(res, delay));
735
- }
736
- }
737
- }
738
-
739
- /**
740
- * Rebuilds a payload based on server error feedback
741
- * @this {ExportedAPI}
742
- * @param {Object} lastPayload - The payload that failed validation
743
- * @param {string} serverError - The error message from the server
744
- * @returns {Promise<Object>} - A new corrected payload
745
- * @throws {Error} If the rebuild process fails.
746
- */
747
- async function rebuildPayload(lastPayload, serverError) {
748
- await this.init(); // Ensure chat is initialized
749
- const prompt = `
750
- The previous JSON payload (below) failed validation.
751
- The server's error message is quoted afterward.
752
-
753
- ---------------- BAD PAYLOAD ----------------
754
- ${JSON.stringify(lastPayload, null, 2)}
755
-
756
-
757
- ---------------- SERVER ERROR ----------------
758
- ${serverError}
759
-
760
- Please return a NEW JSON payload that corrects the issue.
761
- Respond with JSON only – no comments or explanations.
762
- `;
763
-
764
- let result;
765
- try {
766
- result = await this.chat.sendMessage({ message: prompt });
767
-
768
- // Capture and log response metadata for rebuild calls too
769
- this.lastResponseMetadata = {
770
- modelVersion: result.modelVersion || null,
771
- requestedModel: this.modelName,
772
- promptTokens: result.usageMetadata?.promptTokenCount || 0,
773
- responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
774
- totalTokens: result.usageMetadata?.totalTokenCount || 0,
775
- timestamp: Date.now()
776
- };
777
-
778
- if (result.usageMetadata && log.level !== 'silent') {
779
- log.debug(`Rebuild response metadata - tokens used: ${result.usageMetadata.totalTokenCount}`);
780
- }
781
- } catch (err) {
782
- throw new Error(`Gemini call failed while repairing payload: ${err.message}`);
783
- }
784
-
785
- try {
786
- const text = result.text ?? result.response ?? '';
787
- return typeof text === 'object' ? text : JSON.parse(text);
788
- } catch (parseErr) {
789
- throw new Error(`Gemini returned non-JSON while repairing payload: ${parseErr.message}`);
790
- }
791
- }
792
-
793
-
794
-
795
-
796
/**
 * Estimate INPUT tokens only for a payload before sending.
 * Counts system instructions + current chat history (seeded examples and
 * real turns) + the prospective user message. Output tokens cannot be
 * predicted before the API call — use getLastUsage() after message() to
 * see actual input + output consumption.
 * @this {ExportedAPI}
 * @param {object|string} nextPayload - The next user message to be sent (object or string)
 * @returns {Promise<{ inputTokens: number }>} - Estimated input token count
 */
async function estimateInputTokens(nextPayload) {
	const contents = [];

	// System instructions count against the input budget too.
	if (this.systemInstructions) {
		contents.push({ parts: [{ text: this.systemInstructions }] });
	}

	// Fold in every turn already in the session (seeded examples + real turns).
	const history = typeof this.chat?.getHistory === "function"
		? this.chat.getHistory()
		: null;
	if (Array.isArray(history) && history.length > 0) {
		contents.push(...history);
	}

	// Finally, the message we are about to send.
	const serialized = typeof nextPayload === "string"
		? nextPayload
		: JSON.stringify(nextPayload, null, 2);
	contents.push({ parts: [{ text: serialized }] });

	// Delegate the actual counting to Gemini's token estimator.
	const counted = await this.genAIClient.models.countTokens({
		model: this.modelName,
		contents,
	});

	// Clear naming on purpose: this is INPUT tokens only.
	return { inputTokens: counted.totalTokens };
}
843
-
844
// Model pricing per million tokens (as of Dec 2025)
// https://ai.google.dev/gemini-api/docs/pricing
const MODEL_PRICING = {
	'gemini-2.5-flash': { input: 0.15, output: 0.60 },
	'gemini-2.5-flash-lite': { input: 0.02, output: 0.10 },
	'gemini-2.5-pro': { input: 2.50, output: 10.00 },
	'gemini-3-pro': { input: 2.00, output: 12.00 },
	'gemini-3-pro-preview': { input: 2.00, output: 12.00 },
	'gemini-2.0-flash': { input: 0.10, output: 0.40 },
	'gemini-2.0-flash-lite': { input: 0.02, output: 0.10 }
};

/**
 * Estimates the cost of sending a payload based on input token count and
 * per-model pricing. Only the INPUT side is estimated — output cost depends
 * on response length and cannot be known in advance. Unknown models fall
 * back to zero-cost pricing.
 * @this {ExportedAPI}
 * @param {object|string} nextPayload - The next user message to be sent (object or string)
 * @returns {Promise<Object>} - Cost estimation including input tokens, model, pricing, and estimated input cost
 */
async function estimateCost(nextPayload) {
	const { inputTokens } = await this.estimate(nextPayload);
	const pricing = MODEL_PRICING[this.modelName] ?? { input: 0, output: 0 };
	const estimatedInputCost = (inputTokens / 1_000_000) * pricing.input;

	return {
		inputTokens,
		model: this.modelName,
		pricing,
		estimatedInputCost,
		note: 'Cost is for input tokens only; output cost depends on response length'
	};
}
875
-
876
-
877
/**
 * Resets the current chat session, clearing all history and examples.
 * Grounding tools are re-attached when enableGrounding is set.
 * @this {ExportedAPI}
 * @returns {Promise<void>}
 */
async function resetChat() {
	// Guard clause: nothing to reset before init().
	if (!this.chat) {
		log.warn("Cannot reset chat session: chat not yet initialized.");
		return;
	}

	log.debug("Resetting Gemini chat session...");

	// Rebuild the session config; labels are a Vertex-AI-only feature.
	const config = { ...this.chatConfig };
	if (this.vertexai && Object.keys(this.labels).length > 0) {
		config.labels = this.labels;
	}

	// Only add tools if grounding is explicitly enabled
	if (this.enableGrounding) {
		config.tools = [{
			googleSearch: this.groundingConfig
		}];
		log.debug(`Search grounding preserved during reset (WARNING: costs $35/1k queries)`);
	}

	// @ts-ignore
	this.chat = await this.genAIClient.chats.create({
		model: this.modelName,
		config,
		history: [],
	});
	log.debug("Chat session reset.");
}
911
-
912
/**
 * Retrieves the current conversation history for debugging or inspection.
 * Returns an empty array (with a warning) if the session was never initialized.
 * @returns {Array<Object>} - An array of message objects in the conversation.
 */
function getChatHistory() {
	if (this.chat) {
		return this.chat.getHistory();
	}
	log.warn("Chat session not initialized. No history available.");
	return [];
}
923
-
924
/**
 * Updates system instructions and reinitializes the chat session.
 *
 * Fix: the previous version validated the raw string but stored the trimmed
 * one, so a whitespace-only argument passed validation and installed EMPTY
 * system instructions. The trimmed value is now validated instead.
 *
 * @this {ExportedAPI}
 * @param {string} newInstructions - The new system instructions
 * @returns {Promise<void>}
 * @throws {Error} If the instructions are not a non-empty (post-trim) string.
 */
async function updateSystemInstructions(newInstructions) {
	if (typeof newInstructions !== 'string') {
		throw new Error('System instructions must be a non-empty string');
	}

	const trimmed = newInstructions.trim();
	if (!trimmed) {
		// Whitespace-only input would otherwise wipe the instructions.
		throw new Error('System instructions must be a non-empty string');
	}

	this.systemInstructions = trimmed;
	this.chatConfig.systemInstruction = trimmed;

	log.debug('Updating system instructions and reinitializing chat...');
	await this.init(true); // Force reinitialize with new instructions
}
941
-
942
/**
 * Clears conversation history while preserving seeded examples.
 * Useful for starting a fresh conversation within the same session
 * without losing the few-shot learning examples.
 * @this {ExportedAPI}
 * @returns {Promise<void>}
 */
async function clearConversation() {
	if (!this.chat) {
		log.warn("Cannot clear conversation: chat not initialized.");
		return;
	}

	// Keep only the seeded example turns at the head of the history.
	const preserved = this.chat.getHistory().slice(0, this.exampleCount || 0);

	// Rebuild session config; labels apply to Vertex AI only.
	const config = { ...this.chatConfig };
	if (this.vertexai && Object.keys(this.labels).length > 0) {
		config.labels = this.labels;
	}

	// @ts-ignore
	this.chat = await this.genAIClient.chats.create({
		model: this.modelName,
		config,
		history: preserved,
	});

	// Reset usage tracking for the new conversation
	this.lastResponseMetadata = null;
	this._cumulativeUsage = {
		promptTokens: 0,
		responseTokens: 0,
		totalTokens: 0,
		attempts: 0
	};

	log.debug(`Conversation cleared. Preserved ${preserved.length} example items.`);
}
979
-
980
- /**
981
- * Returns structured usage data from the last message call for billing verification.
982
- * Includes CUMULATIVE token counts across all retry attempts.
983
- * Call this after message() or statelessMessage() to get actual token consumption.
4
+ * Exports:
5
+ * - Transformer AI-powered JSON transformation via few-shot learning
6
+ * - Chat — Multi-turn text conversation with AI
7
+ * - Message — Stateless one-off messages to AI
8
+ * - ToolAgent AI agent with user-provided tools
9
+ * - CodeAgent AI agent that writes and executes code (stub)
10
+ * - BaseGemini — Base class for building custom wrappers
984
11
  *
985
- * @this {ExportedAPI}
986
- * @returns {Object|null} Usage data with promptTokens, responseTokens, totalTokens, attempts, etc.
987
- * Returns null if no API call has been made yet.
988
- */
989
/**
 * Returns structured usage data from the last message call for billing
 * verification. Token counts are CUMULATIVE across retry attempts whenever
 * retry tracking was active; otherwise they reflect the last response only.
 *
 * Fix: the fallback default previously used `attempts: 1`, which made
 * `useCumulative` true even when `_cumulativeUsage` was never set — the
 * documented "fall back to last response" branch was unreachable and the
 * method reported zero tokens. The default is now `attempts: 0`.
 *
 * @this {ExportedAPI}
 * @returns {Object|null} Usage data with promptTokens, responseTokens, totalTokens, attempts, etc.
 *                        Returns null if no API call has been made yet.
 */
function getLastUsage() {
	if (!this.lastResponseMetadata) {
		return null;
	}

	const meta = this.lastResponseMetadata;
	// attempts: 0 signals "no cumulative tracking occurred" → use meta below.
	const cumulative = this._cumulativeUsage || { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 0 };

	// Use cumulative tokens if tracking was active (attempts > 0), otherwise fall back to last response
	const useCumulative = cumulative.attempts > 0;

	return {
		// Token breakdown for billing - CUMULATIVE across all retry attempts
		promptTokens: useCumulative ? cumulative.promptTokens : meta.promptTokens,
		responseTokens: useCumulative ? cumulative.responseTokens : meta.responseTokens,
		totalTokens: useCumulative ? cumulative.totalTokens : meta.totalTokens,

		// Number of attempts (1 = success on first try, 2+ = retries were needed)
		attempts: useCumulative ? cumulative.attempts : 1,

		// Model verification for billing cross-check
		modelVersion: meta.modelVersion, // Actual model that responded (e.g., 'gemini-2.5-flash-001')
		requestedModel: meta.requestedModel, // Model you requested (e.g., 'gemini-2.5-flash')

		// Timestamp for audit trail
		timestamp: meta.timestamp
	};
}
1017
-
1018
- /**
1019
- * Sends a one-off message using generateContent (not chat).
1020
- * Does NOT affect chat history - useful for isolated requests.
1021
- * @this {ExportedAPI}
1022
- * @param {Object|string} sourcePayload - The source payload.
1023
- * @param {Object} [options] - Options including labels.
1024
- * @param {AsyncValidatorFunction|null} [validatorFn] - Optional validator.
1025
- * @returns {Promise<Object>} - The transformed payload.
1026
- */
1027
- async function statelessMessage(sourcePayload, options = {}, validatorFn = null) {
1028
- if (!this.chat) {
1029
- throw new Error("Chat session not initialized. Please call init() first.");
1030
- }
1031
-
1032
- const payloadStr = typeof sourcePayload === 'string'
1033
- ? sourcePayload
1034
- : JSON.stringify(sourcePayload, null, 2);
1035
-
1036
- // Build contents including examples from current chat history
1037
- const contents = [];
1038
-
1039
- // Include seeded examples if we have them
1040
- if (this.exampleCount > 0) {
1041
- const history = this.chat.getHistory();
1042
- const exampleHistory = history.slice(0, this.exampleCount);
1043
- contents.push(...exampleHistory);
1044
- }
1045
-
1046
- // Add the user message
1047
- contents.push({ role: 'user', parts: [{ text: payloadStr }] });
1048
-
1049
- // Merge labels (Vertex AI only)
1050
- const mergedLabels = { ...this.labels, ...(options.labels || {}) };
1051
-
1052
- // Use generateContent instead of chat.sendMessage
1053
- const result = await this.genAIClient.models.generateContent({
1054
- model: this.modelName,
1055
- contents: contents,
1056
- config: {
1057
- ...this.chatConfig,
1058
- ...(this.vertexai && Object.keys(mergedLabels).length > 0 && { labels: mergedLabels })
1059
- }
1060
- });
1061
-
1062
- // Capture and log response metadata
1063
- this.lastResponseMetadata = {
1064
- modelVersion: result.modelVersion || null,
1065
- requestedModel: this.modelName,
1066
- promptTokens: result.usageMetadata?.promptTokenCount || 0,
1067
- responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
1068
- totalTokens: result.usageMetadata?.totalTokenCount || 0,
1069
- timestamp: Date.now()
1070
- };
1071
-
1072
- // Set cumulative usage for stateless message (single attempt, no retries)
1073
- this._cumulativeUsage = {
1074
- promptTokens: this.lastResponseMetadata.promptTokens,
1075
- responseTokens: this.lastResponseMetadata.responseTokens,
1076
- totalTokens: this.lastResponseMetadata.totalTokens,
1077
- attempts: 1
1078
- };
1079
-
1080
- if (result.usageMetadata && log.level !== 'silent') {
1081
- log.debug(`Stateless message metadata: ${JSON.stringify({
1082
- modelVersion: result.modelVersion || 'not-provided',
1083
- promptTokens: result.usageMetadata.promptTokenCount,
1084
- responseTokens: result.usageMetadata.candidatesTokenCount
1085
- })}`);
1086
- }
1087
-
1088
- const modelResponse = result.text;
1089
- const extractedJSON = extractJSON(modelResponse);
1090
-
1091
- let transformedPayload = extractedJSON?.data ? extractedJSON.data : extractedJSON;
1092
-
1093
- // Validate if a validator is provided
1094
- if (validatorFn) {
1095
- await validatorFn(transformedPayload);
1096
- }
1097
-
1098
- return transformedPayload;
1099
- }
1100
-
1101
-
1102
- /*
1103
- ----
1104
- HELPERS
1105
- ----
1106
- */
1107
-
1108
/**
 * Scans `text` (ignoring characters inside string literals, honoring
 * backslash escapes) and reports how many braces/brackets remain open and
 * whether the scan ended inside an unterminated string.
 * @param {string} text - Candidate JSON text.
 * @returns {{braces: number, brackets: number, inString: boolean}}
 */
function _scanJSONBalance(text) {
	let braces = 0;
	let brackets = 0;
	let inString = false;
	let escapeNext = false;

	for (let j = 0; j < text.length; j++) {
		const char = text[j];

		if (escapeNext) {
			escapeNext = false;
			continue;
		}
		if (char === '\\') {
			escapeNext = true;
			continue;
		}
		if (char === '"') {
			inString = !inString;
			continue;
		}
		if (!inString) {
			if (char === '{') braces++;
			else if (char === '}') braces--;
			else if (char === '[') brackets++;
			else if (char === ']') brackets--;
		}
	}

	return { braces, brackets, inString };
}

/**
 * Appends the closing characters implied by a balance report: a quote for an
 * unterminated string, then `}` / `]` for each unclosed structure.
 * @param {string} text - The truncated JSON text.
 * @param {{braces: number, brackets: number, inString: boolean}} balance
 * @returns {string} - Text with closing characters appended.
 */
function _closeOpenStructures(text, balance) {
	let fixed = text;
	let { braces, brackets } = balance;

	// Close any open string first so the structural closers land outside it.
	if (balance.inString) {
		fixed += '"';
	}
	while (braces > 0) {
		fixed += '}';
		braces--;
	}
	while (brackets > 0) {
		fixed += ']';
		brackets--;
	}
	return fixed;
}

/**
 * Attempts to recover truncated JSON (e.g. a response cut off by
 * maxOutputTokens). First tries appending missing closing characters; then
 * progressively removes characters from the end, re-checking after each
 * removal, until valid JSON is found or recovery fails.
 *
 * Refactor: the brace/bracket/string scan and the "append closers" fixup were
 * each duplicated inline several times; they are now the private helpers
 * above, with identical semantics.
 *
 * @param {string} text - The potentially truncated JSON string
 * @param {number} maxAttempts - Maximum number of characters to remove
 * @returns {Object|null} - Parsed JSON object or null if recovery fails
 */
function attemptJSONRecovery(text, maxAttempts = 100) {
	if (!text || typeof text !== 'string') return null;

	// First, try parsing as-is
	try {
		return JSON.parse(text);
	} catch (e) {
		// Continue with recovery
	}

	let workingText = text.trim();

	// First attempt: close unclosed structures without removing characters.
	const initial = _scanJSONBalance(workingText);
	if ((initial.braces > 0 || initial.brackets > 0 || initial.inString) && workingText.length > 2) {
		try {
			const result = JSON.parse(_closeOpenStructures(workingText, initial));
			if (log.level !== 'silent') {
				log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
			}
			return result;
		} catch (e) {
			// Simple fix didn't work, continue with more aggressive recovery
		}
	}

	// Second attempt: progressively remove characters from the end.
	for (let i = 0; i < maxAttempts && workingText.length > 2; i++) {
		workingText = workingText.slice(0, -1);

		const balance = _scanJSONBalance(workingText);

		// If structures are balanced after the trim, try a straight parse.
		if (balance.braces === 0 && balance.brackets === 0 && !balance.inString) {
			try {
				const result = JSON.parse(workingText);
				if (log.level !== 'silent') {
					log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by removing ${i + 1} characters from the end.`);
				}
				return result;
			} catch (e) {
				// Continue trying
			}
		}

		// After a few removals, also try appending closing characters.
		if (i > 5) {
			try {
				const result = JSON.parse(_closeOpenStructures(workingText, balance));
				if (log.level !== 'silent') {
					log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
				}
				return result;
			} catch (e) {
				// Recovery failed, continue trying
			}
		}
	}

	return null;
}
1274
-
1275
/**
 * True when `data` serializes to a JSON object or array
 * (i.e. JSON.stringify yields something bracketed). Primitives, undefined,
 * and unserializable values (circular refs) return false.
 * @param {*} data - Any value.
 * @returns {boolean}
 */
function isJSON(data) {
	try {
		const serialized = JSON.stringify(data);
		if (!serialized) return false; // undefined / unserializable input
		const first = serialized[0];
		const last = serialized[serialized.length - 1];
		return (first === '{' || first === '[') && (last === '}' || last === ']');
	} catch (err) {
		// JSON.stringify throws on circular structures / BigInt
		return false;
	}
}
1288
-
1289
/**
 * True when `string` parses to a JSON object or array.
 * Strings that parse to primitives ("42", "\"hi\"", "null") return false,
 * as does any non-string or unparseable input.
 * @param {*} string - Candidate JSON text.
 * @returns {boolean}
 */
function isJSONStr(string) {
	if (typeof string !== 'string') return false;
	try {
		const parsed = JSON.parse(string);
		// JSON can only yield plain objects or arrays for the 'object' type
		// (null is excluded explicitly).
		return parsed !== null && typeof parsed === 'object';
	} catch (err) {
		return false;
	}
}
1299
-
1300
// Extracts the first valid JSON object/array from free-form model output.
// Tries six strategies in order, from cheapest (direct parse) to most
// aggressive (truncation recovery); throws if none of them yields JSON.
function extractJSON(text) {
	if (!text || typeof text !== 'string') {
		throw new Error('No text provided for JSON extraction');
	}

	// Strategy 1: Try parsing the entire response as JSON
	if (isJSONStr(text.trim())) {
		return JSON.parse(text.trim());
	}

	// Strategy 2: Look for JSON code blocks (```json...``` or ```...```)
	const codeBlockPatterns = [
		/```json\s*\n?([\s\S]*?)\n?\s*```/gi,
		/```\s*\n?([\s\S]*?)\n?\s*```/gi
	];

	for (const pattern of codeBlockPatterns) {
		const matches = text.match(pattern);
		if (matches) {
			for (const match of matches) {
				// match() with /g returns whole-match strings (no capture groups),
				// so the fences are stripped manually here.
				const jsonContent = match.replace(/```json\s*\n?/gi, '').replace(/```\s*\n?/gi, '').trim();
				if (isJSONStr(jsonContent)) {
					return JSON.parse(jsonContent);
				}
			}
		}
	}

	// Strategy 3: Look for JSON objects/arrays using bracket matching
	// (greedy regexes: first opener to last closer — cheap but coarse).
	const jsonPatterns = [
		// Match complete JSON objects
		/\{[\s\S]*\}/g,
		// Match complete JSON arrays
		/\[[\s\S]*\]/g
	];

	for (const pattern of jsonPatterns) {
		const matches = text.match(pattern);
		if (matches) {
			for (const match of matches) {
				const candidate = match.trim();
				if (isJSONStr(candidate)) {
					return JSON.parse(candidate);
				}
			}
		}
	}

	// Strategy 4: Advanced bracket matching for nested structures
	// (proper depth-tracking scan; handles text around/between structures).
	const advancedExtract = findCompleteJSONStructures(text);
	if (advancedExtract.length > 0) {
		// Return the first valid JSON structure found
		for (const candidate of advancedExtract) {
			if (isJSONStr(candidate)) {
				return JSON.parse(candidate);
			}
		}
	}

	// Strategy 5: Clean up common formatting issues and retry
	// NOTE(review): the comment-stripping regexes can corrupt "//" or "/*"
	// sequences inside string values — acceptable as a last-ditch heuristic.
	const cleanedText = text
		.replace(/^\s*Sure,?\s*here\s+is\s+your?\s+.*?[:\n]/gi, '') // Remove conversational intros
		.replace(/^\s*Here\s+is\s+the\s+.*?[:\n]/gi, '')
		.replace(/^\s*The\s+.*?is\s*[:\n]/gi, '')
		.replace(/\/\*[\s\S]*?\*\//g, '') // Remove /* comments */
		.replace(/\/\/.*$/gm, '') // Remove // comments
		.trim();

	if (isJSONStr(cleanedText)) {
		return JSON.parse(cleanedText);
	}

	// Strategy 6: Last resort - attempt recovery for potentially truncated JSON
	// This is especially useful when maxOutputTokens might have cut off the response
	const recoveredJSON = attemptJSONRecovery(text);
	if (recoveredJSON !== null) {
		return recoveredJSON;
	}

	// If all else fails, throw an error with helpful information
	throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
}
1382
-
1383
/**
 * Scans `text` for every position where a JSON object or array could start
 * and collects each complete (balanced) structure found there.
 * Nested structures are reported too, since inner openers also match.
 * @param {string} text - Arbitrary text possibly containing JSON.
 * @returns {string[]} - Raw substrings of complete {...} / [...] structures.
 */
function findCompleteJSONStructures(text) {
	const found = [];

	for (let pos = 0; pos < text.length; pos++) {
		const ch = text[pos];
		if (ch !== '{' && ch !== '[') continue;

		const candidate = extractCompleteStructure(text, pos);
		if (candidate) {
			found.push(candidate);
		}
	}

	return found;
}
1398
-
1399
-
1400
/**
 * Given the index of a '{' or '[' in `text`, returns the substring up to and
 * including its matching closer, tracking nesting depth and skipping any
 * brackets that appear inside string literals (with backslash escapes).
 * @param {string} text - The text being scanned.
 * @param {number} startPos - Index of the opening '{' or '['.
 * @returns {string|null} - The complete structure, or null if unbalanced.
 */
function extractCompleteStructure(text, startPos) {
	const opener = text[startPos];
	const closer = opener === '{' ? '}' : ']';
	let depth = 0;
	let insideString = false;
	let skipNext = false;

	for (let pos = startPos; pos < text.length; pos++) {
		const ch = text[pos];

		// Consume the character following a backslash inside a string.
		if (skipNext) {
			skipNext = false;
			continue;
		}
		if (ch === '\\' && insideString) {
			skipNext = true;
			continue;
		}
		if (ch === '"') {
			insideString = !insideString;
			continue;
		}
		if (insideString) continue;

		if (ch === opener) {
			depth += 1;
		} else if (ch === closer) {
			depth -= 1;
			if (depth === 0) {
				return text.substring(startPos, pos + 1);
			}
		}
	}

	return null; // Incomplete structure
}
1439
-
1440
/**
 * Standalone smoke test: runs only when this file is executed directly
 * (`node index.js`) rather than imported. Seeds a transformer with
 * emoji-profile examples, then exercises message() and messageAndValidate().
 */
if (import.meta.url === new URL(`file://${process.argv[1]}`).href) {
	log.info("RUNNING AI Transformer as standalone script...");
	(
		async () => {
			try {
				log.info("Initializing AI Transformer...");
				const transformer = new AITransformer({
					modelName: 'gemini-2.5-flash',
					sourceKey: 'INPUT', // Custom source key
					targetKey: 'OUTPUT', // Custom target key
					contextKey: 'CONTEXT', // Custom context key
					maxRetries: 2,

				});

				const examples = [
					{
						CONTEXT: "Generate professional profiles with emoji representations",
						INPUT: { "name": "Alice" },
						OUTPUT: { "name": "Alice", "profession": "data scientist", "life_as_told_by_emoji": ["🔬", "💡", "📊", "🧠", "🌟"] }
					},
					{
						INPUT: { "name": "Bob" },
						OUTPUT: { "name": "Bob", "profession": "product manager", "life_as_told_by_emoji": ["📋", "🤝", "🚀", "💬", "🎯"] }
					},
					{
						// FIX: output name was "Even" while the input was "Eve" —
						// a few-shot example that mangles the name teaches the
						// model to mangle names too.
						INPUT: { "name": "Eve" },
						OUTPUT: { "name": "Eve", "profession": "security analyst", "life_as_told_by_emoji": ["🕵️‍♀️", "🔒", "💻", "👀", "⚡️"] }
					},
				];

				await transformer.init();
				await transformer.seed(examples);
				log.info("AI Transformer initialized and seeded with examples.");

				// Test normal transformation
				const normalResponse = await transformer.message({ "name": "AK" });
				log.info(`Normal Payload Transformed: ${JSON.stringify(normalResponse)}`);

				// Test transformation with validation
				const mockValidator = async (payload) => {
					// Simulate validation logic
					if (!payload.profession || !payload.life_as_told_by_emoji) {
						throw new Error("Missing required fields: profession or life_as_told_by_emoji");
					}
					if (!Array.isArray(payload.life_as_told_by_emoji)) {
						throw new Error("life_as_told_by_emoji must be an array");
					}
					return payload; // Return the payload if validation passes
				};

				const validatedResponse = await transformer.messageAndValidate(
					{ "name": "Lynn" },
					{},
					mockValidator
				);
				log.info(`Validated Payload Transformed: ${JSON.stringify(validatedResponse)}`);

				if (NODE_ENV === 'dev') debugger;
			} catch (error) {
				log.error(`Error in AI Transformer script: ${error?.message || error}`);
				if (NODE_ENV === 'dev') debugger;
			}
		})();
}
12
+ * @example
13
+ * ```javascript
14
+ * import { Transformer, Chat, Message, ToolAgent } from 'ak-gemini';
15
+ * // or
16
+ * import AI from 'ak-gemini';
17
+ * const t = new AI.Transformer({ ... });
18
+ * ```
19
+ */
20
+
21
// ── Module wiring ──
// Import each wrapper once, then expose it both as a named export and
// (for the core classes) on the default namespace object.

import Transformer from './transformer.js';
import Chat from './chat.js';
import Message from './message.js';
import ToolAgent from './tool-agent.js';
import CodeAgent from './code-agent.js';
import BaseGemini from './base.js';
import log from './logger.js';

// ── Named Exports ──

export { Transformer, Chat, Message, ToolAgent, CodeAgent, BaseGemini, log };
export { ThinkingLevel, HarmCategory, HarmBlockThreshold } from '@google/genai';
export { extractJSON, attemptJSONRecovery } from './json-helpers.js';

// ── Default Export (namespace object) ──

export default { Transformer, Chat, Message, ToolAgent, CodeAgent };