ak-gemini 1.0.7 → 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/index.cjs +158 -12
  2. package/index.js +220 -14
  3. package/package.json +2 -1
  4. package/types.d.ts +2 -0
package/index.cjs CHANGED
@@ -30,6 +30,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 var index_exports = {};
 __export(index_exports, {
   ThinkingLevel: () => import_genai.ThinkingLevel,
+  attemptJSONRecovery: () => attemptJSONRecovery,
   default: () => index_default,
   log: () => logger_default
 });
@@ -79,6 +80,7 @@ var DEFAULT_THINKING_CONFIG = {
   thinkingBudget: 0,
   thinkingLevel: import_genai.ThinkingLevel.MINIMAL
 };
+var DEFAULT_MAX_OUTPUT_TOKENS = 1e5;
 var THINKING_SUPPORTED_MODELS = [
   /^gemini-3-flash(-preview)?$/,
   /^gemini-3-pro(-preview|-image-preview)?$/,
@@ -166,21 +168,43 @@ function AITransformFactory(options = {}) {
     ...options.chatConfig,
     systemInstruction: this.systemInstructions
   };
+  if (options.maxOutputTokens !== void 0) {
+    if (options.maxOutputTokens === null) {
+      delete this.chatConfig.maxOutputTokens;
+    } else {
+      this.chatConfig.maxOutputTokens = options.maxOutputTokens;
+    }
+  } else if (options.chatConfig?.maxOutputTokens !== void 0) {
+    if (options.chatConfig.maxOutputTokens === null) {
+      delete this.chatConfig.maxOutputTokens;
+    } else {
+      this.chatConfig.maxOutputTokens = options.chatConfig.maxOutputTokens;
+    }
+  } else {
+    this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
+  }
   const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(
     (pattern) => pattern.test(this.modelName)
   );
-  if (modelSupportsThinking && options.thinkingConfig) {
-    const thinkingConfig = {
-      ...DEFAULT_THINKING_CONFIG,
-      ...options.thinkingConfig
-    };
-    this.chatConfig.thinkingConfig = thinkingConfig;
-    if (logger_default.level !== "silent") {
-      logger_default.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
-    }
-  } else if (options.thinkingConfig && !modelSupportsThinking) {
-    if (logger_default.level !== "silent") {
-      logger_default.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
+  if (options.thinkingConfig !== void 0) {
+    if (options.thinkingConfig === null) {
+      delete this.chatConfig.thinkingConfig;
+      if (logger_default.level !== "silent") {
+        logger_default.debug(`thinkingConfig set to null - removed from configuration`);
+      }
+    } else if (modelSupportsThinking) {
+      const thinkingConfig = {
+        ...DEFAULT_THINKING_CONFIG,
+        ...options.thinkingConfig
+      };
+      this.chatConfig.thinkingConfig = thinkingConfig;
+      if (logger_default.level !== "silent") {
+        logger_default.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
+      }
+    } else {
+      if (logger_default.level !== "silent") {
+        logger_default.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
+      }
     }
   }
   if (options.responseSchema) {
@@ -203,6 +227,7 @@ function AITransformFactory(options = {}) {
   if (logger_default.level !== "silent") {
     logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
     logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
+    logger_default.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
   }
   const ai = new import_genai.GoogleGenAI({ apiKey: this.apiKey });
   this.genAIClient = ai;
@@ -423,6 +448,122 @@ function getChatHistory() {
   }
   return this.chat.getHistory();
 }
+function attemptJSONRecovery(text, maxAttempts = 100) {
+  if (!text || typeof text !== "string") return null;
+  try {
+    return JSON.parse(text);
+  } catch (e) {
+  }
+  let workingText = text.trim();
+  let braces = 0;
+  let brackets = 0;
+  let inString = false;
+  let escapeNext = false;
+  for (let j = 0; j < workingText.length; j++) {
+    const char = workingText[j];
+    if (escapeNext) {
+      escapeNext = false;
+      continue;
+    }
+    if (char === "\\") {
+      escapeNext = true;
+      continue;
+    }
+    if (char === '"') {
+      inString = !inString;
+      continue;
+    }
+    if (!inString) {
+      if (char === "{") braces++;
+      else if (char === "}") braces--;
+      else if (char === "[") brackets++;
+      else if (char === "]") brackets--;
+    }
+  }
+  if ((braces > 0 || brackets > 0 || inString) && workingText.length > 2) {
+    let fixedText = workingText;
+    if (inString) {
+      fixedText += '"';
+    }
+    while (braces > 0) {
+      fixedText += "}";
+      braces--;
+    }
+    while (brackets > 0) {
+      fixedText += "]";
+      brackets--;
+    }
+    try {
+      const result = JSON.parse(fixedText);
+      if (logger_default.level !== "silent") {
+        logger_default.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
+      }
+      return result;
+    } catch (e) {
+    }
+  }
+  for (let i = 0; i < maxAttempts && workingText.length > 2; i++) {
+    workingText = workingText.slice(0, -1);
+    let braces2 = 0;
+    let brackets2 = 0;
+    let inString2 = false;
+    let escapeNext2 = false;
+    for (let j = 0; j < workingText.length; j++) {
+      const char = workingText[j];
+      if (escapeNext2) {
+        escapeNext2 = false;
+        continue;
+      }
+      if (char === "\\") {
+        escapeNext2 = true;
+        continue;
+      }
+      if (char === '"') {
+        inString2 = !inString2;
+        continue;
+      }
+      if (!inString2) {
+        if (char === "{") braces2++;
+        else if (char === "}") braces2--;
+        else if (char === "[") brackets2++;
+        else if (char === "]") brackets2--;
+      }
+    }
+    if (braces2 === 0 && brackets2 === 0 && !inString2) {
+      try {
+        const result = JSON.parse(workingText);
+        if (logger_default.level !== "silent") {
+          logger_default.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by removing ${i + 1} characters from the end.`);
+        }
+        return result;
+      } catch (e) {
+      }
+    }
+    if (i > 5) {
+      let fixedText = workingText;
+      if (inString2) {
+        fixedText += '"';
+      }
+      while (braces2 > 0) {
+        fixedText += "}";
+        braces2--;
+      }
+      while (brackets2 > 0) {
+        fixedText += "]";
+        brackets2--;
+      }
+      try {
+        const result = JSON.parse(fixedText);
+        if (logger_default.level !== "silent") {
+          logger_default.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
        }
+        return result;
+      } catch (e) {
+      }
+    }
+  }
+  return null;
+}
 function isJSON(data) {
   try {
     const attempt = JSON.stringify(data);
@@ -497,6 +638,10 @@ function extractJSON(text) {
   if (isJSONStr(cleanedText)) {
     return JSON.parse(cleanedText);
   }
+  const recoveredJSON = attemptJSONRecovery(text);
+  if (recoveredJSON !== null) {
+    return recoveredJSON;
+  }
   throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
 }
 function findCompleteJSONStructures(text) {
@@ -604,5 +749,6 @@ if (import_meta.url === new URL(`file://${process.argv[1]}`).href) {
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   ThinkingLevel,
+  attemptJSONRecovery,
   log
 });
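
Note on the new attemptJSONRecovery export: the bundle above registers it in the export map, so it can be called directly on a response string that was cut off mid-structure. A minimal sketch, not taken from the package docs, assuming the CommonJS bundle is consumed via require("ak-gemini"):

// Truncated model output: the final string, object, and array are never closed.
const { attemptJSONRecovery } = require("ak-gemini");

const truncated = '[{"name": "Ada"}, {"name": "Lin';
const recovered = attemptJSONRecovery(truncated);
// The brace/bracket counting pass appends '"', then '}', then ']',
// yielding [ { name: 'Ada' }, { name: 'Lin' } ]; unrecoverable input returns null.
console.log(recovered);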
package/index.js CHANGED
@@ -57,6 +57,8 @@ const DEFAULT_THINKING_CONFIG = {
   thinkingLevel: ThinkingLevel.MINIMAL
 };
 
+const DEFAULT_MAX_OUTPUT_TOKENS = 100000; // Default ceiling for output tokens
+
 // Models that support thinking features (as of Dec 2024)
 // Using regex patterns for more precise matching
 const THINKING_SUPPORTED_MODELS = [
@@ -136,6 +138,7 @@ class AITransformer {
 }
 
 export default AITransformer;
+export { attemptJSONRecovery }; // Export for testing
 
 /**
  * factory function to create an AI Transformer instance
@@ -186,25 +189,53 @@ function AITransformFactory(options = {}) {
     systemInstruction: this.systemInstructions
   };
 
+  // Handle maxOutputTokens with explicit null check
+  // Priority: options.maxOutputTokens > options.chatConfig.maxOutputTokens > DEFAULT
+  // Setting to null explicitly removes the limit
+  if (options.maxOutputTokens !== undefined) {
+    if (options.maxOutputTokens === null) {
+      delete this.chatConfig.maxOutputTokens;
+    } else {
+      this.chatConfig.maxOutputTokens = options.maxOutputTokens;
+    }
+  } else if (options.chatConfig?.maxOutputTokens !== undefined) {
+    if (options.chatConfig.maxOutputTokens === null) {
+      delete this.chatConfig.maxOutputTokens;
+    } else {
+      this.chatConfig.maxOutputTokens = options.chatConfig.maxOutputTokens;
+    }
+  } else {
+    this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
+  }
+
   // Only add thinkingConfig if the model supports it
   const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(pattern =>
     pattern.test(this.modelName)
   );
 
-  if (modelSupportsThinking && options.thinkingConfig) {
-    // Handle thinkingConfig - merge with defaults
-    const thinkingConfig = {
-      ...DEFAULT_THINKING_CONFIG,
-      ...options.thinkingConfig
-    };
-    this.chatConfig.thinkingConfig = thinkingConfig;
-
-    if (log.level !== 'silent') {
-      log.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
-    }
-  } else if (options.thinkingConfig && !modelSupportsThinking) {
-    if (log.level !== 'silent') {
-      log.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
+  // Handle thinkingConfig - null explicitly removes it, undefined means not specified
+  if (options.thinkingConfig !== undefined) {
+    if (options.thinkingConfig === null) {
+      // Explicitly remove thinkingConfig if set to null
+      delete this.chatConfig.thinkingConfig;
+      if (log.level !== 'silent') {
+        log.debug(`thinkingConfig set to null - removed from configuration`);
+      }
+    } else if (modelSupportsThinking) {
+      // Handle thinkingConfig - merge with defaults
+      const thinkingConfig = {
+        ...DEFAULT_THINKING_CONFIG,
+        ...options.thinkingConfig
+      };
+      this.chatConfig.thinkingConfig = thinkingConfig;
+
+      if (log.level !== 'silent') {
+        log.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
+      }
+    } else {
+      if (log.level !== 'silent') {
+        log.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
+      }
     }
   }
 
@@ -241,6 +272,7 @@ function AITransformFactory(options = {}) {
   if (log.level !== 'silent') {
     log.debug(`Creating AI Transformer with model: ${this.modelName}`);
     log.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
+    log.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
   }
 
   const ai = new GoogleGenAI({ apiKey: this.apiKey });
@@ -606,6 +638,173 @@ HELPERS
 ----
 */
 
+/**
+ * Attempts to recover truncated JSON by progressively removing characters from the end
+ * until valid JSON is found or recovery fails
+ * @param {string} text - The potentially truncated JSON string
+ * @param {number} maxAttempts - Maximum number of characters to remove
+ * @returns {Object|null} - Parsed JSON object or null if recovery fails
+ */
+function attemptJSONRecovery(text, maxAttempts = 100) {
+  if (!text || typeof text !== 'string') return null;
+
+  // First, try parsing as-is
+  try {
+    return JSON.parse(text);
+  } catch (e) {
+    // Continue with recovery
+  }
+
+  let workingText = text.trim();
+
+  // First attempt: try to close unclosed structures without removing characters
+  // Count open/close braces and brackets in the original text
+  let braces = 0;
+  let brackets = 0;
+  let inString = false;
+  let escapeNext = false;
+
+  for (let j = 0; j < workingText.length; j++) {
+    const char = workingText[j];
+
+    if (escapeNext) {
+      escapeNext = false;
+      continue;
+    }
+
+    if (char === '\\') {
+      escapeNext = true;
+      continue;
+    }
+
+    if (char === '"') {
+      inString = !inString;
+      continue;
+    }
+
+    if (!inString) {
+      if (char === '{') braces++;
+      else if (char === '}') braces--;
+      else if (char === '[') brackets++;
+      else if (char === ']') brackets--;
+    }
+  }
+
+  // Try to fix by just adding closing characters
+  if ((braces > 0 || brackets > 0 || inString) && workingText.length > 2) {
+    let fixedText = workingText;
+
+    // Close any open strings first
+    if (inString) {
+      fixedText += '"';
+    }
+
+    // Add missing closing characters
+    while (braces > 0) {
+      fixedText += '}';
+      braces--;
+    }
+    while (brackets > 0) {
+      fixedText += ']';
+      brackets--;
+    }
+
+    try {
+      const result = JSON.parse(fixedText);
+      if (log.level !== 'silent') {
+        log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
+      }
+      return result;
+    } catch (e) {
+      // Simple fix didn't work, continue with more aggressive recovery
+    }
+  }
+
+  // Second attempt: progressively remove characters from the end
+
+  for (let i = 0; i < maxAttempts && workingText.length > 2; i++) {
+    // Remove one character from the end
+    workingText = workingText.slice(0, -1);
+
+    // Count open/close braces and brackets
+    let braces = 0;
+    let brackets = 0;
+    let inString = false;
+    let escapeNext = false;
+
+    for (let j = 0; j < workingText.length; j++) {
+      const char = workingText[j];
+
+      if (escapeNext) {
+        escapeNext = false;
+        continue;
+      }
+
+      if (char === '\\') {
+        escapeNext = true;
+        continue;
+      }
+
+      if (char === '"') {
+        inString = !inString;
+        continue;
+      }
+
+      if (!inString) {
+        if (char === '{') braces++;
+        else if (char === '}') braces--;
+        else if (char === '[') brackets++;
+        else if (char === ']') brackets--;
+      }
+    }
+
+    // If we have balanced braces/brackets, try parsing
+    if (braces === 0 && brackets === 0 && !inString) {
+      try {
+        const result = JSON.parse(workingText);
+        if (log.level !== 'silent') {
+          log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by removing ${i + 1} characters from the end.`);
+        }
+        return result;
+      } catch (e) {
+        // Continue trying
+      }
+    }
+
+    // After a few attempts, try adding closing characters
+    if (i > 5) {
+      let fixedText = workingText;
+
+      // Close any open strings first
+      if (inString) {
+        fixedText += '"';
+      }
+
+      // Add missing closing characters
+      while (braces > 0) {
+        fixedText += '}';
+        braces--;
+      }
+      while (brackets > 0) {
+        fixedText += ']';
+        brackets--;
+      }
+
+      try {
+        const result = JSON.parse(fixedText);
+        if (log.level !== 'silent') {
+          log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
+        }
+        return result;
+      } catch (e) {
+        // Recovery failed, continue trying
+      }
+    }
+  }
+
+  return null;
+}
+
 function isJSON(data) {
   try {
     const attempt = JSON.stringify(data);
@@ -703,6 +902,13 @@ function extractJSON(text) {
     return JSON.parse(cleanedText);
   }
 
+  // Strategy 6: Last resort - attempt recovery for potentially truncated JSON
+  // This is especially useful when maxOutputTokens might have cut off the response
+  const recoveredJSON = attemptJSONRecovery(text);
+  if (recoveredJSON !== null) {
+    return recoveredJSON;
+  }
+
   // If all else fails, throw an error with helpful information
   throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
 }
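
The maxOutputTokens handling in AITransformFactory distinguishes undefined (fall back to chatConfig.maxOutputTokens, then to the 100000 default) from an explicit null (delete the cap from chatConfig), and thinkingConfig: null now strips thinking settings the same way. A rough usage sketch, assuming the default export is constructed with the same options object the factory reads and that an API key is supplied via the environment (per the dotenv dependency):

import AITransformer from "ak-gemini";

// Explicit cap: takes precedence over chatConfig.maxOutputTokens and the default.
const capped = new AITransformer({ maxOutputTokens: 25000 });

// null removes the cap from chatConfig entirely; thinkingConfig: null is honored the same way.
const uncapped = new AITransformer({ maxOutputTokens: null, thinkingConfig: null });

// Neither option set: chatConfig.maxOutputTokens falls back to DEFAULT_MAX_OUTPUT_TOKENS (100000).
const defaulted = new AITransformer({ chatConfig: { temperature: 0.2 } });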
package/package.json CHANGED
@@ -2,7 +2,7 @@
   "name": "ak-gemini",
   "author": "ak@mixpanel.com",
   "description": "AK's Generative AI Helper for doing... transforms",
-  "version": "1.0.7",
+  "version": "1.0.8",
   "main": "index.js",
   "files": [
     "index.js",
@@ -39,6 +39,7 @@
     "prune": "rm -rf tmp/*",
     "test": "node --no-warnings --experimental-vm-modules node_modules/jest/bin/jest.js",
     "test:unit": "npm test -- tests/module.test.js",
+    "test:fixed": "npm test -- --testNamePattern=\"should use context in the prompt and transform accordingly|should augment the payload as instructed by system instructions|should succeed on the first try if validation passes|should handle invalid model names|should handle multiple concurrent messages|should use the constructor-provided asyncValidator|should override system instructions from the file\"",
     "build:cjs": "esbuild index.js --bundle --platform=node --format=cjs --outfile=index.cjs --external:@google/genai --external:ak-tools --external:dotenv --external:pino-pretty --external:pino",
     "coverage": "node --no-warnings --experimental-vm-modules node_modules/jest/bin/jest.js --coverage",
     "typecheck": "tsc --noEmit",
package/types.d.ts CHANGED
@@ -21,6 +21,7 @@ export interface ChatConfig {
   temperature?: number; // Controls randomness (0.0 to 1.0)
   topP?: number; // Controls diversity via nucleus sampling
   topK?: number; // Controls diversity by limiting top-k tokens
+  maxOutputTokens?: number; // Maximum number of tokens that can be generated in the response
   systemInstruction?: string; // System instruction for the model
   safetySettings?: SafetySetting[]; // Safety settings array
   responseSchema?: Object; // Schema for validating model responses
@@ -74,6 +75,7 @@ export interface AITransformerOptions {
   systemInstructions?: string; // Custom system instructions for the model
   chatConfig?: ChatConfig; // Configuration object for the chat session
   thinkingConfig?: ThinkingConfig; // Thinking features configuration (defaults to thinkingBudget: 0, thinkingLevel: "MINIMAL")
+  maxOutputTokens?: number; // Maximum number of tokens that can be generated in the response (defaults to 100000)
  examplesFile?: string; // Path to JSON file containing transformation examples
   exampleData?: TransformationExample[]; // Inline examples to seed the transformer
   sourceKey?: string; // Key name for source data in examples (alias for promptKey)
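
The two declaration sites mirror the runtime precedence in index.js: a top-level maxOutputTokens wins over chatConfig.maxOutputTokens, and omitting both falls back to 100000. A small sketch, assuming the package's types entry resolves to this types.d.ts:

/** @type {import("ak-gemini").AITransformerOptions} */
const options = {
  chatConfig: { maxOutputTokens: 8192, temperature: 0 },
  // The top-level field overrides chatConfig.maxOutputTokens, so 4096 is what lands in chatConfig.
  maxOutputTokens: 4096,
};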