ak-gemini 1.0.6 → 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4):
  1. package/index.cjs +180 -2
  2. package/index.js +249 -3
  3. package/package.json +6 -4
  4. package/types.d.ts +16 -1
package/index.cjs CHANGED
@@ -29,6 +29,8 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
29
29
  // index.js
30
30
  var index_exports = {};
31
31
  __export(index_exports, {
32
+ ThinkingLevel: () => import_genai.ThinkingLevel,
33
+ attemptJSONRecovery: () => attemptJSONRecovery,
32
34
  default: () => index_default,
33
35
  log: () => logger_default
34
36
  });
@@ -64,7 +66,7 @@ var DEFAULT_SAFETY_SETTINGS = [
64
66
  var DEFAULT_SYSTEM_INSTRUCTIONS = `
65
67
  You are an expert JSON transformation engine. Your task is to accurately convert data payloads from one format to another.
66
68
 
67
- You will be provided with example transformations (Source JSON -> Target JSON).
69
+ You will be provided with example transformations (Source JSON -> Target JSON).
68
70
 
69
71
  Learn the mapping rules from these examples.
70
72
 
@@ -72,8 +74,22 @@ When presented with new Source JSON, apply the learned transformation rules to p
72
74
 
73
75
  Always respond ONLY with a valid JSON object that strictly adheres to the expected output format.
74
76
 
75
- Do not include any additional text, explanations, or formatting before or after the JSON object.
77
+ Do not include any additional text, explanations, or formatting before or after the JSON object.
76
78
  `;
79
+ var DEFAULT_THINKING_CONFIG = {
80
+ thinkingBudget: 0,
81
+ thinkingLevel: import_genai.ThinkingLevel.MINIMAL
82
+ };
83
+ var DEFAULT_MAX_OUTPUT_TOKENS = 1e5;
84
+ var THINKING_SUPPORTED_MODELS = [
85
+ /^gemini-3-flash(-preview)?$/,
86
+ /^gemini-3-pro(-preview|-image-preview)?$/,
87
+ /^gemini-2\.5-pro/,
88
+ /^gemini-2\.5-flash(-preview)?$/,
89
+ /^gemini-2\.5-flash-lite(-preview)?$/,
90
+ /^gemini-2\.0-flash$/
91
+ // Experimental support, exact match only
92
+ ];
77
93
  var DEFAULT_CHAT_CONFIG = {
78
94
  responseMimeType: "application/json",
79
95
  temperature: 0.2,
@@ -152,6 +168,45 @@ function AITransformFactory(options = {}) {
152
168
  ...options.chatConfig,
153
169
  systemInstruction: this.systemInstructions
154
170
  };
171
+ if (options.maxOutputTokens !== void 0) {
172
+ if (options.maxOutputTokens === null) {
173
+ delete this.chatConfig.maxOutputTokens;
174
+ } else {
175
+ this.chatConfig.maxOutputTokens = options.maxOutputTokens;
176
+ }
177
+ } else if (options.chatConfig?.maxOutputTokens !== void 0) {
178
+ if (options.chatConfig.maxOutputTokens === null) {
179
+ delete this.chatConfig.maxOutputTokens;
180
+ } else {
181
+ this.chatConfig.maxOutputTokens = options.chatConfig.maxOutputTokens;
182
+ }
183
+ } else {
184
+ this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
185
+ }
186
+ const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(
187
+ (pattern) => pattern.test(this.modelName)
188
+ );
189
+ if (options.thinkingConfig !== void 0) {
190
+ if (options.thinkingConfig === null) {
191
+ delete this.chatConfig.thinkingConfig;
192
+ if (logger_default.level !== "silent") {
193
+ logger_default.debug(`thinkingConfig set to null - removed from configuration`);
194
+ }
195
+ } else if (modelSupportsThinking) {
196
+ const thinkingConfig = {
197
+ ...DEFAULT_THINKING_CONFIG,
198
+ ...options.thinkingConfig
199
+ };
200
+ this.chatConfig.thinkingConfig = thinkingConfig;
201
+ if (logger_default.level !== "silent") {
202
+ logger_default.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
203
+ }
204
+ } else {
205
+ if (logger_default.level !== "silent") {
206
+ logger_default.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
207
+ }
208
+ }
209
+ }
155
210
  if (options.responseSchema) {
156
211
  this.chatConfig.responseSchema = options.responseSchema;
157
212
  }
@@ -172,6 +227,7 @@ function AITransformFactory(options = {}) {
172
227
  if (logger_default.level !== "silent") {
173
228
  logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
174
229
  logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
230
+ logger_default.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
175
231
  }
176
232
  const ai = new import_genai.GoogleGenAI({ apiKey: this.apiKey });
177
233
  this.genAIClient = ai;
@@ -392,6 +448,122 @@ function getChatHistory() {
392
448
  }
393
449
  return this.chat.getHistory();
394
450
  }
451
+ function attemptJSONRecovery(text, maxAttempts = 100) {
452
+ if (!text || typeof text !== "string") return null;
453
+ try {
454
+ return JSON.parse(text);
455
+ } catch (e) {
456
+ }
457
+ let workingText = text.trim();
458
+ let braces = 0;
459
+ let brackets = 0;
460
+ let inString = false;
461
+ let escapeNext = false;
462
+ for (let j = 0; j < workingText.length; j++) {
463
+ const char = workingText[j];
464
+ if (escapeNext) {
465
+ escapeNext = false;
466
+ continue;
467
+ }
468
+ if (char === "\\") {
469
+ escapeNext = true;
470
+ continue;
471
+ }
472
+ if (char === '"') {
473
+ inString = !inString;
474
+ continue;
475
+ }
476
+ if (!inString) {
477
+ if (char === "{") braces++;
478
+ else if (char === "}") braces--;
479
+ else if (char === "[") brackets++;
480
+ else if (char === "]") brackets--;
481
+ }
482
+ }
483
+ if ((braces > 0 || brackets > 0 || inString) && workingText.length > 2) {
484
+ let fixedText = workingText;
485
+ if (inString) {
486
+ fixedText += '"';
487
+ }
488
+ while (braces > 0) {
489
+ fixedText += "}";
490
+ braces--;
491
+ }
492
+ while (brackets > 0) {
493
+ fixedText += "]";
494
+ brackets--;
495
+ }
496
+ try {
497
+ const result = JSON.parse(fixedText);
498
+ if (logger_default.level !== "silent") {
499
+ logger_default.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
500
+ }
501
+ return result;
502
+ } catch (e) {
503
+ }
504
+ }
505
+ for (let i = 0; i < maxAttempts && workingText.length > 2; i++) {
506
+ workingText = workingText.slice(0, -1);
507
+ let braces2 = 0;
508
+ let brackets2 = 0;
509
+ let inString2 = false;
510
+ let escapeNext2 = false;
511
+ for (let j = 0; j < workingText.length; j++) {
512
+ const char = workingText[j];
513
+ if (escapeNext2) {
514
+ escapeNext2 = false;
515
+ continue;
516
+ }
517
+ if (char === "\\") {
518
+ escapeNext2 = true;
519
+ continue;
520
+ }
521
+ if (char === '"') {
522
+ inString2 = !inString2;
523
+ continue;
524
+ }
525
+ if (!inString2) {
526
+ if (char === "{") braces2++;
527
+ else if (char === "}") braces2--;
528
+ else if (char === "[") brackets2++;
529
+ else if (char === "]") brackets2--;
530
+ }
531
+ }
532
+ if (braces2 === 0 && brackets2 === 0 && !inString2) {
533
+ try {
534
+ const result = JSON.parse(workingText);
535
+ if (logger_default.level !== "silent") {
536
+ logger_default.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by removing ${i + 1} characters from the end.`);
537
+ }
538
+ return result;
539
+ } catch (e) {
540
+ }
541
+ }
542
+ if (i > 5) {
543
+ let fixedText = workingText;
544
+ if (inString2) {
545
+ fixedText += '"';
546
+ }
547
+ while (braces2 > 0) {
548
+ fixedText += "}";
549
+ braces2--;
550
+ }
551
+ while (brackets2 > 0) {
552
+ fixedText += "]";
553
+ brackets2--;
554
+ }
555
+ try {
556
+ const result = JSON.parse(fixedText);
557
+ if (logger_default.level !== "silent") {
558
+ logger_default.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
559
+ }
560
+ return result;
561
+ } catch (e) {
562
+ }
563
+ }
564
+ }
565
+ return null;
566
+ }
395
567
  function isJSON(data) {
396
568
  try {
397
569
  const attempt = JSON.stringify(data);
@@ -466,6 +638,10 @@ function extractJSON(text) {
466
638
  if (isJSONStr(cleanedText)) {
467
639
  return JSON.parse(cleanedText);
468
640
  }
641
+ const recoveredJSON = attemptJSONRecovery(text);
642
+ if (recoveredJSON !== null) {
643
+ return recoveredJSON;
644
+ }
469
645
  throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
470
646
  }
471
647
  function findCompleteJSONStructures(text) {
@@ -572,5 +748,7 @@ if (import_meta.url === new URL(`file://${process.argv[1]}`).href) {
572
748
  }
573
749
  // Annotate the CommonJS export names for ESM import in node:
574
750
  0 && (module.exports = {
751
+ ThinkingLevel,
752
+ attemptJSONRecovery,
575
753
  log
576
754
  });
package/index.js CHANGED
@@ -23,11 +23,12 @@ const { NODE_ENV = "unknown", GEMINI_API_KEY, LOG_LEVEL = "" } = process.env;
23
23
 
24
24
 
25
25
  //deps
26
- import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from '@google/genai';
26
+ import { GoogleGenAI, HarmCategory, HarmBlockThreshold, ThinkingLevel } from '@google/genai';
27
27
  import u from 'ak-tools';
28
28
  import path from 'path';
29
29
  import log from './logger.js';
30
30
  export { log };
31
+ export { ThinkingLevel };
31
32
 
32
33
 
33
34
 
@@ -40,7 +41,7 @@ const DEFAULT_SAFETY_SETTINGS = [
40
41
  const DEFAULT_SYSTEM_INSTRUCTIONS = `
41
42
  You are an expert JSON transformation engine. Your task is to accurately convert data payloads from one format to another.
42
43
 
43
- You will be provided with example transformations (Source JSON -> Target JSON).
44
+ You will be provided with example transformations (Source JSON -> Target JSON).
44
45
 
45
46
  Learn the mapping rules from these examples.
46
47
 
@@ -48,9 +49,27 @@ When presented with new Source JSON, apply the learned transformation rules to p
48
49
 
49
50
  Always respond ONLY with a valid JSON object that strictly adheres to the expected output format.
50
51
 
51
- Do not include any additional text, explanations, or formatting before or after the JSON object.
52
+ Do not include any additional text, explanations, or formatting before or after the JSON object.
52
53
  `;
53
54
 
55
+ const DEFAULT_THINKING_CONFIG = {
56
+ thinkingBudget: 0,
57
+ thinkingLevel: ThinkingLevel.MINIMAL
58
+ };
59
+
60
+ const DEFAULT_MAX_OUTPUT_TOKENS = 100000; // Default ceiling for output tokens
61
+
62
+ // Models that support thinking features (as of Dec 2024)
63
+ // Using regex patterns for more precise matching
64
+ const THINKING_SUPPORTED_MODELS = [
65
+ /^gemini-3-flash(-preview)?$/,
66
+ /^gemini-3-pro(-preview|-image-preview)?$/,
67
+ /^gemini-2\.5-pro/,
68
+ /^gemini-2\.5-flash(-preview)?$/,
69
+ /^gemini-2\.5-flash-lite(-preview)?$/,
70
+ /^gemini-2\.0-flash$/ // Experimental support, exact match only
71
+ ];
72
+
54
73
  const DEFAULT_CHAT_CONFIG = {
55
74
  responseMimeType: 'application/json',
56
75
  temperature: 0.2,
@@ -119,6 +138,7 @@ class AITransformer {
119
138
  }
120
139
 
121
140
  export default AITransformer;
141
+ export { attemptJSONRecovery }; // Export for testing
122
142
 
123
143
  /**
124
144
  * factory function to create an AI Transformer instance
@@ -161,6 +181,7 @@ function AITransformFactory(options = {}) {
161
181
 
162
182
  this.apiKey = options.apiKey !== undefined && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
163
183
  if (!this.apiKey) throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var.");
184
+
164
185
  // Build chat config, making sure systemInstruction uses the custom instructions
165
186
  this.chatConfig = {
166
187
  ...DEFAULT_CHAT_CONFIG,
@@ -168,6 +189,56 @@ function AITransformFactory(options = {}) {
168
189
  systemInstruction: this.systemInstructions
169
190
  };
170
191
 
192
+ // Handle maxOutputTokens with explicit null check
193
+ // Priority: options.maxOutputTokens > options.chatConfig.maxOutputTokens > DEFAULT
194
+ // Setting to null explicitly removes the limit
195
+ if (options.maxOutputTokens !== undefined) {
196
+ if (options.maxOutputTokens === null) {
197
+ delete this.chatConfig.maxOutputTokens;
198
+ } else {
199
+ this.chatConfig.maxOutputTokens = options.maxOutputTokens;
200
+ }
201
+ } else if (options.chatConfig?.maxOutputTokens !== undefined) {
202
+ if (options.chatConfig.maxOutputTokens === null) {
203
+ delete this.chatConfig.maxOutputTokens;
204
+ } else {
205
+ this.chatConfig.maxOutputTokens = options.chatConfig.maxOutputTokens;
206
+ }
207
+ } else {
208
+ this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
209
+ }
210
+
211
+ // Only add thinkingConfig if the model supports it
212
+ const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(pattern =>
213
+ pattern.test(this.modelName)
214
+ );
215
+
216
+ // Handle thinkingConfig - null explicitly removes it, undefined means not specified
217
+ if (options.thinkingConfig !== undefined) {
218
+ if (options.thinkingConfig === null) {
219
+ // Explicitly remove thinkingConfig if set to null
220
+ delete this.chatConfig.thinkingConfig;
221
+ if (log.level !== 'silent') {
222
+ log.debug(`thinkingConfig set to null - removed from configuration`);
223
+ }
224
+ } else if (modelSupportsThinking) {
225
+ // Handle thinkingConfig - merge with defaults
226
+ const thinkingConfig = {
227
+ ...DEFAULT_THINKING_CONFIG,
228
+ ...options.thinkingConfig
229
+ };
230
+ this.chatConfig.thinkingConfig = thinkingConfig;
231
+
232
+ if (log.level !== 'silent') {
233
+ log.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
234
+ }
235
+ } else {
236
+ if (log.level !== 'silent') {
237
+ log.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
238
+ }
239
+ }
240
+ }
241
+
171
242
  // response schema is optional, but if provided, it should be a valid JSON schema
172
243
  if (options.responseSchema) {
173
244
  this.chatConfig.responseSchema = options.responseSchema;
@@ -201,6 +272,7 @@ function AITransformFactory(options = {}) {
201
272
  if (log.level !== 'silent') {
202
273
  log.debug(`Creating AI Transformer with model: ${this.modelName}`);
203
274
  log.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
275
+ log.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
204
276
  }
205
277
 
206
278
  const ai = new GoogleGenAI({ apiKey: this.apiKey });
@@ -566,6 +638,173 @@ HELPERS
566
638
  ----
567
639
  */
568
640
 
641
+ /**
642
+ * Attempts to recover truncated JSON by progressively removing characters from the end
643
+ * until valid JSON is found or recovery fails
644
+ * @param {string} text - The potentially truncated JSON string
645
+ * @param {number} maxAttempts - Maximum number of characters to remove
646
+ * @returns {Object|null} - Parsed JSON object or null if recovery fails
647
+ */
648
+ function attemptJSONRecovery(text, maxAttempts = 100) {
649
+ if (!text || typeof text !== 'string') return null;
650
+
651
+ // First, try parsing as-is
652
+ try {
653
+ return JSON.parse(text);
654
+ } catch (e) {
655
+ // Continue with recovery
656
+ }
657
+
658
+ let workingText = text.trim();
659
+
660
+ // First attempt: try to close unclosed structures without removing characters
661
+ // Count open/close braces and brackets in the original text
662
+ let braces = 0;
663
+ let brackets = 0;
664
+ let inString = false;
665
+ let escapeNext = false;
666
+
667
+ for (let j = 0; j < workingText.length; j++) {
668
+ const char = workingText[j];
669
+
670
+ if (escapeNext) {
671
+ escapeNext = false;
672
+ continue;
673
+ }
674
+
675
+ if (char === '\\') {
676
+ escapeNext = true;
677
+ continue;
678
+ }
679
+
680
+ if (char === '"') {
681
+ inString = !inString;
682
+ continue;
683
+ }
684
+
685
+ if (!inString) {
686
+ if (char === '{') braces++;
687
+ else if (char === '}') braces--;
688
+ else if (char === '[') brackets++;
689
+ else if (char === ']') brackets--;
690
+ }
691
+ }
692
+
693
+ // Try to fix by just adding closing characters
694
+ if ((braces > 0 || brackets > 0 || inString) && workingText.length > 2) {
695
+ let fixedText = workingText;
696
+
697
+ // Close any open strings first
698
+ if (inString) {
699
+ fixedText += '"';
700
+ }
701
+
702
+ // Add missing closing characters
703
+ while (braces > 0) {
704
+ fixedText += '}';
705
+ braces--;
706
+ }
707
+ while (brackets > 0) {
708
+ fixedText += ']';
709
+ brackets--;
710
+ }
711
+
712
+ try {
713
+ const result = JSON.parse(fixedText);
714
+ if (log.level !== 'silent') {
715
+ log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
716
+ }
717
+ return result;
718
+ } catch (e) {
719
+ // Simple fix didn't work, continue with more aggressive recovery
720
+ }
721
+ }
722
+
723
+ // Second attempt: progressively remove characters from the end
724
+
725
+ for (let i = 0; i < maxAttempts && workingText.length > 2; i++) {
726
+ // Remove one character from the end
727
+ workingText = workingText.slice(0, -1);
728
+
729
+ // Count open/close braces and brackets
730
+ let braces = 0;
731
+ let brackets = 0;
732
+ let inString = false;
733
+ let escapeNext = false;
734
+
735
+ for (let j = 0; j < workingText.length; j++) {
736
+ const char = workingText[j];
737
+
738
+ if (escapeNext) {
739
+ escapeNext = false;
740
+ continue;
741
+ }
742
+
743
+ if (char === '\\') {
744
+ escapeNext = true;
745
+ continue;
746
+ }
747
+
748
+ if (char === '"') {
749
+ inString = !inString;
750
+ continue;
751
+ }
752
+
753
+ if (!inString) {
754
+ if (char === '{') braces++;
755
+ else if (char === '}') braces--;
756
+ else if (char === '[') brackets++;
757
+ else if (char === ']') brackets--;
758
+ }
759
+ }
760
+
761
+ // If we have balanced braces/brackets, try parsing
762
+ if (braces === 0 && brackets === 0 && !inString) {
763
+ try {
764
+ const result = JSON.parse(workingText);
765
+ if (log.level !== 'silent') {
766
+ log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by removing ${i + 1} characters from the end.`);
767
+ }
768
+ return result;
769
+ } catch (e) {
770
+ // Continue trying
771
+ }
772
+ }
773
+
774
+ // After a few attempts, try adding closing characters
775
+ if (i > 5) {
776
+ let fixedText = workingText;
777
+
778
+ // Close any open strings first
779
+ if (inString) {
780
+ fixedText += '"';
781
+ }
782
+
783
+ // Add missing closing characters
784
+ while (braces > 0) {
785
+ fixedText += '}';
786
+ braces--;
787
+ }
788
+ while (brackets > 0) {
789
+ fixedText += ']';
790
+ brackets--;
791
+ }
792
+
793
+ try {
794
+ const result = JSON.parse(fixedText);
795
+ if (log.level !== 'silent') {
796
+ log.warn(`JSON response appears truncated (possibly hit maxOutputTokens limit). Recovered by adding closing characters.`);
797
+ }
798
+ return result;
799
+ } catch (e) {
800
+ // Recovery failed, continue trying
801
+ }
802
+ }
803
+ }
804
+
805
+ return null;
806
+ }
807
+
569
808
  function isJSON(data) {
570
809
  try {
571
810
  const attempt = JSON.stringify(data);
@@ -663,6 +902,13 @@ function extractJSON(text) {
663
902
  return JSON.parse(cleanedText);
664
903
  }
665
904
 
905
+ // Strategy 6: Last resort - attempt recovery for potentially truncated JSON
906
+ // This is especially useful when maxOutputTokens might have cut off the response
907
+ const recoveredJSON = attemptJSONRecovery(text);
908
+ if (recoveredJSON !== null) {
909
+ return recoveredJSON;
910
+ }
911
+
666
912
  // If all else fails, throw an error with helpful information
667
913
  throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
668
914
  }
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "name": "ak-gemini",
3
3
  "author": "ak@mixpanel.com",
4
4
  "description": "AK's Generative AI Helper for doing... transforms",
5
- "version": "1.0.6",
5
+ "version": "1.0.8",
6
6
  "main": "index.js",
7
7
  "files": [
8
8
  "index.js",
@@ -33,15 +33,17 @@
33
33
  "homepage": "https://github.com/ak--47/ak-gemini#readme",
34
34
  "scripts": {
35
35
  "prepublishOnly": "npm run typecheck && npm run build:cjs",
36
- "post": "npm publish --access public",
36
+ "post": "npm publish",
37
37
  "release": "npm version patch && npm publish --access public",
38
38
  "update-deps": "npx npm-check-updates -u && npm install",
39
39
  "prune": "rm -rf tmp/*",
40
40
  "test": "node --no-warnings --experimental-vm-modules node_modules/jest/bin/jest.js",
41
41
  "test:unit": "npm test -- tests/module.test.js",
42
+ "test:fixed": "npm test -- --testNamePattern=\"should use context in the prompt and transform accordingly|should augment the payload as instructed by system instructions|should succeed on the first try if validation passes|should handle invalid model names|should handle multiple concurrent messages|should use the constructor-provided asyncValidator|should override system instructions from the file\"",
42
43
  "build:cjs": "esbuild index.js --bundle --platform=node --format=cjs --outfile=index.cjs --external:@google/genai --external:ak-tools --external:dotenv --external:pino-pretty --external:pino",
43
44
  "coverage": "node --no-warnings --experimental-vm-modules node_modules/jest/bin/jest.js --coverage",
44
- "typecheck": "tsc --noEmit"
45
+ "typecheck": "tsc --noEmit",
46
+ "update:gemini": "npm install @google/genai@latest"
45
47
  },
46
48
  "type": "module",
47
49
  "keywords": [
@@ -51,7 +53,7 @@
51
53
  ],
52
54
  "license": "ISC",
53
55
  "dependencies": {
54
- "@google/genai": "^1.25.0",
56
+ "@google/genai": "^1.34.0",
55
57
  "ak-tools": "^1.1.12",
56
58
  "dotenv": "^16.5.0",
57
59
  "pino": "^9.7.0",
package/types.d.ts CHANGED
@@ -1,4 +1,15 @@
1
- import type { GoogleGenAI } from '@google/genai';
1
+ import type { GoogleGenAI, ThinkingLevel } from '@google/genai';
2
+
3
+ export { ThinkingLevel };
4
+
5
+ export interface ThinkingConfig {
6
+ /** Indicates whether to include thoughts in the response. If true, thoughts are returned only if the model supports thought and thoughts are available. */
7
+ includeThoughts?: boolean;
8
+ /** Indicates the thinking budget in tokens. 0 is DISABLED. -1 is AUTOMATIC. The default values and allowed ranges are model dependent. */
9
+ thinkingBudget?: number;
10
+ /** Optional. The number of thoughts tokens that the model should generate. */
11
+ thinkingLevel?: ThinkingLevel;
12
+ }
2
13
 
3
14
  export interface SafetySetting {
4
15
  category: string; // The harm category
@@ -10,9 +21,11 @@ export interface ChatConfig {
10
21
  temperature?: number; // Controls randomness (0.0 to 1.0)
11
22
  topP?: number; // Controls diversity via nucleus sampling
12
23
  topK?: number; // Controls diversity by limiting top-k tokens
24
+ maxOutputTokens?: number; // Maximum number of tokens that can be generated in the response
13
25
  systemInstruction?: string; // System instruction for the model
14
26
  safetySettings?: SafetySetting[]; // Safety settings array
15
27
  responseSchema?: Object; // Schema for validating model responses
28
+ thinkingConfig?: ThinkingConfig; // Thinking features configuration
16
29
  [key: string]: any; // Additional properties for flexibility
17
30
  }
18
31
 
@@ -61,6 +74,8 @@ export interface AITransformerOptions {
61
74
  modelName?: string; // The Gemini model to use
62
75
  systemInstructions?: string; // Custom system instructions for the model
63
76
  chatConfig?: ChatConfig; // Configuration object for the chat session
77
+ thinkingConfig?: ThinkingConfig; // Thinking features configuration (defaults to thinkingBudget: 0, thinkingLevel: "MINIMAL")
78
+ maxOutputTokens?: number; // Maximum number of tokens that can be generated in the response (defaults to 100000)
64
79
  examplesFile?: string; // Path to JSON file containing transformation examples
65
80
  exampleData?: TransformationExample[]; // Inline examples to seed the transformer
66
81
  sourceKey?: string; // Key name for source data in examples (alias for promptKey)