ak-gemini 1.0.5 → 1.0.6

Files changed (6)
  1. package/README.md +4 -3
  2. package/index.cjs +284 -86
  3. package/index.js +417 -125
  4. package/package.json +21 -16
  5. package/types.d.ts +125 -0
  6. package/types.ts +0 -65
package/README.md CHANGED
@@ -7,7 +7,7 @@ Use this to power LLM-driven data pipelines, JSON mapping, or any automated AI t
 
 ## Features
 
-* **Model-Agnostic:** Use any Gemini model (`gemini-2.0-flash` by default)
+* **Model-Agnostic:** Use any Gemini model (`gemini-2.5-flash` by default)
 * **Declarative Few-shot Examples:** Seed transformations using example mappings, with support for custom keys (`PROMPT`, `ANSWER`, `CONTEXT`, or your own)
 * **Automatic Validation & Repair:** Validate outputs with your own async function; auto-repair failed payloads with LLM feedback loop (exponential backoff, fully configurable)
 * **Token Counting & Safety:** Preview the *exact* Gemini token consumption for any operation—including all examples, instructions, and your input—before sending, so you can avoid window errors and manage costs.
@@ -47,7 +47,7 @@ or pass it directly in the constructor options.
 import AITransformer from 'ak-gemini';
 
 const transformer = new AITransformer({
-  modelName: 'gemini-2.0-flash', // or your preferred Gemini model
+  modelName: 'gemini-2.5-flash', // or your preferred Gemini model
   sourceKey: 'INPUT', // Custom prompt key (default: 'PROMPT')
   targetKey: 'OUTPUT', // Custom answer key (default: 'ANSWER')
   contextKey: 'CONTEXT', // Optional, for per-example context
@@ -116,7 +116,7 @@ new AITransformer(options)
 
 | Option | Type | Default | Description |
 | ------------------ | ------ | ------------------ | ------------------------------------------------- |
-| modelName | string | 'gemini-2.0-flash' | Gemini model to use |
+| modelName | string | 'gemini-2.5-flash' | Gemini model to use |
 | sourceKey | string | 'PROMPT' | Key for prompt/example input |
 | targetKey | string | 'ANSWER' | Key for expected output in examples |
 | contextKey | string | 'CONTEXT' | Key for per-example context (optional) |
@@ -125,6 +125,7 @@ new AITransformer(options)
 | responseSchema | object | null | Optional JSON schema for strict output validation |
 | maxRetries | number | 3 | Retries for validation+rebuild loop |
 | retryDelay | number | 1000 | Initial retry delay in ms (exponential backoff) |
+| logLevel | string | 'info' | Log level: 'trace', 'debug', 'info', 'warn', 'error', 'fatal', or 'none' |
 | chatConfig | object | ... | Gemini chat config overrides |
 | systemInstructions | string | ... | System prompt for Gemini |
 
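For quick orientation, here is a minimal constructor sketch using the newly documented `logLevel` option; everything except the option names and defaults listed in the table above is illustrative.

```js
import AITransformer from 'ak-gemini';

// 'logLevel' is the option added in this release; 'none' silences the internal logger.
const transformer = new AITransformer({
  modelName: 'gemini-2.5-flash',
  logLevel: 'warn', // 'trace' | 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'none'
  maxRetries: 3,
  retryDelay: 1000,
});
```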
package/index.cjs CHANGED
@@ -29,7 +29,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // index.js
 var index_exports = {};
 __export(index_exports, {
-  default: () => AITransformer,
+  default: () => index_default,
   log: () => logger_default
 });
 module.exports = __toCommonJS(index_exports);
@@ -56,10 +56,7 @@ var logger_default = logger;
 // index.js
 var import_meta = {};
 import_dotenv.default.config();
-var { NODE_ENV = "unknown", GEMINI_API_KEY } = process.env;
-if (NODE_ENV === "dev") logger_default.level = "debug";
-if (NODE_ENV === "test") logger_default.level = "warn";
-if (NODE_ENV.startsWith("prod")) logger_default.level = "error";
+var { NODE_ENV = "unknown", GEMINI_API_KEY, LOG_LEVEL = "" } = process.env;
 var DEFAULT_SAFETY_SETTINGS = [
   { category: import_genai.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE },
   { category: import_genai.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE }
@@ -75,7 +72,7 @@ When presented with new Source JSON, apply the learned transformation rules to p
 
 Always respond ONLY with a valid JSON object that strictly adheres to the expected output format.
 
-Do not include any additional text, explanations, or formatting before or after the JSON object.
+Do not include any additional text, explanations, or formatting before or after the JSON object.
 `;
 var DEFAULT_CHAT_CONFIG = {
   responseMimeType: "application/json",
@@ -95,26 +92,60 @@ var AITransformer = class {
     this.promptKey = "";
     this.answerKey = "";
     this.contextKey = "";
+    this.explanationKey = "";
+    this.systemInstructionKey = "";
     this.maxRetries = 3;
     this.retryDelay = 1e3;
     this.systemInstructions = "";
     this.chatConfig = {};
     this.apiKey = GEMINI_API_KEY;
+    this.onlyJSON = true;
+    this.asyncValidator = null;
+    this.logLevel = "info";
     AITransformFactory.call(this, options);
     this.init = initChat.bind(this);
     this.seed = seedWithExamples.bind(this);
-    this.message = transformJSON.bind(this);
+    this.rawMessage = rawMessage.bind(this);
+    this.message = (payload, opts = {}, validatorFn = null) => {
+      return prepareAndValidateMessage.call(this, payload, opts, validatorFn || this.asyncValidator);
+    };
     this.rebuild = rebuildPayload.bind(this);
     this.reset = resetChat.bind(this);
     this.getHistory = getChatHistory.bind(this);
-    this.transformWithValidation = transformWithValidation.bind(this);
+    this.messageAndValidate = prepareAndValidateMessage.bind(this);
+    this.transformWithValidation = prepareAndValidateMessage.bind(this);
     this.estimate = estimateTokenUsage.bind(this);
+    this.estimateTokenUsage = estimateTokenUsage.bind(this);
   }
 };
+var index_default = AITransformer;
 function AITransformFactory(options = {}) {
-  this.modelName = options.modelName || "gemini-2.0-flash";
+  this.modelName = options.modelName || "gemini-2.5-flash";
   this.systemInstructions = options.systemInstructions || DEFAULT_SYSTEM_INSTRUCTIONS;
-  this.apiKey = options.apiKey || GEMINI_API_KEY;
+  if (options.logLevel) {
+    this.logLevel = options.logLevel;
+    if (this.logLevel === "none") {
+      logger_default.level = "silent";
+    } else {
+      logger_default.level = this.logLevel;
+    }
+  } else if (LOG_LEVEL) {
+    this.logLevel = LOG_LEVEL;
+    logger_default.level = LOG_LEVEL;
+  } else if (NODE_ENV === "dev") {
+    this.logLevel = "debug";
+    logger_default.level = "debug";
+  } else if (NODE_ENV === "test") {
+    this.logLevel = "warn";
+    logger_default.level = "warn";
+  } else if (NODE_ENV.startsWith("prod")) {
+    this.logLevel = "error";
+    logger_default.level = "error";
+  } else {
+    this.logLevel = "info";
+    logger_default.level = "info";
+  }
+  this.apiKey = options.apiKey !== void 0 && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
   if (!this.apiKey) throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var.");
   this.chatConfig = {
     ...DEFAULT_CHAT_CONFIG,
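The new branching above resolves the logger level in a fixed order: an explicit `options.logLevel` wins (with `'none'` mapped to the logger's `'silent'` level), then the `LOG_LEVEL` environment variable, then `NODE_ENV` (`dev` → debug, `test` → warn, `prod*` → error), and finally `'info'`. A sketch of that precedence; the API key is a placeholder, and note that `LOG_LEVEL`/`NODE_ENV` are captured once when the module loads.

```js
import AITransformer from 'ak-gemini';

// 1. An explicit option always wins; 'none' becomes 'silent'.
new AITransformer({ apiKey: 'placeholder-key', logLevel: 'none' });

// 2. With no option, LOG_LEVEL (read at module load) is used next,
// 3. then NODE_ENV ('dev' -> 'debug', 'test' -> 'warn', 'prod*' -> 'error'),
// 4. otherwise the level falls back to 'info'.
new AITransformer({ apiKey: 'placeholder-key' });
```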
@@ -126,22 +157,28 @@ function AITransformFactory(options = {}) {
   }
   this.examplesFile = options.examplesFile || null;
   this.exampleData = options.exampleData || null;
-  this.promptKey = options.sourceKey || "PROMPT";
-  this.answerKey = options.targetKey || "ANSWER";
+  this.promptKey = options.promptKey || options.sourceKey || "PROMPT";
+  this.answerKey = options.answerKey || options.targetKey || "ANSWER";
   this.contextKey = options.contextKey || "CONTEXT";
+  this.explanationKey = options.explanationKey || "EXPLANATION";
+  this.systemInstructionsKey = options.systemInstructionsKey || "SYSTEM";
   this.maxRetries = options.maxRetries || 3;
   this.retryDelay = options.retryDelay || 1e3;
+  this.asyncValidator = options.asyncValidator || null;
+  this.onlyJSON = options.onlyJSON !== void 0 ? options.onlyJSON : true;
   if (this.promptKey === this.answerKey) {
     throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
   }
-  logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
-  logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
+  if (logger_default.level !== "silent") {
+    logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
+    logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
+  }
   const ai = new import_genai.GoogleGenAI({ apiKey: this.apiKey });
   this.genAIClient = ai;
   this.chat = null;
 }
-async function initChat() {
-  if (this.chat) return;
+async function initChat(force = false) {
+  if (this.chat && !force) return;
   logger_default.debug(`Initializing Gemini chat session with model: ${this.modelName}...`);
   this.chat = await this.genAIClient.chats.create({
     model: this.modelName,
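The factory now accepts `promptKey`/`answerKey` as aliases for `sourceKey`/`targetKey` and adds `explanationKey`, `systemInstructionsKey`, `asyncValidator`, and `onlyJSON`. A constructor sketch exercising those options; the key names and validator body are illustrative, and `GEMINI_API_KEY` is assumed to be set.

```js
import AITransformer from 'ak-gemini';

const transformer = new AITransformer({
  apiKey: process.env.GEMINI_API_KEY,
  promptKey: 'INPUT',              // alias for sourceKey
  answerKey: 'OUTPUT',             // alias for targetKey
  explanationKey: 'WHY',           // key for optional per-example explanations
  systemInstructionsKey: 'SYSTEM', // key for per-example system instructions
  onlyJSON: true,
  // Default validator that message() falls back to when none is passed per call.
  asyncValidator: async (payload) => {
    if (typeof payload !== 'object') throw new Error('expected an object');
    return payload;
  },
});
```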
@@ -149,6 +186,12 @@ async function initChat() {
     config: this.chatConfig,
     history: []
   });
+  try {
+    await this.genAIClient.models.list();
+    logger_default.debug("Gemini API connection successful.");
+  } catch (e) {
+    throw new Error(`Gemini chat initialization failed: ${e.message}`);
+  }
   logger_default.debug("Gemini chat session initialized.");
 }
 async function seedWithExamples(examples) {
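`initChat` now takes a `force` flag (used below when seeding finds new system instructions) and probes the API with `models.list()` so a bad key or network problem surfaces immediately. A short sketch, assuming `GEMINI_API_KEY` is set.

```js
import AITransformer from 'ak-gemini';

const transformer = new AITransformer({ apiKey: process.env.GEMINI_API_KEY });

try {
  await transformer.init();      // no-op when a chat session already exists
  await transformer.init(true);  // force: recreate the chat session
} catch (err) {
  // Connectivity problems are rethrown as "Gemini chat initialization failed: ..."
  console.error(err.message);
}
```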
@@ -156,117 +199,130 @@ async function seedWithExamples(examples) {
   if (!examples || !Array.isArray(examples) || examples.length === 0) {
     if (this.examplesFile) {
       logger_default.debug(`No examples provided, loading from file: ${this.examplesFile}`);
-      examples = await import_ak_tools.default.load(import_path.default.resolve(this.examplesFile), true);
+      try {
+        examples = await import_ak_tools.default.load(import_path.default.resolve(this.examplesFile), true);
+      } catch (err) {
+        throw new Error(`Could not load examples from file: ${this.examplesFile}. Please check the file path and format.`);
+      }
+    } else if (this.exampleData) {
+      logger_default.debug(`Using example data provided in options.`);
+      if (Array.isArray(this.exampleData)) {
+        examples = this.exampleData;
+      } else {
+        throw new Error(`Invalid example data provided. Expected an array of examples.`);
+      }
     } else {
       logger_default.debug("No examples provided and no examples file specified. Skipping seeding.");
       return;
     }
   }
+  const instructionExample = examples.find((ex) => ex[this.systemInstructionsKey]);
+  if (instructionExample) {
+    logger_default.debug(`Found system instructions in examples; reinitializing chat with new instructions.`);
+    this.systemInstructions = instructionExample[this.systemInstructionsKey];
+    this.chatConfig.systemInstruction = this.systemInstructions;
+    await this.init(true);
+  }
   logger_default.debug(`Seeding chat with ${examples.length} transformation examples...`);
   const historyToAdd = [];
   for (const example of examples) {
     const contextValue = example[this.contextKey] || "";
     const promptValue = example[this.promptKey] || "";
     const answerValue = example[this.answerKey] || "";
+    const explanationValue = example[this.explanationKey] || "";
+    let userText = "";
+    let modelResponse = {};
     if (contextValue) {
-      let contextText = import_ak_tools.default.isJSON(contextValue) ? JSON.stringify(contextValue, null, 2) : contextValue;
-      historyToAdd.push({
-        role: "user",
-        parts: [{ text: `Context: ${contextText}` }]
-      });
-      historyToAdd.push({
-        role: "model",
-        parts: [{ text: "I understand the context." }]
-      });
+      let contextText = isJSON(contextValue) ? JSON.stringify(contextValue, null, 2) : contextValue;
+      userText += `CONTEXT:
+${contextText}
+
+`;
     }
     if (promptValue) {
-      let promptText = import_ak_tools.default.isJSON(promptValue) ? JSON.stringify(promptValue, null, 2) : promptValue;
-      historyToAdd.push({ role: "user", parts: [{ text: promptText }] });
+      let promptText = isJSON(promptValue) ? JSON.stringify(promptValue, null, 2) : promptValue;
+      userText += promptText;
     }
-    if (answerValue) {
-      let answerText = import_ak_tools.default.isJSON(answerValue) ? JSON.stringify(answerValue, null, 2) : answerValue;
-      historyToAdd.push({ role: "model", parts: [{ text: answerText }] });
+    if (answerValue) modelResponse.data = answerValue;
+    if (explanationValue) modelResponse.explanation = explanationValue;
+    const modelText = JSON.stringify(modelResponse, null, 2);
+    if (userText.trim().length && modelText.trim().length > 0) {
+      historyToAdd.push({ role: "user", parts: [{ text: userText.trim() }] });
+      historyToAdd.push({ role: "model", parts: [{ text: modelText.trim() }] });
     }
   }
   const currentHistory = this?.chat?.getHistory() || [];
+  logger_default.debug(`Adding ${historyToAdd.length} examples to chat history (${currentHistory.length} current examples)...`);
   this.chat = await this.genAIClient.chats.create({
     model: this.modelName,
     // @ts-ignore
     config: this.chatConfig,
     history: [...currentHistory, ...historyToAdd]
   });
-  logger_default.debug("Transformation examples seeded successfully.");
+  const newHistory = this.chat.getHistory();
+  logger_default.debug(`Created new chat session with ${newHistory.length} examples.`);
+  return newHistory;
 }
-async function transformJSON(sourcePayload) {
+async function rawMessage(sourcePayload) {
   if (!this.chat) {
-    throw new Error("Chat session not initialized. Call initChat() or seedWithExamples() first.");
+    throw new Error("Chat session not initialized.");
   }
-  let result;
-  let actualPayload;
-  if (sourcePayload && import_ak_tools.default.isJSON(sourcePayload)) actualPayload = JSON.stringify(sourcePayload, null, 2);
-  else if (typeof sourcePayload === "string") actualPayload = sourcePayload;
-  else throw new Error("Invalid source payload. Must be a JSON object or a valid JSON string.");
+  const actualPayload = typeof sourcePayload === "string" ? sourcePayload : JSON.stringify(sourcePayload, null, 2);
   try {
-    result = await this.chat.sendMessage({ message: actualPayload });
+    const result = await this.chat.sendMessage({ message: actualPayload });
+    const modelResponse = result.text;
+    const extractedJSON = extractJSON(modelResponse);
+    if (extractedJSON?.data) {
+      return extractedJSON.data;
+    }
+    return extractedJSON;
   } catch (error) {
-    logger_default.error("Error with Gemini API:", error);
+    if (this.onlyJSON && error.message.includes("Could not extract valid JSON")) {
+      throw new Error(`Invalid JSON response from Gemini: ${error.message}`);
+    }
     throw new Error(`Transformation failed: ${error.message}`);
   }
-  try {
-    const modelResponse = result.text;
-    const parsedResponse = JSON.parse(modelResponse);
-    return parsedResponse;
-  } catch (parseError) {
-    logger_default.error("Error parsing Gemini response:", parseError);
-    throw new Error(`Invalid JSON response from Gemini: ${parseError.message}`);
-  }
 }
-async function transformWithValidation(sourcePayload, validatorFn, options = {}) {
+async function prepareAndValidateMessage(sourcePayload, options = {}, validatorFn = null) {
+  if (!this.chat) {
+    throw new Error("Chat session not initialized. Please call init() first.");
+  }
   const maxRetries = options.maxRetries ?? this.maxRetries;
   const retryDelay = options.retryDelay ?? this.retryDelay;
-  let lastPayload = null;
   let lastError = null;
+  let lastPayload = null;
+  if (sourcePayload && isJSON(sourcePayload)) {
+    lastPayload = JSON.stringify(sourcePayload, null, 2);
+  } else if (typeof sourcePayload === "string") {
+    lastPayload = sourcePayload;
+  } else if (typeof sourcePayload === "boolean" || typeof sourcePayload === "number") {
+    lastPayload = sourcePayload.toString();
+  } else if (sourcePayload === null || sourcePayload === void 0) {
+    lastPayload = JSON.stringify({});
+  } else {
+    throw new Error("Invalid source payload. Must be a JSON object or string.");
+  }
   for (let attempt = 0; attempt <= maxRetries; attempt++) {
     try {
-      const transformedPayload = attempt === 0 ? await this.message(sourcePayload) : await this.rebuild(lastPayload, lastError.message);
-      const validatedPayload = await validatorFn(transformedPayload);
-      logger_default.debug(`Transformation and validation succeeded on attempt ${attempt + 1}`);
-      return validatedPayload;
+      const transformedPayload = attempt === 0 ? await this.rawMessage(lastPayload) : await this.rebuild(lastPayload, lastError.message);
+      lastPayload = transformedPayload;
+      if (validatorFn) {
+        await validatorFn(transformedPayload);
+      }
+      logger_default.debug(`Transformation succeeded on attempt ${attempt + 1}`);
+      return transformedPayload;
     } catch (error) {
       lastError = error;
-      if (attempt === 0) {
-        lastPayload = await this.message(sourcePayload).catch(() => null);
-      }
-      if (attempt < maxRetries) {
-        const delay = retryDelay * Math.pow(2, attempt);
-        logger_default.warn(`Attempt ${attempt + 1} failed, retrying in ${delay}ms...`, error.message);
-        await new Promise((res) => setTimeout(res, delay));
-      } else {
-        logger_default.error(`All ${maxRetries + 1} attempts failed`);
-        throw new Error(`Transformation with validation failed after ${maxRetries + 1} attempts. Last error: ${error.message}`);
+      logger_default.warn(`Attempt ${attempt + 1} failed: ${error.message}`);
+      if (attempt >= maxRetries) {
+        logger_default.error(`All ${maxRetries + 1} attempts failed.`);
+        throw new Error(`Transformation failed after ${maxRetries + 1} attempts. Last error: ${error.message}`);
       }
+      const delay = retryDelay * Math.pow(2, attempt);
+      await new Promise((res) => setTimeout(res, delay));
     }
   }
 }
-async function estimateTokenUsage(nextPayload) {
-  const contents = [];
-  if (this.systemInstructions) {
-    contents.push({ parts: [{ text: this.systemInstructions }] });
-  }
-  if (this.chat && typeof this.chat.getHistory === "function") {
-    const history = this.chat.getHistory();
-    if (Array.isArray(history) && history.length > 0) {
-      contents.push(...history);
-    }
-  }
-  const nextMessage = typeof nextPayload === "string" ? nextPayload : JSON.stringify(nextPayload, null, 2);
-  contents.push({ parts: [{ text: nextMessage }] });
-  const resp = await this.genAIClient.models.countTokens({
-    model: this.modelName,
-    contents
-  });
-  return resp;
-}
 async function rebuildPayload(lastPayload, serverError) {
   await this.init();
   const prompt = `
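Taken together, the seeding and messaging changes above mean each example is folded into one user turn (CONTEXT plus prompt) and one model turn (`{ data, explanation }`), and `message()` now routes through the validate-and-retry loop. A sketch under those assumptions; the example keys are the factory defaults (`PROMPT`, `ANSWER`, `CONTEXT`, `EXPLANATION`, `SYSTEM`) and the validator is hypothetical.

```js
import AITransformer from 'ak-gemini';

const transformer = new AITransformer({ apiKey: process.env.GEMINI_API_KEY });
await transformer.init();

// A SYSTEM entry re-seeds the system instructions and forces a re-init (see above).
await transformer.seed([
  { SYSTEM: 'Map people records to the target schema.' },
  {
    CONTEXT: { locale: 'en-US' },
    PROMPT: { name: 'Lynn' },
    ANSWER: { full_name: 'Lynn' },
    EXPLANATION: 'Rename name to full_name.',
  },
]);

// Hypothetical validator: throwing makes the loop retry via rebuild() with backoff.
const validator = async (payload) => {
  if (!payload.full_name) throw new Error('full_name is required');
  return payload;
};

const result = await transformer.message({ name: 'AK' }, { maxRetries: 2 }, validator);
```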
@@ -276,6 +332,7 @@ The server's error message is quoted afterward.
 ---------------- BAD PAYLOAD ----------------
 ${JSON.stringify(lastPayload, null, 2)}
 
+
 ---------------- SERVER ERROR ----------------
 ${serverError}
 
@@ -295,6 +352,25 @@ Respond with JSON only \u2013 no comments or explanations.
     throw new Error(`Gemini returned non-JSON while repairing payload: ${parseErr.message}`);
   }
 }
+async function estimateTokenUsage(nextPayload) {
+  const contents = [];
+  if (this.systemInstructions) {
+    contents.push({ parts: [{ text: this.systemInstructions }] });
+  }
+  if (this.chat && typeof this.chat.getHistory === "function") {
+    const history = this.chat.getHistory();
+    if (Array.isArray(history) && history.length > 0) {
+      contents.push(...history);
+    }
+  }
+  const nextMessage = typeof nextPayload === "string" ? nextPayload : JSON.stringify(nextPayload, null, 2);
+  contents.push({ parts: [{ text: nextMessage }] });
+  const resp = await this.genAIClient.models.countTokens({
+    model: this.modelName,
+    contents
+  });
+  return resp;
+}
 async function resetChat() {
   if (this.chat) {
     logger_default.debug("Resetting Gemini chat session...");
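`estimateTokenUsage` itself is unchanged, just relocated below `rebuildPayload`; per the constructor changes it is now also exposed as `transformer.estimateTokenUsage` alongside the existing `estimate` alias. A small sketch, assuming `GEMINI_API_KEY` is set.

```js
import AITransformer from 'ak-gemini';

const transformer = new AITransformer({ apiKey: process.env.GEMINI_API_KEY });
await transformer.init();

// Counts tokens over system instructions + chat history + the next payload.
const usage = await transformer.estimateTokenUsage({ name: 'Lynn' });
console.log(usage); // the raw countTokens response from @google/genai
```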
@@ -316,13 +392,135 @@ function getChatHistory() {
   }
   return this.chat.getHistory();
 }
+function isJSON(data) {
+  try {
+    const attempt = JSON.stringify(data);
+    if (attempt?.startsWith("{") || attempt?.startsWith("[")) {
+      if (attempt?.endsWith("}") || attempt?.endsWith("]")) {
+        return true;
+      }
+    }
+    return false;
+  } catch (e) {
+    return false;
+  }
+}
+function isJSONStr(string) {
+  if (typeof string !== "string") return false;
+  try {
+    const result = JSON.parse(string);
+    const type = Object.prototype.toString.call(result);
+    return type === "[object Object]" || type === "[object Array]";
+  } catch (err) {
+    return false;
+  }
+}
+function extractJSON(text) {
+  if (!text || typeof text !== "string") {
+    throw new Error("No text provided for JSON extraction");
+  }
+  if (isJSONStr(text.trim())) {
+    return JSON.parse(text.trim());
+  }
+  const codeBlockPatterns = [
+    /```json\s*\n?([\s\S]*?)\n?\s*```/gi,
+    /```\s*\n?([\s\S]*?)\n?\s*```/gi
+  ];
+  for (const pattern of codeBlockPatterns) {
+    const matches = text.match(pattern);
+    if (matches) {
+      for (const match of matches) {
+        const jsonContent = match.replace(/```json\s*\n?/gi, "").replace(/```\s*\n?/gi, "").trim();
+        if (isJSONStr(jsonContent)) {
+          return JSON.parse(jsonContent);
+        }
+      }
+    }
+  }
+  const jsonPatterns = [
+    // Match complete JSON objects
+    /\{[\s\S]*\}/g,
+    // Match complete JSON arrays
+    /\[[\s\S]*\]/g
+  ];
+  for (const pattern of jsonPatterns) {
+    const matches = text.match(pattern);
+    if (matches) {
+      for (const match of matches) {
+        const candidate = match.trim();
+        if (isJSONStr(candidate)) {
+          return JSON.parse(candidate);
+        }
+      }
+    }
+  }
+  const advancedExtract = findCompleteJSONStructures(text);
+  if (advancedExtract.length > 0) {
+    for (const candidate of advancedExtract) {
+      if (isJSONStr(candidate)) {
+        return JSON.parse(candidate);
+      }
+    }
+  }
+  const cleanedText = text.replace(/^\s*Sure,?\s*here\s+is\s+your?\s+.*?[:\n]/gi, "").replace(/^\s*Here\s+is\s+the\s+.*?[:\n]/gi, "").replace(/^\s*The\s+.*?is\s*[:\n]/gi, "").replace(/\/\*[\s\S]*?\*\//g, "").replace(/\/\/.*$/gm, "").trim();
+  if (isJSONStr(cleanedText)) {
+    return JSON.parse(cleanedText);
+  }
+  throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
+}
+function findCompleteJSONStructures(text) {
+  const results = [];
+  const startChars = ["{", "["];
+  for (let i = 0; i < text.length; i++) {
+    if (startChars.includes(text[i])) {
+      const extracted = extractCompleteStructure(text, i);
+      if (extracted) {
+        results.push(extracted);
+      }
+    }
+  }
+  return results;
+}
+function extractCompleteStructure(text, startPos) {
+  const startChar = text[startPos];
+  const endChar = startChar === "{" ? "}" : "]";
+  let depth = 0;
+  let inString = false;
+  let escaped = false;
+  for (let i = startPos; i < text.length; i++) {
+    const char = text[i];
+    if (escaped) {
+      escaped = false;
+      continue;
+    }
+    if (char === "\\" && inString) {
+      escaped = true;
+      continue;
+    }
+    if (char === '"' && !escaped) {
+      inString = !inString;
+      continue;
+    }
+    if (!inString) {
+      if (char === startChar) {
+        depth++;
+      } else if (char === endChar) {
+        depth--;
+        if (depth === 0) {
+          return text.substring(startPos, i + 1);
+        }
+      }
+    }
+  }
+  return null;
+}
 if (import_meta.url === new URL(`file://${process.argv[1]}`).href) {
   logger_default.info("RUNNING AI Transformer as standalone script...");
   (async () => {
     try {
       logger_default.info("Initializing AI Transformer...");
       const transformer = new AITransformer({
-        modelName: "gemini-2.0-flash",
+        modelName: "gemini-2.5-flash",
         sourceKey: "INPUT",
         // Custom source key
         targetKey: "OUTPUT",
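The new extraction helpers above are internal, but they explain the behavioral change in `rawMessage`: replies wrapped in code fences or prose preambles are now tolerated, and a top-level `data` field is unwrapped. The following is a simplified, standalone re-statement of the code-fence path only (not an export of the package); the sample reply is illustrative.

````js
// Simplified re-statement of the ```json code-fence branch of extractJSON above.
function extractFencedJSON(text) {
  const match = text.match(/```json\s*\n?([\s\S]*?)\n?\s*```/i);
  if (!match) return null;
  try { return JSON.parse(match[1].trim()); } catch { return null; }
}

const reply = 'Sure, here is your JSON:\n```json\n{ "data": { "full_name": "Lynn" } }\n```';
console.log(extractFencedJSON(reply)); // logs { data: { full_name: 'Lynn' } }; rawMessage would return the inner data
````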
@@ -360,7 +558,7 @@ if (import_meta.url === new URL(`file://${process.argv[1]}`).href) {
       }
       return payload;
     };
-    const validatedResponse = await transformer.transformWithValidation(
+    const validatedResponse = await transformer.messageAndValidate(
      { "name": "Lynn" },
      mockValidator
    );
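One compatibility note grounded in the hunks above: `transformWithValidation` is kept as an alias of the new `messageAndValidate` (both bind `prepareAndValidateMessage`), but the argument order changed from `(payload, validatorFn, options)` to `(payload, options, validatorFn)`, so a validator passed in the second position, as in the script above, appears to land in the options slot. A sketch with the new order; the validator is hypothetical and `GEMINI_API_KEY` is assumed to be set.

```js
import AITransformer from 'ak-gemini';

const transformer = new AITransformer({ apiKey: process.env.GEMINI_API_KEY });
await transformer.init();

const mockValidator = async (payload) => {
  if (!payload) throw new Error('empty payload');
  return payload;
};

// New argument order: (payload, options, validatorFn); both names call the same function.
const viaNewName = await transformer.messageAndValidate({ name: 'Lynn' }, {}, mockValidator);
const viaOldName = await transformer.transformWithValidation({ name: 'Lynn' }, {}, mockValidator);
```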