ak-gemini 1.1.13 → 1.2.0

Files changed (6)
  1. package/agent.js +481 -0
  2. package/index.cjs +503 -31
  3. package/index.js +10 -9
  4. package/package.json +12 -10
  5. package/tools.js +134 -0
  6. package/types.d.ts +63 -0
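The headline change in 1.2.0 is a new AIAgent class (package/agent.js, bundled into package/index.cjs and added to the package exports alongside HarmBlockThreshold, HarmCategory, and ThinkingLevel). It wraps @google/genai with three built-in tools (http_get, http_post, write_markdown), a chat()/stream() tool-use loop, and optional Vertex AI support. A minimal usage sketch, pieced together from the constructor defaults and the JSDoc @example blocks visible in the diff below; the import path and callback wiring are assumptions for illustration, not part of the published diff:

// Sketch only: assumes GEMINI_API_KEY is set in the environment and that the
// package root export surfaces AIAgent (index.cjs below adds it to module.exports).
import { AIAgent } from 'ak-gemini';

const agent = new AIAgent({
  modelName: 'gemini-2.5-flash',   // constructor default per the diff
  systemPrompt: 'You are a helpful research assistant.',
  onMarkdown: (filename, content) => console.log(`markdown: ${filename} (${content.length} chars)`)
});

// Non-streaming: chat() runs the http_get/http_post/write_markdown tool loop internally.
const res = await agent.chat('Fetch https://api.example.com/users and summarize the results');
console.log(res.text, res.toolCalls.length, res.usage?.totalTokens);

// Streaming: events are 'text', 'tool_call', 'tool_result', 'markdown', and 'done'.
for await (const event of agent.stream('Analyze this API and write a report')) {
  if (event.type === 'text') process.stdout.write(event.text);
  if (event.type === 'done') console.log(`\nTokens: ${event.usage?.totalTokens}`);
}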
package/index.cjs CHANGED
@@ -29,16 +29,17 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // index.js
  var index_exports = {};
  __export(index_exports, {
- HarmBlockThreshold: () => import_genai.HarmBlockThreshold,
- HarmCategory: () => import_genai.HarmCategory,
- ThinkingLevel: () => import_genai.ThinkingLevel,
+ AIAgent: () => agent_default,
+ HarmBlockThreshold: () => import_genai2.HarmBlockThreshold,
+ HarmCategory: () => import_genai2.HarmCategory,
+ ThinkingLevel: () => import_genai2.ThinkingLevel,
  attemptJSONRecovery: () => attemptJSONRecovery,
  default: () => index_default,
  log: () => logger_default
  });
  module.exports = __toCommonJS(index_exports);
- var import_dotenv = __toESM(require("dotenv"), 1);
- var import_genai = require("@google/genai");
+ var import_dotenv2 = __toESM(require("dotenv"), 1);
+ var import_genai2 = require("@google/genai");
  var import_ak_tools = __toESM(require("ak-tools"), 1);
  var import_path = __toESM(require("path"), 1);
 
@@ -59,14 +60,484 @@ var logger = (0, import_pino.default)({
  });
  var logger_default = logger;
 
- // index.js
- var import_meta = {};
+ // agent.js
+ var import_dotenv = __toESM(require("dotenv"), 1);
+ var import_genai = require("@google/genai");
+
+ // tools.js
+ var MAX_RESPONSE_LENGTH = 5e4;
+ function parseBody(text) {
+ const body = text.length > MAX_RESPONSE_LENGTH ? text.slice(0, MAX_RESPONSE_LENGTH) + "\n...[TRUNCATED]" : text;
+ try {
+ return JSON.parse(body);
+ } catch {
+ return body;
+ }
+ }
+ var BUILT_IN_DECLARATIONS = [
+ {
+ name: "http_get",
+ description: "Make an HTTP GET request to any URL. Returns the response status and body as text. Use for fetching web pages, REST APIs, or any HTTP resource.",
+ parametersJsonSchema: {
+ type: "object",
+ properties: {
+ url: { type: "string", description: "The full URL to request (including https://)" },
+ headers: {
+ type: "object",
+ description: "Optional HTTP headers as key-value pairs",
+ additionalProperties: { type: "string" }
+ }
+ },
+ required: ["url"]
+ }
+ },
+ {
+ name: "http_post",
+ description: "Make an HTTP POST request to any URL with a JSON body. Returns the response status and body as text.",
+ parametersJsonSchema: {
+ type: "object",
+ properties: {
+ url: { type: "string", description: "The full URL to request (including https://)" },
+ body: { type: "object", description: "The JSON body to send" },
+ headers: {
+ type: "object",
+ description: "Optional HTTP headers as key-value pairs",
+ additionalProperties: { type: "string" }
+ }
+ },
+ required: ["url"]
+ }
+ },
+ {
+ name: "write_markdown",
+ description: "Generate a structured markdown document such as a report, analysis, summary, or formatted findings. The content will be captured and returned to the caller.",
+ parametersJsonSchema: {
+ type: "object",
+ properties: {
+ filename: { type: "string", description: 'Suggested filename for the document (e.g. "report.md")' },
+ title: { type: "string", description: "Document title" },
+ content: { type: "string", description: "Full markdown content of the document" }
+ },
+ required: ["filename", "content"]
+ }
+ }
+ ];
+ async function executeBuiltInTool(name, args, options = {}) {
+ const { httpTimeout = 3e4, onToolCall, onMarkdown } = options;
+ if (onToolCall) {
+ try {
+ onToolCall(name, args);
+ } catch (e) {
+ logger_default.warn(`onToolCall callback error: ${e.message}`);
+ }
+ }
+ switch (name) {
+ case "http_get": {
+ logger_default.debug(`http_get: ${args.url}`);
+ const resp = await fetch(args.url, {
+ method: "GET",
+ headers: args.headers || {},
+ signal: AbortSignal.timeout(httpTimeout)
+ });
+ const text = await resp.text();
+ return { status: resp.status, statusText: resp.statusText, body: parseBody(text) };
+ }
+ case "http_post": {
+ logger_default.debug(`http_post: ${args.url}`);
+ const headers = { "Content-Type": "application/json", ...args.headers || {} };
+ const resp = await fetch(args.url, {
+ method: "POST",
+ headers,
+ body: args.body ? JSON.stringify(args.body) : void 0,
+ signal: AbortSignal.timeout(httpTimeout)
+ });
+ const text = await resp.text();
+ return { status: resp.status, statusText: resp.statusText, body: parseBody(text) };
+ }
+ case "write_markdown": {
+ logger_default.debug(`write_markdown: ${args.filename}`);
+ if (onMarkdown) {
+ try {
+ onMarkdown(args.filename, args.content);
+ } catch (e) {
+ logger_default.warn(`onMarkdown callback error: ${e.message}`);
+ }
+ }
+ return { written: true, filename: args.filename, length: args.content.length };
+ }
+ default:
+ throw new Error(`Unknown tool: ${name}`);
+ }
+ }
+
+ // agent.js
  import_dotenv.default.config();
- var { NODE_ENV = "unknown", GEMINI_API_KEY, LOG_LEVEL = "" } = process.env;
+ var { NODE_ENV = "unknown", LOG_LEVEL = "" } = process.env;
  var DEFAULT_SAFETY_SETTINGS = [
  { category: import_genai.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE },
  { category: import_genai.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE }
  ];
+ var DEFAULT_THINKING_CONFIG = {
+ thinkingBudget: 0
+ };
+ var THINKING_SUPPORTED_MODELS = [
+ /^gemini-3-flash(-preview)?$/,
+ /^gemini-3-pro(-preview|-image-preview)?$/,
+ /^gemini-2\.5-pro/,
+ /^gemini-2\.5-flash(-preview)?$/,
+ /^gemini-2\.5-flash-lite(-preview)?$/,
+ /^gemini-2\.0-flash$/
+ ];
+ var AIAgent = class {
+ /**
+ * Create a new AIAgent instance.
+ * @param {AIAgentOptions} [options={}] - Configuration options (see AIAgentOptions in types.d.ts)
+ */
+ constructor(options = {}) {
+ this.modelName = options.modelName || "gemini-2.5-flash";
+ this.systemPrompt = options.systemPrompt || "You are a helpful AI assistant.";
+ this.maxToolRounds = options.maxToolRounds || 10;
+ this.httpTimeout = options.httpTimeout || 3e4;
+ this.maxRetries = options.maxRetries || 3;
+ this.onToolCall = options.onToolCall || null;
+ this.onMarkdown = options.onMarkdown || null;
+ this.labels = options.labels || {};
+ this.vertexai = options.vertexai || false;
+ this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
+ this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || void 0;
+ this.googleAuthOptions = options.googleAuthOptions || null;
+ this.apiKey = options.apiKey !== void 0 && options.apiKey !== null ? options.apiKey : process.env.GEMINI_API_KEY;
+ if (!this.vertexai && !this.apiKey) {
+ throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
+ }
+ if (this.vertexai && !this.project) {
+ throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
+ }
+ this._configureLogLevel(options.logLevel);
+ this.chatConfig = {
+ temperature: 0.7,
+ topP: 0.95,
+ topK: 64,
+ safetySettings: DEFAULT_SAFETY_SETTINGS,
+ systemInstruction: this.systemPrompt,
+ maxOutputTokens: options.chatConfig?.maxOutputTokens || 5e4,
+ ...options.chatConfig
+ };
+ this.chatConfig.systemInstruction = this.systemPrompt;
+ this._configureThinking(options.thinkingConfig);
+ this.chatConfig.tools = [{ functionDeclarations: BUILT_IN_DECLARATIONS }];
+ this.chatConfig.toolConfig = { functionCallingConfig: { mode: "AUTO" } };
+ this.genAIClient = null;
+ this.chatSession = null;
+ this.lastResponseMetadata = null;
+ this._markdownFiles = [];
+ logger_default.debug(`AIAgent created with model: ${this.modelName}`);
+ }
+ /**
+ * Initialize the agent — creates the GenAI client and chat session.
+ * Called automatically by chat() and stream() if not called explicitly.
+ * Idempotent — safe to call multiple times.
+ * @returns {Promise<void>}
+ */
+ async init() {
+ if (this.chatSession) return;
+ const clientOptions = this.vertexai ? {
+ vertexai: true,
+ project: this.project,
+ ...this.location && { location: this.location },
+ ...this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions }
+ } : { apiKey: this.apiKey };
+ this.genAIClient = new import_genai.GoogleGenAI(clientOptions);
+ this.chatSession = this.genAIClient.chats.create({
+ model: this.modelName,
+ config: {
+ ...this.chatConfig,
+ ...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
+ },
+ history: []
+ });
+ try {
+ await this.genAIClient.models.list();
+ logger_default.debug("AIAgent: Gemini API connection successful.");
+ } catch (e) {
+ throw new Error(`AIAgent initialization failed: ${e.message}`);
+ }
+ logger_default.debug("AIAgent: Chat session initialized.");
+ }
+ /**
+ * Send a message and get a complete response (non-streaming).
+ * Automatically handles the tool-use loop — if the model requests tool calls,
+ * they are executed and results sent back until the model produces a final response.
+ *
+ * @param {string} message - The user's message
+ * @returns {Promise<AgentResponse>} Response with text, toolCalls, markdownFiles, and usage
+ * @example
+ * const res = await agent.chat('Fetch https://api.example.com/users');
+ * console.log(res.text); // Agent's summary
+ * console.log(res.toolCalls); // [{name: 'http_get', args: {...}, result: {...}}]
+ */
+ async chat(message) {
+ if (!this.chatSession) await this.init();
+ this._markdownFiles = [];
+ const allToolCalls = [];
+ let response = await this.chatSession.sendMessage({ message });
+ for (let round = 0; round < this.maxToolRounds; round++) {
+ const functionCalls = response.functionCalls;
+ if (!functionCalls || functionCalls.length === 0) break;
+ const toolResults = await Promise.all(
+ functionCalls.map(async (call) => {
+ let result;
+ try {
+ result = await executeBuiltInTool(call.name, call.args, {
+ httpTimeout: this.httpTimeout,
+ onToolCall: this.onToolCall,
+ onMarkdown: this.onMarkdown
+ });
+ } catch (err) {
+ logger_default.warn(`Tool ${call.name} failed: ${err.message}`);
+ result = { error: err.message };
+ }
+ allToolCalls.push({ name: call.name, args: call.args, result });
+ if (call.name === "write_markdown" && call.args) {
+ this._markdownFiles.push({
+ filename: (
+ /** @type {string} */
+ call.args.filename
+ ),
+ content: (
+ /** @type {string} */
+ call.args.content
+ )
+ });
+ }
+ return { id: call.id, name: call.name, result };
+ })
+ );
+ response = await this.chatSession.sendMessage({
+ message: toolResults.map((r) => ({
+ functionResponse: {
+ id: r.id,
+ name: r.name,
+ response: { output: r.result }
+ }
+ }))
+ });
+ }
+ this._captureMetadata(response);
+ return {
+ text: response.text || "",
+ toolCalls: allToolCalls,
+ markdownFiles: [...this._markdownFiles],
+ usage: this.getLastUsage()
+ };
+ }
+ /**
+ * Send a message and stream the response as events.
+ * Automatically handles the tool-use loop between streamed rounds.
+ *
+ * Event types:
+ * - `text` — A chunk of the agent's text response (yield as it arrives)
+ * - `tool_call` — The agent is about to call a tool (includes toolName and args)
+ * - `tool_result` — A tool finished executing (includes toolName and result)
+ * - `markdown` — A markdown document was generated (includes filename and content)
+ * - `done` — The agent finished (includes fullText, markdownFiles, usage)
+ *
+ * @param {string} message - The user's message
+ * @yields {AgentStreamEvent}
+ * @example
+ * for await (const event of agent.stream('Analyze this API...')) {
+ * if (event.type === 'text') process.stdout.write(event.text);
+ * if (event.type === 'tool_call') console.log(`Calling: ${event.toolName}`);
+ * if (event.type === 'done') console.log(`\nTokens: ${event.usage?.totalTokens}`);
+ * }
+ */
+ async *stream(message) {
+ if (!this.chatSession) await this.init();
+ this._markdownFiles = [];
+ const allToolCalls = [];
+ let fullText = "";
+ let streamResponse = await this.chatSession.sendMessageStream({ message });
+ for (let round = 0; round < this.maxToolRounds; round++) {
+ let roundText = "";
+ const functionCalls = [];
+ for await (const chunk of streamResponse) {
+ if (chunk.functionCalls) {
+ functionCalls.push(...chunk.functionCalls);
+ } else if (chunk.candidates?.[0]?.content?.parts?.[0]?.text) {
+ const text = chunk.candidates[0].content.parts[0].text;
+ roundText += text;
+ fullText += text;
+ yield { type: "text", text };
+ }
+ }
+ if (functionCalls.length === 0) {
+ yield {
+ type: "done",
+ fullText,
+ markdownFiles: [...this._markdownFiles],
+ usage: this.getLastUsage()
+ };
+ return;
+ }
+ const toolResults = [];
+ for (const call of functionCalls) {
+ yield { type: "tool_call", toolName: call.name, args: call.args };
+ let result;
+ try {
+ result = await executeBuiltInTool(call.name, call.args, {
+ httpTimeout: this.httpTimeout,
+ onToolCall: this.onToolCall,
+ onMarkdown: this.onMarkdown
+ });
+ } catch (err) {
+ logger_default.warn(`Tool ${call.name} failed: ${err.message}`);
+ result = { error: err.message };
+ }
+ allToolCalls.push({ name: call.name, args: call.args, result });
+ yield { type: "tool_result", toolName: call.name, result };
+ if (call.name === "write_markdown" && call.args) {
+ const mdFilename = (
+ /** @type {string} */
+ call.args.filename
+ );
+ const mdContent = (
+ /** @type {string} */
+ call.args.content
+ );
+ this._markdownFiles.push({ filename: mdFilename, content: mdContent });
+ yield { type: "markdown", filename: mdFilename, content: mdContent };
+ }
+ toolResults.push({ id: call.id, name: call.name, result });
+ }
+ streamResponse = await this.chatSession.sendMessageStream({
+ message: toolResults.map((r) => ({
+ functionResponse: {
+ id: r.id,
+ name: r.name,
+ response: { output: r.result }
+ }
+ }))
+ });
+ }
+ yield {
+ type: "done",
+ fullText,
+ markdownFiles: [...this._markdownFiles],
+ usage: this.getLastUsage(),
+ warning: "Max tool rounds reached"
+ };
+ }
+ /**
+ * Clear conversation history while preserving tools and system prompt.
+ * Useful for starting a new user session without re-initializing the agent.
+ * @returns {Promise<void>}
+ */
+ async clearHistory() {
+ this.chatSession = this.genAIClient.chats.create({
+ model: this.modelName,
+ config: {
+ ...this.chatConfig,
+ ...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
+ },
+ history: []
+ });
+ this._markdownFiles = [];
+ this.lastResponseMetadata = null;
+ logger_default.debug("AIAgent: Conversation history cleared.");
+ }
+ /**
+ * Get conversation history.
+ * @param {boolean} [curated=false]
+ * @returns {any[]}
+ */
+ getHistory(curated = false) {
+ if (!this.chatSession) return [];
+ return this.chatSession.getHistory(curated);
+ }
+ /**
+ * Get structured usage data from the last API call.
+ * Returns null if no API call has been made yet.
+ * @returns {UsageData|null} Usage data with promptTokens, responseTokens, totalTokens, etc.
+ */
+ getLastUsage() {
+ if (!this.lastResponseMetadata) return null;
+ const m = this.lastResponseMetadata;
+ return {
+ promptTokens: m.promptTokens,
+ responseTokens: m.responseTokens,
+ totalTokens: m.totalTokens,
+ attempts: 1,
+ modelVersion: m.modelVersion,
+ requestedModel: this.modelName,
+ timestamp: m.timestamp
+ };
+ }
+ // --- Private helpers ---
+ /**
+ * Capture response metadata (model version, token counts) from an API response.
+ * @param {import('@google/genai').GenerateContentResponse} response
+ * @private
+ */
+ _captureMetadata(response) {
+ this.lastResponseMetadata = {
+ modelVersion: response.modelVersion || null,
+ requestedModel: this.modelName,
+ promptTokens: response.usageMetadata?.promptTokenCount || 0,
+ responseTokens: response.usageMetadata?.candidatesTokenCount || 0,
+ totalTokens: response.usageMetadata?.totalTokenCount || 0,
+ timestamp: Date.now()
+ };
+ }
+ /** @private */
+ _configureLogLevel(logLevel) {
+ if (logLevel) {
+ if (logLevel === "none") {
+ logger_default.level = "silent";
+ } else {
+ logger_default.level = logLevel;
+ }
+ } else if (LOG_LEVEL) {
+ logger_default.level = LOG_LEVEL;
+ } else if (NODE_ENV === "dev") {
+ logger_default.level = "debug";
+ } else if (NODE_ENV === "test") {
+ logger_default.level = "warn";
+ } else if (NODE_ENV.startsWith("prod")) {
+ logger_default.level = "error";
+ } else {
+ logger_default.level = "info";
+ }
+ }
+ /** @private */
+ _configureThinking(thinkingConfig) {
+ const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some((p) => p.test(this.modelName));
+ if (thinkingConfig === void 0) return;
+ if (thinkingConfig === null) {
+ delete this.chatConfig.thinkingConfig;
+ return;
+ }
+ if (!modelSupportsThinking) {
+ logger_default.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
+ return;
+ }
+ const config = { ...DEFAULT_THINKING_CONFIG, ...thinkingConfig };
+ if (thinkingConfig.thinkingLevel !== void 0) {
+ delete config.thinkingBudget;
+ }
+ this.chatConfig.thinkingConfig = config;
+ logger_default.debug(`Thinking config applied: ${JSON.stringify(config)}`);
+ }
+ };
+ var agent_default = AIAgent;
+
+ // index.js
+ var import_meta = {};
+ import_dotenv2.default.config();
+ var { NODE_ENV: NODE_ENV2 = "unknown", GEMINI_API_KEY, LOG_LEVEL: LOG_LEVEL2 = "" } = process.env;
+ var DEFAULT_SAFETY_SETTINGS2 = [
+ { category: import_genai2.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: import_genai2.HarmBlockThreshold.BLOCK_NONE },
+ { category: import_genai2.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: import_genai2.HarmBlockThreshold.BLOCK_NONE }
+ ];
  var DEFAULT_SYSTEM_INSTRUCTIONS = `
  You are an expert JSON transformation engine. Your task is to accurately convert data payloads from one format to another.
 
@@ -80,11 +551,11 @@ Always respond ONLY with a valid JSON object that strictly adheres to the expect
 
  Do not include any additional text, explanations, or formatting before or after the JSON object.
  `;
- var DEFAULT_THINKING_CONFIG = {
+ var DEFAULT_THINKING_CONFIG2 = {
  thinkingBudget: 0
  };
  var DEFAULT_MAX_OUTPUT_TOKENS = 5e4;
- var THINKING_SUPPORTED_MODELS = [
+ var THINKING_SUPPORTED_MODELS2 = [
  /^gemini-3-flash(-preview)?$/,
  /^gemini-3-pro(-preview|-image-preview)?$/,
  /^gemini-2\.5-pro/,
@@ -99,7 +570,7 @@ var DEFAULT_CHAT_CONFIG = {
  topP: 0.95,
  topK: 64,
  systemInstruction: DEFAULT_SYSTEM_INSTRUCTIONS,
- safetySettings: DEFAULT_SAFETY_SETTINGS
+ safetySettings: DEFAULT_SAFETY_SETTINGS2
  };
  var AITransformer = class {
  /**
@@ -162,16 +633,16 @@ function AITransformFactory(options = {}) {
  } else {
  logger_default.level = this.logLevel;
  }
- } else if (LOG_LEVEL) {
- this.logLevel = LOG_LEVEL;
- logger_default.level = LOG_LEVEL;
- } else if (NODE_ENV === "dev") {
+ } else if (LOG_LEVEL2) {
+ this.logLevel = LOG_LEVEL2;
+ logger_default.level = LOG_LEVEL2;
+ } else if (NODE_ENV2 === "dev") {
  this.logLevel = "debug";
  logger_default.level = "debug";
- } else if (NODE_ENV === "test") {
+ } else if (NODE_ENV2 === "test") {
  this.logLevel = "warn";
  logger_default.level = "warn";
- } else if (NODE_ENV.startsWith("prod")) {
+ } else if (NODE_ENV2.startsWith("prod")) {
  this.logLevel = "error";
  logger_default.level = "error";
  } else {
@@ -213,7 +684,7 @@ function AITransformFactory(options = {}) {
  } else {
  this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
  }
- const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(
+ const modelSupportsThinking = THINKING_SUPPORTED_MODELS2.some(
  (pattern) => pattern.test(this.modelName)
  );
  if (options.thinkingConfig !== void 0) {
@@ -224,7 +695,7 @@ function AITransformFactory(options = {}) {
  }
  } else if (modelSupportsThinking) {
  const thinkingConfig = {
- ...DEFAULT_THINKING_CONFIG,
+ ...DEFAULT_THINKING_CONFIG2,
  ...options.thinkingConfig
  };
  if (options.thinkingConfig?.thinkingLevel !== void 0) {
@@ -232,7 +703,7 @@ function AITransformFactory(options = {}) {
  }
  this.chatConfig.thinkingConfig = thinkingConfig;
  if (logger_default.level !== "silent") {
- logger_default.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
+ logger_default.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig: ${JSON.stringify(thinkingConfig)}`);
  }
  } else {
  if (logger_default.level !== "silent") {
@@ -291,7 +762,7 @@ function AITransformFactory(options = {}) {
  ...this.location && { location: this.location },
  ...this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions }
  } : { apiKey: this.apiKey };
- const ai = new import_genai.GoogleGenAI(clientOptions);
+ const ai = new import_genai2.GoogleGenAI(clientOptions);
  this.genAIClient = ai;
  this.chat = null;
  }
@@ -417,13 +888,13 @@ async function rawMessage(sourcePayload, messageOptions = {}) {
  timestamp: Date.now()
  };
  if (result.usageMetadata && logger_default.level !== "silent") {
- logger_default.debug(`API response metadata:`, {
+ logger_default.debug(`API response metadata: ${JSON.stringify({
  modelVersion: result.modelVersion || "not-provided",
  requestedModel: this.modelName,
  promptTokens: result.usageMetadata.promptTokenCount,
  responseTokens: result.usageMetadata.candidatesTokenCount,
  totalTokens: result.usageMetadata.totalTokenCount
- });
+ })}`);
  }
  const modelResponse = result.text;
  const extractedJSON = extractJSON(modelResponse);
@@ -557,7 +1028,7 @@ Respond with JSON only \u2013 no comments or explanations.
  timestamp: Date.now()
  };
  if (result.usageMetadata && logger_default.level !== "silent") {
- logger_default.debug(`Rebuild response metadata - tokens used:`, result.usageMetadata.totalTokenCount);
+ logger_default.debug(`Rebuild response metadata - tokens used: ${result.usageMetadata.totalTokenCount}`);
  }
  } catch (err) {
  throw new Error(`Gemini call failed while repairing payload: ${err.message}`);
@@ -732,11 +1203,11 @@ async function statelessMessage(sourcePayload, options = {}, validatorFn = null)
  attempts: 1
  };
  if (result.usageMetadata && logger_default.level !== "silent") {
- logger_default.debug(`Stateless message metadata:`, {
+ logger_default.debug(`Stateless message metadata: ${JSON.stringify({
  modelVersion: result.modelVersion || "not-provided",
  promptTokens: result.usageMetadata.promptTokenCount,
  responseTokens: result.usageMetadata.candidatesTokenCount
- });
+ })}`);
  }
  const modelResponse = result.text;
  const extractedJSON = extractJSON(modelResponse);
@@ -1022,7 +1493,7 @@ if (import_meta.url === new URL(`file://${process.argv[1]}`).href) {
  await transformer.seed(examples);
  logger_default.info("AI Transformer initialized and seeded with examples.");
  const normalResponse = await transformer.message({ "name": "AK" });
- logger_default.info("Normal Payload Transformed", normalResponse);
+ logger_default.info(`Normal Payload Transformed: ${JSON.stringify(normalResponse)}`);
  const mockValidator = async (payload) => {
  if (!payload.profession || !payload.life_as_told_by_emoji) {
  throw new Error("Missing required fields: profession or life_as_told_by_emoji");
@@ -1037,16 +1508,17 @@ if (import_meta.url === new URL(`file://${process.argv[1]}`).href) {
  {},
  mockValidator
  );
- logger_default.info("Validated Payload Transformed", validatedResponse);
- if (NODE_ENV === "dev") debugger;
+ logger_default.info(`Validated Payload Transformed: ${JSON.stringify(validatedResponse)}`);
+ if (NODE_ENV2 === "dev") debugger;
  } catch (error) {
- logger_default.error("Error in AI Transformer script:", error);
- if (NODE_ENV === "dev") debugger;
+ logger_default.error(`Error in AI Transformer script: ${error?.message || error}`);
+ if (NODE_ENV2 === "dev") debugger;
  }
  })();
  }
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
+ AIAgent,
  HarmBlockThreshold,
  HarmCategory,
  ThinkingLevel,