ak-gemini 1.1.13 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +259 -294
- package/base.js +485 -0
- package/chat.js +87 -0
- package/code-agent.js +563 -0
- package/index.cjs +1596 -789
- package/index.js +38 -1500
- package/json-helpers.js +352 -0
- package/message.js +170 -0
- package/package.json +23 -14
- package/tool-agent.js +311 -0
- package/transformer.js +502 -0
- package/types.d.ts +376 -189
package/index.cjs
CHANGED
|
@@ -29,18 +29,25 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
|
|
|
29
29
|
// index.js
|
|
30
30
|
var index_exports = {};
|
|
31
31
|
__export(index_exports, {
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
32
|
+
BaseGemini: () => base_default,
|
|
33
|
+
Chat: () => chat_default,
|
|
34
|
+
CodeAgent: () => code_agent_default,
|
|
35
|
+
HarmBlockThreshold: () => import_genai2.HarmBlockThreshold,
|
|
36
|
+
HarmCategory: () => import_genai2.HarmCategory,
|
|
37
|
+
Message: () => message_default,
|
|
38
|
+
ThinkingLevel: () => import_genai2.ThinkingLevel,
|
|
39
|
+
ToolAgent: () => tool_agent_default,
|
|
40
|
+
Transformer: () => transformer_default,
|
|
35
41
|
attemptJSONRecovery: () => attemptJSONRecovery,
|
|
36
42
|
default: () => index_default,
|
|
43
|
+
extractJSON: () => extractJSON,
|
|
37
44
|
log: () => logger_default
|
|
38
45
|
});
|
|
39
46
|
module.exports = __toCommonJS(index_exports);
|
|
47
|
+
|
|
48
|
+
// base.js
|
|
40
49
|
var import_dotenv = __toESM(require("dotenv"), 1);
|
|
41
50
|
var import_genai = require("@google/genai");
|
|
42
|
-
var import_ak_tools = __toESM(require("ak-tools"), 1);
|
|
43
|
-
var import_path = __toESM(require("path"), 1);
|
|
44
51
|
|
|
45
52
|
// logger.js
|
|
46
53
|
var import_pino = __toESM(require("pino"), 1);
|
|
@@ -59,692 +66,29 @@ var logger = (0, import_pino.default)({
|
|
|
59
66
|
});
|
|
60
67
|
var logger_default = logger;
|
|
61
68
|
|
|
62
|
-
//
|
|
63
|
-
|
|
64
|
-
import_dotenv.default.config();
|
|
65
|
-
var { NODE_ENV = "unknown", GEMINI_API_KEY, LOG_LEVEL = "" } = process.env;
|
|
66
|
-
var DEFAULT_SAFETY_SETTINGS = [
|
|
67
|
-
{ category: import_genai.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE },
|
|
68
|
-
{ category: import_genai.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE }
|
|
69
|
-
];
|
|
70
|
-
var DEFAULT_SYSTEM_INSTRUCTIONS = `
|
|
71
|
-
You are an expert JSON transformation engine. Your task is to accurately convert data payloads from one format to another.
|
|
72
|
-
|
|
73
|
-
You will be provided with example transformations (Source JSON -> Target JSON).
|
|
74
|
-
|
|
75
|
-
Learn the mapping rules from these examples.
|
|
76
|
-
|
|
77
|
-
When presented with new Source JSON, apply the learned transformation rules to produce a new Target JSON payload.
|
|
78
|
-
|
|
79
|
-
Always respond ONLY with a valid JSON object that strictly adheres to the expected output format.
|
|
80
|
-
|
|
81
|
-
Do not include any additional text, explanations, or formatting before or after the JSON object.
|
|
82
|
-
`;
|
|
83
|
-
var DEFAULT_THINKING_CONFIG = {
|
|
84
|
-
thinkingBudget: 0
|
|
85
|
-
};
|
|
86
|
-
var DEFAULT_MAX_OUTPUT_TOKENS = 5e4;
|
|
87
|
-
var THINKING_SUPPORTED_MODELS = [
|
|
88
|
-
/^gemini-3-flash(-preview)?$/,
|
|
89
|
-
/^gemini-3-pro(-preview|-image-preview)?$/,
|
|
90
|
-
/^gemini-2\.5-pro/,
|
|
91
|
-
/^gemini-2\.5-flash(-preview)?$/,
|
|
92
|
-
/^gemini-2\.5-flash-lite(-preview)?$/,
|
|
93
|
-
/^gemini-2\.0-flash$/
|
|
94
|
-
// Experimental support, exact match only
|
|
95
|
-
];
|
|
96
|
-
var DEFAULT_CHAT_CONFIG = {
|
|
97
|
-
responseMimeType: "application/json",
|
|
98
|
-
temperature: 0.2,
|
|
99
|
-
topP: 0.95,
|
|
100
|
-
topK: 64,
|
|
101
|
-
systemInstruction: DEFAULT_SYSTEM_INSTRUCTIONS,
|
|
102
|
-
safetySettings: DEFAULT_SAFETY_SETTINGS
|
|
103
|
-
};
|
|
104
|
-
var AITransformer = class {
|
|
105
|
-
/**
|
|
106
|
-
* @param {AITransformerOptions} [options={}] - Configuration options for the transformer
|
|
107
|
-
*
|
|
108
|
-
*/
|
|
109
|
-
constructor(options = {}) {
|
|
110
|
-
this.modelName = "";
|
|
111
|
-
this.promptKey = "";
|
|
112
|
-
this.answerKey = "";
|
|
113
|
-
this.contextKey = "";
|
|
114
|
-
this.explanationKey = "";
|
|
115
|
-
this.systemInstructionKey = "";
|
|
116
|
-
this.maxRetries = 3;
|
|
117
|
-
this.retryDelay = 1e3;
|
|
118
|
-
this.chatConfig = {};
|
|
119
|
-
this.apiKey = GEMINI_API_KEY;
|
|
120
|
-
this.onlyJSON = true;
|
|
121
|
-
this.asyncValidator = null;
|
|
122
|
-
this.logLevel = "info";
|
|
123
|
-
this.lastResponseMetadata = null;
|
|
124
|
-
this.exampleCount = 0;
|
|
125
|
-
this._cumulativeUsage = {
|
|
126
|
-
promptTokens: 0,
|
|
127
|
-
responseTokens: 0,
|
|
128
|
-
totalTokens: 0,
|
|
129
|
-
attempts: 0
|
|
130
|
-
};
|
|
131
|
-
AITransformFactory.call(this, options);
|
|
132
|
-
this.init = initChat.bind(this);
|
|
133
|
-
this.seed = seedWithExamples.bind(this);
|
|
134
|
-
this.rawMessage = rawMessage.bind(this);
|
|
135
|
-
this.message = (payload, opts = {}, validatorFn = null) => {
|
|
136
|
-
return prepareAndValidateMessage.call(this, payload, opts, validatorFn || this.asyncValidator);
|
|
137
|
-
};
|
|
138
|
-
this.rebuild = rebuildPayload.bind(this);
|
|
139
|
-
this.reset = resetChat.bind(this);
|
|
140
|
-
this.getHistory = getChatHistory.bind(this);
|
|
141
|
-
this.messageAndValidate = prepareAndValidateMessage.bind(this);
|
|
142
|
-
this.transformWithValidation = prepareAndValidateMessage.bind(this);
|
|
143
|
-
this.estimate = estimateInputTokens.bind(this);
|
|
144
|
-
this.updateSystemInstructions = updateSystemInstructions.bind(this);
|
|
145
|
-
this.estimateCost = estimateCost.bind(this);
|
|
146
|
-
this.clearConversation = clearConversation.bind(this);
|
|
147
|
-
this.getLastUsage = getLastUsage.bind(this);
|
|
148
|
-
}
|
|
149
|
-
};
|
|
150
|
-
var index_default = AITransformer;
|
|
151
|
-
function AITransformFactory(options = {}) {
|
|
152
|
-
this.modelName = options.modelName || "gemini-2.5-flash";
|
|
153
|
-
if (options.systemInstructions === void 0) {
|
|
154
|
-
this.systemInstructions = DEFAULT_SYSTEM_INSTRUCTIONS;
|
|
155
|
-
} else {
|
|
156
|
-
this.systemInstructions = options.systemInstructions;
|
|
157
|
-
}
|
|
158
|
-
if (options.logLevel) {
|
|
159
|
-
this.logLevel = options.logLevel;
|
|
160
|
-
if (this.logLevel === "none") {
|
|
161
|
-
logger_default.level = "silent";
|
|
162
|
-
} else {
|
|
163
|
-
logger_default.level = this.logLevel;
|
|
164
|
-
}
|
|
165
|
-
} else if (LOG_LEVEL) {
|
|
166
|
-
this.logLevel = LOG_LEVEL;
|
|
167
|
-
logger_default.level = LOG_LEVEL;
|
|
168
|
-
} else if (NODE_ENV === "dev") {
|
|
169
|
-
this.logLevel = "debug";
|
|
170
|
-
logger_default.level = "debug";
|
|
171
|
-
} else if (NODE_ENV === "test") {
|
|
172
|
-
this.logLevel = "warn";
|
|
173
|
-
logger_default.level = "warn";
|
|
174
|
-
} else if (NODE_ENV.startsWith("prod")) {
|
|
175
|
-
this.logLevel = "error";
|
|
176
|
-
logger_default.level = "error";
|
|
177
|
-
} else {
|
|
178
|
-
this.logLevel = "info";
|
|
179
|
-
logger_default.level = "info";
|
|
180
|
-
}
|
|
181
|
-
this.vertexai = options.vertexai || false;
|
|
182
|
-
this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
|
|
183
|
-
this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || void 0;
|
|
184
|
-
this.googleAuthOptions = options.googleAuthOptions || null;
|
|
185
|
-
this.apiKey = options.apiKey !== void 0 && options.apiKey !== null ? options.apiKey : GEMINI_API_KEY;
|
|
186
|
-
if (!this.vertexai && !this.apiKey) {
|
|
187
|
-
throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
|
|
188
|
-
}
|
|
189
|
-
if (this.vertexai && !this.project) {
|
|
190
|
-
throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
|
|
191
|
-
}
|
|
192
|
-
this.chatConfig = {
|
|
193
|
-
...DEFAULT_CHAT_CONFIG,
|
|
194
|
-
...options.chatConfig
|
|
195
|
-
};
|
|
196
|
-
if (this.systemInstructions) {
|
|
197
|
-
this.chatConfig.systemInstruction = this.systemInstructions;
|
|
198
|
-
} else if (options.systemInstructions !== void 0) {
|
|
199
|
-
delete this.chatConfig.systemInstruction;
|
|
200
|
-
}
|
|
201
|
-
if (options.maxOutputTokens !== void 0) {
|
|
202
|
-
if (options.maxOutputTokens === null) {
|
|
203
|
-
delete this.chatConfig.maxOutputTokens;
|
|
204
|
-
} else {
|
|
205
|
-
this.chatConfig.maxOutputTokens = options.maxOutputTokens;
|
|
206
|
-
}
|
|
207
|
-
} else if (options.chatConfig?.maxOutputTokens !== void 0) {
|
|
208
|
-
if (options.chatConfig.maxOutputTokens === null) {
|
|
209
|
-
delete this.chatConfig.maxOutputTokens;
|
|
210
|
-
} else {
|
|
211
|
-
this.chatConfig.maxOutputTokens = options.chatConfig.maxOutputTokens;
|
|
212
|
-
}
|
|
213
|
-
} else {
|
|
214
|
-
this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
|
|
215
|
-
}
|
|
216
|
-
const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(
|
|
217
|
-
(pattern) => pattern.test(this.modelName)
|
|
218
|
-
);
|
|
219
|
-
if (options.thinkingConfig !== void 0) {
|
|
220
|
-
if (options.thinkingConfig === null) {
|
|
221
|
-
delete this.chatConfig.thinkingConfig;
|
|
222
|
-
if (logger_default.level !== "silent") {
|
|
223
|
-
logger_default.debug(`thinkingConfig set to null - removed from configuration`);
|
|
224
|
-
}
|
|
225
|
-
} else if (modelSupportsThinking) {
|
|
226
|
-
const thinkingConfig = {
|
|
227
|
-
...DEFAULT_THINKING_CONFIG,
|
|
228
|
-
...options.thinkingConfig
|
|
229
|
-
};
|
|
230
|
-
if (options.thinkingConfig?.thinkingLevel !== void 0) {
|
|
231
|
-
delete thinkingConfig.thinkingBudget;
|
|
232
|
-
}
|
|
233
|
-
this.chatConfig.thinkingConfig = thinkingConfig;
|
|
234
|
-
if (logger_default.level !== "silent") {
|
|
235
|
-
logger_default.debug(`Model ${this.modelName} supports thinking. Applied thinkingConfig:`, thinkingConfig);
|
|
236
|
-
}
|
|
237
|
-
} else {
|
|
238
|
-
if (logger_default.level !== "silent") {
|
|
239
|
-
logger_default.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
|
|
240
|
-
}
|
|
241
|
-
}
|
|
242
|
-
}
|
|
243
|
-
if (options.responseSchema) {
|
|
244
|
-
this.chatConfig.responseSchema = options.responseSchema;
|
|
245
|
-
}
|
|
246
|
-
this.examplesFile = options.examplesFile || null;
|
|
247
|
-
this.exampleData = options.exampleData || null;
|
|
248
|
-
this.promptKey = options.promptKey || options.sourceKey || "PROMPT";
|
|
249
|
-
this.answerKey = options.answerKey || options.targetKey || "ANSWER";
|
|
250
|
-
this.contextKey = options.contextKey || "CONTEXT";
|
|
251
|
-
this.explanationKey = options.explanationKey || "EXPLANATION";
|
|
252
|
-
this.systemInstructionsKey = options.systemInstructionsKey || "SYSTEM";
|
|
253
|
-
this.maxRetries = options.maxRetries || 3;
|
|
254
|
-
this.retryDelay = options.retryDelay || 1e3;
|
|
255
|
-
this.asyncValidator = options.asyncValidator || null;
|
|
256
|
-
this.onlyJSON = options.onlyJSON !== void 0 ? options.onlyJSON : true;
|
|
257
|
-
this.enableGrounding = options.enableGrounding || false;
|
|
258
|
-
this.groundingConfig = options.groundingConfig || {};
|
|
259
|
-
this.labels = options.labels || {};
|
|
260
|
-
if (Object.keys(this.labels).length > 0 && logger_default.level !== "silent") {
|
|
261
|
-
if (!this.vertexai) {
|
|
262
|
-
logger_default.warn(`Billing labels are only supported with Vertex AI. Labels will be ignored.`);
|
|
263
|
-
} else {
|
|
264
|
-
logger_default.debug(`Billing labels configured: ${JSON.stringify(this.labels)}`);
|
|
265
|
-
}
|
|
266
|
-
}
|
|
267
|
-
if (this.promptKey === this.answerKey) {
|
|
268
|
-
throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
|
|
269
|
-
}
|
|
270
|
-
if (logger_default.level !== "silent") {
|
|
271
|
-
logger_default.debug(`Creating AI Transformer with model: ${this.modelName}`);
|
|
272
|
-
logger_default.debug(`Using keys - Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
|
|
273
|
-
logger_default.debug(`Max output tokens set to: ${this.chatConfig.maxOutputTokens}`);
|
|
274
|
-
if (this.vertexai) {
|
|
275
|
-
logger_default.debug(`Using Vertex AI - Project: ${this.project}, Location: ${this.location || "global (default)"}`);
|
|
276
|
-
if (this.googleAuthOptions?.keyFilename) {
|
|
277
|
-
logger_default.debug(`Auth: Service account key file: ${this.googleAuthOptions.keyFilename}`);
|
|
278
|
-
} else if (this.googleAuthOptions?.credentials) {
|
|
279
|
-
logger_default.debug(`Auth: Inline credentials provided`);
|
|
280
|
-
} else {
|
|
281
|
-
logger_default.debug(`Auth: Application Default Credentials (ADC)`);
|
|
282
|
-
}
|
|
283
|
-
} else {
|
|
284
|
-
logger_default.debug(`Using Gemini API with key: ${this.apiKey.substring(0, 10)}...`);
|
|
285
|
-
}
|
|
286
|
-
logger_default.debug(`Grounding ${this.enableGrounding ? "ENABLED" : "DISABLED"} (costs $35/1k queries)`);
|
|
287
|
-
}
|
|
288
|
-
const clientOptions = this.vertexai ? {
|
|
289
|
-
vertexai: true,
|
|
290
|
-
project: this.project,
|
|
291
|
-
...this.location && { location: this.location },
|
|
292
|
-
...this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions }
|
|
293
|
-
} : { apiKey: this.apiKey };
|
|
294
|
-
const ai = new import_genai.GoogleGenAI(clientOptions);
|
|
295
|
-
this.genAIClient = ai;
|
|
296
|
-
this.chat = null;
|
|
297
|
-
}
|
|
298
|
-
async function initChat(force = false) {
|
|
299
|
-
if (this.chat && !force) return;
|
|
300
|
-
logger_default.debug(`Initializing Gemini chat session with model: ${this.modelName}...`);
|
|
301
|
-
const chatOptions = {
|
|
302
|
-
model: this.modelName,
|
|
303
|
-
// @ts-ignore
|
|
304
|
-
config: {
|
|
305
|
-
...this.chatConfig,
|
|
306
|
-
...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
|
|
307
|
-
},
|
|
308
|
-
history: []
|
|
309
|
-
};
|
|
310
|
-
if (this.enableGrounding) {
|
|
311
|
-
chatOptions.config.tools = [{
|
|
312
|
-
googleSearch: this.groundingConfig
|
|
313
|
-
}];
|
|
314
|
-
logger_default.debug(`Search grounding ENABLED for this session (WARNING: costs $35/1k queries)`);
|
|
315
|
-
}
|
|
316
|
-
this.chat = await this.genAIClient.chats.create(chatOptions);
|
|
317
|
-
try {
|
|
318
|
-
await this.genAIClient.models.list();
|
|
319
|
-
logger_default.debug("Gemini API connection successful.");
|
|
320
|
-
} catch (e) {
|
|
321
|
-
throw new Error(`Gemini chat initialization failed: ${e.message}`);
|
|
322
|
-
}
|
|
323
|
-
logger_default.debug("Gemini chat session initialized.");
|
|
324
|
-
}
|
|
325
|
-
async function seedWithExamples(examples) {
|
|
326
|
-
await this.init();
|
|
327
|
-
if (!examples || !Array.isArray(examples) || examples.length === 0) {
|
|
328
|
-
if (this.examplesFile) {
|
|
329
|
-
logger_default.debug(`No examples provided, loading from file: ${this.examplesFile}`);
|
|
330
|
-
try {
|
|
331
|
-
examples = await import_ak_tools.default.load(import_path.default.resolve(this.examplesFile), true);
|
|
332
|
-
} catch (err) {
|
|
333
|
-
throw new Error(`Could not load examples from file: ${this.examplesFile}. Please check the file path and format.`);
|
|
334
|
-
}
|
|
335
|
-
} else if (this.exampleData) {
|
|
336
|
-
logger_default.debug(`Using example data provided in options.`);
|
|
337
|
-
if (Array.isArray(this.exampleData)) {
|
|
338
|
-
examples = this.exampleData;
|
|
339
|
-
} else {
|
|
340
|
-
throw new Error(`Invalid example data provided. Expected an array of examples.`);
|
|
341
|
-
}
|
|
342
|
-
} else {
|
|
343
|
-
logger_default.debug("No examples provided and no examples file specified. Skipping seeding.");
|
|
344
|
-
return;
|
|
345
|
-
}
|
|
346
|
-
}
|
|
347
|
-
const instructionExample = examples.find((ex) => ex[this.systemInstructionsKey]);
|
|
348
|
-
if (instructionExample) {
|
|
349
|
-
logger_default.debug(`Found system instructions in examples; reinitializing chat with new instructions.`);
|
|
350
|
-
this.systemInstructions = instructionExample[this.systemInstructionsKey];
|
|
351
|
-
this.chatConfig.systemInstruction = this.systemInstructions;
|
|
352
|
-
await this.init(true);
|
|
353
|
-
}
|
|
354
|
-
logger_default.debug(`Seeding chat with ${examples.length} transformation examples...`);
|
|
355
|
-
const historyToAdd = [];
|
|
356
|
-
for (const example of examples) {
|
|
357
|
-
const contextValue = example[this.contextKey] || "";
|
|
358
|
-
const promptValue = example[this.promptKey] || "";
|
|
359
|
-
const answerValue = example[this.answerKey] || "";
|
|
360
|
-
const explanationValue = example[this.explanationKey] || "";
|
|
361
|
-
let userText = "";
|
|
362
|
-
let modelResponse = {};
|
|
363
|
-
if (contextValue) {
|
|
364
|
-
let contextText = isJSON(contextValue) ? JSON.stringify(contextValue, null, 2) : contextValue;
|
|
365
|
-
userText += `CONTEXT:
|
|
366
|
-
${contextText}
|
|
367
|
-
|
|
368
|
-
`;
|
|
369
|
-
}
|
|
370
|
-
if (promptValue) {
|
|
371
|
-
let promptText = isJSON(promptValue) ? JSON.stringify(promptValue, null, 2) : promptValue;
|
|
372
|
-
userText += promptText;
|
|
373
|
-
}
|
|
374
|
-
if (answerValue) modelResponse.data = answerValue;
|
|
375
|
-
if (explanationValue) modelResponse.explanation = explanationValue;
|
|
376
|
-
const modelText = JSON.stringify(modelResponse, null, 2);
|
|
377
|
-
if (userText.trim().length && modelText.trim().length > 0) {
|
|
378
|
-
historyToAdd.push({ role: "user", parts: [{ text: userText.trim() }] });
|
|
379
|
-
historyToAdd.push({ role: "model", parts: [{ text: modelText.trim() }] });
|
|
380
|
-
}
|
|
381
|
-
}
|
|
382
|
-
const currentHistory = this?.chat?.getHistory() || [];
|
|
383
|
-
logger_default.debug(`Adding ${historyToAdd.length} examples to chat history (${currentHistory.length} current examples)...`);
|
|
384
|
-
this.chat = await this.genAIClient.chats.create({
|
|
385
|
-
model: this.modelName,
|
|
386
|
-
// @ts-ignore
|
|
387
|
-
config: {
|
|
388
|
-
...this.chatConfig,
|
|
389
|
-
...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
|
|
390
|
-
},
|
|
391
|
-
history: [...currentHistory, ...historyToAdd]
|
|
392
|
-
});
|
|
393
|
-
this.exampleCount = currentHistory.length + historyToAdd.length;
|
|
394
|
-
const newHistory = this.chat.getHistory();
|
|
395
|
-
logger_default.debug(`Created new chat session with ${newHistory.length} examples.`);
|
|
396
|
-
return newHistory;
|
|
397
|
-
}
|
|
398
|
-
async function rawMessage(sourcePayload, messageOptions = {}) {
|
|
399
|
-
if (!this.chat) {
|
|
400
|
-
throw new Error("Chat session not initialized.");
|
|
401
|
-
}
|
|
402
|
-
const actualPayload = typeof sourcePayload === "string" ? sourcePayload : JSON.stringify(sourcePayload, null, 2);
|
|
403
|
-
const mergedLabels = { ...this.labels, ...messageOptions.labels || {} };
|
|
404
|
-
const hasLabels = this.vertexai && Object.keys(mergedLabels).length > 0;
|
|
69
|
+
// json-helpers.js
|
|
70
|
+
function isJSON(data) {
|
|
405
71
|
try {
|
|
406
|
-
const
|
|
407
|
-
if (
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
const result = await this.chat.sendMessage(sendParams);
|
|
411
|
-
this.lastResponseMetadata = {
|
|
412
|
-
modelVersion: result.modelVersion || null,
|
|
413
|
-
requestedModel: this.modelName,
|
|
414
|
-
promptTokens: result.usageMetadata?.promptTokenCount || 0,
|
|
415
|
-
responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
|
|
416
|
-
totalTokens: result.usageMetadata?.totalTokenCount || 0,
|
|
417
|
-
timestamp: Date.now()
|
|
418
|
-
};
|
|
419
|
-
if (result.usageMetadata && logger_default.level !== "silent") {
|
|
420
|
-
logger_default.debug(`API response metadata:`, {
|
|
421
|
-
modelVersion: result.modelVersion || "not-provided",
|
|
422
|
-
requestedModel: this.modelName,
|
|
423
|
-
promptTokens: result.usageMetadata.promptTokenCount,
|
|
424
|
-
responseTokens: result.usageMetadata.candidatesTokenCount,
|
|
425
|
-
totalTokens: result.usageMetadata.totalTokenCount
|
|
426
|
-
});
|
|
427
|
-
}
|
|
428
|
-
const modelResponse = result.text;
|
|
429
|
-
const extractedJSON = extractJSON(modelResponse);
|
|
430
|
-
if (extractedJSON?.data) {
|
|
431
|
-
return extractedJSON.data;
|
|
432
|
-
}
|
|
433
|
-
return extractedJSON;
|
|
434
|
-
} catch (error) {
|
|
435
|
-
if (this.onlyJSON && error.message.includes("Could not extract valid JSON")) {
|
|
436
|
-
throw new Error(`Invalid JSON response from Gemini: ${error.message}`);
|
|
437
|
-
}
|
|
438
|
-
throw new Error(`Transformation failed: ${error.message}`);
|
|
439
|
-
}
|
|
440
|
-
}
|
|
441
|
-
async function prepareAndValidateMessage(sourcePayload, options = {}, validatorFn = null) {
|
|
442
|
-
if (!this.chat) {
|
|
443
|
-
throw new Error("Chat session not initialized. Please call init() first.");
|
|
444
|
-
}
|
|
445
|
-
if (options.stateless) {
|
|
446
|
-
return await statelessMessage.call(this, sourcePayload, options, validatorFn);
|
|
447
|
-
}
|
|
448
|
-
const maxRetries = options.maxRetries ?? this.maxRetries;
|
|
449
|
-
const retryDelay = options.retryDelay ?? this.retryDelay;
|
|
450
|
-
const enableGroundingForMessage = options.enableGrounding ?? this.enableGrounding;
|
|
451
|
-
const groundingConfigForMessage = options.groundingConfig ?? this.groundingConfig;
|
|
452
|
-
if (enableGroundingForMessage !== this.enableGrounding) {
|
|
453
|
-
const originalGrounding = this.enableGrounding;
|
|
454
|
-
const originalConfig = this.groundingConfig;
|
|
455
|
-
try {
|
|
456
|
-
this.enableGrounding = enableGroundingForMessage;
|
|
457
|
-
this.groundingConfig = groundingConfigForMessage;
|
|
458
|
-
await this.init(true);
|
|
459
|
-
if (enableGroundingForMessage) {
|
|
460
|
-
logger_default.warn(`Search grounding ENABLED for this message (WARNING: costs $35/1k queries)`);
|
|
461
|
-
} else {
|
|
462
|
-
logger_default.debug(`Search grounding DISABLED for this message`);
|
|
463
|
-
}
|
|
464
|
-
} catch (error) {
|
|
465
|
-
this.enableGrounding = originalGrounding;
|
|
466
|
-
this.groundingConfig = originalConfig;
|
|
467
|
-
throw error;
|
|
468
|
-
}
|
|
469
|
-
const restoreGrounding = async () => {
|
|
470
|
-
this.enableGrounding = originalGrounding;
|
|
471
|
-
this.groundingConfig = originalConfig;
|
|
472
|
-
await this.init(true);
|
|
473
|
-
};
|
|
474
|
-
options._restoreGrounding = restoreGrounding;
|
|
475
|
-
}
|
|
476
|
-
let lastError = null;
|
|
477
|
-
let lastPayload = null;
|
|
478
|
-
if (sourcePayload && isJSON(sourcePayload)) {
|
|
479
|
-
lastPayload = JSON.stringify(sourcePayload, null, 2);
|
|
480
|
-
} else if (typeof sourcePayload === "string") {
|
|
481
|
-
lastPayload = sourcePayload;
|
|
482
|
-
} else if (typeof sourcePayload === "boolean" || typeof sourcePayload === "number") {
|
|
483
|
-
lastPayload = sourcePayload.toString();
|
|
484
|
-
} else if (sourcePayload === null || sourcePayload === void 0) {
|
|
485
|
-
lastPayload = JSON.stringify({});
|
|
486
|
-
} else {
|
|
487
|
-
throw new Error("Invalid source payload. Must be a JSON object or string.");
|
|
488
|
-
}
|
|
489
|
-
const messageOptions = {};
|
|
490
|
-
if (options.labels) {
|
|
491
|
-
messageOptions.labels = options.labels;
|
|
492
|
-
}
|
|
493
|
-
this._cumulativeUsage = {
|
|
494
|
-
promptTokens: 0,
|
|
495
|
-
responseTokens: 0,
|
|
496
|
-
totalTokens: 0,
|
|
497
|
-
attempts: 0
|
|
498
|
-
};
|
|
499
|
-
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
500
|
-
try {
|
|
501
|
-
const transformedPayload = attempt === 0 ? await this.rawMessage(lastPayload, messageOptions) : await this.rebuild(lastPayload, lastError.message);
|
|
502
|
-
if (this.lastResponseMetadata) {
|
|
503
|
-
this._cumulativeUsage.promptTokens += this.lastResponseMetadata.promptTokens || 0;
|
|
504
|
-
this._cumulativeUsage.responseTokens += this.lastResponseMetadata.responseTokens || 0;
|
|
505
|
-
this._cumulativeUsage.totalTokens += this.lastResponseMetadata.totalTokens || 0;
|
|
506
|
-
this._cumulativeUsage.attempts = attempt + 1;
|
|
507
|
-
}
|
|
508
|
-
lastPayload = transformedPayload;
|
|
509
|
-
if (validatorFn) {
|
|
510
|
-
await validatorFn(transformedPayload);
|
|
511
|
-
}
|
|
512
|
-
logger_default.debug(`Transformation succeeded on attempt ${attempt + 1}`);
|
|
513
|
-
if (options._restoreGrounding) {
|
|
514
|
-
await options._restoreGrounding();
|
|
515
|
-
}
|
|
516
|
-
return transformedPayload;
|
|
517
|
-
} catch (error) {
|
|
518
|
-
lastError = error;
|
|
519
|
-
logger_default.warn(`Attempt ${attempt + 1} failed: ${error.message}`);
|
|
520
|
-
if (attempt >= maxRetries) {
|
|
521
|
-
logger_default.error(`All ${maxRetries + 1} attempts failed.`);
|
|
522
|
-
if (options._restoreGrounding) {
|
|
523
|
-
await options._restoreGrounding();
|
|
524
|
-
}
|
|
525
|
-
throw new Error(`Transformation failed after ${maxRetries + 1} attempts. Last error: ${error.message}`);
|
|
72
|
+
const attempt = JSON.stringify(data);
|
|
73
|
+
if (attempt?.startsWith("{") || attempt?.startsWith("[")) {
|
|
74
|
+
if (attempt?.endsWith("}") || attempt?.endsWith("]")) {
|
|
75
|
+
return true;
|
|
526
76
|
}
|
|
527
|
-
const delay = retryDelay * Math.pow(2, attempt);
|
|
528
|
-
await new Promise((res) => setTimeout(res, delay));
|
|
529
77
|
}
|
|
78
|
+
return false;
|
|
79
|
+
} catch (e) {
|
|
80
|
+
return false;
|
|
530
81
|
}
|
|
531
82
|
}
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
const prompt = `
|
|
535
|
-
The previous JSON payload (below) failed validation.
|
|
536
|
-
The server's error message is quoted afterward.
|
|
537
|
-
|
|
538
|
-
---------------- BAD PAYLOAD ----------------
|
|
539
|
-
${JSON.stringify(lastPayload, null, 2)}
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
---------------- SERVER ERROR ----------------
|
|
543
|
-
${serverError}
|
|
544
|
-
|
|
545
|
-
Please return a NEW JSON payload that corrects the issue.
|
|
546
|
-
Respond with JSON only \u2013 no comments or explanations.
|
|
547
|
-
`;
|
|
548
|
-
let result;
|
|
83
|
+
function isJSONStr(string) {
|
|
84
|
+
if (typeof string !== "string") return false;
|
|
549
85
|
try {
|
|
550
|
-
result =
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
requestedModel: this.modelName,
|
|
554
|
-
promptTokens: result.usageMetadata?.promptTokenCount || 0,
|
|
555
|
-
responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
|
|
556
|
-
totalTokens: result.usageMetadata?.totalTokenCount || 0,
|
|
557
|
-
timestamp: Date.now()
|
|
558
|
-
};
|
|
559
|
-
if (result.usageMetadata && logger_default.level !== "silent") {
|
|
560
|
-
logger_default.debug(`Rebuild response metadata - tokens used:`, result.usageMetadata.totalTokenCount);
|
|
561
|
-
}
|
|
86
|
+
const result = JSON.parse(string);
|
|
87
|
+
const type = Object.prototype.toString.call(result);
|
|
88
|
+
return type === "[object Object]" || type === "[object Array]";
|
|
562
89
|
} catch (err) {
|
|
563
|
-
|
|
564
|
-
}
|
|
565
|
-
try {
|
|
566
|
-
const text = result.text ?? result.response ?? "";
|
|
567
|
-
return typeof text === "object" ? text : JSON.parse(text);
|
|
568
|
-
} catch (parseErr) {
|
|
569
|
-
throw new Error(`Gemini returned non-JSON while repairing payload: ${parseErr.message}`);
|
|
570
|
-
}
|
|
571
|
-
}
|
|
572
|
-
async function estimateInputTokens(nextPayload) {
|
|
573
|
-
const contents = [];
|
|
574
|
-
if (this.systemInstructions) {
|
|
575
|
-
contents.push({ parts: [{ text: this.systemInstructions }] });
|
|
576
|
-
}
|
|
577
|
-
if (this.chat && typeof this.chat.getHistory === "function") {
|
|
578
|
-
const history = this.chat.getHistory();
|
|
579
|
-
if (Array.isArray(history) && history.length > 0) {
|
|
580
|
-
contents.push(...history);
|
|
581
|
-
}
|
|
582
|
-
}
|
|
583
|
-
const nextMessage = typeof nextPayload === "string" ? nextPayload : JSON.stringify(nextPayload, null, 2);
|
|
584
|
-
contents.push({ parts: [{ text: nextMessage }] });
|
|
585
|
-
const resp = await this.genAIClient.models.countTokens({
|
|
586
|
-
model: this.modelName,
|
|
587
|
-
contents
|
|
588
|
-
});
|
|
589
|
-
return { inputTokens: resp.totalTokens };
|
|
590
|
-
}
|
|
591
|
-
var MODEL_PRICING = {
|
|
592
|
-
"gemini-2.5-flash": { input: 0.15, output: 0.6 },
|
|
593
|
-
"gemini-2.5-flash-lite": { input: 0.02, output: 0.1 },
|
|
594
|
-
"gemini-2.5-pro": { input: 2.5, output: 10 },
|
|
595
|
-
"gemini-3-pro": { input: 2, output: 12 },
|
|
596
|
-
"gemini-3-pro-preview": { input: 2, output: 12 },
|
|
597
|
-
"gemini-2.0-flash": { input: 0.1, output: 0.4 },
|
|
598
|
-
"gemini-2.0-flash-lite": { input: 0.02, output: 0.1 }
|
|
599
|
-
};
|
|
600
|
-
async function estimateCost(nextPayload) {
|
|
601
|
-
const tokenInfo = await this.estimate(nextPayload);
|
|
602
|
-
const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };
|
|
603
|
-
return {
|
|
604
|
-
inputTokens: tokenInfo.inputTokens,
|
|
605
|
-
model: this.modelName,
|
|
606
|
-
pricing,
|
|
607
|
-
estimatedInputCost: tokenInfo.inputTokens / 1e6 * pricing.input,
|
|
608
|
-
note: "Cost is for input tokens only; output cost depends on response length"
|
|
609
|
-
};
|
|
610
|
-
}
|
|
611
|
-
async function resetChat() {
|
|
612
|
-
if (this.chat) {
|
|
613
|
-
logger_default.debug("Resetting Gemini chat session...");
|
|
614
|
-
const chatOptions = {
|
|
615
|
-
model: this.modelName,
|
|
616
|
-
// @ts-ignore
|
|
617
|
-
config: {
|
|
618
|
-
...this.chatConfig,
|
|
619
|
-
...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
|
|
620
|
-
},
|
|
621
|
-
history: []
|
|
622
|
-
};
|
|
623
|
-
if (this.enableGrounding) {
|
|
624
|
-
chatOptions.config.tools = [{
|
|
625
|
-
googleSearch: this.groundingConfig
|
|
626
|
-
}];
|
|
627
|
-
logger_default.debug(`Search grounding preserved during reset (WARNING: costs $35/1k queries)`);
|
|
628
|
-
}
|
|
629
|
-
this.chat = await this.genAIClient.chats.create(chatOptions);
|
|
630
|
-
logger_default.debug("Chat session reset.");
|
|
631
|
-
} else {
|
|
632
|
-
logger_default.warn("Cannot reset chat session: chat not yet initialized.");
|
|
633
|
-
}
|
|
634
|
-
}
|
|
635
|
-
function getChatHistory() {
|
|
636
|
-
if (!this.chat) {
|
|
637
|
-
logger_default.warn("Chat session not initialized. No history available.");
|
|
638
|
-
return [];
|
|
639
|
-
}
|
|
640
|
-
return this.chat.getHistory();
|
|
641
|
-
}
|
|
642
|
-
async function updateSystemInstructions(newInstructions) {
|
|
643
|
-
if (!newInstructions || typeof newInstructions !== "string") {
|
|
644
|
-
throw new Error("System instructions must be a non-empty string");
|
|
645
|
-
}
|
|
646
|
-
this.systemInstructions = newInstructions.trim();
|
|
647
|
-
this.chatConfig.systemInstruction = this.systemInstructions;
|
|
648
|
-
logger_default.debug("Updating system instructions and reinitializing chat...");
|
|
649
|
-
await this.init(true);
|
|
650
|
-
}
|
|
651
|
-
async function clearConversation() {
|
|
652
|
-
if (!this.chat) {
|
|
653
|
-
logger_default.warn("Cannot clear conversation: chat not initialized.");
|
|
654
|
-
return;
|
|
655
|
-
}
|
|
656
|
-
const history = this.chat.getHistory();
|
|
657
|
-
const exampleHistory = history.slice(0, this.exampleCount || 0);
|
|
658
|
-
this.chat = await this.genAIClient.chats.create({
|
|
659
|
-
model: this.modelName,
|
|
660
|
-
// @ts-ignore
|
|
661
|
-
config: {
|
|
662
|
-
...this.chatConfig,
|
|
663
|
-
...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
|
|
664
|
-
},
|
|
665
|
-
history: exampleHistory
|
|
666
|
-
});
|
|
667
|
-
this.lastResponseMetadata = null;
|
|
668
|
-
this._cumulativeUsage = {
|
|
669
|
-
promptTokens: 0,
|
|
670
|
-
responseTokens: 0,
|
|
671
|
-
totalTokens: 0,
|
|
672
|
-
attempts: 0
|
|
673
|
-
};
|
|
674
|
-
logger_default.debug(`Conversation cleared. Preserved ${exampleHistory.length} example items.`);
|
|
675
|
-
}
|
|
676
|
-
function getLastUsage() {
|
|
677
|
-
if (!this.lastResponseMetadata) {
|
|
678
|
-
return null;
|
|
679
|
-
}
|
|
680
|
-
const meta = this.lastResponseMetadata;
|
|
681
|
-
const cumulative = this._cumulativeUsage || { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 1 };
|
|
682
|
-
const useCumulative = cumulative.attempts > 0;
|
|
683
|
-
return {
|
|
684
|
-
// Token breakdown for billing - CUMULATIVE across all retry attempts
|
|
685
|
-
promptTokens: useCumulative ? cumulative.promptTokens : meta.promptTokens,
|
|
686
|
-
responseTokens: useCumulative ? cumulative.responseTokens : meta.responseTokens,
|
|
687
|
-
totalTokens: useCumulative ? cumulative.totalTokens : meta.totalTokens,
|
|
688
|
-
// Number of attempts (1 = success on first try, 2+ = retries were needed)
|
|
689
|
-
attempts: useCumulative ? cumulative.attempts : 1,
|
|
690
|
-
// Model verification for billing cross-check
|
|
691
|
-
modelVersion: meta.modelVersion,
|
|
692
|
-
// Actual model that responded (e.g., 'gemini-2.5-flash-001')
|
|
693
|
-
requestedModel: meta.requestedModel,
|
|
694
|
-
// Model you requested (e.g., 'gemini-2.5-flash')
|
|
695
|
-
// Timestamp for audit trail
|
|
696
|
-
timestamp: meta.timestamp
|
|
697
|
-
};
|
|
698
|
-
}
|
|
699
|
-
async function statelessMessage(sourcePayload, options = {}, validatorFn = null) {
|
|
700
|
-
if (!this.chat) {
|
|
701
|
-
throw new Error("Chat session not initialized. Please call init() first.");
|
|
702
|
-
}
|
|
703
|
-
const payloadStr = typeof sourcePayload === "string" ? sourcePayload : JSON.stringify(sourcePayload, null, 2);
|
|
704
|
-
const contents = [];
|
|
705
|
-
if (this.exampleCount > 0) {
|
|
706
|
-
const history = this.chat.getHistory();
|
|
707
|
-
const exampleHistory = history.slice(0, this.exampleCount);
|
|
708
|
-
contents.push(...exampleHistory);
|
|
709
|
-
}
|
|
710
|
-
contents.push({ role: "user", parts: [{ text: payloadStr }] });
|
|
711
|
-
const mergedLabels = { ...this.labels, ...options.labels || {} };
|
|
712
|
-
const result = await this.genAIClient.models.generateContent({
|
|
713
|
-
model: this.modelName,
|
|
714
|
-
contents,
|
|
715
|
-
config: {
|
|
716
|
-
...this.chatConfig,
|
|
717
|
-
...this.vertexai && Object.keys(mergedLabels).length > 0 && { labels: mergedLabels }
|
|
718
|
-
}
|
|
719
|
-
});
|
|
720
|
-
this.lastResponseMetadata = {
|
|
721
|
-
modelVersion: result.modelVersion || null,
|
|
722
|
-
requestedModel: this.modelName,
|
|
723
|
-
promptTokens: result.usageMetadata?.promptTokenCount || 0,
|
|
724
|
-
responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
|
|
725
|
-
totalTokens: result.usageMetadata?.totalTokenCount || 0,
|
|
726
|
-
timestamp: Date.now()
|
|
727
|
-
};
|
|
728
|
-
this._cumulativeUsage = {
|
|
729
|
-
promptTokens: this.lastResponseMetadata.promptTokens,
|
|
730
|
-
responseTokens: this.lastResponseMetadata.responseTokens,
|
|
731
|
-
totalTokens: this.lastResponseMetadata.totalTokens,
|
|
732
|
-
attempts: 1
|
|
733
|
-
};
|
|
734
|
-
if (result.usageMetadata && logger_default.level !== "silent") {
|
|
735
|
-
logger_default.debug(`Stateless message metadata:`, {
|
|
736
|
-
modelVersion: result.modelVersion || "not-provided",
|
|
737
|
-
promptTokens: result.usageMetadata.promptTokenCount,
|
|
738
|
-
responseTokens: result.usageMetadata.candidatesTokenCount
|
|
739
|
-
});
|
|
740
|
-
}
|
|
741
|
-
const modelResponse = result.text;
|
|
742
|
-
const extractedJSON = extractJSON(modelResponse);
|
|
743
|
-
let transformedPayload = extractedJSON?.data ? extractedJSON.data : extractedJSON;
|
|
744
|
-
if (validatorFn) {
|
|
745
|
-
await validatorFn(transformedPayload);
|
|
90
|
+
return false;
|
|
746
91
|
}
|
|
747
|
-
return transformedPayload;
|
|
748
92
|
}
|
|
749
93
|
function attemptJSONRecovery(text, maxAttempts = 100) {
|
|
750
94
|
if (!text || typeof text !== "string") return null;
|
|
@@ -862,28 +206,51 @@ function attemptJSONRecovery(text, maxAttempts = 100) {
|
|
|
862
206
|
}
|
|
863
207
|
return null;
|
|
864
208
|
}
|
|
865
|
-
function
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
209
|
+
function extractCompleteStructure(text, startPos) {
|
|
210
|
+
const startChar = text[startPos];
|
|
211
|
+
const endChar = startChar === "{" ? "}" : "]";
|
|
212
|
+
let depth = 0;
|
|
213
|
+
let inString = false;
|
|
214
|
+
let escaped = false;
|
|
215
|
+
for (let i = startPos; i < text.length; i++) {
|
|
216
|
+
const char = text[i];
|
|
217
|
+
if (escaped) {
|
|
218
|
+
escaped = false;
|
|
219
|
+
continue;
|
|
220
|
+
}
|
|
221
|
+
if (char === "\\" && inString) {
|
|
222
|
+
escaped = true;
|
|
223
|
+
continue;
|
|
224
|
+
}
|
|
225
|
+
if (char === '"' && !escaped) {
|
|
226
|
+
inString = !inString;
|
|
227
|
+
continue;
|
|
228
|
+
}
|
|
229
|
+
if (!inString) {
|
|
230
|
+
if (char === startChar) {
|
|
231
|
+
depth++;
|
|
232
|
+
} else if (char === endChar) {
|
|
233
|
+
depth--;
|
|
234
|
+
if (depth === 0) {
|
|
235
|
+
return text.substring(startPos, i + 1);
|
|
236
|
+
}
|
|
871
237
|
}
|
|
872
238
|
}
|
|
873
|
-
return false;
|
|
874
|
-
} catch (e) {
|
|
875
|
-
return false;
|
|
876
239
|
}
|
|
240
|
+
return null;
|
|
877
241
|
}
|
|
878
|
-
function
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
242
|
+
function findCompleteJSONStructures(text) {
|
|
243
|
+
const results = [];
|
|
244
|
+
const startChars = ["{", "["];
|
|
245
|
+
for (let i = 0; i < text.length; i++) {
|
|
246
|
+
if (startChars.includes(text[i])) {
|
|
247
|
+
const extracted = extractCompleteStructure(text, i);
|
|
248
|
+
if (extracted) {
|
|
249
|
+
results.push(extracted);
|
|
250
|
+
}
|
|
251
|
+
}
|
|
886
252
|
}
|
|
253
|
+
return results;
|
|
887
254
|
}
|
|
888
255
|
function extractJSON(text) {
|
|
889
256
|
if (!text || typeof text !== "string") {
|
|
@@ -908,9 +275,7 @@ function extractJSON(text) {
|
|
|
908
275
|
}
|
|
909
276
|
}
|
|
910
277
|
const jsonPatterns = [
|
|
911
|
-
// Match complete JSON objects
|
|
912
278
|
/\{[\s\S]*\}/g,
|
|
913
|
-
// Match complete JSON arrays
|
|
914
279
|
/\[[\s\S]*\]/g
|
|
915
280
|
];
|
|
916
281
|
for (const pattern of jsonPatterns) {
|
|
@@ -942,114 +307,1556 @@ function extractJSON(text) {
|
|
|
942
307
|
}
|
|
943
308
|
throw new Error(`Could not extract valid JSON from model response. Response preview: ${text.substring(0, 200)}...`);
|
|
944
309
|
}
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
310
|
+
|
|
311
|
+
// base.js
|
|
312
|
+
import_dotenv.default.config();
|
|
313
|
+
var { NODE_ENV = "unknown", LOG_LEVEL = "" } = process.env;
|
|
314
|
+
var DEFAULT_SAFETY_SETTINGS = [
|
|
315
|
+
{ category: import_genai.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE },
|
|
316
|
+
{ category: import_genai.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE }
|
|
317
|
+
];
|
|
318
|
+
var DEFAULT_THINKING_CONFIG = {
|
|
319
|
+
thinkingBudget: 0
|
|
320
|
+
};
|
|
321
|
+
var DEFAULT_MAX_OUTPUT_TOKENS = 5e4;
|
|
322
|
+
var THINKING_SUPPORTED_MODELS = [
|
|
323
|
+
/^gemini-3-flash(-preview)?$/,
|
|
324
|
+
/^gemini-3-pro(-preview|-image-preview)?$/,
|
|
325
|
+
/^gemini-2\.5-pro/,
|
|
326
|
+
/^gemini-2\.5-flash(-preview)?$/,
|
|
327
|
+
/^gemini-2\.5-flash-lite(-preview)?$/,
|
|
328
|
+
/^gemini-2\.0-flash$/
|
|
329
|
+
];
|
|
330
|
+
var MODEL_PRICING = {
|
|
331
|
+
"gemini-2.5-flash": { input: 0.15, output: 0.6 },
|
|
332
|
+
"gemini-2.5-flash-lite": { input: 0.02, output: 0.1 },
|
|
333
|
+
"gemini-2.5-pro": { input: 2.5, output: 10 },
|
|
334
|
+
"gemini-3-pro": { input: 2, output: 12 },
|
|
335
|
+
"gemini-3-pro-preview": { input: 2, output: 12 },
|
|
336
|
+
"gemini-2.0-flash": { input: 0.1, output: 0.4 },
|
|
337
|
+
"gemini-2.0-flash-lite": { input: 0.02, output: 0.1 }
|
|
338
|
+
};
|
|
339
|
+
var BaseGemini = class {
|
|
340
|
+
/**
|
|
341
|
+
* @param {BaseGeminiOptions} [options={}]
|
|
342
|
+
*/
|
|
343
|
+
constructor(options = {}) {
|
|
344
|
+
this.modelName = options.modelName || "gemini-2.5-flash";
|
|
345
|
+
if (options.systemPrompt !== void 0) {
|
|
346
|
+
this.systemPrompt = options.systemPrompt;
|
|
347
|
+
} else {
|
|
348
|
+
this.systemPrompt = null;
|
|
349
|
+
}
|
|
350
|
+
this.vertexai = options.vertexai || false;
|
|
351
|
+
this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
|
|
352
|
+
this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || void 0;
|
|
353
|
+
this.googleAuthOptions = options.googleAuthOptions || null;
|
|
354
|
+
this.apiKey = options.apiKey !== void 0 && options.apiKey !== null ? options.apiKey : process.env.GEMINI_API_KEY;
|
|
355
|
+
if (!this.vertexai && !this.apiKey) {
|
|
356
|
+
throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
|
|
357
|
+
}
|
|
358
|
+
if (this.vertexai && !this.project) {
|
|
359
|
+
throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
|
|
360
|
+
}
|
|
361
|
+
this._configureLogLevel(options.logLevel);
|
|
362
|
+
this.labels = options.labels || {};
|
|
363
|
+
this.chatConfig = {
|
|
364
|
+
temperature: 0.7,
|
|
365
|
+
topP: 0.95,
|
|
366
|
+
topK: 64,
|
|
367
|
+
safetySettings: DEFAULT_SAFETY_SETTINGS,
|
|
368
|
+
...options.chatConfig
|
|
369
|
+
};
|
|
370
|
+
if (this.systemPrompt) {
|
|
371
|
+
this.chatConfig.systemInstruction = this.systemPrompt;
|
|
372
|
+
} else if (this.systemPrompt === null && options.systemPrompt === void 0) {
|
|
373
|
+
} else if (options.systemPrompt === null || options.systemPrompt === false) {
|
|
374
|
+
delete this.chatConfig.systemInstruction;
|
|
375
|
+
}
|
|
376
|
+
if (options.maxOutputTokens !== void 0) {
|
|
377
|
+
if (options.maxOutputTokens === null) {
|
|
378
|
+
delete this.chatConfig.maxOutputTokens;
|
|
379
|
+
} else {
|
|
380
|
+
this.chatConfig.maxOutputTokens = options.maxOutputTokens;
|
|
381
|
+
}
|
|
382
|
+
} else if (options.chatConfig?.maxOutputTokens !== void 0) {
|
|
383
|
+
if (options.chatConfig.maxOutputTokens === null) {
|
|
384
|
+
delete this.chatConfig.maxOutputTokens;
|
|
385
|
+
}
|
|
386
|
+
} else {
|
|
387
|
+
this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
|
|
954
388
|
}
|
|
389
|
+
this._configureThinking(options.thinkingConfig);
|
|
390
|
+
const clientOptions = this.vertexai ? {
|
|
391
|
+
vertexai: true,
|
|
392
|
+
project: this.project,
|
|
393
|
+
...this.location && { location: this.location },
|
|
394
|
+
...this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions }
|
|
395
|
+
} : { apiKey: this.apiKey };
|
|
396
|
+
this.genAIClient = new import_genai.GoogleGenAI(clientOptions);
|
|
397
|
+
this.chatSession = null;
|
|
398
|
+
this.lastResponseMetadata = null;
|
|
399
|
+
this.exampleCount = 0;
|
|
400
|
+
this._cumulativeUsage = {
|
|
401
|
+
promptTokens: 0,
|
|
402
|
+
responseTokens: 0,
|
|
403
|
+
totalTokens: 0,
|
|
404
|
+
attempts: 0
|
|
405
|
+
};
|
|
406
|
+
logger_default.debug(`${this.constructor.name} created with model: ${this.modelName}`);
|
|
955
407
|
}
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
408
|
+
// ── Initialization ───────────────────────────────────────────────────────
|
|
409
|
+
/**
|
|
410
|
+
* Initializes the chat session. Idempotent unless force=true.
|
|
411
|
+
* Subclasses can override `_getChatCreateOptions()` to customize.
|
|
412
|
+
* @param {boolean} [force=false]
|
|
413
|
+
* @returns {Promise<void>}
|
|
414
|
+
*/
|
|
415
|
+
async init(force = false) {
|
|
416
|
+
if (this.chatSession && !force) return;
|
|
417
|
+
logger_default.debug(`Initializing ${this.constructor.name} chat session with model: ${this.modelName}...`);
|
|
418
|
+
const chatOptions = this._getChatCreateOptions();
|
|
419
|
+
this.chatSession = this.genAIClient.chats.create(chatOptions);
|
|
420
|
+
try {
|
|
421
|
+
await this.genAIClient.models.list();
|
|
422
|
+
logger_default.debug(`${this.constructor.name}: API connection successful.`);
|
|
423
|
+
} catch (e) {
|
|
424
|
+
throw new Error(`${this.constructor.name} initialization failed: ${e.message}`);
|
|
969
425
|
}
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
426
|
+
logger_default.debug(`${this.constructor.name}: Chat session initialized.`);
|
|
427
|
+
}
|
|
428
|
+
/**
|
|
429
|
+
* Builds the options object for `genAIClient.chats.create()`.
|
|
430
|
+
* Override in subclasses to add tools, grounding, etc.
|
|
431
|
+
* @returns {Object}
|
|
432
|
+
* @protected
|
|
433
|
+
*/
|
|
434
|
+
_getChatCreateOptions() {
|
|
435
|
+
return {
|
|
436
|
+
model: this.modelName,
|
|
437
|
+
config: {
|
|
438
|
+
...this.chatConfig,
|
|
439
|
+
...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
|
|
440
|
+
},
|
|
441
|
+
history: []
|
|
442
|
+
};
|
|
443
|
+
}
|
|
444
|
+
// ── Chat Session Management ──────────────────────────────────────────────
|
|
445
|
+
/**
|
|
446
|
+
* Creates a new chat session with the given history.
|
|
447
|
+
* Internal helper used by init, seed, clearHistory, reset.
|
|
448
|
+
* @param {Array} [history=[]]
|
|
449
|
+
* @returns {Object} The new chat session
|
|
450
|
+
* @protected
|
|
451
|
+
*/
|
|
452
|
+
_createChatSession(history = []) {
|
|
453
|
+
const opts = this._getChatCreateOptions();
|
|
454
|
+
opts.history = history;
|
|
455
|
+
return this.genAIClient.chats.create(opts);
|
|
456
|
+
}
|
|
457
|
+
/**
|
|
458
|
+
* Retrieves the current conversation history.
|
|
459
|
+
* @param {boolean} [curated=false]
|
|
460
|
+
* @returns {Array<Object>}
|
|
461
|
+
*/
|
|
462
|
+
getHistory(curated = false) {
|
|
463
|
+
if (!this.chatSession) {
|
|
464
|
+
logger_default.warn("Chat session not initialized. No history available.");
|
|
465
|
+
return [];
|
|
973
466
|
}
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
467
|
+
return this.chatSession.getHistory(curated);
|
|
468
|
+
}
|
|
469
|
+
/**
|
|
470
|
+
* Clears conversation history. Recreates chat session with empty history.
|
|
471
|
+
* Subclasses may override to preserve seeded examples.
|
|
472
|
+
* @returns {Promise<void>}
|
|
473
|
+
*/
|
|
474
|
+
async clearHistory() {
|
|
475
|
+
if (!this.chatSession) {
|
|
476
|
+
logger_default.warn(`Cannot clear history: chat not initialized.`);
|
|
477
|
+
return;
|
|
977
478
|
}
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
479
|
+
this.chatSession = this._createChatSession([]);
|
|
480
|
+
this.lastResponseMetadata = null;
|
|
481
|
+
this._cumulativeUsage = { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 0 };
|
|
482
|
+
logger_default.debug(`${this.constructor.name}: Conversation history cleared.`);
|
|
483
|
+
}
|
|
484
|
+
// ── Few-Shot Seeding ─────────────────────────────────────────────────────
|
|
485
|
+
/**
|
|
486
|
+
* Seeds the chat session with example input/output pairs for few-shot learning.
|
|
487
|
+
* @param {TransformationExample[]} examples - Array of example objects
|
|
488
|
+
* @param {Object} [opts={}] - Key configuration
|
|
489
|
+
* @param {string} [opts.promptKey='PROMPT'] - Key for input data in examples
|
|
490
|
+
* @param {string} [opts.answerKey='ANSWER'] - Key for output data in examples
|
|
491
|
+
* @param {string} [opts.contextKey='CONTEXT'] - Key for optional context
|
|
492
|
+
* @param {string} [opts.explanationKey='EXPLANATION'] - Key for optional explanations
|
|
493
|
+
* @param {string} [opts.systemPromptKey='SYSTEM'] - Key for system prompt overrides in examples
|
|
494
|
+
* @returns {Promise<Array>} The updated chat history
|
|
495
|
+
*/
|
|
496
|
+
async seed(examples, opts = {}) {
|
|
497
|
+
await this.init();
|
|
498
|
+
if (!examples || !Array.isArray(examples) || examples.length === 0) {
|
|
499
|
+
logger_default.debug("No examples provided. Skipping seeding.");
|
|
500
|
+
return this.getHistory();
|
|
501
|
+
}
|
|
502
|
+
const promptKey = opts.promptKey || "PROMPT";
|
|
503
|
+
const answerKey = opts.answerKey || "ANSWER";
|
|
504
|
+
const contextKey = opts.contextKey || "CONTEXT";
|
|
505
|
+
const explanationKey = opts.explanationKey || "EXPLANATION";
|
|
506
|
+
const systemPromptKey = opts.systemPromptKey || "SYSTEM";
|
|
507
|
+
const instructionExample = examples.find((ex) => ex[systemPromptKey]);
|
|
508
|
+
if (instructionExample) {
|
|
509
|
+
logger_default.debug(`Found system prompt in examples; reinitializing chat.`);
|
|
510
|
+
this.systemPrompt = instructionExample[systemPromptKey];
|
|
511
|
+
this.chatConfig.systemInstruction = /** @type {string} */
|
|
512
|
+
this.systemPrompt;
|
|
513
|
+
await this.init(true);
|
|
514
|
+
}
|
|
515
|
+
logger_default.debug(`Seeding chat with ${examples.length} examples...`);
|
|
516
|
+
const historyToAdd = [];
|
|
517
|
+
for (const example of examples) {
|
|
518
|
+
const contextValue = example[contextKey] || "";
|
|
519
|
+
const promptValue = example[promptKey] || "";
|
|
520
|
+
const answerValue = example[answerKey] || "";
|
|
521
|
+
const explanationValue = example[explanationKey] || "";
|
|
522
|
+
let userText = "";
|
|
523
|
+
let modelResponse = {};
|
|
524
|
+
if (contextValue) {
|
|
525
|
+
let contextText = isJSON(contextValue) ? JSON.stringify(contextValue, null, 2) : contextValue;
|
|
526
|
+
userText += `CONTEXT:
|
|
527
|
+
${contextText}
|
|
528
|
+
|
|
529
|
+
`;
|
|
530
|
+
}
|
|
531
|
+
if (promptValue) {
|
|
532
|
+
let promptText = isJSON(promptValue) ? JSON.stringify(promptValue, null, 2) : promptValue;
|
|
533
|
+
userText += promptText;
|
|
534
|
+
}
|
|
535
|
+
if (answerValue) modelResponse.data = answerValue;
|
|
536
|
+
if (explanationValue) modelResponse.explanation = explanationValue;
|
|
537
|
+
const modelText = JSON.stringify(modelResponse, null, 2);
|
|
538
|
+
if (userText.trim().length && modelText.trim().length > 0) {
|
|
539
|
+
historyToAdd.push({ role: "user", parts: [{ text: userText.trim() }] });
|
|
540
|
+
historyToAdd.push({ role: "model", parts: [{ text: modelText.trim() }] });
|
|
541
|
+
}
|
|
542
|
+
}
|
|
543
|
+
const currentHistory = this.chatSession?.getHistory() || [];
|
|
544
|
+
logger_default.debug(`Adding ${historyToAdd.length} items to chat history (${currentHistory.length} existing)...`);
|
|
545
|
+
this.chatSession = this._createChatSession([...currentHistory, ...historyToAdd]);
|
|
546
|
+
this.exampleCount = currentHistory.length + historyToAdd.length;
|
|
547
|
+
const newHistory = this.chatSession.getHistory();
|
|
548
|
+
logger_default.debug(`Chat session now has ${newHistory.length} history items.`);
|
|
549
|
+
return newHistory;
|
|
550
|
+
}
|
|
551
|
+
// ── Response Metadata ────────────────────────────────────────────────────
|
|
552
|
+
/**
|
|
553
|
+
* Captures response metadata (model version, token counts) from an API response.
|
|
554
|
+
* @param {Object} response - The API response object
|
|
555
|
+
* @protected
|
|
556
|
+
*/
|
|
557
|
+
_captureMetadata(response) {
|
|
558
|
+
this.lastResponseMetadata = {
|
|
559
|
+
modelVersion: response.modelVersion || null,
|
|
560
|
+
requestedModel: this.modelName,
|
|
561
|
+
promptTokens: response.usageMetadata?.promptTokenCount || 0,
|
|
562
|
+
responseTokens: response.usageMetadata?.candidatesTokenCount || 0,
|
|
563
|
+
totalTokens: response.usageMetadata?.totalTokenCount || 0,
|
|
564
|
+
timestamp: Date.now()
|
|
565
|
+
};
|
|
566
|
+
}
|
|
567
|
+
/**
|
|
568
|
+
* Returns structured usage data from the last API call for billing verification.
|
|
569
|
+
* Includes CUMULATIVE token counts across all retry attempts.
|
|
570
|
+
* @returns {UsageData|null} Usage data or null if no API call has been made.
|
|
571
|
+
*/
|
|
572
|
+
getLastUsage() {
|
|
573
|
+
if (!this.lastResponseMetadata) return null;
|
|
574
|
+
const meta = this.lastResponseMetadata;
|
|
575
|
+
const cumulative = this._cumulativeUsage || { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 1 };
|
|
576
|
+
const useCumulative = cumulative.attempts > 0;
|
|
577
|
+
return {
|
|
578
|
+
promptTokens: useCumulative ? cumulative.promptTokens : meta.promptTokens,
|
|
579
|
+
responseTokens: useCumulative ? cumulative.responseTokens : meta.responseTokens,
|
|
580
|
+
totalTokens: useCumulative ? cumulative.totalTokens : meta.totalTokens,
|
|
581
|
+
attempts: useCumulative ? cumulative.attempts : 1,
|
|
582
|
+
modelVersion: meta.modelVersion,
|
|
583
|
+
requestedModel: meta.requestedModel,
|
|
584
|
+
timestamp: meta.timestamp
|
|
585
|
+
};
|
|
586
|
+
}
|
|
587
|
+
// ── Token Estimation ─────────────────────────────────────────────────────
|
|
588
|
+
/**
|
|
589
|
+
* Estimates INPUT token count for a payload before sending.
|
|
590
|
+
* Includes system prompt + chat history + your new message.
|
|
591
|
+
* @param {Object|string} nextPayload - The next message to estimate
|
|
592
|
+
* @returns {Promise<{ inputTokens: number }>}
|
|
593
|
+
*/
|
|
594
|
+
async estimate(nextPayload) {
|
|
595
|
+
const contents = [];
|
|
596
|
+
if (this.systemPrompt) {
|
|
597
|
+
contents.push({ parts: [{ text: this.systemPrompt }] });
|
|
598
|
+
}
|
|
599
|
+
if (this.chatSession && typeof this.chatSession.getHistory === "function") {
|
|
600
|
+
const history = this.chatSession.getHistory();
|
|
601
|
+
if (Array.isArray(history) && history.length > 0) {
|
|
602
|
+
contents.push(...history);
|
|
603
|
+
}
|
|
604
|
+
}
|
|
605
|
+
const nextMessage = typeof nextPayload === "string" ? nextPayload : JSON.stringify(nextPayload, null, 2);
|
|
606
|
+
contents.push({ parts: [{ text: nextMessage }] });
|
|
607
|
+
const resp = await this.genAIClient.models.countTokens({
|
|
608
|
+
model: this.modelName,
|
|
609
|
+
contents
|
|
610
|
+
});
|
|
611
|
+
return { inputTokens: resp.totalTokens };
|
|
612
|
+
}
|
|
613
|
+
/**
|
|
614
|
+
* Estimates the INPUT cost of sending a payload based on model pricing.
|
|
615
|
+
* @param {Object|string} nextPayload - The next message to estimate
|
|
616
|
+
* @returns {Promise<Object>} Cost estimation
|
|
617
|
+
*/
|
|
618
|
+
async estimateCost(nextPayload) {
|
|
619
|
+
const tokenInfo = await this.estimate(nextPayload);
|
|
620
|
+
const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };
|
|
621
|
+
return {
|
|
622
|
+
inputTokens: tokenInfo.inputTokens,
|
|
623
|
+
model: this.modelName,
|
|
624
|
+
pricing,
|
|
625
|
+
estimatedInputCost: tokenInfo.inputTokens / 1e6 * pricing.input,
|
|
626
|
+
note: "Cost is for input tokens only; output cost depends on response length"
|
|
627
|
+
};
|
|
628
|
+
}
|
|
629
|
+
// ── Private Helpers ──────────────────────────────────────────────────────
|
|
630
|
+
/**
|
|
631
|
+
* Configures the log level based on options, env vars, or NODE_ENV.
|
|
632
|
+
* @param {string} [logLevel]
|
|
633
|
+
* @private
|
|
634
|
+
*/
|
|
635
|
+
_configureLogLevel(logLevel) {
|
|
636
|
+
if (logLevel) {
|
|
637
|
+
if (logLevel === "none") {
|
|
638
|
+
logger_default.level = "silent";
|
|
639
|
+
} else {
|
|
640
|
+
logger_default.level = logLevel;
|
|
641
|
+
}
|
|
642
|
+
} else if (LOG_LEVEL) {
|
|
643
|
+
logger_default.level = LOG_LEVEL;
|
|
644
|
+
} else if (NODE_ENV === "dev") {
|
|
645
|
+
logger_default.level = "debug";
|
|
646
|
+
} else if (NODE_ENV === "test") {
|
|
647
|
+
logger_default.level = "warn";
|
|
648
|
+
} else if (NODE_ENV.startsWith("prod")) {
|
|
649
|
+
logger_default.level = "error";
|
|
650
|
+
} else {
|
|
651
|
+
logger_default.level = "info";
|
|
652
|
+
}
|
|
653
|
+
}
|
|
654
|
+
/**
|
|
655
|
+
* Configures thinking settings based on model support.
|
|
656
|
+
* @param {Object|null|undefined} thinkingConfig
|
|
657
|
+
* @private
|
|
658
|
+
*/
|
|
659
|
+
_configureThinking(thinkingConfig) {
|
|
660
|
+
const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some((p) => p.test(this.modelName));
|
|
661
|
+
if (thinkingConfig === void 0) return;
|
|
662
|
+
if (thinkingConfig === null) {
|
|
663
|
+
delete this.chatConfig.thinkingConfig;
|
|
664
|
+
logger_default.debug(`thinkingConfig set to null - removed from configuration`);
|
|
665
|
+
return;
|
|
666
|
+
}
|
|
667
|
+
if (!modelSupportsThinking) {
|
|
668
|
+
logger_default.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
|
|
669
|
+
return;
|
|
670
|
+
}
|
|
671
|
+
const config = { ...DEFAULT_THINKING_CONFIG, ...thinkingConfig };
|
|
672
|
+
if (thinkingConfig.thinkingLevel !== void 0) {
|
|
673
|
+
delete config.thinkingBudget;
|
|
674
|
+
}
|
|
675
|
+
this.chatConfig.thinkingConfig = config;
|
|
676
|
+
logger_default.debug(`Thinking config applied: ${JSON.stringify(config)}`);
|
|
677
|
+
}
|
|
678
|
+
};
|
|
679
|
+
var base_default = BaseGemini;
|
|
680
|
+
|
|
681
|
+
// transformer.js
|
|
682
|
+
var import_promises = __toESM(require("fs/promises"), 1);
|
|
683
|
+
var import_path = __toESM(require("path"), 1);
|
|
684
|
+
var DEFAULT_SYSTEM_INSTRUCTIONS = `
|
|
685
|
+
You are an expert JSON transformation engine. Your task is to accurately convert data payloads from one format to another.
|
|
686
|
+
|
|
687
|
+
You will be provided with example transformations (Source JSON -> Target JSON).
|
|
688
|
+
|
|
689
|
+
Learn the mapping rules from these examples.
|
|
690
|
+
|
|
691
|
+
When presented with new Source JSON, apply the learned transformation rules to produce a new Target JSON payload.
|
|
692
|
+
|
|
693
|
+
Always respond ONLY with a valid JSON object that strictly adheres to the expected output format.
|
|
694
|
+
|
|
695
|
+
Do not include any additional text, explanations, or formatting before or after the JSON object.
|
|
696
|
+
`;
|
|
697
|
+
var Transformer = class extends base_default {
|
|
698
|
+
/**
|
|
699
|
+
* @param {TransformerOptions} [options={}]
|
|
700
|
+
*/
|
|
701
|
+
constructor(options = {}) {
|
|
702
|
+
if (options.systemPrompt === void 0) {
|
|
703
|
+
options = { ...options, systemPrompt: DEFAULT_SYSTEM_INSTRUCTIONS };
|
|
704
|
+
}
|
|
705
|
+
super(options);
|
|
706
|
+
this.chatConfig.responseMimeType = "application/json";
|
|
707
|
+
this.onlyJSON = options.onlyJSON !== void 0 ? options.onlyJSON : true;
|
|
708
|
+
if (options.responseSchema) {
|
|
709
|
+
this.chatConfig.responseSchema = options.responseSchema;
|
|
710
|
+
}
|
|
711
|
+
this.promptKey = options.promptKey || options.sourceKey || "PROMPT";
|
|
712
|
+
this.answerKey = options.answerKey || options.targetKey || "ANSWER";
|
|
713
|
+
this.contextKey = options.contextKey || "CONTEXT";
|
|
714
|
+
this.explanationKey = options.explanationKey || "EXPLANATION";
|
|
715
|
+
this.systemPromptKey = options.systemPromptKey || "SYSTEM";
|
|
716
|
+
if (this.promptKey === this.answerKey) {
|
|
717
|
+
throw new Error("Source and target keys cannot be the same. Please provide distinct keys.");
|
|
718
|
+
}
|
|
719
|
+
this.examplesFile = options.examplesFile || null;
|
|
720
|
+
this.exampleData = options.exampleData || null;
|
|
721
|
+
this.asyncValidator = options.asyncValidator || null;
|
|
722
|
+
this.maxRetries = options.maxRetries || 3;
|
|
723
|
+
this.retryDelay = options.retryDelay || 1e3;
|
|
724
|
+
this.enableGrounding = options.enableGrounding || false;
|
|
725
|
+
this.groundingConfig = options.groundingConfig || {};
|
|
726
|
+
logger_default.debug(`Transformer keys \u2014 Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
|
|
727
|
+
}
|
|
728
|
+
// ── Chat Create Options Override ──────────────────────────────────────────
|
|
729
|
+
/** @protected */
|
|
730
|
+
_getChatCreateOptions() {
|
|
731
|
+
const opts = super._getChatCreateOptions();
|
|
732
|
+
if (this.enableGrounding) {
|
|
733
|
+
opts.config.tools = [{ googleSearch: this.groundingConfig }];
|
|
734
|
+
logger_default.debug(`Search grounding ENABLED (WARNING: costs $35/1k queries)`);
|
|
735
|
+
}
|
|
736
|
+
return opts;
|
|
737
|
+
}
|
|
738
|
+
// ── Seeding ──────────────────────────────────────────────────────────────
|
|
739
|
+
/**
|
|
740
|
+
* Seeds the chat with transformation examples using the configured key mapping.
|
|
741
|
+
* Overrides base seed() to use Transformer-specific keys and support
|
|
742
|
+
* examplesFile/exampleData fallbacks.
|
|
743
|
+
*
|
|
744
|
+
* @param {TransformationExample[]} [examples] - Array of example objects
|
|
745
|
+
* @returns {Promise<Array>} The updated chat history
|
|
746
|
+
*/
|
|
747
|
+
async seed(examples) {
|
|
748
|
+
await this.init();
|
|
749
|
+
if (!examples || !Array.isArray(examples) || examples.length === 0) {
|
|
750
|
+
if (this.examplesFile) {
|
|
751
|
+
logger_default.debug(`No examples provided, loading from file: ${this.examplesFile}`);
|
|
752
|
+
try {
|
|
753
|
+
const filePath = import_path.default.resolve(this.examplesFile);
|
|
754
|
+
const raw = await import_promises.default.readFile(filePath, "utf-8");
|
|
755
|
+
examples = JSON.parse(raw);
|
|
756
|
+
} catch (err) {
|
|
757
|
+
throw new Error(`Could not load examples from file: ${this.examplesFile}. ${err.message}`);
|
|
758
|
+
}
|
|
759
|
+
} else if (this.exampleData) {
|
|
760
|
+
logger_default.debug(`Using example data provided in options.`);
|
|
761
|
+
if (Array.isArray(this.exampleData)) {
|
|
762
|
+
examples = this.exampleData;
|
|
763
|
+
} else {
|
|
764
|
+
throw new Error(`Invalid example data provided. Expected an array of examples.`);
|
|
985
765
|
}
|
|
766
|
+
} else {
|
|
767
|
+
logger_default.debug("No examples provided and no examples file specified. Skipping seeding.");
|
|
768
|
+
return this.getHistory();
|
|
986
769
|
}
|
|
987
770
|
}
|
|
771
|
+
return await super.seed(examples, {
|
|
772
|
+
promptKey: this.promptKey,
|
|
773
|
+
answerKey: this.answerKey,
|
|
774
|
+
contextKey: this.contextKey,
|
|
775
|
+
explanationKey: this.explanationKey,
|
|
776
|
+
systemPromptKey: this.systemPromptKey
|
|
777
|
+
});
|
|
988
778
|
}
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
779
|
+
// ── Primary Send Method ──────────────────────────────────────────────────
|
|
780
|
+
/**
|
|
781
|
+
* Transforms a payload using the seeded examples and model.
|
|
782
|
+
* Includes validation and automatic retry with AI-powered error correction.
|
|
783
|
+
*
|
|
784
|
+
* @param {Object|string} payload - The source payload to transform
|
|
785
|
+
* @param {import('./types').SendOptions} [opts={}] - Per-message options
|
|
786
|
+
* @param {AsyncValidatorFunction|null} [validatorFn] - Validator for this call (overrides constructor validator)
|
|
787
|
+
* @returns {Promise<Object>} The transformed payload
|
|
788
|
+
*/
|
|
789
|
+
async send(payload, opts = {}, validatorFn = null) {
|
|
790
|
+
if (!this.chatSession) {
|
|
791
|
+
throw new Error("Chat session not initialized. Please call init() first.");
|
|
792
|
+
}
|
|
793
|
+
const validator = validatorFn || this.asyncValidator;
|
|
794
|
+
if (opts.stateless) {
|
|
795
|
+
return await this._statelessSend(payload, opts, validator);
|
|
796
|
+
}
|
|
797
|
+
const maxRetries = opts.maxRetries ?? this.maxRetries;
|
|
798
|
+
const retryDelay = opts.retryDelay ?? this.retryDelay;
|
|
799
|
+
if (opts.enableGrounding !== void 0 && opts.enableGrounding !== this.enableGrounding) {
|
|
800
|
+
const originalGrounding = this.enableGrounding;
|
|
801
|
+
const originalConfig = this.groundingConfig;
|
|
802
|
+
try {
|
|
803
|
+
this.enableGrounding = opts.enableGrounding;
|
|
804
|
+
this.groundingConfig = opts.groundingConfig ?? this.groundingConfig;
|
|
805
|
+
await this.init(true);
|
|
806
|
+
} catch (error) {
|
|
807
|
+
this.enableGrounding = originalGrounding;
|
|
808
|
+
this.groundingConfig = originalConfig;
|
|
809
|
+
throw error;
|
|
810
|
+
}
|
|
811
|
+
opts._restoreGrounding = async () => {
|
|
812
|
+
this.enableGrounding = originalGrounding;
|
|
813
|
+
this.groundingConfig = originalConfig;
|
|
814
|
+
await this.init(true);
|
|
815
|
+
};
|
|
816
|
+
}
|
|
817
|
+
let lastPayload = this._preparePayload(payload);
|
|
818
|
+
const messageOptions = {};
|
|
819
|
+
if (opts.labels) messageOptions.labels = opts.labels;
|
|
820
|
+
this._cumulativeUsage = { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 0 };
|
|
821
|
+
let lastError = null;
|
|
822
|
+
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
823
|
+
try {
|
|
824
|
+
const transformedPayload = attempt === 0 ? await this.rawSend(lastPayload, messageOptions) : await this.rebuild(lastPayload, lastError.message);
|
|
825
|
+
if (this.lastResponseMetadata) {
|
|
826
|
+
this._cumulativeUsage.promptTokens += this.lastResponseMetadata.promptTokens || 0;
|
|
827
|
+
this._cumulativeUsage.responseTokens += this.lastResponseMetadata.responseTokens || 0;
|
|
828
|
+
this._cumulativeUsage.totalTokens += this.lastResponseMetadata.totalTokens || 0;
|
|
829
|
+
this._cumulativeUsage.attempts = attempt + 1;
|
|
830
|
+
}
|
|
831
|
+
lastPayload = transformedPayload;
|
|
832
|
+
if (validator) {
|
|
833
|
+
await validator(transformedPayload);
|
|
834
|
+
}
|
|
835
|
+
logger_default.debug(`Transformation succeeded on attempt ${attempt + 1}`);
|
|
836
|
+
if (opts._restoreGrounding) await opts._restoreGrounding();
|
|
837
|
+
return transformedPayload;
|
|
838
|
+
} catch (error) {
|
|
839
|
+
lastError = error;
|
|
840
|
+
logger_default.warn(`Attempt ${attempt + 1} failed: ${error.message}`);
|
|
841
|
+
if (attempt >= maxRetries) {
|
|
842
|
+
logger_default.error(`All ${maxRetries + 1} attempts failed.`);
|
|
843
|
+
if (opts._restoreGrounding) await opts._restoreGrounding();
|
|
844
|
+
throw new Error(`Transformation failed after ${maxRetries + 1} attempts. Last error: ${error.message}`);
|
|
845
|
+
}
|
|
846
|
+
const delay = retryDelay * Math.pow(2, attempt);
|
|
847
|
+
await new Promise((res) => setTimeout(res, delay));
|
|
848
|
+
}
|
|
849
|
+
}
|
|
850
|
+
}
|
|
851
|
+
// ── Raw Send ─────────────────────────────────────────────────────────────
|
|
852
|
+
/**
|
|
853
|
+
* Sends a single prompt to the model and parses the JSON response.
|
|
854
|
+
* No validation or retry logic.
|
|
855
|
+
*
|
|
856
|
+
* @param {Object|string} payload - The source payload
|
|
857
|
+
* @param {Object} [messageOptions={}] - Per-message options (e.g., labels)
|
|
858
|
+
* @returns {Promise<Object>} The transformed payload
|
|
859
|
+
*/
|
|
860
|
+
async rawSend(payload, messageOptions = {}) {
|
|
861
|
+
if (!this.chatSession) {
|
|
862
|
+
throw new Error("Chat session not initialized.");
|
|
863
|
+
}
|
|
864
|
+
const actualPayload = typeof payload === "string" ? payload : JSON.stringify(payload, null, 2);
|
|
865
|
+
const mergedLabels = { ...this.labels, ...messageOptions.labels || {} };
|
|
866
|
+
const hasLabels = this.vertexai && Object.keys(mergedLabels).length > 0;
|
|
867
|
+
try {
|
|
868
|
+
const sendParams = { message: actualPayload };
|
|
869
|
+
if (hasLabels) {
|
|
870
|
+
sendParams.config = { labels: mergedLabels };
|
|
871
|
+
}
|
|
872
|
+
const result = await this.chatSession.sendMessage(sendParams);
|
|
873
|
+
this._captureMetadata(result);
|
|
874
|
+
if (result.usageMetadata && logger_default.level !== "silent") {
|
|
875
|
+
logger_default.debug(`API response: model=${result.modelVersion || "unknown"}, tokens=${result.usageMetadata.totalTokenCount}`);
|
|
876
|
+
}
|
|
877
|
+
const modelResponse = result.text;
|
|
878
|
+
const extractedJSON = extractJSON(modelResponse);
|
|
879
|
+
if (extractedJSON?.data) {
|
|
880
|
+
return extractedJSON.data;
|
|
881
|
+
}
|
|
882
|
+
return extractedJSON;
|
|
883
|
+
} catch (error) {
|
|
884
|
+
if (this.onlyJSON && error.message.includes("Could not extract valid JSON")) {
|
|
885
|
+
throw new Error(`Invalid JSON response from Gemini: ${error.message}`);
|
|
886
|
+
}
|
|
887
|
+
throw new Error(`Transformation failed: ${error.message}`);
|
|
888
|
+
}
|
|
889
|
+
}
|
|
890
|
+
// ── Rebuild ──────────────────────────────────────────────────────────────
|
|
891
|
+
/**
|
|
892
|
+
* Asks the model to fix a payload that failed validation.
|
|
893
|
+
*
|
|
894
|
+
* @param {Object} lastPayload - The payload that failed
|
|
895
|
+
* @param {string} serverError - The error message
|
|
896
|
+
* @returns {Promise<Object>} Corrected payload
|
|
897
|
+
*/
|
|
898
|
+
async rebuild(lastPayload, serverError) {
|
|
899
|
+
await this.init();
|
|
900
|
+
const prompt = `
|
|
901
|
+
The previous JSON payload (below) failed validation.
|
|
902
|
+
The server's error message is quoted afterward.
|
|
903
|
+
|
|
904
|
+
---------------- BAD PAYLOAD ----------------
|
|
905
|
+
${JSON.stringify(lastPayload, null, 2)}
|
|
906
|
+
|
|
907
|
+
|
|
908
|
+
---------------- SERVER ERROR ----------------
|
|
909
|
+
${serverError}
|
|
910
|
+
|
|
911
|
+
Please return a NEW JSON payload that corrects the issue.
|
|
912
|
+
Respond with JSON only \u2013 no comments or explanations.
|
|
913
|
+
`;
|
|
914
|
+
let result;
|
|
994
915
|
try {
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
916
|
+
result = await this.chatSession.sendMessage({ message: prompt });
|
|
917
|
+
this._captureMetadata(result);
|
|
918
|
+
} catch (err) {
|
|
919
|
+
throw new Error(`Gemini call failed while repairing payload: ${err.message}`);
|
|
920
|
+
}
|
|
921
|
+
try {
|
|
922
|
+
const text = result.text ?? result.response ?? "";
|
|
923
|
+
return typeof text === "object" ? text : JSON.parse(text);
|
|
924
|
+
} catch (parseErr) {
|
|
925
|
+
throw new Error(`Gemini returned non-JSON while repairing payload: ${parseErr.message}`);
|
|
926
|
+
}
|
|
927
|
+
}
|
|
928
|
+
// ── Stateless Send ───────────────────────────────────────────────────────
|
|
929
|
+
/**
|
|
930
|
+
* Sends a one-off message using generateContent (not chat).
|
|
931
|
+
* Does NOT affect chat history.
|
|
932
|
+
* @param {Object|string} payload
|
|
933
|
+
* @param {Object} [opts={}]
|
|
934
|
+
* @param {AsyncValidatorFunction|null} [validatorFn]
|
|
935
|
+
* @returns {Promise<Object>}
|
|
936
|
+
* @private
|
|
937
|
+
*/
|
|
938
|
+
async _statelessSend(payload, opts = {}, validatorFn = null) {
|
|
939
|
+
if (!this.chatSession) {
|
|
940
|
+
throw new Error("Chat session not initialized. Please call init() first.");
|
|
941
|
+
}
|
|
942
|
+
const payloadStr = typeof payload === "string" ? payload : JSON.stringify(payload, null, 2);
|
|
943
|
+
const contents = [];
|
|
944
|
+
if (this.exampleCount > 0) {
|
|
945
|
+
const history = this.chatSession.getHistory();
|
|
946
|
+
const exampleHistory = history.slice(0, this.exampleCount);
|
|
947
|
+
contents.push(...exampleHistory);
|
|
948
|
+
}
|
|
949
|
+
contents.push({ role: "user", parts: [{ text: payloadStr }] });
|
|
950
|
+
const mergedLabels = { ...this.labels, ...opts.labels || {} };
|
|
951
|
+
const result = await this.genAIClient.models.generateContent({
|
|
952
|
+
model: this.modelName,
|
|
953
|
+
contents,
|
|
954
|
+
config: {
|
|
955
|
+
...this.chatConfig,
|
|
956
|
+
...this.vertexai && Object.keys(mergedLabels).length > 0 && { labels: mergedLabels }
|
|
957
|
+
}
|
|
958
|
+
});
|
|
959
|
+
this._captureMetadata(result);
|
|
960
|
+
this._cumulativeUsage = {
|
|
961
|
+
promptTokens: this.lastResponseMetadata.promptTokens,
|
|
962
|
+
responseTokens: this.lastResponseMetadata.responseTokens,
|
|
963
|
+
totalTokens: this.lastResponseMetadata.totalTokens,
|
|
964
|
+
attempts: 1
|
|
965
|
+
};
|
|
966
|
+
const modelResponse = result.text;
|
|
967
|
+
const extractedJSON = extractJSON(modelResponse);
|
|
968
|
+
let transformedPayload = extractedJSON?.data ? extractedJSON.data : extractedJSON;
|
|
969
|
+
if (validatorFn) {
|
|
970
|
+
await validatorFn(transformedPayload);
|
|
971
|
+
}
|
|
972
|
+
return transformedPayload;
|
|
973
|
+
}
|
|
974
|
+
// ── History Management ───────────────────────────────────────────────────
|
|
975
|
+
/**
|
|
976
|
+
* Clears conversation history while preserving seeded examples.
|
|
977
|
+
* @returns {Promise<void>}
|
|
978
|
+
*/
|
|
979
|
+
async clearHistory() {
|
|
980
|
+
if (!this.chatSession) {
|
|
981
|
+
logger_default.warn("Cannot clear history: chat not initialized.");
|
|
982
|
+
return;
|
|
983
|
+
}
|
|
984
|
+
const history = this.chatSession.getHistory();
|
|
985
|
+
const exampleHistory = history.slice(0, this.exampleCount || 0);
|
|
986
|
+
this.chatSession = this._createChatSession(exampleHistory);
|
|
987
|
+
this.lastResponseMetadata = null;
|
|
988
|
+
this._cumulativeUsage = { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 0 };
|
|
989
|
+
logger_default.debug(`Conversation cleared. Preserved ${exampleHistory.length} example items.`);
|
|
990
|
+
}
|
|
991
|
+
/**
|
|
992
|
+
* Fully resets the chat session, clearing all history including examples.
|
|
993
|
+
* @returns {Promise<void>}
|
|
994
|
+
*/
|
|
995
|
+
async reset() {
|
|
996
|
+
if (this.chatSession) {
|
|
997
|
+
logger_default.debug("Resetting chat session...");
|
|
998
|
+
this.chatSession = this._createChatSession([]);
|
|
999
|
+
this.exampleCount = 0;
|
|
1000
|
+
logger_default.debug("Chat session reset.");
|
|
1001
|
+
} else {
|
|
1002
|
+
logger_default.warn("Cannot reset: chat not yet initialized.");
|
|
1003
|
+
}
|
|
1004
|
+
}
|
|
1005
|
+
/**
|
|
1006
|
+
* Updates system prompt and reinitializes the chat session.
|
|
1007
|
+
* @param {string} newPrompt - The new system prompt
|
|
1008
|
+
* @returns {Promise<void>}
|
|
1009
|
+
*/
|
|
1010
|
+
async updateSystemPrompt(newPrompt) {
|
|
1011
|
+
if (!newPrompt || typeof newPrompt !== "string") {
|
|
1012
|
+
throw new Error("System prompt must be a non-empty string");
|
|
1013
|
+
}
|
|
1014
|
+
this.systemPrompt = newPrompt.trim();
|
|
1015
|
+
this.chatConfig.systemInstruction = this.systemPrompt;
|
|
1016
|
+
logger_default.debug("Updating system prompt and reinitializing chat...");
|
|
1017
|
+
await this.init(true);
|
|
1018
|
+
}
|
|
1019
|
+
// ── Private Helpers ──────────────────────────────────────────────────────
|
|
1020
|
+
/**
|
|
1021
|
+
* Normalizes a payload to a string for sending.
|
|
1022
|
+
* @param {*} payload
|
|
1023
|
+
* @returns {string}
|
|
1024
|
+
* @private
|
|
1025
|
+
*/
|
|
1026
|
+
_preparePayload(payload) {
|
|
1027
|
+
if (payload && isJSON(payload)) {
|
|
1028
|
+
return JSON.stringify(payload, null, 2);
|
|
1029
|
+
} else if (typeof payload === "string") {
|
|
1030
|
+
return payload;
|
|
1031
|
+
} else if (typeof payload === "boolean" || typeof payload === "number") {
|
|
1032
|
+
return payload.toString();
|
|
1033
|
+
} else if (payload === null || payload === void 0) {
|
|
1034
|
+
return JSON.stringify({});
|
|
1035
|
+
} else {
|
|
1036
|
+
throw new Error("Invalid source payload. Must be a JSON object or string.");
|
|
1037
|
+
}
|
|
1038
|
+
}
|
|
1039
|
+
};
|
|
1040
|
+
var transformer_default = Transformer;
|
|
1041
|
+
|
|
1042
|
+
// chat.js
var Chat = class extends base_default {
  /**
   * Conversational client: each send() appends to a persistent history.
   * Defaults the system prompt to a generic assistant persona when the
   * caller does not supply one.
   * @param {ChatOptions} [options={}]
   */
  constructor(options = {}) {
    const resolved = options.systemPrompt === void 0 ? { ...options, systemPrompt: "You are a helpful AI assistant." } : options;
    super(resolved);
    logger_default.debug(`Chat created with model: ${this.modelName}`);
  }
  /**
   * Send a text message and get a response. Adds to conversation history.
   *
   * @param {string} message - The user's message
   * @param {Object} [opts={}] - Per-message options
   * @param {Record<string, string>} [opts.labels] - Per-message billing labels
   * @returns {Promise<ChatResponse>} Response with text and usage data
   */
  async send(message, opts = {}) {
    if (!this.chatSession) await this.init();
    // Billing labels are only honored on Vertex AI.
    const labels = { ...this.labels, ...opts.labels || {} };
    const sendParams = { message };
    if (this.vertexai && Object.keys(labels).length > 0) {
      sendParams.config = { labels };
    }
    const result = await this.chatSession.sendMessage(sendParams);
    this._captureMetadata(result);
    const meta = this.lastResponseMetadata;
    this._cumulativeUsage = {
      promptTokens: meta.promptTokens,
      responseTokens: meta.responseTokens,
      totalTokens: meta.totalTokens,
      attempts: 1
    };
    return {
      text: result.text || "",
      usage: this.getLastUsage()
    };
  }
};
var chat_default = Chat;
|
|
1085
|
+
|
|
1086
|
+
// message.js
var Message = class extends base_default {
  /**
   * Stateless one-shot client: every send() is independent and no
   * conversation history is kept.
   * @param {MessageOptions} [options={}]
   */
  constructor(options = {}) {
    super(options);
    const { responseSchema, responseMimeType } = options;
    if (responseSchema) {
      this.chatConfig.responseSchema = responseSchema;
    }
    if (responseMimeType) {
      this.chatConfig.responseMimeType = responseMimeType;
    }
    // Structured mode: responses are additionally parsed as JSON.
    this._isStructured = !!(responseSchema || responseMimeType === "application/json");
    logger_default.debug(`Message created (structured=${this._isStructured})`);
  }
  /**
   * Initialize the Message client.
   * Override: creates genAIClient only, NO chat session (stateless).
   * @param {boolean} [force=false]
   * @returns {Promise<void>}
   */
  async init(force = false) {
    if (this._initialized && !force) return;
    logger_default.debug(`Initializing ${this.constructor.name} with model: ${this.modelName}...`);
    // Cheap connectivity probe: listing models verifies credentials work.
    try {
      await this.genAIClient.models.list();
      logger_default.debug(`${this.constructor.name}: API connection successful.`);
    } catch (e) {
      throw new Error(`${this.constructor.name} initialization failed: ${e.message}`);
    }
    this._initialized = true;
    logger_default.debug(`${this.constructor.name}: Initialized (stateless mode).`);
  }
  /**
   * Send a stateless message and get a response.
   * Each call is independent — no history is maintained.
   *
   * @param {Object|string} payload - The message or data to send
   * @param {Object} [opts={}] - Per-message options
   * @param {Record<string, string>} [opts.labels] - Per-message billing labels
   * @returns {Promise<MessageResponse>} Response with text, optional data, and usage
   */
  async send(payload, opts = {}) {
    if (!this._initialized) await this.init();
    const body = typeof payload === "string" ? payload : JSON.stringify(payload, null, 2);
    const labels = { ...this.labels, ...opts.labels || {} };
    const config = { ...this.chatConfig };
    // Billing labels are only honored on Vertex AI.
    if (this.vertexai && Object.keys(labels).length > 0) {
      config.labels = labels;
    }
    const result = await this.genAIClient.models.generateContent({
      model: this.modelName,
      contents: [{ role: "user", parts: [{ text: body }] }],
      config
    });
    this._captureMetadata(result);
    const meta = this.lastResponseMetadata;
    this._cumulativeUsage = {
      promptTokens: meta.promptTokens,
      responseTokens: meta.responseTokens,
      totalTokens: meta.totalTokens,
      attempts: 1
    };
    if (result.usageMetadata && logger_default.level !== "silent") {
      logger_default.debug(`Message response: model=${result.modelVersion || "unknown"}, tokens=${result.usageMetadata.totalTokenCount}`);
    }
    const text = result.text || "";
    const response = {
      text,
      usage: this.getLastUsage()
    };
    // In structured mode, attach parsed JSON; parse failures yield data=null
    // rather than throwing, so the raw text is still returned.
    if (this._isStructured) {
      try {
        response.data = extractJSON(text);
      } catch (e) {
        logger_default.warn(`Could not parse structured response: ${e.message}`);
        response.data = null;
      }
    }
    return response;
  }
  // ── No-ops for stateless class ──
  /** @returns {Array} Always returns empty array (stateless). */
  getHistory() {
    return [];
  }
  /** No-op (stateless). */
  async clearHistory() {
  }
  /** Not supported on Message (stateless). */
  async seed() {
    logger_default.warn("Message is stateless \u2014 seed() has no effect. Use Transformer or Chat for few-shot learning.");
    return [];
  }
  /**
   * Not supported on Message (stateless).
   * @param {any} [_nextPayload]
   * @returns {Promise<{inputTokens: number}>}
   */
  async estimate(_nextPayload) {
    throw new Error("Message is stateless \u2014 use estimate() on Chat or Transformer which have conversation context.");
  }
};
var message_default = Message;
|
|
1190
|
+
|
|
1191
|
+
// tool-agent.js
var ToolAgent = class extends base_default {
  /**
   * Conversational agent that loops between the model and caller-supplied
   * tools (function declarations + an executor) until the model produces a
   * plain-text answer or maxToolRounds is reached.
   * @param {ToolAgentOptions} [options={}]
   */
  constructor(options = {}) {
    if (options.systemPrompt === void 0) {
      options = { ...options, systemPrompt: "You are a helpful AI assistant." };
    }
    super(options);
    this.tools = options.tools || [];
    this.toolExecutor = options.toolExecutor || null;
    // Tools and executor must be provided together: declarations without an
    // executor cannot be run; an executor without declarations is unreachable.
    if (this.tools.length > 0 && !this.toolExecutor) {
      throw new Error("ToolAgent: tools provided without a toolExecutor. Provide a toolExecutor function to handle tool calls.");
    }
    if (this.toolExecutor && this.tools.length === 0) {
      throw new Error("ToolAgent: toolExecutor provided without tools. Provide tool declarations so the model knows what tools are available.");
    }
    this.maxToolRounds = options.maxToolRounds || 10;
    this.onToolCall = options.onToolCall || null;
    this.onBeforeExecution = options.onBeforeExecution || null;
    this._stopped = false;
    if (this.tools.length > 0) {
      this.chatConfig.tools = [{ functionDeclarations: this.tools }];
      this.chatConfig.toolConfig = { functionCallingConfig: { mode: "AUTO" } };
    }
    logger_default.debug(`ToolAgent created with ${this.tools.length} tools`);
  }
  /**
   * Builds sendMessage/sendMessageStream params, attaching merged billing
   * labels when running on Vertex AI.
   *
   * FIX: chat() and stream() documented an `opts.labels` option but never
   * applied it. Labels are now merged with instance labels and sent on every
   * model call, matching the behavior of Chat.send().
   *
   * @param {string|Array<Object>} message - Message text or functionResponse parts
   * @param {Object} [opts={}] - Per-message options
   * @returns {Object} Params for chatSession.sendMessage/sendMessageStream
   * @private
   */
  _buildSendParams(message, opts = {}) {
    const mergedLabels = { ...this.labels, ...opts.labels || {} };
    const sendParams = { message };
    if (this.vertexai && Object.keys(mergedLabels).length > 0) {
      sendParams.config = { labels: mergedLabels };
    }
    return sendParams;
  }
  // ── Non-Streaming Chat ───────────────────────────────────────────────────
  /**
   * Send a message and get a complete response (non-streaming).
   * Automatically handles the tool-use loop.
   *
   * @param {string} message - The user's message
   * @param {Object} [opts={}] - Per-message options
   * @param {Record<string, string>} [opts.labels] - Per-message billing labels
   * @returns {Promise<AgentResponse>} Response with text, toolCalls, and usage
   */
  async chat(message, opts = {}) {
    if (!this.chatSession) await this.init();
    this._stopped = false;
    const allToolCalls = [];
    let response = await this.chatSession.sendMessage(this._buildSendParams(message, opts));
    for (let round = 0; round < this.maxToolRounds; round++) {
      if (this._stopped) break;
      const functionCalls = response.functionCalls;
      if (!functionCalls || functionCalls.length === 0) break;
      // Execute all requested tools in parallel; executor failures become
      // { error } results so the model can see and react to them.
      const toolResults = await Promise.all(
        functionCalls.map(async (call) => {
          if (this.onToolCall) {
            try {
              this.onToolCall(call.name, call.args);
            } catch (e) {
              logger_default.warn(`onToolCall callback error: ${e.message}`);
            }
          }
          if (this.onBeforeExecution) {
            try {
              const allowed = await this.onBeforeExecution(call.name, call.args);
              if (allowed === false) {
                const result2 = { error: "Execution denied by onBeforeExecution callback" };
                allToolCalls.push({ name: call.name, args: call.args, result: result2 });
                return { id: call.id, name: call.name, result: result2 };
              }
            } catch (e) {
              // A throwing guard does not block execution; only an explicit
              // `false` return denies the call.
              logger_default.warn(`onBeforeExecution callback error: ${e.message}`);
            }
          }
          let result;
          try {
            result = await this.toolExecutor(call.name, call.args);
          } catch (err) {
            logger_default.warn(`Tool ${call.name} failed: ${err.message}`);
            result = { error: err.message };
          }
          allToolCalls.push({ name: call.name, args: call.args, result });
          return { id: call.id, name: call.name, result };
        })
      );
      // Feed tool outputs back to the model for the next round.
      response = await this.chatSession.sendMessage(this._buildSendParams(
        toolResults.map((r) => ({
          functionResponse: {
            id: r.id,
            name: r.name,
            response: { output: r.result }
          }
        })),
        opts
      ));
    }
    this._captureMetadata(response);
    this._cumulativeUsage = {
      promptTokens: this.lastResponseMetadata.promptTokens,
      responseTokens: this.lastResponseMetadata.responseTokens,
      totalTokens: this.lastResponseMetadata.totalTokens,
      attempts: 1
    };
    return {
      text: response.text || "",
      toolCalls: allToolCalls,
      usage: this.getLastUsage()
    };
  }
  // ── Streaming ────────────────────────────────────────────────────────────
  /**
   * Send a message and stream the response as events.
   * Automatically handles the tool-use loop between streamed rounds.
   *
   * Event types:
   * - `text` — A chunk of the agent's text response
   * - `tool_call` — The agent is about to call a tool
   * - `tool_result` — A tool finished executing
   * - `done` — The agent finished
   *
   * @param {string} message - The user's message
   * @param {Object} [opts={}] - Per-message options
   * @param {Record<string, string>} [opts.labels] - Per-message billing labels
   * @yields {AgentStreamEvent}
   */
  async *stream(message, opts = {}) {
    if (!this.chatSession) await this.init();
    this._stopped = false;
    const allToolCalls = [];
    let fullText = "";
    let streamResponse = await this.chatSession.sendMessageStream(this._buildSendParams(message, opts));
    for (let round = 0; round < this.maxToolRounds; round++) {
      if (this._stopped) break;
      let roundText = "";
      const functionCalls = [];
      // Drain the stream: collect text chunks (yielded immediately) and any
      // function calls the model emits.
      for await (const chunk of streamResponse) {
        if (chunk.functionCalls) {
          functionCalls.push(...chunk.functionCalls);
        } else if (chunk.candidates?.[0]?.content?.parts?.[0]?.text) {
          const text = chunk.candidates[0].content.parts[0].text;
          roundText += text;
          fullText += text;
          yield { type: "text", text };
        }
      }
      // No tool calls means the model is done talking.
      if (functionCalls.length === 0) {
        yield {
          type: "done",
          fullText,
          usage: this.getLastUsage()
        };
        return;
      }
      // Tools execute sequentially here (unlike chat()) so tool_call /
      // tool_result events are yielded in order.
      const toolResults = [];
      for (const call of functionCalls) {
        if (this._stopped) break;
        yield { type: "tool_call", toolName: call.name, args: call.args };
        if (this.onToolCall) {
          try {
            this.onToolCall(call.name, call.args);
          } catch (e) {
            logger_default.warn(`onToolCall callback error: ${e.message}`);
          }
        }
        let denied = false;
        if (this.onBeforeExecution) {
          try {
            const allowed = await this.onBeforeExecution(call.name, call.args);
            if (allowed === false) denied = true;
          } catch (e) {
            logger_default.warn(`onBeforeExecution callback error: ${e.message}`);
          }
        }
        let result;
        if (denied) {
          result = { error: "Execution denied by onBeforeExecution callback" };
        } else {
          try {
            result = await this.toolExecutor(call.name, call.args);
          } catch (err) {
            logger_default.warn(`Tool ${call.name} failed: ${err.message}`);
            result = { error: err.message };
          }
        }
        allToolCalls.push({ name: call.name, args: call.args, result });
        yield { type: "tool_result", toolName: call.name, result };
        toolResults.push({ id: call.id, name: call.name, result });
      }
      streamResponse = await this.chatSession.sendMessageStream(this._buildSendParams(
        toolResults.map((r) => ({
          functionResponse: {
            id: r.id,
            name: r.name,
            response: { output: r.result }
          }
        })),
        opts
      ));
    }
    yield {
      type: "done",
      fullText,
      usage: this.getLastUsage(),
      warning: this._stopped ? "Agent was stopped" : "Max tool rounds reached"
    };
  }
  // ── Stop ────────────────────────────────────────────────────────────────
  /**
   * Stop the agent before the next tool execution round.
   * If called during a chat() or stream() loop, the agent will finish
   * the current round and then stop.
   */
  stop() {
    this._stopped = true;
    logger_default.info("ToolAgent stopped");
  }
};
var tool_agent_default = ToolAgent;
|
|
1400
|
+
|
|
1401
|
+
// code-agent.js
|
|
1402
|
+
var import_node_child_process = require("node:child_process");
|
|
1403
|
+
var import_promises2 = require("node:fs/promises");
|
|
1404
|
+
var import_node_path = require("node:path");
|
|
1405
|
+
var import_node_crypto = require("node:crypto");
|
|
1406
|
+
// Cap (in characters) on captured output — presumably truncates code-execution
// output returned to the model; confirm at the use site (outside this chunk).
var MAX_OUTPUT_CHARS = 5e4;
// Maximum number of lines included when rendering the working-directory file tree.
var MAX_FILE_TREE_LINES = 500;
// Directory names excluded from file-tree listings (VCS, build artifacts, caches).
var IGNORE_DIRS = /* @__PURE__ */ new Set(["node_modules", ".git", "dist", "coverage", ".next", "build", "__pycache__"]);
|
|
1409
|
+
var CodeAgent = class extends base_default {
|
|
1410
|
+
/**
|
|
1411
|
+
* @param {CodeAgentOptions} [options={}]
|
|
1412
|
+
*/
|
|
1413
|
+
constructor(options = {}) {
|
|
1414
|
+
if (options.systemPrompt === void 0) {
|
|
1415
|
+
options = { ...options, systemPrompt: "" };
|
|
1416
|
+
}
|
|
1417
|
+
super(options);
|
|
1418
|
+
this.workingDirectory = options.workingDirectory || process.cwd();
|
|
1419
|
+
this.maxRounds = options.maxRounds || 10;
|
|
1420
|
+
this.timeout = options.timeout || 3e4;
|
|
1421
|
+
this.onBeforeExecution = options.onBeforeExecution || null;
|
|
1422
|
+
this.onCodeExecution = options.onCodeExecution || null;
|
|
1423
|
+
this._codebaseContext = null;
|
|
1424
|
+
this._contextGathered = false;
|
|
1425
|
+
this._stopped = false;
|
|
1426
|
+
this._activeProcess = null;
|
|
1427
|
+
this._userSystemPrompt = options.systemPrompt || "";
|
|
1428
|
+
this._allExecutions = [];
|
|
1429
|
+
this.chatConfig.tools = [{
|
|
1430
|
+
functionDeclarations: [{
|
|
1431
|
+
name: "execute_code",
|
|
1432
|
+
description: "Execute JavaScript code in a Node.js child process. The code has access to all Node.js built-in modules (fs, path, child_process, http, etc.). Use console.log() to produce output that will be returned to you. The code runs in the working directory with the same environment variables as the parent process.",
|
|
1433
|
+
parametersJsonSchema: {
|
|
1434
|
+
type: "object",
|
|
1435
|
+
properties: {
|
|
1436
|
+
code: {
|
|
1437
|
+
type: "string",
|
|
1438
|
+
description: "JavaScript code to execute. Use console.log() for output. You can import any built-in Node.js module."
|
|
1439
|
+
}
|
|
1440
|
+
},
|
|
1441
|
+
required: ["code"]
|
|
1019
1442
|
}
|
|
1443
|
+
}]
|
|
1444
|
+
}];
|
|
1445
|
+
this.chatConfig.toolConfig = { functionCallingConfig: { mode: "AUTO" } };
|
|
1446
|
+
logger_default.debug(`CodeAgent created for directory: ${this.workingDirectory}`);
|
|
1447
|
+
}
|
|
1448
|
+
// ── Init ─────────────────────────────────────────────────────────────────
|
|
1449
|
+
/**
|
|
1450
|
+
* Initialize the agent: gather codebase context, build system prompt,
|
|
1451
|
+
* and create the chat session.
|
|
1452
|
+
* @param {boolean} [force=false]
|
|
1453
|
+
*/
|
|
1454
|
+
async init(force = false) {
|
|
1455
|
+
if (this.chatSession && !force) return;
|
|
1456
|
+
if (!this._contextGathered || force) {
|
|
1457
|
+
await this._gatherCodebaseContext();
|
|
1458
|
+
}
|
|
1459
|
+
const systemPrompt = this._buildSystemPrompt();
|
|
1460
|
+
this.chatConfig.systemInstruction = systemPrompt;
|
|
1461
|
+
await super.init(force);
|
|
1462
|
+
}
|
|
1463
|
+
// ── Context Gathering ────────────────────────────────────────────────────
|
|
1464
|
+
/**
|
|
1465
|
+
* Gather file tree and key file contents from the working directory.
|
|
1466
|
+
* @private
|
|
1467
|
+
*/
|
|
1468
|
+
async _gatherCodebaseContext() {
|
|
1469
|
+
let fileTree = "";
|
|
1470
|
+
try {
|
|
1471
|
+
fileTree = await this._getFileTreeGit();
|
|
1472
|
+
} catch {
|
|
1473
|
+
logger_default.debug("git ls-files failed, falling back to readdir");
|
|
1474
|
+
fileTree = await this._getFileTreeReaddir(this.workingDirectory, 0, 3);
|
|
1475
|
+
}
|
|
1476
|
+
const lines = fileTree.split("\n");
|
|
1477
|
+
if (lines.length > MAX_FILE_TREE_LINES) {
|
|
1478
|
+
const truncated = lines.slice(0, MAX_FILE_TREE_LINES).join("\n");
|
|
1479
|
+
fileTree = `${truncated}
|
|
1480
|
+
... (${lines.length - MAX_FILE_TREE_LINES} more files)`;
|
|
1481
|
+
}
|
|
1482
|
+
let npmPackages = [];
|
|
1483
|
+
try {
|
|
1484
|
+
const pkgPath = (0, import_node_path.join)(this.workingDirectory, "package.json");
|
|
1485
|
+
const pkg = JSON.parse(await (0, import_promises2.readFile)(pkgPath, "utf-8"));
|
|
1486
|
+
npmPackages = [
|
|
1487
|
+
...Object.keys(pkg.dependencies || {}),
|
|
1488
|
+
...Object.keys(pkg.devDependencies || {})
|
|
1020
1489
|
];
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1490
|
+
} catch {
|
|
1491
|
+
}
|
|
1492
|
+
this._codebaseContext = { fileTree, npmPackages };
|
|
1493
|
+
this._contextGathered = true;
|
|
1494
|
+
}
|
|
1495
|
+
/**
|
|
1496
|
+
* Get file tree using git ls-files.
|
|
1497
|
+
* @private
|
|
1498
|
+
* @returns {Promise<string>}
|
|
1499
|
+
*/
|
|
1500
|
+
async _getFileTreeGit() {
|
|
1501
|
+
return new Promise((resolve, reject) => {
|
|
1502
|
+
(0, import_node_child_process.execFile)("git", ["ls-files"], {
|
|
1503
|
+
cwd: this.workingDirectory,
|
|
1504
|
+
timeout: 5e3,
|
|
1505
|
+
maxBuffer: 5 * 1024 * 1024
|
|
1506
|
+
}, (err, stdout) => {
|
|
1507
|
+
if (err) return reject(err);
|
|
1508
|
+
resolve(stdout.trim());
|
|
1509
|
+
});
|
|
1510
|
+
});
|
|
1511
|
+
}
|
|
1512
|
+
/**
|
|
1513
|
+
* Fallback file tree via recursive readdir.
|
|
1514
|
+
* @private
|
|
1515
|
+
* @param {string} dir
|
|
1516
|
+
* @param {number} depth
|
|
1517
|
+
* @param {number} maxDepth
|
|
1518
|
+
* @returns {Promise<string>}
|
|
1519
|
+
*/
|
|
1520
|
+
async _getFileTreeReaddir(dir, depth, maxDepth) {
|
|
1521
|
+
if (depth >= maxDepth) return "";
|
|
1522
|
+
const entries = [];
|
|
1523
|
+
try {
|
|
1524
|
+
const items = await (0, import_promises2.readdir)(dir, { withFileTypes: true });
|
|
1525
|
+
for (const item of items) {
|
|
1526
|
+
if (IGNORE_DIRS.has(item.name)) continue;
|
|
1527
|
+
if (item.name.startsWith(".") && depth === 0 && item.isDirectory()) continue;
|
|
1528
|
+
const relativePath = (0, import_node_path.join)(dir, item.name).replace(this.workingDirectory + "/", "");
|
|
1529
|
+
if (item.isFile()) {
|
|
1530
|
+
entries.push(relativePath);
|
|
1531
|
+
} else if (item.isDirectory()) {
|
|
1532
|
+
entries.push(relativePath + "/");
|
|
1533
|
+
const subEntries = await this._getFileTreeReaddir((0, import_node_path.join)(dir, item.name), depth + 1, maxDepth);
|
|
1534
|
+
if (subEntries) entries.push(subEntries);
|
|
1029
1535
|
}
|
|
1030
|
-
|
|
1031
|
-
|
|
1536
|
+
}
|
|
1537
|
+
} catch {
|
|
1538
|
+
}
|
|
1539
|
+
return entries.join("\n");
|
|
1540
|
+
}
|
|
1541
|
+
/**
|
|
1542
|
+
* Build the full system prompt with codebase context.
|
|
1543
|
+
* @private
|
|
1544
|
+
* @returns {string}
|
|
1545
|
+
*/
|
|
1546
|
+
_buildSystemPrompt() {
|
|
1547
|
+
const { fileTree, npmPackages } = this._codebaseContext || { fileTree: "", npmPackages: [] };
|
|
1548
|
+
let prompt = `You are a coding agent working in ${this.workingDirectory}.
|
|
1549
|
+
|
|
1550
|
+
## Instructions
|
|
1551
|
+
- Use the execute_code tool to accomplish tasks by writing JavaScript code
|
|
1552
|
+
- Your code runs in a Node.js child process with access to all built-in modules
|
|
1553
|
+
- IMPORTANT: Your code runs as an ES module (.mjs). Use import syntax, NOT require():
|
|
1554
|
+
- import fs from 'fs';
|
|
1555
|
+
- import path from 'path';
|
|
1556
|
+
- import { execSync } from 'child_process';
|
|
1557
|
+
- Use console.log() to produce output \u2014 that's how results are returned to you
|
|
1558
|
+
- Write efficient scripts that do multiple things per execution when possible
|
|
1559
|
+
- For parallel async operations, use Promise.all():
|
|
1560
|
+
const [a, b] = await Promise.all([fetchA(), fetchB()]);
|
|
1561
|
+
- Read files with fs.readFileSync() when you need to understand their contents
|
|
1562
|
+
- Handle errors in your scripts with try/catch so you get useful error messages
|
|
1563
|
+
- Top-level await is supported
|
|
1564
|
+
- The working directory is: ${this.workingDirectory}`;
|
|
1565
|
+
if (fileTree) {
|
|
1566
|
+
prompt += `
|
|
1567
|
+
|
|
1568
|
+
## File Tree
|
|
1569
|
+
\`\`\`
|
|
1570
|
+
${fileTree}
|
|
1571
|
+
\`\`\``;
|
|
1572
|
+
}
|
|
1573
|
+
if (npmPackages.length > 0) {
|
|
1574
|
+
prompt += `
|
|
1575
|
+
|
|
1576
|
+
## Available Packages
|
|
1577
|
+
These npm packages are installed and can be imported: ${npmPackages.join(", ")}`;
|
|
1578
|
+
}
|
|
1579
|
+
if (this._userSystemPrompt) {
|
|
1580
|
+
prompt += `
|
|
1581
|
+
|
|
1582
|
+
## Additional Instructions
|
|
1583
|
+
${this._userSystemPrompt}`;
|
|
1584
|
+
}
|
|
1585
|
+
return prompt;
|
|
1586
|
+
}
|
|
1587
|
+
// ── Code Execution ───────────────────────────────────────────────────────
|
|
1588
|
+
/**
|
|
1589
|
+
* Execute a JavaScript code string in a child process.
|
|
1590
|
+
* @private
|
|
1591
|
+
* @param {string} code - JavaScript code to execute
|
|
1592
|
+
* @returns {Promise<{stdout: string, stderr: string, exitCode: number, denied?: boolean}>}
|
|
1593
|
+
*/
|
|
1594
|
+
async _executeCode(code) {
|
|
1595
|
+
if (this._stopped) {
|
|
1596
|
+
return { stdout: "", stderr: "Agent was stopped", exitCode: -1 };
|
|
1597
|
+
}
|
|
1598
|
+
if (this.onBeforeExecution) {
|
|
1599
|
+
try {
|
|
1600
|
+
const allowed = await this.onBeforeExecution(code);
|
|
1601
|
+
if (allowed === false) {
|
|
1602
|
+
return { stdout: "", stderr: "Execution denied by onBeforeExecution callback", exitCode: -1, denied: true };
|
|
1032
1603
|
}
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
{ "name": "Lynn" },
|
|
1037
|
-
{},
|
|
1038
|
-
mockValidator
|
|
1039
|
-
);
|
|
1040
|
-
logger_default.info("Validated Payload Transformed", validatedResponse);
|
|
1041
|
-
if (NODE_ENV === "dev") debugger;
|
|
1042
|
-
} catch (error) {
|
|
1043
|
-
logger_default.error("Error in AI Transformer script:", error);
|
|
1044
|
-
if (NODE_ENV === "dev") debugger;
|
|
1604
|
+
} catch (e) {
|
|
1605
|
+
logger_default.warn(`onBeforeExecution callback error: ${e.message}`);
|
|
1606
|
+
}
|
|
1045
1607
|
}
|
|
1046
|
-
|
|
1047
|
-
|
|
1608
|
+
const tempFile = (0, import_node_path.join)(this.workingDirectory, `.code-agent-tmp-${(0, import_node_crypto.randomUUID)()}.mjs`);
|
|
1609
|
+
try {
|
|
1610
|
+
await (0, import_promises2.writeFile)(tempFile, code, "utf-8");
|
|
1611
|
+
const result = await new Promise((resolve) => {
|
|
1612
|
+
const child = (0, import_node_child_process.execFile)("node", [tempFile], {
|
|
1613
|
+
cwd: this.workingDirectory,
|
|
1614
|
+
timeout: this.timeout,
|
|
1615
|
+
env: process.env,
|
|
1616
|
+
maxBuffer: 10 * 1024 * 1024
|
|
1617
|
+
}, (err, stdout, stderr) => {
|
|
1618
|
+
this._activeProcess = null;
|
|
1619
|
+
if (err) {
|
|
1620
|
+
resolve({
|
|
1621
|
+
stdout: err.stdout || stdout || "",
|
|
1622
|
+
stderr: (err.stderr || stderr || "") + (err.killed ? "\n[EXECUTION TIMED OUT]" : ""),
|
|
1623
|
+
exitCode: err.code || 1
|
|
1624
|
+
});
|
|
1625
|
+
} else {
|
|
1626
|
+
resolve({ stdout: stdout || "", stderr: stderr || "", exitCode: 0 });
|
|
1627
|
+
}
|
|
1628
|
+
});
|
|
1629
|
+
this._activeProcess = child;
|
|
1630
|
+
});
|
|
1631
|
+
const totalLen = result.stdout.length + result.stderr.length;
|
|
1632
|
+
if (totalLen > MAX_OUTPUT_CHARS) {
|
|
1633
|
+
const half = Math.floor(MAX_OUTPUT_CHARS / 2);
|
|
1634
|
+
if (result.stdout.length > half) {
|
|
1635
|
+
result.stdout = result.stdout.slice(0, half) + "\n...[OUTPUT TRUNCATED]";
|
|
1636
|
+
}
|
|
1637
|
+
if (result.stderr.length > half) {
|
|
1638
|
+
result.stderr = result.stderr.slice(0, half) + "\n...[STDERR TRUNCATED]";
|
|
1639
|
+
}
|
|
1640
|
+
}
|
|
1641
|
+
this._allExecutions.push({ code, output: result.stdout, stderr: result.stderr, exitCode: result.exitCode });
|
|
1642
|
+
if (this.onCodeExecution) {
|
|
1643
|
+
try {
|
|
1644
|
+
this.onCodeExecution(code, result);
|
|
1645
|
+
} catch (e) {
|
|
1646
|
+
logger_default.warn(`onCodeExecution callback error: ${e.message}`);
|
|
1647
|
+
}
|
|
1648
|
+
}
|
|
1649
|
+
return result;
|
|
1650
|
+
} finally {
|
|
1651
|
+
try {
|
|
1652
|
+
await (0, import_promises2.unlink)(tempFile);
|
|
1653
|
+
} catch {
|
|
1654
|
+
}
|
|
1655
|
+
}
|
|
1656
|
+
}
|
|
1657
|
+
/**
|
|
1658
|
+
* Format execution result as a string for the model.
|
|
1659
|
+
* @private
|
|
1660
|
+
* @param {{stdout: string, stderr: string, exitCode: number}} result
|
|
1661
|
+
* @returns {string}
|
|
1662
|
+
*/
|
|
1663
|
+
_formatOutput(result) {
|
|
1664
|
+
let output = "";
|
|
1665
|
+
if (result.stdout) output += result.stdout;
|
|
1666
|
+
if (result.stderr) output += (output ? "\n" : "") + `[STDERR]: ${result.stderr}`;
|
|
1667
|
+
if (result.exitCode !== 0) output += (output ? "\n" : "") + `[EXIT CODE]: ${result.exitCode}`;
|
|
1668
|
+
return output || "(no output)";
|
|
1669
|
+
}
|
|
1670
|
+
// ── Non-Streaming Chat ───────────────────────────────────────────────────
|
|
1671
|
+
/**
|
|
1672
|
+
* Send a message and get a complete response (non-streaming).
|
|
1673
|
+
* Automatically handles the code execution loop.
|
|
1674
|
+
*
|
|
1675
|
+
* @param {string} message - The user's message
|
|
1676
|
+
* @param {Object} [opts={}] - Per-message options
|
|
1677
|
+
* @param {Record<string, string>} [opts.labels] - Per-message billing labels
|
|
1678
|
+
* @returns {Promise<CodeAgentResponse>} Response with text, codeExecutions, and usage
|
|
1679
|
+
*/
|
|
1680
|
+
async chat(message, opts = {}) {
|
|
1681
|
+
if (!this.chatSession) await this.init();
|
|
1682
|
+
this._stopped = false;
|
|
1683
|
+
const codeExecutions = [];
|
|
1684
|
+
let response = await this.chatSession.sendMessage({ message });
|
|
1685
|
+
for (let round = 0; round < this.maxRounds; round++) {
|
|
1686
|
+
if (this._stopped) break;
|
|
1687
|
+
const functionCalls = response.functionCalls;
|
|
1688
|
+
if (!functionCalls || functionCalls.length === 0) break;
|
|
1689
|
+
const results = [];
|
|
1690
|
+
for (const call of functionCalls) {
|
|
1691
|
+
if (this._stopped) break;
|
|
1692
|
+
const code = call.args?.code || "";
|
|
1693
|
+
const result = await this._executeCode(code);
|
|
1694
|
+
codeExecutions.push({
|
|
1695
|
+
code,
|
|
1696
|
+
output: result.stdout,
|
|
1697
|
+
stderr: result.stderr,
|
|
1698
|
+
exitCode: result.exitCode
|
|
1699
|
+
});
|
|
1700
|
+
results.push({
|
|
1701
|
+
id: call.id,
|
|
1702
|
+
name: call.name,
|
|
1703
|
+
result: this._formatOutput(result)
|
|
1704
|
+
});
|
|
1705
|
+
}
|
|
1706
|
+
if (this._stopped) break;
|
|
1707
|
+
response = await this.chatSession.sendMessage({
|
|
1708
|
+
message: results.map((r) => ({
|
|
1709
|
+
functionResponse: {
|
|
1710
|
+
id: r.id,
|
|
1711
|
+
name: r.name,
|
|
1712
|
+
response: { output: r.result }
|
|
1713
|
+
}
|
|
1714
|
+
}))
|
|
1715
|
+
});
|
|
1716
|
+
}
|
|
1717
|
+
this._captureMetadata(response);
|
|
1718
|
+
this._cumulativeUsage = {
|
|
1719
|
+
promptTokens: this.lastResponseMetadata.promptTokens,
|
|
1720
|
+
responseTokens: this.lastResponseMetadata.responseTokens,
|
|
1721
|
+
totalTokens: this.lastResponseMetadata.totalTokens,
|
|
1722
|
+
attempts: 1
|
|
1723
|
+
};
|
|
1724
|
+
return {
|
|
1725
|
+
text: response.text || "",
|
|
1726
|
+
codeExecutions,
|
|
1727
|
+
usage: this.getLastUsage()
|
|
1728
|
+
};
|
|
1729
|
+
}
|
|
1730
|
+
// ── Streaming ────────────────────────────────────────────────────────────
|
|
1731
|
+
/**
|
|
1732
|
+
* Send a message and stream the response as events.
|
|
1733
|
+
* Automatically handles the code execution loop between streamed rounds.
|
|
1734
|
+
*
|
|
1735
|
+
* Event types:
|
|
1736
|
+
* - `text` — A chunk of the agent's text response
|
|
1737
|
+
* - `code` — The agent is about to execute code
|
|
1738
|
+
* - `output` — Code finished executing
|
|
1739
|
+
* - `done` — The agent finished
|
|
1740
|
+
*
|
|
1741
|
+
* @param {string} message - The user's message
|
|
1742
|
+
* @param {Object} [opts={}] - Per-message options
|
|
1743
|
+
* @yields {CodeAgentStreamEvent}
|
|
1744
|
+
*/
|
|
1745
|
+
async *stream(message, opts = {}) {
|
|
1746
|
+
if (!this.chatSession) await this.init();
|
|
1747
|
+
this._stopped = false;
|
|
1748
|
+
const codeExecutions = [];
|
|
1749
|
+
let fullText = "";
|
|
1750
|
+
let streamResponse = await this.chatSession.sendMessageStream({ message });
|
|
1751
|
+
for (let round = 0; round < this.maxRounds; round++) {
|
|
1752
|
+
if (this._stopped) break;
|
|
1753
|
+
const functionCalls = [];
|
|
1754
|
+
for await (const chunk of streamResponse) {
|
|
1755
|
+
if (chunk.functionCalls) {
|
|
1756
|
+
functionCalls.push(...chunk.functionCalls);
|
|
1757
|
+
} else if (chunk.candidates?.[0]?.content?.parts?.[0]?.text) {
|
|
1758
|
+
const text = chunk.candidates[0].content.parts[0].text;
|
|
1759
|
+
fullText += text;
|
|
1760
|
+
yield { type: "text", text };
|
|
1761
|
+
}
|
|
1762
|
+
}
|
|
1763
|
+
if (functionCalls.length === 0) {
|
|
1764
|
+
yield {
|
|
1765
|
+
type: "done",
|
|
1766
|
+
fullText,
|
|
1767
|
+
codeExecutions,
|
|
1768
|
+
usage: this.getLastUsage()
|
|
1769
|
+
};
|
|
1770
|
+
return;
|
|
1771
|
+
}
|
|
1772
|
+
const results = [];
|
|
1773
|
+
for (const call of functionCalls) {
|
|
1774
|
+
if (this._stopped) break;
|
|
1775
|
+
const code = call.args?.code || "";
|
|
1776
|
+
yield { type: "code", code };
|
|
1777
|
+
const result = await this._executeCode(code);
|
|
1778
|
+
codeExecutions.push({
|
|
1779
|
+
code,
|
|
1780
|
+
output: result.stdout,
|
|
1781
|
+
stderr: result.stderr,
|
|
1782
|
+
exitCode: result.exitCode
|
|
1783
|
+
});
|
|
1784
|
+
yield {
|
|
1785
|
+
type: "output",
|
|
1786
|
+
code,
|
|
1787
|
+
stdout: result.stdout,
|
|
1788
|
+
stderr: result.stderr,
|
|
1789
|
+
exitCode: result.exitCode
|
|
1790
|
+
};
|
|
1791
|
+
results.push({
|
|
1792
|
+
id: call.id,
|
|
1793
|
+
name: call.name,
|
|
1794
|
+
result: this._formatOutput(result)
|
|
1795
|
+
});
|
|
1796
|
+
}
|
|
1797
|
+
if (this._stopped) break;
|
|
1798
|
+
streamResponse = await this.chatSession.sendMessageStream({
|
|
1799
|
+
message: results.map((r) => ({
|
|
1800
|
+
functionResponse: {
|
|
1801
|
+
id: r.id,
|
|
1802
|
+
name: r.name,
|
|
1803
|
+
response: { output: r.result }
|
|
1804
|
+
}
|
|
1805
|
+
}))
|
|
1806
|
+
});
|
|
1807
|
+
}
|
|
1808
|
+
yield {
|
|
1809
|
+
type: "done",
|
|
1810
|
+
fullText,
|
|
1811
|
+
codeExecutions,
|
|
1812
|
+
usage: this.getLastUsage(),
|
|
1813
|
+
warning: this._stopped ? "Agent was stopped" : "Max tool rounds reached"
|
|
1814
|
+
};
|
|
1815
|
+
}
|
|
1816
|
+
// ── Dump ─────────────────────────────────────────────────────────────────
|
|
1817
|
+
/**
|
|
1818
|
+
* Returns all code scripts the agent has written across all chat/stream calls.
|
|
1819
|
+
* @returns {Array<{fileName: string, script: string}>}
|
|
1820
|
+
*/
|
|
1821
|
+
dump() {
|
|
1822
|
+
return this._allExecutions.map((exec, i) => ({
|
|
1823
|
+
fileName: `script-${i + 1}.mjs`,
|
|
1824
|
+
script: exec.code
|
|
1825
|
+
}));
|
|
1826
|
+
}
|
|
1827
|
+
// ── Stop ─────────────────────────────────────────────────────────────────
|
|
1828
|
+
/**
|
|
1829
|
+
* Stop the agent before the next code execution.
|
|
1830
|
+
* If a child process is currently running, it will be killed.
|
|
1831
|
+
*/
|
|
1832
|
+
stop() {
|
|
1833
|
+
this._stopped = true;
|
|
1834
|
+
if (this._activeProcess) {
|
|
1835
|
+
try {
|
|
1836
|
+
this._activeProcess.kill("SIGTERM");
|
|
1837
|
+
} catch {
|
|
1838
|
+
}
|
|
1839
|
+
}
|
|
1840
|
+
logger_default.info("CodeAgent stopped");
|
|
1841
|
+
}
|
|
1842
|
+
};
|
|
1843
|
+
var code_agent_default = CodeAgent;
|
|
1844
|
+
|
|
1845
|
+
// index.js
|
|
1846
|
+
var import_genai2 = require("@google/genai");
|
|
1847
|
+
var index_default = { Transformer: transformer_default, Chat: chat_default, Message: message_default, ToolAgent: tool_agent_default, CodeAgent: code_agent_default };
|
|
1048
1848
|
// Annotate the CommonJS export names for ESM import in node:
|
|
1049
1849
|
0 && (module.exports = {
|
|
1850
|
+
BaseGemini,
|
|
1851
|
+
Chat,
|
|
1852
|
+
CodeAgent,
|
|
1050
1853
|
HarmBlockThreshold,
|
|
1051
1854
|
HarmCategory,
|
|
1855
|
+
Message,
|
|
1052
1856
|
ThinkingLevel,
|
|
1857
|
+
ToolAgent,
|
|
1858
|
+
Transformer,
|
|
1053
1859
|
attemptJSONRecovery,
|
|
1860
|
+
extractJSON,
|
|
1054
1861
|
log
|
|
1055
1862
|
});
|