ak-gemini 1.2.0 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +259 -294
- package/base.js +485 -0
- package/chat.js +87 -0
- package/code-agent.js +687 -0
- package/index.cjs +1928 -1213
- package/index.js +40 -1501
- package/json-helpers.js +352 -0
- package/message.js +170 -0
- package/package.json +14 -7
- package/tool-agent.js +312 -0
- package/transformer.js +502 -0
- package/types.d.ts +452 -241
- package/agent.js +0 -481
- package/tools.js +0 -134
package/base.js
ADDED
@@ -0,0 +1,485 @@
/**
 * @fileoverview BaseGemini class — shared foundation for all ak-gemini classes.
 * Handles authentication, client initialization, thinking config, log levels,
 * safety settings, token estimation, cost tracking, and chat session management.
 */

import dotenv from 'dotenv';
dotenv.config();
const { NODE_ENV = "unknown", LOG_LEVEL = "" } = process.env;

import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from '@google/genai';
import log from './logger.js';
import { isJSON } from './json-helpers.js';

// ── Constants ────────────────────────────────────────────────────────────────

const DEFAULT_SAFETY_SETTINGS = [
  { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
  { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE }
];

const DEFAULT_THINKING_CONFIG = {
  thinkingBudget: 0
};

const DEFAULT_MAX_OUTPUT_TOKENS = 50_000;

/** Models that support thinking features */
const THINKING_SUPPORTED_MODELS = [
  /^gemini-3-flash(-preview)?$/,
  /^gemini-3-pro(-preview|-image-preview)?$/,
  /^gemini-2\.5-pro/,
  /^gemini-2\.5-flash(-preview)?$/,
  /^gemini-2\.5-flash-lite(-preview)?$/,
  /^gemini-2\.0-flash$/
];

/** Model pricing per million tokens (as of Dec 2025) */
const MODEL_PRICING = {
  'gemini-2.5-flash': { input: 0.15, output: 0.60 },
  'gemini-2.5-flash-lite': { input: 0.02, output: 0.10 },
  'gemini-2.5-pro': { input: 2.50, output: 10.00 },
  'gemini-3-pro': { input: 2.00, output: 12.00 },
  'gemini-3-pro-preview': { input: 2.00, output: 12.00 },
  'gemini-2.0-flash': { input: 0.10, output: 0.40 },
  'gemini-2.0-flash-lite': { input: 0.02, output: 0.10 }
};

export { DEFAULT_SAFETY_SETTINGS, DEFAULT_THINKING_CONFIG, THINKING_SUPPORTED_MODELS, MODEL_PRICING, DEFAULT_MAX_OUTPUT_TOKENS };

// ── BaseGemini Class ─────────────────────────────────────────────────────────

/**
 * @typedef {import('./types').BaseGeminiOptions} BaseGeminiOptions
 * @typedef {import('./types').UsageData} UsageData
 * @typedef {import('./types').TransformationExample} TransformationExample
 */

/**
 * Base class for all ak-gemini wrappers.
 * Provides shared initialization, authentication, chat session management,
 * token estimation, cost tracking, and usage reporting.
 *
 * Not typically instantiated directly — use Transformer, Chat, Message, ToolAgent, or CodeAgent.
 */
class BaseGemini {
  /**
   * @param {BaseGeminiOptions} [options={}]
   */
  constructor(options = {}) {
    // ── Model ──
    this.modelName = options.modelName || 'gemini-2.5-flash';

    // ── System Prompt ──
    // Subclasses set their own default if options.systemPrompt is undefined
    if (options.systemPrompt !== undefined) {
      this.systemPrompt = options.systemPrompt;
    } else {
      this.systemPrompt = null; // subclasses override this default
    }

    // ── Auth ──
    this.vertexai = options.vertexai || false;
    this.project = options.project || process.env.GOOGLE_CLOUD_PROJECT || null;
    this.location = options.location || process.env.GOOGLE_CLOUD_LOCATION || undefined;
    this.googleAuthOptions = options.googleAuthOptions || null;
    this.apiKey = options.apiKey !== undefined && options.apiKey !== null ? options.apiKey : process.env.GEMINI_API_KEY;

    if (!this.vertexai && !this.apiKey) {
      throw new Error("Missing Gemini API key. Provide via options.apiKey or GEMINI_API_KEY env var. For Vertex AI, set vertexai: true with project and location.");
    }
    if (this.vertexai && !this.project) {
      throw new Error("Vertex AI requires a project ID. Provide via options.project or GOOGLE_CLOUD_PROJECT env var.");
    }

    // ── Logging ──
    this._configureLogLevel(options.logLevel);

    // ── Labels ──
    this.labels = options.labels || {};

    // ── Chat Config ──
    this.chatConfig = {
      temperature: 0.7,
      topP: 0.95,
      topK: 64,
      safetySettings: DEFAULT_SAFETY_SETTINGS,
      ...options.chatConfig
    };

    // Apply systemPrompt to chatConfig
    if (this.systemPrompt) {
      this.chatConfig.systemInstruction = this.systemPrompt;
    } else if (this.systemPrompt === null && options.systemPrompt === undefined) {
      // Subclass hasn't set a default yet — leave systemInstruction alone
      // (subclass constructor will handle it)
    } else if (options.systemPrompt === null || options.systemPrompt === false) {
      // Explicitly disabled
      delete this.chatConfig.systemInstruction;
    }

    // ── Max Output Tokens ──
    if (options.maxOutputTokens !== undefined) {
      if (options.maxOutputTokens === null) {
        delete this.chatConfig.maxOutputTokens;
      } else {
        this.chatConfig.maxOutputTokens = options.maxOutputTokens;
      }
    } else if (options.chatConfig?.maxOutputTokens !== undefined) {
      if (options.chatConfig.maxOutputTokens === null) {
        delete this.chatConfig.maxOutputTokens;
      }
      // else already set via spread above
    } else {
      this.chatConfig.maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS;
    }

    // ── Thinking Config ──
    this._configureThinking(options.thinkingConfig);

    // ── GenAI Client ──
    const clientOptions = this.vertexai
      ? {
          vertexai: true,
          project: this.project,
          ...(this.location && { location: this.location }),
          ...(this.googleAuthOptions && { googleAuthOptions: this.googleAuthOptions })
        }
      : { apiKey: this.apiKey };

    this.genAIClient = new GoogleGenAI(clientOptions);

    // ── State ──
    this.chatSession = null;
    this.lastResponseMetadata = null;
    this.exampleCount = 0;
    this._cumulativeUsage = {
      promptTokens: 0,
      responseTokens: 0,
      totalTokens: 0,
      attempts: 0
    };

    log.debug(`${this.constructor.name} created with model: ${this.modelName}`);
  }

  // ── Initialization ───────────────────────────────────────────────────────

  /**
   * Initializes the chat session. Idempotent unless force=true.
   * Subclasses can override `_getChatCreateOptions()` to customize.
   * @param {boolean} [force=false]
   * @returns {Promise<void>}
   */
  async init(force = false) {
    if (this.chatSession && !force) return;

    log.debug(`Initializing ${this.constructor.name} chat session with model: ${this.modelName}...`);

    const chatOptions = this._getChatCreateOptions();
    this.chatSession = this.genAIClient.chats.create(chatOptions);

    try {
      await this.genAIClient.models.list();
      log.debug(`${this.constructor.name}: API connection successful.`);
    } catch (e) {
      throw new Error(`${this.constructor.name} initialization failed: ${e.message}`);
    }

    log.debug(`${this.constructor.name}: Chat session initialized.`);
  }

  /**
   * Builds the options object for `genAIClient.chats.create()`.
   * Override in subclasses to add tools, grounding, etc.
   * @returns {Object}
   * @protected
   */
  _getChatCreateOptions() {
    return {
      model: this.modelName,
      config: {
        ...this.chatConfig,
        ...(this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels })
      },
      history: []
    };
  }

  // ── Chat Session Management ──────────────────────────────────────────────

  /**
   * Creates a new chat session with the given history.
   * Internal helper used by init, seed, clearHistory, reset.
   * @param {Array} [history=[]]
   * @returns {Object} The new chat session
   * @protected
   */
  _createChatSession(history = []) {
    const opts = this._getChatCreateOptions();
    opts.history = history;
    return this.genAIClient.chats.create(opts);
  }

  /**
   * Retrieves the current conversation history.
   * @param {boolean} [curated=false]
   * @returns {Array<Object>}
   */
  getHistory(curated = false) {
    if (!this.chatSession) {
      log.warn("Chat session not initialized. No history available.");
      return [];
    }
    return this.chatSession.getHistory(curated);
  }

  /**
   * Clears conversation history. Recreates chat session with empty history.
   * Subclasses may override to preserve seeded examples.
   * @returns {Promise<void>}
   */
  async clearHistory() {
    if (!this.chatSession) {
      log.warn(`Cannot clear history: chat not initialized.`);
      return;
    }
    this.chatSession = this._createChatSession([]);
    this.lastResponseMetadata = null;
    this._cumulativeUsage = { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 0 };
    log.debug(`${this.constructor.name}: Conversation history cleared.`);
  }

  // ── Few-Shot Seeding ─────────────────────────────────────────────────────

  /**
   * Seeds the chat session with example input/output pairs for few-shot learning.
   * @param {TransformationExample[]} examples - Array of example objects
   * @param {Object} [opts={}] - Key configuration
   * @param {string} [opts.promptKey='PROMPT'] - Key for input data in examples
   * @param {string} [opts.answerKey='ANSWER'] - Key for output data in examples
   * @param {string} [opts.contextKey='CONTEXT'] - Key for optional context
   * @param {string} [opts.explanationKey='EXPLANATION'] - Key for optional explanations
   * @param {string} [opts.systemPromptKey='SYSTEM'] - Key for system prompt overrides in examples
   * @returns {Promise<Array>} The updated chat history
   */
  async seed(examples, opts = {}) {
    await this.init();

    if (!examples || !Array.isArray(examples) || examples.length === 0) {
      log.debug("No examples provided. Skipping seeding.");
      return this.getHistory();
    }

    const promptKey = opts.promptKey || 'PROMPT';
    const answerKey = opts.answerKey || 'ANSWER';
    const contextKey = opts.contextKey || 'CONTEXT';
    const explanationKey = opts.explanationKey || 'EXPLANATION';
    const systemPromptKey = opts.systemPromptKey || 'SYSTEM';

    // Check for system prompt override in examples
    const instructionExample = examples.find(ex => ex[systemPromptKey]);
    if (instructionExample) {
      log.debug(`Found system prompt in examples; reinitializing chat.`);
      this.systemPrompt = instructionExample[systemPromptKey];
      this.chatConfig.systemInstruction = /** @type {string} */ (this.systemPrompt);
      await this.init(true);
    }

    log.debug(`Seeding chat with ${examples.length} examples...`);
    const historyToAdd = [];

    for (const example of examples) {
      const contextValue = example[contextKey] || "";
      const promptValue = example[promptKey] || "";
      const answerValue = example[answerKey] || "";
      const explanationValue = example[explanationKey] || "";
      let userText = "";
      let modelResponse = {};

      if (contextValue) {
        let contextText = isJSON(contextValue) ? JSON.stringify(contextValue, null, 2) : contextValue;
        userText += `CONTEXT:\n${contextText}\n\n`;
      }

      if (promptValue) {
        let promptText = isJSON(promptValue) ? JSON.stringify(promptValue, null, 2) : promptValue;
        userText += promptText;
      }

      if (answerValue) modelResponse.data = answerValue;
      if (explanationValue) modelResponse.explanation = explanationValue;
      const modelText = JSON.stringify(modelResponse, null, 2);

      if (userText.trim().length && modelText.trim().length > 0) {
        historyToAdd.push({ role: 'user', parts: [{ text: userText.trim() }] });
        historyToAdd.push({ role: 'model', parts: [{ text: modelText.trim() }] });
      }
    }

    const currentHistory = this.chatSession?.getHistory() || [];
    log.debug(`Adding ${historyToAdd.length} items to chat history (${currentHistory.length} existing)...`);

    this.chatSession = this._createChatSession([...currentHistory, ...historyToAdd]);

    this.exampleCount = currentHistory.length + historyToAdd.length;

    const newHistory = this.chatSession.getHistory();
    log.debug(`Chat session now has ${newHistory.length} history items.`);
    return newHistory;
  }

  // ── Response Metadata ────────────────────────────────────────────────────

  /**
   * Captures response metadata (model version, token counts) from an API response.
   * @param {Object} response - The API response object
   * @protected
   */
  _captureMetadata(response) {
    this.lastResponseMetadata = {
      modelVersion: response.modelVersion || null,
      requestedModel: this.modelName,
      promptTokens: response.usageMetadata?.promptTokenCount || 0,
      responseTokens: response.usageMetadata?.candidatesTokenCount || 0,
      totalTokens: response.usageMetadata?.totalTokenCount || 0,
      timestamp: Date.now()
    };
  }

  /**
   * Returns structured usage data from the last API call for billing verification.
   * Includes CUMULATIVE token counts across all retry attempts.
   * @returns {UsageData|null} Usage data or null if no API call has been made.
   */
  getLastUsage() {
    if (!this.lastResponseMetadata) return null;

    const meta = this.lastResponseMetadata;
    const cumulative = this._cumulativeUsage || { promptTokens: 0, responseTokens: 0, totalTokens: 0, attempts: 1 };
    const useCumulative = cumulative.attempts > 0;

    return {
      promptTokens: useCumulative ? cumulative.promptTokens : meta.promptTokens,
      responseTokens: useCumulative ? cumulative.responseTokens : meta.responseTokens,
      totalTokens: useCumulative ? cumulative.totalTokens : meta.totalTokens,
      attempts: useCumulative ? cumulative.attempts : 1,
      modelVersion: meta.modelVersion,
      requestedModel: meta.requestedModel,
      timestamp: meta.timestamp
    };
  }

  // ── Token Estimation ─────────────────────────────────────────────────────

  /**
   * Estimates INPUT token count for a payload before sending.
   * Includes system prompt + chat history + your new message.
   * @param {Object|string} nextPayload - The next message to estimate
   * @returns {Promise<{ inputTokens: number }>}
   */
  async estimate(nextPayload) {
    const contents = [];

    if (this.systemPrompt) {
      contents.push({ parts: [{ text: this.systemPrompt }] });
    }

    if (this.chatSession && typeof this.chatSession.getHistory === "function") {
      const history = this.chatSession.getHistory();
      if (Array.isArray(history) && history.length > 0) {
        contents.push(...history);
      }
    }

    const nextMessage = typeof nextPayload === "string"
      ? nextPayload
      : JSON.stringify(nextPayload, null, 2);

    contents.push({ parts: [{ text: nextMessage }] });

    const resp = await this.genAIClient.models.countTokens({
      model: this.modelName,
      contents,
    });

    return { inputTokens: resp.totalTokens };
  }

  /**
   * Estimates the INPUT cost of sending a payload based on model pricing.
   * @param {Object|string} nextPayload - The next message to estimate
   * @returns {Promise<Object>} Cost estimation
   */
  async estimateCost(nextPayload) {
    const tokenInfo = await this.estimate(nextPayload);
    const pricing = MODEL_PRICING[this.modelName] || { input: 0, output: 0 };

    return {
      inputTokens: tokenInfo.inputTokens,
      model: this.modelName,
      pricing: pricing,
      estimatedInputCost: (tokenInfo.inputTokens / 1_000_000) * pricing.input,
      note: 'Cost is for input tokens only; output cost depends on response length'
    };
  }

  // ── Private Helpers ──────────────────────────────────────────────────────

  /**
   * Configures the log level based on options, env vars, or NODE_ENV.
   * @param {string} [logLevel]
   * @private
   */
  _configureLogLevel(logLevel) {
    if (logLevel) {
      if (logLevel === 'none') {
        log.level = 'silent';
      } else {
        log.level = logLevel;
      }
    } else if (LOG_LEVEL) {
      log.level = LOG_LEVEL;
    } else if (NODE_ENV === 'dev') {
      log.level = 'debug';
    } else if (NODE_ENV === 'test') {
      log.level = 'warn';
    } else if (NODE_ENV.startsWith('prod')) {
      log.level = 'error';
    } else {
      log.level = 'info';
    }
  }

  /**
   * Configures thinking settings based on model support.
   * @param {Object|null|undefined} thinkingConfig
   * @private
   */
  _configureThinking(thinkingConfig) {
    const modelSupportsThinking = THINKING_SUPPORTED_MODELS.some(p => p.test(this.modelName));

    if (thinkingConfig === undefined) return;

    if (thinkingConfig === null) {
      delete this.chatConfig.thinkingConfig;
      log.debug(`thinkingConfig set to null - removed from configuration`);
      return;
    }

    if (!modelSupportsThinking) {
      log.warn(`Model ${this.modelName} does not support thinking features. Ignoring thinkingConfig.`);
      return;
    }

    const config = { ...DEFAULT_THINKING_CONFIG, ...thinkingConfig };
    if (thinkingConfig.thinkingLevel !== undefined) {
      delete config.thinkingBudget;
    }
    this.chatConfig.thinkingConfig = config;
    log.debug(`Thinking config applied: ${JSON.stringify(config)}`);
  }
}

export default BaseGemini;
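For orientation, here is a minimal usage sketch of the shared surface base.js defines (init, few-shot seeding, pre-flight cost estimation, usage reporting), exercised through the Chat subclass shown next. It assumes the package entry re-exports Chat, as the JSDoc example in chat.js suggests, and that GEMINI_API_KEY is set; the seed payload is hypothetical.

```javascript
import { Chat } from 'ak-gemini';

// Any BaseGemini subclass inherits this surface; Chat is used here for brevity.
const chat = new Chat({ modelName: 'gemini-2.5-flash' });
await chat.init();

// Few-shot seeding with the default PROMPT/ANSWER keys (hypothetical example pair).
await chat.seed([{ PROMPT: 'ping', ANSWER: 'pong' }]);

// Pre-flight estimate: counts system prompt + history + the next message,
// then prices the input side only via MODEL_PRICING.
const { inputTokens, estimatedInputCost } = await chat.estimateCost('ping again');
console.log(`~${inputTokens} input tokens, ~$${estimatedInputCost.toFixed(6)}`);

const { text } = await chat.send('ping again');
console.log(text, chat.getLastUsage()); // cumulative token counts plus attempt count
```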
package/chat.js
ADDED
@@ -0,0 +1,87 @@
/**
 * @fileoverview Chat class — multi-turn text conversation with AI.
 * Extends BaseGemini with simple send/receive text messaging and conversation history.
 */

import BaseGemini from './base.js';
import log from './logger.js';

/**
 * @typedef {import('./types').ChatOptions} ChatOptions
 * @typedef {import('./types').ChatResponse} ChatResponse
 */

/**
 * Multi-turn text conversation with AI.
 * Maintains conversation history for contextual back-and-forth exchanges.
 * Returns plain text responses (not JSON).
 *
 * @example
 * ```javascript
 * import { Chat } from 'ak-gemini';
 *
 * const chat = new Chat({
 *   systemPrompt: 'You are a friendly tutor who explains concepts simply.'
 * });
 *
 * await chat.init();
 * const r1 = await chat.send('What is recursion?');
 * console.log(r1.text);
 *
 * const r2 = await chat.send('Can you give me an example in JavaScript?');
 * console.log(r2.text); // Remembers the recursion context
 * ```
 */
class Chat extends BaseGemini {
  /**
   * @param {ChatOptions} [options={}]
   */
  constructor(options = {}) {
    if (options.systemPrompt === undefined) {
      options = { ...options, systemPrompt: 'You are a helpful AI assistant.' };
    }

    super(options);

    log.debug(`Chat created with model: ${this.modelName}`);
  }

  /**
   * Send a text message and get a response. Adds to conversation history.
   *
   * @param {string} message - The user's message
   * @param {Object} [opts={}] - Per-message options
   * @param {Record<string, string>} [opts.labels] - Per-message billing labels
   * @returns {Promise<ChatResponse>} Response with text and usage data
   */
  async send(message, opts = {}) {
    if (!this.chatSession) await this.init();

    const mergedLabels = { ...this.labels, ...(opts.labels || {}) };
    const hasLabels = this.vertexai && Object.keys(mergedLabels).length > 0;

    const sendParams = { message };
    if (hasLabels) {
      sendParams.config = { labels: mergedLabels };
    }

    const result = await this.chatSession.sendMessage(sendParams);

    this._captureMetadata(result);

    // Set cumulative usage (single attempt for Chat)
    this._cumulativeUsage = {
      promptTokens: this.lastResponseMetadata.promptTokens,
      responseTokens: this.lastResponseMetadata.responseTokens,
      totalTokens: this.lastResponseMetadata.totalTokens,
      attempts: 1
    };

    return {
      text: result.text || '',
      usage: this.getLastUsage()
    };
  }
}

export default Chat;
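Note that the labels plumbing only takes effect on the Vertex AI path: send() guards on this.vertexai before attaching mergedLabels, mirroring the labels spread in _getChatCreateOptions(). A minimal sketch of that path, with placeholder project, location, and label values:

```javascript
import { Chat } from 'ak-gemini';

// Vertex AI auth: no API key needed; project is required (location optional).
const chat = new Chat({
  vertexai: true,
  project: 'my-project',        // placeholder
  location: 'us-central1',      // placeholder
  labels: { team: 'platform' }  // default billing labels for every call
});
await chat.init();

// Per-message labels merge over the instance defaults.
const { text, usage } = await chat.send('Hello', { labels: { feature: 'onboarding' } });
console.log(text, usage.totalTokens);
```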