ak-gemini 1.0.11 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +51 -1
- package/index.cjs +98 -1
- package/index.js +152 -1
- package/package.json +1 -1
- package/types.d.ts +42 -8
package/README.md
CHANGED
@@ -11,6 +11,8 @@ Use this to power LLM-driven data pipelines, JSON mapping, or any automated AI t
 * **Declarative Few-shot Examples:** Seed transformations using example mappings, with support for custom keys (`PROMPT`, `ANSWER`, `CONTEXT`, or your own)
 * **Automatic Validation & Repair:** Validate outputs with your own async function; auto-repair failed payloads with LLM feedback loop (exponential backoff, fully configurable)
 * **Token Counting & Safety:** Preview the *exact* Gemini token consumption for any operation—including all examples, instructions, and your input—before sending, so you can avoid window errors and manage costs.
+* **Conversation Management:** Clear conversation history while preserving examples, or send stateless one-off messages that don't affect history
+* **Response Metadata:** Access actual model version and token counts from API responses for billing verification and debugging
 * **Strong TypeScript/JSDoc Typings:** All public APIs fully typed (see `/types`)
 * **Minimal API Surface:** Dead simple, no ceremony—init, seed, transform, validate.
 * **Robust Logging:** Pluggable logger for all steps, easy debugging
@@ -106,6 +108,25 @@ console.log(validPayload);
 
 ---
 
+### 5. **Conversation Management**
+
+Manage chat history to control costs and isolate requests:
+
+```js
+// Clear conversation history while preserving seeded examples
+await transformer.clearConversation();
+
+// Send a stateless message that doesn't affect chat history
+const result = await transformer.message({ query: "one-off question" }, { stateless: true });
+
+// Check actual model and token usage from last API call
+console.log(transformer.lastResponseMetadata);
+// → { modelVersion: 'gemini-2.5-flash-001', requestedModel: 'gemini-2.5-flash',
+//     promptTokens: 150, responseTokens: 42, totalTokens: 192, timestamp: 1703... }
+```
+
+---
+
 ## API
 
 ### Constructor
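The new section complements the existing `reset()` API documented further down: `reset()` clears history *and* examples, while `clearConversation()` keeps the seeded examples in place. A minimal sketch of the difference, assuming an `init()`-ed and `seed()`-ed transformer as in the README's earlier examples (payloads are illustrative):

```js
// Sketch: clearConversation() vs. reset().
await transformer.clearConversation(); // history cleared, seeded examples kept
await transformer.message({ id: 1 });  // few-shot examples still apply

await transformer.reset();             // history AND examples cleared
await transformer.seed();              // must re-seed (here from examplesFile)
await transformer.message({ id: 2 });
```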
@@ -142,10 +163,14 @@ Initializes Gemini chat session (idempotent).
 Seeds the model with example transformations (uses keys from constructor).
 You can omit `examples` to use the `examplesFile` (if provided).
 
-#### `await transformer.message(sourcePayload)`
+#### `await transformer.message(sourcePayload, options?)`
 
 Transforms input JSON to output JSON using the seeded examples and system instructions. Throws if estimated token window would be exceeded.
 
+**Options:**
+- `stateless: true` — Send a one-off message without affecting chat history (uses `generateContent` instead of chat)
+- `labels: {}` — Per-message billing labels
+
 #### `await transformer.estimateTokenUsage(sourcePayload)`
 
 Returns `{ totalTokens, breakdown }` for the *full request* that would be sent to Gemini (system instructions + all examples + your sourcePayload as the new prompt).
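The two options compose: a stateless call can carry its own billing labels, so an ad-hoc request is tagged for cost attribution without touching the session history. A sketch, with illustrative label values (the key/value pair is not defined by the package):

```js
// Sketch: combining stateless mode with per-message billing labels.
const adhoc = await transformer.message(
  { query: "one-off question" },
  { stateless: true, labels: { purpose: "adhoc-report" } }
);
// Subsequent stateful message() calls are unaffected by this request.
```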
@@ -168,6 +193,31 @@ Resets the Gemini chat session, clearing all history/examples.
 
 Returns the current chat history (for debugging).
 
+#### `await transformer.clearConversation()`
+
+Clears conversation history while preserving seeded examples. Useful for starting fresh user sessions without re-seeding.
+
+---
+
+### Properties
+
+#### `transformer.lastResponseMetadata`
+
+After each API call, contains metadata from the response:
+
+```js
+{
+  modelVersion: string | null, // Actual model version that responded (e.g., 'gemini-2.5-flash-001')
+  requestedModel: string,      // Model you requested (e.g., 'gemini-2.5-flash')
+  promptTokens: number,        // Tokens in the prompt
+  responseTokens: number,      // Tokens in the response
+  totalTokens: number,         // Total tokens used
+  timestamp: number            // When response was received
+}
+```
+
+Useful for verifying billing, debugging model behavior, and tracking token usage.
+
 ---
 
 ## Examples
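Since every API call overwrites `lastResponseMetadata` (see the `index.js` changes below), callers that aggregate usage should copy the object after each call. A minimal sketch, where `payloads` is a hypothetical array of inputs:

```js
// Sketch: aggregating token usage across calls. Copy the metadata
// before the next call overwrites lastResponseMetadata.
let totalTokensUsed = 0;
for (const payload of payloads) {
  await transformer.message(payload);
  const meta = { ...transformer.lastResponseMetadata };
  totalTokensUsed += meta.totalTokens;
  console.log(`${meta.requestedModel} answered as ${meta.modelVersion}: ${meta.totalTokens} tokens`);
}
```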
package/index.cjs
CHANGED
@@ -82,7 +82,7 @@ var DEFAULT_THINKING_CONFIG = {
   thinkingBudget: 0,
   thinkingLevel: import_genai.ThinkingLevel.MINIMAL
 };
-var DEFAULT_MAX_OUTPUT_TOKENS =
+var DEFAULT_MAX_OUTPUT_TOKENS = 5e4;
 var THINKING_SUPPORTED_MODELS = [
   /^gemini-3-flash(-preview)?$/,
   /^gemini-3-pro(-preview|-image-preview)?$/,
@@ -120,6 +120,8 @@ var AITransformer = class {
     this.onlyJSON = true;
     this.asyncValidator = null;
     this.logLevel = "info";
+    this.lastResponseMetadata = null;
+    this.exampleCount = 0;
     AITransformFactory.call(this, options);
     this.init = initChat.bind(this);
     this.seed = seedWithExamples.bind(this);
@@ -136,6 +138,7 @@ var AITransformer = class {
     this.estimateTokenUsage = estimateTokenUsage.bind(this);
     this.updateSystemInstructions = updateSystemInstructions.bind(this);
     this.estimateCost = estimateCost.bind(this);
+    this.clearConversation = clearConversation.bind(this);
   }
 };
 var index_default = AITransformer;
@@ -366,6 +369,7 @@ ${contextText}
     },
     history: [...currentHistory, ...historyToAdd]
   });
+  this.exampleCount = currentHistory.length + historyToAdd.length;
   const newHistory = this.chat.getHistory();
   logger_default.debug(`Created new chat session with ${newHistory.length} examples.`);
   return newHistory;
@@ -383,6 +387,23 @@ async function rawMessage(sourcePayload, messageOptions = {}) {
     sendParams.config = { labels: mergedLabels };
   }
   const result = await this.chat.sendMessage(sendParams);
+  this.lastResponseMetadata = {
+    modelVersion: result.modelVersion || null,
+    requestedModel: this.modelName,
+    promptTokens: result.usageMetadata?.promptTokenCount || 0,
+    responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
+    totalTokens: result.usageMetadata?.totalTokenCount || 0,
+    timestamp: Date.now()
+  };
+  if (result.usageMetadata && logger_default.level !== "silent") {
+    logger_default.debug(`API response metadata:`, {
+      modelVersion: result.modelVersion || "not-provided",
+      requestedModel: this.modelName,
+      promptTokens: result.usageMetadata.promptTokenCount,
+      responseTokens: result.usageMetadata.candidatesTokenCount,
+      totalTokens: result.usageMetadata.totalTokenCount
+    });
+  }
   const modelResponse = result.text;
   const extractedJSON = extractJSON(modelResponse);
   if (extractedJSON?.data) {
@@ -400,6 +421,9 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
   if (!this.chat) {
     throw new Error("Chat session not initialized. Please call init() first.");
   }
+  if (options.stateless) {
+    return await statelessMessage.call(this, sourcePayload, options, validatorFn);
+  }
   const maxRetries = options.maxRetries ?? this.maxRetries;
   const retryDelay = options.retryDelay ?? this.retryDelay;
   const enableGroundingForMessage = options.enableGrounding ?? this.enableGrounding;
@@ -491,6 +515,17 @@ Respond with JSON only \u2013 no comments or explanations.
   let result;
   try {
     result = await this.chat.sendMessage({ message: prompt });
+    this.lastResponseMetadata = {
+      modelVersion: result.modelVersion || null,
+      requestedModel: this.modelName,
+      promptTokens: result.usageMetadata?.promptTokenCount || 0,
+      responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
+      totalTokens: result.usageMetadata?.totalTokenCount || 0,
+      timestamp: Date.now()
+    };
+    if (result.usageMetadata && logger_default.level !== "silent") {
+      logger_default.debug(`Rebuild response metadata - tokens used:`, result.usageMetadata.totalTokenCount);
+    }
   } catch (err) {
     throw new Error(`Gemini call failed while repairing payload: ${err.message}`);
   }
@@ -580,6 +615,68 @@ async function updateSystemInstructions(newInstructions) {
   logger_default.debug("Updating system instructions and reinitializing chat...");
   await this.init(true);
 }
+async function clearConversation() {
+  if (!this.chat) {
+    logger_default.warn("Cannot clear conversation: chat not initialized.");
+    return;
+  }
+  const history = this.chat.getHistory();
+  const exampleHistory = history.slice(0, this.exampleCount || 0);
+  this.chat = await this.genAIClient.chats.create({
+    model: this.modelName,
+    // @ts-ignore
+    config: {
+      ...this.chatConfig,
+      ...Object.keys(this.labels).length > 0 && { labels: this.labels }
+    },
+    history: exampleHistory
+  });
+  logger_default.debug(`Conversation cleared. Preserved ${exampleHistory.length} example items.`);
+}
+async function statelessMessage(sourcePayload, options = {}, validatorFn = null) {
+  if (!this.chat) {
+    throw new Error("Chat session not initialized. Please call init() first.");
+  }
+  const payloadStr = typeof sourcePayload === "string" ? sourcePayload : JSON.stringify(sourcePayload, null, 2);
+  const contents = [];
+  if (this.exampleCount > 0) {
+    const history = this.chat.getHistory();
+    const exampleHistory = history.slice(0, this.exampleCount);
+    contents.push(...exampleHistory);
+  }
+  contents.push({ role: "user", parts: [{ text: payloadStr }] });
+  const mergedLabels = { ...this.labels, ...options.labels || {} };
+  const result = await this.genAIClient.models.generateContent({
+    model: this.modelName,
+    contents,
+    config: {
+      ...this.chatConfig,
+      ...Object.keys(mergedLabels).length > 0 && { labels: mergedLabels }
+    }
+  });
+  this.lastResponseMetadata = {
+    modelVersion: result.modelVersion || null,
+    requestedModel: this.modelName,
+    promptTokens: result.usageMetadata?.promptTokenCount || 0,
+    responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
+    totalTokens: result.usageMetadata?.totalTokenCount || 0,
+    timestamp: Date.now()
+  };
+  if (result.usageMetadata && logger_default.level !== "silent") {
+    logger_default.debug(`Stateless message metadata:`, {
+      modelVersion: result.modelVersion || "not-provided",
+      promptTokens: result.usageMetadata.promptTokenCount,
+      responseTokens: result.usageMetadata.candidatesTokenCount
+    });
+  }
+  const modelResponse = result.text;
+  const extractedJSON = extractJSON(modelResponse);
+  let transformedPayload = extractedJSON?.data ? extractedJSON.data : extractedJSON;
+  if (validatorFn) {
+    await validatorFn(transformedPayload);
+  }
+  return transformedPayload;
+}
 function attemptJSONRecovery(text, maxAttempts = 100) {
   if (!text || typeof text !== "string") return null;
   try {
package/index.js
CHANGED
@@ -57,7 +57,7 @@ const DEFAULT_THINKING_CONFIG = {
   thinkingLevel: ThinkingLevel.MINIMAL
 };
 
-const DEFAULT_MAX_OUTPUT_TOKENS =
+const DEFAULT_MAX_OUTPUT_TOKENS = 50_000; // Default ceiling for output tokens
 
 // Models that support thinking features (as of Dec 2024)
 // Using regex patterns for more precise matching
@@ -112,6 +112,8 @@ class AITransformer {
     this.onlyJSON = true; // always return JSON
     this.asyncValidator = null; // for transformWithValidation
     this.logLevel = 'info'; // default log level
+    this.lastResponseMetadata = null; // stores metadata from last API response
+    this.exampleCount = 0; // tracks number of example history items from seed()
     AITransformFactory.call(this, options);
 
     //external API
@@ -136,6 +138,7 @@ class AITransformer {
     this.estimateTokenUsage = estimateTokenUsage.bind(this);
     this.updateSystemInstructions = updateSystemInstructions.bind(this);
     this.estimateCost = estimateCost.bind(this);
+    this.clearConversation = clearConversation.bind(this);
   }
 }
 
@@ -467,6 +470,8 @@ async function seedWithExamples(examples) {
     history: [...currentHistory, ...historyToAdd],
   });
 
+  // Track example count for clearConversation() and stateless messages
+  this.exampleCount = currentHistory.length + historyToAdd.length;
 
   const newHistory = this.chat.getHistory();
   log.debug(`Created new chat session with ${newHistory.length} examples.`);
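`exampleCount` snapshots the history length at seed time (any prior history plus the newly added turns); `clearConversation()` and stateless messages later take `history.slice(0, exampleCount)` to recover exactly that seeded prefix. Note it counts history *items*, not example pairs. A sketch of the arithmetic, under the assumption (not stated in this diff) that each example expands to one user turn plus one model turn:

```js
// Sketch: exampleCount counts history items, not examples.
// `threeExamples` is a hypothetical array of 3 transformation examples.
await transformer.seed(threeExamples);
console.log(transformer.exampleCount); // → 6 in a fresh session, under that assumption
```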
@@ -509,6 +514,27 @@ async function rawMessage(sourcePayload, messageOptions = {}) {
   }
 
   const result = await this.chat.sendMessage(sendParams);
+
+  // Capture and log response metadata for model verification and debugging
+  this.lastResponseMetadata = {
+    modelVersion: result.modelVersion || null,
+    requestedModel: this.modelName,
+    promptTokens: result.usageMetadata?.promptTokenCount || 0,
+    responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
+    totalTokens: result.usageMetadata?.totalTokenCount || 0,
+    timestamp: Date.now()
+  };
+
+  if (result.usageMetadata && log.level !== 'silent') {
+    log.debug(`API response metadata:`, {
+      modelVersion: result.modelVersion || 'not-provided',
+      requestedModel: this.modelName,
+      promptTokens: result.usageMetadata.promptTokenCount,
+      responseTokens: result.usageMetadata.candidatesTokenCount,
+      totalTokens: result.usageMetadata.totalTokenCount
+    });
+  }
+
   const modelResponse = result.text;
   const extractedJSON = extractJSON(modelResponse); // Assuming extractJSON is defined
 
@@ -539,6 +565,12 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
   if (!this.chat) {
     throw new Error("Chat session not initialized. Please call init() first.");
   }
+
+  // Handle stateless messages separately - they don't add to chat history
+  if (options.stateless) {
+    return await statelessMessage.call(this, sourcePayload, options, validatorFn);
+  }
+
   const maxRetries = options.maxRetries ?? this.maxRetries;
   const retryDelay = options.retryDelay ?? this.retryDelay;
 
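One behavioral consequence of this early return: stateless messages bypass the retry/repair loop that the rest of `prepareAndValidateMessage` implements. In `statelessMessage` (added further down), a supplied validator runs once, and a failure propagates as a thrown error rather than triggering an LLM repair round-trip. A sketch, with hypothetical `payload` and `myValidator`:

```js
// Sketch: no auto-repair on the stateless path.
try {
  await transformer.message(payload, { stateless: true }, myValidator);
} catch (err) {
  // Handle the validation failure yourself; the stateful path would
  // have retried with LLM feedback instead.
}
```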
@@ -656,6 +688,7 @@ async function prepareAndValidateMessage(sourcePayload, options = {}, validatorF
 
 /**
  * Rebuilds a payload based on server error feedback
+ * @this {ExportedAPI}
  * @param {Object} lastPayload - The payload that failed validation
  * @param {string} serverError - The error message from the server
  * @returns {Promise<Object>} - A new corrected payload
@@ -681,6 +714,20 @@ Respond with JSON only – no comments or explanations.
   let result;
   try {
     result = await this.chat.sendMessage({ message: prompt });
+
+    // Capture and log response metadata for rebuild calls too
+    this.lastResponseMetadata = {
+      modelVersion: result.modelVersion || null,
+      requestedModel: this.modelName,
+      promptTokens: result.usageMetadata?.promptTokenCount || 0,
+      responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
+      totalTokens: result.usageMetadata?.totalTokenCount || 0,
+      timestamp: Date.now()
+    };
+
+    if (result.usageMetadata && log.level !== 'silent') {
+      log.debug(`Rebuild response metadata - tokens used:`, result.usageMetadata.totalTokenCount);
+    }
   } catch (err) {
     throw new Error(`Gemini call failed while repairing payload: ${err.message}`);
   }
@@ -833,6 +880,110 @@ async function updateSystemInstructions(newInstructions) {
   await this.init(true); // Force reinitialize with new instructions
 }
 
+/**
+ * Clears conversation history while preserving seeded examples.
+ * Useful for starting a fresh conversation within the same session
+ * without losing the few-shot learning examples.
+ * @this {ExportedAPI}
+ * @returns {Promise<void>}
+ */
+async function clearConversation() {
+  if (!this.chat) {
+    log.warn("Cannot clear conversation: chat not initialized.");
+    return;
+  }
+
+  const history = this.chat.getHistory();
+  const exampleHistory = history.slice(0, this.exampleCount || 0);
+
+  this.chat = await this.genAIClient.chats.create({
+    model: this.modelName,
+    // @ts-ignore
+    config: {
+      ...this.chatConfig,
+      ...(Object.keys(this.labels).length > 0 && { labels: this.labels })
+    },
+    history: exampleHistory,
+  });
+
+  log.debug(`Conversation cleared. Preserved ${exampleHistory.length} example items.`);
+}
+
+/**
+ * Sends a one-off message using generateContent (not chat).
+ * Does NOT affect chat history - useful for isolated requests.
+ * @this {ExportedAPI}
+ * @param {Object|string} sourcePayload - The source payload.
+ * @param {Object} [options] - Options including labels.
+ * @param {AsyncValidatorFunction|null} [validatorFn] - Optional validator.
+ * @returns {Promise<Object>} - The transformed payload.
+ */
+async function statelessMessage(sourcePayload, options = {}, validatorFn = null) {
+  if (!this.chat) {
+    throw new Error("Chat session not initialized. Please call init() first.");
+  }
+
+  const payloadStr = typeof sourcePayload === 'string'
+    ? sourcePayload
+    : JSON.stringify(sourcePayload, null, 2);
+
+  // Build contents including examples from current chat history
+  const contents = [];
+
+  // Include seeded examples if we have them
+  if (this.exampleCount > 0) {
+    const history = this.chat.getHistory();
+    const exampleHistory = history.slice(0, this.exampleCount);
+    contents.push(...exampleHistory);
+  }
+
+  // Add the user message
+  contents.push({ role: 'user', parts: [{ text: payloadStr }] });
+
+  // Merge labels
+  const mergedLabels = { ...this.labels, ...(options.labels || {}) };
+
+  // Use generateContent instead of chat.sendMessage
+  const result = await this.genAIClient.models.generateContent({
+    model: this.modelName,
+    contents: contents,
+    config: {
+      ...this.chatConfig,
+      ...(Object.keys(mergedLabels).length > 0 && { labels: mergedLabels })
+    }
+  });
+
+  // Capture and log response metadata
+  this.lastResponseMetadata = {
+    modelVersion: result.modelVersion || null,
+    requestedModel: this.modelName,
+    promptTokens: result.usageMetadata?.promptTokenCount || 0,
+    responseTokens: result.usageMetadata?.candidatesTokenCount || 0,
+    totalTokens: result.usageMetadata?.totalTokenCount || 0,
+    timestamp: Date.now()
+  };
+
+  if (result.usageMetadata && log.level !== 'silent') {
+    log.debug(`Stateless message metadata:`, {
+      modelVersion: result.modelVersion || 'not-provided',
+      promptTokens: result.usageMetadata.promptTokenCount,
+      responseTokens: result.usageMetadata.candidatesTokenCount
+    });
+  }
+
+  const modelResponse = result.text;
+  const extractedJSON = extractJSON(modelResponse);
+
+  let transformedPayload = extractedJSON?.data ? extractedJSON.data : extractedJSON;
+
+  // Validate if a validator is provided
+  if (validatorFn) {
+    await validatorFn(transformedPayload);
+  }
+
+  return transformedPayload;
+}
+
 
 /*
 ----
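Worth noting about the `statelessMessage` implementation above: it replays the seeded example turns into a fresh `models.generateContent` call, so each stateless request re-pays the example tokens, but the live chat session is never touched. A quick sketch to confirm the history is unaffected (the payload is illustrative):

```js
// Sketch: a stateless call leaves the session history unchanged.
const lengthBefore = transformer.getHistory().length;
await transformer.message({ q: "ad-hoc" }, { stateless: true });
console.assert(transformer.getHistory().length === lengthBefore);
```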
package/package.json
CHANGED
package/types.d.ts
CHANGED
@@ -31,6 +31,26 @@ export interface ChatConfig {
   [key: string]: any; // Additional properties for flexibility
 }
 
+/** Metadata from the last API response, useful for debugging and cost tracking */
+export interface ResponseMetadata {
+  modelVersion: string | null; // The actual model version that responded
+  requestedModel: string; // The model that was requested
+  promptTokens: number; // Number of tokens in the prompt
+  responseTokens: number; // Number of tokens in the response
+  totalTokens: number; // Total tokens used
+  timestamp: number; // Timestamp of when the response was received
+}
+
+/** Options for per-message configuration */
+export interface MessageOptions {
+  labels?: Record<string, string>; // Per-message billing labels
+  stateless?: boolean; // If true, send message without affecting chat history
+  maxRetries?: number; // Override max retries for this message
+  retryDelay?: number; // Override retry delay for this message
+  enableGrounding?: boolean; // Override grounding setting for this message
+  groundingConfig?: Record<string, any>; // Override grounding config for this message
+}
+
 export interface AITransformerContext {
   modelName?: string;
   systemInstructions?: string;
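For JavaScript consumers, the new interfaces are usable from plain JS via JSDoc. The import specifier below is an assumption based on the package name; the diff does not show how `types.d.ts` is wired up in `package.json`:

```js
// Sketch: referencing the new types via JSDoc. "ak-gemini" as the
// specifier is an assumption, not confirmed by this diff.
/** @type {import("ak-gemini").MessageOptions} */
const opts = { stateless: true, labels: { env: "dev" } };
```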
@@ -47,8 +67,8 @@ export interface AITransformerContext {
   maxRetries?: number;
   retryDelay?: number;
   init?: (force?: boolean) => Promise<void>; // Initialization function
-  seed?: () => Promise<void>; // Function to seed the transformer with examples
-  message?: (payload: Record<string, unknown
+  seed?: () => Promise<void>; // Function to seed the transformer with examples
+  message?: (payload: Record<string, unknown>, opts?: MessageOptions, validatorFn?: AsyncValidatorFunction | null) => Promise<Record<string, unknown>>; // Function to send messages to the model
   rebuild?: (lastPayload: Record<string, unknown>, serverError: string) => Promise<Record<string, unknown>>; // Function to rebuild the transformer
   rawMessage?: (payload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }) => Promise<Record<string, unknown>>; // Function to send raw messages to the model
   genAIClient?: GoogleGenAI; // Google GenAI client instance
@@ -57,7 +77,9 @@ export interface AITransformerContext {
   groundingConfig?: Record<string, any>; // Additional grounding configuration options
   labels?: Record<string, string>; // Custom labels for billing segmentation (keys: 1-63 chars lowercase, values: max 63 chars)
   estimateTokenUsage?: (nextPayload: Record<string, unknown> | string) => Promise<{ totalTokens: number; breakdown?: any }>;
-
+  lastResponseMetadata?: ResponseMetadata | null; // Metadata from the last API response
+  exampleCount?: number; // Number of example history items from seed()
+  clearConversation?: () => Promise<void>; // Clears conversation history while preserving examples
 }
 
 export interface TransformationExample {
@@ -92,7 +114,7 @@ export interface AITransformerOptions {
   systemInstructions?: string; // Custom system instructions for the model
   chatConfig?: ChatConfig; // Configuration object for the chat session
   thinkingConfig?: ThinkingConfig; // Thinking features configuration (defaults to thinkingBudget: 0, thinkingLevel: "MINIMAL")
-  maxOutputTokens?: number; // Maximum number of tokens that can be generated in the response (defaults to
+  maxOutputTokens?: number; // Maximum number of tokens that can be generated in the response (defaults to 50000)
   examplesFile?: string; // Path to JSON file containing transformation examples
   exampleData?: TransformationExample[]; // Inline examples to seed the transformer
   sourceKey?: string; // Key name for source data in examples (alias for promptKey)
@@ -150,14 +172,24 @@ export declare class AITransformer {
   enableGrounding: boolean;
   groundingConfig: Record<string, any>;
   labels: Record<string, string>;
-
+  /** Metadata from the last API response (model version, token counts, etc.) */
+  lastResponseMetadata: ResponseMetadata | null;
+  /** Number of history items that are seeded examples (used by clearConversation) */
+  exampleCount: number;
+
   // Methods
   init(force?: boolean): Promise<void>;
   seed(examples?: TransformationExample[]): Promise<any>;
-
+  /**
+   * Send a message to the model.
+   * @param payload - The payload to transform
+   * @param opts - Options including { stateless: true } to send without affecting history
+   * @param validatorFn - Optional validator function
+   */
+  message(payload: Record<string, unknown>, opts?: MessageOptions, validatorFn?: AsyncValidatorFunction | null): Promise<Record<string, unknown>>;
   rawMessage(sourcePayload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }): Promise<Record<string, unknown> | any>;
-  transformWithValidation(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?:
-  messageAndValidate(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?:
+  transformWithValidation(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?: MessageOptions): Promise<Record<string, unknown>>;
+  messageAndValidate(sourcePayload: Record<string, unknown>, validatorFn: AsyncValidatorFunction, options?: MessageOptions): Promise<Record<string, unknown>>;
   rebuild(lastPayload: Record<string, unknown>, serverError: string): Promise<Record<string, unknown>>;
   reset(): Promise<void>;
   getHistory(): Array<any>;
@@ -171,6 +203,8 @@ export declare class AITransformer {
     estimatedInputCost: number;
     note: string;
   }>;
+  /** Clears conversation history while preserving seeded examples */
+  clearConversation(): Promise<void>;
 }
 
 // Default export