ak-gemini 2.0.2 → 2.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/GUIDE.md CHANGED
@@ -22,12 +22,15 @@ npm install ak-gemini
22
22
  7. [ToolAgent — Agent with Custom Tools](#toolagent--agent-with-custom-tools)
23
23
  8. [CodeAgent — Agent That Writes and Runs Code](#codeagent--agent-that-writes-and-runs-code)
24
24
  9. [RagAgent — Document & Data Q&A](#ragagent--document--data-qa)
25
- 10. [Observability & Usage Tracking](#observability--usage-tracking)
26
- 11. [Thinking Configuration](#thinking-configuration)
27
- 12. [Error Handling & Retries](#error-handling--retries)
28
- 13. [Performance Tips](#performance-tips)
29
- 14. [Common Integration Patterns](#common-integration-patterns)
30
- 15. [Quick Reference](#quick-reference)
25
+ 10. [Embedding — Vector Embeddings](#embedding--vector-embeddings)
26
+ 11. [Google Search Grounding](#google-search-grounding)
27
+ 12. [Context Caching](#context-caching)
28
+ 13. [Observability & Usage Tracking](#observability--usage-tracking)
29
+ 14. [Thinking Configuration](#thinking-configuration)
30
+ 15. [Error Handling & Retries](#error-handling--retries)
31
+ 16. [Performance Tips](#performance-tips)
32
+ 17. [Common Integration Patterns](#common-integration-patterns)
33
+ 18. [Quick Reference](#quick-reference)
31
34
 
32
35
  ---
33
36
 
@@ -96,6 +99,7 @@ Vertex AI uses Application Default Credentials. Run `gcloud auth application-def
96
99
  | Give the AI tools to call (APIs, DB, etc.) | `ToolAgent` | `chat()` / `stream()` |
97
100
  | Let the AI write and run JavaScript | `CodeAgent` | `chat()` / `stream()` |
98
101
  | Q&A over documents, files, or data | `RagAgent` | `chat()` / `stream()` |
102
+ | Generate vector embeddings | `Embedding` | `embed()` / `embedBatch()` |
99
103
 
100
104
  **Rule of thumb**: Start with `Message` for the simplest integration. Move to `Chat` if you need history. Use `Transformer` when you need structured JSON output with validation. Use agents when the AI needs to take action.
101
105
 
@@ -570,6 +574,313 @@ Prefer `localFiles` and `localData` when possible — they skip the upload step
570
574
 
571
575
  ---
572
576
 
577
+ ## Embedding — Vector Embeddings
578
+
579
+ Generate vector embeddings for similarity search, clustering, classification, and deduplication. The `Embedding` class uses Google's text embedding models and provides a simple API for single and batch operations.
580
+
581
+ ```javascript
582
+ import { Embedding } from 'ak-gemini';
583
+
584
+ const embedder = new Embedding({
585
+ modelName: 'gemini-embedding-001', // default
586
+ });
587
+ ```
588
+
589
+ ### Basic Embedding
590
+
591
+ ```javascript
592
+ const result = await embedder.embed('The quick brown fox jumps over the lazy dog');
593
+ console.log(result.values); // [0.012, -0.034, 0.056, ...] — 768 dimensions by default
594
+ console.log(result.values.length); // 768
595
+ ```
596
+
597
+ ### Batch Embedding
598
+
599
+ Embed multiple texts in a single API call for efficiency:
600
+
601
+ ```javascript
602
+ const texts = [
603
+ 'Machine learning fundamentals',
604
+ 'Deep neural networks',
605
+ 'How to bake sourdough bread',
606
+ ];
607
+
608
+ const results = await embedder.embedBatch(texts);
609
+ // results[0].values, results[1].values, results[2].values
610
+ ```
611
+
612
+ ### Task Types
613
+
614
+ Task types optimize embeddings for specific use cases:
615
+
616
+ ```javascript
617
+ // For documents being indexed
618
+ const docEmbedder = new Embedding({
619
+ taskType: 'RETRIEVAL_DOCUMENT',
620
+ title: 'API Reference' // title only applies to RETRIEVAL_DOCUMENT
621
+ });
622
+
623
+ // For search queries against those documents
624
+ const queryEmbedder = new Embedding({
625
+ taskType: 'RETRIEVAL_QUERY'
626
+ });
627
+
628
+ // Other task types
629
+ new Embedding({ taskType: 'SEMANTIC_SIMILARITY' });
630
+ new Embedding({ taskType: 'CLUSTERING' });
631
+ new Embedding({ taskType: 'CLASSIFICATION' });
632
+ ```
633
+
634
+ **Best practice**: Use `RETRIEVAL_DOCUMENT` when embedding content to store, and `RETRIEVAL_QUERY` when embedding the user's search query.
635
+
636
+ ### Output Dimensionality
637
+
638
+ Reduce embedding dimensions to save storage space (trade-off with accuracy):
639
+
640
+ ```javascript
641
+ // Constructor-level
642
+ const embedder = new Embedding({ outputDimensionality: 256 });
643
+
644
+ // Per-call override
645
+ const result = await embedder.embed('Hello', { outputDimensionality: 128 });
646
+ console.log(result.values.length); // 128
647
+ ```
648
+
649
+ Supported by `gemini-embedding-001` (not `text-embedding-001`).
650
+
651
+ ### Cosine Similarity
652
+
653
+ Compare two embeddings without an API call:
654
+
655
+ ```javascript
656
+ const [a, b] = await Promise.all([
657
+ embedder.embed('cats are great pets'),
658
+ embedder.embed('dogs are wonderful companions'),
659
+ ]);
660
+
661
+ const score = embedder.similarity(a.values, b.values);
662
+ // score ≈ 0.85 (semantically similar)
663
+ ```
664
+
665
+ Returns a value between -1 (opposite) and 1 (identical). Typical thresholds:
666
+ - `> 0.8` — very similar
667
+ - `0.5–0.8` — somewhat related
668
+ - `< 0.5` — different topics
669
+
670
+ ### Integration Pattern: Semantic Search
671
+
672
+ ```javascript
673
+ // Index phase
674
+ const documents = ['doc1 text...', 'doc2 text...', 'doc3 text...'];
675
+ const docEmbedder = new Embedding({ taskType: 'RETRIEVAL_DOCUMENT' });
676
+ const docVectors = await docEmbedder.embedBatch(documents);
677
+
678
+ // Search phase
679
+ const queryEmbedder = new Embedding({ taskType: 'RETRIEVAL_QUERY' });
680
+ const queryVector = await queryEmbedder.embed('how do I authenticate?');
681
+
682
+ // Find best match
683
+ const scores = docVectors.map((doc, i) => ({
684
+ index: i,
685
+ score: queryEmbedder.similarity(queryVector.values, doc.values)
686
+ }));
687
+ scores.sort((a, b) => b.score - a.score);
688
+ console.log('Best match:', documents[scores[0].index]);
689
+ ```
690
+
691
+ ### When to Use Embedding
692
+
693
+ - Semantic search — find documents similar to a query
694
+ - Deduplication — detect near-duplicate content
695
+ - Clustering — group similar items together
696
+ - Classification — compare against known category embeddings
697
+ - Recommendation — find items similar to user preferences
698
+
699
+ ---
700
+
701
+ ## Google Search Grounding
702
+
703
+ Ground model responses in real-time Google Search results. Available on **all classes** via `enableGrounding` — not just Transformer.
704
+
705
+ **Warning**: Google Search grounding costs approximately **$35 per 1,000 queries**. Use selectively.
706
+
707
+ ### Basic Usage
708
+
709
+ ```javascript
710
+ import { Chat } from 'ak-gemini';
711
+
712
+ const chat = new Chat({
713
+ enableGrounding: true
714
+ });
715
+
716
+ const result = await chat.send('What happened in tech news today?');
717
+ console.log(result.text); // Response grounded in current search results
718
+ ```
719
+
720
+ ### Grounding Metadata
721
+
722
+ When grounding is enabled, `getLastUsage()` includes source attribution:
723
+
724
+ ```javascript
725
+ const usage = chat.getLastUsage();
726
+
727
+ if (usage.groundingMetadata) {
728
+ // Search queries the model executed
729
+ console.log('Queries:', usage.groundingMetadata.webSearchQueries);
730
+
731
+ // Source citations
732
+ for (const chunk of usage.groundingMetadata.groundingChunks || []) {
733
+ if (chunk.web) {
734
+ console.log(`Source: ${chunk.web.title} — ${chunk.web.uri}`);
735
+ }
736
+ }
737
+ }
738
+ ```
739
+
740
+ ### Grounding Configuration
741
+
742
+ ```javascript
743
+ const chat = new Chat({
744
+ enableGrounding: true,
745
+ groundingConfig: {
746
+ // Exclude specific domains
747
+ excludeDomains: ['reddit.com', 'twitter.com'],
748
+
749
+ // Filter by time range (Gemini API only)
750
+ timeRangeFilter: {
751
+ startTime: '2025-01-01T00:00:00Z',
752
+ endTime: '2025-12-31T23:59:59Z'
753
+ }
754
+ }
755
+ });
756
+ ```
757
+
758
+ ### Grounding with ToolAgent
759
+
760
+ Grounding works alongside user-defined tools — both are merged into the tools array automatically:
761
+
762
+ ```javascript
763
+ const agent = new ToolAgent({
764
+ enableGrounding: true,
765
+ tools: [
766
+ { name: 'save_result', description: 'Save a research result', parametersJsonSchema: { type: 'object', properties: { title: { type: 'string' }, summary: { type: 'string' } }, required: ['title', 'summary'] } }
767
+ ],
768
+ toolExecutor: async (name, args) => {
769
+ if (name === 'save_result') return await db.insert(args);
770
+ }
771
+ });
772
+
773
+ // The agent can search the web AND call your tools
774
+ const result = await agent.chat('Research the latest AI safety developments and save the key findings');
775
+ ```
776
+
777
+ ### Per-Message Grounding Toggle (Transformer)
778
+
779
+ Transformer supports toggling grounding per-message without rebuilding the instance:
780
+
781
+ ```javascript
782
+ const t = new Transformer({ enableGrounding: false });
783
+
784
+ // Enable grounding for just this call
785
+ const result = await t.send(payload, { enableGrounding: true });
786
+
787
+ // Back to no grounding for subsequent calls
788
+ ```
789
+
790
+ ### When to Use Grounding
791
+
792
+ - Questions about current events, recent news, or real-time data
793
+ - Fact-checking or verification tasks
794
+ - Research assistants that need up-to-date information
795
+ - Any scenario where the model's training data cutoff is a limitation
796
+
797
+ ---
798
+
799
+ ## Context Caching
800
+
801
+ Cache system prompts, documents, or tool definitions to reduce costs when making many API calls with the same large context. Cached tokens are billed at a reduced rate.
802
+
803
+ ### When Context Caching Helps
804
+
805
+ - **Large system prompts** reused across many calls
806
+ - **RagAgent** with the same document set serving many queries
807
+ - **ToolAgent** with many tool definitions
808
+ - Any scenario with high token count in repeated context
809
+
810
+ ### Create and Use a Cache
811
+
812
+ ```javascript
813
+ import { Chat } from 'ak-gemini';
814
+
815
+ const chat = new Chat({
816
+ systemPrompt: veryLongSystemPrompt // e.g., 10,000+ tokens
817
+ });
818
+
819
+ // Create a cache (auto-uses this instance's model and systemPrompt)
820
+ const cache = await chat.createCache({
821
+ ttl: '3600s', // 1 hour
822
+ displayName: 'my-app-system-prompt'
823
+ });
824
+
825
+ console.log(cache.name); // Server-generated resource name
826
+ console.log(cache.expireTime); // When it expires
827
+
828
+ // Attach the cache to this instance
829
+ await chat.useCache(cache.name);
830
+
831
+ // All subsequent calls use cached tokens at reduced cost
832
+ const r1 = await chat.send('Hello');
833
+ const r2 = await chat.send('Tell me more');
834
+ ```
835
+
836
+ ### Cache Management
837
+
838
+ ```javascript
839
+ // List all caches
840
+ const caches = await chat.listCaches();
841
+
842
+ // Get cache details
843
+ const info = await chat.getCache(cache.name);
844
+ console.log(info.usageMetadata?.totalTokenCount);
845
+
846
+ // Extend TTL
847
+ await chat.updateCache(cache.name, { ttl: '7200s' });
848
+
849
+ // Delete when done
850
+ await chat.deleteCache(cache.name);
851
+ ```
852
+
853
+ ### Cache with Constructor
854
+
855
+ If you already have a cache name, pass it directly:
856
+
857
+ ```javascript
858
+ const chat = new Chat({
859
+ cachedContent: 'projects/my-project/locations/us-central1/cachedContents/abc123'
860
+ });
861
+ ```
862
+
863
+ ### What Can Be Cached
864
+
865
+ The `createCache()` config accepts:
866
+
867
+ | Field | Description |
868
+ |---|---|
869
+ | `systemInstruction` | System prompt (auto-populated from instance if not provided) |
870
+ | `contents` | Content messages to cache |
871
+ | `tools` | Tool declarations to cache |
872
+ | `toolConfig` | Tool configuration to cache |
873
+ | `ttl` | Time-to-live (e.g., `'3600s'`) |
874
+ | `displayName` | Human-readable label |
875
+
876
+ ### Cost Savings
877
+
878
+ Context caching reduces input token costs for cached content. The exact savings depend on the model — check [Google's pricing page](https://ai.google.dev/pricing) for current rates. The trade-off is the cache storage cost and the minimum cache size requirement.
879
+
880
+ **Rule of thumb**: Caching pays off when you make many calls with the same large context (system prompt + documents) within the cache TTL.
881
+
882
+ ---
883
+
573
884
  ## Observability & Usage Tracking
574
885
 
575
886
  Every class provides consistent observability hooks.
@@ -954,7 +1265,7 @@ const result = await chat.send('Find users who signed up in the last 7 days');
954
1265
 
955
1266
  ```javascript
956
1267
  // Named exports
957
- import { Transformer, Chat, Message, ToolAgent, CodeAgent, RagAgent, BaseGemini, log } from 'ak-gemini';
1268
+ import { Transformer, Chat, Message, ToolAgent, CodeAgent, RagAgent, Embedding, BaseGemini, log } from 'ak-gemini';
958
1269
  import { extractJSON, attemptJSONRecovery } from 'ak-gemini';
959
1270
  import { ThinkingLevel, HarmCategory, HarmBlockThreshold } from 'ak-gemini';
960
1271
 
@@ -962,7 +1273,7 @@ import { ThinkingLevel, HarmCategory, HarmBlockThreshold } from 'ak-gemini';
962
1273
  import AI from 'ak-gemini';
963
1274
 
964
1275
  // CommonJS
965
- const { Transformer, Chat } = require('ak-gemini');
1276
+ const { Transformer, Chat, Embedding } = require('ak-gemini');
966
1277
  ```
967
1278
 
968
1279
  ### Constructor Options (All Classes)
@@ -980,6 +1291,9 @@ const { Transformer, Chat } = require('ak-gemini');
980
1291
  | `maxOutputTokens` | number \| null | `50000` |
981
1292
  | `logLevel` | string | based on `NODE_ENV` |
982
1293
  | `labels` | object | `{}` (Vertex AI only) |
1294
+ | `enableGrounding` | boolean | `false` |
1295
+ | `groundingConfig` | object | `{}` |
1296
+ | `cachedContent` | string | `null` |
983
1297
 
984
1298
  ### Methods Available on All Classes
985
1299
 
@@ -992,3 +1306,9 @@ const { Transformer, Chat } = require('ak-gemini');
992
1306
  | `getLastUsage()` | `UsageData \| null` | Token usage from last call |
993
1307
  | `estimate(payload)` | `Promise<{ inputTokens }>` | Estimate input tokens |
994
1308
  | `estimateCost(payload)` | `Promise<object>` | Estimate cost in dollars |
1309
+ | `createCache(config?)` | `Promise<CachedContentInfo>` | Create a context cache |
1310
+ | `getCache(name)` | `Promise<CachedContentInfo>` | Get cache details |
1311
+ | `listCaches()` | `Promise<CachedContentInfo[]>` | List all caches |
1312
+ | `updateCache(name, config?)` | `Promise<CachedContentInfo>` | Update cache TTL |
1313
+ | `deleteCache(name)` | `Promise<void>` | Delete a cache |
1314
+ | `useCache(name)` | `Promise<void>` | Attach a cache to this instance |
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # ak-gemini
2
2
 
3
- **Modular, type-safe wrapper for Google's Gemini AI.** Five class exports for different interaction patterns — JSON transformation, chat, stateless messages, tool-using agents, and code-writing agents — all sharing a common base.
3
+ **Modular, type-safe wrapper for Google's Gemini AI.** Seven class exports for different interaction patterns — JSON transformation, chat, stateless messages, tool-using agents, code-writing agents, document Q&A, and embeddings — all sharing a common base.
4
4
 
5
5
  ```sh
6
6
  npm install ak-gemini
@@ -17,7 +17,7 @@ export GEMINI_API_KEY=your-key
17
17
  ```
18
18
 
19
19
  ```javascript
20
- import { Transformer, Chat, Message, ToolAgent, CodeAgent } from 'ak-gemini';
20
+ import { Transformer, Chat, Message, ToolAgent, CodeAgent, RagAgent, Embedding } from 'ak-gemini';
21
21
  ```
22
22
 
23
23
  ---
@@ -176,6 +176,27 @@ for await (const event of agent.stream('Refactor the auth module')) {
176
176
  }
177
177
  ```
178
178
 
179
+ ### Embedding — Vector Embeddings
180
+
181
+ Generate vector embeddings for similarity search, clustering, and classification.
182
+
183
+ ```javascript
184
+ const embedder = new Embedding({
185
+ modelName: 'gemini-embedding-001', // default
186
+ taskType: 'RETRIEVAL_DOCUMENT'
187
+ });
188
+
189
+ // Single text
190
+ const result = await embedder.embed('Hello world');
191
+ console.log(result.values); // [0.012, -0.034, ...]
192
+
193
+ // Batch
194
+ const results = await embedder.embedBatch(['Hello', 'World']);
195
+
196
+ // Cosine similarity (pure math, no API call)
197
+ const score = embedder.similarity(results[0].values, results[1].values);
198
+ ```
199
+
179
200
  ---
180
201
 
181
202
  ## Stopping Agents
@@ -252,6 +273,43 @@ new Chat({
252
273
  });
253
274
  ```
254
275
 
276
+ ### Google Search Grounding
277
+
278
+ Ground responses in real-time web search results. Available on all classes.
279
+
280
+ ```javascript
281
+ const chat = new Chat({
282
+ enableGrounding: true,
283
+ groundingConfig: { excludeDomains: ['example.com'] }
284
+ });
285
+
286
+ const result = await chat.send('Who won the 2026 Super Bowl?');
287
+ const sources = result.usage?.groundingMetadata?.groundingChunks;
288
+ ```
289
+
290
+ **Warning**: Google Search grounding costs ~$35/1k queries.
291
+
292
+ ### Context Caching
293
+
294
+ Reduce costs by caching repeated system prompts, documents, or tool definitions.
295
+
296
+ ```javascript
297
+ const chat = new Chat({ systemPrompt: longSystemPrompt });
298
+
299
+ // Create a cache
300
+ const cache = await chat.createCache({
301
+ ttl: '3600s',
302
+ displayName: 'my-system-prompt-cache'
303
+ });
304
+
305
+ // Use the cache (subsequent calls use cached tokens at reduced cost)
306
+ await chat.useCache(cache.name);
307
+ const result = await chat.send('Hello!');
308
+
309
+ // Clean up
310
+ await chat.deleteCache(cache.name);
311
+ ```
312
+
255
313
  ### Billing Labels (Vertex AI)
256
314
 
257
315
  ```javascript
@@ -281,6 +339,9 @@ All classes accept `BaseGeminiOptions`:
281
339
  | `maxOutputTokens` | number | `50000` | Max tokens in response (`null` removes limit) |
282
340
  | `logLevel` | string | based on NODE_ENV | `'trace'`\|`'debug'`\|`'info'`\|`'warn'`\|`'error'`\|`'none'` |
283
341
  | `labels` | object | — | Billing labels (Vertex AI) |
342
+ | `enableGrounding` | boolean | `false` | Enable Google Search grounding |
343
+ | `groundingConfig` | object | — | Grounding config (excludeDomains, timeRangeFilter) |
344
+ | `cachedContent` | string | — | Cached content resource name |
284
345
 
285
346
  ### Transformer-Specific
286
347
 
@@ -293,8 +354,6 @@ All classes accept `BaseGeminiOptions`:
293
354
  | `retryDelay` | number | `1000` | Initial retry delay (ms) |
294
355
  | `responseSchema` | object | — | JSON schema for output validation |
295
356
  | `asyncValidator` | function | — | Global async validator |
296
- | `enableGrounding` | boolean | `false` | Enable Google Search grounding |
297
-
298
357
  ### ToolAgent-Specific
299
358
 
300
359
  | Option | Type | Default | Description |
@@ -322,21 +381,31 @@ All classes accept `BaseGeminiOptions`:
322
381
  | `responseSchema` | object | — | Schema for structured output |
323
382
  | `responseMimeType` | string | — | e.g. `'application/json'` |
324
383
 
384
+ ### Embedding-Specific
385
+
386
+ | Option | Type | Default | Description |
387
+ |--------|------|---------|-------------|
388
+ | `taskType` | string | — | `'RETRIEVAL_DOCUMENT'`, `'RETRIEVAL_QUERY'`, `'SEMANTIC_SIMILARITY'`, `'CLUSTERING'`, `'CLASSIFICATION'` |
389
+ | `title` | string | — | Document title (only with `RETRIEVAL_DOCUMENT`) |
390
+ | `outputDimensionality` | number | — | Output vector dimensions |
391
+ | `autoTruncate` | boolean | `true` | Auto-truncate long inputs |
392
+
325
393
  ---
326
394
 
327
395
  ## Exports
328
396
 
329
397
  ```javascript
330
398
  // Named exports
331
- import { Transformer, Chat, Message, ToolAgent, CodeAgent, BaseGemini, log } from 'ak-gemini';
399
+ import { Transformer, Chat, Message, ToolAgent, CodeAgent, RagAgent, Embedding, BaseGemini, log } from 'ak-gemini';
332
400
  import { extractJSON, attemptJSONRecovery } from 'ak-gemini';
333
401
 
334
402
  // Default export (namespace)
335
403
  import AI from 'ak-gemini';
336
404
  new AI.Transformer({ ... });
405
+ new AI.Embedding({ ... });
337
406
 
338
407
  // CommonJS
339
- const { Transformer, Chat } = require('ak-gemini');
408
+ const { Transformer, Chat, Embedding } = require('ak-gemini');
340
409
  ```
341
410
 
342
411
  ---
package/base.js CHANGED
@@ -5,7 +5,7 @@
5
5
  */
6
6
 
7
7
  import dotenv from 'dotenv';
8
- dotenv.config();
8
+ dotenv.config({ quiet: true });
9
9
  const { NODE_ENV = "unknown", LOG_LEVEL = "" } = process.env;
10
10
 
11
11
  import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from '@google/genai';
@@ -43,7 +43,8 @@ const MODEL_PRICING = {
43
43
  'gemini-3-pro': { input: 2.00, output: 12.00 },
44
44
  'gemini-3-pro-preview': { input: 2.00, output: 12.00 },
45
45
  'gemini-2.0-flash': { input: 0.10, output: 0.40 },
46
- 'gemini-2.0-flash-lite': { input: 0.02, output: 0.10 }
46
+ 'gemini-2.0-flash-lite': { input: 0.02, output: 0.10 },
47
+ 'gemini-embedding-001': { input: 0.006, output: 0 }
47
48
  };
48
49
 
49
50
  export { DEFAULT_SAFETY_SETTINGS, DEFAULT_THINKING_CONFIG, THINKING_SUPPORTED_MODELS, MODEL_PRICING, DEFAULT_MAX_OUTPUT_TOKENS };
@@ -99,6 +100,13 @@ class BaseGemini {
99
100
  // ── Labels ──
100
101
  this.labels = options.labels || {};
101
102
 
103
+ // ── Grounding ──
104
+ this.enableGrounding = options.enableGrounding || false;
105
+ this.groundingConfig = options.groundingConfig || {};
106
+
107
+ // ── Caching ──
108
+ this.cachedContent = options.cachedContent || null;
109
+
102
110
  // ── Chat Config ──
103
111
  this.chatConfig = {
104
112
  temperature: 0.7,
@@ -197,14 +205,24 @@ class BaseGemini {
197
205
  * @protected
198
206
  */
199
207
  _getChatCreateOptions() {
200
- return {
208
+ const opts = {
201
209
  model: this.modelName,
202
210
  config: {
203
211
  ...this.chatConfig,
204
- ...(this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels })
212
+ ...(this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }),
213
+ ...(this.cachedContent && { cachedContent: this.cachedContent })
205
214
  },
206
215
  history: []
207
216
  };
217
+
218
+ // Merge grounding into tools (preserving existing tools like functionDeclarations)
219
+ if (this.enableGrounding) {
220
+ const existingTools = opts.config.tools || [];
221
+ opts.config.tools = [...existingTools, { googleSearch: this.groundingConfig }];
222
+ log.debug('Search grounding ENABLED (WARNING: costs $35/1k queries)');
223
+ }
224
+
225
+ return opts;
208
226
  }
209
227
 
210
228
  // ── Chat Session Management ──────────────────────────────────────────────
@@ -344,7 +362,8 @@ class BaseGemini {
344
362
  promptTokens: response.usageMetadata?.promptTokenCount || 0,
345
363
  responseTokens: response.usageMetadata?.candidatesTokenCount || 0,
346
364
  totalTokens: response.usageMetadata?.totalTokenCount || 0,
347
- timestamp: Date.now()
365
+ timestamp: Date.now(),
366
+ groundingMetadata: response.candidates?.[0]?.groundingMetadata || null
348
367
  };
349
368
  }
350
369
 
@@ -367,7 +386,8 @@ class BaseGemini {
367
386
  attempts: useCumulative ? cumulative.attempts : 1,
368
387
  modelVersion: meta.modelVersion,
369
388
  requestedModel: meta.requestedModel,
370
- timestamp: meta.timestamp
389
+ timestamp: meta.timestamp,
390
+ groundingMetadata: meta.groundingMetadata || null
371
391
  };
372
392
  }
373
393
 
@@ -425,6 +445,112 @@ class BaseGemini {
425
445
  };
426
446
  }
427
447
 
448
+ // ── Context Caching ─────────────────────────────────────────────────────
449
+
450
+ /**
451
+ * Creates a cached content resource for cost reduction on repeated prompts.
452
+ * Auto-populates model and systemInstruction from this instance if not provided.
453
+ * @param {Object} [config={}] - Cache configuration
454
+ * @param {string} [config.model] - Model (defaults to this.modelName)
455
+ * @param {string} [config.ttl] - Time-to-live (e.g., '3600s')
456
+ * @param {string} [config.displayName] - Human-readable name
457
+ * @param {Array} [config.contents] - Content to cache
458
+ * @param {string} [config.systemInstruction] - System prompt to cache (defaults to this.systemPrompt)
459
+ * @param {Array} [config.tools] - Tools to cache
460
+ * @param {Object} [config.toolConfig] - Tool configuration to cache
461
+ * @returns {Promise<Object>} The created cache resource
462
+ */
463
+ async createCache(config = {}) {
464
+ const cacheConfig = {};
465
+ if (config.ttl) cacheConfig.ttl = config.ttl;
466
+ if (config.displayName) cacheConfig.displayName = config.displayName;
467
+ if (config.contents) cacheConfig.contents = config.contents;
468
+ if (config.tools) cacheConfig.tools = config.tools;
469
+ if (config.toolConfig) cacheConfig.toolConfig = config.toolConfig;
470
+
471
+ // Auto-populate systemInstruction from instance if not provided
472
+ const sysInstruction = config.systemInstruction !== undefined ? config.systemInstruction : this.systemPrompt;
473
+ if (sysInstruction) cacheConfig.systemInstruction = sysInstruction;
474
+
475
+ const cached = await this.genAIClient.caches.create({
476
+ model: config.model || this.modelName,
477
+ config: cacheConfig
478
+ });
479
+
480
+ log.debug(`Cache created: ${cached.name}`);
481
+ return cached;
482
+ }
483
+
484
+ /**
485
+ * Retrieves a cached content resource by name.
486
+ * @param {string} cacheName - Server-generated resource name
487
+ * @returns {Promise<Object>} The cached content resource
488
+ */
489
+ async getCache(cacheName) {
490
+ return await this.genAIClient.caches.get({ name: cacheName });
491
+ }
492
+
493
+ /**
494
+ * Lists all cached content resources.
495
+ * @returns {Promise<Array>} Array of cached content resources (pager is fully drained)
496
+ */
497
+ async listCaches() {
498
+ const pager = await this.genAIClient.caches.list();
499
+ const results = [];
500
+ for await (const cache of pager) {
501
+ results.push(cache);
502
+ }
503
+ return results;
504
+ }
505
+
506
+ /**
507
+ * Updates a cached content resource (TTL or expiration).
508
+ * @param {string} cacheName - Server-generated resource name
509
+ * @param {Object} [config={}] - Update config
510
+ * @param {string} [config.ttl] - New TTL (e.g., '7200s')
511
+ * @param {string} [config.expireTime] - New expiration (RFC 3339)
512
+ * @returns {Promise<Object>} The updated cache resource
513
+ */
514
+ async updateCache(cacheName, config = {}) {
515
+ return await this.genAIClient.caches.update({
516
+ name: cacheName,
517
+ config: {
518
+ ...(config.ttl && { ttl: config.ttl }),
519
+ ...(config.expireTime && { expireTime: config.expireTime })
520
+ }
521
+ });
522
+ }
523
+
524
+ /**
525
+ * Deletes a cached content resource.
526
+ * Clears this.cachedContent if it matches the deleted cache.
527
+ * @param {string} cacheName - Server-generated resource name
528
+ * @returns {Promise<void>}
529
+ */
530
+ async deleteCache(cacheName) {
531
+ await this.genAIClient.caches.delete({ name: cacheName });
532
+ log.debug(`Cache deleted: ${cacheName}`);
533
+ if (this.cachedContent === cacheName) {
534
+ this.cachedContent = null;
535
+ }
536
+ }
537
+
538
+ /**
539
+ * Sets the cached content for this instance and reinitializes the session.
540
+ * @param {string} cacheName - Server-generated cache resource name
541
+ * @returns {Promise<void>}
542
+ */
543
+ async useCache(cacheName) {
544
+ this.cachedContent = cacheName;
545
+ // When using cached content, remove systemInstruction from chatConfig
546
+ // since it's already baked into the cache — the API rejects duplicates
547
+ delete this.chatConfig.systemInstruction;
548
+ if (this.chatSession) {
549
+ await this.init(true);
550
+ }
551
+ log.debug(`Using cache: ${cacheName}`);
552
+ }
553
+
428
554
  // ── Private Helpers ──────────────────────────────────────────────────────
429
555
 
430
556
  /**
package/index.cjs CHANGED
@@ -32,6 +32,7 @@ __export(index_exports, {
32
32
  BaseGemini: () => base_default,
33
33
  Chat: () => chat_default,
34
34
  CodeAgent: () => code_agent_default,
35
+ Embedding: () => Embedding,
35
36
  HarmBlockThreshold: () => import_genai2.HarmBlockThreshold,
36
37
  HarmCategory: () => import_genai2.HarmCategory,
37
38
  Message: () => message_default,
@@ -310,7 +311,7 @@ function extractJSON(text) {
310
311
  }
311
312
 
312
313
  // base.js
313
- import_dotenv.default.config();
314
+ import_dotenv.default.config({ quiet: true });
314
315
  var { NODE_ENV = "unknown", LOG_LEVEL = "" } = process.env;
315
316
  var DEFAULT_SAFETY_SETTINGS = [
316
317
  { category: import_genai.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: import_genai.HarmBlockThreshold.BLOCK_NONE },
@@ -335,7 +336,8 @@ var MODEL_PRICING = {
335
336
  "gemini-3-pro": { input: 2, output: 12 },
336
337
  "gemini-3-pro-preview": { input: 2, output: 12 },
337
338
  "gemini-2.0-flash": { input: 0.1, output: 0.4 },
338
- "gemini-2.0-flash-lite": { input: 0.02, output: 0.1 }
339
+ "gemini-2.0-flash-lite": { input: 0.02, output: 0.1 },
340
+ "gemini-embedding-001": { input: 6e-3, output: 0 }
339
341
  };
340
342
  var BaseGemini = class {
341
343
  /**
@@ -361,6 +363,9 @@ var BaseGemini = class {
361
363
  }
362
364
  this._configureLogLevel(options.logLevel);
363
365
  this.labels = options.labels || {};
366
+ this.enableGrounding = options.enableGrounding || false;
367
+ this.groundingConfig = options.groundingConfig || {};
368
+ this.cachedContent = options.cachedContent || null;
364
369
  this.chatConfig = {
365
370
  temperature: 0.7,
366
371
  topP: 0.95,
@@ -433,14 +438,21 @@ var BaseGemini = class {
433
438
  * @protected
434
439
  */
435
440
  _getChatCreateOptions() {
436
- return {
441
+ const opts = {
437
442
  model: this.modelName,
438
443
  config: {
439
444
  ...this.chatConfig,
440
- ...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels }
445
+ ...this.vertexai && Object.keys(this.labels).length > 0 && { labels: this.labels },
446
+ ...this.cachedContent && { cachedContent: this.cachedContent }
441
447
  },
442
448
  history: []
443
449
  };
450
+ if (this.enableGrounding) {
451
+ const existingTools = opts.config.tools || [];
452
+ opts.config.tools = [...existingTools, { googleSearch: this.groundingConfig }];
453
+ logger_default.debug("Search grounding ENABLED (WARNING: costs $35/1k queries)");
454
+ }
455
+ return opts;
444
456
  }
445
457
  // ── Chat Session Management ──────────────────────────────────────────────
446
458
  /**
@@ -562,7 +574,8 @@ ${contextText}
562
574
  promptTokens: response.usageMetadata?.promptTokenCount || 0,
563
575
  responseTokens: response.usageMetadata?.candidatesTokenCount || 0,
564
576
  totalTokens: response.usageMetadata?.totalTokenCount || 0,
565
- timestamp: Date.now()
577
+ timestamp: Date.now(),
578
+ groundingMetadata: response.candidates?.[0]?.groundingMetadata || null
566
579
  };
567
580
  }
568
581
  /**
@@ -582,7 +595,8 @@ ${contextText}
582
595
  attempts: useCumulative ? cumulative.attempts : 1,
583
596
  modelVersion: meta.modelVersion,
584
597
  requestedModel: meta.requestedModel,
585
- timestamp: meta.timestamp
598
+ timestamp: meta.timestamp,
599
+ groundingMetadata: meta.groundingMetadata || null
586
600
  };
587
601
  }
588
602
  // ── Token Estimation ─────────────────────────────────────────────────────
@@ -627,6 +641,99 @@ ${contextText}
627
641
  note: "Cost is for input tokens only; output cost depends on response length"
628
642
  };
629
643
  }
644
+ // ── Context Caching ─────────────────────────────────────────────────────
645
+ /**
646
+ * Creates a cached content resource for cost reduction on repeated prompts.
647
+ * Auto-populates model and systemInstruction from this instance if not provided.
648
+ * @param {Object} [config={}] - Cache configuration
649
+ * @param {string} [config.model] - Model (defaults to this.modelName)
650
+ * @param {string} [config.ttl] - Time-to-live (e.g., '3600s')
651
+ * @param {string} [config.displayName] - Human-readable name
652
+ * @param {Array} [config.contents] - Content to cache
653
+ * @param {string} [config.systemInstruction] - System prompt to cache (defaults to this.systemPrompt)
654
+ * @param {Array} [config.tools] - Tools to cache
655
+ * @param {Object} [config.toolConfig] - Tool configuration to cache
656
+ * @returns {Promise<Object>} The created cache resource
657
+ */
658
+ async createCache(config = {}) {
659
+ const cacheConfig = {};
660
+ if (config.ttl) cacheConfig.ttl = config.ttl;
661
+ if (config.displayName) cacheConfig.displayName = config.displayName;
662
+ if (config.contents) cacheConfig.contents = config.contents;
663
+ if (config.tools) cacheConfig.tools = config.tools;
664
+ if (config.toolConfig) cacheConfig.toolConfig = config.toolConfig;
665
+ const sysInstruction = config.systemInstruction !== void 0 ? config.systemInstruction : this.systemPrompt;
666
+ if (sysInstruction) cacheConfig.systemInstruction = sysInstruction;
667
+ const cached = await this.genAIClient.caches.create({
668
+ model: config.model || this.modelName,
669
+ config: cacheConfig
670
+ });
671
+ logger_default.debug(`Cache created: ${cached.name}`);
672
+ return cached;
673
+ }
674
+ /**
675
+ * Retrieves a cached content resource by name.
676
+ * @param {string} cacheName - Server-generated resource name
677
+ * @returns {Promise<Object>} The cached content resource
678
+ */
679
+ async getCache(cacheName) {
680
+ return await this.genAIClient.caches.get({ name: cacheName });
681
+ }
682
+ /**
683
+ * Lists all cached content resources.
684
+ * @returns {Promise<Object>} Pager of cached content resources
685
+ */
686
+ async listCaches() {
687
+ const pager = await this.genAIClient.caches.list();
688
+ const results = [];
689
+ for await (const cache of pager) {
690
+ results.push(cache);
691
+ }
692
+ return results;
693
+ }
694
+ /**
695
+ * Updates a cached content resource (TTL or expiration).
696
+ * @param {string} cacheName - Server-generated resource name
697
+ * @param {Object} [config={}] - Update config
698
+ * @param {string} [config.ttl] - New TTL (e.g., '7200s')
699
+ * @param {string} [config.expireTime] - New expiration (RFC 3339)
700
+ * @returns {Promise<Object>} The updated cache resource
701
+ */
702
+ async updateCache(cacheName, config = {}) {
703
+ return await this.genAIClient.caches.update({
704
+ name: cacheName,
705
+ config: {
706
+ ...config.ttl && { ttl: config.ttl },
707
+ ...config.expireTime && { expireTime: config.expireTime }
708
+ }
709
+ });
710
+ }
711
+ /**
712
+ * Deletes a cached content resource.
713
+ * Clears this.cachedContent if it matches the deleted cache.
714
+ * @param {string} cacheName - Server-generated resource name
715
+ * @returns {Promise<void>}
716
+ */
717
+ async deleteCache(cacheName) {
718
+ await this.genAIClient.caches.delete({ name: cacheName });
719
+ logger_default.debug(`Cache deleted: ${cacheName}`);
720
+ if (this.cachedContent === cacheName) {
721
+ this.cachedContent = null;
722
+ }
723
+ }
724
+ /**
725
+ * Sets the cached content for this instance and reinitializes the session.
726
+ * @param {string} cacheName - Server-generated cache resource name
727
+ * @returns {Promise<void>}
728
+ */
729
+ async useCache(cacheName) {
730
+ this.cachedContent = cacheName;
731
+ delete this.chatConfig.systemInstruction;
732
+ if (this.chatSession) {
733
+ await this.init(true);
734
+ }
735
+ logger_default.debug(`Using cache: ${cacheName}`);
736
+ }
630
737
  // ── Private Helpers ──────────────────────────────────────────────────────
631
738
  /**
632
739
  * Configures the log level based on options, env vars, or NODE_ENV.
@@ -722,20 +829,8 @@ var Transformer = class extends base_default {
722
829
  this.asyncValidator = options.asyncValidator || null;
723
830
  this.maxRetries = options.maxRetries || 3;
724
831
  this.retryDelay = options.retryDelay || 1e3;
725
- this.enableGrounding = options.enableGrounding || false;
726
- this.groundingConfig = options.groundingConfig || {};
727
832
  logger_default.debug(`Transformer keys \u2014 Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
728
833
  }
729
- // ── Chat Create Options Override ──────────────────────────────────────────
730
- /** @protected */
731
- _getChatCreateOptions() {
732
- const opts = super._getChatCreateOptions();
733
- if (this.enableGrounding) {
734
- opts.config.tools = [{ googleSearch: this.groundingConfig }];
735
- logger_default.debug(`Search grounding ENABLED (WARNING: costs $35/1k queries)`);
736
- }
737
- return opts;
738
- }
739
834
  // ── Seeding ──────────────────────────────────────────────────────────────
740
835
  /**
741
836
  * Seeds the chat with transformation examples using the configured key mapping.
@@ -2221,14 +2316,152 @@ ${serialized}` });
2221
2316
  };
2222
2317
  var rag_agent_default = RagAgent;
2223
2318
 
2319
+ // embedding.js
2320
+ var Embedding = class extends base_default {
2321
+ /**
2322
+ * @param {import('./types.d.ts').EmbeddingOptions} [options={}]
2323
+ */
2324
+ constructor(options = {}) {
2325
+ if (options.modelName === void 0) {
2326
+ options = { ...options, modelName: "gemini-embedding-001" };
2327
+ }
2328
+ if (options.systemPrompt === void 0) {
2329
+ options = { ...options, systemPrompt: null };
2330
+ }
2331
+ super(options);
2332
+ this.taskType = options.taskType || null;
2333
+ this.title = options.title || null;
2334
+ this.outputDimensionality = options.outputDimensionality || null;
2335
+ this.autoTruncate = options.autoTruncate ?? true;
2336
+ logger_default.debug(`Embedding created with model: ${this.modelName}`);
2337
+ }
2338
+ /**
2339
+ * Initialize the Embedding client.
2340
+ * Override: validates API connection only, NO chat session (stateless).
2341
+ * @param {boolean} [force=false]
2342
+ * @returns {Promise<void>}
2343
+ */
2344
+ async init(force = false) {
2345
+ if (this._initialized && !force) return;
2346
+ logger_default.debug(`Initializing ${this.constructor.name} with model: ${this.modelName}...`);
2347
+ try {
2348
+ await this.genAIClient.models.list();
2349
+ logger_default.debug(`${this.constructor.name}: API connection successful.`);
2350
+ } catch (e) {
2351
+ throw new Error(`${this.constructor.name} initialization failed: ${e.message}`);
2352
+ }
2353
+ this._initialized = true;
2354
+ logger_default.debug(`${this.constructor.name}: Initialized (stateless mode).`);
2355
+ }
2356
+ /**
2357
+ * Builds the config object for embedContent calls.
2358
+ * @param {Object} [overrides={}] - Per-call config overrides
2359
+ * @returns {Object} The config object
2360
+ * @private
2361
+ */
2362
+ _buildConfig(overrides = {}) {
2363
+ const config = {};
2364
+ const taskType = overrides.taskType || this.taskType;
2365
+ const title = overrides.title || this.title;
2366
+ const dims = overrides.outputDimensionality || this.outputDimensionality;
2367
+ if (taskType) config.taskType = taskType;
2368
+ if (title) config.title = title;
2369
+ if (dims) config.outputDimensionality = dims;
2370
+ return config;
2371
+ }
2372
+ /**
2373
+ * Embed a single text string.
2374
+ * @param {string} text - The text to embed
2375
+ * @param {Object} [config={}] - Per-call config overrides
2376
+ * @param {string} [config.taskType] - Override task type
2377
+ * @param {string} [config.title] - Override title
2378
+ * @param {number} [config.outputDimensionality] - Override dimensions
2379
+
2380
+ * @returns {Promise<import('./types.d.ts').EmbeddingResult>} The embedding result
2381
+ */
2382
+ async embed(text, config = {}) {
2383
+ if (!this._initialized) await this.init();
2384
+ const result = await this.genAIClient.models.embedContent({
2385
+ model: this.modelName,
2386
+ contents: text,
2387
+ config: this._buildConfig(config)
2388
+ });
2389
+ return result.embeddings[0];
2390
+ }
2391
+ /**
2392
+ * Embed multiple text strings in a single API call.
2393
+ * @param {string[]} texts - Array of texts to embed
2394
+ * @param {Object} [config={}] - Per-call config overrides
2395
+ * @param {string} [config.taskType] - Override task type
2396
+ * @param {string} [config.title] - Override title
2397
+ * @param {number} [config.outputDimensionality] - Override dimensions
2398
+
2399
+ * @returns {Promise<import('./types.d.ts').EmbeddingResult[]>} Array of embedding results
2400
+ */
2401
+ async embedBatch(texts, config = {}) {
2402
+ if (!this._initialized) await this.init();
2403
+ const result = await this.genAIClient.models.embedContent({
2404
+ model: this.modelName,
2405
+ contents: texts,
2406
+ config: this._buildConfig(config)
2407
+ });
2408
+ return result.embeddings;
2409
+ }
2410
+ /**
2411
+ * Compute cosine similarity between two embedding vectors.
2412
+ * Pure math — no API call.
2413
+ * @param {number[]} a - First embedding vector
2414
+ * @param {number[]} b - Second embedding vector
2415
+ * @returns {number} Cosine similarity between -1 and 1
2416
+ */
2417
+ similarity(a, b) {
2418
+ if (!a || !b || a.length !== b.length) {
2419
+ throw new Error("Vectors must be non-null and have the same length");
2420
+ }
2421
+ let dot = 0;
2422
+ let magA = 0;
2423
+ let magB = 0;
2424
+ for (let i = 0; i < a.length; i++) {
2425
+ dot += a[i] * b[i];
2426
+ magA += a[i] * a[i];
2427
+ magB += b[i] * b[i];
2428
+ }
2429
+ const magnitude = Math.sqrt(magA) * Math.sqrt(magB);
2430
+ if (magnitude === 0) return 0;
2431
+ return dot / magnitude;
2432
+ }
2433
+ // ── No-ops (embeddings don't use chat sessions) ──
2434
+ /** @returns {any[]} Always returns empty array */
2435
+ getHistory() {
2436
+ return [];
2437
+ }
2438
+ /** No-op for Embedding */
2439
+ async clearHistory() {
2440
+ }
2441
+ /** No-op for Embedding */
2442
+ async seed() {
2443
+ logger_default.warn("Embedding.seed() is a no-op \u2014 embeddings do not support few-shot examples.");
2444
+ return [];
2445
+ }
2446
+ /**
2447
+ * @param {any} _nextPayload
2448
+ * @throws {Error} Embedding does not support token estimation
2449
+ * @returns {Promise<{inputTokens: number}>}
2450
+ */
2451
+ async estimate(_nextPayload) {
2452
+ throw new Error("Embedding does not support token estimation. Use embed() directly.");
2453
+ }
2454
+ };
2455
+
2224
2456
  // index.js
2225
2457
  var import_genai2 = require("@google/genai");
2226
- var index_default = { Transformer: transformer_default, Chat: chat_default, Message: message_default, ToolAgent: tool_agent_default, CodeAgent: code_agent_default, RagAgent: rag_agent_default };
2458
+ var index_default = { Transformer: transformer_default, Chat: chat_default, Message: message_default, ToolAgent: tool_agent_default, CodeAgent: code_agent_default, RagAgent: rag_agent_default, Embedding };
2227
2459
  // Annotate the CommonJS export names for ESM import in node:
2228
2460
  0 && (module.exports = {
2229
2461
  BaseGemini,
2230
2462
  Chat,
2231
2463
  CodeAgent,
2464
+ Embedding,
2232
2465
  HarmBlockThreshold,
2233
2466
  HarmCategory,
2234
2467
  Message,
package/index.js CHANGED
@@ -26,6 +26,7 @@ export { default as Message } from './message.js';
26
26
  export { default as ToolAgent } from './tool-agent.js';
27
27
  export { default as CodeAgent } from './code-agent.js';
28
28
  export { default as RagAgent } from './rag-agent.js';
29
+ export { default as Embedding } from './embedding.js';
29
30
  export { default as BaseGemini } from './base.js';
30
31
  export { default as log } from './logger.js';
31
32
  export { ThinkingLevel, HarmCategory, HarmBlockThreshold } from '@google/genai';
@@ -39,5 +40,6 @@ import Message from './message.js';
39
40
  import ToolAgent from './tool-agent.js';
40
41
  import CodeAgent from './code-agent.js';
41
42
  import RagAgent from './rag-agent.js';
43
+ import Embedding from './embedding.js';
42
44
 
43
- export default { Transformer, Chat, Message, ToolAgent, CodeAgent, RagAgent };
45
+ export default { Transformer, Chat, Message, ToolAgent, CodeAgent, RagAgent, Embedding };
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "name": "ak-gemini",
3
3
  "author": "ak@mixpanel.com",
4
4
  "description": "AK's Generative AI Helper for doing... everything",
5
- "version": "2.0.2",
5
+ "version": "2.0.3",
6
6
  "main": "index.js",
7
7
  "files": [
8
8
  "index.js",
package/transformer.js CHANGED
@@ -96,27 +96,9 @@ class Transformer extends BaseGemini {
96
96
  this.maxRetries = options.maxRetries || 3;
97
97
  this.retryDelay = options.retryDelay || 1000;
98
98
 
99
- // ── Grounding ──
100
- this.enableGrounding = options.enableGrounding || false;
101
- this.groundingConfig = options.groundingConfig || {};
102
-
103
99
  log.debug(`Transformer keys — Source: "${this.promptKey}", Target: "${this.answerKey}", Context: "${this.contextKey}"`);
104
100
  }
105
101
 
106
- // ── Chat Create Options Override ──────────────────────────────────────────
107
-
108
- /** @protected */
109
- _getChatCreateOptions() {
110
- const opts = super._getChatCreateOptions();
111
-
112
- if (this.enableGrounding) {
113
- opts.config.tools = [{ googleSearch: this.groundingConfig }];
114
- log.debug(`Search grounding ENABLED (WARNING: costs $35/1k queries)`);
115
- }
116
-
117
- return opts;
118
- }
119
-
120
102
  // ── Seeding ──────────────────────────────────────────────────────────────
121
103
 
122
104
  /**
package/types.d.ts CHANGED
@@ -32,6 +32,23 @@ export interface ChatConfig {
32
32
  [key: string]: any;
33
33
  }
34
34
 
35
+ export interface GroundingChunk {
36
+ web?: { uri?: string; title?: string; domain?: string };
37
+ }
38
+
39
+ export interface GroundingSupport {
40
+ segment?: any;
41
+ groundingChunkIndices?: number[];
42
+ confidenceScores?: number[];
43
+ }
44
+
45
+ export interface GroundingMetadata {
46
+ groundingChunks?: GroundingChunk[];
47
+ groundingSupports?: GroundingSupport[];
48
+ webSearchQueries?: string[];
49
+ searchEntryPoint?: { renderedContent?: string };
50
+ }
51
+
35
52
  export interface ResponseMetadata {
36
53
  modelVersion: string | null;
37
54
  requestedModel: string;
@@ -39,6 +56,7 @@ export interface ResponseMetadata {
39
56
  responseTokens: number;
40
57
  totalTokens: number;
41
58
  timestamp: number;
59
+ groundingMetadata?: GroundingMetadata | null;
42
60
  }
43
61
 
44
62
  export interface UsageData {
@@ -55,6 +73,7 @@ export interface UsageData {
55
73
  /** Model you requested (e.g., 'gemini-2.5-flash') */
56
74
  requestedModel: string;
57
75
  timestamp: number;
76
+ groundingMetadata?: GroundingMetadata | null;
58
77
  }
59
78
 
60
79
  export interface TransformationExample {
@@ -77,6 +96,38 @@ export interface GoogleAuthOptions {
77
96
  universeDomain?: string;
78
97
  }
79
98
 
99
+ export interface CacheConfig {
100
+ /** Model to cache for (defaults to instance modelName) */
101
+ model?: string;
102
+ /** Time-to-live duration (e.g., '3600s') */
103
+ ttl?: string;
104
+ /** Human-readable display name */
105
+ displayName?: string;
106
+ /** Content to cache */
107
+ contents?: any[];
108
+ /** System instruction to cache (defaults to instance systemPrompt) */
109
+ systemInstruction?: string;
110
+ /** Tools to cache */
111
+ tools?: any[];
112
+ /** Tool configuration to cache */
113
+ toolConfig?: any;
114
+ }
115
+
116
+ export interface CachedContentInfo {
117
+ /** Server-generated resource name */
118
+ name: string;
119
+ /** User-provided display name */
120
+ displayName?: string;
121
+ /** Model this cache is for */
122
+ model: string;
123
+ /** Creation timestamp */
124
+ createTime: string;
125
+ /** Expiration timestamp */
126
+ expireTime: string;
127
+ /** Cache usage metadata */
128
+ usageMetadata?: { totalTokenCount?: number };
129
+ }
130
+
80
131
  export type AsyncValidatorFunction = (payload: Record<string, unknown>) => Promise<unknown>;
81
132
  export type LogLevel = 'trace' | 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'none';
82
133
 
@@ -110,6 +161,14 @@ export interface BaseGeminiOptions {
110
161
 
111
162
  /** Billing labels for cost segmentation (Vertex AI only) */
112
163
  labels?: Record<string, string>;
164
+
165
+ /** Enable Google Search grounding (WARNING: costs $35/1k queries) */
166
+ enableGrounding?: boolean;
167
+ /** Google Search grounding configuration (searchTypes, excludeDomains, timeRangeFilter) */
168
+ groundingConfig?: Record<string, any>;
169
+
170
+ /** Cached content resource name to use for this session */
171
+ cachedContent?: string;
113
172
  }
114
173
 
115
174
  export interface TransformerOptions extends BaseGeminiOptions {
@@ -158,6 +217,35 @@ export interface MessageOptions extends BaseGeminiOptions {
158
217
  responseMimeType?: string;
159
218
  }
160
219
 
220
+ export type EmbeddingTaskType = 'RETRIEVAL_DOCUMENT' | 'RETRIEVAL_QUERY' | 'SEMANTIC_SIMILARITY' | 'CLUSTERING' | 'CLASSIFICATION' | 'QUESTION_ANSWERING' | 'FACT_VERIFICATION';
221
+
222
+ export interface EmbeddingOptions extends BaseGeminiOptions {
223
+ /** Embedding task type (affects how embeddings are optimized) */
224
+ taskType?: EmbeddingTaskType;
225
+ /** Title for the document being embedded (only with RETRIEVAL_DOCUMENT) */
226
+ title?: string;
227
+ /** Output dimensionality for the embedding vector */
228
+ outputDimensionality?: number;
229
+ /** Whether to auto-truncate long inputs (default: true) */
230
+ autoTruncate?: boolean;
231
+ }
232
+
233
+ export interface EmbedConfig {
234
+ /** Override task type for this call */
235
+ taskType?: EmbeddingTaskType;
236
+ /** Override title for this call */
237
+ title?: string;
238
+ /** Override output dimensionality for this call */
239
+ outputDimensionality?: number;
240
+ }
241
+
242
+ export interface EmbeddingResult {
243
+ /** The embedding vector */
244
+ values?: number[];
245
+ /** Embedding statistics (Vertex AI) */
246
+ statistics?: { tokenCount?: number; truncated?: boolean };
247
+ }
248
+
161
249
  /** Tool declaration in @google/genai FunctionDeclaration format */
162
250
  export interface ToolDeclaration {
163
251
  name: string;
@@ -374,6 +462,9 @@ export declare class BaseGemini {
374
462
  exampleCount: number;
375
463
  labels: Record<string, string>;
376
464
  vertexai: boolean;
465
+ enableGrounding: boolean;
466
+ groundingConfig: Record<string, any>;
467
+ cachedContent: string | null;
377
468
 
378
469
  init(force?: boolean): Promise<void>;
379
470
  seed(examples?: TransformationExample[], opts?: SeedOptions): Promise<any[]>;
@@ -388,6 +479,14 @@ export declare class BaseGemini {
388
479
  estimatedInputCost: number;
389
480
  note: string;
390
481
  }>;
482
+
483
+ // Context Caching
484
+ createCache(config?: CacheConfig): Promise<CachedContentInfo>;
485
+ getCache(cacheName: string): Promise<CachedContentInfo>;
486
+ listCaches(): Promise<CachedContentInfo[]>;
487
+ updateCache(cacheName: string, config?: { ttl?: string; expireTime?: string }): Promise<CachedContentInfo>;
488
+ deleteCache(cacheName: string): Promise<void>;
489
+ useCache(cacheName: string): Promise<void>;
391
490
  }
392
491
 
393
492
  export declare class Transformer extends BaseGemini {
@@ -401,8 +500,6 @@ export declare class Transformer extends BaseGemini {
401
500
  asyncValidator: AsyncValidatorFunction | null;
402
501
  maxRetries: number;
403
502
  retryDelay: number;
404
- enableGrounding: boolean;
405
-
406
503
  seed(examples?: TransformationExample[]): Promise<any[]>;
407
504
  send(payload: Record<string, unknown> | string, opts?: SendOptions, validatorFn?: AsyncValidatorFunction | null): Promise<Record<string, unknown>>;
408
505
  rawSend(payload: Record<string, unknown> | string, messageOptions?: { labels?: Record<string, string> }): Promise<Record<string, unknown>>;
@@ -496,6 +593,23 @@ export declare class CodeAgent extends BaseGemini {
496
593
  stop(): void;
497
594
  }
498
595
 
596
+ export declare class Embedding extends BaseGemini {
597
+ constructor(options?: EmbeddingOptions);
598
+
599
+ taskType: EmbeddingTaskType | null;
600
+ title: string | null;
601
+ outputDimensionality: number | null;
602
+ autoTruncate: boolean;
603
+
604
+ init(force?: boolean): Promise<void>;
605
+ /** Embed a single text string */
606
+ embed(text: string, config?: EmbedConfig): Promise<EmbeddingResult>;
607
+ /** Embed multiple text strings in a single API call */
608
+ embedBatch(texts: string[], config?: EmbedConfig): Promise<EmbeddingResult[]>;
609
+ /** Compute cosine similarity between two embedding vectors (-1 to 1) */
610
+ similarity(a: number[], b: number[]): number;
611
+ }
612
+
499
613
  // ── Module Exports ───────────────────────────────────────────────────────────
500
614
 
501
615
  export declare function extractJSON(text: string): any;
@@ -508,6 +622,7 @@ declare const _default: {
508
622
  ToolAgent: typeof ToolAgent;
509
623
  CodeAgent: typeof CodeAgent;
510
624
  RagAgent: typeof RagAgent;
625
+ Embedding: typeof Embedding;
511
626
  };
512
627
 
513
628
  export default _default;