@ai-sdk/google 3.0.12 → 3.0.14

This diff shows the contents of package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as published.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
 # @ai-sdk/google
 
+## 3.0.14
+
+### Patch Changes
+
+- Updated dependencies [462ad00]
+  - @ai-sdk/provider-utils@4.0.10
+
+## 3.0.13
+
+### Patch Changes
+
+- 4de5a1d: chore: excluded tests from src folder in npm package
+- Updated dependencies [4de5a1d]
+  - @ai-sdk/provider@3.0.5
+  - @ai-sdk/provider-utils@4.0.9
+
 ## 3.0.12
 
 ### Patch Changes
package/dist/index.js CHANGED
@@ -30,7 +30,7 @@ module.exports = __toCommonJS(src_exports);
 var import_provider_utils15 = require("@ai-sdk/provider-utils");
 
 // src/version.ts
-var VERSION = true ? "3.0.12" : "0.0.0-test";
+var VERSION = true ? "3.0.14" : "0.0.0-test";
 
 // src/google-generative-ai-embedding-model.ts
 var import_provider = require("@ai-sdk/provider");
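
A note on the `VERSION` constant: `src/version.ts` itself is not part of this diff, only the bundled output is. Below is a minimal sketch of one way that output shape can arise, assuming a build-time constant replacement; the `__HAS_PACKAGE_VERSION__` / `__PACKAGE_VERSION__` identifiers and the bundler `define` wiring are hypothetical, not taken from the package:

```ts
// Hypothetical src/version.ts: both constants are injected by the bundler.
declare const __HAS_PACKAGE_VERSION__: boolean;
declare const __PACKAGE_VERSION__: string;

export const VERSION = __HAS_PACKAGE_VERSION__
  ? __PACKAGE_VERSION__
  : '0.0.0-test';
```

With a define such as `{ __HAS_PACKAGE_VERSION__: 'true', __PACKAGE_VERSION__: '"3.0.14"' }`, the emitted bundle becomes exactly `var VERSION = true ? "3.0.14" : "0.0.0-test";`, matching the hunks in `dist/index.js` and `dist/index.mjs`.
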
package/dist/index.mjs CHANGED
@@ -7,7 +7,7 @@ import {
 } from "@ai-sdk/provider-utils";
 
 // src/version.ts
-var VERSION = true ? "3.0.12" : "0.0.0-test";
+var VERSION = true ? "3.0.14" : "0.0.0-test";
 
 // src/google-generative-ai-embedding-model.ts
 import {
@@ -362,7 +362,7 @@ const { text: meatLasagna, providerMetadata } = await generateText({
 });
 
 // Check cached token count in usage metadata
-console.log('Cached tokens:', providerMetadata.google?.usageMetadata);
+console.log('Cached tokens:', providerMetadata.google);
 // e.g.
 // {
 //   groundingMetadata: null,
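
The updated line logs the whole `providerMetadata.google` object rather than drilling into `usageMetadata`. If only the cached token count is of interest, a minimal sketch follows; the prompt is a placeholder, and the exact fields returned under `usageMetadata` (such as `cachedContentTokenCount`) depend on the Gemini response and are not guaranteed by this diff:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { providerMetadata } = await generateText({
  model: google('gemini-2.5-pro'),
  prompt: 'Summarize the shared context...',
});

// Provider metadata is loosely typed JSON, so inspect it before relying on fields.
console.log('Usage metadata:', providerMetadata?.google?.usageMetadata);
// On a cache hit this is expected to include a cached token count, e.g.
// { ..., cachedContentTokenCount: 1024, ... }
```
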
@@ -389,42 +389,45 @@ For guaranteed cost savings, you can still use explicit caching with Gemini 2.5
 
 ```ts
 import { google } from '@ai-sdk/google';
-import { GoogleAICacheManager } from '@google/generative-ai/server';
+import { GoogleGenAI } from '@google/genai';
 import { generateText } from 'ai';
 
-const cacheManager = new GoogleAICacheManager(
-  process.env.GOOGLE_GENERATIVE_AI_API_KEY,
-);
+const ai = new GoogleGenAI({
+  apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
+});
 
 const model = 'gemini-2.5-pro';
 
-const { name: cachedContent } = await cacheManager.create({
+// Create a cache with the content you want to reuse
+const cache = await ai.caches.create({
   model,
-  contents: [
-    {
-      role: 'user',
-      parts: [{ text: '1000 Lasagna Recipes...' }],
-    },
-  ],
-  ttlSeconds: 60 * 5,
+  config: {
+    contents: [
+      {
+        role: 'user',
+        parts: [{ text: '1000 Lasagna Recipes...' }],
+      },
+    ],
+    ttl: '300s', // Cache expires after 5 minutes
+  },
 });
 
-const { text: veggieLasangaRecipe } = await generateText({
+const { text: veggieLasagnaRecipe } = await generateText({
   model: google(model),
   prompt: 'Write a vegetarian lasagna recipe for 4 people.',
   providerOptions: {
     google: {
-      cachedContent,
+      cachedContent: cache.name,
     },
   },
 });
 
-const { text: meatLasangaRecipe } = await generateText({
+const { text: meatLasagnaRecipe } = await generateText({
   model: google(model),
   prompt: 'Write a meat lasagna recipe for 12 people.',
   providerOptions: {
     google: {
-      cachedContent,
+      cachedContent: cache.name,
     },
   },
 });
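
Compared with the removed `GoogleAICacheManager` flow, the new `@google/genai` example nests the cached contents under `config` and expresses the lifetime as a duration string (`'300s'`) instead of `ttlSeconds`. As a follow-up, a short cleanup sketch, assuming the `caches.delete` method of `@google/genai` (verify the call shape against the SDK version you install); `cachedContentName` stands for the `cache.name` value from the example above:

```ts
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({
  apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
});

// Remove an explicit cache before its TTL expires to stop paying for storage.
async function deleteCache(cachedContentName: string): Promise<void> {
  await ai.caches.delete({ name: cachedContentName });
}
```
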
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ai-sdk/google",
-  "version": "3.0.12",
+  "version": "3.0.14",
   "license": "Apache-2.0",
   "sideEffects": false,
   "main": "./dist/index.js",
@@ -10,6 +10,10 @@
     "dist/**/*",
     "docs/**/*",
     "src",
+    "!src/**/*.test.ts",
+    "!src/**/*.test-d.ts",
+    "!src/**/__snapshots__",
+    "!src/**/__fixtures__",
     "CHANGELOG.md",
     "README.md",
     "internal.d.ts"
@@ -32,16 +36,16 @@
     }
   },
   "dependencies": {
-    "@ai-sdk/provider": "3.0.4",
-    "@ai-sdk/provider-utils": "4.0.8"
+    "@ai-sdk/provider": "3.0.5",
+    "@ai-sdk/provider-utils": "4.0.10"
   },
   "devDependencies": {
     "@types/node": "20.17.24",
     "tsup": "^8",
     "typescript": "5.8.3",
     "zod": "3.25.76",
-    "@vercel/ai-tsconfig": "0.0.0",
-    "@ai-sdk/test-server": "1.0.2"
+    "@ai-sdk/test-server": "1.0.3",
+    "@vercel/ai-tsconfig": "0.0.0"
   },
   "peerDependencies": {
     "zod": "^3.25.76 || ^4.1.8"
@@ -1,33 +0,0 @@
-// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
-
-exports[`GoogleGenerativeAIEmbeddingModel > should expose the raw response 1`] = `
-{
-  "body": {
-    "embeddings": [
-      {
-        "values": [
-          0.1,
-          0.2,
-          0.3,
-          0.4,
-          0.5,
-        ],
-      },
-      {
-        "values": [
-          0.6,
-          0.7,
-          0.8,
-          0.9,
-          1,
-        ],
-      },
-    ],
-  },
-  "headers": {
-    "content-length": "80",
-    "content-type": "application/json",
-    "test-header": "test-value",
-  },
-}
-`;