@ai-sdk/google 2.1.0-beta.1 → 2.1.0-beta.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +77 -0
- package/dist/index.d.mts +20 -12
- package/dist/index.d.ts +20 -12
- package/dist/index.js +59 -27
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +60 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -7
- package/dist/internal/index.d.ts +7 -7
- package/dist/internal/index.js +41 -17
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +41 -17
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +5 -5
package/dist/index.mjs
CHANGED
```diff
@@ -2,9 +2,13 @@
 import {
   generateId as generateId2,
   loadApiKey,
-  withoutTrailingSlash
+  withoutTrailingSlash,
+  withUserAgentSuffix
 } from "@ai-sdk/provider-utils";

+// src/version.ts
+var VERSION = true ? "2.1.0-beta.10" : "0.0.0-test";
+
 // src/google-generative-ai-embedding-model.ts
 import {
   TooManyEmbeddingValuesForCallError
@@ -68,7 +72,7 @@ var googleGenerativeAIEmbeddingProviderOptions = z2.object({
 // src/google-generative-ai-embedding-model.ts
 var GoogleGenerativeAIEmbeddingModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "
+    this.specificationVersion = "v3";
     this.maxEmbeddingsPerCall = 2048;
     this.supportsParallelCalls = true;
     this.modelId = modelId;
@@ -277,7 +281,7 @@ import {
 } from "@ai-sdk/provider";
 import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt, options) {
-  var _a;
+  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -329,12 +333,12 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
       contents.push({
         role: "model",
         parts: content.map((part) => {
-          var _a2,
+          var _a2, _b2, _c, _d, _e, _f;
           switch (part.type) {
             case "text": {
               return part.text.length === 0 ? void 0 : {
                 text: part.text,
-                thoughtSignature: (
+                thoughtSignature: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b2.thoughtSignature
               };
             }
             case "reasoning": {
@@ -419,7 +423,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
             name: part.toolName,
             response: {
               name: part.toolName,
-              content: output.value
+              content: output.type === "execution-denied" ? (_b = output.reason) != null ? _b : "Tool execution denied." : output.value
             }
           }
         });
```
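The last hunk above changes how tool results are converted into Gemini `functionResponse` parts: when a tool call was denied, the response content now falls back to the denial reason (or a default message) instead of `output.value`. A minimal TypeScript sketch of that mapping, using an assumed, simplified `output` shape (the real output union in the SDK has more variants):

```ts
// Assumed, simplified shape of a tool output; the actual union in the SDK
// has additional variants beyond these two.
type ToolOutput =
  | { type: "execution-denied"; reason?: string }
  | { type: "json"; value: unknown };

// Mirrors the new branch in convertToGoogleGenerativeAIMessages: a denied
// execution is reported as its reason (or a default string), everything
// else passes the tool's value through unchanged.
function toFunctionResponseContent(output: ToolOutput): unknown {
  return output.type === "execution-denied"
    ? output.reason ?? "Tool execution denied."
    : output.value;
}
```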
```diff
@@ -513,7 +517,18 @@ var googleGenerativeAIProviderOptions = z4.object({
   *
   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls
   */
-  labels: z4.record(z4.string(), z4.string()).optional()
+  labels: z4.record(z4.string(), z4.string()).optional(),
+  /**
+   * Optional. If specified, the media resolution specified will be used.
+   *
+   * https://ai.google.dev/api/generate-content#MediaResolution
+   */
+  mediaResolution: z4.enum([
+    "MEDIA_RESOLUTION_UNSPECIFIED",
+    "MEDIA_RESOLUTION_LOW",
+    "MEDIA_RESOLUTION_MEDIUM",
+    "MEDIA_RESOLUTION_HIGH"
+  ]).optional()
 });

 // src/google-prepare-tools.ts
```
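The new `mediaResolution` provider option (wired into `generationConfig` in a later hunk) can be set per call through `providerOptions.google`. A minimal sketch, assuming standard AI SDK usage; the model id and prompt are illustrative, while the option name and enum values come from the schema above:

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const { text } = await generateText({
  // Illustrative model id; any Gemini model that accepts media inputs works the same way.
  model: google("gemini-2.5-flash"),
  providerOptions: {
    // Forwarded to generationConfig.mediaResolution when set.
    google: { mediaResolution: "MEDIA_RESOLUTION_LOW" },
  },
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe this image." },
        { type: "image", image: new URL("https://example.com/photo.jpg") },
      ],
    },
  ],
});
```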
```diff
@@ -748,7 +763,7 @@ var urlContext = createProviderDefinedToolFactory2({
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "
+    this.specificationVersion = "v3";
     var _a;
     this.modelId = modelId;
     this.config = config;
@@ -826,7 +841,10 @@ var GoogleGenerativeAILanguageModel = class {
         },
         // provider options:
         responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-        thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
+        ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
+          mediaResolution: googleOptions.mediaResolution
+        }
       },
       contents,
       systemInstruction: isGemmaModel ? void 0 : systemInstruction,
@@ -840,7 +858,7 @@ var GoogleGenerativeAILanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
     const mergedHeaders = combineHeaders2(
@@ -936,9 +954,10 @@ var GoogleGenerativeAILanguageModel = class {
       warnings,
       providerMetadata: {
         google: {
-
-
-
+          promptFeedback: (_j = response.promptFeedback) != null ? _j : null,
+          groundingMetadata: (_k = candidate.groundingMetadata) != null ? _k : null,
+          urlContextMetadata: (_l = candidate.urlContextMetadata) != null ? _l : null,
+          safetyRatings: (_m = candidate.safetyRatings) != null ? _m : null,
           usageMetadata: usageMetadata != null ? usageMetadata : null
         }
       },
```
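With the `doGenerate` change above, `promptFeedback` is returned alongside the existing grounding, URL-context, and safety-rating metadata. A sketch of reading it from a `generateText` result; the field names are the ones assembled in the hunk, the model id and prompt are illustrative:

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const result = await generateText({
  model: google("gemini-2.5-flash"),
  prompt: "Tell me about the weather in Berlin.",
});

const metadata = result.providerMetadata?.google;
// Each field is null when the API did not return it; promptFeedback is
// typically only populated when the prompt was blocked or flagged.
console.log(metadata?.promptFeedback);
console.log(metadata?.safetyRatings);
console.log(metadata?.groundingMetadata);
```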
```diff
@@ -989,7 +1008,7 @@ var GoogleGenerativeAILanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+        var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -1163,9 +1182,10 @@ var GoogleGenerativeAILanguageModel = class {
           });
           providerMetadata = {
             google: {
-
-
-
+              promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
+              groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+              urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+              safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null
             }
           };
           if (usageMetadata != null) {
```
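The streaming path assembles the same provider metadata once the final chunk arrives. A sketch of accessing it after the stream completes, assuming the `streamText` result exposes `providerMetadata` as a promise, as in recent AI SDK versions:

```ts
import { google } from "@ai-sdk/google";
import { streamText } from "ai";

const result = streamText({
  model: google("gemini-2.5-flash"),
  prompt: "Summarize today's tech news.",
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

// Resolves once the stream finishes; promptFeedback, groundingMetadata,
// urlContextMetadata, and safetyRatings mirror the fields set in the hunk above.
const metadata = await result.providerMetadata;
console.log(metadata?.google?.promptFeedback);
```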
```diff
@@ -1293,7 +1313,11 @@ var responseSchema = z7.object({
       urlContextMetadata: urlContextMetadataSchema.nullish()
     })
   ),
-  usageMetadata: usageSchema.nullish()
+  usageMetadata: usageSchema.nullish(),
+  promptFeedback: z7.object({
+    blockReason: z7.string().nullish(),
+    safetyRatings: z7.array(safetyRatingSchema).nullish()
+  }).nullish()
 });
 var chunkSchema = z7.object({
   candidates: z7.array(
@@ -1305,7 +1329,11 @@ var chunkSchema = z7.object({
       urlContextMetadata: urlContextMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata: usageSchema.nullish()
+  usageMetadata: usageSchema.nullish(),
+  promptFeedback: z7.object({
+    blockReason: z7.string().nullish(),
+    safetyRatings: z7.array(safetyRatingSchema).nullish()
+  }).nullish()
 });

 // src/tool/code-execution.ts
@@ -1363,7 +1391,7 @@ var GoogleGenerativeAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "
+    this.specificationVersion = "v3";
   }
   get maxImagesPerCall() {
     var _a;
@@ -1461,14 +1489,17 @@ var googleImageProviderOptionsSchema = z9.object({
 function createGoogleGenerativeAI(options = {}) {
   var _a;
   const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
-  const getHeaders = () => (
-
-
-
-
-
-
-
+  const getHeaders = () => withUserAgentSuffix(
+    {
+      "x-goog-api-key": loadApiKey({
+        apiKey: options.apiKey,
+        environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
+        description: "Google Generative AI"
+      }),
+      ...options.headers
+    },
+    `ai-sdk/google/${VERSION}`
+  );
   const createChatModel = (modelId) => {
     var _a2;
     return new GoogleGenerativeAILanguageModel(modelId, {
```
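The rewritten `getHeaders` wraps the API-key header and any user-supplied headers with `withUserAgentSuffix`, so every request now advertises the provider version. A sketch of provider setup using the existing `createGoogleGenerativeAI` options; the custom header name is illustrative:

```ts
import { createGoogleGenerativeAI } from "@ai-sdk/google";

const google = createGoogleGenerativeAI({
  // Falls back to the GOOGLE_GENERATIVE_AI_API_KEY environment variable when omitted.
  apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
  // User-supplied headers are merged before the user-agent suffix is applied.
  headers: { "x-example-request-source": "docs-sample" },
});

// Requests made through this provider instance carry a user-agent suffix
// of the form `ai-sdk/google/<VERSION>`, e.g. `ai-sdk/google/2.1.0-beta.10`.
```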
```diff
@@ -1524,6 +1555,7 @@ function createGoogleGenerativeAI(options = {}) {
 }
 var google = createGoogleGenerativeAI();
 export {
+  VERSION,
   createGoogleGenerativeAI,
   google
 };
```
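Finally, the new `VERSION` constant is exported from the package entry point, which makes it straightforward to log which provider build is in use:

```ts
import { VERSION } from "@ai-sdk/google";

// Prints "2.1.0-beta.10" for this release (the test build substitutes "0.0.0-test").
console.log(`@ai-sdk/google version: ${VERSION}`);
```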