@ai-sdk/google 2.1.0-beta.1 → 2.1.0-beta.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2,9 +2,13 @@
 import {
   generateId as generateId2,
   loadApiKey,
-  withoutTrailingSlash
+  withoutTrailingSlash,
+  withUserAgentSuffix
 } from "@ai-sdk/provider-utils";
 
+// src/version.ts
+var VERSION = true ? "2.1.0-beta.11" : "0.0.0-test";
+
 // src/google-generative-ai-embedding-model.ts
 import {
   TooManyEmbeddingValuesForCallError
@@ -68,7 +72,7 @@ var googleGenerativeAIEmbeddingProviderOptions = z2.object({
 // src/google-generative-ai-embedding-model.ts
 var GoogleGenerativeAIEmbeddingModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
     this.maxEmbeddingsPerCall = 2048;
     this.supportsParallelCalls = true;
     this.modelId = modelId;
@@ -277,7 +281,7 @@ import {
 } from "@ai-sdk/provider";
 import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt, options) {
-  var _a;
+  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -329,12 +333,12 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
         contents.push({
           role: "model",
           parts: content.map((part) => {
-            var _a2, _b, _c, _d, _e, _f;
+            var _a2, _b2, _c, _d, _e, _f;
             switch (part.type) {
               case "text": {
                 return part.text.length === 0 ? void 0 : {
                   text: part.text,
-                  thoughtSignature: (_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b.thoughtSignature
+                  thoughtSignature: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b2.thoughtSignature
                 };
               }
               case "reasoning": {
@@ -419,7 +423,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
               name: part.toolName,
               response: {
                 name: part.toolName,
-                content: output.value
+                content: output.type === "execution-denied" ? (_b = output.reason) != null ? _b : "Tool execution denied." : output.value
               }
             }
           });
@@ -513,7 +517,18 @@ var googleGenerativeAIProviderOptions = z4.object({
   *
   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls
   */
-  labels: z4.record(z4.string(), z4.string()).optional()
+  labels: z4.record(z4.string(), z4.string()).optional(),
+  /**
+   * Optional. If specified, the media resolution specified will be used.
+   *
+   * https://ai.google.dev/api/generate-content#MediaResolution
+   */
+  mediaResolution: z4.enum([
+    "MEDIA_RESOLUTION_UNSPECIFIED",
+    "MEDIA_RESOLUTION_LOW",
+    "MEDIA_RESOLUTION_MEDIUM",
+    "MEDIA_RESOLUTION_HIGH"
+  ]).optional()
 });
 
 // src/google-prepare-tools.ts
@@ -528,7 +543,12 @@ function prepareTools({
   var _a;
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
-  const isGemini2 = modelId.includes("gemini-2");
+  const isLatest = [
+    "gemini-flash-latest",
+    "gemini-flash-lite-latest",
+    "gemini-pro-latest"
+  ].some((id) => id === modelId);
+  const isGemini2 = modelId.includes("gemini-2") || isLatest;
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (tools == null) {
     return { tools: void 0, toolConfig: void 0, toolWarnings };
@@ -748,7 +768,7 @@ var urlContext = createProviderDefinedToolFactory2({
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
     var _a;
     this.modelId = modelId;
     this.config = config;
@@ -826,7 +846,10 @@ var GoogleGenerativeAILanguageModel = class {
         },
         // provider options:
         responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-        thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
+        ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
+          mediaResolution: googleOptions.mediaResolution
+        }
       },
       contents,
       systemInstruction: isGemmaModel ? void 0 : systemInstruction,
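The new `mediaResolution` provider option defined above is forwarded into `generationConfig` only when it is actually set. A minimal usage sketch, assuming the AI SDK `generateText` call shape and a placeholder model id and image URL:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { text } = await generateText({
  model: google('gemini-2.5-flash'), // placeholder model id
  providerOptions: {
    google: {
      // New in this release; forwarded to generationConfig.mediaResolution.
      mediaResolution: 'MEDIA_RESOLUTION_LOW',
    },
  },
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Describe this image.' },
        { type: 'image', image: new URL('https://example.com/cat.png') }, // placeholder URL
      ],
    },
  ],
});
```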
@@ -840,7 +863,7 @@ var GoogleGenerativeAILanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
     const mergedHeaders = combineHeaders2(
@@ -936,9 +959,10 @@ var GoogleGenerativeAILanguageModel = class {
       warnings,
       providerMetadata: {
         google: {
-          groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
-          urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
-          safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null,
+          promptFeedback: (_j = response.promptFeedback) != null ? _j : null,
+          groundingMetadata: (_k = candidate.groundingMetadata) != null ? _k : null,
+          urlContextMetadata: (_l = candidate.urlContextMetadata) != null ? _l : null,
+          safetyRatings: (_m = candidate.safetyRatings) != null ? _m : null,
           usageMetadata: usageMetadata != null ? usageMetadata : null
         }
       },
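With this change, `providerMetadata.google` also carries the response's `promptFeedback` (or `null`), alongside `groundingMetadata`, `urlContextMetadata`, and `safetyRatings`, in both the generate and stream paths. A hedged sketch of reading it from a `generateText` result, assuming a placeholder model id and prompt:

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.5-flash'), // placeholder model id
  prompt: 'Summarize the plot of Hamlet.', // placeholder prompt
});

// promptFeedback is null when the API returned none; otherwise it matches the
// schema added below, e.g. { blockReason, safetyRatings }.
const promptFeedback = result.providerMetadata?.google?.promptFeedback;
if (promptFeedback != null) {
  console.log('Prompt was flagged or blocked:', promptFeedback);
}
```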
@@ -989,7 +1013,7 @@ var GoogleGenerativeAILanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -1163,9 +1187,10 @@ var GoogleGenerativeAILanguageModel = class {
             });
             providerMetadata = {
               google: {
-                groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
-                urlContextMetadata: (_j = candidate.urlContextMetadata) != null ? _j : null,
-                safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null
+                promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
+                groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+                urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+                safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null
               }
             };
             if (usageMetadata != null) {
@@ -1293,7 +1318,11 @@ var responseSchema = z7.object({
       urlContextMetadata: urlContextMetadataSchema.nullish()
     })
   ),
-  usageMetadata: usageSchema.nullish()
+  usageMetadata: usageSchema.nullish(),
+  promptFeedback: z7.object({
+    blockReason: z7.string().nullish(),
+    safetyRatings: z7.array(safetyRatingSchema).nullish()
+  }).nullish()
 });
 var chunkSchema = z7.object({
   candidates: z7.array(
@@ -1305,7 +1334,11 @@ var chunkSchema = z7.object({
       urlContextMetadata: urlContextMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata: usageSchema.nullish()
+  usageMetadata: usageSchema.nullish(),
+  promptFeedback: z7.object({
+    blockReason: z7.string().nullish(),
+    safetyRatings: z7.array(safetyRatingSchema).nullish()
+  }).nullish()
 });
 
 // src/tool/code-execution.ts
@@ -1363,7 +1396,7 @@ var GoogleGenerativeAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
   }
   get maxImagesPerCall() {
     var _a;
@@ -1461,14 +1494,17 @@ var googleImageProviderOptionsSchema = z9.object({
 function createGoogleGenerativeAI(options = {}) {
   var _a;
   const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
-  const getHeaders = () => ({
-    "x-goog-api-key": loadApiKey({
-      apiKey: options.apiKey,
-      environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
-      description: "Google Generative AI"
-    }),
-    ...options.headers
-  });
+  const getHeaders = () => withUserAgentSuffix(
+    {
+      "x-goog-api-key": loadApiKey({
+        apiKey: options.apiKey,
+        environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
+        description: "Google Generative AI"
+      }),
+      ...options.headers
+    },
+    `ai-sdk/google/${VERSION}`
+  );
  const createChatModel = (modelId) => {
    var _a2;
    return new GoogleGenerativeAILanguageModel(modelId, {
@@ -1524,6 +1560,7 @@ function createGoogleGenerativeAI(options = {}) {
 }
 var google = createGoogleGenerativeAI();
 export {
+  VERSION,
   createGoogleGenerativeAI,
   google
 };
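Taken together, the exported `VERSION` constant and the `withUserAgentSuffix` wrapper mean each request now advertises `ai-sdk/google/<VERSION>` via a user-agent suffix while still honoring caller-supplied headers and the API key resolution shown above. A minimal sketch, assuming a hypothetical custom header and a placeholder model id:

```ts
import { createGoogleGenerativeAI, VERSION } from '@ai-sdk/google';

// Custom headers are merged in as before; the provider additionally appends
// `ai-sdk/google/${VERSION}` to the outgoing user-agent header.
const googleProvider = createGoogleGenerativeAI({
  apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
  headers: { 'x-example-trace-id': 'abc123' }, // hypothetical header
});

const model = googleProvider('gemini-2.5-flash'); // placeholder model id
console.log('provider version:', VERSION);
```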