@ai-sdk/google 2.1.0-beta.0 → 2.1.0-beta.10

This diff shows changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -2,9 +2,13 @@
  import {
  generateId as generateId2,
  loadApiKey,
- withoutTrailingSlash
+ withoutTrailingSlash,
+ withUserAgentSuffix
  } from "@ai-sdk/provider-utils";

+ // src/version.ts
+ var VERSION = true ? "2.1.0-beta.10" : "0.0.0-test";
+
  // src/google-generative-ai-embedding-model.ts
  import {
  TooManyEmbeddingValuesForCallError
@@ -68,7 +72,7 @@ var googleGenerativeAIEmbeddingProviderOptions = z2.object({
  // src/google-generative-ai-embedding-model.ts
  var GoogleGenerativeAIEmbeddingModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  this.maxEmbeddingsPerCall = 2048;
  this.supportsParallelCalls = true;
  this.modelId = modelId;
@@ -200,12 +204,9 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
  enum: enumValues
  } = jsonSchema;
  const result = {};
- if (description)
- result.description = description;
- if (required)
- result.required = required;
- if (format)
- result.format = format;
+ if (description) result.description = description;
+ if (required) result.required = required;
+ if (format) result.format = format;
  if (constValue !== void 0) {
  result.enum = [constValue];
  }
@@ -280,7 +281,7 @@ import {
  } from "@ai-sdk/provider";
  import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToGoogleGenerativeAIMessages(prompt, options) {
- var _a;
+ var _a, _b;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
@@ -332,12 +333,12 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
  contents.push({
  role: "model",
  parts: content.map((part) => {
- var _a2, _b, _c, _d, _e, _f;
+ var _a2, _b2, _c, _d, _e, _f;
  switch (part.type) {
  case "text": {
  return part.text.length === 0 ? void 0 : {
  text: part.text,
- thoughtSignature: (_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b.thoughtSignature
+ thoughtSignature: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b2.thoughtSignature
  };
  }
  case "reasoning": {
@@ -422,7 +423,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
  name: part.toolName,
  response: {
  name: part.toolName,
- content: output.value
+ content: output.type === "execution-denied" ? (_b = output.reason) != null ? _b : "Tool execution denied." : output.value
  }
  }
  });
@@ -516,7 +517,18 @@ var googleGenerativeAIProviderOptions = z4.object({
  *
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls
  */
- labels: z4.record(z4.string(), z4.string()).optional()
+ labels: z4.record(z4.string(), z4.string()).optional(),
+ /**
+ * Optional. If specified, the media resolution specified will be used.
+ *
+ * https://ai.google.dev/api/generate-content#MediaResolution
+ */
+ mediaResolution: z4.enum([
+ "MEDIA_RESOLUTION_UNSPECIFIED",
+ "MEDIA_RESOLUTION_LOW",
+ "MEDIA_RESOLUTION_MEDIUM",
+ "MEDIA_RESOLUTION_HIGH"
+ ]).optional()
  });

  // src/google-prepare-tools.ts
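
For context beyond the diff itself: the new mediaResolution provider option is forwarded into generationConfig when set (see the getArgs hunk further below). A minimal usage sketch in TypeScript, assuming the standard AI SDK generateText call shape; the model id is hypothetical:

  import { generateText } from "ai";
  import { google } from "@ai-sdk/google";

  // mediaResolution is validated against the enum added above and, when present,
  // spread into the request's generationConfig.
  const { text } = await generateText({
    model: google("gemini-2.5-flash"), // hypothetical model id
    prompt: "Describe the attached image.",
    providerOptions: {
      google: { mediaResolution: "MEDIA_RESOLUTION_LOW" },
    },
  });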
@@ -751,7 +763,7 @@ var urlContext = createProviderDefinedToolFactory2({
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  var _a;
  this.modelId = modelId;
  this.config = config;
@@ -829,7 +841,10 @@ var GoogleGenerativeAILanguageModel = class {
  },
  // provider options:
  responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
+ ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
+ mediaResolution: googleOptions.mediaResolution
+ }
  },
  contents,
  systemInstruction: isGemmaModel ? void 0 : systemInstruction,
@@ -843,7 +858,7 @@ var GoogleGenerativeAILanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
  const mergedHeaders = combineHeaders2(
@@ -939,9 +954,10 @@ var GoogleGenerativeAILanguageModel = class {
  warnings,
  providerMetadata: {
  google: {
- groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
- urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
- safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null,
+ promptFeedback: (_j = response.promptFeedback) != null ? _j : null,
+ groundingMetadata: (_k = candidate.groundingMetadata) != null ? _k : null,
+ urlContextMetadata: (_l = candidate.urlContextMetadata) != null ? _l : null,
+ safetyRatings: (_m = candidate.safetyRatings) != null ? _m : null,
  usageMetadata: usageMetadata != null ? usageMetadata : null
  }
  },
@@ -992,7 +1008,7 @@ var GoogleGenerativeAILanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -1166,9 +1182,10 @@ var GoogleGenerativeAILanguageModel = class {
  });
  providerMetadata = {
  google: {
- groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
- urlContextMetadata: (_j = candidate.urlContextMetadata) != null ? _j : null,
- safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null
+ promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
+ groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+ urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+ safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null
  }
  };
  if (usageMetadata != null) {
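
For context beyond the diff itself: promptFeedback is now surfaced through providerMetadata.google on both the doGenerate result and the stream finish metadata. A minimal sketch of reading it, assuming the standard AI SDK generateText result shape; the model id is hypothetical:

  import { generateText } from "ai";
  import { google } from "@ai-sdk/google";

  const result = await generateText({
    model: google("gemini-2.5-flash"), // hypothetical model id
    prompt: "Hello",
  });

  // promptFeedback is null unless the API returned it (e.g. a blockReason when
  // the prompt was blocked by safety filters).
  const feedback = result.providerMetadata?.google?.promptFeedback;
  if (feedback != null) {
    console.log("prompt feedback:", feedback);
  }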
@@ -1296,7 +1313,11 @@ var responseSchema = z7.object({
  urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ),
- usageMetadata: usageSchema.nullish()
+ usageMetadata: usageSchema.nullish(),
+ promptFeedback: z7.object({
+ blockReason: z7.string().nullish(),
+ safetyRatings: z7.array(safetyRatingSchema).nullish()
+ }).nullish()
  });
  var chunkSchema = z7.object({
  candidates: z7.array(
@@ -1308,7 +1329,11 @@ var chunkSchema = z7.object({
  urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ).nullish(),
- usageMetadata: usageSchema.nullish()
+ usageMetadata: usageSchema.nullish(),
+ promptFeedback: z7.object({
+ blockReason: z7.string().nullish(),
+ safetyRatings: z7.array(safetyRatingSchema).nullish()
+ }).nullish()
  });

  // src/tool/code-execution.ts
@@ -1366,7 +1391,7 @@ var GoogleGenerativeAIImageModel = class {
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  }
  get maxImagesPerCall() {
  var _a;
@@ -1464,14 +1489,17 @@ var googleImageProviderOptionsSchema = z9.object({
  function createGoogleGenerativeAI(options = {}) {
  var _a;
  const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
- const getHeaders = () => ({
- "x-goog-api-key": loadApiKey({
- apiKey: options.apiKey,
- environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
- description: "Google Generative AI"
- }),
- ...options.headers
- });
+ const getHeaders = () => withUserAgentSuffix(
+ {
+ "x-goog-api-key": loadApiKey({
+ apiKey: options.apiKey,
+ environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
+ description: "Google Generative AI"
+ }),
+ ...options.headers
+ },
+ `ai-sdk/google/${VERSION}`
+ );
  const createChatModel = (modelId) => {
  var _a2;
  return new GoogleGenerativeAILanguageModel(modelId, {
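
For context beyond the diff itself: getHeaders now runs through withUserAgentSuffix, so requests carry an ai-sdk/google/<version> marker. An illustrative approximation of that effect in TypeScript (not the @ai-sdk/provider-utils implementation), assuming the suffix is appended to any existing user-agent value:

  // Illustrative sketch only; the real helper lives in @ai-sdk/provider-utils.
  function appendUserAgentSuffix(
    headers: Record<string, string | undefined>,
    suffix: string,
  ): Record<string, string | undefined> {
    const current = headers["user-agent"];
    return {
      ...headers,
      "user-agent": current ? `${current} ${suffix}` : suffix,
    };
  }

  // appendUserAgentSuffix({ "x-goog-api-key": "key" }, "ai-sdk/google/2.1.0-beta.10")
  // → { "x-goog-api-key": "key", "user-agent": "ai-sdk/google/2.1.0-beta.10" }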
@@ -1527,6 +1555,7 @@ function createGoogleGenerativeAI(options = {}) {
  }
  var google = createGoogleGenerativeAI();
  export {
+ VERSION,
  createGoogleGenerativeAI,
  google
  };
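
For context beyond the diff itself: VERSION is now exported alongside createGoogleGenerativeAI and google, so consumers can read the provider's package version at runtime. A minimal sketch:

  import { VERSION, createGoogleGenerativeAI } from "@ai-sdk/google";

  // VERSION resolves to the published package version, e.g. "2.1.0-beta.10".
  console.log(`@ai-sdk/google ${VERSION}`);

  // apiKey falls back to the GOOGLE_GENERATIVE_AI_API_KEY environment variable
  // when omitted, per loadApiKey in the diff above.
  const provider = createGoogleGenerativeAI();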