@ai-sdk/google 2.0.0-canary.12 → 2.0.0-canary.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +11 -0
- package/dist/index.d.mts +92 -56
- package/dist/index.d.ts +92 -56
- package/dist/index.js +167 -92
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +167 -92
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -81
- package/dist/internal/index.d.ts +2 -81
- package/dist/internal/index.js +161 -82
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +161 -82
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -72,18 +72,14 @@ var googleGenerativeAIEmbeddingProviderOptions = z2.object({
 var GoogleGenerativeAIEmbeddingModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
   async doEmbed({
     values,
     headers,
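This hunk moves the embedding limits from getters to instance fields; the values themselves are unchanged (2048 embeddings per call, parallel calls allowed). A minimal sketch of caller code that relies on these limits, assuming the `embedMany` helper from the `ai` package (which can split large inputs based on the model's `maxEmbeddingsPerCall`) and a placeholder model id:

import { embedMany } from "ai";
import { google } from "@ai-sdk/google";

// Sketch only: model id and input values are placeholders, not taken from this diff.
const { embeddings } = await embedMany({
  model: google.textEmbeddingModel("text-embedding-004"),
  values: ["first chunk", "second chunk", "third chunk"]
});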
@@ -149,7 +145,7 @@ import {
   postJsonToApi as postJsonToApi2,
   resolve as resolve2
 } from "@ai-sdk/provider-utils";
-import { z as
+import { z as z5 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -370,6 +366,92 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
+// src/google-generative-ai-options.ts
+import { z as z4 } from "zod";
+var dynamicRetrievalConfig = z4.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: z4.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: z4.number().optional()
+});
+var googleGenerativeAIProviderOptions = z4.object({
+  responseModalities: z4.array(z4.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: z4.object({
+    thinkingBudget: z4.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: z4.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: z4.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: z4.array(
+    z4.object({
+      category: z4.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: z4.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: z4.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: z4.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+   */
+  useSearchGrounding: z4.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+   */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
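For orientation, the `googleGenerativeAIProviderOptions` schema added above defines the options that are parsed from `providerOptions` under the `"google"` key (see the `parseProviderOptions2` call later in this diff). A minimal usage sketch, assuming the AI SDK v5 `generateText` call shape from the `ai` package and a placeholder model id and prompt (none of which appear in this diff):

import { generateText } from "ai";
import { google } from "@ai-sdk/google";

// Sketch only: option names come from googleGenerativeAIProviderOptions above.
const { text } = await generateText({
  model: google("gemini-2.0-flash"),
  prompt: "Summarize the latest Gemini release notes.",
  providerOptions: {
    google: {
      useSearchGrounding: true,
      thinkingConfig: { thinkingBudget: 1024 },
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: "BLOCK_ONLY_HIGH"
        }
      ]
    }
  }
});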
@@ -378,7 +460,7 @@ function prepareTools({
   tools,
   toolChoice,
   useSearchGrounding,
-  dynamicRetrievalConfig,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
   modelId
 }) {
   var _a;
@@ -389,7 +471,7 @@ function prepareTools({
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
@@ -486,10 +568,9 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId,
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
@@ -519,7 +600,7 @@ var GoogleGenerativeAILanguageModel = class {
     const googleOptions = await parseProviderOptions2({
       provider: "google",
       providerOptions,
-      schema:
+      schema: googleGenerativeAIProviderOptions
     });
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
     const {
@@ -529,8 +610,8 @@ var GoogleGenerativeAILanguageModel = class {
     } = prepareTools({
       tools,
       toolChoice,
-      useSearchGrounding: (_a =
-      dynamicRetrievalConfig:
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
       modelId: this.modelId
     });
     return {
@@ -550,9 +631,9 @@ var GoogleGenerativeAILanguageModel = class {
           responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
           // so this is needed as an escape hatch:
           // TODO convert into provider option
-          ((_b =
-          ...
-          audioTimestamp:
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
           // provider options:
           responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
@@ -560,10 +641,10 @@ var GoogleGenerativeAILanguageModel = class {
         },
         contents,
         systemInstruction,
-        safetySettings:
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
         tools: googleTools,
         toolConfig: googleToolConfig,
-        cachedContent:
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
       },
       warnings: [...warnings, ...toolWarnings]
     };
@@ -811,100 +892,94 @@ function extractSources({
     title: chunk.web.title
   }));
 }
-var contentSchema =
-  role:
-  parts:
-
-
-        text:
+var contentSchema = z5.object({
+  role: z5.string(),
+  parts: z5.array(
+    z5.union([
+      z5.object({
+        text: z5.string()
       }),
-
-        functionCall:
-          name:
-          args:
+      z5.object({
+        functionCall: z5.object({
+          name: z5.string(),
+          args: z5.unknown()
         })
       }),
-
-        inlineData:
-          mimeType:
-          data:
+      z5.object({
+        inlineData: z5.object({
+          mimeType: z5.string(),
+          data: z5.string()
         })
       })
     ])
   ).nullish()
 });
-var groundingChunkSchema =
-  web:
-  retrievedContext:
+var groundingChunkSchema = z5.object({
+  web: z5.object({ uri: z5.string(), title: z5.string() }).nullish(),
+  retrievedContext: z5.object({ uri: z5.string(), title: z5.string() }).nullish()
 });
-var groundingMetadataSchema =
-  webSearchQueries:
-  retrievalQueries:
-  searchEntryPoint:
-  groundingChunks:
-  groundingSupports:
-
-      segment:
-        startIndex:
-        endIndex:
-        text:
+var groundingMetadataSchema = z5.object({
+  webSearchQueries: z5.array(z5.string()).nullish(),
+  retrievalQueries: z5.array(z5.string()).nullish(),
+  searchEntryPoint: z5.object({ renderedContent: z5.string() }).nullish(),
+  groundingChunks: z5.array(groundingChunkSchema).nullish(),
+  groundingSupports: z5.array(
+    z5.object({
+      segment: z5.object({
+        startIndex: z5.number().nullish(),
+        endIndex: z5.number().nullish(),
+        text: z5.string().nullish()
       }),
-      segment_text:
-      groundingChunkIndices:
-      supportChunkIndices:
-      confidenceScores:
-      confidenceScore:
+      segment_text: z5.string().nullish(),
+      groundingChunkIndices: z5.array(z5.number()).nullish(),
+      supportChunkIndices: z5.array(z5.number()).nullish(),
+      confidenceScores: z5.array(z5.number()).nullish(),
+      confidenceScore: z5.array(z5.number()).nullish()
     })
   ).nullish(),
-  retrievalMetadata:
-
-      webDynamicRetrievalScore:
+  retrievalMetadata: z5.union([
+    z5.object({
+      webDynamicRetrievalScore: z5.number()
     }),
-
+    z5.object({})
   ]).nullish()
 });
-var safetyRatingSchema =
-  category:
-  probability:
-  probabilityScore:
-  severity:
-  severityScore:
-  blocked:
+var safetyRatingSchema = z5.object({
+  category: z5.string(),
+  probability: z5.string(),
+  probabilityScore: z5.number().nullish(),
+  severity: z5.string().nullish(),
+  severityScore: z5.number().nullish(),
+  blocked: z5.boolean().nullish()
 });
-var responseSchema =
-  candidates:
-
-      content: contentSchema.nullish().or(
-      finishReason:
-      safetyRatings:
+var responseSchema = z5.object({
+  candidates: z5.array(
+    z5.object({
+      content: contentSchema.nullish().or(z5.object({}).strict()),
+      finishReason: z5.string().nullish(),
+      safetyRatings: z5.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ),
-  usageMetadata:
-    promptTokenCount:
-    candidatesTokenCount:
-    totalTokenCount:
+  usageMetadata: z5.object({
+    promptTokenCount: z5.number().nullish(),
+    candidatesTokenCount: z5.number().nullish(),
+    totalTokenCount: z5.number().nullish()
   }).nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = z5.object({
+  candidates: z5.array(
+    z5.object({
       content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
+      finishReason: z5.string().nullish(),
+      safetyRatings: z5.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata:
-    promptTokenCount:
-    candidatesTokenCount:
-    totalTokenCount:
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z4.object({
-  responseModalities: z4.array(z4.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: z4.object({
-    thinkingBudget: z4.number().nullish()
+  usageMetadata: z5.object({
+    promptTokenCount: z5.number().nullish(),
+    candidatesTokenCount: z5.number().nullish(),
+    totalTokenCount: z5.number().nullish()
   }).nullish()
 });
 
@@ -920,9 +995,9 @@ function createGoogleGenerativeAI(options = {}) {
     }),
     ...options.headers
   });
-  const createChatModel = (modelId
+  const createChatModel = (modelId) => {
     var _a2;
-    return new GoogleGenerativeAILanguageModel(modelId,
+    return new GoogleGenerativeAILanguageModel(modelId, {
       provider: "google.generative-ai",
       baseURL,
       headers: getHeaders,
@@ -942,13 +1017,13 @@ function createGoogleGenerativeAI(options = {}) {
     headers: getHeaders,
     fetch: options.fetch
   });
-  const provider = function(modelId
+  const provider = function(modelId) {
     if (new.target) {
       throw new Error(
         "The Google Generative AI model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId
+    return createChatModel(modelId);
   };
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;