@ai-sdk/google 1.2.8 → 2.0.0-canary.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -9
- package/dist/index.d.mts +7 -7
- package/dist/index.d.ts +7 -7
- package/dist/index.js +171 -165
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +171 -163
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.d.mts +6 -6
- package/internal/dist/index.d.ts +6 -6
- package/internal/dist/index.js +1 -1
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +1 -1
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,23 +1,26 @@
 # @ai-sdk/google
 
-##
+## 2.0.0-canary.1
 
 ### Patch Changes
 
--
+- 779d916: feat: add provider option schemas for vertex imagegen and google genai
+- Updated dependencies [060370c]
+- Updated dependencies [0c0c0b3]
+- Updated dependencies [63d791d]
+  - @ai-sdk/provider-utils@3.0.0-canary.1
 
-##
+## 2.0.0-canary.0
 
-###
-
-- 1789884: feat: add provider option schemas for vertex imagegen and google genai
+### Major Changes
 
-
+- d5f588f: AI SDK 5
 
 ### Patch Changes
 
-- Updated dependencies [
-  - @ai-sdk/provider-utils@
+- Updated dependencies [d5f588f]
+  - @ai-sdk/provider-utils@3.0.0-canary.0
+  - @ai-sdk/provider@2.0.0-canary.0
 
 ## 1.2.5
 
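The `779d916` entry above corresponds to the `googleGenerativeAIProviderOptionsSchema` visible in the dist diff further down, which currently validates a `responseModalities` option under the `google` provider-options key. A minimal usage sketch, assuming the core `ai` package's `generateText` and an example model id (neither is part of this diff):

```ts
import { generateText } from 'ai';
import { google } from '@ai-sdk/google';

// Options under the "google" key are parsed against
// googleGenerativeAIProviderOptionsSchema (see dist/index.js below).
const { text } = await generateText({
  model: google('gemini-2.0-flash-exp'), // example model id
  prompt: 'Write a haiku about the rain.',
  providerOptions: {
    google: { responseModalities: ['TEXT'] },
  },
});
```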
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,6 @@
 import { z } from 'zod';
+import { ProviderV2, LanguageModelV2, EmbeddingModelV1 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
-import { ProviderV1, LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
 
 declare const googleErrorDataSchema: z.ZodObject<{
     error: z.ZodObject<{
@@ -64,7 +64,7 @@ interface GoogleGenerativeAISettings {
      */
     safetySettings?: Array<{
         category: 'HARM_CATEGORY_UNSPECIFIED' | 'HARM_CATEGORY_HATE_SPEECH' | 'HARM_CATEGORY_DANGEROUS_CONTENT' | 'HARM_CATEGORY_HARASSMENT' | 'HARM_CATEGORY_SEXUALLY_EXPLICIT' | 'HARM_CATEGORY_CIVIC_INTEGRITY';
-        threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE'
+        threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE';
     }>;
     /**
      * Optional. Enables timestamp understanding for audio-only files.
@@ -297,14 +297,14 @@ interface GoogleGenerativeAIEmbeddingSettings {
     outputDimensionality?: number;
 }
 
-interface GoogleGenerativeAIProvider extends
-    (modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
-    languageModel(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
-    chat(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
+interface GoogleGenerativeAIProvider extends ProviderV2 {
+    (modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
+    languageModel(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
+    chat(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
     /**
      * @deprecated Use `chat()` instead.
      */
-    generativeAI(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
+    generativeAI(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
    /**
     @deprecated Use `textEmbeddingModel()` instead.
     */
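A consumer-side sketch of the type change above: the provider interface now extends `ProviderV2` and its model factories return `LanguageModelV2` instead of the previous V1 types. The model id is only an example:

```ts
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import type { LanguageModelV2, ProviderV2 } from '@ai-sdk/provider';

const google = createGoogleGenerativeAI();

// Both assignments type-check against the new declarations above.
const provider: ProviderV2 = google;
const model: LanguageModelV2 = google('gemini-1.5-flash'); // example model id
```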
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,6 @@
 import { z } from 'zod';
+import { ProviderV2, LanguageModelV2, EmbeddingModelV1 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
-import { ProviderV1, LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';
 
 declare const googleErrorDataSchema: z.ZodObject<{
     error: z.ZodObject<{
@@ -64,7 +64,7 @@ interface GoogleGenerativeAISettings {
      */
     safetySettings?: Array<{
         category: 'HARM_CATEGORY_UNSPECIFIED' | 'HARM_CATEGORY_HATE_SPEECH' | 'HARM_CATEGORY_DANGEROUS_CONTENT' | 'HARM_CATEGORY_HARASSMENT' | 'HARM_CATEGORY_SEXUALLY_EXPLICIT' | 'HARM_CATEGORY_CIVIC_INTEGRITY';
-        threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE'
+        threshold: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' | 'BLOCK_LOW_AND_ABOVE' | 'BLOCK_MEDIUM_AND_ABOVE' | 'BLOCK_ONLY_HIGH' | 'BLOCK_NONE';
     }>;
     /**
      * Optional. Enables timestamp understanding for audio-only files.
@@ -297,14 +297,14 @@ interface GoogleGenerativeAIEmbeddingSettings {
     outputDimensionality?: number;
 }
 
-interface GoogleGenerativeAIProvider extends
-    (modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
-    languageModel(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
-    chat(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
+interface GoogleGenerativeAIProvider extends ProviderV2 {
+    (modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
+    languageModel(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
+    chat(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
    /**
     * @deprecated Use `chat()` instead.
     */
-    generativeAI(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings):
+    generativeAI(modelId: GoogleGenerativeAIModelId, settings?: GoogleGenerativeAISettings): LanguageModelV2;
    /**
     @deprecated Use `textEmbeddingModel()` instead.
     */
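The embedding side is unchanged apart from the module reshuffle: `GoogleGenerativeAIEmbeddingSettings` still exposes `outputDimensionality` and the embedding model keeps the v1 embedding specification. A rough usage sketch, assuming an example model id and calling the spec-level `doEmbed` shown in dist/index.js below:

```ts
import { google } from '@ai-sdk/google';

const embeddingModel = google.textEmbeddingModel('text-embedding-004', {
  outputDimensionality: 256, // forwarded into the batchEmbedContents request body
});

const { embeddings } = await embeddingModel.doEmbed({
  values: ['sunny day at the beach'],
});
```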
package/dist/index.js
CHANGED
@@ -26,12 +26,95 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);
 
 // src/google-provider.ts
+var import_provider4 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
 
-// src/google-generative-ai-
-var
+// src/google-generative-ai-embedding-model.ts
+var import_provider = require("@ai-sdk/provider");
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
 var import_zod2 = require("zod");
 
+// src/google-error.ts
+var import_provider_utils = require("@ai-sdk/provider-utils");
+var import_zod = require("zod");
+var googleErrorDataSchema = import_zod.z.object({
+  error: import_zod.z.object({
+    code: import_zod.z.number().nullable(),
+    message: import_zod.z.string(),
+    status: import_zod.z.string()
+  })
+});
+var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+  errorSchema: googleErrorDataSchema,
+  errorToMessage: (data) => data.error.message
+});
+
+// src/google-generative-ai-embedding-model.ts
+var GoogleGenerativeAIEmbeddingModel = class {
+  constructor(modelId, settings, config) {
+    this.specificationVersion = "v1";
+    this.modelId = modelId;
+    this.settings = settings;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  get maxEmbeddingsPerCall() {
+    return 2048;
+  }
+  get supportsParallelCalls() {
+    return true;
+  }
+  async doEmbed({
+    values,
+    headers,
+    abortSignal
+  }) {
+    if (values.length > this.maxEmbeddingsPerCall) {
+      throw new import_provider.TooManyEmbeddingValuesForCallError({
+        provider: this.provider,
+        modelId: this.modelId,
+        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+        values
+      });
+    }
+    const mergedHeaders = (0, import_provider_utils2.combineHeaders)(
+      await (0, import_provider_utils2.resolve)(this.config.headers),
+      headers
+    );
+    const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
+      headers: mergedHeaders,
+      body: {
+        requests: values.map((value) => ({
+          model: `models/${this.modelId}`,
+          content: { role: "user", parts: [{ text: value }] },
+          outputDimensionality: this.settings.outputDimensionality
+        }))
+      },
+      failedResponseHandler: googleFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+        googleGenerativeAITextEmbeddingResponseSchema
+      ),
+      abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      embeddings: response.embeddings.map((item) => item.values),
+      usage: void 0,
+      rawResponse: { headers: responseHeaders }
+    };
+  }
+};
+var googleGenerativeAITextEmbeddingResponseSchema = import_zod2.z.object({
+  embeddings: import_zod2.z.array(import_zod2.z.object({ values: import_zod2.z.array(import_zod2.z.number()) }))
+});
+
+// src/google-generative-ai-language-model.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_zod3 = require("zod");
+
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
   if (isEmptyObjectSchema(jsonSchema)) {
@@ -130,8 +213,8 @@ function isEmptyObjectSchema(jsonSchema) {
 }
 
 // src/convert-to-google-generative-ai-messages.ts
-var
-var
+var import_provider2 = require("@ai-sdk/provider");
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 function convertToGoogleGenerativeAIMessages(prompt) {
   var _a, _b;
   const systemInstructionParts = [];
@@ -141,7 +224,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
     switch (role) {
       case "system": {
         if (!systemMessagesAllowed) {
-          throw new
+          throw new import_provider2.UnsupportedFunctionalityError({
             functionality: "system messages are only supported at the beginning of the conversation"
           });
         }
@@ -167,7 +250,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
                 } : {
                   inlineData: {
                     mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                    data: (0,
+                    data: (0, import_provider_utils3.convertUint8ArrayToBase64)(part.image)
                   }
                 }
               );
@@ -205,12 +288,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
               }
               case "file": {
                 if (part.mimeType !== "image/png") {
-                  throw new
+                  throw new import_provider2.UnsupportedFunctionalityError({
                     functionality: "Only PNG images are supported in assistant messages"
                   });
                 }
                 if (part.data instanceof URL) {
-                  throw new
+                  throw new import_provider2.UnsupportedFunctionalityError({
                     functionality: "File data URLs in assistant messages are not supported"
                   });
                 }
@@ -263,23 +346,8 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
-// src/google-error.ts
-var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var import_zod = require("zod");
-var googleErrorDataSchema = import_zod.z.object({
-  error: import_zod.z.object({
-    code: import_zod.z.number().nullable(),
-    message: import_zod.z.string(),
-    status: import_zod.z.string()
-  })
-});
-var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
-  errorSchema: googleErrorDataSchema,
-  errorToMessage: (data) => data.error.message
-});
-
 // src/google-prepare-tools.ts
-var
+var import_provider3 = require("@ai-sdk/provider");
 function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
   var _a, _b;
   const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
@@ -351,7 +419,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
       };
     default: {
      const _exhaustiveCheck = type;
-      throw new
+      throw new import_provider3.UnsupportedFunctionalityError({
        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
      });
    }
@@ -388,7 +456,7 @@ function mapGoogleGenerativeAIFinishReason({
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
   constructor(modelId, settings, config) {
-    this.specificationVersion = "
+    this.specificationVersion = "v2";
     this.defaultObjectGenerationMode = "json";
     this.supportsImageUrls = false;
     this.modelId = modelId;
@@ -419,7 +487,7 @@ var GoogleGenerativeAILanguageModel = class {
     var _a, _b;
     const type = mode.type;
     const warnings = [];
-    const googleOptions = (0,
+    const googleOptions = (0, import_provider_utils4.parseProviderOptions)({
       provider: "google",
       providerOptions: providerMetadata,
       schema: googleGenerativeAIProviderOptionsSchema
@@ -521,22 +589,22 @@ var GoogleGenerativeAILanguageModel = class {
     var _a, _b, _c, _d, _e;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const mergedHeaders = (0,
-      await (0,
+    const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
+      await (0, import_provider_utils4.resolve)(this.config.headers),
       options.headers
     );
     const {
       responseHeaders,
      value: response,
      rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
@@ -582,18 +650,18 @@ var GoogleGenerativeAILanguageModel = class {
   async doStream(options) {
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const headers = (0,
-      await (0,
+    const headers = (0, import_provider_utils4.combineHeaders)(
+      await (0, import_provider_utils4.resolve)(this.config.headers),
       options.headers
     );
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:streamGenerateContent?alt=sse`,
      headers,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
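For reference, the URLs assembled in `doGenerate`/`doStream` above reduce to the following. The `baseURL` value is the provider's usual default and is an assumption here (the default is not part of this hunk); the model id is an example:

```ts
// Mirrors getModelPath() earlier in this file: bare ids get a "models/" prefix.
const getModelPath = (modelId: string) =>
  modelId.includes('/') ? modelId : `models/${modelId}`;

const baseURL = 'https://generativelanguage.googleapis.com/v1beta'; // assumed default

const generateUrl = `${baseURL}/${getModelPath('gemini-1.5-flash')}:generateContent`;
const streamUrl = `${baseURL}/${getModelPath('gemini-1.5-flash')}:streamGenerateContent?alt=sse`;
```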
@@ -744,163 +812,98 @@ function extractSources({
     title: chunk.web.title
   }));
 }
-var contentSchema =
-  role:
-  parts:
-
-
-        text:
+var contentSchema = import_zod3.z.object({
+  role: import_zod3.z.string(),
+  parts: import_zod3.z.array(
+    import_zod3.z.union([
+      import_zod3.z.object({
+        text: import_zod3.z.string()
       }),
-
-        functionCall:
-          name:
-          args:
+      import_zod3.z.object({
+        functionCall: import_zod3.z.object({
+          name: import_zod3.z.string(),
+          args: import_zod3.z.unknown()
        })
      }),
-
-        inlineData:
-          mimeType:
-          data:
+      import_zod3.z.object({
+        inlineData: import_zod3.z.object({
+          mimeType: import_zod3.z.string(),
+          data: import_zod3.z.string()
        })
      })
    ])
  ).nullish()
 });
-var groundingChunkSchema =
-  web:
-  retrievedContext:
+var groundingChunkSchema = import_zod3.z.object({
+  web: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish(),
+  retrievedContext: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish()
 });
-var groundingMetadataSchema =
-  webSearchQueries:
-  retrievalQueries:
-  searchEntryPoint:
-  groundingChunks:
-  groundingSupports:
-
-      segment:
-        startIndex:
-        endIndex:
-        text:
+var groundingMetadataSchema = import_zod3.z.object({
+  webSearchQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+  retrievalQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+  searchEntryPoint: import_zod3.z.object({ renderedContent: import_zod3.z.string() }).nullish(),
+  groundingChunks: import_zod3.z.array(groundingChunkSchema).nullish(),
+  groundingSupports: import_zod3.z.array(
+    import_zod3.z.object({
+      segment: import_zod3.z.object({
+        startIndex: import_zod3.z.number().nullish(),
+        endIndex: import_zod3.z.number().nullish(),
+        text: import_zod3.z.string().nullish()
      }),
-      segment_text:
-      groundingChunkIndices:
-      supportChunkIndices:
-      confidenceScores:
-      confidenceScore:
+      segment_text: import_zod3.z.string().nullish(),
+      groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+      supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+      confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
+      confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
    })
  ).nullish(),
-  retrievalMetadata:
-
-      webDynamicRetrievalScore:
+  retrievalMetadata: import_zod3.z.union([
+    import_zod3.z.object({
+      webDynamicRetrievalScore: import_zod3.z.number()
    }),
-
+    import_zod3.z.object({})
  ]).nullish()
 });
-var safetyRatingSchema =
-  category:
-  probability:
-  probabilityScore:
-  severity:
-  severityScore:
-  blocked:
+var safetyRatingSchema = import_zod3.z.object({
+  category: import_zod3.z.string(),
+  probability: import_zod3.z.string(),
+  probabilityScore: import_zod3.z.number().nullish(),
+  severity: import_zod3.z.string().nullish(),
+  severityScore: import_zod3.z.number().nullish(),
+  blocked: import_zod3.z.boolean().nullish()
 });
-var responseSchema =
-  candidates:
-
-      content: contentSchema.nullish().or(
-      finishReason:
-      safetyRatings:
+var responseSchema = import_zod3.z.object({
+  candidates: import_zod3.z.array(
+    import_zod3.z.object({
+      content: contentSchema.nullish().or(import_zod3.z.object({}).strict()),
+      finishReason: import_zod3.z.string().nullish(),
+      safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ),
-  usageMetadata:
-    promptTokenCount:
-    candidatesTokenCount:
-    totalTokenCount:
+  usageMetadata: import_zod3.z.object({
+    promptTokenCount: import_zod3.z.number().nullish(),
+    candidatesTokenCount: import_zod3.z.number().nullish(),
+    totalTokenCount: import_zod3.z.number().nullish()
  }).nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = import_zod3.z.object({
+  candidates: import_zod3.z.array(
+    import_zod3.z.object({
      content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
+      finishReason: import_zod3.z.string().nullish(),
+      safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ).nullish(),
-  usageMetadata:
-    promptTokenCount:
-    candidatesTokenCount:
-    totalTokenCount:
+  usageMetadata: import_zod3.z.object({
+    promptTokenCount: import_zod3.z.number().nullish(),
+    candidatesTokenCount: import_zod3.z.number().nullish(),
+    totalTokenCount: import_zod3.z.number().nullish()
  }).nullish()
 });
-var googleGenerativeAIProviderOptionsSchema =
-  responseModalities:
-});
-
-// src/google-generative-ai-embedding-model.ts
-var import_provider3 = require("@ai-sdk/provider");
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var import_zod3 = require("zod");
-var GoogleGenerativeAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.modelId = modelId;
-    this.settings = settings;
-    this.config = config;
-  }
-  get provider() {
-    return this.config.provider;
-  }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
-  async doEmbed({
-    values,
-    headers,
-    abortSignal
-  }) {
-    if (values.length > this.maxEmbeddingsPerCall) {
-      throw new import_provider3.TooManyEmbeddingValuesForCallError({
-        provider: this.provider,
-        modelId: this.modelId,
-        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
-        values
-      });
-    }
-    const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
-      await (0, import_provider_utils4.resolve)(this.config.headers),
-      headers
-    );
-    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
-      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
-      headers: mergedHeaders,
-      body: {
-        requests: values.map((value) => ({
-          model: `models/${this.modelId}`,
-          content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality: this.settings.outputDimensionality
-        }))
-      },
-      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
-        googleGenerativeAITextEmbeddingResponseSchema
-      ),
-      abortSignal,
-      fetch: this.config.fetch
-    });
-    return {
-      embeddings: response.embeddings.map((item) => item.values),
-      usage: void 0,
-      rawResponse: { headers: responseHeaders }
-    };
-  }
-};
-var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
-  embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
+var googleGenerativeAIProviderOptionsSchema = import_zod3.z.object({
+  responseModalities: import_zod3.z.array(import_zod3.z.enum(["TEXT", "IMAGE"])).nullish()
 });
 
 // src/google-supported-file-url.ts
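As a reading aid, a hand-written response object that the rewritten `responseSchema` above accepts; the field names come from the schema, the values are made up:

```ts
const exampleGenerateContentResponse = {
  candidates: [
    {
      content: { role: 'model', parts: [{ text: 'Hello!' }] },
      finishReason: 'STOP',
      safetyRatings: null, // nullish in the schema
      groundingMetadata: null,
    },
  ],
  usageMetadata: {
    promptTokenCount: 5,
    candidatesTokenCount: 2,
    totalTokenCount: 7,
  },
};
```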
@@ -951,6 +954,9 @@ function createGoogleGenerativeAI(options = {}) {
   provider.embedding = createEmbeddingModel;
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;
+  provider.imageModel = (modelId) => {
+    throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
+  };
   return provider;
 }
 var google = createGoogleGenerativeAI();
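The hunk above also defines the new runtime behavior for image models: the provider registers a stub that throws instead of leaving `imageModel` undefined. A sketch of what a caller would observe, assuming `NoSuchModelError.isInstance` from `@ai-sdk/provider` and an example model id:

```ts
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { NoSuchModelError } from '@ai-sdk/provider';

const google = createGoogleGenerativeAI();

try {
  google.imageModel('imagen-3.0-generate-001'); // example id; this provider has no image models
} catch (error) {
  if (NoSuchModelError.isInstance(error)) {
    console.log(`${error.modelType} not available: ${error.modelId}`);
  }
}
```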