@ai-sdk/google 1.2.18 → 2.0.0-alpha.1
This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registries.
- package/CHANGELOG.md +168 -34
- package/dist/index.d.mts +102 -91
- package/dist/index.d.ts +102 -91
- package/dist/index.js +449 -394
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +454 -394
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +13 -97
- package/{internal/dist → dist/internal}/index.d.ts +13 -97
- package/{internal/dist → dist/internal}/index.js +301 -289
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +304 -290
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
package/dist/index.js
CHANGED
@@ -26,15 +26,136 @@ __export(src_exports, {
 module.exports = __toCommonJS(src_exports);

 // src/google-provider.ts
+var import_provider4 = require("@ai-sdk/provider");
 var import_provider_utils5 = require("@ai-sdk/provider-utils");

-// src/google-generative-ai-
-var
+// src/google-generative-ai-embedding-model.ts
+var import_provider = require("@ai-sdk/provider");
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
+var import_zod3 = require("zod");
+
+// src/google-error.ts
+var import_provider_utils = require("@ai-sdk/provider-utils");
+var import_zod = require("zod");
+var googleErrorDataSchema = import_zod.z.object({
+  error: import_zod.z.object({
+    code: import_zod.z.number().nullable(),
+    message: import_zod.z.string(),
+    status: import_zod.z.string()
+  })
+});
+var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
+  errorSchema: googleErrorDataSchema,
+  errorToMessage: (data) => data.error.message
+});
+
+// src/google-generative-ai-embedding-options.ts
 var import_zod2 = require("zod");
+var googleGenerativeAIEmbeddingProviderOptions = import_zod2.z.object({
+  /**
+   * Optional. Optional reduced dimension for the output embedding.
+   * If set, excessive values in the output embedding are truncated from the end.
+   */
+  outputDimensionality: import_zod2.z.number().optional(),
+  /**
+   * Optional. Specifies the task type for generating embeddings.
+   * Supported task types:
+   * - SEMANTIC_SIMILARITY: Optimized for text similarity.
+   * - CLASSIFICATION: Optimized for text classification.
+   * - CLUSTERING: Optimized for clustering texts based on similarity.
+   * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+   * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+   * - QUESTION_ANSWERING: Optimized for answering questions.
+   * - FACT_VERIFICATION: Optimized for verifying factual information.
+   * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+   */
+  taskType: import_zod2.z.enum([
+    "SEMANTIC_SIMILARITY",
+    "CLASSIFICATION",
+    "CLUSTERING",
+    "RETRIEVAL_DOCUMENT",
+    "RETRIEVAL_QUERY",
+    "QUESTION_ANSWERING",
+    "FACT_VERIFICATION",
+    "CODE_RETRIEVAL_QUERY"
+  ]).optional()
+});
+
+// src/google-generative-ai-embedding-model.ts
+var GoogleGenerativeAIEmbeddingModel = class {
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
+    this.modelId = modelId;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async doEmbed({
+    values,
+    headers,
+    abortSignal,
+    providerOptions
+  }) {
+    const googleOptions = await (0, import_provider_utils2.parseProviderOptions)({
+      provider: "google",
+      providerOptions,
+      schema: googleGenerativeAIEmbeddingProviderOptions
+    });
+    if (values.length > this.maxEmbeddingsPerCall) {
+      throw new import_provider.TooManyEmbeddingValuesForCallError({
+        provider: this.provider,
+        modelId: this.modelId,
+        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+        values
+      });
+    }
+    const mergedHeaders = (0, import_provider_utils2.combineHeaders)(
+      await (0, import_provider_utils2.resolve)(this.config.headers),
+      headers
+    );
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await (0, import_provider_utils2.postJsonToApi)({
+      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
+      headers: mergedHeaders,
+      body: {
+        requests: values.map((value) => ({
+          model: `models/${this.modelId}`,
+          content: { role: "user", parts: [{ text: value }] },
+          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+          taskType: googleOptions == null ? void 0 : googleOptions.taskType
+        }))
+      },
+      failedResponseHandler: googleFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
+        googleGenerativeAITextEmbeddingResponseSchema
+      ),
+      abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      embeddings: response.embeddings.map((item) => item.values),
+      usage: void 0,
+      response: { headers: responseHeaders, body: rawValue }
+    };
+  }
+};
+var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
+  embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
+});
+
+// src/google-generative-ai-language-model.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_zod5 = require("zod");

 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
@@ -130,10 +251,9 @@ function isEmptyObjectSchema(jsonSchema) {
 }

 // src/convert-to-google-generative-ai-messages.ts
-var
-var
+var import_provider2 = require("@ai-sdk/provider");
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -141,7 +261,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
     switch (role) {
       case "system": {
         if (!systemMessagesAllowed) {
-          throw new
+          throw new import_provider2.UnsupportedFunctionalityError({
            functionality: "system messages are only supported at the beginning of the conversation"
          });
        }
@@ -157,33 +277,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
               parts.push({ text: part.text });
               break;
             }
-            case "image": {
-              parts.push(
-                part.image instanceof URL ? {
-                  fileData: {
-                    mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                    fileUri: part.image.toString()
-                  }
-                } : {
-                  inlineData: {
-                    mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                    data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image)
-                  }
-                }
-              );
-              break;
-            }
             case "file": {
+              const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
               parts.push(
                 part.data instanceof URL ? {
                   fileData: {
-                    mimeType:
+                    mimeType: mediaType,
                     fileUri: part.data.toString()
                   }
                 } : {
                   inlineData: {
-                    mimeType:
-                    data: part.data
+                    mimeType: mediaType,
+                    data: (0, import_provider_utils3.convertToBase64)(part.data)
                   }
                 }
               );
@@ -204,20 +309,20 @@ function convertToGoogleGenerativeAIMessages(prompt) {
               return part.text.length === 0 ? void 0 : { text: part.text };
             }
             case "file": {
-              if (part.
-                throw new
+              if (part.mediaType !== "image/png") {
+                throw new import_provider2.UnsupportedFunctionalityError({
                   functionality: "Only PNG images are supported in assistant messages"
                 });
               }
               if (part.data instanceof URL) {
-                throw new
+                throw new import_provider2.UnsupportedFunctionalityError({
                   functionality: "File data URLs in assistant messages are not supported"
                 });
               }
               return {
                 inlineData: {
-                  mimeType: part.
-                  data: part.data
+                  mimeType: part.mediaType,
+                  data: (0, import_provider_utils3.convertToBase64)(part.data)
                 }
               };
             }
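Both message-conversion hunks above replace the 1.x `image`/`mimeType` part handling with the v2 `file`/`mediaType` shape (with `image/*` normalized to `image/jpeg`) and route binary data through `convertToBase64`. A sketch of what a multimodal prompt looks like from the caller's side under that shape, assuming the AI SDK 5 alpha message format; the file name is illustrative:

```ts
import { readFile } from 'node:fs/promises';
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'),
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Describe this image in one sentence.' },
        {
          type: 'file',
          // mediaType replaces the 1.x mimeType field; 'image/*' falls back to image/jpeg.
          mediaType: 'image/png',
          data: await readFile('./chart.png'),
        },
      ],
    },
  ],
});

console.log(result.text);
```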
@@ -263,33 +368,110 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }

-// src/google-
-var
-var
-
-
-
-
-
-
+// src/google-generative-ai-options.ts
+var import_zod4 = require("zod");
+var dynamicRetrievalConfig = import_zod4.z.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: import_zod4.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: import_zod4.z.number().optional()
 });
-var
-
-
+var googleGenerativeAIProviderOptions = import_zod4.z.object({
+  responseModalities: import_zod4.z.array(import_zod4.z.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: import_zod4.z.object({
+    thinkingBudget: import_zod4.z.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: import_zod4.z.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: import_zod4.z.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: import_zod4.z.array(
+    import_zod4.z.object({
+      category: import_zod4.z.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: import_zod4.z.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: import_zod4.z.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: import_zod4.z.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+   */
+  useSearchGrounding: import_zod4.z.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+   */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
 });

 // src/google-prepare-tools.ts
-var
-function prepareTools(
-
-
+var import_provider3 = require("@ai-sdk/provider");
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
@@ -305,12 +487,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -351,8 +532,8 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
       };
     default: {
       const _exhaustiveCheck = type;
-      throw new
-        functionality: `
+      throw new import_provider3.UnsupportedFunctionalityError({
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -387,25 +568,21 @@ function mapGoogleGenerativeAIFinishReason({

 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -414,203 +591,168 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b
-    const type = mode.type;
+    var _a, _b;
     const warnings = [];
-    const googleOptions = (0,
+    const googleOptions = await (0, import_provider_utils4.parseProviderOptions)({
       provider: "google",
-      providerOptions
-      schema:
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
-    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
-      warnings.push({
-        type: "other",
-        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
-      });
-    }
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        contents,
-        systemInstruction,
-        safetySettings: this.settings.safetySettings,
-        cachedContent: this.settings.cachedContent
-      },
-      warnings
-    };
-  }
-  case "object-tool": {
-    return {
-      args: {
-        generationConfig,
-        contents,
-        tools: {
-          functionDeclarations: [
-            {
-              name: mode.tool.name,
-              description: (_c = mode.tool.description) != null ? _c : "",
-              parameters: convertJSONSchemaToOpenAPISchema(
-                mode.tool.parameters
-              )
-            }
-          ]
-        },
-        toolConfig: { functionCallingConfig: { mode: "ANY" } },
-        safetySettings: this.settings.safetySettings,
-        cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-
-
-
-
-
-
-
-
-
-
-
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const mergedHeaders = (0,
-      await (0,
+    const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
+      await (0, import_provider_utils4.resolve)(this.config.headers),
      options.headers
    );
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
-    } = await (0,
+    } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { contents: rawPrompt, ...rawSettings } = args;
    const candidate = response.candidates[0];
-    const
-    const
-
-
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+    for (const part of parts) {
+      if ("text" in part && part.text.length > 0) {
+        content.push({ type: "text", text: part.text });
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
      generateId: this.config.generateId
-    });
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
    const usageMetadata = response.usageMetadata;
    return {
-
-      reasoning: getReasoningDetailsFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
      finishReason: mapGoogleGenerativeAIFinishReason({
        finishReason: candidate.finishReason,
-        hasToolCalls:
+        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
      usage: {
-
-
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
      warnings,
      providerMetadata: {
        google: {
-          groundingMetadata: (
-          safetyRatings: (
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
        }
      },
-
-
-
-
-
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
    };
  }
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = JSON.stringify(args);
-    const headers = (0,
-      await (0,
+    const headers = (0, import_provider_utils4.combineHeaders)(
+      await (0, import_provider_utils4.resolve)(this.config.headers),
      options.headers
    );
-    const { responseHeaders, value: response } = await (0,
+    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:streamGenerateContent?alt=sse`,
      headers,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0,
+      successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { contents: rawPrompt, ...rawSettings } = args;
    let finishReason = "unknown";
-
-
-
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
    };
    let providerMetadata = void 0;
    const generateId2 = this.config.generateId;
@@ -618,8 +760,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
@@ -627,12 +772,13 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage =
-
-
-
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
@@ -640,28 +786,14 @@ var GoogleGenerativeAILanguageModel = class {
             if (content != null) {
               const deltaText = getTextFromParts(content.parts);
               if (deltaText != null) {
-                controller.enqueue(
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
-              }
-              const reasoningDeltaText = getReasoningDetailsFromParts(
-                content.parts
-              );
-              if (reasoningDeltaText != null) {
-                for (const part of reasoningDeltaText) {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
+                controller.enqueue(deltaText);
               }
               const inlineDataParts = getInlineDataParts(content.parts);
               if (inlineDataParts != null) {
                 for (const part of inlineDataParts) {
                   controller.enqueue({
                     type: "file",
-
+                    mediaType: part.inlineData.mimeType,
                     data: part.inlineData.data
                   });
                 }
@@ -695,17 +827,17 @@ var GoogleGenerativeAILanguageModel = class {
                 finishReason: candidate.finishReason,
                 hasToolCalls
               });
-              const sources = (
+              const sources = (_g = extractSources({
                 groundingMetadata: candidate.groundingMetadata,
                 generateId: generateId2
-              })) != null ?
+              })) != null ? _g : [];
               for (const source of sources) {
-                controller.enqueue(
+                controller.enqueue(source);
               }
               providerMetadata = {
                 google: {
-                  groundingMetadata: (
-                  safetyRatings: (
+                  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+                  safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
                 }
               };
             }
@@ -720,9 +852,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
       request: { body }
     };
   }
@@ -735,6 +865,7 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId2(),
     toolName: part.functionCall.name,
@@ -742,16 +873,11 @@ function getToolCallsFromParts({
   }));
 }
 function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter(
-
-
-
-}
-function getReasoningDetailsFromParts(parts) {
-  const reasoningParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought === true
-  );
-  return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
+  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
+  return textParts == null || textParts.length === 0 ? void 0 : {
+    type: "text",
+    text: textParts.map((part) => part.text).join("")
+  };
 }
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
@@ -766,182 +892,103 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId2(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema =
-  role:
-  parts:
-
-
-        text:
-        thought: import_zod2.z.boolean().nullish()
+var contentSchema = import_zod5.z.object({
+  role: import_zod5.z.string(),
+  parts: import_zod5.z.array(
+    import_zod5.z.union([
+      import_zod5.z.object({
+        text: import_zod5.z.string()
       }),
-
-        functionCall:
-          name:
-          args:
+      import_zod5.z.object({
+        functionCall: import_zod5.z.object({
+          name: import_zod5.z.string(),
+          args: import_zod5.z.unknown()
        })
      }),
-
-        inlineData:
-          mimeType:
-          data:
+      import_zod5.z.object({
+        inlineData: import_zod5.z.object({
+          mimeType: import_zod5.z.string(),
+          data: import_zod5.z.string()
        })
      })
    ])
  ).nullish()
 });
-var groundingChunkSchema =
-  web:
-  retrievedContext:
+var groundingChunkSchema = import_zod5.z.object({
+  web: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish(),
+  retrievedContext: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish()
 });
-var groundingMetadataSchema =
-  webSearchQueries:
-  retrievalQueries:
-  searchEntryPoint:
-  groundingChunks:
-  groundingSupports:
-
-      segment:
-        startIndex:
-        endIndex:
-        text:
+var groundingMetadataSchema = import_zod5.z.object({
+  webSearchQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
+  retrievalQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
+  searchEntryPoint: import_zod5.z.object({ renderedContent: import_zod5.z.string() }).nullish(),
+  groundingChunks: import_zod5.z.array(groundingChunkSchema).nullish(),
+  groundingSupports: import_zod5.z.array(
+    import_zod5.z.object({
+      segment: import_zod5.z.object({
+        startIndex: import_zod5.z.number().nullish(),
+        endIndex: import_zod5.z.number().nullish(),
+        text: import_zod5.z.string().nullish()
      }),
-      segment_text:
-      groundingChunkIndices:
-      supportChunkIndices:
-      confidenceScores:
-      confidenceScore:
+      segment_text: import_zod5.z.string().nullish(),
+      groundingChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
+      supportChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
+      confidenceScores: import_zod5.z.array(import_zod5.z.number()).nullish(),
+      confidenceScore: import_zod5.z.array(import_zod5.z.number()).nullish()
    })
  ).nullish(),
-  retrievalMetadata:
-
-      webDynamicRetrievalScore:
+  retrievalMetadata: import_zod5.z.union([
+    import_zod5.z.object({
+      webDynamicRetrievalScore: import_zod5.z.number()
    }),
-
+    import_zod5.z.object({})
  ]).nullish()
 });
-var safetyRatingSchema =
-  category:
-  probability:
-  probabilityScore:
-  severity:
-  severityScore:
-  blocked:
+var safetyRatingSchema = import_zod5.z.object({
+  category: import_zod5.z.string().nullish(),
+  probability: import_zod5.z.string().nullish(),
+  probabilityScore: import_zod5.z.number().nullish(),
+  severity: import_zod5.z.string().nullish(),
+  severityScore: import_zod5.z.number().nullish(),
+  blocked: import_zod5.z.boolean().nullish()
 });
-var
-
-
-
-
-
+var usageSchema = import_zod5.z.object({
+  cachedContentTokenCount: import_zod5.z.number().nullish(),
+  thoughtsTokenCount: import_zod5.z.number().nullish(),
+  promptTokenCount: import_zod5.z.number().nullish(),
+  candidatesTokenCount: import_zod5.z.number().nullish(),
+  totalTokenCount: import_zod5.z.number().nullish()
+});
+var responseSchema = import_zod5.z.object({
+  candidates: import_zod5.z.array(
+    import_zod5.z.object({
+      content: contentSchema.nullish().or(import_zod5.z.object({}).strict()),
+      finishReason: import_zod5.z.string().nullish(),
+      safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ),
-  usageMetadata:
-    promptTokenCount: import_zod2.z.number().nullish(),
-    candidatesTokenCount: import_zod2.z.number().nullish(),
-    totalTokenCount: import_zod2.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = import_zod5.z.object({
+  candidates: import_zod5.z.array(
+    import_zod5.z.object({
      content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
+      finishReason: import_zod5.z.string().nullish(),
+      safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ).nullish(),
-  usageMetadata:
-    promptTokenCount: import_zod2.z.number().nullish(),
-    candidatesTokenCount: import_zod2.z.number().nullish(),
-    totalTokenCount: import_zod2.z.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
-  responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: import_zod2.z.object({
-    thinkingBudget: import_zod2.z.number().nullish(),
-    includeThoughts: import_zod2.z.boolean().nullish()
-  }).nullish()
-});
-
-// src/google-generative-ai-embedding-model.ts
-var import_provider3 = require("@ai-sdk/provider");
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-var import_zod3 = require("zod");
-var GoogleGenerativeAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.modelId = modelId;
-    this.settings = settings;
-    this.config = config;
-  }
-  get provider() {
-    return this.config.provider;
-  }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
-  async doEmbed({
-    values,
-    headers,
-    abortSignal
-  }) {
-    if (values.length > this.maxEmbeddingsPerCall) {
-      throw new import_provider3.TooManyEmbeddingValuesForCallError({
-        provider: this.provider,
-        modelId: this.modelId,
-        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
-        values
-      });
-    }
-    const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
-      await (0, import_provider_utils4.resolve)(this.config.headers),
-      headers
-    );
-    const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
-      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
-      headers: mergedHeaders,
-      body: {
-        requests: values.map((value) => ({
-          model: `models/${this.modelId}`,
-          content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality: this.settings.outputDimensionality,
-          taskType: this.settings.taskType
-        }))
-      },
-      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
-        googleGenerativeAITextEmbeddingResponseSchema
-      ),
-      abortSignal,
-      fetch: this.config.fetch
-    });
-    return {
-      embeddings: response.embeddings.map((item) => item.values),
-      usage: void 0,
-      rawResponse: { headers: responseHeaders }
-    };
-  }
-};
-var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
-  embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
+  usageMetadata: usageSchema.nullish()
 });

-// src/google-supported-file-url.ts
-function isSupportedFileUrl(url) {
-  return url.toString().startsWith("https://generativelanguage.googleapis.com/v1beta/files/");
-}
-
 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
   var _a;
@@ -954,30 +1001,35 @@ function createGoogleGenerativeAI(options = {}) {
     }),
     ...options.headers
   });
-  const createChatModel = (modelId
+  const createChatModel = (modelId) => {
     var _a2;
-    return new GoogleGenerativeAILanguageModel(modelId,
+    return new GoogleGenerativeAILanguageModel(modelId, {
       provider: "google.generative-ai",
       baseURL,
       headers: getHeaders,
       generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils5.generateId,
-
+      supportedUrls: () => ({
+        "*": [
+          // HTTP URLs:
+          /^https?:\/\/.*$/
+        ]
+      }),
       fetch: options.fetch
     });
   };
-  const createEmbeddingModel = (modelId
+  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
     provider: "google.generative-ai",
     baseURL,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const provider = function(modelId
+  const provider = function(modelId) {
     if (new.target) {
       throw new Error(
         "The Google Generative AI model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId
+    return createChatModel(modelId);
   };
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;
@@ -985,6 +1037,9 @@ function createGoogleGenerativeAI(options = {}) {
   provider.embedding = createEmbeddingModel;
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;
+  provider.imageModel = (modelId) => {
+    throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
+  };
   return provider;
 }
 var google = createGoogleGenerativeAI();