@ai-sdk/google 1.2.18 → 2.0.0-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +176 -34
- package/dist/index.d.mts +102 -91
- package/dist/index.d.ts +102 -91
- package/dist/index.js +449 -394
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +454 -394
- package/dist/index.mjs.map +1 -1
- package/{internal/dist → dist/internal}/index.d.mts +13 -97
- package/{internal/dist → dist/internal}/index.d.ts +13 -97
- package/{internal/dist → dist/internal}/index.js +301 -289
- package/dist/internal/index.js.map +1 -0
- package/{internal/dist → dist/internal}/index.mjs +304 -290
- package/dist/internal/index.mjs.map +1 -0
- package/internal.d.ts +1 -0
- package/package.json +19 -18
- package/internal/dist/index.js.map +0 -1
- package/internal/dist/index.mjs.map +0 -1
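
The diff below is bundled output, but it also documents the 2.0.0-alpha API surface: options that 1.x took as model settings (embedding `taskType`/`outputDimensionality`, and language-model settings such as `safetySettings`, `cachedContent`, `structuredOutputs`, and `useSearchGrounding`) are now parsed per call from `providerOptions.google` via the zod schemas added in this version. A minimal usage sketch, inferred from those schemas rather than from the package docs (the model IDs and the `embed`/`generateText` call sites are illustrative assumptions):

```js
// Illustrative sketch only, inferred from the provider-options schemas in the
// diff below; the exact 2.0.0-alpha API may differ.
import { google } from "@ai-sdk/google";
import { embed, generateText } from "ai";

// Embedding options move from model settings to per-call providerOptions.google:
const { embedding } = await embed({
  model: google.textEmbeddingModel("text-embedding-004"), // example model id
  value: "sunny day at the beach",
  providerOptions: {
    google: { taskType: "SEMANTIC_SIMILARITY", outputDimensionality: 512 },
  },
});

// Language-model settings are read from providerOptions.google as well:
const { text } = await generateText({
  model: google("gemini-2.0-flash"), // example model id
  prompt: "Give me a one-line weather summary.",
  providerOptions: {
    google: { useSearchGrounding: true },
  },
});
```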
package/dist/index.mjs
CHANGED
@@ -1,24 +1,155 @@
 // src/google-provider.ts
+import {
+  NoSuchModelError
+} from "@ai-sdk/provider";
 import {
   generateId,
   loadApiKey,
   withoutTrailingSlash
 } from "@ai-sdk/provider-utils";
 
-// src/google-generative-ai-
+// src/google-generative-ai-embedding-model.ts
+import {
+  TooManyEmbeddingValuesForCallError
+} from "@ai-sdk/provider";
 import {
   combineHeaders,
-  createEventSourceResponseHandler,
   createJsonResponseHandler,
   parseProviderOptions,
   postJsonToApi,
   resolve
 } from "@ai-sdk/provider-utils";
+import { z as z3 } from "zod";
+
+// src/google-error.ts
+import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+import { z } from "zod";
+var googleErrorDataSchema = z.object({
+  error: z.object({
+    code: z.number().nullable(),
+    message: z.string(),
+    status: z.string()
+  })
+});
+var googleFailedResponseHandler = createJsonErrorResponseHandler({
+  errorSchema: googleErrorDataSchema,
+  errorToMessage: (data) => data.error.message
+});
+
+// src/google-generative-ai-embedding-options.ts
 import { z as z2 } from "zod";
+var googleGenerativeAIEmbeddingProviderOptions = z2.object({
+  /**
+   * Optional. Optional reduced dimension for the output embedding.
+   * If set, excessive values in the output embedding are truncated from the end.
+   */
+  outputDimensionality: z2.number().optional(),
+  /**
+   * Optional. Specifies the task type for generating embeddings.
+   * Supported task types:
+   * - SEMANTIC_SIMILARITY: Optimized for text similarity.
+   * - CLASSIFICATION: Optimized for text classification.
+   * - CLUSTERING: Optimized for clustering texts based on similarity.
+   * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+   * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+   * - QUESTION_ANSWERING: Optimized for answering questions.
+   * - FACT_VERIFICATION: Optimized for verifying factual information.
+   * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+   */
+  taskType: z2.enum([
+    "SEMANTIC_SIMILARITY",
+    "CLASSIFICATION",
+    "CLUSTERING",
+    "RETRIEVAL_DOCUMENT",
+    "RETRIEVAL_QUERY",
+    "QUESTION_ANSWERING",
+    "FACT_VERIFICATION",
+    "CODE_RETRIEVAL_QUERY"
+  ]).optional()
+});
+
+// src/google-generative-ai-embedding-model.ts
+var GoogleGenerativeAIEmbeddingModel = class {
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
+    this.modelId = modelId;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async doEmbed({
+    values,
+    headers,
+    abortSignal,
+    providerOptions
+  }) {
+    const googleOptions = await parseProviderOptions({
+      provider: "google",
+      providerOptions,
+      schema: googleGenerativeAIEmbeddingProviderOptions
+    });
+    if (values.length > this.maxEmbeddingsPerCall) {
+      throw new TooManyEmbeddingValuesForCallError({
+        provider: this.provider,
+        modelId: this.modelId,
+        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+        values
+      });
+    }
+    const mergedHeaders = combineHeaders(
+      await resolve(this.config.headers),
+      headers
+    );
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi({
+      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
+      headers: mergedHeaders,
+      body: {
+        requests: values.map((value) => ({
+          model: `models/${this.modelId}`,
+          content: { role: "user", parts: [{ text: value }] },
+          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+          taskType: googleOptions == null ? void 0 : googleOptions.taskType
+        }))
+      },
+      failedResponseHandler: googleFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler(
+        googleGenerativeAITextEmbeddingResponseSchema
+      ),
+      abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      embeddings: response.embeddings.map((item) => item.values),
+      usage: void 0,
+      response: { headers: responseHeaders, body: rawValue }
+    };
+  }
+};
+var googleGenerativeAITextEmbeddingResponseSchema = z3.object({
+  embeddings: z3.array(z3.object({ values: z3.array(z3.number()) }))
+});
+
+// src/google-generative-ai-language-model.ts
+import {
+  combineHeaders as combineHeaders2,
+  createEventSourceResponseHandler,
+  createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
+  postJsonToApi as postJsonToApi2,
+  resolve as resolve2
+} from "@ai-sdk/provider-utils";
+import { z as z5 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
@@ -117,9 +248,10 @@ function isEmptyObjectSchema(jsonSchema) {
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import {
+import {
+  convertToBase64
+} from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -143,33 +275,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
               parts.push({ text: part.text });
               break;
             }
-            case "image": {
-              parts.push(
-                part.image instanceof URL ? {
-                  fileData: {
-                    mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                    fileUri: part.image.toString()
-                  }
-                } : {
-                  inlineData: {
-                    mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                    data: convertUint8ArrayToBase64(part.image)
-                  }
-                }
-              );
-              break;
-            }
             case "file": {
+              const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
               parts.push(
                 part.data instanceof URL ? {
                   fileData: {
-                    mimeType:
+                    mimeType: mediaType,
                     fileUri: part.data.toString()
                   }
                 } : {
                   inlineData: {
-                    mimeType:
-                    data: part.data
+                    mimeType: mediaType,
+                    data: convertToBase64(part.data)
                   }
                 }
               );
@@ -190,7 +307,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             return part.text.length === 0 ? void 0 : { text: part.text };
           }
           case "file": {
-            if (part.
+            if (part.mediaType !== "image/png") {
               throw new UnsupportedFunctionalityError({
                 functionality: "Only PNG images are supported in assistant messages"
               });
@@ -202,8 +319,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             }
             return {
               inlineData: {
-                mimeType: part.
-                data: part.data
+                mimeType: part.mediaType,
+                data: convertToBase64(part.data)
               }
             };
           }
@@ -249,35 +366,112 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
-// src/google-
-import {
-
-
-
-
-
-
-
+// src/google-generative-ai-options.ts
+import { z as z4 } from "zod";
+var dynamicRetrievalConfig = z4.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: z4.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: z4.number().optional()
 });
-var
-
-
+var googleGenerativeAIProviderOptions = z4.object({
+  responseModalities: z4.array(z4.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: z4.object({
+    thinkingBudget: z4.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: z4.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: z4.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: z4.array(
+    z4.object({
+      category: z4.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: z4.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: z4.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: z4.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+   */
+  useSearchGrounding: z4.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+   */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
 });
 
 // src/google-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(
-
-
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
@@ -293,12 +487,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -340,7 +533,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -375,25 +568,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId,
-    this.specificationVersion = "
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -402,188 +591,153 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b
-    const type = mode.type;
+    var _a, _b;
     const warnings = [];
-    const googleOptions =
+    const googleOptions = await parseProviderOptions2({
       provider: "google",
-      providerOptions
-      schema:
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
-    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
-      warnings.push({
-        type: "other",
-        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
-      });
-    }
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      contents,
-      systemInstruction,
-      safetySettings: this.settings.safetySettings,
-      cachedContent: this.settings.cachedContent
-    },
-    warnings
-  };
-}
-case "object-tool": {
-  return {
-    args: {
-      generationConfig,
-      contents,
-      tools: {
-        functionDeclarations: [
-          {
-            name: mode.tool.name,
-            description: (_c = mode.tool.description) != null ? _c : "",
-            parameters: convertJSONSchemaToOpenAPISchema(
-              mode.tool.parameters
-            )
-          }
-        ]
-      },
-      toolConfig: { functionCallingConfig: { mode: "ANY" } },
-      safetySettings: this.settings.safetySettings,
-      cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-
-
-
-
-
-
-
-
-
-
-
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const mergedHeaders =
-    await
+    const mergedHeaders = combineHeaders2(
+      await resolve2(this.config.headers),
       options.headers
     );
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await
+    } = await postJsonToApi2({
       url: `${this.config.baseURL}/${getModelPath(
         this.modelId
       )}:generateContent`,
       headers: mergedHeaders,
       body: args,
       failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler:
+      successfulResponseHandler: createJsonResponseHandler2(responseSchema),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     const candidate = response.candidates[0];
-    const
-    const
-
-
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+    for (const part of parts) {
+      if ("text" in part && part.text.length > 0) {
+        content.push({ type: "text", text: part.text });
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
       generateId: this.config.generateId
-    });
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     const usageMetadata = response.usageMetadata;
     return {
-
-      reasoning: getReasoningDetailsFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls:
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-
-
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
       warnings,
       providerMetadata: {
         google: {
-          groundingMetadata: (
-          safetyRatings: (
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
         }
       },
-
-
-
-
-
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
     };
   }
   async doStream(options) {
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const headers =
-    await
+    const headers = combineHeaders2(
+      await resolve2(this.config.headers),
       options.headers
     );
-    const { responseHeaders, value: response } = await
+    const { responseHeaders, value: response } = await postJsonToApi2({
       url: `${this.config.baseURL}/${getModelPath(
         this.modelId
       )}:streamGenerateContent?alt=sse`,
@@ -594,11 +748,11 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-
-
-
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
    };
     let providerMetadata = void 0;
     const generateId2 = this.config.generateId;
@@ -606,8 +760,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
@@ -615,12 +772,13 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage =
-
-
-
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
@@ -628,28 +786,14 @@ var GoogleGenerativeAILanguageModel = class {
             if (content != null) {
               const deltaText = getTextFromParts(content.parts);
               if (deltaText != null) {
-                controller.enqueue(
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
-              }
-              const reasoningDeltaText = getReasoningDetailsFromParts(
-                content.parts
-              );
-              if (reasoningDeltaText != null) {
-                for (const part of reasoningDeltaText) {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
+                controller.enqueue(deltaText);
               }
               const inlineDataParts = getInlineDataParts(content.parts);
               if (inlineDataParts != null) {
                 for (const part of inlineDataParts) {
                   controller.enqueue({
                     type: "file",
-
+                    mediaType: part.inlineData.mimeType,
                     data: part.inlineData.data
                   });
                 }
@@ -683,17 +827,17 @@ var GoogleGenerativeAILanguageModel = class {
                 finishReason: candidate.finishReason,
                 hasToolCalls
               });
-              const sources = (
+              const sources = (_g = extractSources({
                 groundingMetadata: candidate.groundingMetadata,
                 generateId: generateId2
-              })) != null ?
+              })) != null ? _g : [];
               for (const source of sources) {
-                controller.enqueue(
+                controller.enqueue(source);
               }
               providerMetadata = {
                 google: {
-                  groundingMetadata: (
-                  safetyRatings: (
+                  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+                  safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
                 }
               };
             }
@@ -708,9 +852,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
       request: { body }
     };
   }
@@ -723,6 +865,7 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId2(),
     toolName: part.functionCall.name,
@@ -730,16 +873,11 @@ function getToolCallsFromParts({
   }));
 }
 function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter(
-
-
-
-}
-function getReasoningDetailsFromParts(parts) {
-  const reasoningParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought === true
-  );
-  return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
+  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
+  return textParts == null || textParts.length === 0 ? void 0 : {
+    type: "text",
+    text: textParts.map((part) => part.text).join("")
+  };
 }
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
@@ -754,189 +892,103 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId2(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema =
-  role:
-  parts:
-
-
-      text:
-      thought: z2.boolean().nullish()
+var contentSchema = z5.object({
+  role: z5.string(),
+  parts: z5.array(
+    z5.union([
+      z5.object({
+        text: z5.string()
       }),
-
-      functionCall:
-        name:
-        args:
+      z5.object({
+        functionCall: z5.object({
+          name: z5.string(),
+          args: z5.unknown()
        })
       }),
-
-      inlineData:
-        mimeType:
-        data:
+      z5.object({
+        inlineData: z5.object({
+          mimeType: z5.string(),
+          data: z5.string()
        })
      })
    ])
  ).nullish()
 });
-var groundingChunkSchema =
-  web:
-  retrievedContext:
+var groundingChunkSchema = z5.object({
+  web: z5.object({ uri: z5.string(), title: z5.string() }).nullish(),
+  retrievedContext: z5.object({ uri: z5.string(), title: z5.string() }).nullish()
 });
-var groundingMetadataSchema =
-  webSearchQueries:
-  retrievalQueries:
-  searchEntryPoint:
-  groundingChunks:
-  groundingSupports:
-
-      segment:
-        startIndex:
-        endIndex:
-        text:
+var groundingMetadataSchema = z5.object({
+  webSearchQueries: z5.array(z5.string()).nullish(),
+  retrievalQueries: z5.array(z5.string()).nullish(),
+  searchEntryPoint: z5.object({ renderedContent: z5.string() }).nullish(),
+  groundingChunks: z5.array(groundingChunkSchema).nullish(),
+  groundingSupports: z5.array(
+    z5.object({
+      segment: z5.object({
+        startIndex: z5.number().nullish(),
+        endIndex: z5.number().nullish(),
+        text: z5.string().nullish()
      }),
-      segment_text:
-      groundingChunkIndices:
-      supportChunkIndices:
-      confidenceScores:
-      confidenceScore:
+      segment_text: z5.string().nullish(),
+      groundingChunkIndices: z5.array(z5.number()).nullish(),
+      supportChunkIndices: z5.array(z5.number()).nullish(),
+      confidenceScores: z5.array(z5.number()).nullish(),
+      confidenceScore: z5.array(z5.number()).nullish()
    })
  ).nullish(),
-  retrievalMetadata:
-
-      webDynamicRetrievalScore:
+  retrievalMetadata: z5.union([
+    z5.object({
+      webDynamicRetrievalScore: z5.number()
    }),
-
+    z5.object({})
  ]).nullish()
 });
-var safetyRatingSchema =
-  category:
-  probability:
-  probabilityScore:
-  severity:
-  severityScore:
-  blocked:
+var safetyRatingSchema = z5.object({
+  category: z5.string().nullish(),
+  probability: z5.string().nullish(),
+  probabilityScore: z5.number().nullish(),
+  severity: z5.string().nullish(),
+  severityScore: z5.number().nullish(),
+  blocked: z5.boolean().nullish()
+});
+var usageSchema = z5.object({
+  cachedContentTokenCount: z5.number().nullish(),
+  thoughtsTokenCount: z5.number().nullish(),
+  promptTokenCount: z5.number().nullish(),
+  candidatesTokenCount: z5.number().nullish(),
+  totalTokenCount: z5.number().nullish()
 });
-var responseSchema =
-  candidates:
-
-      content: contentSchema.nullish().or(
-      finishReason:
-      safetyRatings:
+var responseSchema = z5.object({
+  candidates: z5.array(
+    z5.object({
+      content: contentSchema.nullish().or(z5.object({}).strict()),
+      finishReason: z5.string().nullish(),
+      safetyRatings: z5.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ),
-  usageMetadata:
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema =
-  candidates:
-
+var chunkSchema = z5.object({
+  candidates: z5.array(
+    z5.object({
      content: contentSchema.nullish(),
-      finishReason:
-      safetyRatings:
+      finishReason: z5.string().nullish(),
+      safetyRatings: z5.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ).nullish(),
-  usageMetadata:
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z2.object({
-  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: z2.object({
-    thinkingBudget: z2.number().nullish(),
-    includeThoughts: z2.boolean().nullish()
-  }).nullish()
-});
-
-// src/google-generative-ai-embedding-model.ts
-import {
-  TooManyEmbeddingValuesForCallError
-} from "@ai-sdk/provider";
-import {
-  combineHeaders as combineHeaders2,
-  createJsonResponseHandler as createJsonResponseHandler2,
-  postJsonToApi as postJsonToApi2,
-  resolve as resolve2
-} from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
-var GoogleGenerativeAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.modelId = modelId;
-    this.settings = settings;
-    this.config = config;
-  }
-  get provider() {
-    return this.config.provider;
-  }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
-  async doEmbed({
-    values,
-    headers,
-    abortSignal
-  }) {
-    if (values.length > this.maxEmbeddingsPerCall) {
-      throw new TooManyEmbeddingValuesForCallError({
-        provider: this.provider,
-        modelId: this.modelId,
-        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
-        values
-      });
-    }
-    const mergedHeaders = combineHeaders2(
-      await resolve2(this.config.headers),
-      headers
-    );
-    const { responseHeaders, value: response } = await postJsonToApi2({
-      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
-      headers: mergedHeaders,
-      body: {
-        requests: values.map((value) => ({
-          model: `models/${this.modelId}`,
-          content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality: this.settings.outputDimensionality,
-          taskType: this.settings.taskType
-        }))
-      },
-      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: createJsonResponseHandler2(
-        googleGenerativeAITextEmbeddingResponseSchema
-      ),
-      abortSignal,
-      fetch: this.config.fetch
-    });
-    return {
-      embeddings: response.embeddings.map((item) => item.values),
-      usage: void 0,
-      rawResponse: { headers: responseHeaders }
-    };
-  }
-};
-var googleGenerativeAITextEmbeddingResponseSchema = z3.object({
-  embeddings: z3.array(z3.object({ values: z3.array(z3.number()) }))
+  usageMetadata: usageSchema.nullish()
 });
 
-// src/google-supported-file-url.ts
-function isSupportedFileUrl(url) {
-  return url.toString().startsWith("https://generativelanguage.googleapis.com/v1beta/files/");
-}
-
 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
   var _a;
@@ -949,30 +1001,35 @@ function createGoogleGenerativeAI(options = {}) {
     }),
     ...options.headers
   });
-  const createChatModel = (modelId
+  const createChatModel = (modelId) => {
     var _a2;
-    return new GoogleGenerativeAILanguageModel(modelId,
+    return new GoogleGenerativeAILanguageModel(modelId, {
       provider: "google.generative-ai",
       baseURL,
       headers: getHeaders,
       generateId: (_a2 = options.generateId) != null ? _a2 : generateId,
-
+      supportedUrls: () => ({
+        "*": [
+          // HTTP URLs:
+          /^https?:\/\/.*$/
+        ]
+      }),
       fetch: options.fetch
     });
   };
-  const createEmbeddingModel = (modelId
+  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
     provider: "google.generative-ai",
     baseURL,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const provider = function(modelId
+  const provider = function(modelId) {
     if (new.target) {
       throw new Error(
         "The Google Generative AI model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId
+    return createChatModel(modelId);
   };
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;
@@ -980,6 +1037,9 @@ function createGoogleGenerativeAI(options = {}) {
   provider.embedding = createEmbeddingModel;
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;
+  provider.imageModel = (modelId) => {
+    throw new NoSuchModelError({ modelId, modelType: "imageModel" });
+  };
   return provider;
 }
 var google = createGoogleGenerativeAI();