@grindxp/cli 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. package/dist/index.js +766 -134
  2. package/dist/web/client/assets/Copy.es-Bs4NgJu-.js +1 -0
  3. package/dist/web/client/assets/Sword.es-2Xm7T3t2.js +1 -0
  4. package/dist/web/client/assets/geist-cyrillic-wght-normal-CHSlOQsW.woff2 +0 -0
  5. package/dist/web/client/assets/geist-latin-ext-wght-normal-DMtmJ5ZE.woff2 +0 -0
  6. package/dist/web/client/assets/geist-latin-wght-normal-Dm3htQBi.woff2 +0 -0
  7. package/dist/web/client/assets/index-6XDcqRbL.js +42 -0
  8. package/dist/web/client/assets/index-BXM1N6tm.js +1 -0
  9. package/dist/web/client/assets/index-B_KMiE38.js +1 -0
  10. package/dist/web/client/assets/index-CGj2rOLm.js +1 -0
  11. package/dist/web/client/assets/index-CS5BuFbt.js +1 -0
  12. package/dist/web/client/assets/index-CYsASiu-.js +1 -0
  13. package/dist/web/client/assets/index-DAvwM0SX.js +1 -0
  14. package/dist/web/client/assets/index-DCBFp5DJ.js +1 -0
  15. package/dist/web/client/assets/index-DjKt1qNz.js +1 -0
  16. package/dist/web/client/assets/index-PIcFs1vr.js +1 -0
  17. package/dist/web/client/assets/instrument-serif-latin-400-italic-DKMiL14s.woff2 +0 -0
  18. package/dist/web/client/assets/instrument-serif-latin-400-italic-u__WvvIK.woff +0 -0
  19. package/dist/web/client/assets/instrument-serif-latin-400-normal-BVbkICAY.woff +0 -0
  20. package/dist/web/client/assets/instrument-serif-latin-400-normal-DnYpCC2O.woff2 +0 -0
  21. package/dist/web/client/assets/instrument-serif-latin-ext-400-italic-C9HzH3YL.woff2 +0 -0
  22. package/dist/web/client/assets/instrument-serif-latin-ext-400-italic-D7-lnxEk.woff +0 -0
  23. package/dist/web/client/assets/instrument-serif-latin-ext-400-normal-C2je3j2s.woff2 +0 -0
  24. package/dist/web/client/assets/instrument-serif-latin-ext-400-normal-CFCUzsTy.woff +0 -0
  25. package/dist/web/client/assets/jetbrains-mono-cyrillic-wght-normal-D73BlboJ.woff2 +0 -0
  26. package/dist/web/client/assets/jetbrains-mono-greek-wght-normal-Bw9x6K1M.woff2 +0 -0
  27. package/dist/web/client/assets/jetbrains-mono-latin-ext-wght-normal-DBQx-q_a.woff2 +0 -0
  28. package/dist/web/client/assets/jetbrains-mono-latin-wght-normal-B9CIFXIH.woff2 +0 -0
  29. package/dist/web/client/assets/jetbrains-mono-vietnamese-wght-normal-Bt-aOZkq.woff2 +0 -0
  30. package/dist/web/client/assets/main-BI1EOhmt.js +18 -0
  31. package/dist/web/client/assets/styles-7TpWqjrh.css +1 -0
  32. package/dist/web/client/favicon.ico +0 -0
  33. package/dist/web/server/assets/_tanstack-start-manifest_v-B_rvI8DG.js +4 -0
  34. package/dist/web/server/assets/agent.functions-BL3upUNr.js +19541 -0
  35. package/dist/web/server/assets/data.functions-DZmdFOMQ.js +285 -0
  36. package/dist/web/server/assets/index-4SxmUYH6.js +14 -0
  37. package/dist/web/server/assets/index-B2ULpkv2.js +4587 -0
  38. package/dist/web/server/assets/index-BGBMycx-.js +2275 -0
  39. package/dist/web/server/assets/index-BL8u2X7w.js +14 -0
  40. package/dist/web/server/assets/index-BQUCDamI.js +5924 -0
  41. package/dist/web/server/assets/index-BRRsXrOi.js +14 -0
  42. package/dist/web/server/assets/index-BiD7uOOh.js +14 -0
  43. package/dist/web/server/assets/index-CB8UtTN8.js +66 -0
  44. package/dist/web/server/assets/index-D2yaimYL.js +14 -0
  45. package/dist/web/server/assets/index-D3RUqTdb.js +14 -0
  46. package/dist/web/server/assets/index-DTB2dYCz.js +1426 -0
  47. package/dist/web/server/assets/index-DfU25rnD.js +477 -0
  48. package/dist/web/server/assets/index-SHH7zSKt.js +66 -0
  49. package/dist/web/server/assets/router-CXyGzWDS.js +589 -0
  50. package/dist/web/server/assets/sessions-UCWtijHE.js +438 -0
  51. package/dist/web/server/assets/start-HYkvq4Ni.js +4 -0
  52. package/dist/web/server/assets/token-DGoahKjI.js +86 -0
  53. package/dist/web/server/assets/token-util-BopJPy-I.js +451 -0
  54. package/dist/web/server/assets/token-util-Bw35afYM.js +30 -0
  55. package/dist/web/server/assets/vault.server-CscY5Z8e.js +19357 -0
  56. package/dist/web/server/server.js +4889 -0
  57. package/package.json +53 -51
@@ -0,0 +1,2275 @@
1
+ import { w as withoutTrailingSlash, g as generateId, p as parseProviderOptions, a as combineHeaders, r as resolve, b as postJsonToApi, e as withUserAgentSuffix, d as loadApiKey, U as UnsupportedFunctionalityError, j as convertToBase64, f as createJsonResponseHandler, h as createEventSourceResponseHandler, y as lazySchema, x as createJsonErrorResponseHandler, z as zodSchema, o as object, q as number, _ as _enum, D as record, s as string, n as boolean, k as array, F as createProviderToolFactoryWithOutputSchema, E as createProviderToolFactory, u as union, C as unknown, T as TooManyEmbeddingValuesForCallError, Y as convertUint8ArrayToBase64, $ as AISDKError, a0 as delay, a1 as getFromApi } from "./agent.functions-BL3upUNr.js";
2
+ import "./sessions-UCWtijHE.js";
3
+ import "../server.js";
4
+ import "node:async_hooks";
5
+ import "node:stream";
6
+ import "react/jsx-runtime";
7
+ import "@tanstack/react-router/ssr/server";
8
+ import "@tanstack/react-router";
9
+ import "./vault.server-CscY5Z8e.js";
10
+ import "node:fs";
11
+ import "node:os";
12
+ import "node:path";
13
+ import "path";
14
+ import "fs";
15
+ import "child_process";
16
+ import "node:buffer";
17
+ import "events";
18
+ import "https";
19
+ import "http";
20
+ import "net";
21
+ import "tls";
22
+ import "crypto";
23
+ import "stream";
24
+ import "url";
25
+ import "zlib";
26
+ import "buffer";
27
+ import "node:crypto";
28
+ import "fs/promises";
29
+ import "os";
30
+ import "node:child_process";
31
+ var VERSION = "3.0.30";
32
+ var googleErrorDataSchema = lazySchema(
33
+ () => zodSchema(
34
+ object({
35
+ error: object({
36
+ code: number().nullable(),
37
+ message: string(),
38
+ status: string()
39
+ })
40
+ })
41
+ )
42
+ );
43
+ var googleFailedResponseHandler = createJsonErrorResponseHandler({
44
+ errorSchema: googleErrorDataSchema,
45
+ errorToMessage: (data) => data.error.message
46
+ });
47
+ var googleEmbeddingModelOptions = lazySchema(
48
+ () => zodSchema(
49
+ object({
50
+ /**
51
+ * Optional. Optional reduced dimension for the output embedding.
52
+ * If set, excessive values in the output embedding are truncated from the end.
53
+ */
54
+ outputDimensionality: number().optional(),
55
+ /**
56
+ * Optional. Specifies the task type for generating embeddings.
57
+ * Supported task types:
58
+ * - SEMANTIC_SIMILARITY: Optimized for text similarity.
59
+ * - CLASSIFICATION: Optimized for text classification.
60
+ * - CLUSTERING: Optimized for clustering texts based on similarity.
61
+ * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
62
+ * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
63
+ * - QUESTION_ANSWERING: Optimized for answering questions.
64
+ * - FACT_VERIFICATION: Optimized for verifying factual information.
65
+ * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
66
+ */
67
+ taskType: _enum([
68
+ "SEMANTIC_SIMILARITY",
69
+ "CLASSIFICATION",
70
+ "CLUSTERING",
71
+ "RETRIEVAL_DOCUMENT",
72
+ "RETRIEVAL_QUERY",
73
+ "QUESTION_ANSWERING",
74
+ "FACT_VERIFICATION",
75
+ "CODE_RETRIEVAL_QUERY"
76
+ ]).optional()
77
+ })
78
+ )
79
+ );
80
+ var GoogleGenerativeAIEmbeddingModel = class {
81
+ constructor(modelId, config) {
82
+ this.specificationVersion = "v3";
83
+ this.maxEmbeddingsPerCall = 2048;
84
+ this.supportsParallelCalls = true;
85
+ this.modelId = modelId;
86
+ this.config = config;
87
+ }
88
+ get provider() {
89
+ return this.config.provider;
90
+ }
91
+ async doEmbed({
92
+ values,
93
+ headers,
94
+ abortSignal,
95
+ providerOptions
96
+ }) {
97
+ const googleOptions = await parseProviderOptions({
98
+ provider: "google",
99
+ providerOptions,
100
+ schema: googleEmbeddingModelOptions
101
+ });
102
+ if (values.length > this.maxEmbeddingsPerCall) {
103
+ throw new TooManyEmbeddingValuesForCallError({
104
+ provider: this.provider,
105
+ modelId: this.modelId,
106
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
107
+ values
108
+ });
109
+ }
110
+ const mergedHeaders = combineHeaders(
111
+ await resolve(this.config.headers),
112
+ headers
113
+ );
114
+ if (values.length === 1) {
115
+ const {
116
+ responseHeaders: responseHeaders2,
117
+ value: response2,
118
+ rawValue: rawValue2
119
+ } = await postJsonToApi({
120
+ url: `${this.config.baseURL}/models/${this.modelId}:embedContent`,
121
+ headers: mergedHeaders,
122
+ body: {
123
+ model: `models/${this.modelId}`,
124
+ content: {
125
+ parts: [{ text: values[0] }]
126
+ },
127
+ outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
128
+ taskType: googleOptions == null ? void 0 : googleOptions.taskType
129
+ },
130
+ failedResponseHandler: googleFailedResponseHandler,
131
+ successfulResponseHandler: createJsonResponseHandler(
132
+ googleGenerativeAISingleEmbeddingResponseSchema
133
+ ),
134
+ abortSignal,
135
+ fetch: this.config.fetch
136
+ });
137
+ return {
138
+ warnings: [],
139
+ embeddings: [response2.embedding.values],
140
+ usage: void 0,
141
+ response: { headers: responseHeaders2, body: rawValue2 }
142
+ };
143
+ }
144
+ const {
145
+ responseHeaders,
146
+ value: response,
147
+ rawValue
148
+ } = await postJsonToApi({
149
+ url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
150
+ headers: mergedHeaders,
151
+ body: {
152
+ requests: values.map((value) => ({
153
+ model: `models/${this.modelId}`,
154
+ content: { role: "user", parts: [{ text: value }] },
155
+ outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
156
+ taskType: googleOptions == null ? void 0 : googleOptions.taskType
157
+ }))
158
+ },
159
+ failedResponseHandler: googleFailedResponseHandler,
160
+ successfulResponseHandler: createJsonResponseHandler(
161
+ googleGenerativeAITextEmbeddingResponseSchema
162
+ ),
163
+ abortSignal,
164
+ fetch: this.config.fetch
165
+ });
166
+ return {
167
+ warnings: [],
168
+ embeddings: response.embeddings.map((item) => item.values),
169
+ usage: void 0,
170
+ response: { headers: responseHeaders, body: rawValue }
171
+ };
172
+ }
173
+ };
174
+ var googleGenerativeAITextEmbeddingResponseSchema = lazySchema(
175
+ () => zodSchema(
176
+ object({
177
+ embeddings: array(object({ values: array(number()) }))
178
+ })
179
+ )
180
+ );
181
+ var googleGenerativeAISingleEmbeddingResponseSchema = lazySchema(
182
+ () => zodSchema(
183
+ object({
184
+ embedding: object({ values: array(number()) })
185
+ })
186
+ )
187
+ );
188
+ function convertGoogleGenerativeAIUsage(usage) {
189
+ var _a, _b, _c, _d;
190
+ if (usage == null) {
191
+ return {
192
+ inputTokens: {
193
+ total: void 0,
194
+ noCache: void 0,
195
+ cacheRead: void 0,
196
+ cacheWrite: void 0
197
+ },
198
+ outputTokens: {
199
+ total: void 0,
200
+ text: void 0,
201
+ reasoning: void 0
202
+ },
203
+ raw: void 0
204
+ };
205
+ }
206
+ const promptTokens = (_a = usage.promptTokenCount) != null ? _a : 0;
207
+ const candidatesTokens = (_b = usage.candidatesTokenCount) != null ? _b : 0;
208
+ const cachedContentTokens = (_c = usage.cachedContentTokenCount) != null ? _c : 0;
209
+ const thoughtsTokens = (_d = usage.thoughtsTokenCount) != null ? _d : 0;
210
+ return {
211
+ inputTokens: {
212
+ total: promptTokens,
213
+ noCache: promptTokens - cachedContentTokens,
214
+ cacheRead: cachedContentTokens,
215
+ cacheWrite: void 0
216
+ },
217
+ outputTokens: {
218
+ total: candidatesTokens + thoughtsTokens,
219
+ text: candidatesTokens,
220
+ reasoning: thoughtsTokens
221
+ },
222
+ raw: usage
223
+ };
224
+ }
225
+ function convertJSONSchemaToOpenAPISchema(jsonSchema, isRoot = true) {
226
+ if (jsonSchema == null) {
227
+ return void 0;
228
+ }
229
+ if (isEmptyObjectSchema(jsonSchema)) {
230
+ if (isRoot) {
231
+ return void 0;
232
+ }
233
+ if (typeof jsonSchema === "object" && jsonSchema.description) {
234
+ return { type: "object", description: jsonSchema.description };
235
+ }
236
+ return { type: "object" };
237
+ }
238
+ if (typeof jsonSchema === "boolean") {
239
+ return { type: "boolean", properties: {} };
240
+ }
241
+ const {
242
+ type,
243
+ description,
244
+ required,
245
+ properties,
246
+ items,
247
+ allOf,
248
+ anyOf,
249
+ oneOf,
250
+ format,
251
+ const: constValue,
252
+ minLength,
253
+ enum: enumValues
254
+ } = jsonSchema;
255
+ const result = {};
256
+ if (description) result.description = description;
257
+ if (required) result.required = required;
258
+ if (format) result.format = format;
259
+ if (constValue !== void 0) {
260
+ result.enum = [constValue];
261
+ }
262
+ if (type) {
263
+ if (Array.isArray(type)) {
264
+ const hasNull = type.includes("null");
265
+ const nonNullTypes = type.filter((t) => t !== "null");
266
+ if (nonNullTypes.length === 0) {
267
+ result.type = "null";
268
+ } else {
269
+ result.anyOf = nonNullTypes.map((t) => ({ type: t }));
270
+ if (hasNull) {
271
+ result.nullable = true;
272
+ }
273
+ }
274
+ } else {
275
+ result.type = type;
276
+ }
277
+ }
278
+ if (enumValues !== void 0) {
279
+ result.enum = enumValues;
280
+ }
281
+ if (properties != null) {
282
+ result.properties = Object.entries(properties).reduce(
283
+ (acc, [key, value]) => {
284
+ acc[key] = convertJSONSchemaToOpenAPISchema(value, false);
285
+ return acc;
286
+ },
287
+ {}
288
+ );
289
+ }
290
+ if (items) {
291
+ result.items = Array.isArray(items) ? items.map((item) => convertJSONSchemaToOpenAPISchema(item, false)) : convertJSONSchemaToOpenAPISchema(items, false);
292
+ }
293
+ if (allOf) {
294
+ result.allOf = allOf.map(
295
+ (item) => convertJSONSchemaToOpenAPISchema(item, false)
296
+ );
297
+ }
298
+ if (anyOf) {
299
+ if (anyOf.some(
300
+ (schema) => typeof schema === "object" && (schema == null ? void 0 : schema.type) === "null"
301
+ )) {
302
+ const nonNullSchemas = anyOf.filter(
303
+ (schema) => !(typeof schema === "object" && (schema == null ? void 0 : schema.type) === "null")
304
+ );
305
+ if (nonNullSchemas.length === 1) {
306
+ const converted = convertJSONSchemaToOpenAPISchema(
307
+ nonNullSchemas[0],
308
+ false
309
+ );
310
+ if (typeof converted === "object") {
311
+ result.nullable = true;
312
+ Object.assign(result, converted);
313
+ }
314
+ } else {
315
+ result.anyOf = nonNullSchemas.map(
316
+ (item) => convertJSONSchemaToOpenAPISchema(item, false)
317
+ );
318
+ result.nullable = true;
319
+ }
320
+ } else {
321
+ result.anyOf = anyOf.map(
322
+ (item) => convertJSONSchemaToOpenAPISchema(item, false)
323
+ );
324
+ }
325
+ }
326
+ if (oneOf) {
327
+ result.oneOf = oneOf.map(
328
+ (item) => convertJSONSchemaToOpenAPISchema(item, false)
329
+ );
330
+ }
331
+ if (minLength !== void 0) {
332
+ result.minLength = minLength;
333
+ }
334
+ return result;
335
+ }
336
+ function isEmptyObjectSchema(jsonSchema) {
337
+ return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
338
+ }
339
+ function convertToGoogleGenerativeAIMessages(prompt, options) {
340
+ var _a, _b, _c;
341
+ const systemInstructionParts = [];
342
+ const contents = [];
343
+ let systemMessagesAllowed = true;
344
+ const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
345
+ const providerOptionsName = (_b = options == null ? void 0 : options.providerOptionsName) != null ? _b : "google";
346
+ for (const { role, content } of prompt) {
347
+ switch (role) {
348
+ case "system": {
349
+ if (!systemMessagesAllowed) {
350
+ throw new UnsupportedFunctionalityError({
351
+ functionality: "system messages are only supported at the beginning of the conversation"
352
+ });
353
+ }
354
+ systemInstructionParts.push({ text: content });
355
+ break;
356
+ }
357
+ case "user": {
358
+ systemMessagesAllowed = false;
359
+ const parts = [];
360
+ for (const part of content) {
361
+ switch (part.type) {
362
+ case "text": {
363
+ parts.push({ text: part.text });
364
+ break;
365
+ }
366
+ case "file": {
367
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
368
+ parts.push(
369
+ part.data instanceof URL ? {
370
+ fileData: {
371
+ mimeType: mediaType,
372
+ fileUri: part.data.toString()
373
+ }
374
+ } : {
375
+ inlineData: {
376
+ mimeType: mediaType,
377
+ data: convertToBase64(part.data)
378
+ }
379
+ }
380
+ );
381
+ break;
382
+ }
383
+ }
384
+ }
385
+ contents.push({ role: "user", parts });
386
+ break;
387
+ }
388
+ case "assistant": {
389
+ systemMessagesAllowed = false;
390
+ contents.push({
391
+ role: "model",
392
+ parts: content.map((part) => {
393
+ var _a2, _b2, _c2;
394
+ const providerOpts = (_c2 = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _c2 : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : void 0;
395
+ const thoughtSignature = (providerOpts == null ? void 0 : providerOpts.thoughtSignature) != null ? String(providerOpts.thoughtSignature) : void 0;
396
+ switch (part.type) {
397
+ case "text": {
398
+ return part.text.length === 0 ? void 0 : {
399
+ text: part.text,
400
+ thoughtSignature
401
+ };
402
+ }
403
+ case "reasoning": {
404
+ return part.text.length === 0 ? void 0 : {
405
+ text: part.text,
406
+ thought: true,
407
+ thoughtSignature
408
+ };
409
+ }
410
+ case "file": {
411
+ if (part.data instanceof URL) {
412
+ throw new UnsupportedFunctionalityError({
413
+ functionality: "File data URLs in assistant messages are not supported"
414
+ });
415
+ }
416
+ return {
417
+ inlineData: {
418
+ mimeType: part.mediaType,
419
+ data: convertToBase64(part.data)
420
+ },
421
+ thoughtSignature
422
+ };
423
+ }
424
+ case "tool-call": {
425
+ return {
426
+ functionCall: {
427
+ name: part.toolName,
428
+ args: part.input
429
+ },
430
+ thoughtSignature
431
+ };
432
+ }
433
+ }
434
+ }).filter((part) => part !== void 0)
435
+ });
436
+ break;
437
+ }
438
+ case "tool": {
439
+ systemMessagesAllowed = false;
440
+ const parts = [];
441
+ for (const part of content) {
442
+ if (part.type === "tool-approval-response") {
443
+ continue;
444
+ }
445
+ const output = part.output;
446
+ if (output.type === "content") {
447
+ for (const contentPart of output.value) {
448
+ switch (contentPart.type) {
449
+ case "text":
450
+ parts.push({
451
+ functionResponse: {
452
+ name: part.toolName,
453
+ response: {
454
+ name: part.toolName,
455
+ content: contentPart.text
456
+ }
457
+ }
458
+ });
459
+ break;
460
+ case "image-data":
461
+ parts.push(
462
+ {
463
+ inlineData: {
464
+ mimeType: contentPart.mediaType,
465
+ data: contentPart.data
466
+ }
467
+ },
468
+ {
469
+ text: "Tool executed successfully and returned this image as a response"
470
+ }
471
+ );
472
+ break;
473
+ default:
474
+ parts.push({ text: JSON.stringify(contentPart) });
475
+ break;
476
+ }
477
+ }
478
+ } else {
479
+ parts.push({
480
+ functionResponse: {
481
+ name: part.toolName,
482
+ response: {
483
+ name: part.toolName,
484
+ content: output.type === "execution-denied" ? (_c = output.reason) != null ? _c : "Tool execution denied." : output.value
485
+ }
486
+ }
487
+ });
488
+ }
489
+ }
490
+ contents.push({
491
+ role: "user",
492
+ parts
493
+ });
494
+ break;
495
+ }
496
+ }
497
+ }
498
+ if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
499
+ const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
500
+ contents[0].parts.unshift({ text: systemText + "\n\n" });
501
+ }
502
+ return {
503
+ systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
504
+ contents
505
+ };
506
+ }
507
+ function getModelPath(modelId) {
508
+ return modelId.includes("/") ? modelId : `models/${modelId}`;
509
+ }
510
+ var googleLanguageModelOptions = lazySchema(
511
+ () => zodSchema(
512
+ object({
513
+ responseModalities: array(_enum(["TEXT", "IMAGE"])).optional(),
514
+ thinkingConfig: object({
515
+ thinkingBudget: number().optional(),
516
+ includeThoughts: boolean().optional(),
517
+ // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#thinking_level
518
+ thinkingLevel: _enum(["minimal", "low", "medium", "high"]).optional()
519
+ }).optional(),
520
+ /**
521
+ * Optional.
522
+ * The name of the cached content used as context to serve the prediction.
523
+ * Format: cachedContents/{cachedContent}
524
+ */
525
+ cachedContent: string().optional(),
526
+ /**
527
+ * Optional. Enable structured output. Default is true.
528
+ *
529
+ * This is useful when the JSON Schema contains elements that are
530
+ * not supported by the OpenAPI schema version that
531
+ * Google Generative AI uses. You can use this to disable
532
+ * structured outputs if you need to.
533
+ */
534
+ structuredOutputs: boolean().optional(),
535
+ /**
536
+ * Optional. A list of unique safety settings for blocking unsafe content.
537
+ */
538
+ safetySettings: array(
539
+ object({
540
+ category: _enum([
541
+ "HARM_CATEGORY_UNSPECIFIED",
542
+ "HARM_CATEGORY_HATE_SPEECH",
543
+ "HARM_CATEGORY_DANGEROUS_CONTENT",
544
+ "HARM_CATEGORY_HARASSMENT",
545
+ "HARM_CATEGORY_SEXUALLY_EXPLICIT",
546
+ "HARM_CATEGORY_CIVIC_INTEGRITY"
547
+ ]),
548
+ threshold: _enum([
549
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
550
+ "BLOCK_LOW_AND_ABOVE",
551
+ "BLOCK_MEDIUM_AND_ABOVE",
552
+ "BLOCK_ONLY_HIGH",
553
+ "BLOCK_NONE",
554
+ "OFF"
555
+ ])
556
+ })
557
+ ).optional(),
558
+ threshold: _enum([
559
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
560
+ "BLOCK_LOW_AND_ABOVE",
561
+ "BLOCK_MEDIUM_AND_ABOVE",
562
+ "BLOCK_ONLY_HIGH",
563
+ "BLOCK_NONE",
564
+ "OFF"
565
+ ]).optional(),
566
+ /**
567
+ * Optional. Enables timestamp understanding for audio-only files.
568
+ *
569
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
570
+ */
571
+ audioTimestamp: boolean().optional(),
572
+ /**
573
+ * Optional. Defines labels used in billing reports. Available on Vertex AI only.
574
+ *
575
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls
576
+ */
577
+ labels: record(string(), string()).optional(),
578
+ /**
579
+ * Optional. If specified, the media resolution specified will be used.
580
+ *
581
+ * https://ai.google.dev/api/generate-content#MediaResolution
582
+ */
583
+ mediaResolution: _enum([
584
+ "MEDIA_RESOLUTION_UNSPECIFIED",
585
+ "MEDIA_RESOLUTION_LOW",
586
+ "MEDIA_RESOLUTION_MEDIUM",
587
+ "MEDIA_RESOLUTION_HIGH"
588
+ ]).optional(),
589
+ /**
590
+ * Optional. Configures the image generation aspect ratio for Gemini models.
591
+ *
592
+ * https://ai.google.dev/gemini-api/docs/image-generation#aspect_ratios
593
+ */
594
+ imageConfig: object({
595
+ aspectRatio: _enum([
596
+ "1:1",
597
+ "2:3",
598
+ "3:2",
599
+ "3:4",
600
+ "4:3",
601
+ "4:5",
602
+ "5:4",
603
+ "9:16",
604
+ "16:9",
605
+ "21:9"
606
+ ]).optional(),
607
+ imageSize: _enum(["1K", "2K", "4K"]).optional()
608
+ }).optional(),
609
+ /**
610
+ * Optional. Configuration for grounding retrieval.
611
+ * Used to provide location context for Google Maps and Google Search grounding.
612
+ *
613
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
614
+ */
615
+ retrievalConfig: object({
616
+ latLng: object({
617
+ latitude: number(),
618
+ longitude: number()
619
+ }).optional()
620
+ }).optional()
621
+ })
622
+ )
623
+ );
624
+ function prepareTools({
625
+ tools,
626
+ toolChoice,
627
+ modelId
628
+ }) {
629
+ var _a;
630
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
631
+ const toolWarnings = [];
632
+ const isLatest = [
633
+ "gemini-flash-latest",
634
+ "gemini-flash-lite-latest",
635
+ "gemini-pro-latest"
636
+ ].some((id) => id === modelId);
637
+ const isGemini2orNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
638
+ const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
639
+ const supportsFileSearch = modelId.includes("gemini-2.5") || modelId.includes("gemini-3");
640
+ if (tools == null) {
641
+ return { tools: void 0, toolConfig: void 0, toolWarnings };
642
+ }
643
+ const hasFunctionTools = tools.some((tool) => tool.type === "function");
644
+ const hasProviderTools = tools.some((tool) => tool.type === "provider");
645
+ if (hasFunctionTools && hasProviderTools) {
646
+ toolWarnings.push({
647
+ type: "unsupported",
648
+ feature: `combination of function and provider-defined tools`
649
+ });
650
+ }
651
+ if (hasProviderTools) {
652
+ const googleTools2 = [];
653
+ const ProviderTools = tools.filter((tool) => tool.type === "provider");
654
+ ProviderTools.forEach((tool) => {
655
+ switch (tool.id) {
656
+ case "google.google_search":
657
+ if (isGemini2orNewer) {
658
+ googleTools2.push({ googleSearch: {} });
659
+ } else if (supportsDynamicRetrieval) {
660
+ googleTools2.push({
661
+ googleSearchRetrieval: {
662
+ dynamicRetrievalConfig: {
663
+ mode: tool.args.mode,
664
+ dynamicThreshold: tool.args.dynamicThreshold
665
+ }
666
+ }
667
+ });
668
+ } else {
669
+ googleTools2.push({ googleSearchRetrieval: {} });
670
+ }
671
+ break;
672
+ case "google.enterprise_web_search":
673
+ if (isGemini2orNewer) {
674
+ googleTools2.push({ enterpriseWebSearch: {} });
675
+ } else {
676
+ toolWarnings.push({
677
+ type: "unsupported",
678
+ feature: `provider-defined tool ${tool.id}`,
679
+ details: "Enterprise Web Search requires Gemini 2.0 or newer."
680
+ });
681
+ }
682
+ break;
683
+ case "google.url_context":
684
+ if (isGemini2orNewer) {
685
+ googleTools2.push({ urlContext: {} });
686
+ } else {
687
+ toolWarnings.push({
688
+ type: "unsupported",
689
+ feature: `provider-defined tool ${tool.id}`,
690
+ details: "The URL context tool is not supported with other Gemini models than Gemini 2."
691
+ });
692
+ }
693
+ break;
694
+ case "google.code_execution":
695
+ if (isGemini2orNewer) {
696
+ googleTools2.push({ codeExecution: {} });
697
+ } else {
698
+ toolWarnings.push({
699
+ type: "unsupported",
700
+ feature: `provider-defined tool ${tool.id}`,
701
+ details: "The code execution tools is not supported with other Gemini models than Gemini 2."
702
+ });
703
+ }
704
+ break;
705
+ case "google.file_search":
706
+ if (supportsFileSearch) {
707
+ googleTools2.push({ fileSearch: { ...tool.args } });
708
+ } else {
709
+ toolWarnings.push({
710
+ type: "unsupported",
711
+ feature: `provider-defined tool ${tool.id}`,
712
+ details: "The file search tool is only supported with Gemini 2.5 models and Gemini 3 models."
713
+ });
714
+ }
715
+ break;
716
+ case "google.vertex_rag_store":
717
+ if (isGemini2orNewer) {
718
+ googleTools2.push({
719
+ retrieval: {
720
+ vertex_rag_store: {
721
+ rag_resources: {
722
+ rag_corpus: tool.args.ragCorpus
723
+ },
724
+ similarity_top_k: tool.args.topK
725
+ }
726
+ }
727
+ });
728
+ } else {
729
+ toolWarnings.push({
730
+ type: "unsupported",
731
+ feature: `provider-defined tool ${tool.id}`,
732
+ details: "The RAG store tool is not supported with other Gemini models than Gemini 2."
733
+ });
734
+ }
735
+ break;
736
+ case "google.google_maps":
737
+ if (isGemini2orNewer) {
738
+ googleTools2.push({ googleMaps: {} });
739
+ } else {
740
+ toolWarnings.push({
741
+ type: "unsupported",
742
+ feature: `provider-defined tool ${tool.id}`,
743
+ details: "The Google Maps grounding tool is not supported with Gemini models other than Gemini 2 or newer."
744
+ });
745
+ }
746
+ break;
747
+ default:
748
+ toolWarnings.push({
749
+ type: "unsupported",
750
+ feature: `provider-defined tool ${tool.id}`
751
+ });
752
+ break;
753
+ }
754
+ });
755
+ return {
756
+ tools: googleTools2.length > 0 ? googleTools2 : void 0,
757
+ toolConfig: void 0,
758
+ toolWarnings
759
+ };
760
+ }
761
+ const functionDeclarations = [];
762
+ for (const tool of tools) {
763
+ switch (tool.type) {
764
+ case "function":
765
+ functionDeclarations.push({
766
+ name: tool.name,
767
+ description: (_a = tool.description) != null ? _a : "",
768
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
769
+ });
770
+ break;
771
+ default:
772
+ toolWarnings.push({
773
+ type: "unsupported",
774
+ feature: `function tool ${tool.name}`
775
+ });
776
+ break;
777
+ }
778
+ }
779
+ if (toolChoice == null) {
780
+ return {
781
+ tools: [{ functionDeclarations }],
782
+ toolConfig: void 0,
783
+ toolWarnings
784
+ };
785
+ }
786
+ const type = toolChoice.type;
787
+ switch (type) {
788
+ case "auto":
789
+ return {
790
+ tools: [{ functionDeclarations }],
791
+ toolConfig: { functionCallingConfig: { mode: "AUTO" } },
792
+ toolWarnings
793
+ };
794
+ case "none":
795
+ return {
796
+ tools: [{ functionDeclarations }],
797
+ toolConfig: { functionCallingConfig: { mode: "NONE" } },
798
+ toolWarnings
799
+ };
800
+ case "required":
801
+ return {
802
+ tools: [{ functionDeclarations }],
803
+ toolConfig: { functionCallingConfig: { mode: "ANY" } },
804
+ toolWarnings
805
+ };
806
+ case "tool":
807
+ return {
808
+ tools: [{ functionDeclarations }],
809
+ toolConfig: {
810
+ functionCallingConfig: {
811
+ mode: "ANY",
812
+ allowedFunctionNames: [toolChoice.toolName]
813
+ }
814
+ },
815
+ toolWarnings
816
+ };
817
+ default: {
818
+ const _exhaustiveCheck = type;
819
+ throw new UnsupportedFunctionalityError({
820
+ functionality: `tool choice type: ${_exhaustiveCheck}`
821
+ });
822
+ }
823
+ }
824
+ }
825
+ function mapGoogleGenerativeAIFinishReason({
826
+ finishReason,
827
+ hasToolCalls
828
+ }) {
829
+ switch (finishReason) {
830
+ case "STOP":
831
+ return hasToolCalls ? "tool-calls" : "stop";
832
+ case "MAX_TOKENS":
833
+ return "length";
834
+ case "IMAGE_SAFETY":
835
+ case "RECITATION":
836
+ case "SAFETY":
837
+ case "BLOCKLIST":
838
+ case "PROHIBITED_CONTENT":
839
+ case "SPII":
840
+ return "content-filter";
841
+ case "MALFORMED_FUNCTION_CALL":
842
+ return "error";
843
+ case "FINISH_REASON_UNSPECIFIED":
844
+ case "OTHER":
845
+ default:
846
+ return "other";
847
+ }
848
+ }
849
+ var GoogleGenerativeAILanguageModel = class {
850
+ constructor(modelId, config) {
851
+ this.specificationVersion = "v3";
852
+ var _a;
853
+ this.modelId = modelId;
854
+ this.config = config;
855
+ this.generateId = (_a = config.generateId) != null ? _a : generateId;
856
+ }
857
+ get provider() {
858
+ return this.config.provider;
859
+ }
860
+ get supportedUrls() {
861
+ var _a, _b, _c;
862
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
863
+ }
864
+ async getArgs({
865
+ prompt,
866
+ maxOutputTokens,
867
+ temperature,
868
+ topP,
869
+ topK,
870
+ frequencyPenalty,
871
+ presencePenalty,
872
+ stopSequences,
873
+ responseFormat,
874
+ seed,
875
+ tools,
876
+ toolChoice,
877
+ providerOptions
878
+ }) {
879
+ var _a;
880
+ const warnings = [];
881
+ const providerOptionsName = this.config.provider.includes("vertex") ? "vertex" : "google";
882
+ let googleOptions = await parseProviderOptions({
883
+ provider: providerOptionsName,
884
+ providerOptions,
885
+ schema: googleLanguageModelOptions
886
+ });
887
+ if (googleOptions == null && providerOptionsName !== "google") {
888
+ googleOptions = await parseProviderOptions({
889
+ provider: "google",
890
+ providerOptions,
891
+ schema: googleLanguageModelOptions
892
+ });
893
+ }
894
+ if ((tools == null ? void 0 : tools.some(
895
+ (tool) => tool.type === "provider" && tool.id === "google.vertex_rag_store"
896
+ )) && !this.config.provider.startsWith("google.vertex.")) {
897
+ warnings.push({
898
+ type: "other",
899
+ message: `The 'vertex_rag_store' tool is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
900
+ });
901
+ }
902
+ const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
903
+ const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
904
+ prompt,
905
+ { isGemmaModel, providerOptionsName }
906
+ );
907
+ const {
908
+ tools: googleTools2,
909
+ toolConfig: googleToolConfig,
910
+ toolWarnings
911
+ } = prepareTools({
912
+ tools,
913
+ toolChoice,
914
+ modelId: this.modelId
915
+ });
916
+ return {
917
+ args: {
918
+ generationConfig: {
919
+ // standardized settings:
920
+ maxOutputTokens,
921
+ temperature,
922
+ topK,
923
+ topP,
924
+ frequencyPenalty,
925
+ presencePenalty,
926
+ stopSequences,
927
+ seed,
928
+ // response format:
929
+ responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
930
+ responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
931
+ // so this is needed as an escape hatch:
932
+ // TODO convert into provider option
933
+ ((_a = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _a : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
934
+ ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
935
+ audioTimestamp: googleOptions.audioTimestamp
936
+ },
937
+ // provider options:
938
+ responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
939
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
940
+ ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
941
+ mediaResolution: googleOptions.mediaResolution
942
+ },
943
+ ...(googleOptions == null ? void 0 : googleOptions.imageConfig) && {
944
+ imageConfig: googleOptions.imageConfig
945
+ }
946
+ },
947
+ contents,
948
+ systemInstruction: isGemmaModel ? void 0 : systemInstruction,
949
+ safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
950
+ tools: googleTools2,
951
+ toolConfig: (googleOptions == null ? void 0 : googleOptions.retrievalConfig) ? {
952
+ ...googleToolConfig,
953
+ retrievalConfig: googleOptions.retrievalConfig
954
+ } : googleToolConfig,
955
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
956
+ labels: googleOptions == null ? void 0 : googleOptions.labels
957
+ },
958
+ warnings: [...warnings, ...toolWarnings],
959
+ providerOptionsName
960
+ };
961
+ }
962
+ async doGenerate(options) {
963
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
964
+ const { args, warnings, providerOptionsName } = await this.getArgs(options);
965
+ const mergedHeaders = combineHeaders(
966
+ await resolve(this.config.headers),
967
+ options.headers
968
+ );
969
+ const {
970
+ responseHeaders,
971
+ value: response,
972
+ rawValue: rawResponse
973
+ } = await postJsonToApi({
974
+ url: `${this.config.baseURL}/${getModelPath(
975
+ this.modelId
976
+ )}:generateContent`,
977
+ headers: mergedHeaders,
978
+ body: args,
979
+ failedResponseHandler: googleFailedResponseHandler,
980
+ successfulResponseHandler: createJsonResponseHandler(responseSchema),
981
+ abortSignal: options.abortSignal,
982
+ fetch: this.config.fetch
983
+ });
984
+ const candidate = response.candidates[0];
985
+ const content = [];
986
+ const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
987
+ const usageMetadata = response.usageMetadata;
988
+ let lastCodeExecutionToolCallId;
989
+ for (const part of parts) {
990
+ if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
991
+ const toolCallId = this.config.generateId();
992
+ lastCodeExecutionToolCallId = toolCallId;
993
+ content.push({
994
+ type: "tool-call",
995
+ toolCallId,
996
+ toolName: "code_execution",
997
+ input: JSON.stringify(part.executableCode),
998
+ providerExecuted: true
999
+ });
1000
+ } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
1001
+ content.push({
1002
+ type: "tool-result",
1003
+ // Assumes a result directly follows its corresponding call part.
1004
+ toolCallId: lastCodeExecutionToolCallId,
1005
+ toolName: "code_execution",
1006
+ result: {
1007
+ outcome: part.codeExecutionResult.outcome,
1008
+ output: (_d = part.codeExecutionResult.output) != null ? _d : ""
1009
+ }
1010
+ });
1011
+ lastCodeExecutionToolCallId = void 0;
1012
+ } else if ("text" in part && part.text != null) {
1013
+ const thoughtSignatureMetadata = part.thoughtSignature ? {
1014
+ [providerOptionsName]: {
1015
+ thoughtSignature: part.thoughtSignature
1016
+ }
1017
+ } : void 0;
1018
+ if (part.text.length === 0) {
1019
+ if (thoughtSignatureMetadata != null && content.length > 0) {
1020
+ const lastContent = content[content.length - 1];
1021
+ lastContent.providerMetadata = thoughtSignatureMetadata;
1022
+ }
1023
+ } else {
1024
+ content.push({
1025
+ type: part.thought === true ? "reasoning" : "text",
1026
+ text: part.text,
1027
+ providerMetadata: thoughtSignatureMetadata
1028
+ });
1029
+ }
1030
+ } else if ("functionCall" in part) {
1031
+ content.push({
1032
+ type: "tool-call",
1033
+ toolCallId: this.config.generateId(),
1034
+ toolName: part.functionCall.name,
1035
+ input: JSON.stringify(part.functionCall.args),
1036
+ providerMetadata: part.thoughtSignature ? {
1037
+ [providerOptionsName]: {
1038
+ thoughtSignature: part.thoughtSignature
1039
+ }
1040
+ } : void 0
1041
+ });
1042
+ } else if ("inlineData" in part) {
1043
+ content.push({
1044
+ type: "file",
1045
+ data: part.inlineData.data,
1046
+ mediaType: part.inlineData.mimeType,
1047
+ providerMetadata: part.thoughtSignature ? {
1048
+ [providerOptionsName]: {
1049
+ thoughtSignature: part.thoughtSignature
1050
+ }
1051
+ } : void 0
1052
+ });
1053
+ }
1054
+ }
1055
+ const sources = (_e = extractSources({
1056
+ groundingMetadata: candidate.groundingMetadata,
1057
+ generateId: this.config.generateId
1058
+ })) != null ? _e : [];
1059
+ for (const source of sources) {
1060
+ content.push(source);
1061
+ }
1062
+ return {
1063
+ content,
1064
+ finishReason: {
1065
+ unified: mapGoogleGenerativeAIFinishReason({
1066
+ finishReason: candidate.finishReason,
1067
+ // Only count client-executed tool calls for finish reason determination.
1068
+ hasToolCalls: content.some(
1069
+ (part) => part.type === "tool-call" && !part.providerExecuted
1070
+ )
1071
+ }),
1072
+ raw: (_f = candidate.finishReason) != null ? _f : void 0
1073
+ },
1074
+ usage: convertGoogleGenerativeAIUsage(usageMetadata),
1075
+ warnings,
1076
+ providerMetadata: {
1077
+ [providerOptionsName]: {
1078
+ promptFeedback: (_g = response.promptFeedback) != null ? _g : null,
1079
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
1080
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
1081
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1082
+ usageMetadata: usageMetadata != null ? usageMetadata : null
1083
+ }
1084
+ },
1085
+ request: { body: args },
1086
+ response: {
1087
+ // TODO timestamp, model id, id
1088
+ headers: responseHeaders,
1089
+ body: rawResponse
1090
+ }
1091
+ };
1092
+ }
1093
+ async doStream(options) {
1094
+ const { args, warnings, providerOptionsName } = await this.getArgs(options);
1095
+ const headers = combineHeaders(
1096
+ await resolve(this.config.headers),
1097
+ options.headers
1098
+ );
1099
+ const { responseHeaders, value: response } = await postJsonToApi({
1100
+ url: `${this.config.baseURL}/${getModelPath(
1101
+ this.modelId
1102
+ )}:streamGenerateContent?alt=sse`,
1103
+ headers,
1104
+ body: args,
1105
+ failedResponseHandler: googleFailedResponseHandler,
1106
+ successfulResponseHandler: createEventSourceResponseHandler(chunkSchema),
1107
+ abortSignal: options.abortSignal,
1108
+ fetch: this.config.fetch
1109
+ });
1110
+ let finishReason = {
1111
+ unified: "other",
1112
+ raw: void 0
1113
+ };
1114
+ let usage = void 0;
1115
+ let providerMetadata = void 0;
1116
+ const generateId3 = this.config.generateId;
1117
+ let hasToolCalls = false;
1118
+ let currentTextBlockId = null;
1119
+ let currentReasoningBlockId = null;
1120
+ let blockCounter = 0;
1121
+ const emittedSourceUrls = /* @__PURE__ */ new Set();
1122
+ let lastCodeExecutionToolCallId;
1123
+ return {
1124
+ stream: response.pipeThrough(
1125
+ new TransformStream({
1126
+ start(controller) {
1127
+ controller.enqueue({ type: "stream-start", warnings });
1128
+ },
1129
+ transform(chunk, controller) {
1130
+ var _a, _b, _c, _d, _e, _f, _g, _h;
1131
+ if (options.includeRawChunks) {
1132
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1133
+ }
1134
+ if (!chunk.success) {
1135
+ controller.enqueue({ type: "error", error: chunk.error });
1136
+ return;
1137
+ }
1138
+ const value = chunk.value;
1139
+ const usageMetadata = value.usageMetadata;
1140
+ if (usageMetadata != null) {
1141
+ usage = usageMetadata;
1142
+ }
1143
+ const candidate = (_a = value.candidates) == null ? void 0 : _a[0];
1144
+ if (candidate == null) {
1145
+ return;
1146
+ }
1147
+ const content = candidate.content;
1148
+ const sources = extractSources({
1149
+ groundingMetadata: candidate.groundingMetadata,
1150
+ generateId: generateId3
1151
+ });
1152
+ if (sources != null) {
1153
+ for (const source of sources) {
1154
+ if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
1155
+ emittedSourceUrls.add(source.url);
1156
+ controller.enqueue(source);
1157
+ }
1158
+ }
1159
+ }
1160
+ if (content != null) {
1161
+ const parts = (_b = content.parts) != null ? _b : [];
1162
+ for (const part of parts) {
1163
+ if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
1164
+ const toolCallId = generateId3();
1165
+ lastCodeExecutionToolCallId = toolCallId;
1166
+ controller.enqueue({
1167
+ type: "tool-call",
1168
+ toolCallId,
1169
+ toolName: "code_execution",
1170
+ input: JSON.stringify(part.executableCode),
1171
+ providerExecuted: true
1172
+ });
1173
+ } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
1174
+ const toolCallId = lastCodeExecutionToolCallId;
1175
+ if (toolCallId) {
1176
+ controller.enqueue({
1177
+ type: "tool-result",
1178
+ toolCallId,
1179
+ toolName: "code_execution",
1180
+ result: {
1181
+ outcome: part.codeExecutionResult.outcome,
1182
+ output: (_d = part.codeExecutionResult.output) != null ? _d : ""
1183
+ }
1184
+ });
1185
+ lastCodeExecutionToolCallId = void 0;
1186
+ }
1187
+ } else if ("text" in part && part.text != null) {
1188
+ const thoughtSignatureMetadata = part.thoughtSignature ? {
1189
+ [providerOptionsName]: {
1190
+ thoughtSignature: part.thoughtSignature
1191
+ }
1192
+ } : void 0;
1193
+ if (part.text.length === 0) {
1194
+ if (thoughtSignatureMetadata != null && currentTextBlockId !== null) {
1195
+ controller.enqueue({
1196
+ type: "text-delta",
1197
+ id: currentTextBlockId,
1198
+ delta: "",
1199
+ providerMetadata: thoughtSignatureMetadata
1200
+ });
1201
+ }
1202
+ } else if (part.thought === true) {
1203
+ if (currentTextBlockId !== null) {
1204
+ controller.enqueue({
1205
+ type: "text-end",
1206
+ id: currentTextBlockId
1207
+ });
1208
+ currentTextBlockId = null;
1209
+ }
1210
+ if (currentReasoningBlockId === null) {
1211
+ currentReasoningBlockId = String(blockCounter++);
1212
+ controller.enqueue({
1213
+ type: "reasoning-start",
1214
+ id: currentReasoningBlockId,
1215
+ providerMetadata: thoughtSignatureMetadata
1216
+ });
1217
+ }
1218
+ controller.enqueue({
1219
+ type: "reasoning-delta",
1220
+ id: currentReasoningBlockId,
1221
+ delta: part.text,
1222
+ providerMetadata: thoughtSignatureMetadata
1223
+ });
1224
+ } else {
1225
+ if (currentReasoningBlockId !== null) {
1226
+ controller.enqueue({
1227
+ type: "reasoning-end",
1228
+ id: currentReasoningBlockId
1229
+ });
1230
+ currentReasoningBlockId = null;
1231
+ }
1232
+ if (currentTextBlockId === null) {
1233
+ currentTextBlockId = String(blockCounter++);
1234
+ controller.enqueue({
1235
+ type: "text-start",
1236
+ id: currentTextBlockId,
1237
+ providerMetadata: thoughtSignatureMetadata
1238
+ });
1239
+ }
1240
+ controller.enqueue({
1241
+ type: "text-delta",
1242
+ id: currentTextBlockId,
1243
+ delta: part.text,
1244
+ providerMetadata: thoughtSignatureMetadata
1245
+ });
1246
+ }
1247
+ } else if ("inlineData" in part) {
1248
+ controller.enqueue({
1249
+ type: "file",
1250
+ mediaType: part.inlineData.mimeType,
1251
+ data: part.inlineData.data
1252
+ });
1253
+ }
1254
+ }
1255
+ const toolCallDeltas = getToolCallsFromParts({
1256
+ parts: content.parts,
1257
+ generateId: generateId3,
1258
+ providerOptionsName
1259
+ });
1260
+ if (toolCallDeltas != null) {
1261
+ for (const toolCall of toolCallDeltas) {
1262
+ controller.enqueue({
1263
+ type: "tool-input-start",
1264
+ id: toolCall.toolCallId,
1265
+ toolName: toolCall.toolName,
1266
+ providerMetadata: toolCall.providerMetadata
1267
+ });
1268
+ controller.enqueue({
1269
+ type: "tool-input-delta",
1270
+ id: toolCall.toolCallId,
1271
+ delta: toolCall.args,
1272
+ providerMetadata: toolCall.providerMetadata
1273
+ });
1274
+ controller.enqueue({
1275
+ type: "tool-input-end",
1276
+ id: toolCall.toolCallId,
1277
+ providerMetadata: toolCall.providerMetadata
1278
+ });
1279
+ controller.enqueue({
1280
+ type: "tool-call",
1281
+ toolCallId: toolCall.toolCallId,
1282
+ toolName: toolCall.toolName,
1283
+ input: toolCall.args,
1284
+ providerMetadata: toolCall.providerMetadata
1285
+ });
1286
+ hasToolCalls = true;
1287
+ }
1288
+ }
1289
+ }
1290
+ if (candidate.finishReason != null) {
1291
+ finishReason = {
1292
+ unified: mapGoogleGenerativeAIFinishReason({
1293
+ finishReason: candidate.finishReason,
1294
+ hasToolCalls
1295
+ }),
1296
+ raw: candidate.finishReason
1297
+ };
1298
+ providerMetadata = {
1299
+ [providerOptionsName]: {
1300
+ promptFeedback: (_e = value.promptFeedback) != null ? _e : null,
1301
+ groundingMetadata: (_f = candidate.groundingMetadata) != null ? _f : null,
1302
+ urlContextMetadata: (_g = candidate.urlContextMetadata) != null ? _g : null,
1303
+ safetyRatings: (_h = candidate.safetyRatings) != null ? _h : null
1304
+ }
1305
+ };
1306
+ if (usageMetadata != null) {
1307
+ providerMetadata[providerOptionsName].usageMetadata = usageMetadata;
1308
+ }
1309
+ }
1310
+ },
1311
+ flush(controller) {
1312
+ if (currentTextBlockId !== null) {
1313
+ controller.enqueue({
1314
+ type: "text-end",
1315
+ id: currentTextBlockId
1316
+ });
1317
+ }
1318
+ if (currentReasoningBlockId !== null) {
1319
+ controller.enqueue({
1320
+ type: "reasoning-end",
1321
+ id: currentReasoningBlockId
1322
+ });
1323
+ }
1324
+ controller.enqueue({
1325
+ type: "finish",
1326
+ finishReason,
1327
+ usage: convertGoogleGenerativeAIUsage(usage),
1328
+ providerMetadata
1329
+ });
1330
+ }
1331
+ })
1332
+ ),
1333
+ response: { headers: responseHeaders },
1334
+ request: { body: args }
1335
+ };
1336
+ }
1337
+ };
1338
+ function getToolCallsFromParts({
1339
+ parts,
1340
+ generateId: generateId3,
1341
+ providerOptionsName
1342
+ }) {
1343
+ const functionCallParts = parts == null ? void 0 : parts.filter(
1344
+ (part) => "functionCall" in part
1345
+ );
1346
+ return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
1347
+ type: "tool-call",
1348
+ toolCallId: generateId3(),
1349
+ toolName: part.functionCall.name,
1350
+ args: JSON.stringify(part.functionCall.args),
1351
+ providerMetadata: part.thoughtSignature ? {
1352
+ [providerOptionsName]: {
1353
+ thoughtSignature: part.thoughtSignature
1354
+ }
1355
+ } : void 0
1356
+ }));
1357
+ }
1358
+ function extractSources({
1359
+ groundingMetadata,
1360
+ generateId: generateId3
1361
+ }) {
1362
+ var _a, _b, _c, _d, _e;
1363
+ if (!(groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks)) {
1364
+ return void 0;
1365
+ }
1366
+ const sources = [];
1367
+ for (const chunk of groundingMetadata.groundingChunks) {
1368
+ if (chunk.web != null) {
1369
+ sources.push({
1370
+ type: "source",
1371
+ sourceType: "url",
1372
+ id: generateId3(),
1373
+ url: chunk.web.uri,
1374
+ title: (_a = chunk.web.title) != null ? _a : void 0
1375
+ });
1376
+ } else if (chunk.retrievedContext != null) {
1377
+ const uri = chunk.retrievedContext.uri;
1378
+ const fileSearchStore = chunk.retrievedContext.fileSearchStore;
1379
+ if (uri && (uri.startsWith("http://") || uri.startsWith("https://"))) {
1380
+ sources.push({
1381
+ type: "source",
1382
+ sourceType: "url",
1383
+ id: generateId3(),
1384
+ url: uri,
1385
+ title: (_b = chunk.retrievedContext.title) != null ? _b : void 0
1386
+ });
1387
+ } else if (uri) {
1388
+ const title = (_c = chunk.retrievedContext.title) != null ? _c : "Unknown Document";
1389
+ let mediaType = "application/octet-stream";
1390
+ let filename = void 0;
1391
+ if (uri.endsWith(".pdf")) {
1392
+ mediaType = "application/pdf";
1393
+ filename = uri.split("/").pop();
1394
+ } else if (uri.endsWith(".txt")) {
1395
+ mediaType = "text/plain";
1396
+ filename = uri.split("/").pop();
1397
+ } else if (uri.endsWith(".docx")) {
1398
+ mediaType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document";
1399
+ filename = uri.split("/").pop();
1400
+ } else if (uri.endsWith(".doc")) {
1401
+ mediaType = "application/msword";
1402
+ filename = uri.split("/").pop();
1403
+ } else if (uri.match(/\.(md|markdown)$/)) {
1404
+ mediaType = "text/markdown";
1405
+ filename = uri.split("/").pop();
1406
+ } else {
1407
+ filename = uri.split("/").pop();
1408
+ }
1409
+ sources.push({
1410
+ type: "source",
1411
+ sourceType: "document",
1412
+ id: generateId3(),
1413
+ mediaType,
1414
+ title,
1415
+ filename
1416
+ });
1417
+ } else if (fileSearchStore) {
1418
+ const title = (_d = chunk.retrievedContext.title) != null ? _d : "Unknown Document";
1419
+ sources.push({
1420
+ type: "source",
1421
+ sourceType: "document",
1422
+ id: generateId3(),
1423
+ mediaType: "application/octet-stream",
1424
+ title,
1425
+ filename: fileSearchStore.split("/").pop()
1426
+ });
1427
+ }
1428
+ } else if (chunk.maps != null) {
1429
+ if (chunk.maps.uri) {
1430
+ sources.push({
1431
+ type: "source",
1432
+ sourceType: "url",
1433
+ id: generateId3(),
1434
+ url: chunk.maps.uri,
1435
+ title: (_e = chunk.maps.title) != null ? _e : void 0
1436
+ });
1437
+ }
1438
+ }
1439
+ }
1440
+ return sources.length > 0 ? sources : void 0;
1441
+ }
1442
+ var getGroundingMetadataSchema = () => object({
1443
+ webSearchQueries: array(string()).nullish(),
1444
+ retrievalQueries: array(string()).nullish(),
1445
+ searchEntryPoint: object({ renderedContent: string() }).nullish(),
1446
+ groundingChunks: array(
1447
+ object({
1448
+ web: object({ uri: string(), title: string().nullish() }).nullish(),
1449
+ retrievedContext: object({
1450
+ uri: string().nullish(),
1451
+ title: string().nullish(),
1452
+ text: string().nullish(),
1453
+ fileSearchStore: string().nullish()
1454
+ }).nullish(),
1455
+ maps: object({
1456
+ uri: string().nullish(),
1457
+ title: string().nullish(),
1458
+ text: string().nullish(),
1459
+ placeId: string().nullish()
1460
+ }).nullish()
1461
+ })
1462
+ ).nullish(),
1463
+ groundingSupports: array(
1464
+ object({
1465
+ segment: object({
1466
+ startIndex: number().nullish(),
1467
+ endIndex: number().nullish(),
1468
+ text: string().nullish()
1469
+ }).nullish(),
1470
+ segment_text: string().nullish(),
1471
+ groundingChunkIndices: array(number()).nullish(),
1472
+ supportChunkIndices: array(number()).nullish(),
1473
+ confidenceScores: array(number()).nullish(),
1474
+ confidenceScore: array(number()).nullish()
1475
+ })
1476
+ ).nullish(),
1477
+ retrievalMetadata: union([
1478
+ object({
1479
+ webDynamicRetrievalScore: number()
1480
+ }),
1481
+ object({})
1482
+ ]).nullish()
1483
+ });
1484
+ var getContentSchema = () => object({
1485
+ parts: array(
1486
+ union([
1487
+ // note: order matters since text can be fully empty
1488
+ object({
1489
+ functionCall: object({
1490
+ name: string(),
1491
+ args: unknown()
1492
+ }),
1493
+ thoughtSignature: string().nullish()
1494
+ }),
1495
+ object({
1496
+ inlineData: object({
1497
+ mimeType: string(),
1498
+ data: string()
1499
+ }),
1500
+ thoughtSignature: string().nullish()
1501
+ }),
1502
+ object({
1503
+ executableCode: object({
1504
+ language: string(),
1505
+ code: string()
1506
+ }).nullish(),
1507
+ codeExecutionResult: object({
1508
+ outcome: string(),
1509
+ output: string().nullish()
1510
+ }).nullish(),
1511
+ text: string().nullish(),
1512
+ thought: boolean().nullish(),
1513
+ thoughtSignature: string().nullish()
1514
+ })
1515
+ ])
1516
+ ).nullish()
1517
+ });
1518
+ var getSafetyRatingSchema = () => object({
1519
+ category: string().nullish(),
1520
+ probability: string().nullish(),
1521
+ probabilityScore: number().nullish(),
1522
+ severity: string().nullish(),
1523
+ severityScore: number().nullish(),
1524
+ blocked: boolean().nullish()
1525
+ });
1526
+ var usageSchema = object({
1527
+ cachedContentTokenCount: number().nullish(),
1528
+ thoughtsTokenCount: number().nullish(),
1529
+ promptTokenCount: number().nullish(),
1530
+ candidatesTokenCount: number().nullish(),
1531
+ totalTokenCount: number().nullish(),
1532
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
1533
+ trafficType: string().nullish()
1534
+ });
1535
+ var getUrlContextMetadataSchema = () => object({
1536
+ urlMetadata: array(
1537
+ object({
1538
+ retrievedUrl: string(),
1539
+ urlRetrievalStatus: string()
1540
+ })
1541
+ )
1542
+ });
1543
+ var responseSchema = lazySchema(
1544
+ () => zodSchema(
1545
+ object({
1546
+ candidates: array(
1547
+ object({
1548
+ content: getContentSchema().nullish().or(object({}).strict()),
1549
+ finishReason: string().nullish(),
1550
+ safetyRatings: array(getSafetyRatingSchema()).nullish(),
1551
+ groundingMetadata: getGroundingMetadataSchema().nullish(),
1552
+ urlContextMetadata: getUrlContextMetadataSchema().nullish()
1553
+ })
1554
+ ),
1555
+ usageMetadata: usageSchema.nullish(),
1556
+ promptFeedback: object({
1557
+ blockReason: string().nullish(),
1558
+ safetyRatings: array(getSafetyRatingSchema()).nullish()
1559
+ }).nullish()
1560
+ })
1561
+ )
1562
+ );
1563
+ var chunkSchema = lazySchema(
1564
+ () => zodSchema(
1565
+ object({
1566
+ candidates: array(
1567
+ object({
1568
+ content: getContentSchema().nullish(),
1569
+ finishReason: string().nullish(),
1570
+ safetyRatings: array(getSafetyRatingSchema()).nullish(),
1571
+ groundingMetadata: getGroundingMetadataSchema().nullish(),
1572
+ urlContextMetadata: getUrlContextMetadataSchema().nullish()
1573
+ })
1574
+ ).nullish(),
1575
+ usageMetadata: usageSchema.nullish(),
1576
+ promptFeedback: object({
1577
+ blockReason: string().nullish(),
1578
+ safetyRatings: array(getSafetyRatingSchema()).nullish()
1579
+ }).nullish()
1580
+ })
1581
+ )
1582
+ );
1583
+ var codeExecution = createProviderToolFactoryWithOutputSchema({
1584
+ id: "google.code_execution",
1585
+ inputSchema: object({
1586
+ language: string().describe("The programming language of the code."),
1587
+ code: string().describe("The code to be executed.")
1588
+ }),
1589
+ outputSchema: object({
1590
+ outcome: string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
1591
+ output: string().describe("The output from the code execution.")
1592
+ })
1593
+ });
1594
+ var enterpriseWebSearch = createProviderToolFactory({
1595
+ id: "google.enterprise_web_search",
1596
+ inputSchema: lazySchema(() => zodSchema(object({})))
1597
+ });
1598
+ var fileSearchArgsBaseSchema = object({
1599
+ /** The names of the file_search_stores to retrieve from.
1600
+ * Example: `fileSearchStores/my-file-search-store-123`
1601
+ */
1602
+ fileSearchStoreNames: array(string()).describe(
1603
+ "The names of the file_search_stores to retrieve from. Example: `fileSearchStores/my-file-search-store-123`"
1604
+ ),
1605
+ /** The number of file search retrieval chunks to retrieve. */
1606
+ topK: number().int().positive().describe("The number of file search retrieval chunks to retrieve.").optional(),
1607
+ /** Metadata filter to apply to the file search retrieval documents.
1608
+ * See https://google.aip.dev/160 for the syntax of the filter expression.
1609
+ */
1610
+ metadataFilter: string().describe(
1611
+ "Metadata filter to apply to the file search retrieval documents. See https://google.aip.dev/160 for the syntax of the filter expression."
1612
+ ).optional()
1613
+ }).passthrough();
1614
+ var fileSearchArgsSchema = lazySchema(
1615
+ () => zodSchema(fileSearchArgsBaseSchema)
1616
+ );
1617
+ var fileSearch = createProviderToolFactory({
1618
+ id: "google.file_search",
1619
+ inputSchema: fileSearchArgsSchema
1620
+ });
1621
+ var googleMaps = createProviderToolFactory({
1622
+ id: "google.google_maps",
1623
+ inputSchema: lazySchema(() => zodSchema(object({})))
1624
+ });
1625
+ var googleSearch = createProviderToolFactory({
1626
+ id: "google.google_search",
1627
+ inputSchema: lazySchema(
1628
+ () => zodSchema(
1629
+ object({
1630
+ mode: _enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
1631
+ dynamicThreshold: number().default(1)
1632
+ })
1633
+ )
1634
+ )
1635
+ });
1636
+ var urlContext = createProviderToolFactory({
1637
+ id: "google.url_context",
1638
+ inputSchema: lazySchema(() => zodSchema(object({})))
1639
+ });
1640
+ var vertexRagStore = createProviderToolFactory({
1641
+ id: "google.vertex_rag_store",
1642
+ inputSchema: object({
1643
+ ragCorpus: string(),
1644
+ topK: number().optional()
1645
+ })
1646
+ });
1647
+ var googleTools = {
1648
+ /**
1649
+ * Creates a Google search tool that gives Google direct access to real-time web content.
1650
+ * Must have name "google_search".
1651
+ */
1652
+ googleSearch,
1653
+ /**
1654
+ * Creates an Enterprise Web Search tool for grounding responses using a compliance-focused web index.
1655
+ * Designed for highly-regulated industries (finance, healthcare, public sector).
1656
+ * Does not log customer data and supports VPC service controls.
1657
+ * Must have name "enterprise_web_search".
1658
+ *
1659
+ * @note Only available on Vertex AI. Requires Gemini 2.0 or newer.
1660
+ *
1661
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise
1662
+ */
1663
+ enterpriseWebSearch,
1664
+ /**
1665
+ * Creates a Google Maps grounding tool that gives the model access to Google Maps data.
1666
+ * Must have name "google_maps".
1667
+ *
1668
+ * @see https://ai.google.dev/gemini-api/docs/maps-grounding
1669
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
1670
+ */
1671
+ googleMaps,
1672
+ /**
1673
+ * Creates a URL context tool that gives Google direct access to real-time web content.
1674
+ * Must have name "url_context".
1675
+ */
1676
+ urlContext,
1677
+ /**
1678
+ * Enables Retrieval Augmented Generation (RAG) via the Gemini File Search tool.
1679
+ * Must have name "file_search".
1680
+ *
1681
+ * @param fileSearchStoreNames - Fully-qualified File Search store resource names.
1682
+ * @param metadataFilter - Optional filter expression to restrict the files that can be retrieved.
1683
+ * @param topK - Optional result limit for the number of chunks returned from File Search.
1684
+ *
1685
+ * @see https://ai.google.dev/gemini-api/docs/file-search
1686
+ */
1687
+ fileSearch,
1688
+ /**
1689
+ * A tool that enables the model to generate and run Python code.
1690
+ * Must have name "code_execution".
1691
+ *
1692
+ * @note Ensure the selected model supports Code Execution.
1693
+ * Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
1694
+ *
1695
+ * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
1696
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
1697
+ */
1698
+ codeExecution,
1699
+ /**
1700
+ * Creates a Vertex RAG Store tool that enables the model to perform RAG searches against a Vertex RAG Store.
1701
+ * Must have name "vertex_rag_store".
1702
+ */
1703
+ vertexRagStore
1704
+ };
1705
+ var GoogleGenerativeAIImageModel = class {
1706
+ constructor(modelId, settings, config) {
1707
+ this.modelId = modelId;
1708
+ this.settings = settings;
1709
+ this.config = config;
1710
+ this.specificationVersion = "v3";
1711
+ }
1712
+ get maxImagesPerCall() {
1713
+ if (this.settings.maxImagesPerCall != null) {
1714
+ return this.settings.maxImagesPerCall;
1715
+ }
1716
+ if (isGeminiModel(this.modelId)) {
1717
+ return 10;
1718
+ }
1719
+ return 4;
1720
+ }
1721
+ get provider() {
1722
+ return this.config.provider;
1723
+ }
1724
+ async doGenerate(options) {
1725
+ if (isGeminiModel(this.modelId)) {
1726
+ return this.doGenerateGemini(options);
1727
+ }
1728
+ return this.doGenerateImagen(options);
1729
+ }
1730
+ async doGenerateImagen(options) {
1731
+ var _a, _b, _c;
1732
+ const {
1733
+ prompt,
1734
+ n = 1,
1735
+ size,
1736
+ aspectRatio = "1:1",
1737
+ seed,
1738
+ providerOptions,
1739
+ headers,
1740
+ abortSignal,
1741
+ files,
1742
+ mask
1743
+ } = options;
1744
+ const warnings = [];
1745
+ if (files != null && files.length > 0) {
1746
+ throw new Error(
1747
+ "Google Generative AI does not support image editing with Imagen models. Use Google Vertex AI (@ai-sdk/google-vertex) for image editing capabilities."
1748
+ );
1749
+ }
1750
+ if (mask != null) {
1751
+ throw new Error(
1752
+ "Google Generative AI does not support image editing with masks. Use Google Vertex AI (@ai-sdk/google-vertex) for image editing capabilities."
1753
+ );
1754
+ }
1755
+ if (size != null) {
1756
+ warnings.push({
1757
+ type: "unsupported",
1758
+ feature: "size",
1759
+ details: "This model does not support the `size` option. Use `aspectRatio` instead."
1760
+ });
1761
+ }
1762
+ if (seed != null) {
1763
+ warnings.push({
1764
+ type: "unsupported",
1765
+ feature: "seed",
1766
+ details: "This model does not support the `seed` option through this provider."
1767
+ });
1768
+ }
1769
+ const googleOptions = await parseProviderOptions({
1770
+ provider: "google",
1771
+ providerOptions,
1772
+ schema: googleImageModelOptionsSchema
1773
+ });
1774
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
1775
+ const parameters = {
1776
+ sampleCount: n
1777
+ };
1778
+ if (aspectRatio != null) {
1779
+ parameters.aspectRatio = aspectRatio;
1780
+ }
1781
+ if (googleOptions) {
1782
+ Object.assign(parameters, googleOptions);
1783
+ }
1784
+ const body = {
1785
+ instances: [{ prompt }],
1786
+ parameters
1787
+ };
1788
+ const { responseHeaders, value: response } = await postJsonToApi({
1789
+ url: `${this.config.baseURL}/models/${this.modelId}:predict`,
1790
+ headers: combineHeaders(await resolve(this.config.headers), headers),
1791
+ body,
1792
+ failedResponseHandler: googleFailedResponseHandler,
1793
+ successfulResponseHandler: createJsonResponseHandler(
1794
+ googleImageResponseSchema
1795
+ ),
1796
+ abortSignal,
1797
+ fetch: this.config.fetch
1798
+ });
1799
+ return {
1800
+ images: response.predictions.map(
1801
+ (p) => p.bytesBase64Encoded
1802
+ ),
1803
+ warnings,
1804
+ providerMetadata: {
1805
+ google: {
1806
+ images: response.predictions.map(() => ({
1807
+ // Add any prediction-specific metadata here
1808
+ }))
1809
+ }
1810
+ },
1811
+ response: {
1812
+ timestamp: currentDate,
1813
+ modelId: this.modelId,
1814
+ headers: responseHeaders
1815
+ }
1816
+ };
1817
+ }
1818
+ async doGenerateGemini(options) {
1819
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
1820
+ const {
1821
+ prompt,
1822
+ n,
1823
+ size,
1824
+ aspectRatio,
1825
+ seed,
1826
+ providerOptions,
1827
+ headers,
1828
+ abortSignal,
1829
+ files,
1830
+ mask
1831
+ } = options;
1832
+ const warnings = [];
1833
+ if (mask != null) {
1834
+ throw new Error(
1835
+ "Gemini image models do not support mask-based image editing."
1836
+ );
1837
+ }
1838
+ if (n != null && n > 1) {
1839
+ throw new Error(
1840
+ "Gemini image models do not support generating a set number of images per call. Use n=1 or omit the n parameter."
1841
+ );
1842
+ }
1843
+ if (size != null) {
1844
+ warnings.push({
1845
+ type: "unsupported",
1846
+ feature: "size",
1847
+ details: "This model does not support the `size` option. Use `aspectRatio` instead."
1848
+ });
1849
+ }
1850
+ const userContent = [];
1851
+ if (prompt != null) {
1852
+ userContent.push({ type: "text", text: prompt });
1853
+ }
1854
+ if (files != null && files.length > 0) {
1855
+ for (const file of files) {
1856
+ if (file.type === "url") {
1857
+ userContent.push({
1858
+ type: "file",
1859
+ data: new URL(file.url),
1860
+ mediaType: "image/*"
1861
+ });
1862
+ } else {
1863
+ userContent.push({
1864
+ type: "file",
1865
+ data: typeof file.data === "string" ? file.data : new Uint8Array(file.data),
1866
+ mediaType: file.mediaType
1867
+ });
1868
+ }
1869
+ }
1870
+ }
1871
+ const languageModelPrompt = [
1872
+ { role: "user", content: userContent }
1873
+ ];
1874
+ const languageModel = new GoogleGenerativeAILanguageModel(this.modelId, {
1875
+ provider: this.config.provider,
1876
+ baseURL: this.config.baseURL,
1877
+ headers: (_a = this.config.headers) != null ? _a : {},
1878
+ fetch: this.config.fetch,
1879
+ generateId: (_b = this.config.generateId) != null ? _b : generateId
1880
+ });
1881
+ const result = await languageModel.doGenerate({
1882
+ prompt: languageModelPrompt,
1883
+ seed,
1884
+ providerOptions: {
1885
+ google: {
1886
+ responseModalities: ["IMAGE"],
1887
+ imageConfig: aspectRatio ? {
1888
+ aspectRatio
1889
+ } : void 0,
1890
+ ...(_c = providerOptions == null ? void 0 : providerOptions.google) != null ? _c : {}
1891
+ }
1892
+ },
1893
+ headers,
1894
+ abortSignal
1895
+ });
1896
+ const currentDate = (_f = (_e = (_d = this.config._internal) == null ? void 0 : _d.currentDate) == null ? void 0 : _e.call(_d)) != null ? _f : /* @__PURE__ */ new Date();
1897
+ const images = [];
1898
+ for (const part of result.content) {
1899
+ if (part.type === "file" && part.mediaType.startsWith("image/")) {
1900
+ images.push(convertToBase64(part.data));
1901
+ }
1902
+ }
1903
+ return {
1904
+ images,
1905
+ warnings,
1906
+ providerMetadata: {
1907
+ google: {
1908
+ images: images.map(() => ({}))
1909
+ }
1910
+ },
1911
+ response: {
1912
+ timestamp: currentDate,
1913
+ modelId: this.modelId,
1914
+ headers: (_g = result.response) == null ? void 0 : _g.headers
1915
+ },
1916
+ usage: result.usage ? {
1917
+ inputTokens: result.usage.inputTokens.total,
1918
+ outputTokens: result.usage.outputTokens.total,
1919
+ totalTokens: ((_h = result.usage.inputTokens.total) != null ? _h : 0) + ((_i = result.usage.outputTokens.total) != null ? _i : 0)
1920
+ } : void 0
1921
+ };
1922
+ }
1923
+ };
1924
/**
 * Returns true when the model id carries the "gemini-" prefix
 * (e.g. "gemini-2.5-flash"), i.e. it is served by the Gemini API
 * rather than the Imagen predict endpoint.
 */
function isGeminiModel(modelId) {
  return /^gemini-/.test(modelId);
}
1927
// Response schema for the Imagen `:predict` endpoint: a list of
// base64-encoded image predictions. Defaults to an empty list when the
// `predictions` field is absent.
var googleImageResponseSchema = lazySchema(
  () => zodSchema(
    object({
      predictions: array(object({ bytesBase64Encoded: string() })).default([])
    })
  )
);
1934
// Provider options accepted by Imagen image models under the `google` key.
// Parsed via parseProviderOptions and merged verbatim into the request
// `parameters` object by the image doGenerate path.
var googleImageModelOptionsSchema = lazySchema(
  () => zodSchema(
    object({
      personGeneration: _enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
      aspectRatio: _enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
    })
  )
);
1942
/**
 * Video generation model backed by the Google Generative AI
 * `:predictLongRunning` endpoint (e.g. Veo models). Generation is
 * asynchronous server-side: the initial request returns a long-running
 * operation resource that is polled until `done`.
 */
var GoogleGenerativeAIVideoModel = class {
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
    this.specificationVersion = "v3";
  }
  get provider() {
    return this.config.provider;
  }
  get maxVideosPerCall() {
    return 4;
  }
  /**
   * Generates videos for the given options.
   *
   * Builds a single instance (prompt / inline image / reference images),
   * starts the long-running operation, polls it honoring
   * `pollIntervalMs`/`pollTimeoutMs` provider options, and returns URL-based
   * videos with the API key appended for download authorization.
   *
   * Throws AISDKError on missing operation name, timeout, abort,
   * operation-level failure, or an empty/invalid video response.
   */
  async doGenerate(options) {
    // Response timestamp; config._internal.currentDate is presumably a
    // test-only clock override — falls back to the real clock.
    let currentDate;
    const internalConfig = this.config._internal;
    if (internalConfig != null && internalConfig.currentDate != null) {
      currentDate = internalConfig.currentDate();
    }
    if (currentDate == null) {
      currentDate = new Date();
    }
    const warnings = [];
    const googleOptions = await parseProviderOptions({
      provider: "google",
      providerOptions: options.providerOptions,
      schema: googleVideoModelOptionsSchema
    });
    // The API takes a single instance describing the generation request.
    const instance = {};
    const instances = [instance];
    if (options.prompt != null) {
      instance.prompt = options.prompt;
    }
    if (options.image != null) {
      if (options.image.type === "url") {
        warnings.push({
          type: "unsupported",
          feature: "URL-based image input",
          details: "Google Generative AI video models require base64-encoded images. URL will be ignored."
        });
      } else {
        const base64Data = typeof options.image.data === "string" ? options.image.data : convertUint8ArrayToBase64(options.image.data);
        instance.image = {
          inlineData: {
            mimeType: options.image.mediaType || "image/png",
            data: base64Data
          }
        };
      }
    }
    if (googleOptions != null && googleOptions.referenceImages != null) {
      // Reference images may be inline base64 or GCS URIs; unknown shapes
      // are passed through unchanged.
      instance.referenceImages = googleOptions.referenceImages.map((refImg) => {
        if (refImg.bytesBase64Encoded) {
          return {
            inlineData: {
              mimeType: "image/png",
              data: refImg.bytesBase64Encoded
            }
          };
        }
        if (refImg.gcsUri) {
          return {
            gcsUri: refImg.gcsUri
          };
        }
        return refImg;
      });
    }
    const parameters = {
      sampleCount: options.n
    };
    if (options.aspectRatio) {
      parameters.aspectRatio = options.aspectRatio;
    }
    if (options.resolution) {
      // Map WxH resolution strings to the API's named tiers; unrecognized
      // values pass through as-is.
      const resolutionMap = {
        "1280x720": "720p",
        "1920x1080": "1080p",
        "3840x2160": "4k"
      };
      parameters.resolution = resolutionMap[options.resolution] || options.resolution;
    }
    if (options.duration) {
      parameters.durationSeconds = options.duration;
    }
    // BUG FIX: null check instead of truthiness so that seed 0 (a valid
    // seed) is not silently dropped; matches the image model's `seed != null`.
    if (options.seed != null) {
      parameters.seed = options.seed;
    }
    if (googleOptions != null) {
      const opts = googleOptions;
      if (opts.personGeneration !== void 0 && opts.personGeneration !== null) {
        parameters.personGeneration = opts.personGeneration;
      }
      if (opts.negativePrompt !== void 0 && opts.negativePrompt !== null) {
        parameters.negativePrompt = opts.negativePrompt;
      }
      // The options schema is passthrough: forward any extra keys, skipping
      // client-side polling controls and keys already handled above.
      const handledKeys = new Set([
        "pollIntervalMs",
        "pollTimeoutMs",
        "personGeneration",
        "negativePrompt",
        "referenceImages"
      ]);
      for (const [key, value] of Object.entries(opts)) {
        if (!handledKeys.has(key)) {
          parameters[key] = value;
        }
      }
    }
    // Kick off the long-running generation operation.
    // BUG FIX: capture the initial response headers so they are still
    // reported when the operation completes without any polling round-trips
    // (previously `headers` was undefined in that case).
    const { responseHeaders: initialResponseHeaders, value: operation } = await postJsonToApi({
      url: `${this.config.baseURL}/models/${this.modelId}:predictLongRunning`,
      headers: combineHeaders(
        await resolve(this.config.headers),
        options.headers
      ),
      body: {
        instances,
        parameters
      },
      successfulResponseHandler: createJsonResponseHandler(
        googleOperationSchema
      ),
      failedResponseHandler: googleFailedResponseHandler,
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const operationName = operation.name;
    if (!operationName) {
      throw new AISDKError({
        name: "GOOGLE_VIDEO_GENERATION_ERROR",
        message: "No operation name returned from API"
      });
    }
    const pollIntervalMs = googleOptions != null && googleOptions.pollIntervalMs != null ? googleOptions.pollIntervalMs : 1e4;
    const pollTimeoutMs = googleOptions != null && googleOptions.pollTimeoutMs != null ? googleOptions.pollTimeoutMs : 6e5;
    const startTime = Date.now();
    let finalOperation = operation;
    let responseHeaders = initialResponseHeaders;
    while (!finalOperation.done) {
      if (Date.now() - startTime > pollTimeoutMs) {
        throw new AISDKError({
          name: "GOOGLE_VIDEO_GENERATION_TIMEOUT",
          message: `Video generation timed out after ${pollTimeoutMs}ms`
        });
      }
      await delay(pollIntervalMs);
      if (options.abortSignal != null && options.abortSignal.aborted) {
        throw new AISDKError({
          name: "GOOGLE_VIDEO_GENERATION_ABORTED",
          message: "Video generation request was aborted"
        });
      }
      const { value: statusOperation, responseHeaders: pollHeaders } = await getFromApi({
        url: `${this.config.baseURL}/${operationName}`,
        headers: combineHeaders(
          await resolve(this.config.headers),
          options.headers
        ),
        successfulResponseHandler: createJsonResponseHandler(
          googleOperationSchema
        ),
        failedResponseHandler: googleFailedResponseHandler,
        abortSignal: options.abortSignal,
        fetch: this.config.fetch
      });
      finalOperation = statusOperation;
      responseHeaders = pollHeaders;
    }
    if (finalOperation.error) {
      throw new AISDKError({
        name: "GOOGLE_VIDEO_GENERATION_FAILED",
        message: `Video generation failed: ${finalOperation.error.message}`
      });
    }
    const response = finalOperation.response;
    const generatedSamples = response != null && response.generateVideoResponse != null ? response.generateVideoResponse.generatedSamples : void 0;
    if (!generatedSamples || generatedSamples.length === 0) {
      throw new AISDKError({
        name: "GOOGLE_VIDEO_GENERATION_ERROR",
        message: `No videos in response. Response: ${JSON.stringify(finalOperation)}`
      });
    }
    const videos = [];
    const videoMetadata = [];
    // The returned video URIs require the API key as a query parameter for
    // download authorization.
    const resolvedHeaders = await resolve(this.config.headers);
    const apiKey = resolvedHeaders == null ? void 0 : resolvedHeaders["x-goog-api-key"];
    for (const generatedSample of generatedSamples) {
      const video = generatedSample.video;
      if (video != null && video.uri) {
        const urlWithAuth = apiKey ? `${video.uri}${video.uri.includes("?") ? "&" : "?"}key=${apiKey}` : video.uri;
        videos.push({
          type: "url",
          url: urlWithAuth,
          mediaType: "video/mp4"
        });
        // Provider metadata keeps the raw (un-authed) URI.
        videoMetadata.push({
          uri: video.uri
        });
      }
    }
    if (videos.length === 0) {
      throw new AISDKError({
        name: "GOOGLE_VIDEO_GENERATION_ERROR",
        message: "No valid videos in response"
      });
    }
    return {
      videos,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders
      },
      providerMetadata: {
        google: {
          videos: videoMetadata
        }
      }
    };
  }
};
2153
// Shape of the long-running operation resource returned by
// `:predictLongRunning` and by the polling GET on the operation name.
// NOTE(review): unlike the sibling schemas this is a raw zod object, not
// wrapped in lazySchema/zodSchema — confirm createJsonResponseHandler
// accepts it in this form.
var googleOperationSchema = object({
  name: string().nullish(),
  done: boolean().nullish(),
  error: object({
    code: number().nullish(),
    message: string(),
    status: string().nullish()
  }).nullish(),
  response: object({
    generateVideoResponse: object({
      generatedSamples: array(
        object({
          video: object({
            uri: string().nullish()
          }).nullish()
        })
      ).nullish()
    }).nullish()
  }).nullish()
});
2173
// Provider options for video models under the `google` key.
// `pollIntervalMs`/`pollTimeoutMs` are client-side polling controls and are
// never sent to the API; `.passthrough()` lets any other keys flow through
// into the request `parameters`.
var googleVideoModelOptionsSchema = lazySchema(
  () => zodSchema(
    object({
      pollIntervalMs: number().positive().nullish(),
      pollTimeoutMs: number().positive().nullish(),
      personGeneration: _enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
      negativePrompt: string().nullish(),
      referenceImages: array(
        object({
          bytesBase64Encoded: string().nullish(),
          gcsUri: string().nullish()
        })
      ).nullish()
    }).passthrough()
  )
);
2189
/**
 * Factory for the Google Generative AI provider. The returned value is a
 * callable (shorthand for creating a chat model) that also carries named
 * factories for chat, embedding, image, and video models plus the google
 * tool set.
 */
function createGoogleGenerativeAI(options = {}) {
  const trimmedBaseURL = withoutTrailingSlash(options.baseURL);
  const baseURL = trimmedBaseURL != null ? trimmedBaseURL : "https://generativelanguage.googleapis.com/v1beta";
  const providerName = options.name != null ? options.name : "google.generative-ai";
  // Headers are built lazily so the API key is only required when a
  // request is actually made.
  const getHeaders = () => withUserAgentSuffix(
    {
      "x-goog-api-key": loadApiKey({
        apiKey: options.apiKey,
        environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
        description: "Google Generative AI"
      }),
      ...options.headers
    },
    `ai-sdk/google/${VERSION}`
  );
  const createChatModel = (modelId) => {
    const generateIdFn = options.generateId != null ? options.generateId : generateId;
    return new GoogleGenerativeAILanguageModel(modelId, {
      provider: providerName,
      baseURL,
      headers: getHeaders,
      generateId: generateIdFn,
      supportedUrls: () => ({
        "*": [
          // Google Generative Language "files" endpoint
          // e.g. https://generativelanguage.googleapis.com/v1beta/files/...
          new RegExp(`^${baseURL}/files/.*$`),
          // YouTube URLs (public or unlisted videos)
          new RegExp(
            `^https://(?:www\\.)?youtube\\.com/watch\\?v=[\\w-]+(?:&[\\w=&.-]*)?$`
          ),
          new RegExp(`^https://youtu\\.be/[\\w-]+(?:\\?[\\w=&.-]*)?$`)
        ]
      }),
      fetch: options.fetch
    });
  };
  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
    provider: providerName,
    baseURL,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
    provider: providerName,
    baseURL,
    headers: getHeaders,
    fetch: options.fetch
  });
  const createVideoModel = (modelId) => {
    const generateIdFn = options.generateId != null ? options.generateId : generateId;
    return new GoogleGenerativeAIVideoModel(modelId, {
      provider: providerName,
      baseURL,
      headers: getHeaders,
      fetch: options.fetch,
      generateId: generateIdFn
    });
  };
  // Direct invocation creates a chat model; guard against accidental `new`.
  const provider = function(modelId) {
    if (new.target) {
      throw new Error(
        "The Google Generative AI model function cannot be called with the new keyword."
      );
    }
    return createChatModel(modelId);
  };
  provider.specificationVersion = "v3";
  // Attach the modality factories under all of their supported alias names.
  Object.assign(provider, {
    languageModel: createChatModel,
    chat: createChatModel,
    generativeAI: createChatModel,
    embedding: createEmbeddingModel,
    embeddingModel: createEmbeddingModel,
    textEmbedding: createEmbeddingModel,
    textEmbeddingModel: createEmbeddingModel,
    image: createImageModel,
    imageModel: createImageModel,
    video: createVideoModel,
    videoModel: createVideoModel,
    tools: googleTools
  });
  return provider;
}
2271
// NOTE(review): this bare call builds a default provider and discards the
// result. Nothing visible here runs at creation time (the API key is only
// loaded lazily inside getHeaders), so this looks like a bundler artifact —
// confirm it is intentional before removing.
createGoogleGenerativeAI();
export {
  VERSION,
  createGoogleGenerativeAI
};