@ai-sdk/google 2.0.0-canary.8 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,14 +21,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  var internal_exports = {};
  __export(internal_exports, {
  GoogleGenerativeAILanguageModel: () => GoogleGenerativeAILanguageModel,
- groundingMetadataSchema: () => groundingMetadataSchema,
+ googleTools: () => googleTools,
  safetyRatingSchema: () => safetyRatingSchema
  });
  module.exports = __toCommonJS(internal_exports);

  // src/google-generative-ai-language-model.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_v45 = require("zod/v4");

  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -124,16 +124,18 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
  return result;
  }
  function isEmptyObjectSchema(jsonSchema) {
- return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+ return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
  }

  // src/convert-to-google-generative-ai-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
- function convertToGoogleGenerativeAIMessages(prompt) {
+ function convertToGoogleGenerativeAIMessages(prompt, options) {
+ var _a;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
+ const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
@@ -207,7 +209,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return {
  functionCall: {
  name: part.toolName,
- args: part.args
+ args: part.input
  }
  };
  }
@@ -225,7 +227,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  name: part.toolName,
  response: {
  name: part.toolName,
- content: part.result
+ content: part.output.value
  }
  }
  }))
@@ -234,8 +236,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  }
  }
+ if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+ const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+ contents[0].parts.unshift({ text: systemText + "\n\n" });
+ }
  return {
- systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+ systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
  contents
  };
  }
@@ -247,12 +253,12 @@ function getModelPath(modelId) {

  // src/google-error.ts
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod = require("zod");
- var googleErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- code: import_zod.z.number().nullable(),
- message: import_zod.z.string(),
- status: import_zod.z.string()
+ var import_v4 = require("zod/v4");
+ var googleErrorDataSchema = import_v4.z.object({
+ error: import_v4.z.object({
+ code: import_v4.z.number().nullable(),
+ message: import_v4.z.string(),
+ status: import_v4.z.string()
  })
  });
  var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -260,13 +266,73 @@ var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });

+ // src/google-generative-ai-options.ts
+ var import_v42 = require("zod/v4");
+ var googleGenerativeAIProviderOptions = import_v42.z.object({
+ responseModalities: import_v42.z.array(import_v42.z.enum(["TEXT", "IMAGE"])).optional(),
+ thinkingConfig: import_v42.z.object({
+ thinkingBudget: import_v42.z.number().optional(),
+ includeThoughts: import_v42.z.boolean().optional()
+ }).optional(),
+ /**
+ Optional.
+ The name of the cached content used as context to serve the prediction.
+ Format: cachedContents/{cachedContent}
+ */
+ cachedContent: import_v42.z.string().optional(),
+ /**
+ * Optional. Enable structured output. Default is true.
+ *
+ * This is useful when the JSON Schema contains elements that are
+ * not supported by the OpenAPI schema version that
+ * Google Generative AI uses. You can use this to disable
+ * structured outputs if you need to.
+ */
+ structuredOutputs: import_v42.z.boolean().optional(),
+ /**
+ Optional. A list of unique safety settings for blocking unsafe content.
+ */
+ safetySettings: import_v42.z.array(
+ import_v42.z.object({
+ category: import_v42.z.enum([
+ "HARM_CATEGORY_UNSPECIFIED",
+ "HARM_CATEGORY_HATE_SPEECH",
+ "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "HARM_CATEGORY_HARASSMENT",
+ "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "HARM_CATEGORY_CIVIC_INTEGRITY"
+ ]),
+ threshold: import_v42.z.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ])
+ })
+ ).optional(),
+ threshold: import_v42.z.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ]).optional(),
+ /**
+ * Optional. Enables timestamp understanding for audio-only files.
+ *
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+ */
+ audioTimestamp: import_v42.z.boolean().optional()
+ });
+
  // src/google-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useSearchGrounding,
- dynamicRetrievalConfig,
  modelId
  }) {
  var _a;
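
The options schema added in this hunk replaces the former per-model settings: cachedContent, structuredOutputs, safetySettings, audioTimestamp, and the new thinkingConfig are now validated per call under providerOptions.google. A minimal usage sketch, assuming the google provider factory and generateText from the ai package (neither appears in this diff) and an illustrative model id and cache name:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { text } = await generateText({
  model: google('gemini-2.5-flash'), // illustrative model id
  prompt: 'Summarize the cached report in three bullet points.',
  providerOptions: {
    google: {
      // fields are validated against googleGenerativeAIProviderOptions above
      thinkingConfig: { thinkingBudget: 2048 },
      safetySettings: [
        {
          category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
          threshold: 'BLOCK_ONLY_HIGH',
        },
      ],
      cachedContent: 'cachedContents/example-id', // hypothetical cache name
      structuredOutputs: true,
    },
  },
});
console.log(text);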
@@ -274,28 +340,87 @@ function prepareTools({
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
- if (useSearchGrounding) {
+ if (tools == null) {
+ return { tools: void 0, toolConfig: void 0, toolWarnings };
+ }
+ const hasFunctionTools = tools.some((tool) => tool.type === "function");
+ const hasProviderDefinedTools = tools.some(
+ (tool) => tool.type === "provider-defined"
+ );
+ if (hasFunctionTools && hasProviderDefinedTools) {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tools.find((tool) => tool.type === "function"),
+ details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+ });
+ }
+ if (hasProviderDefinedTools) {
+ const googleTools2 = {};
+ const providerDefinedTools = tools.filter(
+ (tool) => tool.type === "provider-defined"
+ );
+ providerDefinedTools.forEach((tool) => {
+ switch (tool.id) {
+ case "google.google_search":
+ if (isGemini2) {
+ googleTools2.googleSearch = {};
+ } else if (supportsDynamicRetrieval) {
+ googleTools2.googleSearchRetrieval = {
+ dynamicRetrievalConfig: {
+ mode: tool.args.mode,
+ dynamicThreshold: tool.args.dynamicThreshold
+ }
+ };
+ } else {
+ googleTools2.googleSearchRetrieval = {};
+ }
+ break;
+ case "google.url_context":
+ if (isGemini2) {
+ googleTools2.urlContext = {};
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool,
+ details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
+ case "google.code_execution":
+ if (isGemini2) {
+ googleTools2.codeExecution = {};
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool,
+ details: "The code execution tools is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ });
  return {
- tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
- },
+ tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
  toolConfig: void 0,
  toolWarnings
  };
  }
- if (tools == null) {
- return { tools: void 0, toolConfig: void 0, toolWarnings };
- }
  const functionDeclarations = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- functionDeclarations.push({
- name: tool.name,
- description: (_a = tool.description) != null ? _a : "",
- parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
- });
+ switch (tool.type) {
+ case "function":
+ functionDeclarations.push({
+ name: tool.name,
+ description: (_a = tool.description) != null ? _a : "",
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+ });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -372,23 +497,80 @@ function mapGoogleGenerativeAIFinishReason({
  }
  }

+ // src/tool/google-search.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_v43 = require("zod/v4");
+ var groundingChunkSchema = import_v43.z.object({
+ web: import_v43.z.object({ uri: import_v43.z.string(), title: import_v43.z.string() }).nullish(),
+ retrievedContext: import_v43.z.object({ uri: import_v43.z.string(), title: import_v43.z.string() }).nullish()
+ });
+ var groundingMetadataSchema = import_v43.z.object({
+ webSearchQueries: import_v43.z.array(import_v43.z.string()).nullish(),
+ retrievalQueries: import_v43.z.array(import_v43.z.string()).nullish(),
+ searchEntryPoint: import_v43.z.object({ renderedContent: import_v43.z.string() }).nullish(),
+ groundingChunks: import_v43.z.array(groundingChunkSchema).nullish(),
+ groundingSupports: import_v43.z.array(
+ import_v43.z.object({
+ segment: import_v43.z.object({
+ startIndex: import_v43.z.number().nullish(),
+ endIndex: import_v43.z.number().nullish(),
+ text: import_v43.z.string().nullish()
+ }),
+ segment_text: import_v43.z.string().nullish(),
+ groundingChunkIndices: import_v43.z.array(import_v43.z.number()).nullish(),
+ supportChunkIndices: import_v43.z.array(import_v43.z.number()).nullish(),
+ confidenceScores: import_v43.z.array(import_v43.z.number()).nullish(),
+ confidenceScore: import_v43.z.array(import_v43.z.number()).nullish()
+ })
+ ).nullish(),
+ retrievalMetadata: import_v43.z.union([
+ import_v43.z.object({
+ webDynamicRetrievalScore: import_v43.z.number()
+ }),
+ import_v43.z.object({})
+ ]).nullish()
+ });
+ var googleSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
+ id: "google.google_search",
+ name: "google_search",
+ inputSchema: import_v43.z.object({
+ mode: import_v43.z.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+ dynamicThreshold: import_v43.z.number().default(1)
+ })
+ });
+
+ // src/tool/url-context.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_v44 = require("zod/v4");
+ var urlMetadataSchema = import_v44.z.object({
+ retrievedUrl: import_v44.z.string(),
+ urlRetrievalStatus: import_v44.z.string()
+ });
+ var urlContextMetadataSchema = import_v44.z.object({
+ urlMetadata: import_v44.z.array(urlMetadataSchema)
+ });
+ var urlContext = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+ id: "google.url_context",
+ name: "url_context",
+ inputSchema: import_v44.z.object({})
+ });
+
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "json";
- this.supportsImageUrls = false;
+ var _a;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : true;
+ this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils5.generateId;
  }
  get provider() {
  return this.config.provider;
  }
+ get supportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
  async getArgs({
  prompt,
  maxOutputTokens,
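
The google-search.ts and url-context.ts modules added in this hunk define provider-defined tools that prepareTools (earlier in this diff) maps onto the googleSearch / googleSearchRetrieval and urlContext request fields. A minimal sketch of passing them, assuming the factories are re-exported on the provider instance as google.tools (that re-export is outside this diff); note that prepareTools warns when function tools and provider-defined tools are mixed in one request:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'), // illustrative model id
  prompt: 'What changed on https://nodejs.org/en/blog this week?',
  tools: {
    // keys must match the names declared by the factories above
    google_search: google.tools.googleSearch({}),
    url_context: google.tools.urlContext({}),
  },
});

// grounding chunks and URL retrieval status surface as provider metadata
// (see the doGenerate/doStream hunks later in this diff)
console.log(result.providerMetadata?.google);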
@@ -404,23 +586,31 @@ var GoogleGenerativeAILanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a;
+ var _a, _b;
  const warnings = [];
- const googleOptions = (0, import_provider_utils3.parseProviderOptions)({
+ const googleOptions = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "google",
  providerOptions,
- schema: googleGenerativeAIProviderOptionsSchema
+ schema: googleGenerativeAIProviderOptions
  });
- const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
+ if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ warnings.push({
+ type: "other",
+ message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ });
+ }
+ const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+ const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+ prompt,
+ { isGemmaModel }
+ );
  const {
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useSearchGrounding: (_a = this.settings.useSearchGrounding) != null ? _a : false,
- dynamicRetrievalConfig: this.settings.dynamicRetrievalConfig,
  modelId: this.modelId
  });
  return {
@@ -439,83 +629,127 @@ var GoogleGenerativeAILanguageModel = class {
  responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
  responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
  // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
- ...this.settings.audioTimestamp && {
- audioTimestamp: this.settings.audioTimestamp
+ // TODO convert into provider option
+ ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+ audioTimestamp: googleOptions.audioTimestamp
  },
  // provider options:
- responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities
+ responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
  },
  contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- tools: googleTools,
+ systemInstruction: isGemmaModel ? void 0 : systemInstruction,
+ safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
- cachedContent: this.settings.cachedContent
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
- supportsUrl(url) {
- return this.config.isSupportedUrl(url);
- }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
- const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
- await (0, import_provider_utils3.resolve)(this.config.headers),
+ const mergedHeaders = (0, import_provider_utils5.combineHeaders)(
+ await (0, import_provider_utils5.resolve)(this.config.headers),
  options.headers
  );
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: `${this.config.baseURL}/${getModelPath(
  this.modelId
  )}:generateContent`,
  headers: mergedHeaders,
  body: args,
  failedResponseHandler: googleFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(responseSchema),
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(responseSchema),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  const candidate = response.candidates[0];
- const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
- const toolCalls = getToolCallsFromParts({
- parts,
- generateId: this.config.generateId
- });
+ const content = [];
+ const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
  const usageMetadata = response.usageMetadata;
+ let lastCodeExecutionToolCallId;
+ for (const part of parts) {
+ if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
+ const toolCallId = this.config.generateId();
+ lastCodeExecutionToolCallId = toolCallId;
+ content.push({
+ type: "tool-call",
+ toolCallId,
+ toolName: "code_execution",
+ input: JSON.stringify(part.executableCode),
+ providerExecuted: true
+ });
+ } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+ content.push({
+ type: "tool-result",
+ // Assumes a result directly follows its corresponding call part.
+ toolCallId: lastCodeExecutionToolCallId,
+ toolName: "code_execution",
+ result: {
+ outcome: part.codeExecutionResult.outcome,
+ output: part.codeExecutionResult.output
+ },
+ providerExecuted: true
+ });
+ lastCodeExecutionToolCallId = void 0;
+ } else if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ content.push({ type: "reasoning", text: part.text });
+ } else {
+ content.push({ type: "text", text: part.text });
+ }
+ } else if ("functionCall" in part) {
+ content.push({
+ type: "tool-call",
+ toolCallId: this.config.generateId(),
+ toolName: part.functionCall.name,
+ input: JSON.stringify(part.functionCall.args)
+ });
+ } else if ("inlineData" in part) {
+ content.push({
+ type: "file",
+ data: part.inlineData.data,
+ mediaType: part.inlineData.mimeType
+ });
+ }
+ }
+ const sources = (_d = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
+ generateId: this.config.generateId
+ })) != null ? _d : [];
+ for (const source of sources) {
+ content.push(source);
+ }
  return {
- text: getTextFromParts(parts),
- files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
- type: "file",
- data: part.inlineData.data,
- mediaType: part.inlineData.mimeType
- })),
- toolCalls,
+ content,
  finishReason: mapGoogleGenerativeAIFinishReason({
  finishReason: candidate.finishReason,
- hasToolCalls: toolCalls != null && toolCalls.length > 0
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
- inputTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : void 0,
- outputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : void 0
+ inputTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _e : void 0,
+ outputTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _f : void 0,
+ totalTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _g : void 0,
+ reasoningTokens: (_h = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _h : void 0,
+ cachedInputTokens: (_i = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _i : void 0
  },
  warnings,
  providerMetadata: {
  google: {
- groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
- safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+ groundingMetadata: (_j = candidate.groundingMetadata) != null ? _j : null,
+ urlContextMetadata: (_k = candidate.urlContextMetadata) != null ? _k : null,
+ safetyRatings: (_l = candidate.safetyRatings) != null ? _l : null,
+ usageMetadata: usageMetadata != null ? usageMetadata : null
  }
  },
- sources: extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId: this.config.generateId
- }),
  request: { body },
  response: {
  // TODO timestamp, model id, id
@@ -527,34 +761,46 @@ var GoogleGenerativeAILanguageModel = class {
  async doStream(options) {
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
- const headers = (0, import_provider_utils3.combineHeaders)(
- await (0, import_provider_utils3.resolve)(this.config.headers),
+ const headers = (0, import_provider_utils5.combineHeaders)(
+ await (0, import_provider_utils5.resolve)(this.config.headers),
  options.headers
  );
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
  url: `${this.config.baseURL}/${getModelPath(
  this.modelId
  )}:streamGenerateContent?alt=sse`,
  headers,
  body: args,
  failedResponseHandler: googleFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(chunkSchema),
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(chunkSchema),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
  const usage = {
  inputTokens: void 0,
- outputTokens: void 0
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let providerMetadata = void 0;
- const generateId = this.config.generateId;
+ const generateId2 = this.config.generateId;
  let hasToolCalls = false;
+ let currentTextBlockId = null;
+ let currentReasoningBlockId = null;
+ let blockCounter = 0;
+ const emittedSourceUrls = /* @__PURE__ */ new Set();
+ let lastCodeExecutionToolCallId;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
@@ -564,16 +810,99 @@ var GoogleGenerativeAILanguageModel = class {
  if (usageMetadata != null) {
  usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
  usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+ usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+ usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+ usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
  }
- const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+ const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
  if (candidate == null) {
  return;
  }
  const content = candidate.content;
+ const sources = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
+ generateId: generateId2
+ });
+ if (sources != null) {
+ for (const source of sources) {
+ if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+ emittedSourceUrls.add(source.url);
+ controller.enqueue(source);
+ }
+ }
+ }
  if (content != null) {
- const deltaText = getTextFromParts(content.parts);
- if (deltaText != null) {
- controller.enqueue(deltaText);
+ const parts = (_g = content.parts) != null ? _g : [];
+ for (const part of parts) {
+ if ("executableCode" in part && ((_h = part.executableCode) == null ? void 0 : _h.code)) {
+ const toolCallId = generateId2();
+ lastCodeExecutionToolCallId = toolCallId;
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId,
+ toolName: "code_execution",
+ input: JSON.stringify(part.executableCode),
+ providerExecuted: true
+ });
+ hasToolCalls = true;
+ } else if ("codeExecutionResult" in part && part.codeExecutionResult) {
+ const toolCallId = lastCodeExecutionToolCallId;
+ if (toolCallId) {
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId,
+ toolName: "code_execution",
+ result: {
+ outcome: part.codeExecutionResult.outcome,
+ output: part.codeExecutionResult.output
+ },
+ providerExecuted: true
+ });
+ lastCodeExecutionToolCallId = void 0;
+ }
+ } else if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ currentTextBlockId = null;
+ }
+ if (currentReasoningBlockId === null) {
+ currentReasoningBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "reasoning-start",
+ id: currentReasoningBlockId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: currentReasoningBlockId,
+ delta: part.text
+ });
+ } else {
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ currentReasoningBlockId = null;
+ }
+ if (currentTextBlockId === null) {
+ currentTextBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "text-start",
+ id: currentTextBlockId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: currentTextBlockId,
+ delta: part.text
+ });
+ }
+ }
+ }
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
@@ -587,23 +916,29 @@ var GoogleGenerativeAILanguageModel = class {
  }
  const toolCallDeltas = getToolCallsFromParts({
  parts: content.parts,
- generateId
+ generateId: generateId2
  });
  if (toolCallDeltas != null) {
  for (const toolCall of toolCallDeltas) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
+ type: "tool-input-start",
+ id: toolCall.toolCallId,
+ toolName: toolCall.toolName
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: toolCall.args
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.toolCallId
  });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: toolCall.toolCallId,
  toolName: toolCall.toolName,
- args: toolCall.args
+ input: toolCall.args
  });
  hasToolCalls = true;
  }
@@ -614,22 +949,31 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_d = extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId
- })) != null ? _d : [];
- for (const source of sources) {
- controller.enqueue(source);
- }
  providerMetadata = {
  google: {
- groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+ groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+ urlContextMetadata: (_j = candidate.urlContextMetadata) != null ? _j : null,
+ safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null
  }
  };
+ if (usageMetadata != null) {
+ providerMetadata.google.usageMetadata = usageMetadata;
+ }
  }
  },
  flush(controller) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ }
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -640,33 +984,24 @@ var GoogleGenerativeAILanguageModel = class {
  })
  ),
  response: { headers: responseHeaders },
- warnings,
  request: { body }
  };
  }
  };
  function getToolCallsFromParts({
  parts,
- generateId
+ generateId: generateId2
  }) {
  const functionCallParts = parts == null ? void 0 : parts.filter(
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: generateId(),
+ toolCallId: generateId2(),
  toolName: part.functionCall.name,
  args: JSON.stringify(part.functionCall.args)
  }));
  }
- function getTextFromParts(parts) {
- const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : {
- type: "text",
- text: textParts.map((part) => part.text).join("")
- };
- }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
  (part) => "inlineData" in part
@@ -674,7 +1009,7 @@ function getInlineDataParts(parts) {
  }
  function extractSources({
  groundingMetadata,
- generateId
+ generateId: generateId2
  }) {
  var _a;
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -682,108 +1017,126 @@ function extractSources({
  ).map((chunk) => ({
  type: "source",
  sourceType: "url",
- id: generateId(),
+ id: generateId2(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = import_zod2.z.object({
- role: import_zod2.z.string(),
- parts: import_zod2.z.array(
- import_zod2.z.union([
- import_zod2.z.object({
- text: import_zod2.z.string()
- }),
- import_zod2.z.object({
- functionCall: import_zod2.z.object({
- name: import_zod2.z.string(),
- args: import_zod2.z.unknown()
+ var contentSchema = import_v45.z.object({
+ parts: import_v45.z.array(
+ import_v45.z.union([
+ // note: order matters since text can be fully empty
+ import_v45.z.object({
+ functionCall: import_v45.z.object({
+ name: import_v45.z.string(),
+ args: import_v45.z.unknown()
  })
  }),
- import_zod2.z.object({
- inlineData: import_zod2.z.object({
- mimeType: import_zod2.z.string(),
- data: import_zod2.z.string()
+ import_v45.z.object({
+ inlineData: import_v45.z.object({
+ mimeType: import_v45.z.string(),
+ data: import_v45.z.string()
  })
+ }),
+ import_v45.z.object({
+ executableCode: import_v45.z.object({
+ language: import_v45.z.string(),
+ code: import_v45.z.string()
+ }).nullish(),
+ codeExecutionResult: import_v45.z.object({
+ outcome: import_v45.z.string(),
+ output: import_v45.z.string()
+ }).nullish(),
+ text: import_v45.z.string().nullish(),
+ thought: import_v45.z.boolean().nullish()
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = import_zod2.z.object({
- web: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish(),
- retrievedContext: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish()
+ var safetyRatingSchema = import_v45.z.object({
+ category: import_v45.z.string().nullish(),
+ probability: import_v45.z.string().nullish(),
+ probabilityScore: import_v45.z.number().nullish(),
+ severity: import_v45.z.string().nullish(),
+ severityScore: import_v45.z.number().nullish(),
+ blocked: import_v45.z.boolean().nullish()
  });
- var groundingMetadataSchema = import_zod2.z.object({
- webSearchQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
- retrievalQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
- searchEntryPoint: import_zod2.z.object({ renderedContent: import_zod2.z.string() }).nullish(),
- groundingChunks: import_zod2.z.array(groundingChunkSchema).nullish(),
- groundingSupports: import_zod2.z.array(
- import_zod2.z.object({
- segment: import_zod2.z.object({
- startIndex: import_zod2.z.number().nullish(),
- endIndex: import_zod2.z.number().nullish(),
- text: import_zod2.z.string().nullish()
- }),
- segment_text: import_zod2.z.string().nullish(),
- groundingChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
- supportChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
- confidenceScores: import_zod2.z.array(import_zod2.z.number()).nullish(),
- confidenceScore: import_zod2.z.array(import_zod2.z.number()).nullish()
- })
- ).nullish(),
- retrievalMetadata: import_zod2.z.union([
- import_zod2.z.object({
- webDynamicRetrievalScore: import_zod2.z.number()
- }),
- import_zod2.z.object({})
- ]).nullish()
- });
- var safetyRatingSchema = import_zod2.z.object({
- category: import_zod2.z.string(),
- probability: import_zod2.z.string(),
- probabilityScore: import_zod2.z.number().nullish(),
- severity: import_zod2.z.string().nullish(),
- severityScore: import_zod2.z.number().nullish(),
- blocked: import_zod2.z.boolean().nullish()
+ var usageSchema = import_v45.z.object({
+ cachedContentTokenCount: import_v45.z.number().nullish(),
+ thoughtsTokenCount: import_v45.z.number().nullish(),
+ promptTokenCount: import_v45.z.number().nullish(),
+ candidatesTokenCount: import_v45.z.number().nullish(),
+ totalTokenCount: import_v45.z.number().nullish()
  });
- var responseSchema = import_zod2.z.object({
- candidates: import_zod2.z.array(
- import_zod2.z.object({
- content: contentSchema.nullish().or(import_zod2.z.object({}).strict()),
- finishReason: import_zod2.z.string().nullish(),
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ var responseSchema = import_v45.z.object({
+ candidates: import_v45.z.array(
+ import_v45.z.object({
+ content: contentSchema.nullish().or(import_v45.z.object({}).strict()),
+ finishReason: import_v45.z.string().nullish(),
+ safetyRatings: import_v45.z.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ),
- usageMetadata: import_zod2.z.object({
- promptTokenCount: import_zod2.z.number().nullish(),
- candidatesTokenCount: import_zod2.z.number().nullish(),
- totalTokenCount: import_zod2.z.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = import_zod2.z.object({
- candidates: import_zod2.z.array(
- import_zod2.z.object({
+ var chunkSchema = import_v45.z.object({
+ candidates: import_v45.z.array(
+ import_v45.z.object({
  content: contentSchema.nullish(),
- finishReason: import_zod2.z.string().nullish(),
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ finishReason: import_v45.z.string().nullish(),
+ safetyRatings: import_v45.z.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ).nullish(),
- usageMetadata: import_zod2.z.object({
- promptTokenCount: import_zod2.z.number().nullish(),
- candidatesTokenCount: import_zod2.z.number().nullish(),
- totalTokenCount: import_zod2.z.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
- var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
- responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish()
+
+ // src/tool/code-execution.ts
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_v46 = require("zod/v4");
+ var codeExecution = (0, import_provider_utils6.createProviderDefinedToolFactoryWithOutputSchema)({
+ id: "google.code_execution",
+ name: "code_execution",
+ inputSchema: import_v46.z.object({
+ language: import_v46.z.string().describe("The programming language of the code."),
+ code: import_v46.z.string().describe("The code to be executed.")
+ }),
+ outputSchema: import_v46.z.object({
+ outcome: import_v46.z.string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
+ output: import_v46.z.string().describe("The output from the code execution.")
+ })
  });
+
+ // src/google-tools.ts
+ var googleTools = {
+ /**
+ * Creates a Google search tool that gives Google direct access to real-time web content.
+ * Must have name "google_search".
+ */
+ googleSearch,
+ /**
+ * Creates a URL context tool that gives Google direct access to real-time web content.
+ * Must have name "url_context".
+ */
+ urlContext,
+ /**
+ * A tool that enables the model to generate and run Python code.
+ * Must have name "code_execution".
+ *
+ * @note Ensure the selected model supports Code Execution.
+ * Multi-tool usage with the code execution tool is typically compatible with Gemini >=2 models.
+ *
+ * @see https://ai.google.dev/gemini-api/docs/code-execution (Google AI)
+ * @see https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/code-execution-api (Vertex AI)
+ */
+ codeExecution
+ };
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  GoogleGenerativeAILanguageModel,
- groundingMetadataSchema,
+ googleTools,
  safetyRatingSchema
  });
  //# sourceMappingURL=index.js.map
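
The final hunks add the code_execution tool factory and replace the internal groundingMetadataSchema export with the googleTools object. A minimal sketch of the provider-executed code-execution flow, under the same assumption that the factories are re-exported as google.tools on the provider instance:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const result = await generateText({
  model: google('gemini-2.0-flash'), // illustrative model id
  prompt: 'Use Python to compute the 20th Fibonacci number.',
  tools: {
    code_execution: google.tools.codeExecution({}),
  },
});

// The model runs the code itself: the generated code and its result come back
// as providerExecuted tool-call / tool-result parts following the
// { outcome, output } outputSchema defined in code-execution.ts above.
console.log(result.text);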