@ai-sdk/google 2.0.0-alpha.9 → 2.0.0-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,14 +21,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  var internal_exports = {};
  __export(internal_exports, {
  GoogleGenerativeAILanguageModel: () => GoogleGenerativeAILanguageModel,
- groundingMetadataSchema: () => groundingMetadataSchema,
+ googleTools: () => googleTools,
  safetyRatingSchema: () => safetyRatingSchema
  });
  module.exports = __toCommonJS(internal_exports);

  // src/google-generative-ai-language-model.ts
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_v45 = require("zod/v4");

  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
@@ -124,16 +124,18 @@ function convertJSONSchemaToOpenAPISchema(jsonSchema) {
  return result;
  }
  function isEmptyObjectSchema(jsonSchema) {
- return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0);
+ return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
  }

  // src/convert-to-google-generative-ai-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
- function convertToGoogleGenerativeAIMessages(prompt) {
+ function convertToGoogleGenerativeAIMessages(prompt, options) {
+ var _a;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
+ const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
  for (const { role, content } of prompt) {
  switch (role) {
  case "system": {
@@ -207,7 +209,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return {
  functionCall: {
  name: part.toolName,
- args: part.args
+ args: part.input
  }
  };
  }
@@ -225,7 +227,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  name: part.toolName,
  response: {
  name: part.toolName,
- content: part.result
+ content: part.output.value
  }
  }
  }))
@@ -234,8 +236,12 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  }
  }
+ if (isGemmaModel && systemInstructionParts.length > 0 && contents.length > 0 && contents[0].role === "user") {
+ const systemText = systemInstructionParts.map((part) => part.text).join("\n\n");
+ contents[0].parts.unshift({ text: systemText + "\n\n" });
+ }
  return {
- systemInstruction: systemInstructionParts.length > 0 ? { parts: systemInstructionParts } : void 0,
+ systemInstruction: systemInstructionParts.length > 0 && !isGemmaModel ? { parts: systemInstructionParts } : void 0,
  contents
  };
  }
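
Note on the hunk above: when `isGemmaModel` is set, system messages are no longer sent as `systemInstruction` (which Gemma models reject); their text is prepended to the first user message instead. A minimal consumer-side sketch; the `ai` package import, the `generateText` call, and the model ID `gemma-3-27b-it` are illustrative assumptions, not part of this diff:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// With a Gemma model ID, the beta folds the system prompt into the first
// user message instead of sending a `systemInstruction` field.
const { text } = await generateText({
  model: google('gemma-3-27b-it'), // hypothetical Gemma model ID
  system: 'You are a terse assistant.',
  prompt: 'Say hello.',
});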
@@ -247,12 +253,12 @@ function getModelPath(modelId) {

  // src/google-error.ts
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod = require("zod");
- var googleErrorDataSchema = import_zod.z.object({
- error: import_zod.z.object({
- code: import_zod.z.number().nullable(),
- message: import_zod.z.string(),
- status: import_zod.z.string()
+ var import_v4 = require("zod/v4");
+ var googleErrorDataSchema = import_v4.z.object({
+ error: import_v4.z.object({
+ code: import_v4.z.number().nullable(),
+ message: import_v4.z.string(),
+ status: import_v4.z.string()
  })
  });
  var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -261,29 +267,19 @@ var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  });

  // src/google-generative-ai-options.ts
- var import_zod2 = require("zod");
- var dynamicRetrievalConfig = import_zod2.z.object({
- /**
- * The mode of the predictor to be used in dynamic retrieval.
- */
- mode: import_zod2.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
- /**
- * The threshold to be used in dynamic retrieval. If not set, a system default
- * value is used.
- */
- dynamicThreshold: import_zod2.z.number().optional()
- });
- var googleGenerativeAIProviderOptions = import_zod2.z.object({
- responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).optional(),
- thinkingConfig: import_zod2.z.object({
- thinkingBudget: import_zod2.z.number().optional()
+ var import_v42 = require("zod/v4");
+ var googleGenerativeAIProviderOptions = import_v42.z.object({
+ responseModalities: import_v42.z.array(import_v42.z.enum(["TEXT", "IMAGE"])).optional(),
+ thinkingConfig: import_v42.z.object({
+ thinkingBudget: import_v42.z.number().optional(),
+ includeThoughts: import_v42.z.boolean().optional()
  }).optional(),
  /**
  Optional.
  The name of the cached content used as context to serve the prediction.
  Format: cachedContents/{cachedContent}
  */
- cachedContent: import_zod2.z.string().optional(),
+ cachedContent: import_v42.z.string().optional(),
  /**
  * Optional. Enable structured output. Default is true.
  *
@@ -292,13 +288,13 @@ var googleGenerativeAIProviderOptions = import_zod2.z.object({
  * Google Generative AI uses. You can use this to disable
  * structured outputs if you need to.
  */
- structuredOutputs: import_zod2.z.boolean().optional(),
+ structuredOutputs: import_v42.z.boolean().optional(),
  /**
  Optional. A list of unique safety settings for blocking unsafe content.
  */
- safetySettings: import_zod2.z.array(
- import_zod2.z.object({
- category: import_zod2.z.enum([
+ safetySettings: import_v42.z.array(
+ import_v42.z.object({
+ category: import_v42.z.enum([
  "HARM_CATEGORY_UNSPECIFIED",
  "HARM_CATEGORY_HATE_SPEECH",
  "HARM_CATEGORY_DANGEROUS_CONTENT",
@@ -306,7 +302,7 @@ var googleGenerativeAIProviderOptions = import_zod2.z.object({
  "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  "HARM_CATEGORY_CIVIC_INTEGRITY"
  ]),
- threshold: import_zod2.z.enum([
+ threshold: import_v42.z.enum([
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
  "BLOCK_LOW_AND_ABOVE",
  "BLOCK_MEDIUM_AND_ABOVE",
@@ -316,7 +312,7 @@ var googleGenerativeAIProviderOptions = import_zod2.z.object({
  ])
  })
  ).optional(),
- threshold: import_zod2.z.enum([
+ threshold: import_v42.z.enum([
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
  "BLOCK_LOW_AND_ABOVE",
  "BLOCK_MEDIUM_AND_ABOVE",
@@ -329,21 +325,7 @@ var googleGenerativeAIProviderOptions = import_zod2.z.object({
  *
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
  */
- audioTimestamp: import_zod2.z.boolean().optional(),
- /**
- Optional. When enabled, the model will use Google search to ground the response.
-
- @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
- */
- useSearchGrounding: import_zod2.z.boolean().optional(),
- /**
- Optional. Specifies the dynamic retrieval configuration.
-
- @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
-
- @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
- */
- dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+ audioTimestamp: import_v42.z.boolean().optional()
  });

  // src/google-prepare-tools.ts
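
Note on the hunk above: the `useSearchGrounding` and `dynamicRetrievalConfig` provider options are removed in the beta; search grounding moves to the provider-defined tools added later in this diff (src/tool/google-search.ts, src/google-tools.ts). A rough migration sketch, assuming the tools are exposed on the provider instance as `google.tools` and consumed through the `ai` package's `generateText` (neither is shown in this diff); the model ID is illustrative:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// alpha.9: providerOptions: { google: { useSearchGrounding: true } }
// beta.10: pass the provider-defined tool instead; the key must be
// "google_search" to match the tool definition in this package.
const { text, sources } = await generateText({
  model: google('gemini-2.0-flash'),
  tools: {
    google_search: google.tools.googleSearch({}),
  },
  prompt: 'What changed in the latest Gemini release?',
});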
@@ -351,8 +333,6 @@ var import_provider2 = require("@ai-sdk/provider");
  function prepareTools({
  tools,
  toolChoice,
- useSearchGrounding,
- dynamicRetrievalConfig: dynamicRetrievalConfig2,
  modelId
  }) {
  var _a;
@@ -360,28 +340,76 @@ function prepareTools({
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
- if (useSearchGrounding) {
+ if (tools == null) {
+ return { tools: void 0, toolConfig: void 0, toolWarnings };
+ }
+ const hasFunctionTools = tools.some((tool) => tool.type === "function");
+ const hasProviderDefinedTools = tools.some(
+ (tool) => tool.type === "provider-defined"
+ );
+ if (hasFunctionTools && hasProviderDefinedTools) {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool: tools.find((tool) => tool.type === "function"),
+ details: "Cannot mix function tools with provider-defined tools in the same request. Please use either function tools or provider-defined tools, but not both."
+ });
+ }
+ if (hasProviderDefinedTools) {
+ const googleTools2 = {};
+ const providerDefinedTools = tools.filter(
+ (tool) => tool.type === "provider-defined"
+ );
+ providerDefinedTools.forEach((tool) => {
+ switch (tool.id) {
+ case "google.google_search":
+ if (isGemini2) {
+ googleTools2.googleSearch = {};
+ } else if (supportsDynamicRetrieval) {
+ googleTools2.googleSearchRetrieval = {
+ dynamicRetrievalConfig: {
+ mode: tool.args.mode,
+ dynamicThreshold: tool.args.dynamicThreshold
+ }
+ };
+ } else {
+ googleTools2.googleSearchRetrieval = {};
+ }
+ break;
+ case "google.url_context":
+ if (isGemini2) {
+ googleTools2.urlContext = {};
+ } else {
+ toolWarnings.push({
+ type: "unsupported-tool",
+ tool,
+ details: "The URL context tool is not supported with other Gemini models than Gemini 2."
+ });
+ }
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
+ }
+ });
  return {
- tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
- },
+ tools: Object.keys(googleTools2).length > 0 ? googleTools2 : void 0,
  toolConfig: void 0,
  toolWarnings
  };
  }
- if (tools == null) {
- return { tools: void 0, toolConfig: void 0, toolWarnings };
- }
  const functionDeclarations = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- functionDeclarations.push({
- name: tool.name,
- description: (_a = tool.description) != null ? _a : "",
- parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
- });
+ switch (tool.type) {
+ case "function":
+ functionDeclarations.push({
+ name: tool.name,
+ description: (_a = tool.description) != null ? _a : "",
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
+ });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
@@ -458,12 +486,72 @@ function mapGoogleGenerativeAIFinishReason({
  }
  }

+ // src/tool/google-search.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_v43 = require("zod/v4");
+ var groundingChunkSchema = import_v43.z.object({
+ web: import_v43.z.object({ uri: import_v43.z.string(), title: import_v43.z.string() }).nullish(),
+ retrievedContext: import_v43.z.object({ uri: import_v43.z.string(), title: import_v43.z.string() }).nullish()
+ });
+ var groundingMetadataSchema = import_v43.z.object({
+ webSearchQueries: import_v43.z.array(import_v43.z.string()).nullish(),
+ retrievalQueries: import_v43.z.array(import_v43.z.string()).nullish(),
+ searchEntryPoint: import_v43.z.object({ renderedContent: import_v43.z.string() }).nullish(),
+ groundingChunks: import_v43.z.array(groundingChunkSchema).nullish(),
+ groundingSupports: import_v43.z.array(
+ import_v43.z.object({
+ segment: import_v43.z.object({
+ startIndex: import_v43.z.number().nullish(),
+ endIndex: import_v43.z.number().nullish(),
+ text: import_v43.z.string().nullish()
+ }),
+ segment_text: import_v43.z.string().nullish(),
+ groundingChunkIndices: import_v43.z.array(import_v43.z.number()).nullish(),
+ supportChunkIndices: import_v43.z.array(import_v43.z.number()).nullish(),
+ confidenceScores: import_v43.z.array(import_v43.z.number()).nullish(),
+ confidenceScore: import_v43.z.array(import_v43.z.number()).nullish()
+ })
+ ).nullish(),
+ retrievalMetadata: import_v43.z.union([
+ import_v43.z.object({
+ webDynamicRetrievalScore: import_v43.z.number()
+ }),
+ import_v43.z.object({})
+ ]).nullish()
+ });
+ var googleSearch = (0, import_provider_utils3.createProviderDefinedToolFactory)({
+ id: "google.google_search",
+ name: "google_search",
+ inputSchema: import_v43.z.object({
+ mode: import_v43.z.enum(["MODE_DYNAMIC", "MODE_UNSPECIFIED"]).default("MODE_UNSPECIFIED"),
+ dynamicThreshold: import_v43.z.number().default(1)
+ })
+ });
+
+ // src/tool/url-context.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_v44 = require("zod/v4");
+ var urlMetadataSchema = import_v44.z.object({
+ retrievedUrl: import_v44.z.string(),
+ urlRetrievalStatus: import_v44.z.string()
+ });
+ var urlContextMetadataSchema = import_v44.z.object({
+ urlMetadata: import_v44.z.array(urlMetadataSchema)
+ });
+ var urlContext = (0, import_provider_utils4.createProviderDefinedToolFactory)({
+ id: "google.url_context",
+ name: "url_context",
+ inputSchema: import_v44.z.object({})
+ });
+
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
  constructor(modelId, config) {
  this.specificationVersion = "v2";
+ var _a;
  this.modelId = modelId;
  this.config = config;
+ this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils5.generateId;
  }
  get provider() {
  return this.config.provider;
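
Note on the hunks above: src/tool/google-search.ts and src/tool/url-context.ts define the two provider-defined tools via createProviderDefinedToolFactory, and prepareTools maps them to googleSearch/googleSearchRetrieval and urlContext entries in the request, forwarding urlContext only for Gemini 2 models. A hedged usage sketch for the URL context tool, assuming it is exposed as `google.tools.urlContext` and used through the `ai` package (not shown in this diff); the model ID and URL are illustrative:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// The tool takes no options (its inputSchema above is an empty object);
// the key must be "url_context" to match the tool definition.
const result = await generateText({
  model: google('gemini-2.5-flash'),
  tools: {
    url_context: google.tools.urlContext({}),
  },
  prompt: 'Summarize https://ai.google.dev/gemini-api/docs in two sentences.',
});
console.log(result.text);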
@@ -489,21 +577,29 @@ var GoogleGenerativeAILanguageModel = class {
  }) {
  var _a, _b;
  const warnings = [];
- const googleOptions = await (0, import_provider_utils3.parseProviderOptions)({
+ const googleOptions = await (0, import_provider_utils5.parseProviderOptions)({
  provider: "google",
  providerOptions,
  schema: googleGenerativeAIProviderOptions
  });
- const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
+ if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ warnings.push({
+ type: "other",
+ message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ });
+ }
+ const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+ const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
+ prompt,
+ { isGemmaModel }
+ );
  const {
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  toolWarnings
  } = prepareTools({
  tools,
  toolChoice,
- useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
- dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
  modelId: this.modelId
  });
  return {
@@ -532,9 +628,9 @@ var GoogleGenerativeAILanguageModel = class {
  thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
  },
  contents,
- systemInstruction,
+ systemInstruction: isGemmaModel ? void 0 : systemInstruction,
  safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
- tools: googleTools,
+ tools: googleTools2,
  toolConfig: googleToolConfig,
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
  },
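
Note on the hunks above: getArgs now accepts `thinkingConfig.includeThoughts`, warns when it is used outside the Vertex provider, and passes `thinkingConfig` through to the request; thought parts then surface as reasoning content (see the doGenerate and doStream hunks below). A sketch of setting these provider options via the `ai` package's `generateText`; the call site, model ID, and budget value are illustrative, not part of this diff:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { text } = await generateText({
  model: google('gemini-2.5-flash'),
  prompt: 'Plan a three-step migration from REST to gRPC.',
  providerOptions: {
    google: {
      // field names match googleGenerativeAIProviderOptions above;
      // the budget value is illustrative
      thinkingConfig: { thinkingBudget: 1024, includeThoughts: true },
    },
  },
});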
@@ -542,41 +638,45 @@ var GoogleGenerativeAILanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
- const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
- await (0, import_provider_utils3.resolve)(this.config.headers),
+ const mergedHeaders = (0, import_provider_utils5.combineHeaders)(
+ await (0, import_provider_utils5.resolve)(this.config.headers),
  options.headers
  );
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
- } = await (0, import_provider_utils3.postJsonToApi)({
+ } = await (0, import_provider_utils5.postJsonToApi)({
  url: `${this.config.baseURL}/${getModelPath(
  this.modelId
  )}:generateContent`,
  headers: mergedHeaders,
  body: args,
  failedResponseHandler: googleFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(responseSchema),
+ successfulResponseHandler: (0, import_provider_utils5.createJsonResponseHandler)(responseSchema),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
  const candidate = response.candidates[0];
  const content = [];
  const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ const usageMetadata = response.usageMetadata;
  for (const part of parts) {
- if ("text" in part && part.text.length > 0) {
- content.push({ type: "text", text: part.text });
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ content.push({ type: "reasoning", text: part.text });
+ } else {
+ content.push({ type: "text", text: part.text });
+ }
  } else if ("functionCall" in part) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: this.config.generateId(),
  toolName: part.functionCall.name,
- args: JSON.stringify(part.functionCall.args)
+ input: JSON.stringify(part.functionCall.args)
  });
  } else if ("inlineData" in part) {
  content.push({
@@ -593,7 +693,6 @@ var GoogleGenerativeAILanguageModel = class {
  for (const source of sources) {
  content.push(source);
  }
- const usageMetadata = response.usageMetadata;
  return {
  content,
  finishReason: mapGoogleGenerativeAIFinishReason({
@@ -611,7 +710,9 @@ var GoogleGenerativeAILanguageModel = class {
  providerMetadata: {
  google: {
  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
+ usageMetadata: usageMetadata != null ? usageMetadata : null
  }
  },
  request: { body },
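
Note on the hunk above: `providerMetadata.google` now carries `urlContextMetadata` and the raw `usageMetadata` alongside `groundingMetadata` and `safetyRatings`. A read-side sketch, assuming the result is produced via the `ai` package's `generateText` (not shown in this diff); the model ID and prompt are illustrative:

import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

const { providerMetadata } = await generateText({
  model: google('gemini-2.5-flash'),
  tools: { url_context: google.tools.urlContext({}) },
  prompt: 'What does https://example.com say?',
});

// Each field mirrors the providerMetadata.google object assembled above
// and is null when the response does not include it.
const googleMetadata = providerMetadata?.google;
console.log(googleMetadata?.urlContextMetadata); // retrieved URLs and their status
console.log(googleMetadata?.usageMetadata); // raw token counts, incl. thoughtsTokenCount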
@@ -625,18 +726,18 @@ var GoogleGenerativeAILanguageModel = class {
  async doStream(options) {
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
- const headers = (0, import_provider_utils3.combineHeaders)(
- await (0, import_provider_utils3.resolve)(this.config.headers),
+ const headers = (0, import_provider_utils5.combineHeaders)(
+ await (0, import_provider_utils5.resolve)(this.config.headers),
  options.headers
  );
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+ const { responseHeaders, value: response } = await (0, import_provider_utils5.postJsonToApi)({
  url: `${this.config.baseURL}/${getModelPath(
  this.modelId
  )}:streamGenerateContent?alt=sse`,
  headers,
  body: args,
  failedResponseHandler: googleFailedResponseHandler,
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(chunkSchema),
+ successfulResponseHandler: (0, import_provider_utils5.createEventSourceResponseHandler)(chunkSchema),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
@@ -647,8 +748,12 @@ var GoogleGenerativeAILanguageModel = class {
  totalTokens: void 0
  };
  let providerMetadata = void 0;
- const generateId = this.config.generateId;
+ const generateId2 = this.config.generateId;
  let hasToolCalls = false;
+ let currentTextBlockId = null;
+ let currentReasoningBlockId = null;
+ let blockCounter = 0;
+ const emittedSourceUrls = /* @__PURE__ */ new Set();
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -656,7 +761,10 @@ var GoogleGenerativeAILanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
@@ -675,10 +783,64 @@ var GoogleGenerativeAILanguageModel = class {
  return;
  }
  const content = candidate.content;
+ const sources = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
+ generateId: generateId2
+ });
+ if (sources != null) {
+ for (const source of sources) {
+ if (source.sourceType === "url" && !emittedSourceUrls.has(source.url)) {
+ emittedSourceUrls.add(source.url);
+ controller.enqueue(source);
+ }
+ }
+ }
  if (content != null) {
- const deltaText = getTextFromParts(content.parts);
- if (deltaText != null) {
- controller.enqueue(deltaText);
+ const parts = (_g = content.parts) != null ? _g : [];
+ for (const part of parts) {
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ currentTextBlockId = null;
+ }
+ if (currentReasoningBlockId === null) {
+ currentReasoningBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "reasoning-start",
+ id: currentReasoningBlockId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: currentReasoningBlockId,
+ delta: part.text
+ });
+ } else {
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ currentReasoningBlockId = null;
+ }
+ if (currentTextBlockId === null) {
+ currentTextBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "text-start",
+ id: currentTextBlockId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: currentTextBlockId,
+ delta: part.text
+ });
+ }
+ }
+ }
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
@@ -692,23 +854,29 @@ var GoogleGenerativeAILanguageModel = class {
  }
  const toolCallDeltas = getToolCallsFromParts({
  parts: content.parts,
- generateId
+ generateId: generateId2
  });
  if (toolCallDeltas != null) {
  for (const toolCall of toolCallDeltas) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
+ type: "tool-input-start",
+ id: toolCall.toolCallId,
+ toolName: toolCall.toolName
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: toolCall.args
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.toolCallId
  });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: toolCall.toolCallId,
  toolName: toolCall.toolName,
- args: toolCall.args
+ input: toolCall.args
  });
  hasToolCalls = true;
  }
@@ -719,22 +887,31 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_g = extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId
- })) != null ? _g : [];
- for (const source of sources) {
- controller.enqueue(source);
- }
  providerMetadata = {
  google: {
  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
  }
  };
+ if (usageMetadata != null) {
+ providerMetadata.google.usageMetadata = usageMetadata;
+ }
  }
  },
  flush(controller) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ }
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -751,26 +928,18 @@ var GoogleGenerativeAILanguageModel = class {
  };
  }
  function getToolCallsFromParts({
  parts,
- generateId
+ generateId: generateId2
  }) {
  const functionCallParts = parts == null ? void 0 : parts.filter(
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
  type: "tool-call",
- toolCallType: "function",
- toolCallId: generateId(),
+ toolCallId: generateId2(),
  toolName: part.functionCall.name,
  args: JSON.stringify(part.functionCall.args)
  }));
  }
- function getTextFromParts(parts) {
- const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : {
- type: "text",
- text: textParts.map((part) => part.text).join("")
- };
- }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
  (part) => "inlineData" in part
@@ -778,7 +947,7 @@ function getInlineDataParts(parts) {
  }
  function extractSources({
  groundingMetadata,
- generateId
+ generateId: generateId2
  }) {
  var _a;
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
@@ -786,104 +955,91 @@ function extractSources({
  ).map((chunk) => ({
  type: "source",
  sourceType: "url",
- id: generateId(),
+ id: generateId2(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = import_zod3.z.object({
- role: import_zod3.z.string(),
- parts: import_zod3.z.array(
- import_zod3.z.union([
- import_zod3.z.object({
- text: import_zod3.z.string()
- }),
- import_zod3.z.object({
- functionCall: import_zod3.z.object({
- name: import_zod3.z.string(),
- args: import_zod3.z.unknown()
+ var contentSchema = import_v45.z.object({
+ parts: import_v45.z.array(
+ import_v45.z.union([
+ // note: order matters since text can be fully empty
+ import_v45.z.object({
+ functionCall: import_v45.z.object({
+ name: import_v45.z.string(),
+ args: import_v45.z.unknown()
  })
  }),
- import_zod3.z.object({
- inlineData: import_zod3.z.object({
- mimeType: import_zod3.z.string(),
- data: import_zod3.z.string()
+ import_v45.z.object({
+ inlineData: import_v45.z.object({
+ mimeType: import_v45.z.string(),
+ data: import_v45.z.string()
  })
+ }),
+ import_v45.z.object({
+ text: import_v45.z.string().nullish(),
+ thought: import_v45.z.boolean().nullish()
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = import_zod3.z.object({
- web: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish(),
- retrievedContext: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish()
+ var safetyRatingSchema = import_v45.z.object({
+ category: import_v45.z.string().nullish(),
+ probability: import_v45.z.string().nullish(),
+ probabilityScore: import_v45.z.number().nullish(),
+ severity: import_v45.z.string().nullish(),
+ severityScore: import_v45.z.number().nullish(),
+ blocked: import_v45.z.boolean().nullish()
  });
- var groundingMetadataSchema = import_zod3.z.object({
- webSearchQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
- retrievalQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
- searchEntryPoint: import_zod3.z.object({ renderedContent: import_zod3.z.string() }).nullish(),
- groundingChunks: import_zod3.z.array(groundingChunkSchema).nullish(),
- groundingSupports: import_zod3.z.array(
- import_zod3.z.object({
- segment: import_zod3.z.object({
- startIndex: import_zod3.z.number().nullish(),
- endIndex: import_zod3.z.number().nullish(),
- text: import_zod3.z.string().nullish()
- }),
- segment_text: import_zod3.z.string().nullish(),
- groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
- supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
- confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
- confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
- })
- ).nullish(),
- retrievalMetadata: import_zod3.z.union([
- import_zod3.z.object({
- webDynamicRetrievalScore: import_zod3.z.number()
- }),
- import_zod3.z.object({})
- ]).nullish()
+ var usageSchema = import_v45.z.object({
+ cachedContentTokenCount: import_v45.z.number().nullish(),
+ thoughtsTokenCount: import_v45.z.number().nullish(),
+ promptTokenCount: import_v45.z.number().nullish(),
+ candidatesTokenCount: import_v45.z.number().nullish(),
+ totalTokenCount: import_v45.z.number().nullish()
  });
- var safetyRatingSchema = import_zod3.z.object({
- category: import_zod3.z.string().nullish(),
- probability: import_zod3.z.string().nullish(),
- probabilityScore: import_zod3.z.number().nullish(),
- severity: import_zod3.z.string().nullish(),
- severityScore: import_zod3.z.number().nullish(),
- blocked: import_zod3.z.boolean().nullish()
- });
- var usageSchema = import_zod3.z.object({
- cachedContentTokenCount: import_zod3.z.number().nullish(),
- thoughtsTokenCount: import_zod3.z.number().nullish(),
- promptTokenCount: import_zod3.z.number().nullish(),
- candidatesTokenCount: import_zod3.z.number().nullish(),
- totalTokenCount: import_zod3.z.number().nullish()
- });
- var responseSchema = import_zod3.z.object({
- candidates: import_zod3.z.array(
- import_zod3.z.object({
- content: contentSchema.nullish().or(import_zod3.z.object({}).strict()),
- finishReason: import_zod3.z.string().nullish(),
- safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ var responseSchema = import_v45.z.object({
+ candidates: import_v45.z.array(
+ import_v45.z.object({
+ content: contentSchema.nullish().or(import_v45.z.object({}).strict()),
+ finishReason: import_v45.z.string().nullish(),
+ safetyRatings: import_v45.z.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ),
  usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = import_zod3.z.object({
- candidates: import_zod3.z.array(
- import_zod3.z.object({
+ var chunkSchema = import_v45.z.object({
+ candidates: import_v45.z.array(
+ import_v45.z.object({
  content: contentSchema.nullish(),
- finishReason: import_zod3.z.string().nullish(),
- safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
- groundingMetadata: groundingMetadataSchema.nullish()
+ finishReason: import_v45.z.string().nullish(),
+ safetyRatings: import_v45.z.array(safetyRatingSchema).nullish(),
+ groundingMetadata: groundingMetadataSchema.nullish(),
+ urlContextMetadata: urlContextMetadataSchema.nullish()
  })
  ).nullish(),
  usageMetadata: usageSchema.nullish()
  });
+
+ // src/google-tools.ts
+ var googleTools = {
+ /**
+ * Creates a Google search tool that gives Google direct access to real-time web content.
+ * Must have name "google_search".
+ */
+ googleSearch,
+ /**
+ * Creates a URL context tool that gives Google direct access to real-time web content.
+ * Must have name "url_context".
+ */
+ urlContext
+ };
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
  GoogleGenerativeAILanguageModel,
- groundingMetadataSchema,
+ googleTools,
  safetyRatingSchema
  });
  //# sourceMappingURL=index.js.map