@ai-sdk/google 1.2.17 → 2.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,11 +28,11 @@ module.exports = __toCommonJS(internal_exports);
 
  // src/google-generative-ai-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");
 
  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
- if (isEmptyObjectSchema(jsonSchema)) {
+ if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
  return void 0;
  }
  if (typeof jsonSchema === "boolean") {
@@ -131,7 +131,6 @@ function isEmptyObjectSchema(jsonSchema) {
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToGoogleGenerativeAIMessages(prompt) {
- var _a, _b;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
@@ -155,33 +154,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  parts.push({ text: part.text });
  break;
  }
- case "image": {
- parts.push(
- part.image instanceof URL ? {
- fileData: {
- mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
- fileUri: part.image.toString()
- }
- } : {
- inlineData: {
- mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
- data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image)
- }
- }
- );
- break;
- }
  case "file": {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  parts.push(
  part.data instanceof URL ? {
  fileData: {
- mimeType: part.mimeType,
+ mimeType: mediaType,
  fileUri: part.data.toString()
  }
  } : {
  inlineData: {
- mimeType: part.mimeType,
- data: part.data
+ mimeType: mediaType,
+ data: (0, import_provider_utils.convertToBase64)(part.data)
  }
  }
  );
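
The hunk above folds the v1 `image` part into the generic `file` part: `mimeType` becomes `mediaType`, the `image/*` wildcard is normalized to `image/jpeg`, and binary data goes through `convertToBase64` instead of `convertUint8ArrayToBase64`. A minimal sketch of the shape change; only the field names come from the diff, the surrounding message structure is assumed:

```ts
// v1: images had a dedicated part type with `mimeType`.
const v1ImagePart = {
  type: "image" as const,
  image: new Uint8Array([137, 80, 78, 71]), // raw bytes
  mimeType: "image/jpeg",
};

// v2: images travel as `file` parts with `mediaType`; the converter
// rewrites "image/*" to "image/jpeg" and base64-encodes binary data.
const v2FilePart = {
  type: "file" as const,
  data: new Uint8Array([137, 80, 78, 71]), // raw bytes or a URL
  mediaType: "image/*",
};

console.log(v1ImagePart.type, "->", v2FilePart.type);
```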
@@ -202,7 +186,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return part.text.length === 0 ? void 0 : { text: part.text };
  }
  case "file": {
- if (part.mimeType !== "image/png") {
+ if (part.mediaType !== "image/png") {
  throw new import_provider.UnsupportedFunctionalityError({
  functionality: "Only PNG images are supported in assistant messages"
  });
@@ -214,8 +198,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  return {
  inlineData: {
- mimeType: part.mimeType,
- data: part.data
+ mimeType: part.mediaType,
+ data: (0, import_provider_utils.convertToBase64)(part.data)
  }
  };
  }
@@ -276,18 +260,110 @@ var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });
 
+ // src/google-generative-ai-options.ts
+ var import_zod2 = require("zod");
+ var dynamicRetrievalConfig = import_zod2.z.object({
+ /**
+ * The mode of the predictor to be used in dynamic retrieval.
+ */
+ mode: import_zod2.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+ /**
+ * The threshold to be used in dynamic retrieval. If not set, a system default
+ * value is used.
+ */
+ dynamicThreshold: import_zod2.z.number().optional()
+ });
+ var googleGenerativeAIProviderOptions = import_zod2.z.object({
+ responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).optional(),
+ thinkingConfig: import_zod2.z.object({
+ thinkingBudget: import_zod2.z.number().optional()
+ }).optional(),
+ /**
+ Optional.
+ The name of the cached content used as context to serve the prediction.
+ Format: cachedContents/{cachedContent}
+ */
+ cachedContent: import_zod2.z.string().optional(),
+ /**
+ * Optional. Enable structured output. Default is true.
+ *
+ * This is useful when the JSON Schema contains elements that are
+ * not supported by the OpenAPI schema version that
+ * Google Generative AI uses. You can use this to disable
+ * structured outputs if you need to.
+ */
+ structuredOutputs: import_zod2.z.boolean().optional(),
+ /**
+ Optional. A list of unique safety settings for blocking unsafe content.
+ */
+ safetySettings: import_zod2.z.array(
+ import_zod2.z.object({
+ category: import_zod2.z.enum([
+ "HARM_CATEGORY_UNSPECIFIED",
+ "HARM_CATEGORY_HATE_SPEECH",
+ "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "HARM_CATEGORY_HARASSMENT",
+ "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "HARM_CATEGORY_CIVIC_INTEGRITY"
+ ]),
+ threshold: import_zod2.z.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ])
+ })
+ ).optional(),
+ threshold: import_zod2.z.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ]).optional(),
+ /**
+ * Optional. Enables timestamp understanding for audio-only files.
+ *
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+ */
+ audioTimestamp: import_zod2.z.boolean().optional(),
+ /**
+ Optional. When enabled, the model will use Google search to ground the response.
+
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+ */
+ useSearchGrounding: import_zod2.z.boolean().optional(),
+ /**
+ Optional. Specifies the dynamic retrieval configuration.
+
+ @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+ */
+ dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+ });
+
  // src/google-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
- function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
- var _a, _b;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ function prepareTools({
+ tools,
+ toolChoice,
+ useSearchGrounding,
+ dynamicRetrievalConfig: dynamicRetrievalConfig2,
+ modelId
+ }) {
+ var _a;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
  if (useSearchGrounding) {
  return {
  tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+ googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
  },
  toolConfig: void 0,
  toolWarnings
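
This hunk introduces `googleGenerativeAIProviderOptions`, which replaces the v1 per-model settings object: values such as `safetySettings`, `cachedContent`, `useSearchGrounding`, and `thinkingConfig` are now validated per call. A hedged usage sketch, assuming the standard AI SDK `generateText` call shape; the exact option surface is whatever the schema above accepts:

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const { text } = await generateText({
  model: google("gemini-2.0-flash"), // v2: no second settings argument
  providerOptions: {
    google: {
      useSearchGrounding: true,
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: "BLOCK_MEDIUM_AND_ABOVE",
        },
      ],
      thinkingConfig: { thinkingBudget: 1024 },
    },
  },
  prompt: "Summarize today's AI news.",
});
```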
@@ -303,12 +379,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
  } else {
  functionDeclarations.push({
  name: tool.name,
- description: (_b = tool.description) != null ? _b : "",
+ description: (_a = tool.description) != null ? _a : "",
  parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
  });
  }
  }
- const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
  return {
  tools: { functionDeclarations },
@@ -350,7 +425,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -385,25 +460,21 @@ function mapGoogleGenerativeAIFinishReason({
 
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = "json";
- this.supportsImageUrls = false;
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : true;
- }
  get provider() {
  return this.config.provider;
  }
+ get supportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
  async getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
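
Above, the constructor drops the `settings` parameter, the specification version moves from "v1" to "v2", and the imperative `supportsUrl(url)` predicate (removed in a later hunk) gives way to a declarative `supportedUrls` getter that reads a map from config. A sketch of the implied config contract; the interface name and the example URL pattern are assumptions, not from the package:

```ts
interface GoogleGenerativeAIConfig {
  provider: string;
  generateId: () => string;
  fetch?: typeof globalThis.fetch;
  // Replaces v1's `isSupportedUrl(url)` check with a
  // media-type -> URL-pattern map.
  supportedUrls?: () => Record<string, RegExp[]>;
}

const config: GoogleGenerativeAIConfig = {
  provider: "google.generative-ai",
  generateId: () => crypto.randomUUID(),
  supportedUrls: () => ({
    "*": [/^https:\/\/generativelanguage\.googleapis\.com\/v1beta\/files\/.+$/],
  }),
};

console.log(Object.keys(config.supportedUrls?.() ?? {}));
```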
@@ -412,112 +483,66 @@ var GoogleGenerativeAILanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
  var _a, _b;
- const type = mode.type;
  const warnings = [];
- const googleOptions = (0, import_provider_utils3.parseProviderOptions)({
+ const googleOptions = await (0, import_provider_utils3.parseProviderOptions)({
  provider: "google",
- providerOptions: providerMetadata,
- schema: googleGenerativeAIProviderOptionsSchema
+ providerOptions,
+ schema: googleGenerativeAIProviderOptions
  });
- const generationConfig = {
- // standardized settings:
- maxOutputTokens: maxTokens,
- temperature,
- topK,
- topP,
- frequencyPenalty,
- presencePenalty,
- stopSequences,
- seed,
- // response format:
- responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
- responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
- // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
- ...this.settings.audioTimestamp && {
- audioTimestamp: this.settings.audioTimestamp
- },
- // provider options:
- responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
- };
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
- switch (type) {
- case "regular": {
- const { tools, toolConfig, toolWarnings } = prepareTools(
- mode,
- (_a = this.settings.useSearchGrounding) != null ? _a : false,
- this.settings.dynamicRetrievalConfig,
- this.modelId
- );
- return {
- args: {
- generationConfig,
- contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- tools,
- toolConfig,
- cachedContent: this.settings.cachedContent
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- generationConfig: {
- ...generationConfig,
- responseMimeType: "application/json",
- responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
- // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
- },
- contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- cachedContent: this.settings.cachedContent
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- generationConfig,
- contents,
- tools: {
- functionDeclarations: [
- {
- name: mode.tool.name,
- description: (_b = mode.tool.description) != null ? _b : "",
- parameters: convertJSONSchemaToOpenAPISchema(
- mode.tool.parameters
- )
- }
- ]
- },
- toolConfig: { functionCallingConfig: { mode: "ANY" } },
- safetySettings: this.settings.safetySettings,
- cachedContent: this.settings.cachedContent
+ const {
+ tools: googleTools,
+ toolConfig: googleToolConfig,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+ dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+ modelId: this.modelId
+ });
+ return {
+ args: {
+ generationConfig: {
+ // standardized settings:
+ maxOutputTokens,
+ temperature,
+ topK,
+ topP,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences,
+ seed,
+ // response format:
+ responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+ responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+ // so this is needed as an escape hatch:
+ // TODO convert into provider option
+ ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+ audioTimestamp: googleOptions.audioTimestamp
  },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
- }
- supportsUrl(url) {
- return this.config.isSupportedUrl(url);
+ // provider options:
+ responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+ },
+ contents,
+ systemInstruction,
+ safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+ tools: googleTools,
+ toolConfig: googleToolConfig,
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
  const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
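
With the `mode` switch removed, `getArgs` builds one request body for every call type: JSON output is driven by `responseFormat`, tools always flow through `prepareTools`, and Google-specific values come from the parsed provider options. A hedged sketch of the body it assembles for a JSON-mode call; all values are illustrative, only the field names come from the diff:

```ts
const args = {
  generationConfig: {
    maxOutputTokens: 1024, // renamed from v1's `maxTokens`
    temperature: 0,
    // responseFormat { type: "json" } maps to:
    responseMimeType: "application/json",
    // converted by convertJSONSchemaToOpenAPISchema unless
    // providerOptions.google.structuredOutputs === false:
    responseSchema: { type: "object", properties: { answer: { type: "string" } } },
  },
  contents: [{ role: "user", parts: [{ text: "Answer as JSON." }] }],
  systemInstruction: undefined,
  // v1 read these from `this.settings`; v2 reads providerOptions.google:
  safetySettings: undefined,
  tools: undefined,
  toolConfig: undefined,
  cachedContent: undefined,
};

console.log(args.generationConfig.responseMimeType);
```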
@@ -539,43 +564,62 @@ var GoogleGenerativeAILanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { contents: rawPrompt, ...rawSettings } = args;
  const candidate = response.candidates[0];
- const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
- const toolCalls = getToolCallsFromParts({
- parts,
+ const content = [];
+ const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ for (const part of parts) {
+ if ("text" in part && part.text.length > 0) {
+ content.push({ type: "text", text: part.text });
+ } else if ("functionCall" in part) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: this.config.generateId(),
+ toolName: part.functionCall.name,
+ args: JSON.stringify(part.functionCall.args)
+ });
+ } else if ("inlineData" in part) {
+ content.push({
+ type: "file",
+ data: part.inlineData.data,
+ mediaType: part.inlineData.mimeType
+ });
+ }
+ }
+ const sources = (_b = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
  generateId: this.config.generateId
- });
+ })) != null ? _b : [];
+ for (const source of sources) {
+ content.push(source);
+ }
  const usageMetadata = response.usageMetadata;
  return {
- text: getTextFromParts(parts),
- files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
- data: part.inlineData.data,
- mimeType: part.inlineData.mimeType
- })),
- toolCalls,
+ content,
  finishReason: mapGoogleGenerativeAIFinishReason({
  finishReason: candidate.finishReason,
- hasToolCalls: toolCalls != null && toolCalls.length > 0
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
- promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
- completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+ inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+ outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+ totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+ reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+ cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
  warnings,
  providerMetadata: {
  google: {
- groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
- safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
  }
  },
- sources: extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId: this.config.generateId
- }),
- request: { body }
+ request: { body },
+ response: {
+ // TODO timestamp, model id, id
+ headers: responseHeaders,
+ body: rawResponse
+ }
  };
  }
  async doStream(options) {
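
`doGenerate` now returns a single ordered `content` array in place of the separate `text`, `toolCalls`, `files`, and `sources` fields. A sketch of the part union this hunk produces; the type alias is an assumption, the part shapes are read directly from the code above:

```ts
type GoogleContentPart =
  | { type: "text"; text: string }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string;
    }
  | { type: "file"; data: string; mediaType: string }
  | { type: "source"; sourceType: "url"; id: string; url: string; title?: string };

// One way a consumer can recover what v1 returned as `text`:
function collectText(content: GoogleContentPart[]): string {
  return content
    .filter((part): part is Extract<GoogleContentPart, { type: "text" }> => part.type === "text")
    .map((part) => part.text)
    .join("");
}
```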
@@ -596,11 +640,11 @@ var GoogleGenerativeAILanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { contents: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let providerMetadata = void 0;
  const generateId = this.config.generateId;
@@ -608,8 +652,11 @@ var GoogleGenerativeAILanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
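
The new `start` handler means every v2 stream opens with a `stream-start` part carrying the request warnings, which (per a later hunk) no longer ride on the `doStream` return value. A hedged consumer sketch; the union below is narrowed to the part types visible in this diff, the real stream carries more:

```ts
declare const stream: AsyncIterable<
  | { type: "stream-start"; warnings: unknown[] }
  | { type: "text"; text: string }
  | { type: "file"; mediaType: string; data: string }
>;

for await (const part of stream) {
  switch (part.type) {
    case "stream-start":
      console.warn(part.warnings); // warnings now arrive as the first part
      break;
    case "text":
      // v1 sent { type: "text-delta", textDelta }; v2 enqueues the
      // text part from getTextFromParts directly
      process.stdout.write(part.text);
      break;
    case "file":
      console.log(part.mediaType); // renamed from mimeType
      break;
  }
}
```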
@@ -617,12 +664,13 @@ var GoogleGenerativeAILanguageModel = class {
  const value = chunk.value;
  const usageMetadata = value.usageMetadata;
  if (usageMetadata != null) {
- usage = {
- promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
- completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
- };
+ usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+ usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+ usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+ usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+ usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
  }
- const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+ const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
  if (candidate == null) {
  return;
  }
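
Token accounting widens from two counters that defaulted to `NaN` to five fields that stay `undefined` when the API omits them, including the new reasoning and cached-input counts. A sketch of the resulting shape; the interface name is an assumption:

```ts
interface GoogleUsage {
  inputTokens: number | undefined; // was promptTokens (defaulted to NaN)
  outputTokens: number | undefined; // was completionTokens (defaulted to NaN)
  totalTokens: number | undefined;
  reasoningTokens?: number; // from usageMetadata.thoughtsTokenCount
  cachedInputTokens?: number; // from usageMetadata.cachedContentTokenCount
}

const usage: GoogleUsage = {
  inputTokens: undefined,
  outputTokens: undefined,
  totalTokens: undefined,
};

console.log(usage.totalTokens ?? "not reported");
```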
@@ -630,17 +678,14 @@ var GoogleGenerativeAILanguageModel = class {
  if (content != null) {
  const deltaText = getTextFromParts(content.parts);
  if (deltaText != null) {
- controller.enqueue({
- type: "text-delta",
- textDelta: deltaText
- });
+ controller.enqueue(deltaText);
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
  for (const part of inlineDataParts) {
  controller.enqueue({
  type: "file",
- mimeType: part.inlineData.mimeType,
+ mediaType: part.inlineData.mimeType,
  data: part.inlineData.data
  });
  }
@@ -674,17 +719,17 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_d = extractSources({
+ const sources = (_g = extractSources({
  groundingMetadata: candidate.groundingMetadata,
  generateId
- })) != null ? _d : [];
+ })) != null ? _g : [];
  for (const source of sources) {
- controller.enqueue({ type: "source", source });
+ controller.enqueue(source);
  }
  providerMetadata = {
  google: {
- groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
  }
  };
  }
@@ -699,9 +744,7 @@ var GoogleGenerativeAILanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
+ response: { headers: responseHeaders },
  request: { body }
  };
  }
@@ -714,6 +757,7 @@ function getToolCallsFromParts({
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+ type: "tool-call",
  toolCallType: "function",
  toolCallId: generateId(),
  toolName: part.functionCall.name,
@@ -722,7 +766,10 @@ function getToolCallsFromParts({
  }
  function getTextFromParts(parts) {
  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
+ return textParts == null || textParts.length === 0 ? void 0 : {
+ type: "text",
+ text: textParts.map((part) => part.text).join("")
+ };
  }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
@@ -737,107 +784,101 @@ function extractSources({
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
  (chunk) => chunk.web != null
  ).map((chunk) => ({
+ type: "source",
  sourceType: "url",
  id: generateId(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = import_zod2.z.object({
- role: import_zod2.z.string(),
- parts: import_zod2.z.array(
- import_zod2.z.union([
- import_zod2.z.object({
- text: import_zod2.z.string()
+ var contentSchema = import_zod3.z.object({
+ role: import_zod3.z.string(),
+ parts: import_zod3.z.array(
+ import_zod3.z.union([
+ import_zod3.z.object({
+ text: import_zod3.z.string()
  }),
- import_zod2.z.object({
- functionCall: import_zod2.z.object({
- name: import_zod2.z.string(),
- args: import_zod2.z.unknown()
+ import_zod3.z.object({
+ functionCall: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ args: import_zod3.z.unknown()
  })
  }),
- import_zod2.z.object({
- inlineData: import_zod2.z.object({
- mimeType: import_zod2.z.string(),
- data: import_zod2.z.string()
+ import_zod3.z.object({
+ inlineData: import_zod3.z.object({
+ mimeType: import_zod3.z.string(),
+ data: import_zod3.z.string()
  })
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = import_zod2.z.object({
- web: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish(),
- retrievedContext: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish()
+ var groundingChunkSchema = import_zod3.z.object({
+ web: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish(),
+ retrievedContext: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish()
  });
- var groundingMetadataSchema = import_zod2.z.object({
- webSearchQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
- retrievalQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
- searchEntryPoint: import_zod2.z.object({ renderedContent: import_zod2.z.string() }).nullish(),
- groundingChunks: import_zod2.z.array(groundingChunkSchema).nullish(),
- groundingSupports: import_zod2.z.array(
- import_zod2.z.object({
- segment: import_zod2.z.object({
- startIndex: import_zod2.z.number().nullish(),
- endIndex: import_zod2.z.number().nullish(),
- text: import_zod2.z.string().nullish()
+ var groundingMetadataSchema = import_zod3.z.object({
+ webSearchQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+ retrievalQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+ searchEntryPoint: import_zod3.z.object({ renderedContent: import_zod3.z.string() }).nullish(),
+ groundingChunks: import_zod3.z.array(groundingChunkSchema).nullish(),
+ groundingSupports: import_zod3.z.array(
+ import_zod3.z.object({
+ segment: import_zod3.z.object({
+ startIndex: import_zod3.z.number().nullish(),
+ endIndex: import_zod3.z.number().nullish(),
+ text: import_zod3.z.string().nullish()
  }),
- segment_text: import_zod2.z.string().nullish(),
- groundingChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
- supportChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
- confidenceScores: import_zod2.z.array(import_zod2.z.number()).nullish(),
- confidenceScore: import_zod2.z.array(import_zod2.z.number()).nullish()
+ segment_text: import_zod3.z.string().nullish(),
+ groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+ supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+ confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
+ confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
  })
  ).nullish(),
- retrievalMetadata: import_zod2.z.union([
- import_zod2.z.object({
- webDynamicRetrievalScore: import_zod2.z.number()
+ retrievalMetadata: import_zod3.z.union([
+ import_zod3.z.object({
+ webDynamicRetrievalScore: import_zod3.z.number()
  }),
- import_zod2.z.object({})
+ import_zod3.z.object({})
  ]).nullish()
  });
- var safetyRatingSchema = import_zod2.z.object({
- category: import_zod2.z.string().nullish(),
- probability: import_zod2.z.string().nullish(),
- probabilityScore: import_zod2.z.number().nullish(),
- severity: import_zod2.z.string().nullish(),
- severityScore: import_zod2.z.number().nullish(),
- blocked: import_zod2.z.boolean().nullish()
+ var safetyRatingSchema = import_zod3.z.object({
+ category: import_zod3.z.string().nullish(),
+ probability: import_zod3.z.string().nullish(),
+ probabilityScore: import_zod3.z.number().nullish(),
+ severity: import_zod3.z.string().nullish(),
+ severityScore: import_zod3.z.number().nullish(),
+ blocked: import_zod3.z.boolean().nullish()
  });
- var responseSchema = import_zod2.z.object({
- candidates: import_zod2.z.array(
- import_zod2.z.object({
- content: contentSchema.nullish().or(import_zod2.z.object({}).strict()),
- finishReason: import_zod2.z.string().nullish(),
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
+ var usageSchema = import_zod3.z.object({
+ cachedContentTokenCount: import_zod3.z.number().nullish(),
+ thoughtsTokenCount: import_zod3.z.number().nullish(),
+ promptTokenCount: import_zod3.z.number().nullish(),
+ candidatesTokenCount: import_zod3.z.number().nullish(),
+ totalTokenCount: import_zod3.z.number().nullish()
+ });
+ var responseSchema = import_zod3.z.object({
+ candidates: import_zod3.z.array(
+ import_zod3.z.object({
+ content: contentSchema.nullish().or(import_zod3.z.object({}).strict()),
+ finishReason: import_zod3.z.string().nullish(),
+ safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
  groundingMetadata: groundingMetadataSchema.nullish()
  })
  ),
- usageMetadata: import_zod2.z.object({
- promptTokenCount: import_zod2.z.number().nullish(),
- candidatesTokenCount: import_zod2.z.number().nullish(),
- totalTokenCount: import_zod2.z.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = import_zod2.z.object({
- candidates: import_zod2.z.array(
- import_zod2.z.object({
+ var chunkSchema = import_zod3.z.object({
+ candidates: import_zod3.z.array(
+ import_zod3.z.object({
  content: contentSchema.nullish(),
- finishReason: import_zod2.z.string().nullish(),
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
+ finishReason: import_zod3.z.string().nullish(),
+ safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
  groundingMetadata: groundingMetadataSchema.nullish()
  })
  ).nullish(),
- usageMetadata: import_zod2.z.object({
- promptTokenCount: import_zod2.z.number().nullish(),
- candidatesTokenCount: import_zod2.z.number().nullish(),
- totalTokenCount: import_zod2.z.number().nullish()
- }).nullish()
- });
- var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
- responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish(),
- thinkingConfig: import_zod2.z.object({
- thinkingBudget: import_zod2.z.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {