@ai-sdk/google 1.2.18 → 2.0.0-alpha.2

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -28,11 +28,11 @@ module.exports = __toCommonJS(internal_exports);
 
  // src/google-generative-ai-language-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");
 
  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
- if (isEmptyObjectSchema(jsonSchema)) {
+ if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
  return void 0;
  }
  if (typeof jsonSchema === "boolean") {
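With the added null guard, schema conversion now short-circuits on a missing schema as well as on an empty object schema. A minimal sketch of the new behavior (`convertJSONSchemaToOpenAPISchema` is internal to this package; the calls are illustrative only):

```ts
// Illustrative only — this function is not exported from @ai-sdk/google.
convertJSONSchemaToOpenAPISchema(undefined);
// => undefined (new in 2.0: null/undefined no longer reaches isEmptyObjectSchema)

convertJSONSchemaToOpenAPISchema({
  type: "object",
  properties: { city: { type: "string" } },
});
// => an OpenAPI-flavored schema used for responseSchema and tool parameters
```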
@@ -131,7 +131,6 @@ function isEmptyObjectSchema(jsonSchema) {
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  function convertToGoogleGenerativeAIMessages(prompt) {
- var _a, _b;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
@@ -155,33 +154,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  parts.push({ text: part.text });
  break;
  }
- case "image": {
- parts.push(
- part.image instanceof URL ? {
- fileData: {
- mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
- fileUri: part.image.toString()
- }
- } : {
- inlineData: {
- mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
- data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image)
- }
- }
- );
- break;
- }
  case "file": {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  parts.push(
  part.data instanceof URL ? {
  fileData: {
- mimeType: part.mimeType,
+ mimeType: mediaType,
  fileUri: part.data.toString()
  }
  } : {
  inlineData: {
- mimeType: part.mimeType,
- data: part.data
+ mimeType: mediaType,
+ data: (0, import_provider_utils.convertToBase64)(part.data)
  }
  }
  );
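With the dedicated `image` case removed, v2 prompts deliver images as `file` parts with a `mediaType`, and this hunk narrows the catch-all `image/*` to `image/jpeg`. A sketch of the two wire shapes it produces (field names read from the hunk; values illustrative):

```ts
// URL-based data is passed by reference:
const fileDataPart = {
  fileData: {
    mimeType: "image/jpeg", // the mediaType after the "image/*" fallback
    fileUri: "https://example.com/photo.jpg",
  },
};

// Binary data is inlined, base64-encoded via convertToBase64:
const inlineDataPart = {
  inlineData: { mimeType: "image/jpeg", data: "<base64 payload>" },
};
```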
@@ -202,7 +186,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return part.text.length === 0 ? void 0 : { text: part.text };
  }
  case "file": {
- if (part.mimeType !== "image/png") {
+ if (part.mediaType !== "image/png") {
  throw new import_provider.UnsupportedFunctionalityError({
  functionality: "Only PNG images are supported in assistant messages"
  });
@@ -214,8 +198,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  return {
  inlineData: {
- mimeType: part.mimeType,
- data: part.data
+ mimeType: part.mediaType,
+ data: (0, import_provider_utils.convertToBase64)(part.data)
  }
  };
  }
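Assistant-message file parts get the same treatment: `mediaType` replaces `mimeType`, and raw data is run through `convertToBase64`. Since only PNG is accepted on this path, a valid input part looks roughly like this (a sketch; the part type follows the v2 prompt format):

```ts
const assistantImagePart = {
  type: "file" as const,
  mediaType: "image/png", // any other media type throws UnsupportedFunctionalityError
  data: new Uint8Array([0x89, 0x50, 0x4e, 0x47]), // base64-encoded into inlineData
};
```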
@@ -276,18 +260,110 @@ var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
  errorToMessage: (data) => data.error.message
  });
 
+ // src/google-generative-ai-options.ts
+ var import_zod2 = require("zod");
+ var dynamicRetrievalConfig = import_zod2.z.object({
+ /**
+ * The mode of the predictor to be used in dynamic retrieval.
+ */
+ mode: import_zod2.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+ /**
+ * The threshold to be used in dynamic retrieval. If not set, a system default
+ * value is used.
+ */
+ dynamicThreshold: import_zod2.z.number().optional()
+ });
+ var googleGenerativeAIProviderOptions = import_zod2.z.object({
+ responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).optional(),
+ thinkingConfig: import_zod2.z.object({
+ thinkingBudget: import_zod2.z.number().optional()
+ }).optional(),
+ /**
+ Optional.
+ The name of the cached content used as context to serve the prediction.
+ Format: cachedContents/{cachedContent}
+ */
+ cachedContent: import_zod2.z.string().optional(),
+ /**
+ * Optional. Enable structured output. Default is true.
+ *
+ * This is useful when the JSON Schema contains elements that are
+ * not supported by the OpenAPI schema version that
+ * Google Generative AI uses. You can use this to disable
+ * structured outputs if you need to.
+ */
+ structuredOutputs: import_zod2.z.boolean().optional(),
+ /**
+ Optional. A list of unique safety settings for blocking unsafe content.
+ */
+ safetySettings: import_zod2.z.array(
+ import_zod2.z.object({
+ category: import_zod2.z.enum([
+ "HARM_CATEGORY_UNSPECIFIED",
+ "HARM_CATEGORY_HATE_SPEECH",
+ "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "HARM_CATEGORY_HARASSMENT",
+ "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "HARM_CATEGORY_CIVIC_INTEGRITY"
+ ]),
+ threshold: import_zod2.z.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ])
+ })
+ ).optional(),
+ threshold: import_zod2.z.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ]).optional(),
+ /**
+ * Optional. Enables timestamp understanding for audio-only files.
+ *
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+ */
+ audioTimestamp: import_zod2.z.boolean().optional(),
+ /**
+ Optional. When enabled, the model will use Google search to ground the response.
+
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+ */
+ useSearchGrounding: import_zod2.z.boolean().optional(),
+ /**
+ Optional. Specifies the dynamic retrieval configuration.
+
+ @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+ */
+ dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+ });
+
  // src/google-prepare-tools.ts
  var import_provider2 = require("@ai-sdk/provider");
- function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
- var _a, _b;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ function prepareTools({
+ tools,
+ toolChoice,
+ useSearchGrounding,
+ dynamicRetrievalConfig: dynamicRetrievalConfig2,
+ modelId
+ }) {
+ var _a;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
  if (useSearchGrounding) {
  return {
  tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+ googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
  },
  toolConfig: void 0,
  toolWarnings
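The new options module replaces the removed model-level `settings`: grounding, safety settings, cached content, and the structured-output escape hatch are now validated per request. A minimal sketch of supplying them under the `google` provider-options key (the `generateText` call site belongs to the AI SDK core and is assumed here for illustration):

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const { text } = await generateText({
  model: google("gemini-1.5-flash"),
  prompt: "What happened in tech news today?",
  providerOptions: {
    google: {
      // every field below is declared in googleGenerativeAIProviderOptions above
      useSearchGrounding: true,
      dynamicRetrievalConfig: { mode: "MODE_DYNAMIC", dynamicThreshold: 0.8 },
      safetySettings: [
        { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_ONLY_HIGH" },
      ],
      cachedContent: "cachedContents/example-id", // illustrative id
    },
  },
});
```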
@@ -303,12 +379,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
  } else {
  functionDeclarations.push({
  name: tool.name,
- description: (_b = tool.description) != null ? _b : "",
+ description: (_a = tool.description) != null ? _a : "",
  parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
  });
  }
  }
- const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
  return {
  tools: { functionDeclarations },
@@ -350,7 +425,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider2.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -385,25 +460,21 @@ function mapGoogleGenerativeAIFinishReason({
 
  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = "json";
- this.supportsImageUrls = false;
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : true;
- }
  get provider() {
  return this.config.provider;
  }
+ get supportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
  async getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -412,118 +483,66 @@ var GoogleGenerativeAILanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
- var _a, _b, _c;
- const type = mode.type;
+ var _a, _b;
  const warnings = [];
- const googleOptions = (0, import_provider_utils3.parseProviderOptions)({
+ const googleOptions = await (0, import_provider_utils3.parseProviderOptions)({
  provider: "google",
- providerOptions: providerMetadata,
- schema: googleGenerativeAIProviderOptionsSchema
+ providerOptions,
+ schema: googleGenerativeAIProviderOptions
  });
- if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
- warnings.push({
- type: "other",
- message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
- });
- }
- const generationConfig = {
- // standardized settings:
- maxOutputTokens: maxTokens,
- temperature,
- topK,
- topP,
- frequencyPenalty,
- presencePenalty,
- stopSequences,
- seed,
- // response format:
- responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
- responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
- // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
- ...this.settings.audioTimestamp && {
- audioTimestamp: this.settings.audioTimestamp
- },
- // provider options:
- responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
- };
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
- switch (type) {
- case "regular": {
- const { tools, toolConfig, toolWarnings } = prepareTools(
- mode,
- (_b = this.settings.useSearchGrounding) != null ? _b : false,
- this.settings.dynamicRetrievalConfig,
- this.modelId
- );
- return {
- args: {
- generationConfig,
- contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- tools,
- toolConfig,
- cachedContent: this.settings.cachedContent
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- generationConfig: {
- ...generationConfig,
- responseMimeType: "application/json",
- responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
- // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
- },
- contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- cachedContent: this.settings.cachedContent
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- generationConfig,
- contents,
- tools: {
- functionDeclarations: [
- {
- name: mode.tool.name,
- description: (_c = mode.tool.description) != null ? _c : "",
- parameters: convertJSONSchemaToOpenAPISchema(
- mode.tool.parameters
- )
- }
- ]
- },
- toolConfig: { functionCallingConfig: { mode: "ANY" } },
- safetySettings: this.settings.safetySettings,
- cachedContent: this.settings.cachedContent
+ const {
+ tools: googleTools,
+ toolConfig: googleToolConfig,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+ dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+ modelId: this.modelId
+ });
+ return {
+ args: {
+ generationConfig: {
+ // standardized settings:
+ maxOutputTokens,
+ temperature,
+ topK,
+ topP,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences,
+ seed,
+ // response format:
+ responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+ responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+ // so this is needed as an escape hatch:
+ // TODO convert into provider option
+ ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+ audioTimestamp: googleOptions.audioTimestamp
  },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
- }
- supportsUrl(url) {
- return this.config.isSupportedUrl(url);
+ // provider options:
+ responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+ },
+ contents,
+ systemInstruction,
+ safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+ tools: googleTools,
+ toolConfig: googleToolConfig,
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
  const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
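With the `mode` switch gone, every call funnels into one request shape: JSON output is driven by `responseFormat`, and tool handling always goes through `prepareTools`. Roughly, the args object `getArgs` now assembles (field names from the hunk above; values illustrative):

```ts
// A sketch, not a literal value from the package:
const args = {
  generationConfig: {
    maxOutputTokens: 1024,
    temperature: 0.2,
    // set only when responseFormat.type === "json":
    responseMimeType: "application/json",
    // OpenAPI conversion of responseFormat.schema, skipped when
    // providerOptions.google.structuredOutputs === false:
    responseSchema: { type: "object" /* … */ },
  },
  contents: [/* converted prompt messages */],
  systemInstruction: undefined,
  safetySettings: undefined, // now read from providerOptions.google, not this.settings
  tools: undefined,          // from prepareTools
  toolConfig: undefined,     // from prepareTools
  cachedContent: undefined,  // also from providerOptions.google
};
```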
@@ -545,45 +564,62 @@ var GoogleGenerativeAILanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { contents: rawPrompt, ...rawSettings } = args;
  const candidate = response.candidates[0];
- const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
- const toolCalls = getToolCallsFromParts({
- parts,
- // Use candidateParts
+ const content = [];
+ const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ for (const part of parts) {
+ if ("text" in part && part.text.length > 0) {
+ content.push({ type: "text", text: part.text });
+ } else if ("functionCall" in part) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: this.config.generateId(),
+ toolName: part.functionCall.name,
+ args: JSON.stringify(part.functionCall.args)
+ });
+ } else if ("inlineData" in part) {
+ content.push({
+ type: "file",
+ data: part.inlineData.data,
+ mediaType: part.inlineData.mimeType
+ });
+ }
+ }
+ const sources = (_b = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
  generateId: this.config.generateId
- });
+ })) != null ? _b : [];
+ for (const source of sources) {
+ content.push(source);
+ }
  const usageMetadata = response.usageMetadata;
  return {
- text: getTextFromParts(parts),
- reasoning: getReasoningDetailsFromParts(parts),
- files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
- data: part.inlineData.data,
- mimeType: part.inlineData.mimeType
- })),
- toolCalls,
+ content,
  finishReason: mapGoogleGenerativeAIFinishReason({
  finishReason: candidate.finishReason,
- hasToolCalls: toolCalls != null && toolCalls.length > 0
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
- promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
- completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+ inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+ outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+ totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+ reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+ cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
  warnings,
  providerMetadata: {
  google: {
- groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
- safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
  }
  },
- sources: extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId: this.config.generateId
- }),
- request: { body }
+ request: { body },
+ response: {
+ // TODO timestamp, model id, id
+ headers: responseHeaders,
+ body: rawResponse
+ }
  };
  }
  async doStream(options) {
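Instead of separate `text`, `reasoning`, `files`, `toolCalls`, and `sources` fields, `doGenerate` now returns a single ordered `content` array, and `usage` reports the richer v2 token fields. A sketch of a result (shapes from the hunk above; values made up):

```ts
const result = {
  content: [
    { type: "text", text: "Berlin is sunny today." },
    {
      type: "tool-call",
      toolCallType: "function",
      toolCallId: "call_1", // produced by config.generateId()
      toolName: "getWeather",
      args: '{"city":"Berlin"}',
    },
    { type: "source", sourceType: "url", id: "src_1",
      url: "https://example.com", title: "Example" },
  ],
  usage: {
    inputTokens: 12,              // usageMetadata.promptTokenCount
    outputTokens: 48,             // usageMetadata.candidatesTokenCount
    totalTokens: 60,              // usageMetadata.totalTokenCount
    reasoningTokens: undefined,   // usageMetadata.thoughtsTokenCount
    cachedInputTokens: undefined, // usageMetadata.cachedContentTokenCount
  },
};
```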
@@ -604,11 +640,11 @@ var GoogleGenerativeAILanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { contents: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let providerMetadata = void 0;
  const generateId = this.config.generateId;
@@ -616,8 +652,11 @@ var GoogleGenerativeAILanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
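Warnings move out of the return value and into the stream itself: every stream now opens with a `stream-start` part before any model output. A sketch of the part sequence a consumer might read (values made up; only `stream-start` is introduced in this hunk, the rest follow from the transforms below):

```ts
const observedParts = [
  { type: "stream-start", warnings: [] },              // emitted in start()
  { type: "text", text: "partial output…" },           // getTextFromParts result, enqueued as-is
  { type: "file", mediaType: "image/png", data: "…" }, // inline data parts
  { type: "source", sourceType: "url", id: "s1", url: "https://example.com", title: "Example" },
];
```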
@@ -625,12 +664,13 @@ var GoogleGenerativeAILanguageModel = class {
  const value = chunk.value;
  const usageMetadata = value.usageMetadata;
  if (usageMetadata != null) {
- usage = {
- promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
- completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
- };
+ usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+ usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+ usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+ usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+ usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
  }
- const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+ const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
  if (candidate == null) {
  return;
  }
@@ -638,28 +678,14 @@ var GoogleGenerativeAILanguageModel = class {
  if (content != null) {
  const deltaText = getTextFromParts(content.parts);
  if (deltaText != null) {
- controller.enqueue({
- type: "text-delta",
- textDelta: deltaText
- });
- }
- const reasoningDeltaText = getReasoningDetailsFromParts(
- content.parts
- );
- if (reasoningDeltaText != null) {
- for (const part of reasoningDeltaText) {
- controller.enqueue({
- type: "reasoning",
- textDelta: part.text
- });
- }
+ controller.enqueue(deltaText);
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
  for (const part of inlineDataParts) {
  controller.enqueue({
  type: "file",
- mimeType: part.inlineData.mimeType,
+ mediaType: part.inlineData.mimeType,
  data: part.inlineData.data
  });
  }
@@ -693,17 +719,17 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_d = extractSources({
+ const sources = (_g = extractSources({
  groundingMetadata: candidate.groundingMetadata,
  generateId
- })) != null ? _d : [];
+ })) != null ? _g : [];
  for (const source of sources) {
- controller.enqueue({ type: "source", source });
+ controller.enqueue(source);
  }
  providerMetadata = {
  google: {
- groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
  }
  };
  }
@@ -718,9 +744,7 @@ var GoogleGenerativeAILanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
+ response: { headers: responseHeaders },
  request: { body }
  };
  }
@@ -733,6 +757,7 @@ function getToolCallsFromParts({
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+ type: "tool-call",
  toolCallType: "function",
  toolCallId: generateId(),
  toolName: part.functionCall.name,
@@ -740,16 +765,11 @@ function getToolCallsFromParts({
  }));
  }
  function getTextFromParts(parts) {
- const textParts = parts == null ? void 0 : parts.filter(
- (part) => "text" in part && part.thought !== true
- );
- return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
- }
- function getReasoningDetailsFromParts(parts) {
- const reasoningParts = parts == null ? void 0 : parts.filter(
- (part) => "text" in part && part.thought === true
- );
- return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
+ const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
+ return textParts == null || textParts.length === 0 ? void 0 : {
+ type: "text",
+ text: textParts.map((part) => part.text).join("")
+ };
  }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
@@ -764,109 +784,101 @@ function extractSources({
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
  (chunk) => chunk.web != null
  ).map((chunk) => ({
+ type: "source",
  sourceType: "url",
  id: generateId(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = import_zod2.z.object({
- role: import_zod2.z.string(),
- parts: import_zod2.z.array(
- import_zod2.z.union([
- import_zod2.z.object({
- text: import_zod2.z.string(),
- thought: import_zod2.z.boolean().nullish()
+ var contentSchema = import_zod3.z.object({
+ role: import_zod3.z.string(),
+ parts: import_zod3.z.array(
+ import_zod3.z.union([
+ import_zod3.z.object({
+ text: import_zod3.z.string()
  }),
- import_zod2.z.object({
- functionCall: import_zod2.z.object({
- name: import_zod2.z.string(),
- args: import_zod2.z.unknown()
+ import_zod3.z.object({
+ functionCall: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ args: import_zod3.z.unknown()
  })
  }),
- import_zod2.z.object({
- inlineData: import_zod2.z.object({
- mimeType: import_zod2.z.string(),
- data: import_zod2.z.string()
+ import_zod3.z.object({
+ inlineData: import_zod3.z.object({
+ mimeType: import_zod3.z.string(),
+ data: import_zod3.z.string()
  })
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = import_zod2.z.object({
- web: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish(),
- retrievedContext: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish()
+ var groundingChunkSchema = import_zod3.z.object({
+ web: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish(),
+ retrievedContext: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish()
  });
- var groundingMetadataSchema = import_zod2.z.object({
- webSearchQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
- retrievalQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
- searchEntryPoint: import_zod2.z.object({ renderedContent: import_zod2.z.string() }).nullish(),
- groundingChunks: import_zod2.z.array(groundingChunkSchema).nullish(),
- groundingSupports: import_zod2.z.array(
- import_zod2.z.object({
- segment: import_zod2.z.object({
- startIndex: import_zod2.z.number().nullish(),
- endIndex: import_zod2.z.number().nullish(),
- text: import_zod2.z.string().nullish()
+ var groundingMetadataSchema = import_zod3.z.object({
+ webSearchQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+ retrievalQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+ searchEntryPoint: import_zod3.z.object({ renderedContent: import_zod3.z.string() }).nullish(),
+ groundingChunks: import_zod3.z.array(groundingChunkSchema).nullish(),
+ groundingSupports: import_zod3.z.array(
+ import_zod3.z.object({
+ segment: import_zod3.z.object({
+ startIndex: import_zod3.z.number().nullish(),
+ endIndex: import_zod3.z.number().nullish(),
+ text: import_zod3.z.string().nullish()
  }),
- segment_text: import_zod2.z.string().nullish(),
- groundingChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
- supportChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
- confidenceScores: import_zod2.z.array(import_zod2.z.number()).nullish(),
- confidenceScore: import_zod2.z.array(import_zod2.z.number()).nullish()
+ segment_text: import_zod3.z.string().nullish(),
+ groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+ supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+ confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
+ confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
  })
  ).nullish(),
- retrievalMetadata: import_zod2.z.union([
- import_zod2.z.object({
- webDynamicRetrievalScore: import_zod2.z.number()
+ retrievalMetadata: import_zod3.z.union([
+ import_zod3.z.object({
+ webDynamicRetrievalScore: import_zod3.z.number()
  }),
- import_zod2.z.object({})
+ import_zod3.z.object({})
  ]).nullish()
  });
- var safetyRatingSchema = import_zod2.z.object({
- category: import_zod2.z.string().nullish(),
- probability: import_zod2.z.string().nullish(),
- probabilityScore: import_zod2.z.number().nullish(),
- severity: import_zod2.z.string().nullish(),
- severityScore: import_zod2.z.number().nullish(),
- blocked: import_zod2.z.boolean().nullish()
+ var safetyRatingSchema = import_zod3.z.object({
+ category: import_zod3.z.string().nullish(),
+ probability: import_zod3.z.string().nullish(),
+ probabilityScore: import_zod3.z.number().nullish(),
+ severity: import_zod3.z.string().nullish(),
+ severityScore: import_zod3.z.number().nullish(),
+ blocked: import_zod3.z.boolean().nullish()
  });
- var responseSchema = import_zod2.z.object({
- candidates: import_zod2.z.array(
- import_zod2.z.object({
- content: contentSchema.nullish().or(import_zod2.z.object({}).strict()),
- finishReason: import_zod2.z.string().nullish(),
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
+ var usageSchema = import_zod3.z.object({
+ cachedContentTokenCount: import_zod3.z.number().nullish(),
+ thoughtsTokenCount: import_zod3.z.number().nullish(),
+ promptTokenCount: import_zod3.z.number().nullish(),
+ candidatesTokenCount: import_zod3.z.number().nullish(),
+ totalTokenCount: import_zod3.z.number().nullish()
+ });
+ var responseSchema = import_zod3.z.object({
+ candidates: import_zod3.z.array(
+ import_zod3.z.object({
+ content: contentSchema.nullish().or(import_zod3.z.object({}).strict()),
+ finishReason: import_zod3.z.string().nullish(),
+ safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
  groundingMetadata: groundingMetadataSchema.nullish()
  })
  ),
- usageMetadata: import_zod2.z.object({
- promptTokenCount: import_zod2.z.number().nullish(),
- candidatesTokenCount: import_zod2.z.number().nullish(),
- totalTokenCount: import_zod2.z.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = import_zod2.z.object({
- candidates: import_zod2.z.array(
- import_zod2.z.object({
+ var chunkSchema = import_zod3.z.object({
+ candidates: import_zod3.z.array(
+ import_zod3.z.object({
  content: contentSchema.nullish(),
- finishReason: import_zod2.z.string().nullish(),
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
+ finishReason: import_zod3.z.string().nullish(),
+ safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
  groundingMetadata: groundingMetadataSchema.nullish()
  })
  ).nullish(),
- usageMetadata: import_zod2.z.object({
- promptTokenCount: import_zod2.z.number().nullish(),
- candidatesTokenCount: import_zod2.z.number().nullish(),
- totalTokenCount: import_zod2.z.number().nullish()
- }).nullish()
- });
- var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
- responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish(),
- thinkingConfig: import_zod2.z.object({
- thinkingBudget: import_zod2.z.number().nullish(),
- includeThoughts: import_zod2.z.boolean().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {