@ai-sdk/google 1.2.19 → 2.0.0-alpha.10

This diff compares the contents of the two package versions as published to their public registries. It is provided for informational purposes only.
@@ -28,11 +28,11 @@ module.exports = __toCommonJS(internal_exports);
 
 // src/google-generative-ai-language-model.ts
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
-var import_zod2 = require("zod");
+var import_zod3 = require("zod");
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
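
The new `jsonSchema == null` guard means an absent schema now short-circuits to `undefined` up front instead of flowing through the rest of the conversion. A minimal standalone sketch of the changed behavior, not the bundled source:

```ts
// Hedged re-implementation for illustration only.
type JSONSchemaLike = boolean | { type?: string; properties?: object } | null | undefined;

function isEmptyObjectSchema(jsonSchema: JSONSchemaLike): boolean {
  return (
    jsonSchema != null &&
    typeof jsonSchema === "object" &&
    jsonSchema.type === "object" &&
    Object.keys(jsonSchema.properties ?? {}).length === 0
  );
}

function convertJSONSchemaToOpenAPISchema(jsonSchema: JSONSchemaLike): unknown {
  // v2: null/undefined bail out here; v1 only checked isEmptyObjectSchema.
  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
    return undefined;
  }
  // ...rest of the conversion elided...
  return jsonSchema;
}

console.log(convertJSONSchemaToOpenAPISchema(undefined)); // undefined
```
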
@@ -131,7 +131,6 @@ function isEmptyObjectSchema(jsonSchema) {
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -155,33 +154,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
            parts.push({ text: part.text });
            break;
          }
-          case "image": {
-            parts.push(
-              part.image instanceof URL ? {
-                fileData: {
-                  mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                  fileUri: part.image.toString()
-                }
-              } : {
-                inlineData: {
-                  mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                  data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image)
-                }
-              }
-            );
-            break;
-          }
          case "file": {
+            const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
            parts.push(
              part.data instanceof URL ? {
                fileData: {
-                  mimeType: part.mimeType,
+                  mimeType: mediaType,
                  fileUri: part.data.toString()
                }
              } : {
                inlineData: {
-                  mimeType: part.mimeType,
-                  data: part.data
+                  mimeType: mediaType,
+                  data: (0, import_provider_utils.convertToBase64)(part.data)
                }
              }
            );
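
The dedicated `image` part type from v1 is gone: in the v2 prompt, images are `file` parts with a `mediaType`, and the wildcard `image/*` is normalized to `image/jpeg` before being sent. A hedged usage sketch (AI SDK v5 alpha call shape; treat the exact option names as indicative):

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const result = await generateText({
  model: google("gemini-2.0-flash"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe this image." },
        // v1: { type: "image", image: url, mimeType: "image/jpeg" }
        // v2: a file part; "image/*" is rewritten to "image/jpeg" above.
        {
          type: "file",
          mediaType: "image/*",
          data: new URL("https://example.com/cat.jpg"), // URL -> fileData, bytes -> inlineData
        },
      ],
    },
  ],
});
```
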
@@ -202,7 +186,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
            return part.text.length === 0 ? void 0 : { text: part.text };
          }
          case "file": {
-            if (part.mimeType !== "image/png") {
+            if (part.mediaType !== "image/png") {
              throw new import_provider.UnsupportedFunctionalityError({
                functionality: "Only PNG images are supported in assistant messages"
              });
@@ -214,8 +198,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
            }
            return {
              inlineData: {
-                mimeType: part.mimeType,
-                data: part.data
+                mimeType: part.mediaType,
+                data: (0, import_provider_utils.convertToBase64)(part.data)
              }
            };
          }
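
Assistant-message file parts keep the PNG-only restriction, but the check now reads `mediaType` and the payload goes through `convertToBase64` instead of being passed along as-is. A sketch of a part that passes the check (shape assumed from the code above):

```ts
// Hypothetical assistant content part; anything but "image/png" throws
// UnsupportedFunctionalityError in convertToGoogleGenerativeAIMessages.
const assistantImagePart = {
  type: "file" as const,
  mediaType: "image/png",
  data: new Uint8Array([0x89, 0x50, 0x4e, 0x47]), // converted via convertToBase64
};
```
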
@@ -276,18 +260,111 @@ var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResp
   errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-options.ts
+var import_zod2 = require("zod");
+var dynamicRetrievalConfig = import_zod2.z.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: import_zod2.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: import_zod2.z.number().optional()
+});
+var googleGenerativeAIProviderOptions = import_zod2.z.object({
+  responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: import_zod2.z.object({
+    thinkingBudget: import_zod2.z.number().optional(),
+    includeThoughts: import_zod2.z.boolean().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: import_zod2.z.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: import_zod2.z.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: import_zod2.z.array(
+    import_zod2.z.object({
+      category: import_zod2.z.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: import_zod2.z.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: import_zod2.z.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: import_zod2.z.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+   */
+  useSearchGrounding: import_zod2.z.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+   */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
-function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
-  var _a, _b;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
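
Everything that used to live in the constructor settings (safetySettings, cachedContent, useSearchGrounding, and so on) is now passed per call as `providerOptions.google` and validated against the schema above. A hedged sketch of the new call shape (option names from the schema; the `generateText` wiring is the AI SDK v5 alpha API):

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const { text, providerMetadata } = await generateText({
  model: google("gemini-1.5-flash"), // dynamic retrieval is Gemini 1.5 Flash only
  prompt: "Summarize today's top tech story.",
  providerOptions: {
    google: {
      useSearchGrounding: true,
      dynamicRetrievalConfig: { mode: "MODE_DYNAMIC", dynamicThreshold: 0.8 },
      safetySettings: [
        { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_ONLY_HIGH" },
      ],
      thinkingConfig: { thinkingBudget: 1024, includeThoughts: false },
      structuredOutputs: true,
    },
  },
});
```
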
@@ -303,12 +380,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
    } else {
      functionDeclarations.push({
        name: tool.name,
-        description: (_b = tool.description) != null ? _b : "",
+        description: (_a = tool.description) != null ? _a : "",
        parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
      });
    }
  }
-  const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
    return {
      tools: { functionDeclarations },
@@ -350,7 +426,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
    default: {
      const _exhaustiveCheck = type;
      throw new import_provider2.UnsupportedFunctionalityError({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
      });
    }
  }
@@ -385,25 +461,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
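
The v1 `supportsUrl(url)`/`isSupportedUrl` pair is replaced by a `supportedUrls` getter that returns a map of media-type patterns to URL regexes, supplied by the provider config. A hedged sketch of the config shape (field names inferred from the getter above; the actual wiring lives in the provider factory, which is outside this diff):

```ts
// Hypothetical config excerpt, for illustration only.
const config = {
  provider: "google.generative-ai",
  // v2: declarative map instead of v1's isSupportedUrl(url) predicate.
  supportedUrls: () => ({
    "*": [/^https:\/\/generativelanguage\.googleapis\.com\/v1beta\/files\/.*$/],
  }),
  // baseURL, headers, generateId, fetch ... elided
};
```
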
@@ -412,15 +484,16 @@ var GoogleGenerativeAILanguageModel = class {
    stopSequences,
    responseFormat,
    seed,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
  }) {
    var _a, _b, _c;
-    const type = mode.type;
    const warnings = [];
-    const googleOptions = (0, import_provider_utils3.parseProviderOptions)({
+    const googleOptions = await (0, import_provider_utils3.parseProviderOptions)({
      provider: "google",
-      providerOptions: providerMetadata,
-      schema: googleGenerativeAIProviderOptionsSchema
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
    });
    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
      warnings.push({
@@ -428,102 +501,55 @@ var GoogleGenerativeAILanguageModel = class {
        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
      });
    }
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-    switch (type) {
-      case "regular": {
-        const { tools, toolConfig, toolWarnings } = prepareTools(
-          mode,
-          (_b = this.settings.useSearchGrounding) != null ? _b : false,
-          this.settings.dynamicRetrievalConfig,
-          this.modelId
-        );
-        return {
-          args: {
-            generationConfig,
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            tools,
-            toolConfig,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            generationConfig: {
-              ...generationConfig,
-              responseMimeType: "application/json",
-              responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-              // so this is needed as an escape hatch:
-              this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
-            },
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            generationConfig,
-            contents,
-            tools: {
-              functionDeclarations: [
-                {
-                  name: mode.tool.name,
-                  description: (_c = mode.tool.description) != null ? _c : "",
-                  parameters: convertJSONSchemaToOpenAPISchema(
-                    mode.tool.parameters
-                  )
-                }
-              ]
-            },
-            toolConfig: { functionCallingConfig: { mode: "ANY" } },
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_b = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _b : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_c = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _c : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
-  }
-  supportsUrl(url) {
-    return this.config.isSupportedUrl(url);
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
  }
  async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
    const { args, warnings } = await this.getArgs(options);
    const body = JSON.stringify(args);
    const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
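
The v1 `regular` / `object-json` / `object-tool` modes collapse into one request builder: JSON output is driven by `responseFormat`, tools arrive as top-level call options, and the former constructor settings are read from `googleOptions`. An illustrative sketch of the `args` this produces for a JSON call (values are examples, not captured output):

```ts
const args = {
  generationConfig: {
    maxOutputTokens: 1024,
    temperature: 0,
    responseMimeType: "application/json", // responseFormat.type === "json"
    responseSchema: {
      type: "object",
      properties: { city: { type: "string" } },
    }, // skipped when providerOptions.google.structuredOutputs === false
  },
  contents: [{ role: "user", parts: [{ text: "Where is the Eiffel Tower?" }] }],
  systemInstruction: undefined,
  safetySettings: undefined, // now read from providerOptions.google
  tools: undefined,
  toolConfig: undefined,
  cachedContent: undefined,
};
```
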
@@ -545,45 +571,66 @@ var GoogleGenerativeAILanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { contents: rawPrompt, ...rawSettings } = args;
    const candidate = response.candidates[0];
-    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
-    const toolCalls = getToolCallsFromParts({
-      parts,
-      // Use candidateParts
-      generateId: this.config.generateId
-    });
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
    const usageMetadata = response.usageMetadata;
+    for (const part of parts) {
+      if ("text" in part && part.text != null && part.text.length > 0) {
+        if (part.thought === true) {
+          content.push({ type: "reasoning", text: part.text });
+        } else {
+          content.push({ type: "text", text: part.text });
+        }
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
+      generateId: this.config.generateId
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
    return {
-      text: getTextFromParts(parts),
-      reasoning: getReasoningDetailsFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
      finishReason: mapGoogleGenerativeAIFinishReason({
        finishReason: candidate.finishReason,
-        hasToolCalls: toolCalls != null && toolCalls.length > 0
+        hasToolCalls: content.some((part) => part.type === "tool-call")
      }),
      usage: {
-        promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
-        completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
      warnings,
      providerMetadata: {
        google: {
-          groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
-          safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
        }
      },
-      sources: extractSources({
-        groundingMetadata: candidate.groundingMetadata,
-        generateId: this.config.generateId
-      }),
-      request: { body }
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
    };
  }
  async doStream(options) {
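
doGenerate now returns one ordered `content` array instead of separate `text`, `reasoning`, `files`, `toolCalls`, and `sources` fields, and usage gains reasoning and cache counters. An illustrative result shape (example values, field names from the code above):

```ts
const generateResult = {
  content: [
    { type: "reasoning", text: "The user wants the capital..." }, // part.thought === true
    { type: "text", text: "Paris." },
    {
      type: "tool-call",
      toolCallType: "function",
      toolCallId: "id-0",
      toolName: "getWeather",
      args: '{"city":"Paris"}',
    },
    { type: "source", sourceType: "url", id: "id-1", url: "https://example.com", title: "Example" },
  ],
  finishReason: "stop",
  usage: {
    inputTokens: 12, // was promptTokens (with a NaN fallback) in v1
    outputTokens: 42, // was completionTokens
    totalTokens: 54,
    reasoningTokens: 8, // new: thoughtsTokenCount
    cachedInputTokens: undefined, // new: cachedContentTokenCount
  },
};
```
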
@@ -604,11 +651,11 @@ var GoogleGenerativeAILanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-    const { contents: rawPrompt, ...rawSettings } = args;
    let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
    };
    let providerMetadata = void 0;
    const generateId = this.config.generateId;
@@ -616,8 +663,11 @@ var GoogleGenerativeAILanguageModel = class {
    return {
      stream: response.pipeThrough(
        new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
          transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
            if (!chunk.success) {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
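
v2 streams open with a `stream-start` part that carries the warnings, which consequently disappear from the doStream return value (see the hunk near the end of this diff). A hedged summary type of the parts enqueued in this file (not a type exported by the package):

```ts
type GoogleStreamPart =
  | { type: "stream-start"; warnings: unknown[] }
  | { type: "reasoning"; text: string } // part.thought === true
  | { type: "text"; text: string } // v1 sent { type: "text-delta", textDelta }
  | { type: "file"; mediaType: string; data: string } // v1 used mimeType
  | { type: "source"; sourceType: "url"; id: string; url: string; title?: string }
  | { type: "error"; error: unknown };
```
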
@@ -625,33 +675,26 @@ var GoogleGenerativeAILanguageModel = class {
            const value = chunk.value;
            const usageMetadata = value.usageMetadata;
            if (usageMetadata != null) {
-              usage = {
-                promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
-                completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
-              };
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
            }
-            const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
            if (candidate == null) {
              return;
            }
            const content = candidate.content;
            if (content != null) {
-              const deltaText = getTextFromParts(content.parts);
-              if (deltaText != null) {
-                controller.enqueue({
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
-              }
-              const reasoningDeltaText = getReasoningDetailsFromParts(
-                content.parts
-              );
-              if (reasoningDeltaText != null) {
-                for (const part of reasoningDeltaText) {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
+              const parts = (_g = content.parts) != null ? _g : [];
+              for (const part of parts) {
+                if ("text" in part && part.text != null && part.text.length > 0) {
+                  if (part.thought === true) {
+                    controller.enqueue({ type: "reasoning", text: part.text });
+                  } else {
+                    controller.enqueue({ type: "text", text: part.text });
+                  }
                }
              }
              const inlineDataParts = getInlineDataParts(content.parts);
@@ -659,7 +702,7 @@ var GoogleGenerativeAILanguageModel = class {
              for (const part of inlineDataParts) {
                controller.enqueue({
                  type: "file",
-                  mimeType: part.inlineData.mimeType,
+                  mediaType: part.inlineData.mimeType,
                  data: part.inlineData.data
                });
              }
@@ -693,17 +736,17 @@ var GoogleGenerativeAILanguageModel = class {
                finishReason: candidate.finishReason,
                hasToolCalls
              });
-              const sources = (_d = extractSources({
+              const sources = (_h = extractSources({
                groundingMetadata: candidate.groundingMetadata,
                generateId
-              })) != null ? _d : [];
+              })) != null ? _h : [];
              for (const source of sources) {
-                controller.enqueue({ type: "source", source });
+                controller.enqueue(source);
              }
              providerMetadata = {
                google: {
-                  groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
-                  safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+                  groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+                  safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
                }
              };
            }
@@ -718,9 +761,7 @@ var GoogleGenerativeAILanguageModel = class {
          }
        })
      ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
      request: { body }
    };
  }
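
The return value loses `rawCall`/`rawResponse`: response headers move under `response`, and warnings now arrive via the `stream-start` part shown earlier. Side by side (illustrative):

```ts
// v1: { stream, rawCall: { rawPrompt, rawSettings }, rawResponse: { headers }, warnings, request }
// v2: { stream, response: { headers }, request }
```
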
@@ -733,24 +774,13 @@ function getToolCallsFromParts({
    (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
    toolCallType: "function",
    toolCallId: generateId(),
    toolName: part.functionCall.name,
    args: JSON.stringify(part.functionCall.args)
  }));
 }
-function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought !== true
-  );
-  return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
-}
-function getReasoningDetailsFromParts(parts) {
-  const reasoningParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought === true && part.text != null
-  );
-  return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
-}
 function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
    (part) => "inlineData" in part
@@ -764,109 +794,102 @@ function extractSources({
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
    (chunk) => chunk.web != null
  ).map((chunk) => ({
+    type: "source",
    sourceType: "url",
    id: generateId(),
    url: chunk.web.uri,
    title: chunk.web.title
  }));
 }
-var contentSchema = import_zod2.z.object({
-  parts: import_zod2.z.array(
-    import_zod2.z.union([
+var contentSchema = import_zod3.z.object({
+  parts: import_zod3.z.array(
+    import_zod3.z.union([
      // note: order matters since text can be fully empty
-      import_zod2.z.object({
-        functionCall: import_zod2.z.object({
-          name: import_zod2.z.string(),
-          args: import_zod2.z.unknown()
+      import_zod3.z.object({
+        functionCall: import_zod3.z.object({
+          name: import_zod3.z.string(),
+          args: import_zod3.z.unknown()
        })
      }),
-      import_zod2.z.object({
-        inlineData: import_zod2.z.object({
-          mimeType: import_zod2.z.string(),
-          data: import_zod2.z.string()
+      import_zod3.z.object({
+        inlineData: import_zod3.z.object({
+          mimeType: import_zod3.z.string(),
+          data: import_zod3.z.string()
        })
      }),
-      import_zod2.z.object({
-        text: import_zod2.z.string().nullish(),
-        thought: import_zod2.z.boolean().nullish()
+      import_zod3.z.object({
+        text: import_zod3.z.string().nullish(),
+        thought: import_zod3.z.boolean().nullish()
      })
    ])
  ).nullish()
 });
-var groundingChunkSchema = import_zod2.z.object({
-  web: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish(),
-  retrievedContext: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish()
+var groundingChunkSchema = import_zod3.z.object({
+  web: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish(),
+  retrievedContext: import_zod3.z.object({ uri: import_zod3.z.string(), title: import_zod3.z.string() }).nullish()
 });
-var groundingMetadataSchema = import_zod2.z.object({
-  webSearchQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
-  retrievalQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
-  searchEntryPoint: import_zod2.z.object({ renderedContent: import_zod2.z.string() }).nullish(),
-  groundingChunks: import_zod2.z.array(groundingChunkSchema).nullish(),
-  groundingSupports: import_zod2.z.array(
-    import_zod2.z.object({
-      segment: import_zod2.z.object({
-        startIndex: import_zod2.z.number().nullish(),
-        endIndex: import_zod2.z.number().nullish(),
-        text: import_zod2.z.string().nullish()
+var groundingMetadataSchema = import_zod3.z.object({
+  webSearchQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+  retrievalQueries: import_zod3.z.array(import_zod3.z.string()).nullish(),
+  searchEntryPoint: import_zod3.z.object({ renderedContent: import_zod3.z.string() }).nullish(),
+  groundingChunks: import_zod3.z.array(groundingChunkSchema).nullish(),
+  groundingSupports: import_zod3.z.array(
+    import_zod3.z.object({
+      segment: import_zod3.z.object({
+        startIndex: import_zod3.z.number().nullish(),
+        endIndex: import_zod3.z.number().nullish(),
+        text: import_zod3.z.string().nullish()
      }),
-      segment_text: import_zod2.z.string().nullish(),
-      groundingChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
-      supportChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
-      confidenceScores: import_zod2.z.array(import_zod2.z.number()).nullish(),
-      confidenceScore: import_zod2.z.array(import_zod2.z.number()).nullish()
+      segment_text: import_zod3.z.string().nullish(),
+      groundingChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+      supportChunkIndices: import_zod3.z.array(import_zod3.z.number()).nullish(),
+      confidenceScores: import_zod3.z.array(import_zod3.z.number()).nullish(),
+      confidenceScore: import_zod3.z.array(import_zod3.z.number()).nullish()
    })
  ).nullish(),
-  retrievalMetadata: import_zod2.z.union([
-    import_zod2.z.object({
-      webDynamicRetrievalScore: import_zod2.z.number()
+  retrievalMetadata: import_zod3.z.union([
+    import_zod3.z.object({
+      webDynamicRetrievalScore: import_zod3.z.number()
    }),
-    import_zod2.z.object({})
+    import_zod3.z.object({})
  ]).nullish()
 });
-var safetyRatingSchema = import_zod2.z.object({
-  category: import_zod2.z.string().nullish(),
-  probability: import_zod2.z.string().nullish(),
-  probabilityScore: import_zod2.z.number().nullish(),
-  severity: import_zod2.z.string().nullish(),
-  severityScore: import_zod2.z.number().nullish(),
-  blocked: import_zod2.z.boolean().nullish()
+var safetyRatingSchema = import_zod3.z.object({
+  category: import_zod3.z.string().nullish(),
+  probability: import_zod3.z.string().nullish(),
+  probabilityScore: import_zod3.z.number().nullish(),
+  severity: import_zod3.z.string().nullish(),
+  severityScore: import_zod3.z.number().nullish(),
+  blocked: import_zod3.z.boolean().nullish()
 });
-var responseSchema = import_zod2.z.object({
-  candidates: import_zod2.z.array(
-    import_zod2.z.object({
-      content: contentSchema.nullish().or(import_zod2.z.object({}).strict()),
-      finishReason: import_zod2.z.string().nullish(),
-      safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
+var usageSchema = import_zod3.z.object({
+  cachedContentTokenCount: import_zod3.z.number().nullish(),
+  thoughtsTokenCount: import_zod3.z.number().nullish(),
+  promptTokenCount: import_zod3.z.number().nullish(),
+  candidatesTokenCount: import_zod3.z.number().nullish(),
+  totalTokenCount: import_zod3.z.number().nullish()
+});
+var responseSchema = import_zod3.z.object({
+  candidates: import_zod3.z.array(
+    import_zod3.z.object({
+      content: contentSchema.nullish().or(import_zod3.z.object({}).strict()),
+      finishReason: import_zod3.z.string().nullish(),
+      safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ),
-  usageMetadata: import_zod2.z.object({
-    promptTokenCount: import_zod2.z.number().nullish(),
-    candidatesTokenCount: import_zod2.z.number().nullish(),
-    totalTokenCount: import_zod2.z.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema = import_zod2.z.object({
-  candidates: import_zod2.z.array(
-    import_zod2.z.object({
+var chunkSchema = import_zod3.z.object({
+  candidates: import_zod3.z.array(
+    import_zod3.z.object({
      content: contentSchema.nullish(),
-      finishReason: import_zod2.z.string().nullish(),
-      safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
+      finishReason: import_zod3.z.string().nullish(),
+      safetyRatings: import_zod3.z.array(safetyRatingSchema).nullish(),
      groundingMetadata: groundingMetadataSchema.nullish()
    })
  ).nullish(),
-  usageMetadata: import_zod2.z.object({
-    promptTokenCount: import_zod2.z.number().nullish(),
-    candidatesTokenCount: import_zod2.z.number().nullish(),
-    totalTokenCount: import_zod2.z.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
-  responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: import_zod2.z.object({
-    thinkingBudget: import_zod2.z.number().nullish(),
-    includeThoughts: import_zod2.z.boolean().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {