@ai-sdk/google 1.2.19 → 2.0.0-alpha.10

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
@@ -7,11 +7,11 @@ import {
   postJsonToApi,
   resolve
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
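
With the added null guard, a missing schema now short-circuits to `undefined` instead of flowing into the later property checks. A minimal standalone sketch of the guarded behavior (the `isEmptyObjectSchema` body is simplified here, not copied from the package):

```ts
type JSONSchema7Like = boolean | Record<string, unknown> | null | undefined;

// Simplified stand-in for the package's helper.
function isEmptyObjectSchema(jsonSchema: JSONSchema7Like): boolean {
  return (
    typeof jsonSchema === "object" &&
    jsonSchema !== null &&
    jsonSchema.type === "object" &&
    Object.keys((jsonSchema.properties as object) ?? {}).length === 0
  );
}

function convertJSONSchemaToOpenAPISchema(
  jsonSchema: JSONSchema7Like
): unknown {
  // v2 adds `jsonSchema == null`, so null/undefined inputs return early
  // instead of reaching the conversion logic below.
  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
    return undefined;
  }
  // ...conversion of the remaining schema shapes elided...
  return jsonSchema;
}

console.log(convertJSONSchemaToOpenAPISchema(undefined)); // undefined
```
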
@@ -110,9 +110,10 @@ function isEmptyObjectSchema(jsonSchema) {
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+import {
+  convertToBase64
+} from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -136,33 +137,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           parts.push({ text: part.text });
           break;
         }
-        case "image": {
-          parts.push(
-            part.image instanceof URL ? {
-              fileData: {
-                mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                fileUri: part.image.toString()
-              }
-            } : {
-              inlineData: {
-                mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                data: convertUint8ArrayToBase64(part.image)
-              }
-            }
-          );
-          break;
-        }
         case "file": {
+          const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
           parts.push(
             part.data instanceof URL ? {
               fileData: {
-                mimeType: part.mimeType,
+                mimeType: mediaType,
                 fileUri: part.data.toString()
               }
             } : {
               inlineData: {
-                mimeType: part.mimeType,
-                data: part.data
+                mimeType: mediaType,
+                data: convertToBase64(part.data)
               }
             }
           );
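
This removes the separate `image` part type from the prompt: in the v2 format images travel as `file` parts carrying a `mediaType`, with the `image/*` wildcard normalized to `image/jpeg`. A hedged sketch of just this mapping (the part type is paraphrased; the real types live in `@ai-sdk/provider`):

```ts
// Paraphrased v2 prompt part; not the SDK's exported type.
type FilePart = { type: "file"; mediaType: string; data: Uint8Array | URL };

// Mirrors the hunk above: wildcard image types default to JPEG, URLs become
// fileData references, raw bytes become base64-encoded inlineData.
function toGooglePart(part: FilePart, toBase64: (data: Uint8Array) => string) {
  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  return part.data instanceof URL
    ? { fileData: { mimeType: mediaType, fileUri: part.data.toString() } }
    : { inlineData: { mimeType: mediaType, data: toBase64(part.data) } };
}
```
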
@@ -183,7 +169,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           return part.text.length === 0 ? void 0 : { text: part.text };
         }
         case "file": {
-          if (part.mimeType !== "image/png") {
+          if (part.mediaType !== "image/png") {
             throw new UnsupportedFunctionalityError({
               functionality: "Only PNG images are supported in assistant messages"
             });
@@ -195,8 +181,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
           }
           return {
             inlineData: {
-              mimeType: part.mimeType,
-              data: part.data
+              mimeType: part.mediaType,
+              data: convertToBase64(part.data)
             }
           };
         }
@@ -257,20 +243,113 @@ var googleFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-options.ts
+import { z as z2 } from "zod";
+var dynamicRetrievalConfig = z2.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: z2.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: z2.number().optional()
+});
+var googleGenerativeAIProviderOptions = z2.object({
+  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: z2.object({
+    thinkingBudget: z2.number().optional(),
+    includeThoughts: z2.boolean().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+  */
+  cachedContent: z2.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: z2.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+  */
+  safetySettings: z2.array(
+    z2.object({
+      category: z2.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: z2.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: z2.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: z2.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+  */
+  useSearchGrounding: z2.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+  */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
-  var _a, _b;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
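
The practical effect of this new schema: options that v1 accepted as model settings (`safetySettings`, `cachedContent`, `useSearchGrounding`, and so on) are now validated per call under `providerOptions.google`. A hedged sketch of what a v2 call site might look like, assuming the `ai` package's `generateText` and an API key in the environment:

```ts
import { generateText } from "ai";
import { createGoogleGenerativeAI } from "@ai-sdk/google";

const google = createGoogleGenerativeAI({ apiKey: process.env.GOOGLE_API_KEY });

// v1: google("gemini-1.5-flash", { useSearchGrounding: true, safetySettings: [...] })
// v2: the same options move under providerOptions.google, checked against the
// zod schema above at request time.
const { text } = await generateText({
  model: google("gemini-1.5-flash"),
  prompt: "Summarize today's top story.",
  providerOptions: {
    google: {
      useSearchGrounding: true,
      safetySettings: [
        { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_ONLY_HIGH" },
      ],
    },
  },
});
```
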
@@ -286,12 +365,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (_b = tool.description) != null ? _b : "",
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -333,7 +411,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -368,25 +446,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
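
Alongside the bump to `specificationVersion: "v2"`, per-model URL support moves from a `supportsUrl(url)` method to a `supportedUrls` map supplied by the provider config (with `{}` as the fallback when the hook is absent). A hedged sketch of the shape that hook presumably returns; the media-type key and pattern below are illustrative, not taken from the package:

```ts
// Hypothetical config fragment: media types map to URL patterns the model can
// ingest directly, letting the SDK skip downloading matching URLs itself.
const config = {
  provider: "google.generative-ai",
  supportedUrls: (): Record<string, RegExp[]> => ({
    "*": [/^https:\/\/generativelanguage\.googleapis\.com\/v1beta\/files\/.+$/],
  }),
};
```
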
@@ -395,15 +469,16 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
   }) {
     var _a, _b, _c;
-    const type = mode.type;
     const warnings = [];
-    const googleOptions = parseProviderOptions({
+    const googleOptions = await parseProviderOptions({
       provider: "google",
-      providerOptions: providerMetadata,
-      schema: googleGenerativeAIProviderOptionsSchema
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
     if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
       warnings.push({
@@ -411,102 +486,55 @@ var GoogleGenerativeAILanguageModel = class {
         message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
       });
     }
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-    switch (type) {
-      case "regular": {
-        const { tools, toolConfig, toolWarnings } = prepareTools(
-          mode,
-          (_b = this.settings.useSearchGrounding) != null ? _b : false,
-          this.settings.dynamicRetrievalConfig,
-          this.modelId
-        );
-        return {
-          args: {
-            generationConfig,
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            tools,
-            toolConfig,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            generationConfig: {
-              ...generationConfig,
-              responseMimeType: "application/json",
-              responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-              // so this is needed as an escape hatch:
-              this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
-            },
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            generationConfig,
-            contents,
-            tools: {
-              functionDeclarations: [
-                {
-                  name: mode.tool.name,
-                  description: (_c = mode.tool.description) != null ? _c : "",
-                  parameters: convertJSONSchemaToOpenAPISchema(
-                    mode.tool.parameters
-                  )
-                }
-              ]
-            },
-            toolConfig: { functionCallingConfig: { mode: "ANY" } },
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_b = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _b : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_c = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _c : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
-  }
-  supportsUrl(url) {
-    return this.config.isSupportedUrl(url);
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
     const mergedHeaders = combineHeaders(
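
With the `mode` switch deleted, a single path now builds the request: `tools` and `toolChoice` arrive as plain call options, and JSON output is driven entirely by `responseFormat` plus the `structuredOutputs` provider option (defaulting to true). A hedged sketch of v2 call options that exercise this path; the shape follows the destructuring in `getArgs`, the values are illustrative:

```ts
// Illustrative v2 call options; in v1 most of this lived under `mode`.
const callOptions = {
  prompt: [
    {
      role: "user" as const,
      content: [{ type: "text" as const, text: "List two colors as JSON." }],
    },
  ],
  maxOutputTokens: 256, // renamed from v1's `maxTokens`
  responseFormat: {
    // replaces the v1 "object-json" mode
    type: "json" as const,
    schema: { type: "object", properties: { colors: { type: "array" } } },
  },
  providerOptions: { google: { structuredOutputs: true } },
};
```
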
@@ -528,45 +556,66 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     const candidate = response.candidates[0];
-    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
-    const toolCalls = getToolCallsFromParts({
-      parts,
-      // Use candidateParts
-      generateId: this.config.generateId
-    });
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
     const usageMetadata = response.usageMetadata;
+    for (const part of parts) {
+      if ("text" in part && part.text != null && part.text.length > 0) {
+        if (part.thought === true) {
+          content.push({ type: "reasoning", text: part.text });
+        } else {
+          content.push({ type: "text", text: part.text });
+        }
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
+      generateId: this.config.generateId
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     return {
-      text: getTextFromParts(parts),
-      reasoning: getReasoningDetailsFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls: toolCalls != null && toolCalls.length > 0
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-        promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
-        completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
       warnings,
       providerMetadata: {
         google: {
-          groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
-          safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
         }
       },
-      sources: extractSources({
-        groundingMetadata: candidate.groundingMetadata,
-        generateId: this.config.generateId
-      }),
-      request: { body }
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
     };
   }
   async doStream(options) {
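
`doGenerate` no longer returns parallel `text`/`reasoning`/`files`/`toolCalls`/`sources` fields; everything is pushed into a single ordered `content` array, and usage gains `totalTokens`, `reasoningTokens`, and `cachedInputTokens`. A hedged consumer sketch over the part shapes pushed above:

```ts
// Paraphrased part shapes from the loop above; not the SDK's exported types.
type ContentPart =
  | { type: "text"; text: string }
  | { type: "reasoning"; text: string }
  | { type: "tool-call"; toolCallId: string; toolName: string; args: string }
  | { type: "file"; data: string; mediaType: string }
  | { type: "source"; sourceType: "url"; id: string; url: string; title?: string };

function summarize(content: ContentPart[]): string {
  return content
    .map((part) => {
      switch (part.type) {
        case "text":
          return part.text;
        case "reasoning":
          return `[reasoning: ${part.text.length} chars]`;
        case "tool-call":
          return `[tool call: ${part.toolName}]`;
        case "file":
          return `[file: ${part.mediaType}]`;
        case "source":
          return `[source: ${part.url}]`;
      }
    })
    .join("\n");
}
```
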
@@ -587,11 +636,11 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let providerMetadata = void 0;
     const generateId = this.config.generateId;
@@ -599,8 +648,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
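
Two stream-protocol changes land here: every stream now opens with a `stream-start` part carrying the request warnings, and (as the hunks below show) text and reasoning deltas become `{ type, text }` parts instead of v1's `textDelta` events. A hedged reader sketch over those part shapes:

```ts
// Paraphrased v2 stream parts; the real union is wider than shown here.
type StreamPart =
  | { type: "stream-start"; warnings: unknown[] }
  | { type: "text"; text: string }
  | { type: "reasoning"; text: string }
  | { type: "error"; error: unknown };

async function logStream(stream: ReadableStream<StreamPart>): Promise<void> {
  const reader = stream.getReader();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    if (value.type === "stream-start" && value.warnings.length > 0) {
      console.warn("warnings:", value.warnings);
    } else if (value.type === "text" || value.type === "reasoning") {
      console.log(value.type, value.text);
    }
  }
}
```
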
@@ -608,33 +660,26 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage = {
-                promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
-                completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
-              };
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
             const content = candidate.content;
             if (content != null) {
-              const deltaText = getTextFromParts(content.parts);
-              if (deltaText != null) {
-                controller.enqueue({
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
-              }
-              const reasoningDeltaText = getReasoningDetailsFromParts(
-                content.parts
-              );
-              if (reasoningDeltaText != null) {
-                for (const part of reasoningDeltaText) {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
+              const parts = (_g = content.parts) != null ? _g : [];
+              for (const part of parts) {
+                if ("text" in part && part.text != null && part.text.length > 0) {
+                  if (part.thought === true) {
+                    controller.enqueue({ type: "reasoning", text: part.text });
+                  } else {
+                    controller.enqueue({ type: "text", text: part.text });
+                  }
                 }
               }
               const inlineDataParts = getInlineDataParts(content.parts);
@@ -642,7 +687,7 @@ var GoogleGenerativeAILanguageModel = class {
               for (const part of inlineDataParts) {
                 controller.enqueue({
                   type: "file",
-                  mimeType: part.inlineData.mimeType,
+                  mediaType: part.inlineData.mimeType,
                   data: part.inlineData.data
                 });
               }
@@ -676,17 +721,17 @@ var GoogleGenerativeAILanguageModel = class {
               finishReason: candidate.finishReason,
               hasToolCalls
             });
-            const sources = (_d = extractSources({
+            const sources = (_h = extractSources({
               groundingMetadata: candidate.groundingMetadata,
               generateId
-            })) != null ? _d : [];
+            })) != null ? _h : [];
             for (const source of sources) {
-              controller.enqueue({ type: "source", source });
+              controller.enqueue(source);
             }
             providerMetadata = {
               google: {
-                groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
-                safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+                groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+                safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
               }
             };
           }
@@ -701,9 +746,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
       request: { body }
     };
   }
@@ -716,24 +759,13 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId(),
     toolName: part.functionCall.name,
     args: JSON.stringify(part.functionCall.args)
   }));
 }
-function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought !== true
-  );
-  return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
-}
-function getReasoningDetailsFromParts(parts) {
-  const reasoningParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought === true && part.text != null
-  );
-  return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
-}
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
     (part) => "inlineData" in part
@@ -747,109 +779,102 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema = z2.object({
-  parts: z2.array(
-    z2.union([
+var contentSchema = z3.object({
+  parts: z3.array(
+    z3.union([
       // note: order matters since text can be fully empty
-      z2.object({
-        functionCall: z2.object({
-          name: z2.string(),
-          args: z2.unknown()
+      z3.object({
+        functionCall: z3.object({
+          name: z3.string(),
+          args: z3.unknown()
         })
       }),
-      z2.object({
-        inlineData: z2.object({
-          mimeType: z2.string(),
-          data: z2.string()
+      z3.object({
+        inlineData: z3.object({
+          mimeType: z3.string(),
+          data: z3.string()
         })
       }),
-      z2.object({
-        text: z2.string().nullish(),
-        thought: z2.boolean().nullish()
+      z3.object({
+        text: z3.string().nullish(),
+        thought: z3.boolean().nullish()
       })
     ])
   ).nullish()
 });
-var groundingChunkSchema = z2.object({
-  web: z2.object({ uri: z2.string(), title: z2.string() }).nullish(),
-  retrievedContext: z2.object({ uri: z2.string(), title: z2.string() }).nullish()
+var groundingChunkSchema = z3.object({
+  web: z3.object({ uri: z3.string(), title: z3.string() }).nullish(),
+  retrievedContext: z3.object({ uri: z3.string(), title: z3.string() }).nullish()
 });
-var groundingMetadataSchema = z2.object({
-  webSearchQueries: z2.array(z2.string()).nullish(),
-  retrievalQueries: z2.array(z2.string()).nullish(),
-  searchEntryPoint: z2.object({ renderedContent: z2.string() }).nullish(),
-  groundingChunks: z2.array(groundingChunkSchema).nullish(),
-  groundingSupports: z2.array(
-    z2.object({
-      segment: z2.object({
-        startIndex: z2.number().nullish(),
-        endIndex: z2.number().nullish(),
-        text: z2.string().nullish()
+var groundingMetadataSchema = z3.object({
+  webSearchQueries: z3.array(z3.string()).nullish(),
+  retrievalQueries: z3.array(z3.string()).nullish(),
+  searchEntryPoint: z3.object({ renderedContent: z3.string() }).nullish(),
+  groundingChunks: z3.array(groundingChunkSchema).nullish(),
+  groundingSupports: z3.array(
+    z3.object({
+      segment: z3.object({
+        startIndex: z3.number().nullish(),
+        endIndex: z3.number().nullish(),
+        text: z3.string().nullish()
       }),
-      segment_text: z2.string().nullish(),
-      groundingChunkIndices: z2.array(z2.number()).nullish(),
-      supportChunkIndices: z2.array(z2.number()).nullish(),
-      confidenceScores: z2.array(z2.number()).nullish(),
-      confidenceScore: z2.array(z2.number()).nullish()
+      segment_text: z3.string().nullish(),
+      groundingChunkIndices: z3.array(z3.number()).nullish(),
+      supportChunkIndices: z3.array(z3.number()).nullish(),
+      confidenceScores: z3.array(z3.number()).nullish(),
+      confidenceScore: z3.array(z3.number()).nullish()
     })
   ).nullish(),
-  retrievalMetadata: z2.union([
-    z2.object({
-      webDynamicRetrievalScore: z2.number()
+  retrievalMetadata: z3.union([
+    z3.object({
+      webDynamicRetrievalScore: z3.number()
     }),
-    z2.object({})
+    z3.object({})
   ]).nullish()
 });
-var safetyRatingSchema = z2.object({
-  category: z2.string().nullish(),
-  probability: z2.string().nullish(),
-  probabilityScore: z2.number().nullish(),
-  severity: z2.string().nullish(),
-  severityScore: z2.number().nullish(),
-  blocked: z2.boolean().nullish()
+var safetyRatingSchema = z3.object({
+  category: z3.string().nullish(),
+  probability: z3.string().nullish(),
+  probabilityScore: z3.number().nullish(),
+  severity: z3.string().nullish(),
+  severityScore: z3.number().nullish(),
+  blocked: z3.boolean().nullish()
 });
-var responseSchema = z2.object({
-  candidates: z2.array(
-    z2.object({
-      content: contentSchema.nullish().or(z2.object({}).strict()),
-      finishReason: z2.string().nullish(),
-      safetyRatings: z2.array(safetyRatingSchema).nullish(),
+var usageSchema = z3.object({
+  cachedContentTokenCount: z3.number().nullish(),
+  thoughtsTokenCount: z3.number().nullish(),
+  promptTokenCount: z3.number().nullish(),
+  candidatesTokenCount: z3.number().nullish(),
+  totalTokenCount: z3.number().nullish()
+});
+var responseSchema = z3.object({
+  candidates: z3.array(
+    z3.object({
+      content: contentSchema.nullish().or(z3.object({}).strict()),
+      finishReason: z3.string().nullish(),
+      safetyRatings: z3.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ),
-  usageMetadata: z2.object({
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema = z2.object({
-  candidates: z2.array(
-    z2.object({
+var chunkSchema = z3.object({
+  candidates: z3.array(
+    z3.object({
       content: contentSchema.nullish(),
-      finishReason: z2.string().nullish(),
-      safetyRatings: z2.array(safetyRatingSchema).nullish(),
+      finishReason: z3.string().nullish(),
+      safetyRatings: z3.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata: z2.object({
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z2.object({
-  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: z2.object({
-    thinkingBudget: z2.number().nullish(),
-    includeThoughts: z2.boolean().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
 export {
   GoogleGenerativeAILanguageModel,