@ai-sdk/google 1.2.18 → 2.0.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,11 +7,11 @@ import {
   postJsonToApi,
   resolve
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
    return void 0;
  }
  if (typeof jsonSchema === "boolean") {
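The widened guard means a nullish schema is now treated like an empty object schema and converted to `undefined`. A minimal typed sketch of the new behavior (the `isEmptyObjectSchema` helper is simplified here; the real one in this file is more thorough):

```ts
// Simplified sketch of the v2 guard; the shape of the check matches the
// diff above, the helper body is an assumption for illustration.
type JSONSchemaLike =
  | boolean
  | { type?: string; properties?: Record<string, unknown> };

function isEmptyObjectSchema(schema: JSONSchemaLike): boolean {
  return (
    typeof schema === "object" &&
    schema.type === "object" &&
    (schema.properties == null || Object.keys(schema.properties).length === 0)
  );
}

function convertJSONSchemaToOpenAPISchema(
  schema: JSONSchemaLike | null | undefined
): unknown {
  // v2 change: a missing schema short-circuits to undefined,
  // exactly like an empty object schema.
  if (schema == null || isEmptyObjectSchema(schema)) {
    return undefined;
  }
  return schema; // ...the actual conversion continues here
}
```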
@@ -110,9 +110,10 @@ function isEmptyObjectSchema(jsonSchema) {
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+import {
+  convertToBase64
+} from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -136,33 +137,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             parts.push({ text: part.text });
             break;
           }
-          case "image": {
-            parts.push(
-              part.image instanceof URL ? {
-                fileData: {
-                  mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                  fileUri: part.image.toString()
-                }
-              } : {
-                inlineData: {
-                  mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                  data: convertUint8ArrayToBase64(part.image)
-                }
-              }
-            );
-            break;
-          }
           case "file": {
+            const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
             parts.push(
               part.data instanceof URL ? {
                 fileData: {
-                  mimeType: part.mimeType,
+                  mimeType: mediaType,
                   fileUri: part.data.toString()
                 }
               } : {
                 inlineData: {
-                  mimeType: part.mimeType,
-                  data: part.data
+                  mimeType: mediaType,
+                  data: convertToBase64(part.data)
                 }
               }
             );
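With the dedicated `image` case removed, images travel as `file` parts: `mimeType` becomes `mediaType`, and the catch-all `image/*` is normalized to `image/jpeg`. A sketch of the before/after part shapes (field names are taken from the code above; the full prompt types are assumptions):

```ts
// Assumed v2 file-part shape, inferred from the fields this diff reads
// (part.data, part.mediaType); not the SDK's full type definitions.
type FilePart = {
  type: "file";
  data: URL | string | Uint8Array; // URL -> fileData, otherwise inlineData
  mediaType: string;               // "image/*" is normalized to "image/jpeg"
};

// v1 (removed branch): { type: "image", image: URL | Uint8Array, mimeType? }
// v2 equivalent:
const imageAsFilePart: FilePart = {
  type: "file",
  data: new URL("https://example.com/cat.png"), // sent as fileData.fileUri
  mediaType: "image/png",
};
```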
@@ -183,7 +169,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             return part.text.length === 0 ? void 0 : { text: part.text };
           }
           case "file": {
-            if (part.mimeType !== "image/png") {
+            if (part.mediaType !== "image/png") {
               throw new UnsupportedFunctionalityError({
                 functionality: "Only PNG images are supported in assistant messages"
               });
@@ -195,8 +181,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
             }
             return {
               inlineData: {
-                mimeType: part.mimeType,
-                data: part.data
+                mimeType: part.mediaType,
+                data: convertToBase64(part.data)
               }
             };
           }
@@ -257,20 +243,112 @@ var googleFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/google-generative-ai-options.ts
+import { z as z2 } from "zod";
+var dynamicRetrievalConfig = z2.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: z2.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: z2.number().optional()
+});
+var googleGenerativeAIProviderOptions = z2.object({
+  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: z2.object({
+    thinkingBudget: z2.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+   */
+  cachedContent: z2.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: z2.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+   */
+  safetySettings: z2.array(
+    z2.object({
+      category: z2.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: z2.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: z2.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: z2.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+   */
+  useSearchGrounding: z2.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+   */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+});
+
 // src/google-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
-  var _a, _b;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
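Everything that was previously configured on the model instance (`safetySettings`, `cachedContent`, `useSearchGrounding`, and so on) is now validated per request against this schema. A hedged sketch of how a caller would pass these options, assuming the usual `generateText` entry point from the `ai` package:

```ts
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

// Sketch: settings move from model construction to per-call
// providerOptions.google, validated by googleGenerativeAIProviderOptions.
const result = await generateText({
  model: google("gemini-2.0-flash"),
  prompt: "Summarize the latest Gemini release notes.",
  providerOptions: {
    google: {
      useSearchGrounding: true,
      safetySettings: [
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: "BLOCK_MEDIUM_AND_ABOVE",
        },
      ],
      cachedContent: "cachedContents/example", // format: cachedContents/{id}
    },
  },
});
```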
@@ -286,12 +364,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (_b = tool.description) != null ? _b : "",
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -333,7 +410,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -368,25 +445,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
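The model now declares specification version v2: the `settings` constructor parameter is gone, and URL capability is exposed through a `supportedUrls` map on the config rather than a `supportsUrl(url)` predicate (removed later in this diff). A sketch of direct construction with a hypothetical config object; the real values are wired up by the provider factory in this package:

```ts
// Hypothetical config for illustration; field names follow what this diff
// reads from `config` (provider, headers, generateId, supportedUrls, fetch).
const model = new GoogleGenerativeAILanguageModel("gemini-2.0-flash", {
  provider: "google.generative-ai",
  baseURL: "https://generativelanguage.googleapis.com/v1beta",
  headers: () => ({ "x-goog-api-key": process.env.GOOGLE_API_KEY! }),
  generateId: () => crypto.randomUUID(),
  // v2: a map of media-type patterns to URL regexps replaces supportsUrl().
  supportedUrls: () => ({
    "*": [/^https:\/\/generativelanguage\.googleapis\.com\/v1beta\/files\/.*/],
  }),
});
```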
@@ -395,118 +468,66 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    var _a, _b, _c;
-    const type = mode.type;
+    var _a, _b;
     const warnings = [];
-    const googleOptions = parseProviderOptions({
+    const googleOptions = await parseProviderOptions({
       provider: "google",
-      providerOptions: providerMetadata,
-      schema: googleGenerativeAIProviderOptionsSchema
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
-    if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
-      warnings.push({
-        type: "other",
-        message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
-      });
-    }
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-    switch (type) {
-      case "regular": {
-        const { tools, toolConfig, toolWarnings } = prepareTools(
-          mode,
-          (_b = this.settings.useSearchGrounding) != null ? _b : false,
-          this.settings.dynamicRetrievalConfig,
-          this.modelId
-        );
-        return {
-          args: {
-            generationConfig,
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            tools,
-            toolConfig,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            generationConfig: {
-              ...generationConfig,
-              responseMimeType: "application/json",
-              responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-              // so this is needed as an escape hatch:
-              this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
-            },
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            generationConfig,
-            contents,
-            tools: {
-              functionDeclarations: [
-                {
-                  name: mode.tool.name,
-                  description: (_c = mode.tool.description) != null ? _c : "",
-                  parameters: convertJSONSchemaToOpenAPISchema(
-                    mode.tool.parameters
-                  )
-                }
-              ]
-            },
-            toolConfig: { functionCallingConfig: { mode: "ANY" } },
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
-  }
-  supportsUrl(url) {
-    return this.config.isSupportedUrl(url);
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
     const mergedHeaders = combineHeaders(
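getArgs no longer branches on a `mode` ("regular", "object-json", "object-tool"); a single code path assembles the request, with JSON output driven by `responseFormat` and tools passed in directly. Roughly the body it produces (field names from the code above, values purely illustrative):

```ts
// Illustrative shape of `args` as built by the v2 getArgs;
// values are examples, not defaults.
const args = {
  generationConfig: {
    maxOutputTokens: 1024,
    temperature: 0.2,
    // set only when responseFormat.type === "json":
    responseMimeType: "application/json",
    responseSchema: {/* output of convertJSONSchemaToOpenAPISchema */},
  },
  contents: [{ role: "user", parts: [{ text: "List three colors." }] }],
  systemInstruction: { parts: [{ text: "Reply tersely." }] },
  safetySettings: undefined, // now sourced from providerOptions.google
  tools: { functionDeclarations: [] },
  toolConfig: undefined,
  cachedContent: undefined,
};
```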
@@ -528,45 +549,62 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     const candidate = response.candidates[0];
-    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
-    const toolCalls = getToolCallsFromParts({
-      parts,
-      // Use candidateParts
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+    for (const part of parts) {
+      if ("text" in part && part.text.length > 0) {
+        content.push({ type: "text", text: part.text });
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
       generateId: this.config.generateId
-    });
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     const usageMetadata = response.usageMetadata;
     return {
-      text: getTextFromParts(parts),
-      reasoning: getReasoningDetailsFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls: toolCalls != null && toolCalls.length > 0
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-        promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
-        completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
       warnings,
       providerMetadata: {
        google: {
-          groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
-          safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
        }
      },
-      sources: extractSources({
-        groundingMetadata: candidate.groundingMetadata,
-        generateId: this.config.generateId
-      }),
-      request: { body }
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
    };
  }
  async doStream(options) {
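doGenerate now returns a single ordered `content` array (text, tool calls, inline files, then grounding sources) plus a richer usage object, replacing the separate `text`/`toolCalls`/`files`/`sources` fields and the NaN token fallbacks. A sketch of consuming it, with `model` and `options` assumed to be set up as above:

```ts
// Sketch: reading the v2 doGenerate result shape produced above.
const { content, usage } = await model.doGenerate(options);

for (const part of content) {
  switch (part.type) {
    case "text":
      process.stdout.write(part.text);
      break;
    case "tool-call":
      console.log(`call ${part.toolName}(${part.args})`);
      break;
    case "file": // inlineData arrives as a base64 string
      console.log(`inline ${part.mediaType}`);
      break;
    case "source":
      console.log(`grounded on ${part.url}`);
      break;
  }
}

// Token counts are now undefined when the API omits them, never NaN.
console.log(usage.inputTokens, usage.outputTokens, usage.reasoningTokens);
```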
@@ -587,11 +625,11 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let providerMetadata = void 0;
     const generateId = this.config.generateId;
@@ -599,8 +637,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
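Streams now open with an explicit `stream-start` part carrying the request warnings, and text and source parts are enqueued as-is rather than wrapped (see the hunks below). A sketch of a consumer, assuming the v2 stream part type names that appear in this diff:

```ts
// Sketch: iterating the v2 stream; part type names taken from this diff
// ("stream-start", "text", "file", "error", plus source and finish parts).
const { stream } = await model.doStream(options);
const reader = stream.getReader();

while (true) {
  const { done, value: part } = await reader.read();
  if (done) break;
  switch (part.type) {
    case "stream-start":
      console.warn(part.warnings); // warnings now arrive in-band
      break;
    case "text":
      process.stdout.write(part.text); // enqueued directly by getTextFromParts
      break;
    case "error":
      throw part.error;
  }
}
```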
@@ -608,12 +649,13 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage = {
-                promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
-                completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
-              };
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
@@ -621,28 +663,14 @@ var GoogleGenerativeAILanguageModel = class {
             if (content != null) {
               const deltaText = getTextFromParts(content.parts);
               if (deltaText != null) {
-                controller.enqueue({
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
-              }
-              const reasoningDeltaText = getReasoningDetailsFromParts(
-                content.parts
-              );
-              if (reasoningDeltaText != null) {
-                for (const part of reasoningDeltaText) {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: part.text
-                  });
-                }
+                controller.enqueue(deltaText);
               }
               const inlineDataParts = getInlineDataParts(content.parts);
               if (inlineDataParts != null) {
                 for (const part of inlineDataParts) {
                   controller.enqueue({
                     type: "file",
-                    mimeType: part.inlineData.mimeType,
+                    mediaType: part.inlineData.mimeType,
                     data: part.inlineData.data
                   });
                 }
@@ -676,17 +704,17 @@ var GoogleGenerativeAILanguageModel = class {
               finishReason: candidate.finishReason,
               hasToolCalls
             });
-            const sources = (_d = extractSources({
+            const sources = (_g = extractSources({
               groundingMetadata: candidate.groundingMetadata,
               generateId
-            })) != null ? _d : [];
+            })) != null ? _g : [];
             for (const source of sources) {
-              controller.enqueue({ type: "source", source });
+              controller.enqueue(source);
             }
             providerMetadata = {
               google: {
-                groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
-                safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+                groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+                safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
               }
             };
           }
@@ -701,9 +729,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
      request: { body }
    };
  }
@@ -716,6 +742,7 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId(),
     toolName: part.functionCall.name,
@@ -723,16 +750,11 @@ function getToolCallsFromParts({
   }));
 }
 function getTextFromParts(parts) {
-  const textParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought !== true
-  );
-  return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
-}
-function getReasoningDetailsFromParts(parts) {
-  const reasoningParts = parts == null ? void 0 : parts.filter(
-    (part) => "text" in part && part.thought === true
-  );
-  return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
+  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
+  return textParts == null || textParts.length === 0 ? void 0 : {
+    type: "text",
+    text: textParts.map((part) => part.text).join("")
+  };
 }
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
@@ -747,109 +769,101 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema = z2.object({
-  role: z2.string(),
-  parts: z2.array(
-    z2.union([
-      z2.object({
-        text: z2.string(),
-        thought: z2.boolean().nullish()
+var contentSchema = z3.object({
+  role: z3.string(),
+  parts: z3.array(
+    z3.union([
+      z3.object({
+        text: z3.string()
       }),
-      z2.object({
-        functionCall: z2.object({
-          name: z2.string(),
-          args: z2.unknown()
+      z3.object({
+        functionCall: z3.object({
+          name: z3.string(),
+          args: z3.unknown()
         })
       }),
-      z2.object({
-        inlineData: z2.object({
-          mimeType: z2.string(),
-          data: z2.string()
+      z3.object({
+        inlineData: z3.object({
+          mimeType: z3.string(),
+          data: z3.string()
         })
       })
     ])
   ).nullish()
 });
-var groundingChunkSchema = z2.object({
-  web: z2.object({ uri: z2.string(), title: z2.string() }).nullish(),
-  retrievedContext: z2.object({ uri: z2.string(), title: z2.string() }).nullish()
+var groundingChunkSchema = z3.object({
+  web: z3.object({ uri: z3.string(), title: z3.string() }).nullish(),
+  retrievedContext: z3.object({ uri: z3.string(), title: z3.string() }).nullish()
 });
-var groundingMetadataSchema = z2.object({
-  webSearchQueries: z2.array(z2.string()).nullish(),
-  retrievalQueries: z2.array(z2.string()).nullish(),
-  searchEntryPoint: z2.object({ renderedContent: z2.string() }).nullish(),
-  groundingChunks: z2.array(groundingChunkSchema).nullish(),
-  groundingSupports: z2.array(
-    z2.object({
-      segment: z2.object({
-        startIndex: z2.number().nullish(),
-        endIndex: z2.number().nullish(),
-        text: z2.string().nullish()
+var groundingMetadataSchema = z3.object({
+  webSearchQueries: z3.array(z3.string()).nullish(),
+  retrievalQueries: z3.array(z3.string()).nullish(),
+  searchEntryPoint: z3.object({ renderedContent: z3.string() }).nullish(),
+  groundingChunks: z3.array(groundingChunkSchema).nullish(),
+  groundingSupports: z3.array(
+    z3.object({
+      segment: z3.object({
+        startIndex: z3.number().nullish(),
+        endIndex: z3.number().nullish(),
+        text: z3.string().nullish()
       }),
-      segment_text: z2.string().nullish(),
-      groundingChunkIndices: z2.array(z2.number()).nullish(),
-      supportChunkIndices: z2.array(z2.number()).nullish(),
-      confidenceScores: z2.array(z2.number()).nullish(),
-      confidenceScore: z2.array(z2.number()).nullish()
+      segment_text: z3.string().nullish(),
+      groundingChunkIndices: z3.array(z3.number()).nullish(),
+      supportChunkIndices: z3.array(z3.number()).nullish(),
+      confidenceScores: z3.array(z3.number()).nullish(),
+      confidenceScore: z3.array(z3.number()).nullish()
     })
   ).nullish(),
-  retrievalMetadata: z2.union([
-    z2.object({
-      webDynamicRetrievalScore: z2.number()
+  retrievalMetadata: z3.union([
+    z3.object({
+      webDynamicRetrievalScore: z3.number()
     }),
-    z2.object({})
+    z3.object({})
   ]).nullish()
 });
-var safetyRatingSchema = z2.object({
-  category: z2.string().nullish(),
-  probability: z2.string().nullish(),
-  probabilityScore: z2.number().nullish(),
-  severity: z2.string().nullish(),
-  severityScore: z2.number().nullish(),
-  blocked: z2.boolean().nullish()
+var safetyRatingSchema = z3.object({
+  category: z3.string().nullish(),
+  probability: z3.string().nullish(),
+  probabilityScore: z3.number().nullish(),
+  severity: z3.string().nullish(),
+  severityScore: z3.number().nullish(),
+  blocked: z3.boolean().nullish()
 });
-var responseSchema = z2.object({
-  candidates: z2.array(
-    z2.object({
-      content: contentSchema.nullish().or(z2.object({}).strict()),
-      finishReason: z2.string().nullish(),
-      safetyRatings: z2.array(safetyRatingSchema).nullish(),
+var usageSchema = z3.object({
+  cachedContentTokenCount: z3.number().nullish(),
+  thoughtsTokenCount: z3.number().nullish(),
+  promptTokenCount: z3.number().nullish(),
+  candidatesTokenCount: z3.number().nullish(),
+  totalTokenCount: z3.number().nullish()
+});
+var responseSchema = z3.object({
+  candidates: z3.array(
+    z3.object({
+      content: contentSchema.nullish().or(z3.object({}).strict()),
+      finishReason: z3.string().nullish(),
+      safetyRatings: z3.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ),
-  usageMetadata: z2.object({
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema = z2.object({
-  candidates: z2.array(
-    z2.object({
+var chunkSchema = z3.object({
+  candidates: z3.array(
+    z3.object({
       content: contentSchema.nullish(),
-      finishReason: z2.string().nullish(),
-      safetyRatings: z2.array(safetyRatingSchema).nullish(),
+      finishReason: z3.string().nullish(),
+      safetyRatings: z3.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata: z2.object({
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z2.object({
-  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: z2.object({
-    thinkingBudget: z2.number().nullish(),
-    includeThoughts: z2.boolean().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
 export {
   GoogleGenerativeAILanguageModel,