@ai-sdk/google 1.2.17 → 2.0.0-alpha.1

This diff compares the published contents of these two publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
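Before the diff body, a quick orientation may help: per the changes below, options that 1.2.x accepted as model settings (useSearchGrounding, dynamicRetrievalConfig, safetySettings, structuredOutputs, audioTimestamp, cachedContent) are read from providerOptions.google in 2.0.0-alpha.1, validated by the new googleGenerativeAIProviderOptions schema, and the model now implements specificationVersion "v2" (content parts, mediaType instead of mimeType, inputTokens/outputTokens usage). A minimal usage sketch, assuming the `ai` package's generateText API and the `google` provider factory from @ai-sdk/google (neither appears in this diff):

// Hypothetical sketch: generateText (from "ai") and the google provider
// factory are assumptions outside this diff. The providerOptions.google keys
// below are the ones validated by googleGenerativeAIProviderOptions.
import { generateText } from "ai";
import { google } from "@ai-sdk/google";

const result = await generateText({
  // dynamic retrieval is only supported on gemini-1.5-flash (non-8b), per prepareTools below
  model: google("gemini-1.5-flash"),
  prompt: "Ground this answer with search.",
  providerOptions: {
    google: {
      useSearchGrounding: true,
      dynamicRetrievalConfig: { mode: "MODE_DYNAMIC", dynamicThreshold: 0.7 },
      thinkingConfig: { thinkingBudget: 1024 },
      safetySettings: [
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: "BLOCK_ONLY_HIGH",
        },
      ],
    },
  },
});

console.log(result.text);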
@@ -7,11 +7,11 @@ import {
  postJsonToApi,
  resolve
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-json-schema-to-openapi-schema.ts
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
- if (isEmptyObjectSchema(jsonSchema)) {
+ if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
  return void 0;
  }
  if (typeof jsonSchema === "boolean") {
@@ -110,9 +110,10 @@ function isEmptyObjectSchema(jsonSchema) {
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ import {
+ convertToBase64
+ } from "@ai-sdk/provider-utils";
  function convertToGoogleGenerativeAIMessages(prompt) {
- var _a, _b;
  const systemInstructionParts = [];
  const contents = [];
  let systemMessagesAllowed = true;
@@ -136,33 +137,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  parts.push({ text: part.text });
  break;
  }
- case "image": {
- parts.push(
- part.image instanceof URL ? {
- fileData: {
- mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
- fileUri: part.image.toString()
- }
- } : {
- inlineData: {
- mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
- data: convertUint8ArrayToBase64(part.image)
- }
- }
- );
- break;
- }
  case "file": {
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  parts.push(
  part.data instanceof URL ? {
  fileData: {
- mimeType: part.mimeType,
+ mimeType: mediaType,
  fileUri: part.data.toString()
  }
  } : {
  inlineData: {
- mimeType: part.mimeType,
- data: part.data
+ mimeType: mediaType,
+ data: convertToBase64(part.data)
  }
  }
  );
@@ -183,7 +169,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return part.text.length === 0 ? void 0 : { text: part.text };
  }
  case "file": {
- if (part.mimeType !== "image/png") {
+ if (part.mediaType !== "image/png") {
  throw new UnsupportedFunctionalityError({
  functionality: "Only PNG images are supported in assistant messages"
  });
@@ -195,8 +181,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  }
  return {
  inlineData: {
- mimeType: part.mimeType,
- data: part.data
+ mimeType: part.mediaType,
+ data: convertToBase64(part.data)
  }
  };
  }
@@ -257,20 +243,112 @@ var googleFailedResponseHandler = createJsonErrorResponseHandler({
  errorToMessage: (data) => data.error.message
  });

+ // src/google-generative-ai-options.ts
+ import { z as z2 } from "zod";
+ var dynamicRetrievalConfig = z2.object({
+ /**
+ * The mode of the predictor to be used in dynamic retrieval.
+ */
+ mode: z2.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+ /**
+ * The threshold to be used in dynamic retrieval. If not set, a system default
+ * value is used.
+ */
+ dynamicThreshold: z2.number().optional()
+ });
+ var googleGenerativeAIProviderOptions = z2.object({
+ responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).optional(),
+ thinkingConfig: z2.object({
+ thinkingBudget: z2.number().optional()
+ }).optional(),
+ /**
+ Optional.
+ The name of the cached content used as context to serve the prediction.
+ Format: cachedContents/{cachedContent}
+ */
+ cachedContent: z2.string().optional(),
+ /**
+ * Optional. Enable structured output. Default is true.
+ *
+ * This is useful when the JSON Schema contains elements that are
+ * not supported by the OpenAPI schema version that
+ * Google Generative AI uses. You can use this to disable
+ * structured outputs if you need to.
+ */
+ structuredOutputs: z2.boolean().optional(),
+ /**
+ Optional. A list of unique safety settings for blocking unsafe content.
+ */
+ safetySettings: z2.array(
+ z2.object({
+ category: z2.enum([
+ "HARM_CATEGORY_UNSPECIFIED",
+ "HARM_CATEGORY_HATE_SPEECH",
+ "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "HARM_CATEGORY_HARASSMENT",
+ "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ "HARM_CATEGORY_CIVIC_INTEGRITY"
+ ]),
+ threshold: z2.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ])
+ })
+ ).optional(),
+ threshold: z2.enum([
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+ "BLOCK_LOW_AND_ABOVE",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_NONE",
+ "OFF"
+ ]).optional(),
+ /**
+ * Optional. Enables timestamp understanding for audio-only files.
+ *
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+ */
+ audioTimestamp: z2.boolean().optional(),
+ /**
+ Optional. When enabled, the model will use Google search to ground the response.
+
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+ */
+ useSearchGrounding: z2.boolean().optional(),
+ /**
+ Optional. Specifies the dynamic retrieval configuration.
+
+ @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+ */
+ dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
+ });
+
  // src/google-prepare-tools.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
- function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
- var _a, _b;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ function prepareTools({
+ tools,
+ toolChoice,
+ useSearchGrounding,
+ dynamicRetrievalConfig: dynamicRetrievalConfig2,
+ modelId
+ }) {
+ var _a;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  const isGemini2 = modelId.includes("gemini-2");
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
  if (useSearchGrounding) {
  return {
  tools: isGemini2 ? { googleSearch: {} } : {
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+ googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
  },
  toolConfig: void 0,
  toolWarnings
@@ -286,12 +364,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
  } else {
  functionDeclarations.push({
  name: tool.name,
- description: (_b = tool.description) != null ? _b : "",
+ description: (_a = tool.description) != null ? _a : "",
  parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
  });
  }
  }
- const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
  return {
  tools: { functionDeclarations },
@@ -333,7 +410,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError2({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -368,25 +445,21 @@ function mapGoogleGenerativeAIFinishReason({

  // src/google-generative-ai-language-model.ts
  var GoogleGenerativeAILanguageModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
- this.defaultObjectGenerationMode = "json";
- this.supportsImageUrls = false;
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
- get supportsStructuredOutputs() {
- var _a;
- return (_a = this.settings.structuredOutputs) != null ? _a : true;
- }
  get provider() {
  return this.config.provider;
  }
+ get supportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
  async getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -395,112 +468,66 @@ var GoogleGenerativeAILanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
  var _a, _b;
- const type = mode.type;
  const warnings = [];
- const googleOptions = parseProviderOptions({
+ const googleOptions = await parseProviderOptions({
  provider: "google",
- providerOptions: providerMetadata,
- schema: googleGenerativeAIProviderOptionsSchema
+ providerOptions,
+ schema: googleGenerativeAIProviderOptions
  });
- const generationConfig = {
- // standardized settings:
- maxOutputTokens: maxTokens,
- temperature,
- topK,
- topP,
- frequencyPenalty,
- presencePenalty,
- stopSequences,
- seed,
- // response format:
- responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
- responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
- // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
- ...this.settings.audioTimestamp && {
- audioTimestamp: this.settings.audioTimestamp
- },
- // provider options:
- responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
- };
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
- switch (type) {
- case "regular": {
- const { tools, toolConfig, toolWarnings } = prepareTools(
- mode,
- (_a = this.settings.useSearchGrounding) != null ? _a : false,
- this.settings.dynamicRetrievalConfig,
- this.modelId
- );
- return {
- args: {
- generationConfig,
- contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- tools,
- toolConfig,
- cachedContent: this.settings.cachedContent
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- generationConfig: {
- ...generationConfig,
- responseMimeType: "application/json",
- responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
- // so this is needed as an escape hatch:
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
- },
- contents,
- systemInstruction,
- safetySettings: this.settings.safetySettings,
- cachedContent: this.settings.cachedContent
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- generationConfig,
- contents,
- tools: {
- functionDeclarations: [
- {
- name: mode.tool.name,
- description: (_b = mode.tool.description) != null ? _b : "",
- parameters: convertJSONSchemaToOpenAPISchema(
- mode.tool.parameters
- )
- }
- ]
- },
- toolConfig: { functionCallingConfig: { mode: "ANY" } },
- safetySettings: this.settings.safetySettings,
- cachedContent: this.settings.cachedContent
+ const {
+ tools: googleTools,
+ toolConfig: googleToolConfig,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice,
+ useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+ dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+ modelId: this.modelId
+ });
+ return {
+ args: {
+ generationConfig: {
+ // standardized settings:
+ maxOutputTokens,
+ temperature,
+ topK,
+ topP,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences,
+ seed,
+ // response format:
+ responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+ responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+ // so this is needed as an escape hatch:
+ // TODO convert into provider option
+ ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+ audioTimestamp: googleOptions.audioTimestamp
  },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
- }
- supportsUrl(url) {
- return this.config.isSupportedUrl(url);
+ // provider options:
+ responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+ },
+ contents,
+ systemInstruction,
+ safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+ tools: googleTools,
+ toolConfig: googleToolConfig,
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args, warnings } = await this.getArgs(options);
  const body = JSON.stringify(args);
  const mergedHeaders = combineHeaders(
@@ -522,43 +549,62 @@ var GoogleGenerativeAILanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { contents: rawPrompt, ...rawSettings } = args;
  const candidate = response.candidates[0];
- const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
- const toolCalls = getToolCallsFromParts({
- parts,
+ const content = [];
+ const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ for (const part of parts) {
+ if ("text" in part && part.text.length > 0) {
+ content.push({ type: "text", text: part.text });
+ } else if ("functionCall" in part) {
+ content.push({
+ type: "tool-call",
+ toolCallType: "function",
+ toolCallId: this.config.generateId(),
+ toolName: part.functionCall.name,
+ args: JSON.stringify(part.functionCall.args)
+ });
+ } else if ("inlineData" in part) {
+ content.push({
+ type: "file",
+ data: part.inlineData.data,
+ mediaType: part.inlineData.mimeType
+ });
+ }
+ }
+ const sources = (_b = extractSources({
+ groundingMetadata: candidate.groundingMetadata,
  generateId: this.config.generateId
- });
+ })) != null ? _b : [];
+ for (const source of sources) {
+ content.push(source);
+ }
  const usageMetadata = response.usageMetadata;
  return {
- text: getTextFromParts(parts),
- files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
- data: part.inlineData.data,
- mimeType: part.inlineData.mimeType
- })),
- toolCalls,
+ content,
  finishReason: mapGoogleGenerativeAIFinishReason({
  finishReason: candidate.finishReason,
- hasToolCalls: toolCalls != null && toolCalls.length > 0
+ hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
- promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
- completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+ inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+ outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+ totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+ reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+ cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
  warnings,
  providerMetadata: {
  google: {
- groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
- safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
  }
  },
- sources: extractSources({
- groundingMetadata: candidate.groundingMetadata,
- generateId: this.config.generateId
- }),
- request: { body }
+ request: { body },
+ response: {
+ // TODO timestamp, model id, id
+ headers: responseHeaders,
+ body: rawResponse
+ }
  };
  }
  async doStream(options) {
@@ -579,11 +625,11 @@ var GoogleGenerativeAILanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { contents: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
- let usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
  };
  let providerMetadata = void 0;
  const generateId = this.config.generateId;
@@ -591,8 +637,11 @@ var GoogleGenerativeAILanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
@@ -600,12 +649,13 @@ var GoogleGenerativeAILanguageModel = class {
  const value = chunk.value;
  const usageMetadata = value.usageMetadata;
  if (usageMetadata != null) {
- usage = {
- promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
- completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
- };
+ usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+ usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+ usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+ usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+ usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
  }
- const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+ const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
  if (candidate == null) {
  return;
  }
@@ -613,17 +663,14 @@ var GoogleGenerativeAILanguageModel = class {
  if (content != null) {
  const deltaText = getTextFromParts(content.parts);
  if (deltaText != null) {
- controller.enqueue({
- type: "text-delta",
- textDelta: deltaText
- });
+ controller.enqueue(deltaText);
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
  for (const part of inlineDataParts) {
  controller.enqueue({
  type: "file",
- mimeType: part.inlineData.mimeType,
+ mediaType: part.inlineData.mimeType,
  data: part.inlineData.data
  });
  }
@@ -657,17 +704,17 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_d = extractSources({
+ const sources = (_g = extractSources({
  groundingMetadata: candidate.groundingMetadata,
  generateId
- })) != null ? _d : [];
+ })) != null ? _g : [];
  for (const source of sources) {
- controller.enqueue({ type: "source", source });
+ controller.enqueue(source);
  }
  providerMetadata = {
  google: {
- groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
  }
  };
  }
@@ -682,9 +729,7 @@ var GoogleGenerativeAILanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
+ response: { headers: responseHeaders },
  request: { body }
  };
  }
@@ -697,6 +742,7 @@ function getToolCallsFromParts({
  (part) => "functionCall" in part
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+ type: "tool-call",
  toolCallType: "function",
  toolCallId: generateId(),
  toolName: part.functionCall.name,
@@ -705,7 +751,10 @@ function getToolCallsFromParts({
  }
  function getTextFromParts(parts) {
  const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
+ return textParts == null || textParts.length === 0 ? void 0 : {
+ type: "text",
+ text: textParts.map((part) => part.text).join("")
+ };
  }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
@@ -720,107 +769,101 @@ function extractSources({
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
  (chunk) => chunk.web != null
  ).map((chunk) => ({
+ type: "source",
  sourceType: "url",
  id: generateId(),
  url: chunk.web.uri,
  title: chunk.web.title
  }));
  }
- var contentSchema = z2.object({
- role: z2.string(),
- parts: z2.array(
- z2.union([
- z2.object({
- text: z2.string()
+ var contentSchema = z3.object({
+ role: z3.string(),
+ parts: z3.array(
+ z3.union([
+ z3.object({
+ text: z3.string()
  }),
- z2.object({
- functionCall: z2.object({
- name: z2.string(),
- args: z2.unknown()
+ z3.object({
+ functionCall: z3.object({
+ name: z3.string(),
+ args: z3.unknown()
  })
  }),
- z2.object({
- inlineData: z2.object({
- mimeType: z2.string(),
- data: z2.string()
+ z3.object({
+ inlineData: z3.object({
+ mimeType: z3.string(),
+ data: z3.string()
  })
  })
  ])
  ).nullish()
  });
- var groundingChunkSchema = z2.object({
- web: z2.object({ uri: z2.string(), title: z2.string() }).nullish(),
- retrievedContext: z2.object({ uri: z2.string(), title: z2.string() }).nullish()
+ var groundingChunkSchema = z3.object({
+ web: z3.object({ uri: z3.string(), title: z3.string() }).nullish(),
+ retrievedContext: z3.object({ uri: z3.string(), title: z3.string() }).nullish()
  });
- var groundingMetadataSchema = z2.object({
- webSearchQueries: z2.array(z2.string()).nullish(),
- retrievalQueries: z2.array(z2.string()).nullish(),
- searchEntryPoint: z2.object({ renderedContent: z2.string() }).nullish(),
- groundingChunks: z2.array(groundingChunkSchema).nullish(),
- groundingSupports: z2.array(
- z2.object({
- segment: z2.object({
- startIndex: z2.number().nullish(),
- endIndex: z2.number().nullish(),
- text: z2.string().nullish()
+ var groundingMetadataSchema = z3.object({
+ webSearchQueries: z3.array(z3.string()).nullish(),
+ retrievalQueries: z3.array(z3.string()).nullish(),
+ searchEntryPoint: z3.object({ renderedContent: z3.string() }).nullish(),
+ groundingChunks: z3.array(groundingChunkSchema).nullish(),
+ groundingSupports: z3.array(
+ z3.object({
+ segment: z3.object({
+ startIndex: z3.number().nullish(),
+ endIndex: z3.number().nullish(),
+ text: z3.string().nullish()
  }),
- segment_text: z2.string().nullish(),
- groundingChunkIndices: z2.array(z2.number()).nullish(),
- supportChunkIndices: z2.array(z2.number()).nullish(),
- confidenceScores: z2.array(z2.number()).nullish(),
- confidenceScore: z2.array(z2.number()).nullish()
+ segment_text: z3.string().nullish(),
+ groundingChunkIndices: z3.array(z3.number()).nullish(),
+ supportChunkIndices: z3.array(z3.number()).nullish(),
+ confidenceScores: z3.array(z3.number()).nullish(),
+ confidenceScore: z3.array(z3.number()).nullish()
  })
  ).nullish(),
- retrievalMetadata: z2.union([
- z2.object({
- webDynamicRetrievalScore: z2.number()
+ retrievalMetadata: z3.union([
+ z3.object({
+ webDynamicRetrievalScore: z3.number()
  }),
- z2.object({})
+ z3.object({})
  ]).nullish()
  });
- var safetyRatingSchema = z2.object({
- category: z2.string().nullish(),
- probability: z2.string().nullish(),
- probabilityScore: z2.number().nullish(),
- severity: z2.string().nullish(),
- severityScore: z2.number().nullish(),
- blocked: z2.boolean().nullish()
+ var safetyRatingSchema = z3.object({
+ category: z3.string().nullish(),
+ probability: z3.string().nullish(),
+ probabilityScore: z3.number().nullish(),
+ severity: z3.string().nullish(),
+ severityScore: z3.number().nullish(),
+ blocked: z3.boolean().nullish()
  });
- var responseSchema = z2.object({
- candidates: z2.array(
- z2.object({
- content: contentSchema.nullish().or(z2.object({}).strict()),
- finishReason: z2.string().nullish(),
- safetyRatings: z2.array(safetyRatingSchema).nullish(),
+ var usageSchema = z3.object({
+ cachedContentTokenCount: z3.number().nullish(),
+ thoughtsTokenCount: z3.number().nullish(),
+ promptTokenCount: z3.number().nullish(),
+ candidatesTokenCount: z3.number().nullish(),
+ totalTokenCount: z3.number().nullish()
+ });
+ var responseSchema = z3.object({
+ candidates: z3.array(
+ z3.object({
+ content: contentSchema.nullish().or(z3.object({}).strict()),
+ finishReason: z3.string().nullish(),
+ safetyRatings: z3.array(safetyRatingSchema).nullish(),
  groundingMetadata: groundingMetadataSchema.nullish()
  })
  ),
- usageMetadata: z2.object({
- promptTokenCount: z2.number().nullish(),
- candidatesTokenCount: z2.number().nullish(),
- totalTokenCount: z2.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
- var chunkSchema = z2.object({
- candidates: z2.array(
- z2.object({
+ var chunkSchema = z3.object({
+ candidates: z3.array(
+ z3.object({
  content: contentSchema.nullish(),
- finishReason: z2.string().nullish(),
- safetyRatings: z2.array(safetyRatingSchema).nullish(),
+ finishReason: z3.string().nullish(),
+ safetyRatings: z3.array(safetyRatingSchema).nullish(),
  groundingMetadata: groundingMetadataSchema.nullish()
  })
  ).nullish(),
- usageMetadata: z2.object({
- promptTokenCount: z2.number().nullish(),
- candidatesTokenCount: z2.number().nullish(),
- totalTokenCount: z2.number().nullish()
- }).nullish()
- });
- var googleGenerativeAIProviderOptionsSchema = z2.object({
- responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
- thinkingConfig: z2.object({
- thinkingBudget: z2.number().nullish()
- }).nullish()
+ usageMetadata: usageSchema.nullish()
  });
  export {
  GoogleGenerativeAILanguageModel,