@ai-sdk/google 1.2.17 → 2.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,24 +1,155 @@
 // src/google-provider.ts
+import {
+  NoSuchModelError
+} from "@ai-sdk/provider";
 import {
   generateId,
   loadApiKey,
   withoutTrailingSlash
 } from "@ai-sdk/provider-utils";
 
-// src/google-generative-ai-language-model.ts
+// src/google-generative-ai-embedding-model.ts
+import {
+  TooManyEmbeddingValuesForCallError
+} from "@ai-sdk/provider";
 import {
   combineHeaders,
-  createEventSourceResponseHandler,
   createJsonResponseHandler,
   parseProviderOptions,
   postJsonToApi,
   resolve
 } from "@ai-sdk/provider-utils";
+import { z as z3 } from "zod";
+
+// src/google-error.ts
+import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+import { z } from "zod";
+var googleErrorDataSchema = z.object({
+  error: z.object({
+    code: z.number().nullable(),
+    message: z.string(),
+    status: z.string()
+  })
+});
+var googleFailedResponseHandler = createJsonErrorResponseHandler({
+  errorSchema: googleErrorDataSchema,
+  errorToMessage: (data) => data.error.message
+});
+
+// src/google-generative-ai-embedding-options.ts
 import { z as z2 } from "zod";
+var googleGenerativeAIEmbeddingProviderOptions = z2.object({
+  /**
+   * Optional. Optional reduced dimension for the output embedding.
+   * If set, excessive values in the output embedding are truncated from the end.
+   */
+  outputDimensionality: z2.number().optional(),
+  /**
+   * Optional. Specifies the task type for generating embeddings.
+   * Supported task types:
+   * - SEMANTIC_SIMILARITY: Optimized for text similarity.
+   * - CLASSIFICATION: Optimized for text classification.
+   * - CLUSTERING: Optimized for clustering texts based on similarity.
+   * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
+   * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
+   * - QUESTION_ANSWERING: Optimized for answering questions.
+   * - FACT_VERIFICATION: Optimized for verifying factual information.
+   * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
+   */
+  taskType: z2.enum([
+    "SEMANTIC_SIMILARITY",
+    "CLASSIFICATION",
+    "CLUSTERING",
+    "RETRIEVAL_DOCUMENT",
+    "RETRIEVAL_QUERY",
+    "QUESTION_ANSWERING",
+    "FACT_VERIFICATION",
+    "CODE_RETRIEVAL_QUERY"
+  ]).optional()
+});
+
+// src/google-generative-ai-embedding-model.ts
+var GoogleGenerativeAIEmbeddingModel = class {
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 2048;
+    this.supportsParallelCalls = true;
+    this.modelId = modelId;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  async doEmbed({
+    values,
+    headers,
+    abortSignal,
+    providerOptions
+  }) {
+    const googleOptions = await parseProviderOptions({
+      provider: "google",
+      providerOptions,
+      schema: googleGenerativeAIEmbeddingProviderOptions
+    });
+    if (values.length > this.maxEmbeddingsPerCall) {
+      throw new TooManyEmbeddingValuesForCallError({
+        provider: this.provider,
+        modelId: this.modelId,
+        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+        values
+      });
+    }
+    const mergedHeaders = combineHeaders(
+      await resolve(this.config.headers),
+      headers
+    );
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi({
+      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
+      headers: mergedHeaders,
+      body: {
+        requests: values.map((value) => ({
+          model: `models/${this.modelId}`,
+          content: { role: "user", parts: [{ text: value }] },
+          outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
+          taskType: googleOptions == null ? void 0 : googleOptions.taskType
+        }))
+      },
+      failedResponseHandler: googleFailedResponseHandler,
+      successfulResponseHandler: createJsonResponseHandler(
+        googleGenerativeAITextEmbeddingResponseSchema
+      ),
+      abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      embeddings: response.embeddings.map((item) => item.values),
+      usage: void 0,
+      response: { headers: responseHeaders, body: rawValue }
+    };
+  }
+};
+var googleGenerativeAITextEmbeddingResponseSchema = z3.object({
+  embeddings: z3.array(z3.object({ values: z3.array(z3.number()) }))
+});
+
+// src/google-generative-ai-language-model.ts
+import {
+  combineHeaders as combineHeaders2,
+  createEventSourceResponseHandler,
+  createJsonResponseHandler as createJsonResponseHandler2,
+  parseProviderOptions as parseProviderOptions2,
+  postJsonToApi as postJsonToApi2,
+  resolve as resolve2
+} from "@ai-sdk/provider-utils";
+import { z as z5 } from "zod";
 
 // src/convert-json-schema-to-openapi-schema.ts
 function convertJSONSchemaToOpenAPISchema(jsonSchema) {
-  if (isEmptyObjectSchema(jsonSchema)) {
+  if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
     return void 0;
   }
   if (typeof jsonSchema === "boolean") {
@@ -117,9 +248,10 @@ function isEmptyObjectSchema(jsonSchema) {
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+import {
+  convertToBase64
+} from "@ai-sdk/provider-utils";
 function convertToGoogleGenerativeAIMessages(prompt) {
-  var _a, _b;
   const systemInstructionParts = [];
   const contents = [];
   let systemMessagesAllowed = true;
@@ -143,33 +275,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
               parts.push({ text: part.text });
               break;
             }
-            case "image": {
-              parts.push(
-                part.image instanceof URL ? {
-                  fileData: {
-                    mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
-                    fileUri: part.image.toString()
-                  }
-                } : {
-                  inlineData: {
-                    mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                    data: convertUint8ArrayToBase64(part.image)
-                  }
-                }
-              );
-              break;
-            }
             case "file": {
+              const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
               parts.push(
                 part.data instanceof URL ? {
                   fileData: {
-                    mimeType: part.mimeType,
+                    mimeType: mediaType,
                     fileUri: part.data.toString()
                   }
                 } : {
                   inlineData: {
-                    mimeType: part.mimeType,
-                    data: part.data
+                    mimeType: mediaType,
+                    data: convertToBase64(part.data)
                   }
                 }
               );
@@ -190,7 +307,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
                 return part.text.length === 0 ? void 0 : { text: part.text };
               }
               case "file": {
-                if (part.mimeType !== "image/png") {
+                if (part.mediaType !== "image/png") {
                   throw new UnsupportedFunctionalityError({
                     functionality: "Only PNG images are supported in assistant messages"
                   });
@@ -202,8 +319,8 @@ function convertToGoogleGenerativeAIMessages(prompt) {
                 }
                 return {
                   inlineData: {
-                    mimeType: part.mimeType,
-                    data: part.data
+                    mimeType: part.mediaType,
+                    data: convertToBase64(part.data)
                   }
                 };
               }
@@ -249,35 +366,112 @@ function getModelPath(modelId) {
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
-// src/google-error.ts
-import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
-import { z } from "zod";
-var googleErrorDataSchema = z.object({
-  error: z.object({
-    code: z.number().nullable(),
-    message: z.string(),
-    status: z.string()
-  })
+// src/google-generative-ai-options.ts
+import { z as z4 } from "zod";
+var dynamicRetrievalConfig = z4.object({
+  /**
+   * The mode of the predictor to be used in dynamic retrieval.
+   */
+  mode: z4.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
+  /**
+   * The threshold to be used in dynamic retrieval. If not set, a system default
+   * value is used.
+   */
+  dynamicThreshold: z4.number().optional()
 });
-var googleFailedResponseHandler = createJsonErrorResponseHandler({
-  errorSchema: googleErrorDataSchema,
-  errorToMessage: (data) => data.error.message
+var googleGenerativeAIProviderOptions = z4.object({
+  responseModalities: z4.array(z4.enum(["TEXT", "IMAGE"])).optional(),
+  thinkingConfig: z4.object({
+    thinkingBudget: z4.number().optional()
+  }).optional(),
+  /**
+  Optional.
+  The name of the cached content used as context to serve the prediction.
+  Format: cachedContents/{cachedContent}
+  */
+  cachedContent: z4.string().optional(),
+  /**
+   * Optional. Enable structured output. Default is true.
+   *
+   * This is useful when the JSON Schema contains elements that are
+   * not supported by the OpenAPI schema version that
+   * Google Generative AI uses. You can use this to disable
+   * structured outputs if you need to.
+   */
+  structuredOutputs: z4.boolean().optional(),
+  /**
+  Optional. A list of unique safety settings for blocking unsafe content.
+  */
+  safetySettings: z4.array(
+    z4.object({
+      category: z4.enum([
+        "HARM_CATEGORY_UNSPECIFIED",
+        "HARM_CATEGORY_HATE_SPEECH",
+        "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "HARM_CATEGORY_HARASSMENT",
+        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "HARM_CATEGORY_CIVIC_INTEGRITY"
+      ]),
+      threshold: z4.enum([
+        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+        "BLOCK_LOW_AND_ABOVE",
+        "BLOCK_MEDIUM_AND_ABOVE",
+        "BLOCK_ONLY_HIGH",
+        "BLOCK_NONE",
+        "OFF"
+      ])
+    })
+  ).optional(),
+  threshold: z4.enum([
+    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
+    "BLOCK_LOW_AND_ABOVE",
+    "BLOCK_MEDIUM_AND_ABOVE",
+    "BLOCK_ONLY_HIGH",
+    "BLOCK_NONE",
+    "OFF"
+  ]).optional(),
+  /**
+   * Optional. Enables timestamp understanding for audio-only files.
+   *
+   * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
+   */
+  audioTimestamp: z4.boolean().optional(),
+  /**
+  Optional. When enabled, the model will use Google search to ground the response.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
+  */
+  useSearchGrounding: z4.boolean().optional(),
+  /**
+  Optional. Specifies the dynamic retrieval configuration.
+
+  @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
+
+  @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
+  */
+  dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
 });
 
 // src/google-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
-  var _a, _b;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+function prepareTools({
+  tools,
+  toolChoice,
+  useSearchGrounding,
+  dynamicRetrievalConfig: dynamicRetrievalConfig2,
+  modelId
+}) {
+  var _a;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const isGemini2 = modelId.includes("gemini-2");
   const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
   if (useSearchGrounding) {
     return {
       tools: isGemini2 ? { googleSearch: {} } : {
-        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
+        googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
       },
       toolConfig: void 0,
       toolWarnings
@@ -293,12 +487,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     } else {
       functionDeclarations.push({
         name: tool.name,
-        description: (_b = tool.description) != null ? _b : "",
+        description: (_a = tool.description) != null ? _a : "",
         parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: { functionDeclarations },
@@ -340,7 +533,7 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -375,25 +568,21 @@ function mapGoogleGenerativeAIFinishReason({
 
 // src/google-generative-ai-language-model.ts
 var GoogleGenerativeAILanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
-  get supportsStructuredOutputs() {
-    var _a;
-    return (_a = this.settings.structuredOutputs) != null ? _a : true;
-  }
   get provider() {
     return this.config.provider;
   }
+  get supportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+  }
   async getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -402,180 +591,153 @@ var GoogleGenerativeAILanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
   }) {
     var _a, _b;
-    const type = mode.type;
     const warnings = [];
-    const googleOptions = parseProviderOptions({
+    const googleOptions = await parseProviderOptions2({
       provider: "google",
-      providerOptions: providerMetadata,
-      schema: googleGenerativeAIProviderOptionsSchema
+      providerOptions,
+      schema: googleGenerativeAIProviderOptions
     });
-    const generationConfig = {
-      // standardized settings:
-      maxOutputTokens: maxTokens,
-      temperature,
-      topK,
-      topP,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences,
-      seed,
-      // response format:
-      responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
-      responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-      // so this is needed as an escape hatch:
-      this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
-      ...this.settings.audioTimestamp && {
-        audioTimestamp: this.settings.audioTimestamp
-      },
-      // provider options:
-      responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
-      thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
-    };
     const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
-    switch (type) {
-      case "regular": {
-        const { tools, toolConfig, toolWarnings } = prepareTools(
-          mode,
-          (_a = this.settings.useSearchGrounding) != null ? _a : false,
-          this.settings.dynamicRetrievalConfig,
-          this.modelId
-        );
-        return {
-          args: {
-            generationConfig,
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            tools,
-            toolConfig,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            generationConfig: {
-              ...generationConfig,
-              responseMimeType: "application/json",
-              responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
-              // so this is needed as an escape hatch:
-              this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
-            },
-            contents,
-            systemInstruction,
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            generationConfig,
-            contents,
-            tools: {
-              functionDeclarations: [
-                {
-                  name: mode.tool.name,
-                  description: (_b = mode.tool.description) != null ? _b : "",
-                  parameters: convertJSONSchemaToOpenAPISchema(
-                    mode.tool.parameters
-                  )
-                }
-              ]
-            },
-            toolConfig: { functionCallingConfig: { mode: "ANY" } },
-            safetySettings: this.settings.safetySettings,
-            cachedContent: this.settings.cachedContent
+    const {
+      tools: googleTools,
+      toolConfig: googleToolConfig,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice,
+      useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+      dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
+      modelId: this.modelId
+    });
+    return {
+      args: {
+        generationConfig: {
+          // standardized settings:
+          maxOutputTokens,
+          temperature,
+          topK,
+          topP,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences,
+          seed,
+          // response format:
+          responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
+          responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
+          // so this is needed as an escape hatch:
+          // TODO convert into provider option
+          ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+          ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
+            audioTimestamp: googleOptions.audioTimestamp
           },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
-  }
-  supportsUrl(url) {
-    return this.config.isSupportedUrl(url);
+          // provider options:
+          responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
+          thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
+        },
+        contents,
+        systemInstruction,
+        safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
+        tools: googleTools,
+        toolConfig: googleToolConfig,
+        cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args, warnings } = await this.getArgs(options);
     const body = JSON.stringify(args);
-    const mergedHeaders = combineHeaders(
-      await resolve(this.config.headers),
+    const mergedHeaders = combineHeaders2(
+      await resolve2(this.config.headers),
       options.headers
     );
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await postJsonToApi({
+    } = await postJsonToApi2({
       url: `${this.config.baseURL}/${getModelPath(
         this.modelId
       )}:generateContent`,
       headers: mergedHeaders,
       body: args,
       failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: createJsonResponseHandler(responseSchema),
+      successfulResponseHandler: createJsonResponseHandler2(responseSchema),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     const candidate = response.candidates[0];
-    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
-    const toolCalls = getToolCallsFromParts({
-      parts,
+    const content = [];
+    const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+    for (const part of parts) {
+      if ("text" in part && part.text.length > 0) {
+        content.push({ type: "text", text: part.text });
+      } else if ("functionCall" in part) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: this.config.generateId(),
+          toolName: part.functionCall.name,
+          args: JSON.stringify(part.functionCall.args)
+        });
+      } else if ("inlineData" in part) {
+        content.push({
+          type: "file",
+          data: part.inlineData.data,
+          mediaType: part.inlineData.mimeType
+        });
+      }
+    }
+    const sources = (_b = extractSources({
+      groundingMetadata: candidate.groundingMetadata,
       generateId: this.config.generateId
-    });
+    })) != null ? _b : [];
+    for (const source of sources) {
+      content.push(source);
+    }
     const usageMetadata = response.usageMetadata;
     return {
-      text: getTextFromParts(parts),
-      files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
-        data: part.inlineData.data,
-        mimeType: part.inlineData.mimeType
-      })),
-      toolCalls,
+      content,
       finishReason: mapGoogleGenerativeAIFinishReason({
         finishReason: candidate.finishReason,
-        hasToolCalls: toolCalls != null && toolCalls.length > 0
+        hasToolCalls: content.some((part) => part.type === "tool-call")
       }),
       usage: {
-        promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
-        completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
+        inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
+        outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
+        totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
+        reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
+        cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
      warnings,
      providerMetadata: {
        google: {
-          groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
-          safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
+          groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+          safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
        }
      },
-      sources: extractSources({
-        groundingMetadata: candidate.groundingMetadata,
-        generateId: this.config.generateId
-      }),
-      request: { body }
+      request: { body },
+      response: {
+        // TODO timestamp, model id, id
+        headers: responseHeaders,
+        body: rawResponse
+      }
    };
  }
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = JSON.stringify(args);
-    const headers = combineHeaders(
-      await resolve(this.config.headers),
+    const headers = combineHeaders2(
+      await resolve2(this.config.headers),
      options.headers
    );
-    const { responseHeaders, value: response } = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi2({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId
      )}:streamGenerateContent?alt=sse`,
@@ -586,11 +748,11 @@ var GoogleGenerativeAILanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { contents: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let providerMetadata = void 0;
     const generateId2 = this.config.generateId;
@@ -598,8 +760,11 @@ var GoogleGenerativeAILanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i;
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
               return;
@@ -607,12 +772,13 @@ var GoogleGenerativeAILanguageModel = class {
             const value = chunk.value;
             const usageMetadata = value.usageMetadata;
             if (usageMetadata != null) {
-              usage = {
-                promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
-                completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
-              };
+              usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
+              usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
+              usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
+              usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
+              usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
             }
-            const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
+            const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
             if (candidate == null) {
               return;
             }
@@ -620,17 +786,14 @@ var GoogleGenerativeAILanguageModel = class {
             if (content != null) {
               const deltaText = getTextFromParts(content.parts);
               if (deltaText != null) {
-                controller.enqueue({
-                  type: "text-delta",
-                  textDelta: deltaText
-                });
+                controller.enqueue(deltaText);
               }
               const inlineDataParts = getInlineDataParts(content.parts);
               if (inlineDataParts != null) {
                 for (const part of inlineDataParts) {
                   controller.enqueue({
                     type: "file",
-                    mimeType: part.inlineData.mimeType,
+                    mediaType: part.inlineData.mimeType,
                     data: part.inlineData.data
                   });
                 }
@@ -664,17 +827,17 @@ var GoogleGenerativeAILanguageModel = class {
                 finishReason: candidate.finishReason,
                 hasToolCalls
               });
-              const sources = (_d = extractSources({
+              const sources = (_g = extractSources({
                 groundingMetadata: candidate.groundingMetadata,
                 generateId: generateId2
-              })) != null ? _d : [];
+              })) != null ? _g : [];
               for (const source of sources) {
-                controller.enqueue({ type: "source", source });
+                controller.enqueue(source);
               }
               providerMetadata = {
                 google: {
-                  groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
-                  safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
+                  groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
+                  safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
                 }
               };
             }
@@ -689,9 +852,7 @@ var GoogleGenerativeAILanguageModel = class {
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
+      response: { headers: responseHeaders },
       request: { body }
     };
   }
@@ -704,6 +865,7 @@ function getToolCallsFromParts({
     (part) => "functionCall" in part
   );
   return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
+    type: "tool-call",
     toolCallType: "function",
     toolCallId: generateId2(),
     toolName: part.functionCall.name,
@@ -712,7 +874,10 @@ function getToolCallsFromParts({
 }
 function getTextFromParts(parts) {
   const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
-  return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
+  return textParts == null || textParts.length === 0 ? void 0 : {
+    type: "text",
+    text: textParts.map((part) => part.text).join("")
+  };
 }
 function getInlineDataParts(parts) {
   return parts == null ? void 0 : parts.filter(
@@ -727,187 +892,103 @@ function extractSources({
   return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
     (chunk) => chunk.web != null
   ).map((chunk) => ({
+    type: "source",
     sourceType: "url",
     id: generateId2(),
     url: chunk.web.uri,
     title: chunk.web.title
   }));
 }
-var contentSchema = z2.object({
-  role: z2.string(),
-  parts: z2.array(
-    z2.union([
-      z2.object({
-        text: z2.string()
+var contentSchema = z5.object({
+  role: z5.string(),
+  parts: z5.array(
+    z5.union([
+      z5.object({
+        text: z5.string()
       }),
-      z2.object({
-        functionCall: z2.object({
-          name: z2.string(),
-          args: z2.unknown()
+      z5.object({
+        functionCall: z5.object({
+          name: z5.string(),
+          args: z5.unknown()
         })
       }),
-      z2.object({
-        inlineData: z2.object({
-          mimeType: z2.string(),
-          data: z2.string()
+      z5.object({
+        inlineData: z5.object({
+          mimeType: z5.string(),
+          data: z5.string()
         })
       })
     ])
   ).nullish()
 });
-var groundingChunkSchema = z2.object({
-  web: z2.object({ uri: z2.string(), title: z2.string() }).nullish(),
-  retrievedContext: z2.object({ uri: z2.string(), title: z2.string() }).nullish()
+var groundingChunkSchema = z5.object({
+  web: z5.object({ uri: z5.string(), title: z5.string() }).nullish(),
+  retrievedContext: z5.object({ uri: z5.string(), title: z5.string() }).nullish()
 });
-var groundingMetadataSchema = z2.object({
-  webSearchQueries: z2.array(z2.string()).nullish(),
-  retrievalQueries: z2.array(z2.string()).nullish(),
-  searchEntryPoint: z2.object({ renderedContent: z2.string() }).nullish(),
-  groundingChunks: z2.array(groundingChunkSchema).nullish(),
-  groundingSupports: z2.array(
-    z2.object({
-      segment: z2.object({
-        startIndex: z2.number().nullish(),
-        endIndex: z2.number().nullish(),
-        text: z2.string().nullish()
+var groundingMetadataSchema = z5.object({
+  webSearchQueries: z5.array(z5.string()).nullish(),
+  retrievalQueries: z5.array(z5.string()).nullish(),
+  searchEntryPoint: z5.object({ renderedContent: z5.string() }).nullish(),
+  groundingChunks: z5.array(groundingChunkSchema).nullish(),
+  groundingSupports: z5.array(
+    z5.object({
+      segment: z5.object({
+        startIndex: z5.number().nullish(),
+        endIndex: z5.number().nullish(),
+        text: z5.string().nullish()
       }),
-      segment_text: z2.string().nullish(),
-      groundingChunkIndices: z2.array(z2.number()).nullish(),
-      supportChunkIndices: z2.array(z2.number()).nullish(),
-      confidenceScores: z2.array(z2.number()).nullish(),
-      confidenceScore: z2.array(z2.number()).nullish()
+      segment_text: z5.string().nullish(),
+      groundingChunkIndices: z5.array(z5.number()).nullish(),
+      supportChunkIndices: z5.array(z5.number()).nullish(),
+      confidenceScores: z5.array(z5.number()).nullish(),
+      confidenceScore: z5.array(z5.number()).nullish()
     })
   ).nullish(),
-  retrievalMetadata: z2.union([
-    z2.object({
-      webDynamicRetrievalScore: z2.number()
+  retrievalMetadata: z5.union([
+    z5.object({
+      webDynamicRetrievalScore: z5.number()
     }),
-    z2.object({})
+    z5.object({})
   ]).nullish()
 });
-var safetyRatingSchema = z2.object({
-  category: z2.string().nullish(),
-  probability: z2.string().nullish(),
-  probabilityScore: z2.number().nullish(),
-  severity: z2.string().nullish(),
-  severityScore: z2.number().nullish(),
-  blocked: z2.boolean().nullish()
+var safetyRatingSchema = z5.object({
+  category: z5.string().nullish(),
+  probability: z5.string().nullish(),
+  probabilityScore: z5.number().nullish(),
+  severity: z5.string().nullish(),
+  severityScore: z5.number().nullish(),
+  blocked: z5.boolean().nullish()
+});
+var usageSchema = z5.object({
+  cachedContentTokenCount: z5.number().nullish(),
+  thoughtsTokenCount: z5.number().nullish(),
+  promptTokenCount: z5.number().nullish(),
+  candidatesTokenCount: z5.number().nullish(),
+  totalTokenCount: z5.number().nullish()
 });
-var responseSchema = z2.object({
-  candidates: z2.array(
-    z2.object({
-      content: contentSchema.nullish().or(z2.object({}).strict()),
-      finishReason: z2.string().nullish(),
-      safetyRatings: z2.array(safetyRatingSchema).nullish(),
+var responseSchema = z5.object({
+  candidates: z5.array(
+    z5.object({
+      content: contentSchema.nullish().or(z5.object({}).strict()),
+      finishReason: z5.string().nullish(),
+      safetyRatings: z5.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ),
-  usageMetadata: z2.object({
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
+  usageMetadata: usageSchema.nullish()
 });
-var chunkSchema = z2.object({
-  candidates: z2.array(
-    z2.object({
+var chunkSchema = z5.object({
+  candidates: z5.array(
+    z5.object({
       content: contentSchema.nullish(),
-      finishReason: z2.string().nullish(),
-      safetyRatings: z2.array(safetyRatingSchema).nullish(),
+      finishReason: z5.string().nullish(),
+      safetyRatings: z5.array(safetyRatingSchema).nullish(),
       groundingMetadata: groundingMetadataSchema.nullish()
     })
   ).nullish(),
-  usageMetadata: z2.object({
-    promptTokenCount: z2.number().nullish(),
-    candidatesTokenCount: z2.number().nullish(),
-    totalTokenCount: z2.number().nullish()
-  }).nullish()
-});
-var googleGenerativeAIProviderOptionsSchema = z2.object({
-  responseModalities: z2.array(z2.enum(["TEXT", "IMAGE"])).nullish(),
-  thinkingConfig: z2.object({
-    thinkingBudget: z2.number().nullish()
-  }).nullish()
-});
-
-// src/google-generative-ai-embedding-model.ts
-import {
-  TooManyEmbeddingValuesForCallError
-} from "@ai-sdk/provider";
-import {
-  combineHeaders as combineHeaders2,
-  createJsonResponseHandler as createJsonResponseHandler2,
-  postJsonToApi as postJsonToApi2,
-  resolve as resolve2
-} from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
-var GoogleGenerativeAIEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.modelId = modelId;
-    this.settings = settings;
-    this.config = config;
-  }
-  get provider() {
-    return this.config.provider;
-  }
-  get maxEmbeddingsPerCall() {
-    return 2048;
-  }
-  get supportsParallelCalls() {
-    return true;
-  }
-  async doEmbed({
-    values,
-    headers,
-    abortSignal
-  }) {
-    if (values.length > this.maxEmbeddingsPerCall) {
-      throw new TooManyEmbeddingValuesForCallError({
-        provider: this.provider,
-        modelId: this.modelId,
-        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
-        values
-      });
-    }
-    const mergedHeaders = combineHeaders2(
-      await resolve2(this.config.headers),
-      headers
-    );
-    const { responseHeaders, value: response } = await postJsonToApi2({
-      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
-      headers: mergedHeaders,
-      body: {
-        requests: values.map((value) => ({
-          model: `models/${this.modelId}`,
-          content: { role: "user", parts: [{ text: value }] },
-          outputDimensionality: this.settings.outputDimensionality,
-          taskType: this.settings.taskType
-        }))
-      },
-      failedResponseHandler: googleFailedResponseHandler,
-      successfulResponseHandler: createJsonResponseHandler2(
-        googleGenerativeAITextEmbeddingResponseSchema
-      ),
-      abortSignal,
-      fetch: this.config.fetch
-    });
-    return {
-      embeddings: response.embeddings.map((item) => item.values),
-      usage: void 0,
-      rawResponse: { headers: responseHeaders }
-    };
-  }
-};
-var googleGenerativeAITextEmbeddingResponseSchema = z3.object({
-  embeddings: z3.array(z3.object({ values: z3.array(z3.number()) }))
+  usageMetadata: usageSchema.nullish()
 });
 
-// src/google-supported-file-url.ts
-function isSupportedFileUrl(url) {
-  return url.toString().startsWith("https://generativelanguage.googleapis.com/v1beta/files/");
-}
-
 // src/google-provider.ts
 function createGoogleGenerativeAI(options = {}) {
   var _a;
@@ -920,30 +1001,35 @@ function createGoogleGenerativeAI(options = {}) {
     }),
     ...options.headers
   });
-  const createChatModel = (modelId, settings = {}) => {
+  const createChatModel = (modelId) => {
     var _a2;
-    return new GoogleGenerativeAILanguageModel(modelId, settings, {
+    return new GoogleGenerativeAILanguageModel(modelId, {
       provider: "google.generative-ai",
       baseURL,
       headers: getHeaders,
       generateId: (_a2 = options.generateId) != null ? _a2 : generateId,
-      isSupportedUrl: isSupportedFileUrl,
+      supportedUrls: () => ({
+        "*": [
+          // HTTP URLs:
+          /^https?:\/\/.*$/
+        ]
+      }),
       fetch: options.fetch
     });
   };
-  const createEmbeddingModel = (modelId, settings = {}) => new GoogleGenerativeAIEmbeddingModel(modelId, settings, {
+  const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
     provider: "google.generative-ai",
     baseURL,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const provider = function(modelId, settings) {
+  const provider = function(modelId) {
     if (new.target) {
       throw new Error(
         "The Google Generative AI model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId, settings);
+    return createChatModel(modelId);
   };
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;
@@ -951,6 +1037,9 @@ function createGoogleGenerativeAI(options = {}) {
   provider.embedding = createEmbeddingModel;
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;
+  provider.imageModel = (modelId) => {
+    throw new NoSuchModelError({ modelId, modelType: "imageModel" });
+  };
   return provider;
 }
 var google = createGoogleGenerativeAI();
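
The most visible API change in this diff is the move from model-level settings to per-call provider options: createChatModel no longer takes a settings argument, and getArgs now reads a "google" entry from providerOptions through parseProviderOptions with the googleGenerativeAIProviderOptions schema. The following is a minimal sketch of what a call site might look like against 2.0.0-alpha.1; generateText from the "ai" package and the "gemini-2.0-flash" model id are assumptions for illustration and are not part of this diff.

// Hedged sketch only: generateText and the model id are assumed, not shown in this diff.
import { generateText } from "ai";
import { google } from "@ai-sdk/google";

const result = await generateText({
  // 2.0.0-alpha: the provider factory takes only a model id;
  // the second settings argument from 1.2.x is gone.
  model: google("gemini-2.0-flash"),
  prompt: "Summarize the changes in this release.",
  providerOptions: {
    // validated by googleGenerativeAIProviderOptions inside getArgs
    google: {
      useSearchGrounding: true,
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: "BLOCK_MEDIUM_AND_ABOVE",
        },
      ],
      thinkingConfig: { thinkingBudget: 1024 },
    },
  },
});

console.log(result.text);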
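The embedding model follows the same pattern: doEmbed now parses providerOptions against googleGenerativeAIEmbeddingProviderOptions, so outputDimensionality and taskType move from constructor settings into per-call options. A similar sketch, with embedMany from the "ai" package assumed for illustration:

// Hedged sketch only: embedMany and the model id are assumed, not shown in this diff.
import { embedMany } from "ai";
import { google } from "@ai-sdk/google";

const { embeddings } = await embedMany({
  model: google.textEmbeddingModel("text-embedding-004"),
  values: ["sunny day at the beach", "rainy afternoon in the city"],
  providerOptions: {
    // validated by googleGenerativeAIEmbeddingProviderOptions inside doEmbed
    google: {
      outputDimensionality: 512, // excess values are truncated from the end
      taskType: "SEMANTIC_SIMILARITY",
    },
  },
});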