@ai-sdk/google 1.2.19 → 2.0.0-alpha.10

This diff shows the contents of publicly available package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between those published versions.
package/dist/index.js CHANGED
@@ -26,15 +26,136 @@ __export(src_exports, {
26
26
  module.exports = __toCommonJS(src_exports);
27
27
 
28
28
  // src/google-provider.ts
29
+ var import_provider4 = require("@ai-sdk/provider");
29
30
  var import_provider_utils5 = require("@ai-sdk/provider-utils");
30
31
 
31
- // src/google-generative-ai-language-model.ts
32
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
32
+ // src/google-generative-ai-embedding-model.ts
33
+ var import_provider = require("@ai-sdk/provider");
34
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
35
+ var import_zod3 = require("zod");
36
+
37
+ // src/google-error.ts
38
+ var import_provider_utils = require("@ai-sdk/provider-utils");
39
+ var import_zod = require("zod");
40
+ var googleErrorDataSchema = import_zod.z.object({
41
+ error: import_zod.z.object({
42
+ code: import_zod.z.number().nullable(),
43
+ message: import_zod.z.string(),
44
+ status: import_zod.z.string()
45
+ })
46
+ });
47
+ var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
48
+ errorSchema: googleErrorDataSchema,
49
+ errorToMessage: (data) => data.error.message
50
+ });
51
+
52
+ // src/google-generative-ai-embedding-options.ts
33
53
  var import_zod2 = require("zod");
54
+ var googleGenerativeAIEmbeddingProviderOptions = import_zod2.z.object({
55
+ /**
56
+ * Optional. Optional reduced dimension for the output embedding.
57
+ * If set, excessive values in the output embedding are truncated from the end.
58
+ */
59
+ outputDimensionality: import_zod2.z.number().optional(),
60
+ /**
61
+ * Optional. Specifies the task type for generating embeddings.
62
+ * Supported task types:
63
+ * - SEMANTIC_SIMILARITY: Optimized for text similarity.
64
+ * - CLASSIFICATION: Optimized for text classification.
65
+ * - CLUSTERING: Optimized for clustering texts based on similarity.
66
+ * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
67
+ * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
68
+ * - QUESTION_ANSWERING: Optimized for answering questions.
69
+ * - FACT_VERIFICATION: Optimized for verifying factual information.
70
+ * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
71
+ */
72
+ taskType: import_zod2.z.enum([
73
+ "SEMANTIC_SIMILARITY",
74
+ "CLASSIFICATION",
75
+ "CLUSTERING",
76
+ "RETRIEVAL_DOCUMENT",
77
+ "RETRIEVAL_QUERY",
78
+ "QUESTION_ANSWERING",
79
+ "FACT_VERIFICATION",
80
+ "CODE_RETRIEVAL_QUERY"
81
+ ]).optional()
82
+ });
83
+
84
+ // src/google-generative-ai-embedding-model.ts
85
+ var GoogleGenerativeAIEmbeddingModel = class {
86
+ constructor(modelId, config) {
87
+ this.specificationVersion = "v2";
88
+ this.maxEmbeddingsPerCall = 2048;
89
+ this.supportsParallelCalls = true;
90
+ this.modelId = modelId;
91
+ this.config = config;
92
+ }
93
+ get provider() {
94
+ return this.config.provider;
95
+ }
96
+ async doEmbed({
97
+ values,
98
+ headers,
99
+ abortSignal,
100
+ providerOptions
101
+ }) {
102
+ const googleOptions = await (0, import_provider_utils2.parseProviderOptions)({
103
+ provider: "google",
104
+ providerOptions,
105
+ schema: googleGenerativeAIEmbeddingProviderOptions
106
+ });
107
+ if (values.length > this.maxEmbeddingsPerCall) {
108
+ throw new import_provider.TooManyEmbeddingValuesForCallError({
109
+ provider: this.provider,
110
+ modelId: this.modelId,
111
+ maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
112
+ values
113
+ });
114
+ }
115
+ const mergedHeaders = (0, import_provider_utils2.combineHeaders)(
116
+ await (0, import_provider_utils2.resolve)(this.config.headers),
117
+ headers
118
+ );
119
+ const {
120
+ responseHeaders,
121
+ value: response,
122
+ rawValue
123
+ } = await (0, import_provider_utils2.postJsonToApi)({
124
+ url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
125
+ headers: mergedHeaders,
126
+ body: {
127
+ requests: values.map((value) => ({
128
+ model: `models/${this.modelId}`,
129
+ content: { role: "user", parts: [{ text: value }] },
130
+ outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
131
+ taskType: googleOptions == null ? void 0 : googleOptions.taskType
132
+ }))
133
+ },
134
+ failedResponseHandler: googleFailedResponseHandler,
135
+ successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
136
+ googleGenerativeAITextEmbeddingResponseSchema
137
+ ),
138
+ abortSignal,
139
+ fetch: this.config.fetch
140
+ });
141
+ return {
142
+ embeddings: response.embeddings.map((item) => item.values),
143
+ usage: void 0,
144
+ response: { headers: responseHeaders, body: rawValue }
145
+ };
146
+ }
147
+ };
148
+ var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
149
+ embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
150
+ });
151
+
152
+ // src/google-generative-ai-language-model.ts
153
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
154
+ var import_zod5 = require("zod");
34
155
 
35
156
  // src/convert-json-schema-to-openapi-schema.ts
36
157
  function convertJSONSchemaToOpenAPISchema(jsonSchema) {
37
- if (isEmptyObjectSchema(jsonSchema)) {
158
+ if (jsonSchema == null || isEmptyObjectSchema(jsonSchema)) {
38
159
  return void 0;
39
160
  }
40
161
  if (typeof jsonSchema === "boolean") {
@@ -130,10 +251,9 @@ function isEmptyObjectSchema(jsonSchema) {
130
251
  }
131
252
 
132
253
  // src/convert-to-google-generative-ai-messages.ts
133
- var import_provider = require("@ai-sdk/provider");
134
- var import_provider_utils = require("@ai-sdk/provider-utils");
254
+ var import_provider2 = require("@ai-sdk/provider");
255
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
135
256
  function convertToGoogleGenerativeAIMessages(prompt) {
136
- var _a, _b;
137
257
  const systemInstructionParts = [];
138
258
  const contents = [];
139
259
  let systemMessagesAllowed = true;
@@ -141,7 +261,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
141
261
  switch (role) {
142
262
  case "system": {
143
263
  if (!systemMessagesAllowed) {
144
- throw new import_provider.UnsupportedFunctionalityError({
264
+ throw new import_provider2.UnsupportedFunctionalityError({
145
265
  functionality: "system messages are only supported at the beginning of the conversation"
146
266
  });
147
267
  }
@@ -157,33 +277,18 @@ function convertToGoogleGenerativeAIMessages(prompt) {
157
277
  parts.push({ text: part.text });
158
278
  break;
159
279
  }
160
- case "image": {
161
- parts.push(
162
- part.image instanceof URL ? {
163
- fileData: {
164
- mimeType: (_a = part.mimeType) != null ? _a : "image/jpeg",
165
- fileUri: part.image.toString()
166
- }
167
- } : {
168
- inlineData: {
169
- mimeType: (_b = part.mimeType) != null ? _b : "image/jpeg",
170
- data: (0, import_provider_utils.convertUint8ArrayToBase64)(part.image)
171
- }
172
- }
173
- );
174
- break;
175
- }
176
280
  case "file": {
281
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
177
282
  parts.push(
178
283
  part.data instanceof URL ? {
179
284
  fileData: {
180
- mimeType: part.mimeType,
285
+ mimeType: mediaType,
181
286
  fileUri: part.data.toString()
182
287
  }
183
288
  } : {
184
289
  inlineData: {
185
- mimeType: part.mimeType,
186
- data: part.data
290
+ mimeType: mediaType,
291
+ data: (0, import_provider_utils3.convertToBase64)(part.data)
187
292
  }
188
293
  }
189
294
  );
@@ -204,20 +309,20 @@ function convertToGoogleGenerativeAIMessages(prompt) {
204
309
  return part.text.length === 0 ? void 0 : { text: part.text };
205
310
  }
206
311
  case "file": {
207
- if (part.mimeType !== "image/png") {
208
- throw new import_provider.UnsupportedFunctionalityError({
312
+ if (part.mediaType !== "image/png") {
313
+ throw new import_provider2.UnsupportedFunctionalityError({
209
314
  functionality: "Only PNG images are supported in assistant messages"
210
315
  });
211
316
  }
212
317
  if (part.data instanceof URL) {
213
- throw new import_provider.UnsupportedFunctionalityError({
318
+ throw new import_provider2.UnsupportedFunctionalityError({
214
319
  functionality: "File data URLs in assistant messages are not supported"
215
320
  });
216
321
  }
217
322
  return {
218
323
  inlineData: {
219
- mimeType: part.mimeType,
220
- data: part.data
324
+ mimeType: part.mediaType,
325
+ data: (0, import_provider_utils3.convertToBase64)(part.data)
221
326
  }
222
327
  };
223
328
  }
@@ -263,33 +368,111 @@ function getModelPath(modelId) {
263
368
  return modelId.includes("/") ? modelId : `models/${modelId}`;
264
369
  }
265
370
 
266
- // src/google-error.ts
267
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
268
- var import_zod = require("zod");
269
- var googleErrorDataSchema = import_zod.z.object({
270
- error: import_zod.z.object({
271
- code: import_zod.z.number().nullable(),
272
- message: import_zod.z.string(),
273
- status: import_zod.z.string()
274
- })
371
+ // src/google-generative-ai-options.ts
372
+ var import_zod4 = require("zod");
373
+ var dynamicRetrievalConfig = import_zod4.z.object({
374
+ /**
375
+ * The mode of the predictor to be used in dynamic retrieval.
376
+ */
377
+ mode: import_zod4.z.enum(["MODE_UNSPECIFIED", "MODE_DYNAMIC"]).optional(),
378
+ /**
379
+ * The threshold to be used in dynamic retrieval. If not set, a system default
380
+ * value is used.
381
+ */
382
+ dynamicThreshold: import_zod4.z.number().optional()
275
383
  });
276
- var googleFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
277
- errorSchema: googleErrorDataSchema,
278
- errorToMessage: (data) => data.error.message
384
+ var googleGenerativeAIProviderOptions = import_zod4.z.object({
385
+ responseModalities: import_zod4.z.array(import_zod4.z.enum(["TEXT", "IMAGE"])).optional(),
386
+ thinkingConfig: import_zod4.z.object({
387
+ thinkingBudget: import_zod4.z.number().optional(),
388
+ includeThoughts: import_zod4.z.boolean().optional()
389
+ }).optional(),
390
+ /**
391
+ Optional.
392
+ The name of the cached content used as context to serve the prediction.
393
+ Format: cachedContents/{cachedContent}
394
+ */
395
+ cachedContent: import_zod4.z.string().optional(),
396
+ /**
397
+ * Optional. Enable structured output. Default is true.
398
+ *
399
+ * This is useful when the JSON Schema contains elements that are
400
+ * not supported by the OpenAPI schema version that
401
+ * Google Generative AI uses. You can use this to disable
402
+ * structured outputs if you need to.
403
+ */
404
+ structuredOutputs: import_zod4.z.boolean().optional(),
405
+ /**
406
+ Optional. A list of unique safety settings for blocking unsafe content.
407
+ */
408
+ safetySettings: import_zod4.z.array(
409
+ import_zod4.z.object({
410
+ category: import_zod4.z.enum([
411
+ "HARM_CATEGORY_UNSPECIFIED",
412
+ "HARM_CATEGORY_HATE_SPEECH",
413
+ "HARM_CATEGORY_DANGEROUS_CONTENT",
414
+ "HARM_CATEGORY_HARASSMENT",
415
+ "HARM_CATEGORY_SEXUALLY_EXPLICIT",
416
+ "HARM_CATEGORY_CIVIC_INTEGRITY"
417
+ ]),
418
+ threshold: import_zod4.z.enum([
419
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
420
+ "BLOCK_LOW_AND_ABOVE",
421
+ "BLOCK_MEDIUM_AND_ABOVE",
422
+ "BLOCK_ONLY_HIGH",
423
+ "BLOCK_NONE",
424
+ "OFF"
425
+ ])
426
+ })
427
+ ).optional(),
428
+ threshold: import_zod4.z.enum([
429
+ "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
430
+ "BLOCK_LOW_AND_ABOVE",
431
+ "BLOCK_MEDIUM_AND_ABOVE",
432
+ "BLOCK_ONLY_HIGH",
433
+ "BLOCK_NONE",
434
+ "OFF"
435
+ ]).optional(),
436
+ /**
437
+ * Optional. Enables timestamp understanding for audio-only files.
438
+ *
439
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
440
+ */
441
+ audioTimestamp: import_zod4.z.boolean().optional(),
442
+ /**
443
+ Optional. When enabled, the model will use Google search to ground the response.
444
+
445
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/overview
446
+ */
447
+ useSearchGrounding: import_zod4.z.boolean().optional(),
448
+ /**
449
+ Optional. Specifies the dynamic retrieval configuration.
450
+
451
+ @note Dynamic retrieval is only compatible with Gemini 1.5 Flash.
452
+
453
+ @see https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-with-google-search#dynamic-retrieval
454
+ */
455
+ dynamicRetrievalConfig: dynamicRetrievalConfig.optional()
279
456
  });
280
457
 
281
458
  // src/google-prepare-tools.ts
282
- var import_provider2 = require("@ai-sdk/provider");
283
- function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId) {
284
- var _a, _b;
285
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
459
+ var import_provider3 = require("@ai-sdk/provider");
460
+ function prepareTools({
461
+ tools,
462
+ toolChoice,
463
+ useSearchGrounding,
464
+ dynamicRetrievalConfig: dynamicRetrievalConfig2,
465
+ modelId
466
+ }) {
467
+ var _a;
468
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
286
469
  const toolWarnings = [];
287
470
  const isGemini2 = modelId.includes("gemini-2");
288
471
  const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
289
472
  if (useSearchGrounding) {
290
473
  return {
291
474
  tools: isGemini2 ? { googleSearch: {} } : {
292
- googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig ? {} : { dynamicRetrievalConfig }
475
+ googleSearchRetrieval: !supportsDynamicRetrieval || !dynamicRetrievalConfig2 ? {} : { dynamicRetrievalConfig: dynamicRetrievalConfig2 }
293
476
  },
294
477
  toolConfig: void 0,
295
478
  toolWarnings
@@ -305,12 +488,11 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
305
488
  } else {
306
489
  functionDeclarations.push({
307
490
  name: tool.name,
308
- description: (_b = tool.description) != null ? _b : "",
491
+ description: (_a = tool.description) != null ? _a : "",
309
492
  parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
310
493
  });
311
494
  }
312
495
  }
313
- const toolChoice = mode.toolChoice;
314
496
  if (toolChoice == null) {
315
497
  return {
316
498
  tools: { functionDeclarations },
@@ -351,8 +533,8 @@ function prepareTools(mode, useSearchGrounding, dynamicRetrievalConfig, modelId)
351
533
  };
352
534
  default: {
353
535
  const _exhaustiveCheck = type;
354
- throw new import_provider2.UnsupportedFunctionalityError({
355
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
536
+ throw new import_provider3.UnsupportedFunctionalityError({
537
+ functionality: `tool choice type: ${_exhaustiveCheck}`
356
538
  });
357
539
  }
358
540
  }
@@ -387,25 +569,21 @@ function mapGoogleGenerativeAIFinishReason({
387
569
 
388
570
  // src/google-generative-ai-language-model.ts
389
571
  var GoogleGenerativeAILanguageModel = class {
390
- constructor(modelId, settings, config) {
391
- this.specificationVersion = "v1";
392
- this.defaultObjectGenerationMode = "json";
393
- this.supportsImageUrls = false;
572
+ constructor(modelId, config) {
573
+ this.specificationVersion = "v2";
394
574
  this.modelId = modelId;
395
- this.settings = settings;
396
575
  this.config = config;
397
576
  }
398
- get supportsStructuredOutputs() {
399
- var _a;
400
- return (_a = this.settings.structuredOutputs) != null ? _a : true;
401
- }
402
577
  get provider() {
403
578
  return this.config.provider;
404
579
  }
580
+ get supportedUrls() {
581
+ var _a, _b, _c;
582
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
583
+ }
405
584
  async getArgs({
406
- mode,
407
585
  prompt,
408
- maxTokens,
586
+ maxOutputTokens,
409
587
  temperature,
410
588
  topP,
411
589
  topK,
@@ -414,15 +592,16 @@ var GoogleGenerativeAILanguageModel = class {
414
592
  stopSequences,
415
593
  responseFormat,
416
594
  seed,
417
- providerMetadata
595
+ tools,
596
+ toolChoice,
597
+ providerOptions
418
598
  }) {
419
599
  var _a, _b, _c;
420
- const type = mode.type;
421
600
  const warnings = [];
422
- const googleOptions = (0, import_provider_utils3.parseProviderOptions)({
601
+ const googleOptions = await (0, import_provider_utils4.parseProviderOptions)({
423
602
  provider: "google",
424
- providerOptions: providerMetadata,
425
- schema: googleGenerativeAIProviderOptionsSchema
603
+ providerOptions,
604
+ schema: googleGenerativeAIProviderOptions
426
605
  });
427
606
  if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
428
607
  warnings.push({
@@ -430,187 +609,161 @@ var GoogleGenerativeAILanguageModel = class {
430
609
  message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
431
610
  });
432
611
  }
433
- const generationConfig = {
434
- // standardized settings:
435
- maxOutputTokens: maxTokens,
436
- temperature,
437
- topK,
438
- topP,
439
- frequencyPenalty,
440
- presencePenalty,
441
- stopSequences,
442
- seed,
443
- // response format:
444
- responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
445
- responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
446
- // so this is needed as an escape hatch:
447
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
448
- ...this.settings.audioTimestamp && {
449
- audioTimestamp: this.settings.audioTimestamp
450
- },
451
- // provider options:
452
- responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
453
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
454
- };
455
612
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
456
- switch (type) {
457
- case "regular": {
458
- const { tools, toolConfig, toolWarnings } = prepareTools(
459
- mode,
460
- (_b = this.settings.useSearchGrounding) != null ? _b : false,
461
- this.settings.dynamicRetrievalConfig,
462
- this.modelId
463
- );
464
- return {
465
- args: {
466
- generationConfig,
467
- contents,
468
- systemInstruction,
469
- safetySettings: this.settings.safetySettings,
470
- tools,
471
- toolConfig,
472
- cachedContent: this.settings.cachedContent
473
- },
474
- warnings: [...warnings, ...toolWarnings]
475
- };
476
- }
477
- case "object-json": {
478
- return {
479
- args: {
480
- generationConfig: {
481
- ...generationConfig,
482
- responseMimeType: "application/json",
483
- responseSchema: mode.schema != null && // Google GenAI does not support all OpenAPI Schema features,
484
- // so this is needed as an escape hatch:
485
- this.supportsStructuredOutputs ? convertJSONSchemaToOpenAPISchema(mode.schema) : void 0
486
- },
487
- contents,
488
- systemInstruction,
489
- safetySettings: this.settings.safetySettings,
490
- cachedContent: this.settings.cachedContent
491
- },
492
- warnings
493
- };
494
- }
495
- case "object-tool": {
496
- return {
497
- args: {
498
- generationConfig,
499
- contents,
500
- tools: {
501
- functionDeclarations: [
502
- {
503
- name: mode.tool.name,
504
- description: (_c = mode.tool.description) != null ? _c : "",
505
- parameters: convertJSONSchemaToOpenAPISchema(
506
- mode.tool.parameters
507
- )
508
- }
509
- ]
510
- },
511
- toolConfig: { functionCallingConfig: { mode: "ANY" } },
512
- safetySettings: this.settings.safetySettings,
513
- cachedContent: this.settings.cachedContent
613
+ const {
614
+ tools: googleTools,
615
+ toolConfig: googleToolConfig,
616
+ toolWarnings
617
+ } = prepareTools({
618
+ tools,
619
+ toolChoice,
620
+ useSearchGrounding: (_b = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _b : false,
621
+ dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
622
+ modelId: this.modelId
623
+ });
624
+ return {
625
+ args: {
626
+ generationConfig: {
627
+ // standardized settings:
628
+ maxOutputTokens,
629
+ temperature,
630
+ topK,
631
+ topP,
632
+ frequencyPenalty,
633
+ presencePenalty,
634
+ stopSequences,
635
+ seed,
636
+ // response format:
637
+ responseMimeType: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? "application/json" : void 0,
638
+ responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
639
+ // so this is needed as an escape hatch:
640
+ // TODO convert into provider option
641
+ ((_c = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _c : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
642
+ ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
643
+ audioTimestamp: googleOptions.audioTimestamp
514
644
  },
515
- warnings
516
- };
517
- }
518
- default: {
519
- const _exhaustiveCheck = type;
520
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
521
- }
522
- }
523
- }
524
- supportsUrl(url) {
525
- return this.config.isSupportedUrl(url);
645
+ // provider options:
646
+ responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
647
+ thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
648
+ },
649
+ contents,
650
+ systemInstruction,
651
+ safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
652
+ tools: googleTools,
653
+ toolConfig: googleToolConfig,
654
+ cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent
655
+ },
656
+ warnings: [...warnings, ...toolWarnings]
657
+ };
526
658
  }
527
659
  async doGenerate(options) {
528
- var _a, _b, _c, _d, _e;
660
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
529
661
  const { args, warnings } = await this.getArgs(options);
530
662
  const body = JSON.stringify(args);
531
- const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
532
- await (0, import_provider_utils3.resolve)(this.config.headers),
663
+ const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
664
+ await (0, import_provider_utils4.resolve)(this.config.headers),
533
665
  options.headers
534
666
  );
535
667
  const {
536
668
  responseHeaders,
537
669
  value: response,
538
670
  rawValue: rawResponse
539
- } = await (0, import_provider_utils3.postJsonToApi)({
671
+ } = await (0, import_provider_utils4.postJsonToApi)({
540
672
  url: `${this.config.baseURL}/${getModelPath(
541
673
  this.modelId
542
674
  )}:generateContent`,
543
675
  headers: mergedHeaders,
544
676
  body: args,
545
677
  failedResponseHandler: googleFailedResponseHandler,
546
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(responseSchema),
678
+ successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(responseSchema),
547
679
  abortSignal: options.abortSignal,
548
680
  fetch: this.config.fetch
549
681
  });
550
- const { contents: rawPrompt, ...rawSettings } = args;
551
682
  const candidate = response.candidates[0];
552
- const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : candidate.content.parts;
553
- const toolCalls = getToolCallsFromParts({
554
- parts,
555
- // Use candidateParts
556
- generateId: this.config.generateId
557
- });
683
+ const content = [];
684
+ const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
558
685
  const usageMetadata = response.usageMetadata;
686
+ for (const part of parts) {
687
+ if ("text" in part && part.text != null && part.text.length > 0) {
688
+ if (part.thought === true) {
689
+ content.push({ type: "reasoning", text: part.text });
690
+ } else {
691
+ content.push({ type: "text", text: part.text });
692
+ }
693
+ } else if ("functionCall" in part) {
694
+ content.push({
695
+ type: "tool-call",
696
+ toolCallType: "function",
697
+ toolCallId: this.config.generateId(),
698
+ toolName: part.functionCall.name,
699
+ args: JSON.stringify(part.functionCall.args)
700
+ });
701
+ } else if ("inlineData" in part) {
702
+ content.push({
703
+ type: "file",
704
+ data: part.inlineData.data,
705
+ mediaType: part.inlineData.mimeType
706
+ });
707
+ }
708
+ }
709
+ const sources = (_b = extractSources({
710
+ groundingMetadata: candidate.groundingMetadata,
711
+ generateId: this.config.generateId
712
+ })) != null ? _b : [];
713
+ for (const source of sources) {
714
+ content.push(source);
715
+ }
559
716
  return {
560
- text: getTextFromParts(parts),
561
- reasoning: getReasoningDetailsFromParts(parts),
562
- files: (_a = getInlineDataParts(parts)) == null ? void 0 : _a.map((part) => ({
563
- data: part.inlineData.data,
564
- mimeType: part.inlineData.mimeType
565
- })),
566
- toolCalls,
717
+ content,
567
718
  finishReason: mapGoogleGenerativeAIFinishReason({
568
719
  finishReason: candidate.finishReason,
569
- hasToolCalls: toolCalls != null && toolCalls.length > 0
720
+ hasToolCalls: content.some((part) => part.type === "tool-call")
570
721
  }),
571
722
  usage: {
572
- promptTokens: (_b = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _b : NaN,
573
- completionTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _c : NaN
723
+ inputTokens: (_c = usageMetadata == null ? void 0 : usageMetadata.promptTokenCount) != null ? _c : void 0,
724
+ outputTokens: (_d = usageMetadata == null ? void 0 : usageMetadata.candidatesTokenCount) != null ? _d : void 0,
725
+ totalTokens: (_e = usageMetadata == null ? void 0 : usageMetadata.totalTokenCount) != null ? _e : void 0,
726
+ reasoningTokens: (_f = usageMetadata == null ? void 0 : usageMetadata.thoughtsTokenCount) != null ? _f : void 0,
727
+ cachedInputTokens: (_g = usageMetadata == null ? void 0 : usageMetadata.cachedContentTokenCount) != null ? _g : void 0
574
728
  },
575
- rawCall: { rawPrompt, rawSettings },
576
- rawResponse: { headers: responseHeaders, body: rawResponse },
577
729
  warnings,
578
730
  providerMetadata: {
579
731
  google: {
580
- groundingMetadata: (_d = candidate.groundingMetadata) != null ? _d : null,
581
- safetyRatings: (_e = candidate.safetyRatings) != null ? _e : null
732
+ groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
733
+ safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
582
734
  }
583
735
  },
584
- sources: extractSources({
585
- groundingMetadata: candidate.groundingMetadata,
586
- generateId: this.config.generateId
587
- }),
588
- request: { body }
736
+ request: { body },
737
+ response: {
738
+ // TODO timestamp, model id, id
739
+ headers: responseHeaders,
740
+ body: rawResponse
741
+ }
589
742
  };
590
743
  }
591
744
  async doStream(options) {
592
745
  const { args, warnings } = await this.getArgs(options);
593
746
  const body = JSON.stringify(args);
594
- const headers = (0, import_provider_utils3.combineHeaders)(
595
- await (0, import_provider_utils3.resolve)(this.config.headers),
747
+ const headers = (0, import_provider_utils4.combineHeaders)(
748
+ await (0, import_provider_utils4.resolve)(this.config.headers),
596
749
  options.headers
597
750
  );
598
- const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
751
+ const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
599
752
  url: `${this.config.baseURL}/${getModelPath(
600
753
  this.modelId
601
754
  )}:streamGenerateContent?alt=sse`,
602
755
  headers,
603
756
  body: args,
604
757
  failedResponseHandler: googleFailedResponseHandler,
605
- successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(chunkSchema),
758
+ successfulResponseHandler: (0, import_provider_utils4.createEventSourceResponseHandler)(chunkSchema),
606
759
  abortSignal: options.abortSignal,
607
760
  fetch: this.config.fetch
608
761
  });
609
- const { contents: rawPrompt, ...rawSettings } = args;
610
762
  let finishReason = "unknown";
611
- let usage = {
612
- promptTokens: Number.NaN,
613
- completionTokens: Number.NaN
763
+ const usage = {
764
+ inputTokens: void 0,
765
+ outputTokens: void 0,
766
+ totalTokens: void 0
614
767
  };
615
768
  let providerMetadata = void 0;
616
769
  const generateId2 = this.config.generateId;
@@ -618,8 +771,11 @@ var GoogleGenerativeAILanguageModel = class {
618
771
  return {
619
772
  stream: response.pipeThrough(
620
773
  new TransformStream({
774
+ start(controller) {
775
+ controller.enqueue({ type: "stream-start", warnings });
776
+ },
621
777
  transform(chunk, controller) {
622
- var _a, _b, _c, _d, _e, _f;
778
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
623
779
  if (!chunk.success) {
624
780
  controller.enqueue({ type: "error", error: chunk.error });
625
781
  return;
@@ -627,33 +783,26 @@ var GoogleGenerativeAILanguageModel = class {
627
783
  const value = chunk.value;
628
784
  const usageMetadata = value.usageMetadata;
629
785
  if (usageMetadata != null) {
630
- usage = {
631
- promptTokens: (_a = usageMetadata.promptTokenCount) != null ? _a : NaN,
632
- completionTokens: (_b = usageMetadata.candidatesTokenCount) != null ? _b : NaN
633
- };
786
+ usage.inputTokens = (_a = usageMetadata.promptTokenCount) != null ? _a : void 0;
787
+ usage.outputTokens = (_b = usageMetadata.candidatesTokenCount) != null ? _b : void 0;
788
+ usage.totalTokens = (_c = usageMetadata.totalTokenCount) != null ? _c : void 0;
789
+ usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
790
+ usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
634
791
  }
635
- const candidate = (_c = value.candidates) == null ? void 0 : _c[0];
792
+ const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
636
793
  if (candidate == null) {
637
794
  return;
638
795
  }
639
796
  const content = candidate.content;
640
797
  if (content != null) {
641
- const deltaText = getTextFromParts(content.parts);
642
- if (deltaText != null) {
643
- controller.enqueue({
644
- type: "text-delta",
645
- textDelta: deltaText
646
- });
647
- }
648
- const reasoningDeltaText = getReasoningDetailsFromParts(
649
- content.parts
650
- );
651
- if (reasoningDeltaText != null) {
652
- for (const part of reasoningDeltaText) {
653
- controller.enqueue({
654
- type: "reasoning",
655
- textDelta: part.text
656
- });
798
+ const parts = (_g = content.parts) != null ? _g : [];
799
+ for (const part of parts) {
800
+ if ("text" in part && part.text != null && part.text.length > 0) {
801
+ if (part.thought === true) {
802
+ controller.enqueue({ type: "reasoning", text: part.text });
803
+ } else {
804
+ controller.enqueue({ type: "text", text: part.text });
805
+ }
657
806
  }
658
807
  }
659
808
  const inlineDataParts = getInlineDataParts(content.parts);
@@ -661,7 +810,7 @@ var GoogleGenerativeAILanguageModel = class {
661
810
  for (const part of inlineDataParts) {
662
811
  controller.enqueue({
663
812
  type: "file",
664
- mimeType: part.inlineData.mimeType,
813
+ mediaType: part.inlineData.mimeType,
665
814
  data: part.inlineData.data
666
815
  });
667
816
  }
@@ -695,17 +844,17 @@ var GoogleGenerativeAILanguageModel = class {
695
844
  finishReason: candidate.finishReason,
696
845
  hasToolCalls
697
846
  });
698
- const sources = (_d = extractSources({
847
+ const sources = (_h = extractSources({
699
848
  groundingMetadata: candidate.groundingMetadata,
700
849
  generateId: generateId2
701
- })) != null ? _d : [];
850
+ })) != null ? _h : [];
702
851
  for (const source of sources) {
703
- controller.enqueue({ type: "source", source });
852
+ controller.enqueue(source);
704
853
  }
705
854
  providerMetadata = {
706
855
  google: {
707
- groundingMetadata: (_e = candidate.groundingMetadata) != null ? _e : null,
708
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
856
+ groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
857
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
709
858
  }
710
859
  };
711
860
  }
@@ -720,9 +869,7 @@ var GoogleGenerativeAILanguageModel = class {
720
869
  }
721
870
  })
722
871
  ),
723
- rawCall: { rawPrompt, rawSettings },
724
- rawResponse: { headers: responseHeaders },
725
- warnings,
872
+ response: { headers: responseHeaders },
726
873
  request: { body }
727
874
  };
728
875
  }
@@ -735,24 +882,13 @@ function getToolCallsFromParts({
735
882
  (part) => "functionCall" in part
736
883
  );
737
884
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
885
+ type: "tool-call",
738
886
  toolCallType: "function",
739
887
  toolCallId: generateId2(),
740
888
  toolName: part.functionCall.name,
741
889
  args: JSON.stringify(part.functionCall.args)
742
890
  }));
743
891
  }
744
- function getTextFromParts(parts) {
745
- const textParts = parts == null ? void 0 : parts.filter(
746
- (part) => "text" in part && part.thought !== true
747
- );
748
- return textParts == null || textParts.length === 0 ? void 0 : textParts.map((part) => part.text).join("");
749
- }
750
- function getReasoningDetailsFromParts(parts) {
751
- const reasoningParts = parts == null ? void 0 : parts.filter(
752
- (part) => "text" in part && part.thought === true && part.text != null
753
- );
754
- return reasoningParts == null || reasoningParts.length === 0 ? void 0 : reasoningParts.map((part) => ({ type: "text", text: part.text }));
755
- }
756
892
  function getInlineDataParts(parts) {
757
893
  return parts == null ? void 0 : parts.filter(
758
894
  (part) => "inlineData" in part
@@ -766,182 +902,104 @@ function extractSources({
766
902
  return (_a = groundingMetadata == null ? void 0 : groundingMetadata.groundingChunks) == null ? void 0 : _a.filter(
767
903
  (chunk) => chunk.web != null
768
904
  ).map((chunk) => ({
905
+ type: "source",
769
906
  sourceType: "url",
770
907
  id: generateId2(),
771
908
  url: chunk.web.uri,
772
909
  title: chunk.web.title
773
910
  }));
774
911
  }
775
- var contentSchema = import_zod2.z.object({
776
- parts: import_zod2.z.array(
777
- import_zod2.z.union([
912
+ var contentSchema = import_zod5.z.object({
913
+ parts: import_zod5.z.array(
914
+ import_zod5.z.union([
778
915
  // note: order matters since text can be fully empty
779
- import_zod2.z.object({
780
- functionCall: import_zod2.z.object({
781
- name: import_zod2.z.string(),
782
- args: import_zod2.z.unknown()
916
+ import_zod5.z.object({
917
+ functionCall: import_zod5.z.object({
918
+ name: import_zod5.z.string(),
919
+ args: import_zod5.z.unknown()
783
920
  })
784
921
  }),
785
- import_zod2.z.object({
786
- inlineData: import_zod2.z.object({
787
- mimeType: import_zod2.z.string(),
788
- data: import_zod2.z.string()
922
+ import_zod5.z.object({
923
+ inlineData: import_zod5.z.object({
924
+ mimeType: import_zod5.z.string(),
925
+ data: import_zod5.z.string()
789
926
  })
790
927
  }),
791
- import_zod2.z.object({
792
- text: import_zod2.z.string().nullish(),
793
- thought: import_zod2.z.boolean().nullish()
928
+ import_zod5.z.object({
929
+ text: import_zod5.z.string().nullish(),
930
+ thought: import_zod5.z.boolean().nullish()
794
931
  })
795
932
  ])
796
933
  ).nullish()
797
934
  });
798
- var groundingChunkSchema = import_zod2.z.object({
799
- web: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish(),
800
- retrievedContext: import_zod2.z.object({ uri: import_zod2.z.string(), title: import_zod2.z.string() }).nullish()
935
+ var groundingChunkSchema = import_zod5.z.object({
936
+ web: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish(),
937
+ retrievedContext: import_zod5.z.object({ uri: import_zod5.z.string(), title: import_zod5.z.string() }).nullish()
801
938
  });
802
- var groundingMetadataSchema = import_zod2.z.object({
803
- webSearchQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
804
- retrievalQueries: import_zod2.z.array(import_zod2.z.string()).nullish(),
805
- searchEntryPoint: import_zod2.z.object({ renderedContent: import_zod2.z.string() }).nullish(),
806
- groundingChunks: import_zod2.z.array(groundingChunkSchema).nullish(),
807
- groundingSupports: import_zod2.z.array(
808
- import_zod2.z.object({
809
- segment: import_zod2.z.object({
810
- startIndex: import_zod2.z.number().nullish(),
811
- endIndex: import_zod2.z.number().nullish(),
812
- text: import_zod2.z.string().nullish()
939
+ var groundingMetadataSchema = import_zod5.z.object({
940
+ webSearchQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
941
+ retrievalQueries: import_zod5.z.array(import_zod5.z.string()).nullish(),
942
+ searchEntryPoint: import_zod5.z.object({ renderedContent: import_zod5.z.string() }).nullish(),
943
+ groundingChunks: import_zod5.z.array(groundingChunkSchema).nullish(),
944
+ groundingSupports: import_zod5.z.array(
945
+ import_zod5.z.object({
946
+ segment: import_zod5.z.object({
947
+ startIndex: import_zod5.z.number().nullish(),
948
+ endIndex: import_zod5.z.number().nullish(),
949
+ text: import_zod5.z.string().nullish()
813
950
  }),
814
- segment_text: import_zod2.z.string().nullish(),
815
- groundingChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
816
- supportChunkIndices: import_zod2.z.array(import_zod2.z.number()).nullish(),
817
- confidenceScores: import_zod2.z.array(import_zod2.z.number()).nullish(),
818
- confidenceScore: import_zod2.z.array(import_zod2.z.number()).nullish()
951
+ segment_text: import_zod5.z.string().nullish(),
952
+ groundingChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
953
+ supportChunkIndices: import_zod5.z.array(import_zod5.z.number()).nullish(),
954
+ confidenceScores: import_zod5.z.array(import_zod5.z.number()).nullish(),
955
+ confidenceScore: import_zod5.z.array(import_zod5.z.number()).nullish()
819
956
  })
820
957
  ).nullish(),
821
- retrievalMetadata: import_zod2.z.union([
822
- import_zod2.z.object({
823
- webDynamicRetrievalScore: import_zod2.z.number()
958
+ retrievalMetadata: import_zod5.z.union([
959
+ import_zod5.z.object({
960
+ webDynamicRetrievalScore: import_zod5.z.number()
824
961
  }),
825
- import_zod2.z.object({})
962
+ import_zod5.z.object({})
826
963
  ]).nullish()
827
964
  });
828
- var safetyRatingSchema = import_zod2.z.object({
829
- category: import_zod2.z.string().nullish(),
830
- probability: import_zod2.z.string().nullish(),
831
- probabilityScore: import_zod2.z.number().nullish(),
832
- severity: import_zod2.z.string().nullish(),
833
- severityScore: import_zod2.z.number().nullish(),
834
- blocked: import_zod2.z.boolean().nullish()
965
+ var safetyRatingSchema = import_zod5.z.object({
966
+ category: import_zod5.z.string().nullish(),
967
+ probability: import_zod5.z.string().nullish(),
968
+ probabilityScore: import_zod5.z.number().nullish(),
969
+ severity: import_zod5.z.string().nullish(),
970
+ severityScore: import_zod5.z.number().nullish(),
971
+ blocked: import_zod5.z.boolean().nullish()
835
972
  });
836
- var responseSchema = import_zod2.z.object({
837
- candidates: import_zod2.z.array(
838
- import_zod2.z.object({
839
- content: contentSchema.nullish().or(import_zod2.z.object({}).strict()),
840
- finishReason: import_zod2.z.string().nullish(),
841
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
973
+ var usageSchema = import_zod5.z.object({
974
+ cachedContentTokenCount: import_zod5.z.number().nullish(),
975
+ thoughtsTokenCount: import_zod5.z.number().nullish(),
976
+ promptTokenCount: import_zod5.z.number().nullish(),
977
+ candidatesTokenCount: import_zod5.z.number().nullish(),
978
+ totalTokenCount: import_zod5.z.number().nullish()
979
+ });
980
+ var responseSchema = import_zod5.z.object({
981
+ candidates: import_zod5.z.array(
982
+ import_zod5.z.object({
983
+ content: contentSchema.nullish().or(import_zod5.z.object({}).strict()),
984
+ finishReason: import_zod5.z.string().nullish(),
985
+ safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
842
986
  groundingMetadata: groundingMetadataSchema.nullish()
843
987
  })
844
988
  ),
845
- usageMetadata: import_zod2.z.object({
846
- promptTokenCount: import_zod2.z.number().nullish(),
847
- candidatesTokenCount: import_zod2.z.number().nullish(),
848
- totalTokenCount: import_zod2.z.number().nullish()
849
- }).nullish()
989
+ usageMetadata: usageSchema.nullish()
850
990
  });
851
- var chunkSchema = import_zod2.z.object({
852
- candidates: import_zod2.z.array(
853
- import_zod2.z.object({
991
+ var chunkSchema = import_zod5.z.object({
992
+ candidates: import_zod5.z.array(
993
+ import_zod5.z.object({
854
994
  content: contentSchema.nullish(),
855
- finishReason: import_zod2.z.string().nullish(),
856
- safetyRatings: import_zod2.z.array(safetyRatingSchema).nullish(),
995
+ finishReason: import_zod5.z.string().nullish(),
996
+ safetyRatings: import_zod5.z.array(safetyRatingSchema).nullish(),
857
997
  groundingMetadata: groundingMetadataSchema.nullish()
858
998
  })
859
999
  ).nullish(),
860
- usageMetadata: import_zod2.z.object({
861
- promptTokenCount: import_zod2.z.number().nullish(),
862
- candidatesTokenCount: import_zod2.z.number().nullish(),
863
- totalTokenCount: import_zod2.z.number().nullish()
864
- }).nullish()
865
- });
866
- var googleGenerativeAIProviderOptionsSchema = import_zod2.z.object({
867
- responseModalities: import_zod2.z.array(import_zod2.z.enum(["TEXT", "IMAGE"])).nullish(),
868
- thinkingConfig: import_zod2.z.object({
869
- thinkingBudget: import_zod2.z.number().nullish(),
870
- includeThoughts: import_zod2.z.boolean().nullish()
871
- }).nullish()
872
- });
873
-
874
- // src/google-generative-ai-embedding-model.ts
875
- var import_provider3 = require("@ai-sdk/provider");
876
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
877
- var import_zod3 = require("zod");
878
- var GoogleGenerativeAIEmbeddingModel = class {
879
- constructor(modelId, settings, config) {
880
- this.specificationVersion = "v1";
881
- this.modelId = modelId;
882
- this.settings = settings;
883
- this.config = config;
884
- }
885
- get provider() {
886
- return this.config.provider;
887
- }
888
- get maxEmbeddingsPerCall() {
889
- return 2048;
890
- }
891
- get supportsParallelCalls() {
892
- return true;
893
- }
894
- async doEmbed({
895
- values,
896
- headers,
897
- abortSignal
898
- }) {
899
- if (values.length > this.maxEmbeddingsPerCall) {
900
- throw new import_provider3.TooManyEmbeddingValuesForCallError({
901
- provider: this.provider,
902
- modelId: this.modelId,
903
- maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
904
- values
905
- });
906
- }
907
- const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
908
- await (0, import_provider_utils4.resolve)(this.config.headers),
909
- headers
910
- );
911
- const { responseHeaders, value: response } = await (0, import_provider_utils4.postJsonToApi)({
912
- url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
913
- headers: mergedHeaders,
914
- body: {
915
- requests: values.map((value) => ({
916
- model: `models/${this.modelId}`,
917
- content: { role: "user", parts: [{ text: value }] },
918
- outputDimensionality: this.settings.outputDimensionality,
919
- taskType: this.settings.taskType
920
- }))
921
- },
922
- failedResponseHandler: googleFailedResponseHandler,
923
- successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
924
- googleGenerativeAITextEmbeddingResponseSchema
925
- ),
926
- abortSignal,
927
- fetch: this.config.fetch
928
- });
929
- return {
930
- embeddings: response.embeddings.map((item) => item.values),
931
- usage: void 0,
932
- rawResponse: { headers: responseHeaders }
933
- };
934
- }
935
- };
936
- var googleGenerativeAITextEmbeddingResponseSchema = import_zod3.z.object({
937
- embeddings: import_zod3.z.array(import_zod3.z.object({ values: import_zod3.z.array(import_zod3.z.number()) }))
1000
+ usageMetadata: usageSchema.nullish()
938
1001
  });
939
1002
 
940
- // src/google-supported-file-url.ts
941
- function isSupportedFileUrl(url) {
942
- return url.toString().startsWith("https://generativelanguage.googleapis.com/v1beta/files/");
943
- }
944
-
945
1003
  // src/google-provider.ts
946
1004
  function createGoogleGenerativeAI(options = {}) {
947
1005
  var _a;
@@ -954,30 +1012,36 @@ function createGoogleGenerativeAI(options = {}) {
954
1012
  }),
955
1013
  ...options.headers
956
1014
  });
957
- const createChatModel = (modelId, settings = {}) => {
1015
+ const createChatModel = (modelId) => {
958
1016
  var _a2;
959
- return new GoogleGenerativeAILanguageModel(modelId, settings, {
1017
+ return new GoogleGenerativeAILanguageModel(modelId, {
960
1018
  provider: "google.generative-ai",
961
1019
  baseURL,
962
1020
  headers: getHeaders,
963
1021
  generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils5.generateId,
964
- isSupportedUrl: isSupportedFileUrl,
1022
+ supportedUrls: () => ({
1023
+ "*": [
1024
+ // Only allow requests to the Google Generative Language "files" endpoint
1025
+ // e.g. https://generativelanguage.googleapis.com/v1beta/files/...
1026
+ new RegExp(`^${baseURL}/files/.*$`)
1027
+ ]
1028
+ }),
965
1029
  fetch: options.fetch
966
1030
  });
967
1031
  };
968
- const createEmbeddingModel = (modelId, settings = {}) => new GoogleGenerativeAIEmbeddingModel(modelId, settings, {
1032
+ const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
969
1033
  provider: "google.generative-ai",
970
1034
  baseURL,
971
1035
  headers: getHeaders,
972
1036
  fetch: options.fetch
973
1037
  });
974
- const provider = function(modelId, settings) {
1038
+ const provider = function(modelId) {
975
1039
  if (new.target) {
976
1040
  throw new Error(
977
1041
  "The Google Generative AI model function cannot be called with the new keyword."
978
1042
  );
979
1043
  }
980
- return createChatModel(modelId, settings);
1044
+ return createChatModel(modelId);
981
1045
  };
982
1046
  provider.languageModel = createChatModel;
983
1047
  provider.chat = createChatModel;
@@ -985,6 +1049,9 @@ function createGoogleGenerativeAI(options = {}) {
985
1049
  provider.embedding = createEmbeddingModel;
986
1050
  provider.textEmbedding = createEmbeddingModel;
987
1051
  provider.textEmbeddingModel = createEmbeddingModel;
1052
+ provider.imageModel = (modelId) => {
1053
+ throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
1054
+ };
988
1055
  return provider;
989
1056
  }
990
1057
  var google = createGoogleGenerativeAI();
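
Note on the embedding changes above: the per-model settings object is gone, and GoogleGenerativeAIEmbeddingModel.doEmbed now reads taskType and outputDimensionality from providerOptions under the "google" key (see googleGenerativeAIEmbeddingProviderOptions in this diff). A minimal usage sketch follows, assuming the matching v5-alpha "ai" package forwards providerOptions to doEmbed the way this build expects; the model id is only an example.

import { embedMany } from 'ai';
import { google } from '@ai-sdk/google';

const { embeddings } = await embedMany({
  model: google.textEmbedding('text-embedding-004'), // example model id
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
  providerOptions: {
    // options that were constructor settings on the embedding model in 1.x
    google: {
      taskType: 'SEMANTIC_SIMILARITY',
      outputDimensionality: 512, // excess dimensions are truncated from the end
    },
  },
});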
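
Likewise for the language model: useSearchGrounding, dynamicRetrievalConfig, safetySettings, cachedContent, structuredOutputs and audioTimestamp moved from constructor settings into providerOptions (parsed by googleGenerativeAIProviderOptions above), while grounding metadata and safety ratings are still returned under providerMetadata.google. A hedged sketch, again assuming the v5-alpha "ai" helpers pass providerOptions through unchanged; the model id and prompt are illustrative.

import { generateText } from 'ai';
import { google } from '@ai-sdk/google';

const { text, providerMetadata } = await generateText({
  model: google('gemini-2.0-flash'), // models are now created without a settings argument
  prompt: 'Summarize the latest Gemini API updates.',
  providerOptions: {
    google: {
      useSearchGrounding: true, // was a 1.x model setting
      safetySettings: [
        {
          category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
          threshold: 'BLOCK_ONLY_HIGH',
        },
      ],
    },
  },
});

console.log(text);
// grounding metadata, when present, is exposed via provider metadata:
console.log(providerMetadata?.google?.groundingMetadata);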