@ai-sdk/google 4.0.0-beta.9 → 4.0.0-canary.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +354 -4
  2. package/README.md +6 -4
  3. package/dist/index.d.ts +97 -54
  4. package/dist/index.js +1643 -575
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +62 -22
  7. package/dist/internal/index.js +1261 -449
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/{15-google-generative-ai.mdx → 15-google.mdx} +46 -40
  10. package/package.json +13 -14
  11. package/src/{convert-google-generative-ai-usage.ts → convert-google-usage.ts} +11 -4
  12. package/src/convert-json-schema-to-openapi-schema.ts +1 -1
  13. package/src/convert-to-google-messages.ts +585 -0
  14. package/src/{google-generative-ai-embedding-options.ts → google-embedding-model-options.ts} +2 -2
  15. package/src/{google-generative-ai-embedding-model.ts → google-embedding-model.ts} +28 -15
  16. package/src/google-error.ts +1 -1
  17. package/src/google-files.ts +225 -0
  18. package/src/google-image-model-options.ts +23 -0
  19. package/src/{google-generative-ai-image-model.ts → google-image-model.ts} +61 -49
  20. package/src/{google-generative-ai-image-settings.ts → google-image-settings.ts} +2 -2
  21. package/src/google-json-accumulator.ts +336 -0
  22. package/src/{google-generative-ai-options.ts → google-language-model-options.ts} +32 -5
  23. package/src/{google-generative-ai-language-model.ts → google-language-model.ts} +586 -191
  24. package/src/google-prepare-tools.ts +68 -8
  25. package/src/google-prompt.ts +82 -0
  26. package/src/google-provider.ts +56 -47
  27. package/src/google-video-model-options.ts +43 -0
  28. package/src/{google-generative-ai-video-model.ts → google-video-model.ts} +11 -50
  29. package/src/{google-generative-ai-video-settings.ts → google-video-settings.ts} +2 -1
  30. package/src/index.ts +28 -9
  31. package/src/internal/index.ts +2 -2
  32. package/src/{map-google-generative-ai-finish-reason.ts → map-google-finish-reason.ts} +2 -2
  33. package/src/tool/code-execution.ts +2 -2
  34. package/src/tool/enterprise-web-search.ts +9 -3
  35. package/src/tool/file-search.ts +5 -7
  36. package/src/tool/google-maps.ts +3 -2
  37. package/src/tool/google-search.ts +10 -11
  38. package/src/tool/url-context.ts +4 -2
  39. package/src/tool/vertex-rag-store.ts +9 -6
  40. package/dist/index.d.mts +0 -384
  41. package/dist/index.mjs +0 -2519
  42. package/dist/index.mjs.map +0 -1
  43. package/dist/internal/index.d.mts +0 -287
  44. package/dist/internal/index.mjs +0 -1708
  45. package/dist/internal/index.mjs.map +0 -1
  46. package/src/convert-to-google-generative-ai-messages.ts +0 -239
  47. package/src/google-generative-ai-prompt.ts +0 -47
@@ -0,0 +1,585 @@
1
+ import {
2
+ UnsupportedFunctionalityError,
3
+ type LanguageModelV4Prompt,
4
+ type LanguageModelV4ToolResultOutput,
5
+ } from '@ai-sdk/provider';
6
+ import {
7
+ convertToBase64,
8
+ getTopLevelMediaType,
9
+ isFullMediaType,
10
+ resolveFullMediaType,
11
+ resolveProviderReference,
12
+ } from '@ai-sdk/provider-utils';
13
+ import type {
14
+ GoogleContent,
15
+ GoogleContentPart,
16
+ GoogleFunctionResponsePart,
17
+ GooglePrompt,
18
+ } from './google-prompt';
19
+
20
+ const dataUrlRegex = /^data:([^;,]+);base64,(.+)$/s;
21
+
22
+ function parseBase64DataUrl(
23
+ value: string,
24
+ ): { mediaType: string; data: string } | undefined {
25
+ const match = dataUrlRegex.exec(value);
26
+ if (match == null) {
27
+ return undefined;
28
+ }
29
+
30
+ return {
31
+ mediaType: match[1],
32
+ data: match[2],
33
+ };
34
+ }
35
+
36
+ function convertUrlToolResultPart(
37
+ url: string,
38
+ ): GoogleFunctionResponsePart | undefined {
39
+ // Per https://ai.google.dev/api/caching#FunctionResponsePart, only inline data is supported.
40
+ // https://docs.cloud.google.com/vertex-ai/generative-ai/docs/model-reference/function-calling#functionresponsepart suggests that this
41
+ // may be different for Vertex, but this needs to be confirmed and further tested for both APIs.
42
+ const parsedDataUrl = parseBase64DataUrl(url);
43
+ if (parsedDataUrl == null) {
44
+ return undefined;
45
+ }
46
+
47
+ return {
48
+ inlineData: {
49
+ mimeType: parsedDataUrl.mediaType,
50
+ data: parsedDataUrl.data,
51
+ },
52
+ };
53
+ }
54
+
55
+ /*
56
+ * Appends tool result content parts to the message using the functionResponse
57
+ * format with support for multimodal parts (e.g. inline images/files alongside
58
+ * text). This format is supported by Gemini 3+ models.
59
+ */
60
+ function appendToolResultParts(
61
+ parts: GoogleContentPart[],
62
+ toolName: string,
63
+ outputValue: Extract<
64
+ LanguageModelV4ToolResultOutput,
65
+ { type: 'content' }
66
+ >['value'],
67
+ ): void {
68
+ const functionResponseParts: GoogleFunctionResponsePart[] = [];
69
+ const responseTextParts: string[] = [];
70
+
71
+ for (const contentPart of outputValue) {
72
+ switch (contentPart.type) {
73
+ case 'text': {
74
+ responseTextParts.push(contentPart.text);
75
+ break;
76
+ }
77
+ case 'file': {
78
+ if (contentPart.data.type === 'data') {
79
+ functionResponseParts.push({
80
+ inlineData: {
81
+ mimeType: resolveFullMediaType({ part: contentPart }),
82
+ data: convertToBase64(contentPart.data.data),
83
+ },
84
+ });
85
+ } else if (contentPart.data.type === 'url') {
86
+ const functionResponsePart = convertUrlToolResultPart(
87
+ contentPart.data.url.toString(),
88
+ );
89
+
90
+ if (functionResponsePart != null) {
91
+ functionResponseParts.push(functionResponsePart);
92
+ } else {
93
+ responseTextParts.push(JSON.stringify(contentPart));
94
+ }
95
+ } else {
96
+ responseTextParts.push(JSON.stringify(contentPart));
97
+ }
98
+ break;
99
+ }
100
+ default: {
101
+ responseTextParts.push(JSON.stringify(contentPart));
102
+ break;
103
+ }
104
+ }
105
+ }
106
+
107
+ parts.push({
108
+ functionResponse: {
109
+ name: toolName,
110
+ response: {
111
+ name: toolName,
112
+ content:
113
+ responseTextParts.length > 0
114
+ ? responseTextParts.join('\n')
115
+ : 'Tool executed successfully.',
116
+ },
117
+ ...(functionResponseParts.length > 0
118
+ ? { parts: functionResponseParts }
119
+ : {}),
120
+ },
121
+ });
122
+ }
123
+
124
+ /*
125
+ * Appends tool result content parts using a legacy format for pre-Gemini 3
126
+ * models that do not support multimodal parts within functionResponse. Instead,
127
+ * non-text content like images is sent as separate top-level inlineData parts.
128
+ */
129
+ function appendLegacyToolResultParts(
130
+ parts: GoogleContentPart[],
131
+ toolName: string,
132
+ outputValue: Extract<
133
+ LanguageModelV4ToolResultOutput,
134
+ { type: 'content' }
135
+ >['value'],
136
+ ): void {
137
+ for (const contentPart of outputValue) {
138
+ switch (contentPart.type) {
139
+ case 'text':
140
+ parts.push({
141
+ functionResponse: {
142
+ name: toolName,
143
+ response: {
144
+ name: toolName,
145
+ content: contentPart.text,
146
+ },
147
+ },
148
+ });
149
+ break;
150
+ case 'file': {
151
+ if (
152
+ contentPart.data.type === 'data' &&
153
+ getTopLevelMediaType(contentPart.mediaType) === 'image'
154
+ ) {
155
+ parts.push(
156
+ {
157
+ inlineData: {
158
+ mimeType: resolveFullMediaType({ part: contentPart }),
159
+ data: convertToBase64(contentPart.data.data),
160
+ },
161
+ },
162
+ {
163
+ text: 'Tool executed successfully and returned this image as a response',
164
+ },
165
+ );
166
+ } else {
167
+ parts.push({ text: JSON.stringify(contentPart) });
168
+ }
169
+ break;
170
+ }
171
+ default:
172
+ parts.push({ text: JSON.stringify(contentPart) });
173
+ break;
174
+ }
175
+ }
176
+ }
177
+
178
+ export function convertToGoogleMessages(
179
+ prompt: LanguageModelV4Prompt,
180
+ options?: {
181
+ isGemmaModel?: boolean;
182
+ /**
183
+ * Names to look up under `providerOptions` when reading per-part metadata
184
+ * (e.g. thought signatures). Tried in order; first match wins. For the
185
+ * Vertex provider this is `['googleVertex', 'vertex']` (new key first,
186
+ * legacy key as fallback) and for the Google provider it is `['google']`.
187
+ */
188
+ providerOptionsNames?: readonly string[];
189
+ supportsFunctionResponseParts?: boolean;
190
+ },
191
+ ): GooglePrompt {
192
+ const systemInstructionParts: Array<{ text: string }> = [];
193
+ const contents: Array<GoogleContent> = [];
194
+ let systemMessagesAllowed = true;
195
+ const isGemmaModel = options?.isGemmaModel ?? false;
196
+ const providerOptionsNames = options?.providerOptionsNames ?? ['google'];
197
+ const isVertexLike = !providerOptionsNames.includes('google');
198
+ const supportsFunctionResponseParts =
199
+ options?.supportsFunctionResponseParts ?? true;
200
+
201
+ const readProviderOpts = (part: {
202
+ providerOptions?: Record<string, unknown> | undefined;
203
+ }): Record<string, unknown> | undefined => {
204
+ for (const name of providerOptionsNames) {
205
+ const v = part.providerOptions?.[name];
206
+ if (v != null) return v as Record<string, unknown>;
207
+ }
208
+ // Cross-namespace fallback (gateway interop): Vertex providers may receive
209
+ // metadata under `google`, and the Google provider may receive metadata
210
+ // under `googleVertex`/`vertex`.
211
+ if (isVertexLike) {
212
+ return part.providerOptions?.google as
213
+ | Record<string, unknown>
214
+ | undefined;
215
+ }
216
+ return (part.providerOptions?.googleVertex ??
217
+ part.providerOptions?.vertex) as Record<string, unknown> | undefined;
218
+ };
219
+
220
+ for (const { role, content } of prompt) {
221
+ switch (role) {
222
+ case 'system': {
223
+ if (!systemMessagesAllowed) {
224
+ throw new UnsupportedFunctionalityError({
225
+ functionality:
226
+ 'system messages are only supported at the beginning of the conversation',
227
+ });
228
+ }
229
+
230
+ systemInstructionParts.push({ text: content });
231
+ break;
232
+ }
233
+
234
+ case 'user': {
235
+ systemMessagesAllowed = false;
236
+
237
+ const parts: GoogleContentPart[] = [];
238
+
239
+ for (const part of content) {
240
+ switch (part.type) {
241
+ case 'text': {
242
+ parts.push({ text: part.text });
243
+ break;
244
+ }
245
+
246
+ case 'file': {
247
+ switch (part.data.type) {
248
+ case 'url': {
249
+ parts.push({
250
+ fileData: {
251
+ mimeType: resolveFullMediaType({ part }),
252
+ fileUri: part.data.url.toString(),
253
+ },
254
+ });
255
+ break;
256
+ }
257
+ case 'reference': {
258
+ if (isVertexLike) {
259
+ throw new UnsupportedFunctionalityError({
260
+ functionality: 'file parts with provider references',
261
+ });
262
+ }
263
+
264
+ parts.push({
265
+ fileData: {
266
+ mimeType: resolveFullMediaType({ part }),
267
+ fileUri: resolveProviderReference({
268
+ reference: part.data.reference,
269
+ provider: 'google',
270
+ }),
271
+ },
272
+ });
273
+ break;
274
+ }
275
+ case 'text': {
276
+ parts.push({
277
+ inlineData: {
278
+ mimeType: isFullMediaType(part.mediaType)
279
+ ? part.mediaType
280
+ : 'text/plain',
281
+ data: convertToBase64(
282
+ new TextEncoder().encode(part.data.text),
283
+ ),
284
+ },
285
+ });
286
+ break;
287
+ }
288
+ case 'data': {
289
+ parts.push({
290
+ inlineData: {
291
+ mimeType: resolveFullMediaType({ part }),
292
+ data: convertToBase64(part.data.data),
293
+ },
294
+ });
295
+ break;
296
+ }
297
+ }
298
+
299
+ break;
300
+ }
301
+ }
302
+ }
303
+
304
+ contents.push({ role: 'user', parts });
305
+ break;
306
+ }
307
+
308
+ case 'assistant': {
309
+ systemMessagesAllowed = false;
310
+
311
+ contents.push({
312
+ role: 'model',
313
+ parts: content
314
+ .map(part => {
315
+ const providerOpts = readProviderOpts(part);
316
+ const thoughtSignature =
317
+ providerOpts?.thoughtSignature != null
318
+ ? String(providerOpts.thoughtSignature)
319
+ : undefined;
320
+
321
+ switch (part.type) {
322
+ case 'text': {
323
+ return part.text.length === 0
324
+ ? undefined
325
+ : {
326
+ text: part.text,
327
+ thoughtSignature,
328
+ };
329
+ }
330
+
331
+ case 'reasoning': {
332
+ return part.text.length === 0
333
+ ? undefined
334
+ : {
335
+ text: part.text,
336
+ thought: true,
337
+ thoughtSignature,
338
+ };
339
+ }
340
+
341
+ case 'reasoning-file': {
342
+ switch (part.data.type) {
343
+ case 'url': {
344
+ throw new UnsupportedFunctionalityError({
345
+ functionality:
346
+ 'File data URLs in assistant messages are not supported',
347
+ });
348
+ }
349
+ case 'data': {
350
+ return {
351
+ inlineData: {
352
+ mimeType: part.mediaType,
353
+ data: convertToBase64(part.data.data),
354
+ },
355
+ thought: true,
356
+ thoughtSignature,
357
+ };
358
+ }
359
+ }
360
+ break;
361
+ }
362
+
363
+ case 'file': {
364
+ switch (part.data.type) {
365
+ case 'url': {
366
+ throw new UnsupportedFunctionalityError({
367
+ functionality:
368
+ 'File data URLs in assistant messages are not supported',
369
+ });
370
+ }
371
+ case 'reference': {
372
+ if (isVertexLike) {
373
+ throw new UnsupportedFunctionalityError({
374
+ functionality: 'file parts with provider references',
375
+ });
376
+ }
377
+
378
+ return {
379
+ fileData: {
380
+ mimeType: part.mediaType,
381
+ fileUri: resolveProviderReference({
382
+ reference: part.data.reference,
383
+ provider: 'google',
384
+ }),
385
+ },
386
+ ...(providerOpts?.thought === true
387
+ ? { thought: true }
388
+ : {}),
389
+ thoughtSignature,
390
+ };
391
+ }
392
+ case 'text': {
393
+ return {
394
+ inlineData: {
395
+ mimeType: isFullMediaType(part.mediaType)
396
+ ? part.mediaType
397
+ : 'text/plain',
398
+ data: convertToBase64(
399
+ new TextEncoder().encode(part.data.text),
400
+ ),
401
+ },
402
+ ...(providerOpts?.thought === true
403
+ ? { thought: true }
404
+ : {}),
405
+ thoughtSignature,
406
+ };
407
+ }
408
+ case 'data': {
409
+ return {
410
+ inlineData: {
411
+ mimeType: part.mediaType,
412
+ data: convertToBase64(part.data.data),
413
+ },
414
+ ...(providerOpts?.thought === true
415
+ ? { thought: true }
416
+ : {}),
417
+ thoughtSignature,
418
+ };
419
+ }
420
+ }
421
+ break;
422
+ }
423
+
424
+ case 'tool-call': {
425
+ const serverToolCallId =
426
+ providerOpts?.serverToolCallId != null
427
+ ? String(providerOpts.serverToolCallId)
428
+ : undefined;
429
+ const serverToolType =
430
+ providerOpts?.serverToolType != null
431
+ ? String(providerOpts.serverToolType)
432
+ : undefined;
433
+
434
+ if (serverToolCallId && serverToolType) {
435
+ return {
436
+ toolCall: {
437
+ toolType: serverToolType,
438
+ args:
439
+ typeof part.input === 'string'
440
+ ? JSON.parse(part.input)
441
+ : part.input,
442
+ id: serverToolCallId,
443
+ },
444
+ thoughtSignature,
445
+ };
446
+ }
447
+
448
+ return {
449
+ functionCall: {
450
+ name: part.toolName,
451
+ args: part.input,
452
+ },
453
+ thoughtSignature,
454
+ };
455
+ }
456
+
457
+ case 'tool-result': {
458
+ const serverToolCallId =
459
+ providerOpts?.serverToolCallId != null
460
+ ? String(providerOpts.serverToolCallId)
461
+ : undefined;
462
+ const serverToolType =
463
+ providerOpts?.serverToolType != null
464
+ ? String(providerOpts.serverToolType)
465
+ : undefined;
466
+
467
+ if (serverToolCallId && serverToolType) {
468
+ return {
469
+ toolResponse: {
470
+ toolType: serverToolType,
471
+ response:
472
+ part.output.type === 'json' ? part.output.value : {},
473
+ id: serverToolCallId,
474
+ },
475
+ thoughtSignature,
476
+ };
477
+ }
478
+
479
+ return undefined;
480
+ }
481
+ }
482
+ })
483
+ .filter(part => part !== undefined),
484
+ });
485
+
486
+ break;
487
+ }
488
+
489
+ case 'tool': {
490
+ systemMessagesAllowed = false;
491
+
492
+ const parts: GoogleContentPart[] = [];
493
+
494
+ for (const part of content) {
495
+ if (part.type === 'tool-approval-response') {
496
+ continue;
497
+ }
498
+
499
+ const partProviderOpts = readProviderOpts(part);
500
+ const serverToolCallId =
501
+ partProviderOpts?.serverToolCallId != null
502
+ ? String(partProviderOpts.serverToolCallId)
503
+ : undefined;
504
+ const serverToolType =
505
+ partProviderOpts?.serverToolType != null
506
+ ? String(partProviderOpts.serverToolType)
507
+ : undefined;
508
+
509
+ if (serverToolCallId && serverToolType) {
510
+ const serverThoughtSignature =
511
+ partProviderOpts?.thoughtSignature != null
512
+ ? String(partProviderOpts.thoughtSignature)
513
+ : undefined;
514
+
515
+ if (contents.length > 0) {
516
+ const lastContent = contents[contents.length - 1];
517
+ if (lastContent.role === 'model') {
518
+ lastContent.parts.push({
519
+ toolResponse: {
520
+ toolType: serverToolType,
521
+ response:
522
+ part.output.type === 'json' ? part.output.value : {},
523
+ id: serverToolCallId,
524
+ },
525
+ thoughtSignature: serverThoughtSignature,
526
+ });
527
+ continue;
528
+ }
529
+ }
530
+ }
531
+
532
+ const output = part.output;
533
+
534
+ if (output.type === 'content') {
535
+ if (supportsFunctionResponseParts) {
536
+ appendToolResultParts(parts, part.toolName, output.value);
537
+ } else {
538
+ appendLegacyToolResultParts(parts, part.toolName, output.value);
539
+ }
540
+ } else {
541
+ parts.push({
542
+ functionResponse: {
543
+ name: part.toolName,
544
+ response: {
545
+ name: part.toolName,
546
+ content:
547
+ output.type === 'execution-denied'
548
+ ? (output.reason ?? 'Tool call execution denied.')
549
+ : output.value,
550
+ },
551
+ },
552
+ });
553
+ }
554
+ }
555
+
556
+ contents.push({
557
+ role: 'user',
558
+ parts,
559
+ });
560
+ break;
561
+ }
562
+ }
563
+ }
564
+
565
+ if (
566
+ isGemmaModel &&
567
+ systemInstructionParts.length > 0 &&
568
+ contents.length > 0 &&
569
+ contents[0].role === 'user'
570
+ ) {
571
+ const systemText = systemInstructionParts
572
+ .map(part => part.text)
573
+ .join('\n\n');
574
+
575
+ contents[0].parts.unshift({ text: systemText + '\n\n' });
576
+ }
577
+
578
+ return {
579
+ systemInstruction:
580
+ systemInstructionParts.length > 0 && !isGemmaModel
581
+ ? { parts: systemInstructionParts }
582
+ : undefined,
583
+ contents,
584
+ };
585
+ }
@@ -1,11 +1,11 @@
1
1
  import {
2
- type InferSchema,
3
2
  lazySchema,
4
3
  zodSchema,
4
+ type InferSchema,
5
5
  } from '@ai-sdk/provider-utils';
6
6
  import { z } from 'zod/v4';
7
7
 
8
- export type GoogleGenerativeAIEmbeddingModelId =
8
+ export type GoogleEmbeddingModelId =
9
9
  | 'gemini-embedding-001'
10
10
  | 'gemini-embedding-2-preview'
11
11
  | (string & {});
@@ -1,46 +1,59 @@
1
1
  import {
2
- EmbeddingModelV4,
3
2
  TooManyEmbeddingValuesForCallError,
3
+ type EmbeddingModelV4,
4
4
  } from '@ai-sdk/provider';
5
5
  import {
6
6
  combineHeaders,
7
7
  createJsonResponseHandler,
8
- FetchFunction,
9
8
  lazySchema,
10
9
  parseProviderOptions,
11
10
  postJsonToApi,
12
11
  resolve,
12
+ serializeModelOptions,
13
+ WORKFLOW_SERIALIZE,
14
+ WORKFLOW_DESERIALIZE,
13
15
  zodSchema,
16
+ type FetchFunction,
14
17
  } from '@ai-sdk/provider-utils';
15
18
  import { z } from 'zod/v4';
16
19
  import { googleFailedResponseHandler } from './google-error';
17
20
  import {
18
- GoogleGenerativeAIEmbeddingModelId,
19
21
  googleEmbeddingModelOptions,
20
- } from './google-generative-ai-embedding-options';
21
-
22
- type GoogleGenerativeAIEmbeddingConfig = {
22
+ type GoogleEmbeddingModelId,
23
+ } from './google-embedding-model-options';
24
+ type GoogleEmbeddingConfig = {
23
25
  provider: string;
24
26
  baseURL: string;
25
- headers: () => Record<string, string | undefined>;
27
+ headers?: () => Record<string, string | undefined>;
26
28
  fetch?: FetchFunction;
27
29
  };
28
30
 
29
- export class GoogleGenerativeAIEmbeddingModel implements EmbeddingModelV4 {
31
+ export class GoogleEmbeddingModel implements EmbeddingModelV4 {
30
32
  readonly specificationVersion = 'v4';
31
- readonly modelId: GoogleGenerativeAIEmbeddingModelId;
33
+ readonly modelId: GoogleEmbeddingModelId;
32
34
  readonly maxEmbeddingsPerCall = 2048;
33
35
  readonly supportsParallelCalls = true;
34
36
 
35
- private readonly config: GoogleGenerativeAIEmbeddingConfig;
37
+ private readonly config: GoogleEmbeddingConfig;
38
+
39
+ static [WORKFLOW_SERIALIZE](model: GoogleEmbeddingModel) {
40
+ return serializeModelOptions({
41
+ modelId: model.modelId,
42
+ config: model.config,
43
+ });
44
+ }
45
+
46
+ static [WORKFLOW_DESERIALIZE](options: {
47
+ modelId: string;
48
+ config: GoogleEmbeddingConfig;
49
+ }) {
50
+ return new GoogleEmbeddingModel(options.modelId, options.config);
51
+ }
36
52
 
37
53
  get provider(): string {
38
54
  return this.config.provider;
39
55
  }
40
- constructor(
41
- modelId: GoogleGenerativeAIEmbeddingModelId,
42
- config: GoogleGenerativeAIEmbeddingConfig,
43
- ) {
56
+ constructor(modelId: GoogleEmbeddingModelId, config: GoogleEmbeddingConfig) {
44
57
  this.modelId = modelId;
45
58
  this.config = config;
46
59
  }
@@ -70,7 +83,7 @@ export class GoogleGenerativeAIEmbeddingModel implements EmbeddingModelV4 {
70
83
  }
71
84
 
72
85
  const mergedHeaders = combineHeaders(
73
- await resolve(this.config.headers),
86
+ this.config.headers ? await resolve(this.config.headers) : undefined,
74
87
  headers,
75
88
  );
76
89
 
@@ -1,8 +1,8 @@
1
1
  import {
2
2
  createJsonErrorResponseHandler,
3
- type InferSchema,
4
3
  lazySchema,
5
4
  zodSchema,
5
+ type InferSchema,
6
6
  } from '@ai-sdk/provider-utils';
7
7
  import { z } from 'zod/v4';
8
8