@ai-sdk/google 4.0.0-beta.8 → 4.0.0-canary.50

Files changed (47)
  1. package/CHANGELOG.md +351 -4
  2. package/README.md +6 -4
  3. package/dist/index.d.ts +97 -54
  4. package/dist/index.js +1644 -580
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +66 -26
  7. package/dist/internal/index.js +1258 -450
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/{15-google-generative-ai.mdx → 15-google.mdx} +46 -40
  10. package/package.json +13 -14
  11. package/src/{convert-google-generative-ai-usage.ts → convert-google-usage.ts} +12 -5
  12. package/src/convert-json-schema-to-openapi-schema.ts +1 -1
  13. package/src/convert-to-google-messages.ts +577 -0
  14. package/src/{google-generative-ai-embedding-options.ts → google-embedding-model-options.ts} +2 -2
  15. package/src/{google-generative-ai-embedding-model.ts → google-embedding-model.ts} +31 -18
  16. package/src/google-error.ts +1 -1
  17. package/src/google-files.ts +225 -0
  18. package/src/google-image-model-options.ts +23 -0
  19. package/src/{google-generative-ai-image-model.ts → google-image-model.ts} +74 -62
  20. package/src/{google-generative-ai-image-settings.ts → google-image-settings.ts} +2 -2
  21. package/src/google-json-accumulator.ts +336 -0
  22. package/src/{google-generative-ai-options.ts → google-language-model-options.ts} +32 -5
  23. package/src/{google-generative-ai-language-model.ts → google-language-model.ts} +609 -214
  24. package/src/google-prepare-tools.ts +72 -12
  25. package/src/google-prompt.ts +82 -0
  26. package/src/google-provider.ts +63 -54
  27. package/src/google-video-model-options.ts +43 -0
  28. package/src/{google-generative-ai-video-model.ts → google-video-model.ts} +17 -56
  29. package/src/{google-generative-ai-video-settings.ts → google-video-settings.ts} +2 -1
  30. package/src/index.ts +28 -9
  31. package/src/internal/index.ts +2 -2
  32. package/src/{map-google-generative-ai-finish-reason.ts → map-google-finish-reason.ts} +3 -3
  33. package/src/tool/code-execution.ts +2 -2
  34. package/src/tool/enterprise-web-search.ts +9 -3
  35. package/src/tool/file-search.ts +5 -7
  36. package/src/tool/google-maps.ts +3 -2
  37. package/src/tool/google-search.ts +10 -11
  38. package/src/tool/url-context.ts +4 -2
  39. package/src/tool/vertex-rag-store.ts +9 -6
  40. package/dist/index.d.mts +0 -384
  41. package/dist/index.mjs +0 -2519
  42. package/dist/index.mjs.map +0 -1
  43. package/dist/internal/index.d.mts +0 -287
  44. package/dist/internal/index.mjs +0 -1708
  45. package/dist/internal/index.mjs.map +0 -1
  46. package/src/convert-to-google-generative-ai-messages.ts +0 -239
  47. package/src/google-generative-ai-prompt.ts +0 -47
@@ -1,75 +1,96 @@
-import {
-  LanguageModelV3,
-  LanguageModelV3CallOptions,
-  LanguageModelV3Content,
-  LanguageModelV3FinishReason,
-  LanguageModelV3GenerateResult,
-  LanguageModelV3Source,
-  LanguageModelV3StreamPart,
-  LanguageModelV3StreamResult,
-  SharedV3ProviderMetadata,
-  SharedV3Warning,
+import type {
+  LanguageModelV4,
+  LanguageModelV4CallOptions,
+  LanguageModelV4Content,
+  LanguageModelV4FinishReason,
+  LanguageModelV4GenerateResult,
+  LanguageModelV4Source,
+  LanguageModelV4StreamPart,
+  LanguageModelV4StreamResult,
+  JSONObject,
+  SharedV4ProviderMetadata,
+  SharedV4Warning,
 } from '@ai-sdk/provider';
 import {
   combineHeaders,
   createEventSourceResponseHandler,
   createJsonResponseHandler,
-  FetchFunction,
   generateId,
-  InferSchema,
+  isCustomReasoning,
   lazySchema,
+  mapReasoningToProviderBudget,
+  mapReasoningToProviderEffort,
   parseProviderOptions,
-  ParseResult,
   postJsonToApi,
-  Resolvable,
   resolve,
+  serializeModelOptions,
+  WORKFLOW_SERIALIZE,
+  WORKFLOW_DESERIALIZE,
   zodSchema,
+  type FetchFunction,
+  type InferSchema,
+  type ParseResult,
+  type Resolvable,
 } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
 import {
-  convertGoogleGenerativeAIUsage,
-  GoogleGenerativeAIUsageMetadata,
-} from './convert-google-generative-ai-usage';
+  convertGoogleUsage,
+  type GoogleUsageMetadata,
+} from './convert-google-usage';
 import { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';
-import { convertToGoogleGenerativeAIMessages } from './convert-to-google-generative-ai-messages';
+import { convertToGoogleMessages } from './convert-to-google-messages';
 import { getModelPath } from './get-model-path';
 import { googleFailedResponseHandler } from './google-error';
 import {
-  GoogleGenerativeAIModelId,
   googleLanguageModelOptions,
-} from './google-generative-ai-options';
-import {
-  GoogleGenerativeAIContentPart,
-  GoogleGenerativeAIProviderMetadata,
-} from './google-generative-ai-prompt';
+  VertexServiceTierMap,
+  type GoogleLanguageModelOptions,
+  type GoogleModelId,
+} from './google-language-model-options';
+import type { GoogleProviderMetadata } from './google-prompt';
 import { prepareTools } from './google-prepare-tools';
-import { mapGoogleGenerativeAIFinishReason } from './map-google-generative-ai-finish-reason';
+import {
+  GoogleJSONAccumulator,
+  type PartialArg,
+} from './google-json-accumulator';
+import { mapGoogleFinishReason } from './map-google-finish-reason';
 
-type GoogleGenerativeAIConfig = {
+type GoogleConfig = {
   provider: string;
   baseURL: string;
-  headers: Resolvable<Record<string, string | undefined>>;
+  headers?: Resolvable<Record<string, string | undefined>>;
   fetch?: FetchFunction;
   generateId: () => string;
 
   /**
    * The supported URLs for the model.
    */
-  supportedUrls?: () => LanguageModelV3['supportedUrls'];
+  supportedUrls?: () => LanguageModelV4['supportedUrls'];
 };
 
-export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
-  readonly specificationVersion = 'v3';
+export class GoogleLanguageModel implements LanguageModelV4 {
+  readonly specificationVersion = 'v4';
 
-  readonly modelId: GoogleGenerativeAIModelId;
+  readonly modelId: GoogleModelId;
 
-  private readonly config: GoogleGenerativeAIConfig;
+  private readonly config: GoogleConfig;
   private readonly generateId: () => string;
 
-  constructor(
-    modelId: GoogleGenerativeAIModelId,
-    config: GoogleGenerativeAIConfig,
-  ) {
+  static [WORKFLOW_SERIALIZE](model: GoogleLanguageModel) {
+    return serializeModelOptions({
+      modelId: model.modelId,
+      config: model.config,
+    });
+  }
+
+  static [WORKFLOW_DESERIALIZE](options: {
+    modelId: string;
+    config: GoogleConfig;
+  }) {
+    return new GoogleLanguageModel(options.modelId, options.config);
+  }
+
+  constructor(modelId: GoogleModelId, config: GoogleConfig) {
     this.modelId = modelId;
    this.config = config;
    this.generateId = config.generateId ?? generateId;
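The new WORKFLOW_SERIALIZE / WORKFLOW_DESERIALIZE statics let a workflow runtime snapshot a model instance and rebuild it later. A minimal sketch of the round trip, assuming serializeModelOptions returns the same { modelId, config } shape the deserializer expects (the `model` variable is illustrative):

    // Snapshot an existing GoogleLanguageModel instance...
    const snapshot = GoogleLanguageModel[WORKFLOW_SERIALIZE](model);

    // ...and rebuild an equivalent instance later (cast hedges the
    // unknown serialized shape).
    const restored = GoogleLanguageModel[WORKFLOW_DESERIALIZE](
      snapshot as { modelId: string; config: GoogleConfig },
    );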
@@ -83,33 +104,49 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
     return this.config.supportedUrls?.() ?? {};
   }
 
-  private async getArgs({
-    prompt,
-    maxOutputTokens,
-    temperature,
-    topP,
-    topK,
-    frequencyPenalty,
-    presencePenalty,
-    stopSequences,
-    responseFormat,
-    seed,
-    tools,
-    toolChoice,
-    providerOptions,
-  }: LanguageModelV3CallOptions) {
-    const warnings: SharedV3Warning[] = [];
-
-    const providerOptionsName = this.config.provider.includes('vertex')
-      ? 'vertex'
-      : 'google';
-    let googleOptions = await parseProviderOptions({
-      provider: providerOptionsName,
+  private async getArgs(
+    {
+      prompt,
+      maxOutputTokens,
+      temperature,
+      topP,
+      topK,
+      frequencyPenalty,
+      presencePenalty,
+      stopSequences,
+      responseFormat,
+      seed,
+      tools,
+      toolChoice,
+      reasoning,
       providerOptions,
-      schema: googleLanguageModelOptions,
-    });
+    }: LanguageModelV4CallOptions,
+    { isStreaming = false }: { isStreaming?: boolean } = {},
+  ) {
+    const warnings: SharedV4Warning[] = [];
+
+    // Names to look up in providerOptions and to write into providerMetadata.
+    // For the Vertex provider we read both the new `googleVertex` key and the
+    // legacy `vertex` key (new takes precedence) and write under both for
+    // backward compatibility. For other Google providers we use just `google`.
+    const providerOptionsNames: readonly string[] =
+      this.config.provider.includes('vertex')
+        ? (['googleVertex', 'vertex'] as const)
+        : (['google'] as const);
+
+    let googleOptions: GoogleLanguageModelOptions | undefined;
+    for (const name of providerOptionsNames) {
+      googleOptions = await parseProviderOptions({
+        provider: name,
+        providerOptions,
+        schema: googleLanguageModelOptions,
+      });
+      if (googleOptions != null) break;
+    }
 
-    if (googleOptions == null && providerOptionsName !== 'google') {
+    // Cross-namespace fallback: a Vertex provider may receive options under
+    // the `google` key (e.g. via the AI Gateway).
+    if (googleOptions == null && !providerOptionsNames.includes('google')) {
       googleOptions = await parseProviderOptions({
         provider: 'google',
         providerOptions,
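In practice a Vertex model now accepts options under googleVertex, vertex, or (through the cross-namespace fallback above) google, with the first match winning. A hedged usage sketch, assuming the usual ai + @ai-sdk/google-vertex setup (model id and option value are illustrative):

    import { generateText } from 'ai';
    import { vertex } from '@ai-sdk/google-vertex';

    const result = await generateText({
      model: vertex('gemini-2.5-flash'),
      prompt: 'Hello',
      providerOptions: {
        // Preferred key; the legacy `vertex` key is still read as a fallback.
        googleVertex: { cachedContent: 'cachedContents/example' },
      },
    });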
@@ -118,12 +155,14 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
     }
 
     // Add warning if Vertex rag tools are used with a non-Vertex Google provider
+    const isVertexProvider = this.config.provider.startsWith('google.vertex.');
+
     if (
       tools?.some(
         tool =>
           tool.type === 'provider' && tool.id === 'google.vertex_rag_store',
       ) &&
-      !this.config.provider.startsWith('google.vertex.')
+      !isVertexProvider
     ) {
       warnings.push({
         type: 'other',
@@ -134,12 +173,30 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
       });
     }
 
+    if (googleOptions?.streamFunctionCallArguments && !isVertexProvider) {
+      warnings.push({
+        type: 'other',
+        message:
+          "'streamFunctionCallArguments' is only supported on the Vertex AI API " +
+          'and will be ignored with the current Google provider ' +
+          `(${this.config.provider}). See https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#streaming-fc`,
+      });
+    }
+
+    // Vertex API requires another service tier format.
+    let sanitizedServiceTier: string | undefined = googleOptions?.serviceTier;
+    if (googleOptions?.serviceTier && isVertexProvider) {
+      sanitizedServiceTier = VertexServiceTierMap[googleOptions.serviceTier];
+    }
+
     const isGemmaModel = this.modelId.toLowerCase().startsWith('gemma-');
+    const supportsFunctionResponseParts = this.modelId.startsWith('gemini-3');
 
-    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
-      prompt,
-      { isGemmaModel, providerOptionsName },
-    );
+    const { contents, systemInstruction } = convertToGoogleMessages(prompt, {
+      isGemmaModel,
+      providerOptionsNames,
+      supportsFunctionResponseParts,
+    });
 
     const {
       tools: googleTools,
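streamFunctionCallArguments is accepted everywhere but only honored on the Vertex AI API, and there only for streaming calls (see doStream below); other providers get the warning above. A sketch of opting in (model id illustrative):

    import { streamText } from 'ai';
    import { vertex } from '@ai-sdk/google-vertex';

    const stream = streamText({
      model: vertex('gemini-2.5-flash'),
      prompt: 'Plan a 3-day trip to Berlin',
      // tools omitted for brevity; the option only matters for tool calls.
      providerOptions: {
        googleVertex: { streamFunctionCallArguments: true },
      },
    });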
@@ -149,7 +206,41 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
       tools,
       toolChoice,
       modelId: this.modelId,
+      isVertexProvider,
+    });
+
+    const resolvedThinking = resolveThinkingConfig({
+      reasoning,
+      modelId: this.modelId,
+      warnings,
     });
+    const thinkingConfig =
+      googleOptions?.thinkingConfig || resolvedThinking
+        ? { ...resolvedThinking, ...googleOptions?.thinkingConfig }
+        : undefined;
+
+    const streamFunctionCallArguments =
+      isStreaming && isVertexProvider
+        ? (googleOptions?.streamFunctionCallArguments ?? false)
+        : undefined;
+
+    const toolConfig =
+      googleToolConfig ||
+      streamFunctionCallArguments ||
+      googleOptions?.retrievalConfig
+        ? {
+            ...googleToolConfig,
+            ...(streamFunctionCallArguments && {
+              functionCallingConfig: {
+                ...googleToolConfig?.functionCallingConfig,
+                streamFunctionCallArguments: true as const,
+              },
+            }),
+            ...(googleOptions?.retrievalConfig && {
+              retrievalConfig: googleOptions.retrievalConfig,
+            }),
+          }
+        : undefined;
 
     return {
       args: {
@@ -182,7 +273,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
 
         // provider options:
         responseModalities: googleOptions?.responseModalities,
-        thinkingConfig: googleOptions?.thinkingConfig,
+        thinkingConfig,
         ...(googleOptions?.mediaResolution && {
           mediaResolution: googleOptions.mediaResolution,
         }),
@@ -194,27 +285,28 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
         systemInstruction: isGemmaModel ? undefined : systemInstruction,
         safetySettings: googleOptions?.safetySettings,
         tools: googleTools,
-        toolConfig: googleOptions?.retrievalConfig
-          ? {
-              ...googleToolConfig,
-              retrievalConfig: googleOptions.retrievalConfig,
-            }
-          : googleToolConfig,
+        toolConfig,
         cachedContent: googleOptions?.cachedContent,
         labels: googleOptions?.labels,
+        serviceTier: sanitizedServiceTier,
       },
       warnings: [...warnings, ...toolWarnings],
-      providerOptionsName,
+      providerOptionsNames,
     };
   }
 
   async doGenerate(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3GenerateResult> {
-    const { args, warnings, providerOptionsName } = await this.getArgs(options);
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4GenerateResult> {
+    const { args, warnings, providerOptionsNames } =
+      await this.getArgs(options);
+    const wrapProviderMetadata = (payload: Record<string, unknown>) =>
+      Object.fromEntries(
+        providerOptionsNames.map(name => [name, payload]),
+      ) as SharedV4ProviderMetadata;
 
     const mergedHeaders = combineHeaders(
-      await resolve(this.config.headers),
+      this.config.headers ? await resolve(this.config.headers) : undefined,
       options.headers,
     );
 
@@ -235,7 +327,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
     });
 
     const candidate = response.candidates[0];
-    const content: Array<LanguageModelV3Content> = [];
+    const content: Array<LanguageModelV4Content> = [];
 
     // map ordered parts to content:
     const parts = candidate.content?.parts ?? [];
@@ -244,6 +336,8 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
 
     // Associates a code execution result with its preceding call.
     let lastCodeExecutionToolCallId: string | undefined;
+    // Associates a server-side tool response with its preceding call (tool combination).
+    let lastServerToolCallId: string | undefined;
 
     // Build content array from all parts
     for (const part of parts) {
@@ -273,11 +367,9 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
         lastCodeExecutionToolCallId = undefined;
       } else if ('text' in part && part.text != null) {
         const thoughtSignatureMetadata = part.thoughtSignature
-          ? {
-              [providerOptionsName]: {
-                thoughtSignature: part.thoughtSignature,
-              },
-            }
+          ? wrapProviderMetadata({
+              thoughtSignature: part.thoughtSignature,
+            })
           : undefined;
 
         if (part.text.length === 0) {
@@ -292,39 +384,78 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
             providerMetadata: thoughtSignatureMetadata,
           });
         }
-      } else if ('functionCall' in part) {
+      } else if (
+        'functionCall' in part &&
+        part.functionCall.name != null &&
+        part.functionCall.args != null
+      ) {
         content.push({
           type: 'tool-call' as const,
           toolCallId: this.config.generateId(),
           toolName: part.functionCall.name,
           input: JSON.stringify(part.functionCall.args),
           providerMetadata: part.thoughtSignature
-            ? {
-                [providerOptionsName]: {
-                  thoughtSignature: part.thoughtSignature,
-                },
-              }
+            ? wrapProviderMetadata({
+                thoughtSignature: part.thoughtSignature,
+              })
             : undefined,
         });
       } else if ('inlineData' in part) {
         const hasThought = part.thought === true;
         const hasThoughtSignature = !!part.thoughtSignature;
         content.push({
-          type: 'file' as const,
-          data: part.inlineData.data,
+          type: hasThought ? 'reasoning-file' : 'file',
+          data: { type: 'data', data: part.inlineData.data },
           mediaType: part.inlineData.mimeType,
-          providerMetadata:
-            hasThought || hasThoughtSignature
-              ? {
-                  [providerOptionsName]: {
-                    ...(hasThought ? { thought: true } : {}),
-                    ...(hasThoughtSignature
-                      ? { thoughtSignature: part.thoughtSignature }
-                      : {}),
-                  },
-                }
-              : undefined,
+          providerMetadata: hasThoughtSignature
+            ? wrapProviderMetadata({
+                thoughtSignature: part.thoughtSignature,
+              })
+            : undefined,
         });
+      } else if ('toolCall' in part && part.toolCall) {
+        const toolCallId = part.toolCall.id ?? this.config.generateId();
+        lastServerToolCallId = toolCallId;
+        content.push({
+          type: 'tool-call',
+          toolCallId,
+          toolName: `server:${part.toolCall.toolType}`,
+          input: JSON.stringify(part.toolCall.args ?? {}),
+          providerExecuted: true,
+          dynamic: true,
+          providerMetadata: part.thoughtSignature
+            ? wrapProviderMetadata({
+                thoughtSignature: part.thoughtSignature,
+                serverToolCallId: toolCallId,
+                serverToolType: part.toolCall.toolType,
+              })
+            : wrapProviderMetadata({
+                serverToolCallId: toolCallId,
+                serverToolType: part.toolCall.toolType,
+              }),
+        });
+      } else if ('toolResponse' in part && part.toolResponse) {
+        const responseToolCallId =
+          lastServerToolCallId ??
+          part.toolResponse.id ??
+          this.config.generateId();
+        content.push({
+          type: 'tool-result',
+          toolCallId: responseToolCallId,
+          toolName: `server:${part.toolResponse.toolType}`,
+          result: (part.toolResponse.response ?? {}) as JSONObject,
+          providerMetadata: part.thoughtSignature
+            ? wrapProviderMetadata({
+                thoughtSignature: part.thoughtSignature,
+                serverToolCallId: responseToolCallId,
+                serverToolType: part.toolResponse.toolType,
+              })
+            : wrapProviderMetadata({
+                serverToolCallId: responseToolCallId,
+                serverToolType: part.toolResponse.toolType,
+              }),
+        });
+        lastServerToolCallId = undefined;
       }
     }
 
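Server-executed tools (the new toolCall / toolResponse parts) surface as dynamic, provider-executed content whose tool names carry a server: prefix, with serverToolCallId / serverToolType echoed in provider metadata. A sketch of picking them out of a result, assuming the v4 content shape built above:

    const serverToolResults = result.content.filter(
      part =>
        part.type === 'tool-result' && part.toolName.startsWith('server:'),
    );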
@@ -340,7 +471,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
     return {
       content,
       finishReason: {
-        unified: mapGoogleGenerativeAIFinishReason({
+        unified: mapGoogleFinishReason({
           finishReason: candidate.finishReason,
           // Only count client-executed tool calls for finish reason determination.
           hasToolCalls: content.some(
@@ -349,18 +480,17 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
         }),
         raw: candidate.finishReason ?? undefined,
       },
-      usage: convertGoogleGenerativeAIUsage(usageMetadata),
+      usage: convertGoogleUsage(usageMetadata),
       warnings,
-      providerMetadata: {
-        [providerOptionsName]: {
-          promptFeedback: response.promptFeedback ?? null,
-          groundingMetadata: candidate.groundingMetadata ?? null,
-          urlContextMetadata: candidate.urlContextMetadata ?? null,
-          safetyRatings: candidate.safetyRatings ?? null,
-          usageMetadata: usageMetadata ?? null,
-          finishMessage: candidate.finishMessage ?? null,
-        } satisfies GoogleGenerativeAIProviderMetadata,
-      },
+      providerMetadata: wrapProviderMetadata({
+        promptFeedback: response.promptFeedback ?? null,
+        groundingMetadata: candidate.groundingMetadata ?? null,
+        urlContextMetadata: candidate.urlContextMetadata ?? null,
+        safetyRatings: candidate.safetyRatings ?? null,
+        usageMetadata: usageMetadata ?? null,
+        finishMessage: candidate.finishMessage ?? null,
+        serviceTier: response.serviceTier ?? null,
+      } satisfies GoogleProviderMetadata),
       request: { body: args },
       response: {
         // TODO timestamp, model id, id
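Because wrapProviderMetadata writes the identical payload under every namespace name, Vertex responses can be read from either key. A sketch:

    // Same object under `googleVertex` and `vertex` on Vertex,
    // under `google` on the plain Google provider.
    const meta =
      result.providerMetadata?.googleVertex ?? result.providerMetadata?.vertex;
    console.log(meta?.serviceTier, meta?.groundingMetadata);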
@@ -371,12 +501,19 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
   }
 
   async doStream(
-    options: LanguageModelV3CallOptions,
-  ): Promise<LanguageModelV3StreamResult> {
-    const { args, warnings, providerOptionsName } = await this.getArgs(options);
+    options: LanguageModelV4CallOptions,
+  ): Promise<LanguageModelV4StreamResult> {
+    const { args, warnings, providerOptionsNames } = await this.getArgs(
+      options,
+      { isStreaming: true },
+    );
+    const wrapProviderMetadata = (payload: Record<string, unknown>) =>
+      Object.fromEntries(
+        providerOptionsNames.map(name => [name, payload]),
+      ) as SharedV4ProviderMetadata;
 
     const headers = combineHeaders(
-      await resolve(this.config.headers),
+      this.config.headers ? await resolve(this.config.headers) : undefined,
       options.headers,
     );
 
@@ -392,14 +529,15 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
       fetch: this.config.fetch,
     });
 
-    let finishReason: LanguageModelV3FinishReason = {
+    let finishReason: LanguageModelV4FinishReason = {
       unified: 'other',
       raw: undefined,
     };
-    let usage: GoogleGenerativeAIUsageMetadata | undefined = undefined;
-    let providerMetadata: SharedV3ProviderMetadata | undefined = undefined;
+    let usage: GoogleUsageMetadata | undefined = undefined;
+    let providerMetadata: SharedV4ProviderMetadata | undefined = undefined;
     let lastGroundingMetadata: GroundingMetadataSchema | null = null;
     let lastUrlContextMetadata: UrlContextMetadataSchema | null = null;
+    let serviceTier: string | null = null;
 
     const generateId = this.config.generateId;
     let hasToolCalls = false;
@@ -413,12 +551,21 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
     const emittedSourceUrls = new Set<string>();
     // Associates a code execution result with its preceding call.
     let lastCodeExecutionToolCallId: string | undefined;
+    // Associates a server-side tool response with its preceding call (tool combination).
+    let lastServerToolCallId: string | undefined;
+
+    const activeStreamingToolCalls: Array<{
+      toolCallId: string;
+      toolName: string;
+      accumulator: GoogleJSONAccumulator;
+      providerMetadata?: SharedV4ProviderMetadata;
+    }> = [];
 
     return {
       stream: response.pipeThrough(
         new TransformStream<
           ParseResult<ChunkSchema>,
-          LanguageModelV3StreamPart
+          LanguageModelV4StreamPart
         >({
           start(controller) {
             controller.enqueue({ type: 'stream-start', warnings });
@@ -442,6 +589,10 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
               usage = usageMetadata;
             }
 
+            if (value.serviceTier != null) {
+              serviceTier = value.serviceTier;
+            }
+
             const candidate = value.candidates?.[0];
 
             // sometimes the API returns an empty candidates array
@@ -512,11 +663,9 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
               }
             } else if ('text' in part && part.text != null) {
               const thoughtSignatureMetadata = part.thoughtSignature
-                ? {
-                    [providerOptionsName]: {
-                      thoughtSignature: part.thoughtSignature,
-                    },
-                  }
+                ? wrapProviderMetadata({
+                    thoughtSignature: part.thoughtSignature,
+                  })
                 : undefined;
 
               if (part.text.length === 0) {
@@ -602,60 +751,205 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
 
               const hasThought = part.thought === true;
               const hasThoughtSignature = !!part.thoughtSignature;
-              const fileMeta =
-                hasThought || hasThoughtSignature
-                  ? {
-                      [providerOptionsName]: {
-                        ...(hasThought ? { thought: true } : {}),
-                        ...(hasThoughtSignature
-                          ? { thoughtSignature: part.thoughtSignature }
-                          : {}),
-                      },
-                    }
-                  : undefined;
+              const fileMeta = hasThoughtSignature
+                ? wrapProviderMetadata({
+                    thoughtSignature: part.thoughtSignature,
+                  })
+                : undefined;
               controller.enqueue({
-                type: 'file',
+                type: hasThought ? 'reasoning-file' : 'file',
                 mediaType: part.inlineData.mimeType,
-                data: part.inlineData.data,
+                data: { type: 'data', data: part.inlineData.data },
                 providerMetadata: fileMeta,
               });
+            } else if ('toolCall' in part && part.toolCall) {
+              const toolCallId = part.toolCall.id ?? generateId();
+              lastServerToolCallId = toolCallId;
+              const serverMeta = wrapProviderMetadata({
+                ...(part.thoughtSignature
+                  ? { thoughtSignature: part.thoughtSignature }
+                  : {}),
+                serverToolCallId: toolCallId,
+                serverToolType: part.toolCall.toolType,
+              });
+
+              controller.enqueue({
+                type: 'tool-call',
+                toolCallId,
+                toolName: `server:${part.toolCall.toolType}`,
+                input: JSON.stringify(part.toolCall.args ?? {}),
+                providerExecuted: true,
+                dynamic: true,
+                providerMetadata: serverMeta,
+              });
+            } else if ('toolResponse' in part && part.toolResponse) {
+              const responseToolCallId =
+                lastServerToolCallId ??
+                part.toolResponse.id ??
+                generateId();
+              const serverMeta = wrapProviderMetadata({
+                ...(part.thoughtSignature
+                  ? { thoughtSignature: part.thoughtSignature }
+                  : {}),
+                serverToolCallId: responseToolCallId,
+                serverToolType: part.toolResponse.toolType,
+              });
+
+              controller.enqueue({
+                type: 'tool-result',
+                toolCallId: responseToolCallId,
+                toolName: `server:${part.toolResponse.toolType}`,
+                result: (part.toolResponse.response ?? {}) as JSONObject,
+                providerMetadata: serverMeta,
+              });
+              lastServerToolCallId = undefined;
             }
           }
 
-            const toolCallDeltas = getToolCallsFromParts({
-              parts: content.parts,
-              generateId,
-              providerOptionsName,
-            });
+            // Handle streaming and complete function calls
+            for (const part of parts) {
+              if (!('functionCall' in part)) continue;
+
+              const providerMeta = part.thoughtSignature
+                ? wrapProviderMetadata({
+                    thoughtSignature: part.thoughtSignature,
+                  })
+                : undefined;
+
+              const isStreamingChunk =
+                part.functionCall.partialArgs != null ||
+                (part.functionCall.name != null &&
+                  part.functionCall.willContinue === true);
+              const isTerminalChunk =
+                part.functionCall.name == null &&
+                part.functionCall.args == null &&
+                part.functionCall.partialArgs == null &&
+                part.functionCall.willContinue == null;
+              const isCompleteCall =
+                part.functionCall.name != null &&
+                part.functionCall.args != null &&
+                part.functionCall.partialArgs == null;
+
+              if (isStreamingChunk) {
+                if (
+                  part.functionCall.name != null &&
+                  part.functionCall.willContinue === true
+                ) {
+                  const toolCallId = generateId();
+                  const accumulator = new GoogleJSONAccumulator();
+                  activeStreamingToolCalls.push({
+                    toolCallId,
+                    toolName: part.functionCall.name,
+                    accumulator,
+                    providerMetadata: providerMeta,
+                  });
+
+                  controller.enqueue({
+                    type: 'tool-input-start',
+                    id: toolCallId,
+                    toolName: part.functionCall.name,
+                    providerMetadata: providerMeta,
+                  });
+
+                  if (part.functionCall.partialArgs != null) {
+                    const { textDelta } = accumulator.processPartialArgs(
+                      part.functionCall.partialArgs as PartialArg[],
+                    );
+                    if (textDelta.length > 0) {
+                      controller.enqueue({
+                        type: 'tool-input-delta',
+                        id: toolCallId,
+                        delta: textDelta,
+                        providerMetadata: providerMeta,
+                      });
+                    }
+                  }
+                } else if (
+                  part.functionCall.partialArgs != null &&
+                  activeStreamingToolCalls.length > 0
+                ) {
+                  const active =
+                    activeStreamingToolCalls[
+                      activeStreamingToolCalls.length - 1
+                    ];
+                  const { textDelta } = active.accumulator.processPartialArgs(
+                    part.functionCall.partialArgs as PartialArg[],
+                  );
+                  if (textDelta.length > 0) {
+                    controller.enqueue({
+                      type: 'tool-input-delta',
+                      id: active.toolCallId,
+                      delta: textDelta,
+                      providerMetadata: providerMeta,
+                    });
+                  }
+                }
+              } else if (
+                isTerminalChunk &&
+                activeStreamingToolCalls.length > 0
+              ) {
+                const active = activeStreamingToolCalls.pop()!;
+                const { finalJSON, closingDelta } =
+                  active.accumulator.finalize();
+
+                if (closingDelta.length > 0) {
+                  controller.enqueue({
+                    type: 'tool-input-delta',
+                    id: active.toolCallId,
+                    delta: closingDelta,
+                    providerMetadata: active.providerMetadata,
+                  });
+                }
+
+                controller.enqueue({
+                  type: 'tool-input-end',
+                  id: active.toolCallId,
+                  providerMetadata: active.providerMetadata,
+                });
+
+                controller.enqueue({
+                  type: 'tool-call',
+                  toolCallId: active.toolCallId,
+                  toolName: active.toolName,
+                  input: finalJSON,
+                  providerMetadata: active.providerMetadata,
+                });
+
+                hasToolCalls = true;
+              } else if (isCompleteCall) {
+                const toolCallId = generateId();
+                const toolName = part.functionCall.name!;
+                const args =
+                  typeof part.functionCall.args === 'string'
+                    ? part.functionCall.args
+                    : JSON.stringify(part.functionCall.args ?? {});
 
-            if (toolCallDeltas != null) {
-              for (const toolCall of toolCallDeltas) {
                 controller.enqueue({
                   type: 'tool-input-start',
-                  id: toolCall.toolCallId,
-                  toolName: toolCall.toolName,
-                  providerMetadata: toolCall.providerMetadata,
+                  id: toolCallId,
+                  toolName,
+                  providerMetadata: providerMeta,
                 });
 
                 controller.enqueue({
                   type: 'tool-input-delta',
-                  id: toolCall.toolCallId,
-                  delta: toolCall.args,
-                  providerMetadata: toolCall.providerMetadata,
+                  id: toolCallId,
+                  delta: args,
+                  providerMetadata: providerMeta,
                 });
 
                 controller.enqueue({
                   type: 'tool-input-end',
-                  id: toolCall.toolCallId,
-                  providerMetadata: toolCall.providerMetadata,
+                  id: toolCallId,
+                  providerMetadata: providerMeta,
                 });
 
                 controller.enqueue({
                   type: 'tool-call',
-                  toolCallId: toolCall.toolCallId,
-                  toolName: toolCall.toolName,
-                  input: toolCall.args,
-                  providerMetadata: toolCall.providerMetadata,
+                  toolCallId,
+                  toolName,
+                  input: args,
+                  providerMetadata: providerMeta,
                 });
 
                 hasToolCalls = true;
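With streamFunctionCallArguments active, GoogleJSONAccumulator turns partialArgs fragments into incremental tool-input-delta parts and emits the assembled tool-call once the terminal (empty) functionCall chunk arrives. A consumption sketch, assuming the stream part names above are exposed unchanged on fullStream:

    for await (const part of stream.fullStream) {
      if (part.type === 'tool-input-delta') {
        // Grows into valid JSON; closing brackets arrive via finalize()'s
        // closingDelta before the final tool-call part.
        process.stdout.write(part.delta);
      }
    }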
@@ -665,23 +959,22 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
 
             if (candidate.finishReason != null) {
               finishReason = {
-                unified: mapGoogleGenerativeAIFinishReason({
+                unified: mapGoogleFinishReason({
                   finishReason: candidate.finishReason,
                   hasToolCalls,
                 }),
                 raw: candidate.finishReason,
               };
 
-              providerMetadata = {
-                [providerOptionsName]: {
-                  promptFeedback: value.promptFeedback ?? null,
-                  groundingMetadata: lastGroundingMetadata,
-                  urlContextMetadata: lastUrlContextMetadata,
-                  safetyRatings: candidate.safetyRatings ?? null,
-                  usageMetadata: usageMetadata ?? null,
-                  finishMessage: candidate.finishMessage ?? null,
-                } satisfies GoogleGenerativeAIProviderMetadata,
-              };
+              providerMetadata = wrapProviderMetadata({
+                promptFeedback: value.promptFeedback ?? null,
+                groundingMetadata: lastGroundingMetadata,
+                urlContextMetadata: lastUrlContextMetadata,
+                safetyRatings: candidate.safetyRatings ?? null,
+                usageMetadata: usageMetadata ?? null,
+                finishMessage: candidate.finishMessage ?? null,
+                serviceTier,
+              } satisfies GoogleProviderMetadata);
             }
           },
 
@@ -702,7 +995,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
             controller.enqueue({
               type: 'finish',
               finishReason,
-              usage: convertGoogleGenerativeAIUsage(usage),
+              usage: convertGoogleUsage(usage),
               providerMetadata,
             });
           },
@@ -714,39 +1007,107 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
   }
 }
 
-function getToolCallsFromParts({
-  parts,
-  generateId,
-  providerOptionsName,
+function isGemini3Model(modelId: string): boolean {
+  return /gemini-3[\.\-]/i.test(modelId) || /gemini-3$/i.test(modelId);
+}
+
+function getMaxOutputTokensForGemini25Model(): number {
+  return 65536;
+}
+
+function getMaxThinkingTokensForGemini25Model(modelId: string): number {
+  const id = modelId.toLowerCase();
+  if (id.includes('2.5-pro') || id.includes('gemini-3-pro-image')) {
+    return 32768;
+  }
+  return 24576;
+}
+
+type GoogleThinkingConfig = NonNullable<
+  InferSchema<typeof googleLanguageModelOptions>['thinkingConfig']
+>;
+
+function resolveThinkingConfig({
+  reasoning,
+  modelId,
+  warnings,
 }: {
-  parts: ContentSchema['parts'];
-  generateId: () => string;
-  providerOptionsName: string;
-}) {
-  const functionCallParts = parts?.filter(
-    part => 'functionCall' in part,
-  ) as Array<
-    GoogleGenerativeAIContentPart & {
-      functionCall: { name: string; args: unknown };
-      thoughtSignature?: string | null;
-    }
+  reasoning: LanguageModelV4CallOptions['reasoning'];
+  modelId: string;
+  warnings: SharedV4Warning[];
+}): Omit<GoogleThinkingConfig, 'includeThoughts'> | undefined {
+  if (!isCustomReasoning(reasoning)) {
+    return undefined;
+  }
+
+  if (isGemini3Model(modelId) && !modelId.includes('gemini-3-pro-image')) {
+    return resolveGemini3ThinkingConfig({ reasoning, warnings });
+  }
+
+  return resolveGemini25ThinkingConfig({ reasoning, modelId, warnings });
+}
+
+function resolveGemini3ThinkingConfig({
+  reasoning,
+  warnings,
+}: {
+  reasoning: Exclude<
+    LanguageModelV4CallOptions['reasoning'],
+    'provider-default' | undefined
   >;
+  warnings: SharedV4Warning[];
+}): Pick<GoogleThinkingConfig, 'thinkingLevel'> | undefined {
+  if (reasoning === 'none') {
+    // It's not possible to fully disable thinking with Gemini 3.
+    return { thinkingLevel: 'minimal' };
+  }
 
-  return functionCallParts == null || functionCallParts.length === 0
-    ? undefined
-    : functionCallParts.map(part => ({
-        type: 'tool-call' as const,
-        toolCallId: generateId(),
-        toolName: part.functionCall.name,
-        args: JSON.stringify(part.functionCall.args),
-        providerMetadata: part.thoughtSignature
-          ? {
-              [providerOptionsName]: {
-                thoughtSignature: part.thoughtSignature,
-              },
-            }
-          : undefined,
-      }));
+  const thinkingLevel = mapReasoningToProviderEffort({
+    reasoning,
+    effortMap: {
+      minimal: 'minimal',
+      low: 'low',
+      medium: 'medium',
+      high: 'high',
+      xhigh: 'high',
+    },
+    warnings,
+  });
+
+  if (thinkingLevel == null) {
+    return undefined;
+  }
+
+  return { thinkingLevel };
+}
+
+function resolveGemini25ThinkingConfig({
+  reasoning,
+  modelId,
+  warnings,
+}: {
+  reasoning: Exclude<
+    LanguageModelV4CallOptions['reasoning'],
+    'provider-default' | undefined
+  >;
+  modelId: string;
+  warnings: SharedV4Warning[];
+}): Pick<GoogleThinkingConfig, 'thinkingBudget'> | undefined {
+  if (reasoning === 'none') {
+    return { thinkingBudget: 0 };
+  }
+
+  const thinkingBudget = mapReasoningToProviderBudget({
+    reasoning,
+    maxOutputTokens: getMaxOutputTokensForGemini25Model(),
+    maxReasoningBudget: getMaxThinkingTokensForGemini25Model(modelId),
+    minReasoningBudget: 0,
+    warnings,
+  });
+  if (thinkingBudget == null) {
+    return undefined;
+  }
+  return { thinkingBudget };
 }
 
 function extractSources({
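The resolvers above map the unified reasoning call option onto Google's two thinking APIs: Gemini 3 models (except gemini-3-pro-image) get a discrete thinkingLevel, everything else gets a token thinkingBudget. A behavioral sketch (model ids illustrative):

    const warnings: SharedV4Warning[] = [];

    resolveThinkingConfig({ reasoning: 'none', modelId: 'gemini-3-flash', warnings });
    // → { thinkingLevel: 'minimal' } (thinking cannot be fully disabled on Gemini 3)

    resolveThinkingConfig({ reasoning: 'none', modelId: 'gemini-2.5-pro', warnings });
    // → { thinkingBudget: 0 }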
@@ -755,12 +1116,12 @@ function extractSources({
 }: {
   groundingMetadata: GroundingMetadataSchema | undefined | null;
   generateId: () => string;
-}): undefined | LanguageModelV3Source[] {
+}): undefined | LanguageModelV4Source[] {
   if (!groundingMetadata?.groundingChunks) {
     return undefined;
   }
 
-  const sources: LanguageModelV3Source[] = [];
+  const sources: LanguageModelV4Source[] = [];
 
   for (const chunk of groundingMetadata.groundingChunks) {
     if (chunk.web != null) {
@@ -926,6 +1287,15 @@ export const getGroundingMetadataSchema = () =>
       .nullish(),
   });
 
+const partialArgSchema = z.object({
+  jsonPath: z.string(),
+  stringValue: z.string().nullish(),
+  numberValue: z.number().nullish(),
+  boolValue: z.boolean().nullish(),
+  nullValue: z.unknown().nullish(),
+  willContinue: z.boolean().nullish(),
+});
+
 const getContentSchema = () =>
   z.object({
     parts: z
@@ -934,8 +1304,10 @@ const getContentSchema = () =>
         // note: order matters since text can be fully empty
         z.object({
           functionCall: z.object({
-            name: z.string(),
-            args: z.unknown(),
+            name: z.string().nullish(),
+            args: z.unknown().nullish(),
+            partialArgs: z.array(partialArgSchema).nullish(),
+            willContinue: z.boolean().nullish(),
           }),
           thoughtSignature: z.string().nullish(),
         }),
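The extended functionCall schema accepts the incremental wire format: an opening chunk carrying name plus willContinue: true, partialArgs fragments addressed by jsonPath, and an empty terminal chunk. Illustrative fragments matched by partialArgSchema (paths and values invented):

    const fragments = [
      { jsonPath: '$.city', stringValue: 'Ber', willContinue: true },
      { jsonPath: '$.city', stringValue: 'lin' },
      { jsonPath: '$.days', numberValue: 3 },
    ];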
@@ -947,6 +1319,22 @@ const getContentSchema = () =>
           thought: z.boolean().nullish(),
           thoughtSignature: z.string().nullish(),
         }),
+        z.object({
+          toolCall: z.object({
+            toolType: z.string(),
+            args: z.unknown().nullish(),
+            id: z.string(),
+          }),
+          thoughtSignature: z.string().nullish(),
+        }),
+        z.object({
+          toolResponse: z.object({
+            toolType: z.string(),
+            response: z.unknown().nullish(),
+            id: z.string(),
+          }),
+          thoughtSignature: z.string().nullish(),
+        }),
         z.object({
           executableCode: z
             .object({
@@ -980,6 +1368,15 @@ const getSafetyRatingSchema = () =>
     blocked: z.boolean().nullish(),
   });
 
+const tokenDetailsSchema = z
+  .array(
+    z.object({
+      modality: z.string(),
+      tokenCount: z.number(),
+    }),
+  )
+  .nullish();
+
 const usageSchema = z.object({
   cachedContentTokenCount: z.number().nullish(),
   thoughtsTokenCount: z.number().nullish(),
@@ -988,6 +1385,9 @@ const usageSchema = z.object({
   totalTokenCount: z.number().nullish(),
   // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
   trafficType: z.string().nullish(),
+  // https://ai.google.dev/api/generate-content#Modality
+  promptTokensDetails: tokenDetailsSchema,
+  candidatesTokensDetails: tokenDetailsSchema,
 });
 
 // https://ai.google.dev/api/generate-content#UrlRetrievalMetadata
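The new per-modality token detail fields parse usage payloads like the following (values invented; modality names follow the linked Modality enum):

    // usageMetadata excerpt:
    // {
    //   "promptTokenCount": 378,
    //   "promptTokensDetails": [
    //     { "modality": "TEXT", "tokenCount": 120 },
    //     { "modality": "IMAGE", "tokenCount": 258 }
    //   ]
    // }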
@@ -1023,21 +1423,15 @@ const responseSchema = lazySchema(() =>
           safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
         })
         .nullish(),
+      serviceTier: z.string().nullish(),
     }),
   ),
 );
 
-type ContentSchema = NonNullable<
-  InferSchema<typeof responseSchema>['candidates'][number]['content']
->;
 export type GroundingMetadataSchema = NonNullable<
   InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']
 >;
 
-type GroundingChunkSchema = NonNullable<
-  GroundingMetadataSchema['groundingChunks']
->[number];
-
 export type UrlContextMetadataSchema = NonNullable<
   InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']
 >;
@@ -1078,6 +1472,7 @@ const chunkSchema = lazySchema(() =>
           safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
         })
         .nullish(),
+      serviceTier: z.string().nullish(),
     }),
   ),
 );
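Taken together, serviceTier is now round-tripped: the request body carries the tier from provider options (mapped through VertexServiceTierMap on Vertex), both the response schema and the stream chunk schema parse the tier the API actually served, and doGenerate/doStream echo it back as providerMetadata.serviceTier. A sketch (tier value illustrative; valid names live in VertexServiceTierMap):

    const result = await generateText({
      model: vertex('gemini-2.5-flash'),
      prompt: 'Hello',
      providerOptions: {
        googleVertex: { serviceTier: 'standard' }, // illustrative value
      },
    });

    console.log(result.providerMetadata?.googleVertex?.serviceTier);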