@ai-sdk/google 3.0.10 → 3.0.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/package.json +5 -4
- package/src/__snapshots__/google-generative-ai-embedding-model.test.ts.snap +33 -0
- package/src/convert-google-generative-ai-usage.ts +51 -0
- package/src/convert-json-schema-to-openapi-schema.test.ts +684 -0
- package/src/convert-json-schema-to-openapi-schema.ts +158 -0
- package/src/convert-to-google-generative-ai-messages.test.ts +495 -0
- package/src/convert-to-google-generative-ai-messages.ts +232 -0
- package/src/get-model-path.test.ts +16 -0
- package/src/get-model-path.ts +3 -0
- package/src/google-error.ts +26 -0
- package/src/google-generative-ai-embedding-model.test.ts +204 -0
- package/src/google-generative-ai-embedding-model.ts +159 -0
- package/src/google-generative-ai-embedding-options.ts +52 -0
- package/src/google-generative-ai-image-model.test.ts +411 -0
- package/src/google-generative-ai-image-model.ts +184 -0
- package/src/google-generative-ai-image-settings.ts +12 -0
- package/src/google-generative-ai-language-model.test.ts +4616 -0
- package/src/google-generative-ai-language-model.ts +1009 -0
- package/src/google-generative-ai-options.ts +193 -0
- package/src/google-generative-ai-prompt.ts +38 -0
- package/src/google-prepare-tools.test.ts +474 -0
- package/src/google-prepare-tools.ts +264 -0
- package/src/google-provider.test.ts +307 -0
- package/src/google-provider.ts +201 -0
- package/src/google-supported-file-url.test.ts +57 -0
- package/src/google-supported-file-url.ts +20 -0
- package/src/google-tools.ts +71 -0
- package/src/index.ts +11 -0
- package/src/internal/index.ts +3 -0
- package/src/map-google-generative-ai-finish-reason.ts +29 -0
- package/src/tool/code-execution.ts +35 -0
- package/src/tool/enterprise-web-search.ts +18 -0
- package/src/tool/file-search.ts +51 -0
- package/src/tool/google-maps.ts +14 -0
- package/src/tool/google-search.ts +40 -0
- package/src/tool/url-context.ts +16 -0
- package/src/tool/vertex-rag-store.ts +31 -0
- package/src/version.ts +6 -0
|
@@ -0,0 +1,1009 @@
|
|
|
1
|
+
import {
|
|
2
|
+
LanguageModelV3,
|
|
3
|
+
LanguageModelV3CallOptions,
|
|
4
|
+
LanguageModelV3Content,
|
|
5
|
+
LanguageModelV3FinishReason,
|
|
6
|
+
LanguageModelV3GenerateResult,
|
|
7
|
+
LanguageModelV3Source,
|
|
8
|
+
LanguageModelV3StreamPart,
|
|
9
|
+
LanguageModelV3StreamResult,
|
|
10
|
+
SharedV3ProviderMetadata,
|
|
11
|
+
SharedV3Warning,
|
|
12
|
+
} from '@ai-sdk/provider';
|
|
13
|
+
import {
|
|
14
|
+
combineHeaders,
|
|
15
|
+
createEventSourceResponseHandler,
|
|
16
|
+
createJsonResponseHandler,
|
|
17
|
+
FetchFunction,
|
|
18
|
+
generateId,
|
|
19
|
+
InferSchema,
|
|
20
|
+
lazySchema,
|
|
21
|
+
parseProviderOptions,
|
|
22
|
+
ParseResult,
|
|
23
|
+
postJsonToApi,
|
|
24
|
+
Resolvable,
|
|
25
|
+
resolve,
|
|
26
|
+
zodSchema,
|
|
27
|
+
} from '@ai-sdk/provider-utils';
|
|
28
|
+
import { z } from 'zod/v4';
|
|
29
|
+
import {
|
|
30
|
+
convertGoogleGenerativeAIUsage,
|
|
31
|
+
GoogleGenerativeAIUsageMetadata,
|
|
32
|
+
} from './convert-google-generative-ai-usage';
|
|
33
|
+
import { convertJSONSchemaToOpenAPISchema } from './convert-json-schema-to-openapi-schema';
|
|
34
|
+
import { convertToGoogleGenerativeAIMessages } from './convert-to-google-generative-ai-messages';
|
|
35
|
+
import { getModelPath } from './get-model-path';
|
|
36
|
+
import { googleFailedResponseHandler } from './google-error';
|
|
37
|
+
import {
|
|
38
|
+
GoogleGenerativeAIModelId,
|
|
39
|
+
googleGenerativeAIProviderOptions,
|
|
40
|
+
} from './google-generative-ai-options';
|
|
41
|
+
import { GoogleGenerativeAIContentPart } from './google-generative-ai-prompt';
|
|
42
|
+
import { prepareTools } from './google-prepare-tools';
|
|
43
|
+
import { mapGoogleGenerativeAIFinishReason } from './map-google-generative-ai-finish-reason';
|
|
44
|
+
|
|
45
|
+
/**
 * Configuration shared by all Google Generative AI language model instances
 * created by a provider.
 */
type GoogleGenerativeAIConfig = {
  // Provider identifier, e.g. 'google.generative-ai' or a 'google.vertex.*'
  // variant; also used to decide the provider-options namespace ('google' vs 'vertex').
  provider: string;
  // Base API URL; the model path and endpoint verb are appended per call.
  baseURL: string;
  // Request headers; may be a value or an async resolvable.
  headers: Resolvable<Record<string, string | undefined>>;
  // Optional custom fetch implementation (e.g. for proxying or testing).
  fetch?: FetchFunction;
  // ID generator used for tool-call and source IDs.
  generateId: () => string;

  /**
   * The supported URLs for the model.
   */
  supportedUrls?: () => LanguageModelV3['supportedUrls'];
};
|
|
57
|
+
|
|
58
|
+
/**
 * LanguageModelV3 implementation for the Google Generative AI
 * (`generateContent` / `streamGenerateContent`) API. Maps the SDK's unified
 * prompt/options onto Google request bodies and converts Google response
 * parts (text, thoughts, function calls, inline data, code execution,
 * grounding sources) back into unified content / stream parts.
 */
export class GoogleGenerativeAILanguageModel implements LanguageModelV3 {
  readonly specificationVersion = 'v3';

  readonly modelId: GoogleGenerativeAIModelId;

  private readonly config: GoogleGenerativeAIConfig;
  // NOTE(review): this field is assigned but the methods below call
  // `this.config.generateId` directly, so the `?? generateId` fallback here
  // appears unused — confirm whether this field can be removed or should be
  // the one used throughout.
  private readonly generateId: () => string;

  /**
   * @param modelId - Google model identifier (e.g. 'gemini-…', 'gemma-…').
   * @param config - Provider-level configuration (base URL, headers, fetch, id generator).
   */
  constructor(
    modelId: GoogleGenerativeAIModelId,
    config: GoogleGenerativeAIConfig,
  ) {
    this.modelId = modelId;
    this.config = config;
    this.generateId = config.generateId ?? generateId;
  }

  get provider(): string {
    return this.config.provider;
  }

  get supportedUrls() {
    return this.config.supportedUrls?.() ?? {};
  }

  /**
   * Builds the Google request body (`args`), collects warnings, and resolves
   * which provider-options namespace ('google' or 'vertex') applies.
   * Shared by doGenerate and doStream.
   */
  private async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    responseFormat,
    seed,
    tools,
    toolChoice,
    providerOptions,
  }: LanguageModelV3CallOptions) {
    const warnings: SharedV3Warning[] = [];

    const providerOptionsName = this.config.provider.includes('vertex')
      ? 'vertex'
      : 'google';
    let googleOptions = await parseProviderOptions({
      provider: providerOptionsName,
      providerOptions,
      schema: googleGenerativeAIProviderOptions,
    });

    // Fall back to the 'google' namespace so options written for the plain
    // Google provider still apply when running through Vertex.
    if (googleOptions == null && providerOptionsName !== 'google') {
      googleOptions = await parseProviderOptions({
        provider: 'google',
        providerOptions,
        schema: googleGenerativeAIProviderOptions,
      });
    }

    // Add warning if Vertex rag tools are used with a non-Vertex Google provider
    if (
      tools?.some(
        tool =>
          tool.type === 'provider' && tool.id === 'google.vertex_rag_store',
      ) &&
      !this.config.provider.startsWith('google.vertex.')
    ) {
      warnings.push({
        type: 'other',
        message:
          "The 'vertex_rag_store' tool is only supported with the Google Vertex provider " +
          'and might not be supported or could behave unexpectedly with the current Google provider ' +
          `(${this.config.provider}).`,
      });
    }

    // Gemma models do not accept a system instruction; it is dropped below.
    const isGemmaModel = this.modelId.toLowerCase().startsWith('gemma-');

    const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
      prompt,
      { isGemmaModel, providerOptionsName },
    );

    const {
      tools: googleTools,
      toolConfig: googleToolConfig,
      toolWarnings,
    } = prepareTools({
      tools,
      toolChoice,
      modelId: this.modelId,
    });

    return {
      args: {
        generationConfig: {
          // standardized settings:
          maxOutputTokens,
          temperature,
          topK,
          topP,
          frequencyPenalty,
          presencePenalty,
          stopSequences,
          seed,

          // response format:
          responseMimeType:
            responseFormat?.type === 'json' ? 'application/json' : undefined,
          responseSchema:
            responseFormat?.type === 'json' &&
            responseFormat.schema != null &&
            // Google GenAI does not support all OpenAPI Schema features,
            // so this is needed as an escape hatch:
            // TODO convert into provider option
            (googleOptions?.structuredOutputs ?? true)
              ? convertJSONSchemaToOpenAPISchema(responseFormat.schema)
              : undefined,
          ...(googleOptions?.audioTimestamp && {
            audioTimestamp: googleOptions.audioTimestamp,
          }),

          // provider options:
          responseModalities: googleOptions?.responseModalities,
          thinkingConfig: googleOptions?.thinkingConfig,
          ...(googleOptions?.mediaResolution && {
            mediaResolution: googleOptions.mediaResolution,
          }),
          ...(googleOptions?.imageConfig && {
            imageConfig: googleOptions.imageConfig,
          }),
        },
        contents,
        systemInstruction: isGemmaModel ? undefined : systemInstruction,
        safetySettings: googleOptions?.safetySettings,
        tools: googleTools,
        toolConfig: googleOptions?.retrievalConfig
          ? {
              ...googleToolConfig,
              retrievalConfig: googleOptions.retrievalConfig,
            }
          : googleToolConfig,
        cachedContent: googleOptions?.cachedContent,
        labels: googleOptions?.labels,
      },
      warnings: [...warnings, ...toolWarnings],
      providerOptionsName,
    };
  }

  /**
   * Non-streaming generation via `:generateContent`. Converts the first
   * candidate's parts into unified content in their original order, then
   * appends grounding sources.
   */
  async doGenerate(
    options: LanguageModelV3CallOptions,
  ): Promise<LanguageModelV3GenerateResult> {
    const { args, warnings, providerOptionsName } = await this.getArgs(options);

    const mergedHeaders = combineHeaders(
      await resolve(this.config.headers),
      options.headers,
    );

    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse,
    } = await postJsonToApi({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId,
      )}:generateContent`,
      headers: mergedHeaders,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(responseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    const candidate = response.candidates[0];
    const content: Array<LanguageModelV3Content> = [];

    // map ordered parts to content:
    const parts = candidate.content?.parts ?? [];

    const usageMetadata = response.usageMetadata;

    // Associates a code execution result with its preceding call.
    let lastCodeExecutionToolCallId: string | undefined;

    // Build content array from all parts
    for (const part of parts) {
      if ('executableCode' in part && part.executableCode?.code) {
        const toolCallId = this.config.generateId();
        lastCodeExecutionToolCallId = toolCallId;

        content.push({
          type: 'tool-call',
          toolCallId,
          toolName: 'code_execution',
          input: JSON.stringify(part.executableCode),
          providerExecuted: true,
        });
      } else if ('codeExecutionResult' in part && part.codeExecutionResult) {
        content.push({
          type: 'tool-result',
          // Assumes a result directly follows its corresponding call part.
          toolCallId: lastCodeExecutionToolCallId!,
          toolName: 'code_execution',
          result: {
            outcome: part.codeExecutionResult.outcome,
            output: part.codeExecutionResult.output,
          },
        });
        // Clear the ID after use to avoid accidental reuse.
        lastCodeExecutionToolCallId = undefined;
      } else if ('text' in part && part.text != null && part.text.length > 0) {
        // `thought: true` marks Gemini "thinking" output → reasoning content.
        content.push({
          type: part.thought === true ? 'reasoning' : 'text',
          text: part.text,
          providerMetadata: part.thoughtSignature
            ? {
                [providerOptionsName]: {
                  thoughtSignature: part.thoughtSignature,
                },
              }
            : undefined,
        });
      } else if ('functionCall' in part) {
        content.push({
          type: 'tool-call' as const,
          toolCallId: this.config.generateId(),
          toolName: part.functionCall.name,
          input: JSON.stringify(part.functionCall.args),
          providerMetadata: part.thoughtSignature
            ? {
                [providerOptionsName]: {
                  thoughtSignature: part.thoughtSignature,
                },
              }
            : undefined,
        });
      } else if ('inlineData' in part) {
        content.push({
          type: 'file' as const,
          data: part.inlineData.data,
          mediaType: part.inlineData.mimeType,
          providerMetadata: part.thoughtSignature
            ? {
                [providerOptionsName]: {
                  thoughtSignature: part.thoughtSignature,
                },
              }
            : undefined,
        });
      }
    }

    // Grounding (search/retrieval) citations are appended after the parts.
    const sources =
      extractSources({
        groundingMetadata: candidate.groundingMetadata,
        generateId: this.config.generateId,
      }) ?? [];
    for (const source of sources) {
      content.push(source);
    }

    return {
      content,
      finishReason: {
        unified: mapGoogleGenerativeAIFinishReason({
          finishReason: candidate.finishReason,
          // Only count client-executed tool calls for finish reason determination.
          hasToolCalls: content.some(
            part => part.type === 'tool-call' && !part.providerExecuted,
          ),
        }),
        raw: candidate.finishReason ?? undefined,
      },
      usage: convertGoogleGenerativeAIUsage(usageMetadata),
      warnings,
      providerMetadata: {
        [providerOptionsName]: {
          promptFeedback: response.promptFeedback ?? null,
          groundingMetadata: candidate.groundingMetadata ?? null,
          urlContextMetadata: candidate.urlContextMetadata ?? null,
          safetyRatings: candidate.safetyRatings ?? null,
          usageMetadata: usageMetadata ?? null,
        },
      },
      request: { body: args },
      response: {
        // TODO timestamp, model id, id
        headers: responseHeaders,
        body: rawResponse,
      },
    };
  }

  /**
   * Streaming generation via `:streamGenerateContent?alt=sse`. Transforms the
   * SSE chunk stream into unified stream parts, grouping consecutive text /
   * reasoning parts into blocks and deduplicating grounding source URLs.
   */
  async doStream(
    options: LanguageModelV3CallOptions,
  ): Promise<LanguageModelV3StreamResult> {
    const { args, warnings, providerOptionsName } = await this.getArgs(options);

    const headers = combineHeaders(
      await resolve(this.config.headers),
      options.headers,
    );

    const { responseHeaders, value: response } = await postJsonToApi({
      url: `${this.config.baseURL}/${getModelPath(
        this.modelId,
      )}:streamGenerateContent?alt=sse`,
      headers,
      body: args,
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(chunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    let finishReason: LanguageModelV3FinishReason = {
      unified: 'other',
      raw: undefined,
    };
    let usage: GoogleGenerativeAIUsageMetadata | undefined = undefined;
    let providerMetadata: SharedV3ProviderMetadata | undefined = undefined;

    // Captured so the TransformStream callbacks (which rebind `this`) can use it.
    const generateId = this.config.generateId;
    let hasToolCalls = false;

    // Track active blocks to group consecutive parts of same type
    let currentTextBlockId: string | null = null;
    let currentReasoningBlockId: string | null = null;
    let blockCounter = 0;

    // Track emitted sources to prevent duplicates
    const emittedSourceUrls = new Set<string>();
    // Associates a code execution result with its preceding call.
    let lastCodeExecutionToolCallId: string | undefined;

    return {
      stream: response.pipeThrough(
        new TransformStream<
          ParseResult<ChunkSchema>,
          LanguageModelV3StreamPart
        >({
          start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
          },

          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
            }

            if (!chunk.success) {
              controller.enqueue({ type: 'error', error: chunk.error });
              return;
            }

            const value = chunk.value;

            const usageMetadata = value.usageMetadata;

            if (usageMetadata != null) {
              usage = usageMetadata;
            }

            const candidate = value.candidates?.[0];

            // sometimes the API returns an empty candidates array
            if (candidate == null) {
              return;
            }

            const content = candidate.content;

            const sources = extractSources({
              groundingMetadata: candidate.groundingMetadata,
              generateId,
            });
            if (sources != null) {
              for (const source of sources) {
                if (
                  source.sourceType === 'url' &&
                  !emittedSourceUrls.has(source.url)
                ) {
                  emittedSourceUrls.add(source.url);
                  controller.enqueue(source);
                }
              }
            }

            // Process tool call's parts before determining finishReason to ensure hasToolCalls is properly set
            if (content != null) {
              // Process all parts in a single loop to preserve original order
              const parts = content.parts ?? [];
              for (const part of parts) {
                if ('executableCode' in part && part.executableCode?.code) {
                  const toolCallId = generateId();
                  lastCodeExecutionToolCallId = toolCallId;

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId,
                    toolName: 'code_execution',
                    input: JSON.stringify(part.executableCode),
                    providerExecuted: true,
                  });
                } else if (
                  'codeExecutionResult' in part &&
                  part.codeExecutionResult
                ) {
                  // Assumes a result directly follows its corresponding call part.
                  const toolCallId = lastCodeExecutionToolCallId;

                  if (toolCallId) {
                    controller.enqueue({
                      type: 'tool-result',
                      toolCallId,
                      toolName: 'code_execution',
                      result: {
                        outcome: part.codeExecutionResult.outcome,
                        output: part.codeExecutionResult.output,
                      },
                    });
                    // Clear the ID after use.
                    lastCodeExecutionToolCallId = undefined;
                  }
                } else if (
                  'text' in part &&
                  part.text != null &&
                  part.text.length > 0
                ) {
                  if (part.thought === true) {
                    // End any active text block before starting reasoning
                    if (currentTextBlockId !== null) {
                      controller.enqueue({
                        type: 'text-end',
                        id: currentTextBlockId,
                      });
                      currentTextBlockId = null;
                    }

                    // Start new reasoning block if not already active
                    if (currentReasoningBlockId === null) {
                      currentReasoningBlockId = String(blockCounter++);
                      controller.enqueue({
                        type: 'reasoning-start',
                        id: currentReasoningBlockId,
                        providerMetadata: part.thoughtSignature
                          ? {
                              [providerOptionsName]: {
                                thoughtSignature: part.thoughtSignature,
                              },
                            }
                          : undefined,
                      });
                    }

                    controller.enqueue({
                      type: 'reasoning-delta',
                      id: currentReasoningBlockId,
                      delta: part.text,
                      providerMetadata: part.thoughtSignature
                        ? {
                            [providerOptionsName]: {
                              thoughtSignature: part.thoughtSignature,
                            },
                          }
                        : undefined,
                    });
                  } else {
                    // End any active reasoning block before starting text
                    if (currentReasoningBlockId !== null) {
                      controller.enqueue({
                        type: 'reasoning-end',
                        id: currentReasoningBlockId,
                      });
                      currentReasoningBlockId = null;
                    }

                    // Start new text block if not already active
                    if (currentTextBlockId === null) {
                      currentTextBlockId = String(blockCounter++);
                      controller.enqueue({
                        type: 'text-start',
                        id: currentTextBlockId,
                        providerMetadata: part.thoughtSignature
                          ? {
                              [providerOptionsName]: {
                                thoughtSignature: part.thoughtSignature,
                              },
                            }
                          : undefined,
                      });
                    }

                    controller.enqueue({
                      type: 'text-delta',
                      id: currentTextBlockId,
                      delta: part.text,
                      providerMetadata: part.thoughtSignature
                        ? {
                            [providerOptionsName]: {
                              thoughtSignature: part.thoughtSignature,
                            },
                          }
                        : undefined,
                    });
                  }
                } else if ('inlineData' in part) {
                  // Process file parts inline to preserve order with text
                  controller.enqueue({
                    type: 'file',
                    mediaType: part.inlineData.mimeType,
                    data: part.inlineData.data,
                  });
                }
              }

              const toolCallDeltas = getToolCallsFromParts({
                parts: content.parts,
                generateId,
                providerOptionsName,
              });

              if (toolCallDeltas != null) {
                for (const toolCall of toolCallDeltas) {
                  // Emit the full input lifecycle (start/delta/end) followed by
                  // the complete tool call, since Google delivers calls whole.
                  controller.enqueue({
                    type: 'tool-input-start',
                    id: toolCall.toolCallId,
                    toolName: toolCall.toolName,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  controller.enqueue({
                    type: 'tool-input-delta',
                    id: toolCall.toolCallId,
                    delta: toolCall.args,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  controller.enqueue({
                    type: 'tool-input-end',
                    id: toolCall.toolCallId,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  controller.enqueue({
                    type: 'tool-call',
                    toolCallId: toolCall.toolCallId,
                    toolName: toolCall.toolName,
                    input: toolCall.args,
                    providerMetadata: toolCall.providerMetadata,
                  });

                  hasToolCalls = true;
                }
              }
            }

            if (candidate.finishReason != null) {
              finishReason = {
                unified: mapGoogleGenerativeAIFinishReason({
                  finishReason: candidate.finishReason,
                  hasToolCalls,
                }),
                raw: candidate.finishReason,
              };

              providerMetadata = {
                [providerOptionsName]: {
                  promptFeedback: value.promptFeedback ?? null,
                  groundingMetadata: candidate.groundingMetadata ?? null,
                  urlContextMetadata: candidate.urlContextMetadata ?? null,
                  safetyRatings: candidate.safetyRatings ?? null,
                },
              };
              if (usageMetadata != null) {
                (
                  providerMetadata[providerOptionsName] as Record<
                    string,
                    unknown
                  >
                ).usageMetadata = usageMetadata;
              }
            }
          },

          flush(controller) {
            // Close any open blocks before finishing
            if (currentTextBlockId !== null) {
              controller.enqueue({
                type: 'text-end',
                id: currentTextBlockId,
              });
            }
            if (currentReasoningBlockId !== null) {
              controller.enqueue({
                type: 'reasoning-end',
                id: currentReasoningBlockId,
              });
            }

            controller.enqueue({
              type: 'finish',
              finishReason,
              usage: convertGoogleGenerativeAIUsage(usage),
              providerMetadata,
            });
          },
        }),
      ),
      response: { headers: responseHeaders },
      request: { body: args },
    };
  }
}
|
|
675
|
+
|
|
676
|
+
function getToolCallsFromParts({
|
|
677
|
+
parts,
|
|
678
|
+
generateId,
|
|
679
|
+
providerOptionsName,
|
|
680
|
+
}: {
|
|
681
|
+
parts: ContentSchema['parts'];
|
|
682
|
+
generateId: () => string;
|
|
683
|
+
providerOptionsName: string;
|
|
684
|
+
}) {
|
|
685
|
+
const functionCallParts = parts?.filter(
|
|
686
|
+
part => 'functionCall' in part,
|
|
687
|
+
) as Array<
|
|
688
|
+
GoogleGenerativeAIContentPart & {
|
|
689
|
+
functionCall: { name: string; args: unknown };
|
|
690
|
+
thoughtSignature?: string | null;
|
|
691
|
+
}
|
|
692
|
+
>;
|
|
693
|
+
|
|
694
|
+
return functionCallParts == null || functionCallParts.length === 0
|
|
695
|
+
? undefined
|
|
696
|
+
: functionCallParts.map(part => ({
|
|
697
|
+
type: 'tool-call' as const,
|
|
698
|
+
toolCallId: generateId(),
|
|
699
|
+
toolName: part.functionCall.name,
|
|
700
|
+
args: JSON.stringify(part.functionCall.args),
|
|
701
|
+
providerMetadata: part.thoughtSignature
|
|
702
|
+
? {
|
|
703
|
+
[providerOptionsName]: {
|
|
704
|
+
thoughtSignature: part.thoughtSignature,
|
|
705
|
+
},
|
|
706
|
+
}
|
|
707
|
+
: undefined,
|
|
708
|
+
}));
|
|
709
|
+
}
|
|
710
|
+
|
|
711
|
+
function extractSources({
|
|
712
|
+
groundingMetadata,
|
|
713
|
+
generateId,
|
|
714
|
+
}: {
|
|
715
|
+
groundingMetadata: GroundingMetadataSchema | undefined | null;
|
|
716
|
+
generateId: () => string;
|
|
717
|
+
}): undefined | LanguageModelV3Source[] {
|
|
718
|
+
if (!groundingMetadata?.groundingChunks) {
|
|
719
|
+
return undefined;
|
|
720
|
+
}
|
|
721
|
+
|
|
722
|
+
const sources: LanguageModelV3Source[] = [];
|
|
723
|
+
|
|
724
|
+
for (const chunk of groundingMetadata.groundingChunks) {
|
|
725
|
+
if (chunk.web != null) {
|
|
726
|
+
// Handle web chunks as URL sources
|
|
727
|
+
sources.push({
|
|
728
|
+
type: 'source',
|
|
729
|
+
sourceType: 'url',
|
|
730
|
+
id: generateId(),
|
|
731
|
+
url: chunk.web.uri,
|
|
732
|
+
title: chunk.web.title ?? undefined,
|
|
733
|
+
});
|
|
734
|
+
} else if (chunk.retrievedContext != null) {
|
|
735
|
+
// Handle retrievedContext chunks from RAG operations
|
|
736
|
+
const uri = chunk.retrievedContext.uri;
|
|
737
|
+
const fileSearchStore = chunk.retrievedContext.fileSearchStore;
|
|
738
|
+
|
|
739
|
+
if (uri && (uri.startsWith('http://') || uri.startsWith('https://'))) {
|
|
740
|
+
// Old format: Google Search with HTTP/HTTPS URL
|
|
741
|
+
sources.push({
|
|
742
|
+
type: 'source',
|
|
743
|
+
sourceType: 'url',
|
|
744
|
+
id: generateId(),
|
|
745
|
+
url: uri,
|
|
746
|
+
title: chunk.retrievedContext.title ?? undefined,
|
|
747
|
+
});
|
|
748
|
+
} else if (uri) {
|
|
749
|
+
// Old format: Document with file path (gs://, etc.)
|
|
750
|
+
const title = chunk.retrievedContext.title ?? 'Unknown Document';
|
|
751
|
+
let mediaType = 'application/octet-stream';
|
|
752
|
+
let filename: string | undefined = undefined;
|
|
753
|
+
|
|
754
|
+
if (uri.endsWith('.pdf')) {
|
|
755
|
+
mediaType = 'application/pdf';
|
|
756
|
+
filename = uri.split('/').pop();
|
|
757
|
+
} else if (uri.endsWith('.txt')) {
|
|
758
|
+
mediaType = 'text/plain';
|
|
759
|
+
filename = uri.split('/').pop();
|
|
760
|
+
} else if (uri.endsWith('.docx')) {
|
|
761
|
+
mediaType =
|
|
762
|
+
'application/vnd.openxmlformats-officedocument.wordprocessingml.document';
|
|
763
|
+
filename = uri.split('/').pop();
|
|
764
|
+
} else if (uri.endsWith('.doc')) {
|
|
765
|
+
mediaType = 'application/msword';
|
|
766
|
+
filename = uri.split('/').pop();
|
|
767
|
+
} else if (uri.match(/\.(md|markdown)$/)) {
|
|
768
|
+
mediaType = 'text/markdown';
|
|
769
|
+
filename = uri.split('/').pop();
|
|
770
|
+
} else {
|
|
771
|
+
filename = uri.split('/').pop();
|
|
772
|
+
}
|
|
773
|
+
|
|
774
|
+
sources.push({
|
|
775
|
+
type: 'source',
|
|
776
|
+
sourceType: 'document',
|
|
777
|
+
id: generateId(),
|
|
778
|
+
mediaType,
|
|
779
|
+
title,
|
|
780
|
+
filename,
|
|
781
|
+
});
|
|
782
|
+
} else if (fileSearchStore) {
|
|
783
|
+
// New format: File Search with fileSearchStore (no uri)
|
|
784
|
+
const title = chunk.retrievedContext.title ?? 'Unknown Document';
|
|
785
|
+
sources.push({
|
|
786
|
+
type: 'source',
|
|
787
|
+
sourceType: 'document',
|
|
788
|
+
id: generateId(),
|
|
789
|
+
mediaType: 'application/octet-stream',
|
|
790
|
+
title,
|
|
791
|
+
filename: fileSearchStore.split('/').pop(),
|
|
792
|
+
});
|
|
793
|
+
}
|
|
794
|
+
} else if (chunk.maps != null) {
|
|
795
|
+
if (chunk.maps.uri) {
|
|
796
|
+
sources.push({
|
|
797
|
+
type: 'source',
|
|
798
|
+
sourceType: 'url',
|
|
799
|
+
id: generateId(),
|
|
800
|
+
url: chunk.maps.uri,
|
|
801
|
+
title: chunk.maps.title ?? undefined,
|
|
802
|
+
});
|
|
803
|
+
}
|
|
804
|
+
}
|
|
805
|
+
}
|
|
806
|
+
|
|
807
|
+
return sources.length > 0 ? sources : undefined;
|
|
808
|
+
}
|
|
809
|
+
|
|
810
|
+
export const getGroundingMetadataSchema = () =>
|
|
811
|
+
z.object({
|
|
812
|
+
webSearchQueries: z.array(z.string()).nullish(),
|
|
813
|
+
retrievalQueries: z.array(z.string()).nullish(),
|
|
814
|
+
searchEntryPoint: z.object({ renderedContent: z.string() }).nullish(),
|
|
815
|
+
groundingChunks: z
|
|
816
|
+
.array(
|
|
817
|
+
z.object({
|
|
818
|
+
web: z
|
|
819
|
+
.object({ uri: z.string(), title: z.string().nullish() })
|
|
820
|
+
.nullish(),
|
|
821
|
+
retrievedContext: z
|
|
822
|
+
.object({
|
|
823
|
+
uri: z.string().nullish(),
|
|
824
|
+
title: z.string().nullish(),
|
|
825
|
+
text: z.string().nullish(),
|
|
826
|
+
fileSearchStore: z.string().nullish(),
|
|
827
|
+
})
|
|
828
|
+
.nullish(),
|
|
829
|
+
maps: z
|
|
830
|
+
.object({
|
|
831
|
+
uri: z.string().nullish(),
|
|
832
|
+
title: z.string().nullish(),
|
|
833
|
+
text: z.string().nullish(),
|
|
834
|
+
placeId: z.string().nullish(),
|
|
835
|
+
})
|
|
836
|
+
.nullish(),
|
|
837
|
+
}),
|
|
838
|
+
)
|
|
839
|
+
.nullish(),
|
|
840
|
+
groundingSupports: z
|
|
841
|
+
.array(
|
|
842
|
+
z.object({
|
|
843
|
+
segment: z.object({
|
|
844
|
+
startIndex: z.number().nullish(),
|
|
845
|
+
endIndex: z.number().nullish(),
|
|
846
|
+
text: z.string().nullish(),
|
|
847
|
+
}),
|
|
848
|
+
segment_text: z.string().nullish(),
|
|
849
|
+
groundingChunkIndices: z.array(z.number()).nullish(),
|
|
850
|
+
supportChunkIndices: z.array(z.number()).nullish(),
|
|
851
|
+
confidenceScores: z.array(z.number()).nullish(),
|
|
852
|
+
confidenceScore: z.array(z.number()).nullish(),
|
|
853
|
+
}),
|
|
854
|
+
)
|
|
855
|
+
.nullish(),
|
|
856
|
+
retrievalMetadata: z
|
|
857
|
+
.union([
|
|
858
|
+
z.object({
|
|
859
|
+
webDynamicRetrievalScore: z.number(),
|
|
860
|
+
}),
|
|
861
|
+
z.object({}),
|
|
862
|
+
])
|
|
863
|
+
.nullish(),
|
|
864
|
+
});
|
|
865
|
+
|
|
866
|
+
const getContentSchema = () =>
|
|
867
|
+
z.object({
|
|
868
|
+
parts: z
|
|
869
|
+
.array(
|
|
870
|
+
z.union([
|
|
871
|
+
// note: order matters since text can be fully empty
|
|
872
|
+
z.object({
|
|
873
|
+
functionCall: z.object({
|
|
874
|
+
name: z.string(),
|
|
875
|
+
args: z.unknown(),
|
|
876
|
+
}),
|
|
877
|
+
thoughtSignature: z.string().nullish(),
|
|
878
|
+
}),
|
|
879
|
+
z.object({
|
|
880
|
+
inlineData: z.object({
|
|
881
|
+
mimeType: z.string(),
|
|
882
|
+
data: z.string(),
|
|
883
|
+
}),
|
|
884
|
+
thoughtSignature: z.string().nullish(),
|
|
885
|
+
}),
|
|
886
|
+
z.object({
|
|
887
|
+
executableCode: z
|
|
888
|
+
.object({
|
|
889
|
+
language: z.string(),
|
|
890
|
+
code: z.string(),
|
|
891
|
+
})
|
|
892
|
+
.nullish(),
|
|
893
|
+
codeExecutionResult: z
|
|
894
|
+
.object({
|
|
895
|
+
outcome: z.string(),
|
|
896
|
+
output: z.string(),
|
|
897
|
+
})
|
|
898
|
+
.nullish(),
|
|
899
|
+
text: z.string().nullish(),
|
|
900
|
+
thought: z.boolean().nullish(),
|
|
901
|
+
thoughtSignature: z.string().nullish(),
|
|
902
|
+
}),
|
|
903
|
+
]),
|
|
904
|
+
)
|
|
905
|
+
.nullish(),
|
|
906
|
+
});
|
|
907
|
+
|
|
908
|
+
// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters
// Per-category safety rating attached to candidates and prompt feedback.
// Every field is nullish so responses carrying only a subset of the
// fields still parse.
const getSafetyRatingSchema = () =>
  z.object({
    category: z.string().nullish(),
    probability: z.string().nullish(),
    // numeric scores may accompany the string labels
    probabilityScore: z.number().nullish(),
    severity: z.string().nullish(),
    severityScore: z.number().nullish(),
    blocked: z.boolean().nullish(),
  });
|
|
918
|
+
|
|
919
|
+
// Token usage metadata reported with responses and stream chunks; all
// counts are nullish since fields may be omitted by the API.
const usageSchema = z.object({
  cachedContentTokenCount: z.number().nullish(),
  thoughtsTokenCount: z.number().nullish(),
  promptTokenCount: z.number().nullish(),
  candidatesTokenCount: z.number().nullish(),
  totalTokenCount: z.number().nullish(),
  // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
  trafficType: z.string().nullish(),
});
|
|
928
|
+
|
|
929
|
+
// https://ai.google.dev/api/generate-content#UrlRetrievalMetadata
|
|
930
|
+
export const getUrlContextMetadataSchema = () =>
|
|
931
|
+
z.object({
|
|
932
|
+
urlMetadata: z.array(
|
|
933
|
+
z.object({
|
|
934
|
+
retrievedUrl: z.string(),
|
|
935
|
+
urlRetrievalStatus: z.string(),
|
|
936
|
+
}),
|
|
937
|
+
),
|
|
938
|
+
});
|
|
939
|
+
|
|
940
|
+
const responseSchema = lazySchema(() =>
|
|
941
|
+
zodSchema(
|
|
942
|
+
z.object({
|
|
943
|
+
candidates: z.array(
|
|
944
|
+
z.object({
|
|
945
|
+
content: getContentSchema().nullish().or(z.object({}).strict()),
|
|
946
|
+
finishReason: z.string().nullish(),
|
|
947
|
+
safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
|
|
948
|
+
groundingMetadata: getGroundingMetadataSchema().nullish(),
|
|
949
|
+
urlContextMetadata: getUrlContextMetadataSchema().nullish(),
|
|
950
|
+
}),
|
|
951
|
+
),
|
|
952
|
+
usageMetadata: usageSchema.nullish(),
|
|
953
|
+
promptFeedback: z
|
|
954
|
+
.object({
|
|
955
|
+
blockReason: z.string().nullish(),
|
|
956
|
+
safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
|
|
957
|
+
})
|
|
958
|
+
.nullish(),
|
|
959
|
+
}),
|
|
960
|
+
),
|
|
961
|
+
);
|
|
962
|
+
|
|
963
|
+
// Types derived from the response schema so they automatically stay in
// sync with it. NonNullable strips the nullish wrappers added above.
type ContentSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['content']
>;
export type GroundingMetadataSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['groundingMetadata']
>;

// A single entry of groundingMetadata.groundingChunks.
type GroundingChunkSchema = NonNullable<
  GroundingMetadataSchema['groundingChunks']
>[number];

export type UrlContextMetadataSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['urlContextMetadata']
>;

// A single entry of a candidate's safetyRatings array.
export type SafetyRatingSchema = NonNullable<
  InferSchema<typeof responseSchema>['candidates'][number]['safetyRatings']
>[number];
|
|
981
|
+
|
|
982
|
+
// limited version of the schema, focussed on what is needed for the implementation
|
|
983
|
+
// this approach limits breakages when the API changes and increases efficiency
|
|
984
|
+
const chunkSchema = lazySchema(() =>
|
|
985
|
+
zodSchema(
|
|
986
|
+
z.object({
|
|
987
|
+
candidates: z
|
|
988
|
+
.array(
|
|
989
|
+
z.object({
|
|
990
|
+
content: getContentSchema().nullish(),
|
|
991
|
+
finishReason: z.string().nullish(),
|
|
992
|
+
safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
|
|
993
|
+
groundingMetadata: getGroundingMetadataSchema().nullish(),
|
|
994
|
+
urlContextMetadata: getUrlContextMetadataSchema().nullish(),
|
|
995
|
+
}),
|
|
996
|
+
)
|
|
997
|
+
.nullish(),
|
|
998
|
+
usageMetadata: usageSchema.nullish(),
|
|
999
|
+
promptFeedback: z
|
|
1000
|
+
.object({
|
|
1001
|
+
blockReason: z.string().nullish(),
|
|
1002
|
+
safetyRatings: z.array(getSafetyRatingSchema()).nullish(),
|
|
1003
|
+
})
|
|
1004
|
+
.nullish(),
|
|
1005
|
+
}),
|
|
1006
|
+
),
|
|
1007
|
+
);
|
|
1008
|
+
|
|
1009
|
+
// Inferred TypeScript type for a single streaming response chunk.
type ChunkSchema = InferSchema<typeof chunkSchema>;
|