@ai-sdk/google 3.0.9 → 3.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +14 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/index.mjs +1 -1
  5. package/dist/index.mjs.map +1 -1
  6. package/package.json +5 -4
  7. package/src/__snapshots__/google-generative-ai-embedding-model.test.ts.snap +33 -0
  8. package/src/convert-google-generative-ai-usage.ts +51 -0
  9. package/src/convert-json-schema-to-openapi-schema.test.ts +684 -0
  10. package/src/convert-json-schema-to-openapi-schema.ts +158 -0
  11. package/src/convert-to-google-generative-ai-messages.test.ts +495 -0
  12. package/src/convert-to-google-generative-ai-messages.ts +232 -0
  13. package/src/get-model-path.test.ts +16 -0
  14. package/src/get-model-path.ts +3 -0
  15. package/src/google-error.ts +26 -0
  16. package/src/google-generative-ai-embedding-model.test.ts +204 -0
  17. package/src/google-generative-ai-embedding-model.ts +159 -0
  18. package/src/google-generative-ai-embedding-options.ts +52 -0
  19. package/src/google-generative-ai-image-model.test.ts +411 -0
  20. package/src/google-generative-ai-image-model.ts +184 -0
  21. package/src/google-generative-ai-image-settings.ts +12 -0
  22. package/src/google-generative-ai-language-model.test.ts +4616 -0
  23. package/src/google-generative-ai-language-model.ts +1009 -0
  24. package/src/google-generative-ai-options.ts +193 -0
  25. package/src/google-generative-ai-prompt.ts +38 -0
  26. package/src/google-prepare-tools.test.ts +474 -0
  27. package/src/google-prepare-tools.ts +264 -0
  28. package/src/google-provider.test.ts +307 -0
  29. package/src/google-provider.ts +201 -0
  30. package/src/google-supported-file-url.test.ts +57 -0
  31. package/src/google-supported-file-url.ts +20 -0
  32. package/src/google-tools.ts +71 -0
  33. package/src/index.ts +11 -0
  34. package/src/internal/index.ts +3 -0
  35. package/src/map-google-generative-ai-finish-reason.ts +29 -0
  36. package/src/tool/code-execution.ts +35 -0
  37. package/src/tool/enterprise-web-search.ts +18 -0
  38. package/src/tool/file-search.ts +51 -0
  39. package/src/tool/google-maps.ts +14 -0
  40. package/src/tool/google-search.ts +40 -0
  41. package/src/tool/url-context.ts +16 -0
  42. package/src/tool/vertex-rag-store.ts +31 -0
  43. package/src/version.ts +6 -0
import {
  LanguageModelV3Prompt,
  UnsupportedFunctionalityError,
} from '@ai-sdk/provider';
import {
  GoogleGenerativeAIContent,
  GoogleGenerativeAIContentPart,
  GoogleGenerativeAIPrompt,
} from './google-generative-ai-prompt';
import { convertToBase64 } from '@ai-sdk/provider-utils';

/**
 * Converts an AI SDK prompt into the Google Generative AI wire format.
 *
 * System messages are collected into a `systemInstruction` and are only
 * permitted before the first user/assistant/tool message. For Gemma models
 * (which do not support `systemInstruction`), the collected system text is
 * instead prepended to the first user message.
 *
 * @param prompt - The AI SDK prompt to convert.
 * @param options - `isGemmaModel` switches system-message handling (see
 *   above); `providerOptionsName` is the provider-options key that
 *   `thoughtSignature` is read from (defaults to 'google').
 * @returns The Google Generative AI prompt (system instruction + contents).
 * @throws UnsupportedFunctionalityError when a system message appears after
 *   the conversation has started, or when an assistant message contains a
 *   file part given as a URL.
 */
export function convertToGoogleGenerativeAIMessages(
  prompt: LanguageModelV3Prompt,
  options?: { isGemmaModel?: boolean; providerOptionsName?: string },
): GoogleGenerativeAIPrompt {
  const systemInstructionParts: Array<{ text: string }> = [];
  const contents: Array<GoogleGenerativeAIContent> = [];
  // Flipped to false as soon as the first non-system message is seen.
  let systemMessagesAllowed = true;
  const isGemmaModel = options?.isGemmaModel ?? false;
  const providerOptionsName = options?.providerOptionsName ?? 'google';

  for (const { role, content } of prompt) {
    switch (role) {
      case 'system': {
        if (!systemMessagesAllowed) {
          throw new UnsupportedFunctionalityError({
            functionality:
              'system messages are only supported at the beginning of the conversation',
          });
        }

        systemInstructionParts.push({ text: content });
        break;
      }

      case 'user': {
        systemMessagesAllowed = false;

        const parts: GoogleGenerativeAIContentPart[] = [];

        for (const part of content) {
          switch (part.type) {
            case 'text': {
              parts.push({ text: part.text });
              break;
            }

            case 'file': {
              // default to image/jpeg for unknown image/* types
              const mediaType =
                part.mediaType === 'image/*' ? 'image/jpeg' : part.mediaType;

              // URLs are passed by reference (fileData); raw data is
              // inlined as base64 (inlineData).
              parts.push(
                part.data instanceof URL
                  ? {
                      fileData: {
                        mimeType: mediaType,
                        fileUri: part.data.toString(),
                      },
                    }
                  : {
                      inlineData: {
                        mimeType: mediaType,
                        data: convertToBase64(part.data),
                      },
                    },
              );

              break;
            }
          }
        }

        contents.push({ role: 'user', parts });
        break;
      }

      case 'assistant': {
        systemMessagesAllowed = false;

        contents.push({
          role: 'model',
          parts: content
            .map(part => {
              // Carry through a provider-specific thought signature, if any.
              const providerOpts = part.providerOptions?.[providerOptionsName];
              const thoughtSignature =
                providerOpts?.thoughtSignature != null
                  ? String(providerOpts.thoughtSignature)
                  : undefined;

              switch (part.type) {
                case 'text': {
                  // Empty text parts are dropped (filtered out below).
                  return part.text.length === 0
                    ? undefined
                    : {
                        text: part.text,
                        thoughtSignature,
                      };
                }

                case 'reasoning': {
                  // Reasoning is sent as a text part flagged as a thought.
                  return part.text.length === 0
                    ? undefined
                    : {
                        text: part.text,
                        thought: true,
                        thoughtSignature,
                      };
                }

                case 'file': {
                  if (part.data instanceof URL) {
                    throw new UnsupportedFunctionalityError({
                      functionality:
                        'File data URLs in assistant messages are not supported',
                    });
                  }

                  return {
                    inlineData: {
                      mimeType: part.mediaType,
                      data: convertToBase64(part.data),
                    },
                    thoughtSignature,
                  };
                }

                case 'tool-call': {
                  return {
                    functionCall: {
                      name: part.toolName,
                      args: part.input,
                    },
                    thoughtSignature,
                  };
                }
              }
            })
            .filter(part => part !== undefined),
        });
        break;
      }

      case 'tool': {
        systemMessagesAllowed = false;

        const parts: GoogleGenerativeAIContentPart[] = [];

        for (const part of content) {
          // Approval responses have no wire representation; skip them.
          if (part.type === 'tool-approval-response') {
            continue;
          }
          const output = part.output;

          if (output.type === 'content') {
            for (const contentPart of output.value) {
              switch (contentPart.type) {
                case 'text':
                  parts.push({
                    functionResponse: {
                      name: part.toolName,
                      response: {
                        name: part.toolName,
                        content: contentPart.text,
                      },
                    },
                  });
                  break;
                case 'image-data':
                  // Inline the image, followed by a marker text part so the
                  // model can associate the image with the tool result.
                  parts.push(
                    {
                      inlineData: {
                        mimeType: contentPart.mediaType,
                        data: contentPart.data,
                      },
                    },
                    {
                      text: 'Tool executed successfully and returned this image as a response',
                    },
                  );
                  break;
                default:
                  // Unknown content parts are serialized as JSON text.
                  parts.push({ text: JSON.stringify(contentPart) });
                  break;
              }
            }
          } else {
            // Non-content outputs (json, error, execution-denied, ...) are
            // sent as a functionResponse payload.
            parts.push({
              functionResponse: {
                name: part.toolName,
                response: {
                  name: part.toolName,
                  content:
                    output.type === 'execution-denied'
                      ? (output.reason ?? 'Tool execution denied.')
                      : output.value,
                },
              },
            });
          }
        }

        // Tool results are sent back to the API with the 'user' role.
        contents.push({
          role: 'user',
          parts,
        });
        break;
      }
    }
  }

  // Gemma models do not support systemInstruction: prepend the collected
  // system text to the first user message instead.
  if (
    isGemmaModel &&
    systemInstructionParts.length > 0 &&
    contents.length > 0 &&
    contents[0].role === 'user'
  ) {
    const systemText = systemInstructionParts
      .map(part => part.text)
      .join('\n\n');

    contents[0].parts.unshift({ text: systemText + '\n\n' });
  }

  return {
    systemInstruction:
      systemInstructionParts.length > 0 && !isGemmaModel
        ? { parts: systemInstructionParts }
        : undefined,
    contents,
  };
}
import { getModelPath } from './get-model-path';
import { it, expect } from 'vitest';

// Ids that already contain a collection prefix are passed through unchanged.
it('should pass through model path for models/*', async () => {
  expect(getModelPath('models/some-model')).toEqual('models/some-model');
});

it('should pass through model path for tunedModels/*', async () => {
  expect(getModelPath('tunedModels/some-model')).toEqual(
    'tunedModels/some-model',
  );
});

// Bare ids default to the 'models/' collection.
it('should add model path prefix to models without slash', async () => {
  expect(getModelPath('some-model')).toEqual('models/some-model');
});
@@ -0,0 +1,3 @@
1
+ export function getModelPath(modelId: string): string {
2
+ return modelId.includes('/') ? modelId : `models/${modelId}`;
3
+ }
@@ -0,0 +1,26 @@
1
+ import {
2
+ createJsonErrorResponseHandler,
3
+ type InferSchema,
4
+ lazySchema,
5
+ zodSchema,
6
+ } from '@ai-sdk/provider-utils';
7
+ import { z } from 'zod/v4';
8
+
9
+ const googleErrorDataSchema = lazySchema(() =>
10
+ zodSchema(
11
+ z.object({
12
+ error: z.object({
13
+ code: z.number().nullable(),
14
+ message: z.string(),
15
+ status: z.string(),
16
+ }),
17
+ }),
18
+ ),
19
+ );
20
+
21
+ export type GoogleErrorData = InferSchema<typeof googleErrorDataSchema>;
22
+
23
+ export const googleFailedResponseHandler = createJsonErrorResponseHandler({
24
+ errorSchema: googleErrorDataSchema,
25
+ errorToMessage: data => data.error.message,
26
+ });
import { EmbeddingModelV3Embedding } from '@ai-sdk/provider';
import { createTestServer } from '@ai-sdk/test-server/with-vitest';
import { GoogleGenerativeAIEmbeddingModel } from './google-generative-ai-embedding-model';
import { createGoogleGenerativeAI } from './google-provider';
import { describe, it, expect, vi } from 'vitest';

// Pin the package version so user-agent assertions are deterministic.
vi.mock('./version', () => ({
  VERSION: '0.0.0-test',
}));

const dummyEmbeddings = [
  [0.1, 0.2, 0.3, 0.4, 0.5],
  [0.6, 0.7, 0.8, 0.9, 1.0],
];
const testValues = ['sunny day at the beach', 'rainy day in the city'];

const provider = createGoogleGenerativeAI({ apiKey: 'test-api-key' });
const model = provider.embeddingModel('gemini-embedding-001');

// ':something' is a placeholder action suffix; the test server matches the
// URL regardless of the actual action (:embedContent / :batchEmbedContents).
const URL =
  'https://generativelanguage.googleapis.com/v1beta/models/gemini-embedding-001:something';

const server = createTestServer({
  [URL]: {},
});

describe('GoogleGenerativeAIEmbeddingModel', () => {
  // Stub the response shape of the batch endpoint (:batchEmbedContents).
  function prepareBatchJsonResponse({
    embeddings = dummyEmbeddings,
    headers,
  }: {
    embeddings?: EmbeddingModelV3Embedding[];
    headers?: Record<string, string>;
  } = {}) {
    server.urls[URL].response = {
      type: 'json-value',
      headers,
      body: {
        embeddings: embeddings.map(embedding => ({ values: embedding })),
      },
    };
  }

  // Stub the response shape of the single-value endpoint (:embedContent).
  function prepareSingleJsonResponse({
    embeddings = dummyEmbeddings,
    headers,
  }: {
    embeddings?: EmbeddingModelV3Embedding[];
    headers?: Record<string, string>;
  } = {}) {
    server.urls[URL].response = {
      type: 'json-value',
      headers,
      body: {
        embedding: { values: embeddings[0] },
      },
    };
  }

  it('should extract embedding', async () => {
    prepareBatchJsonResponse();

    const { embeddings } = await model.doEmbed({ values: testValues });

    expect(embeddings).toStrictEqual(dummyEmbeddings);
  });

  it('should expose the raw response', async () => {
    prepareBatchJsonResponse({
      headers: {
        'test-header': 'test-value',
      },
    });

    const { response } = await model.doEmbed({ values: testValues });

    expect(response?.headers).toStrictEqual({
      // default headers:
      'content-length': '80',
      'content-type': 'application/json',

      // custom header
      'test-header': 'test-value',
    });
    expect(response).toMatchSnapshot();
  });

  it('should pass the model and the values', async () => {
    prepareBatchJsonResponse();

    await model.doEmbed({ values: testValues });

    expect(await server.calls[0].requestBodyJson).toStrictEqual({
      requests: testValues.map(value => ({
        model: 'models/gemini-embedding-001',
        content: { role: 'user', parts: [{ text: value }] },
      })),
    });
  });

  it('should pass the outputDimensionality setting', async () => {
    prepareBatchJsonResponse();

    await provider.embedding('gemini-embedding-001').doEmbed({
      values: testValues,
      providerOptions: {
        google: { outputDimensionality: 64 },
      },
    });

    expect(await server.calls[0].requestBodyJson).toStrictEqual({
      requests: testValues.map(value => ({
        model: 'models/gemini-embedding-001',
        content: { role: 'user', parts: [{ text: value }] },
        outputDimensionality: 64,
      })),
    });
  });

  it('should pass the taskType setting', async () => {
    prepareBatchJsonResponse();

    await provider.embedding('gemini-embedding-001').doEmbed({
      values: testValues,
      providerOptions: { google: { taskType: 'SEMANTIC_SIMILARITY' } },
    });

    expect(await server.calls[0].requestBodyJson).toStrictEqual({
      requests: testValues.map(value => ({
        model: 'models/gemini-embedding-001',
        content: { role: 'user', parts: [{ text: value }] },
        taskType: 'SEMANTIC_SIMILARITY',
      })),
    });
  });

  it('should pass headers', async () => {
    prepareBatchJsonResponse();

    // Provider-level and request-level headers are both forwarded.
    const provider = createGoogleGenerativeAI({
      apiKey: 'test-api-key',
      headers: {
        'Custom-Provider-Header': 'provider-header-value',
      },
    });

    await provider.embedding('gemini-embedding-001').doEmbed({
      values: testValues,
      headers: {
        'Custom-Request-Header': 'request-header-value',
      },
    });

    expect(server.calls[0].requestHeaders).toStrictEqual({
      'x-goog-api-key': 'test-api-key',
      'content-type': 'application/json',
      'custom-provider-header': 'provider-header-value',
      'custom-request-header': 'request-header-value',
    });
    expect(server.calls[0].requestUserAgent).toContain(
      `ai-sdk/google/0.0.0-test`,
    );
  });

  it('should throw an error if too many values are provided', async () => {
    // Construct the model directly to bypass the provider factory.
    const model = new GoogleGenerativeAIEmbeddingModel('gemini-embedding-001', {
      provider: 'google.generative-ai',
      baseURL: 'https://generativelanguage.googleapis.com/v1beta',
      headers: () => ({}),
    });

    // One more than maxEmbeddingsPerCall (2048).
    const tooManyValues = Array(2049).fill('test');

    await expect(model.doEmbed({ values: tooManyValues })).rejects.toThrow(
      'Too many values for a single embedding call. The google.generative-ai model "gemini-embedding-001" can only embed up to 2048 values per call, but 2049 values were provided.',
    );
  });

  it('should use the batch embeddings endpoint', async () => {
    prepareBatchJsonResponse();
    const model = provider.embeddingModel('gemini-embedding-001');
    await model.doEmbed({
      values: testValues,
    });

    expect(server.calls[0].requestUrl).toBe(
      'https://generativelanguage.googleapis.com/v1beta/models/gemini-embedding-001:batchEmbedContents',
    );
  });

  it('should use the single embeddings endpoint', async () => {
    prepareSingleJsonResponse();

    const model = provider.embeddingModel('gemini-embedding-001');

    // A single value routes to :embedContent instead of :batchEmbedContents.
    await model.doEmbed({
      values: [testValues[0]],
    });

    expect(server.calls[0].requestUrl).toBe(
      'https://generativelanguage.googleapis.com/v1beta/models/gemini-embedding-001:embedContent',
    );
  });
});
import {
  EmbeddingModelV3,
  TooManyEmbeddingValuesForCallError,
} from '@ai-sdk/provider';
import {
  combineHeaders,
  createJsonResponseHandler,
  FetchFunction,
  lazySchema,
  parseProviderOptions,
  postJsonToApi,
  resolve,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import { googleFailedResponseHandler } from './google-error';
import {
  GoogleGenerativeAIEmbeddingModelId,
  googleGenerativeAIEmbeddingProviderOptions,
} from './google-generative-ai-embedding-options';

// Configuration injected by the provider factory.
type GoogleGenerativeAIEmbeddingConfig = {
  provider: string;
  baseURL: string;
  headers: () => Record<string, string | undefined>;
  fetch?: FetchFunction;
};

/**
 * Embedding model for the Google Generative AI API.
 *
 * Routes single-value calls to the `:embedContent` endpoint and multi-value
 * calls to the `:batchEmbedContents` endpoint.
 */
export class GoogleGenerativeAIEmbeddingModel implements EmbeddingModelV3 {
  readonly specificationVersion = 'v3';
  readonly modelId: GoogleGenerativeAIEmbeddingModelId;
  // API limit for values per batch call.
  readonly maxEmbeddingsPerCall = 2048;
  readonly supportsParallelCalls = true;

  private readonly config: GoogleGenerativeAIEmbeddingConfig;

  get provider(): string {
    return this.config.provider;
  }
  constructor(
    modelId: GoogleGenerativeAIEmbeddingModelId,
    config: GoogleGenerativeAIEmbeddingConfig,
  ) {
    this.modelId = modelId;
    this.config = config;
  }

  /**
   * Embeds the given values.
   *
   * @throws TooManyEmbeddingValuesForCallError when more than
   *   `maxEmbeddingsPerCall` values are provided.
   */
  async doEmbed({
    values,
    headers,
    abortSignal,
    providerOptions,
  }: Parameters<EmbeddingModelV3['doEmbed']>[0]): Promise<
    Awaited<ReturnType<EmbeddingModelV3['doEmbed']>>
  > {
    // Parse provider options
    const googleOptions = await parseProviderOptions({
      provider: 'google',
      providerOptions,
      schema: googleGenerativeAIEmbeddingProviderOptions,
    });

    if (values.length > this.maxEmbeddingsPerCall) {
      throw new TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values,
      });
    }

    // Provider-level headers (possibly async) merged with request headers.
    const mergedHeaders = combineHeaders(
      await resolve(this.config.headers),
      headers,
    );

    // For single embeddings, use the single endpoint (ratelimits, etc.)
    if (values.length === 1) {
      const {
        responseHeaders,
        value: response,
        rawValue,
      } = await postJsonToApi({
        url: `${this.config.baseURL}/models/${this.modelId}:embedContent`,
        headers: mergedHeaders,
        body: {
          model: `models/${this.modelId}`,
          content: {
            parts: [{ text: values[0] }],
          },
          outputDimensionality: googleOptions?.outputDimensionality,
          taskType: googleOptions?.taskType,
        },
        failedResponseHandler: googleFailedResponseHandler,
        successfulResponseHandler: createJsonResponseHandler(
          googleGenerativeAISingleEmbeddingResponseSchema,
        ),
        abortSignal,
        fetch: this.config.fetch,
      });

      return {
        warnings: [],
        embeddings: [response.embedding.values],
        usage: undefined,
        response: { headers: responseHeaders, body: rawValue },
      };
    }

    const {
      responseHeaders,
      value: response,
      rawValue,
    } = await postJsonToApi({
      url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
      headers: mergedHeaders,
      body: {
        requests: values.map(value => ({
          model: `models/${this.modelId}`,
          content: { role: 'user', parts: [{ text: value }] },
          outputDimensionality: googleOptions?.outputDimensionality,
          taskType: googleOptions?.taskType,
        })),
      },
      failedResponseHandler: googleFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        googleGenerativeAITextEmbeddingResponseSchema,
      ),
      abortSignal,
      fetch: this.config.fetch,
    });

    return {
      warnings: [],
      embeddings: response.embeddings.map(item => item.values),
      usage: undefined,
      response: { headers: responseHeaders, body: rawValue },
    };
  }
}

// minimal version of the schema, focused on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const googleGenerativeAITextEmbeddingResponseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      embeddings: z.array(z.object({ values: z.array(z.number()) })),
    }),
  ),
);

// Schema for single embedding response
const googleGenerativeAISingleEmbeddingResponseSchema = lazySchema(() =>
  zodSchema(
    z.object({
      embedding: z.object({ values: z.array(z.number()) }),
    }),
  ),
);
import {
  type InferSchema,
  lazySchema,
  zodSchema,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

// Known embedding model ids; `string & {}` keeps the union open for custom
// ids while preserving autocomplete for the known ones.
export type GoogleGenerativeAIEmbeddingModelId =
  | 'gemini-embedding-001'
  | 'text-embedding-004'
  | (string & {});

// Provider options accepted under the 'google' key for embedding calls.
export const googleGenerativeAIEmbeddingProviderOptions = lazySchema(() =>
  zodSchema(
    z.object({
      /**
       * Optional reduced dimension for the output embedding.
       * If set, excessive values in the output embedding are truncated from the end.
       */
      outputDimensionality: z.number().optional(),

      /**
       * Optional. Specifies the task type for generating embeddings.
       * Supported task types:
       * - SEMANTIC_SIMILARITY: Optimized for text similarity.
       * - CLASSIFICATION: Optimized for text classification.
       * - CLUSTERING: Optimized for clustering texts based on similarity.
       * - RETRIEVAL_DOCUMENT: Optimized for document retrieval.
       * - RETRIEVAL_QUERY: Optimized for query-based retrieval.
       * - QUESTION_ANSWERING: Optimized for answering questions.
       * - FACT_VERIFICATION: Optimized for verifying factual information.
       * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
       */
      taskType: z
        .enum([
          'SEMANTIC_SIMILARITY',
          'CLASSIFICATION',
          'CLUSTERING',
          'RETRIEVAL_DOCUMENT',
          'RETRIEVAL_QUERY',
          'QUESTION_ANSWERING',
          'FACT_VERIFICATION',
          'CODE_RETRIEVAL_QUERY',
        ])
        .optional(),
    }),
  ),
);

export type GoogleGenerativeAIEmbeddingProviderOptions = InferSchema<
  typeof googleGenerativeAIEmbeddingProviderOptions
>;