@core-ai/google-genai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Omnifact (https://omnifact.ai)
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,26 @@
1
+ # @core-ai/google-genai
2
+
3
+ Google GenAI provider package for `@core-ai/core-ai`.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @core-ai/core-ai @core-ai/google-genai zod
9
+ ```
10
+
11
+ ## Usage
12
+
13
+ ```ts
14
+ import { generate } from '@core-ai/core-ai';
15
+ import { createGoogleGenAI } from '@core-ai/google-genai';
16
+
17
+ const google = createGoogleGenAI({ apiKey: process.env.GOOGLE_API_KEY });
18
+ const model = google.chatModel('gemini-3-flash');
19
+
20
+ const result = await generate({
21
+ model,
22
+ messages: [{ role: 'user', content: 'Hello!' }],
23
+ });
24
+
25
+ console.log(result.content);
26
+ ```
package/dist/index.d.ts ADDED
@@ -0,0 +1,17 @@
1
import { GoogleGenAI } from '@google/genai';
import { ChatModel, EmbeddingModel, ImageModel } from '@core-ai/core-ai';

/**
 * Options for {@link createGoogleGenAI}.
 * Either pass a pre-configured `client`, or the connection settings used to
 * construct one.
 */
type GoogleGenAIProviderOptions = {
  apiKey?: string;
  apiVersion?: string;
  baseUrl?: string;
  /** Pre-built SDK client; when set, the other options are ignored. */
  client?: GoogleGenAI;
};
/** Factory surface for the chat, embedding, and image model implementations. */
type GoogleGenAIProvider = {
  chatModel(modelId: string): ChatModel;
  embeddingModel(modelId: string): EmbeddingModel;
  imageModel(modelId: string): ImageModel;
};
/** Creates a Google GenAI provider backed by the `@google/genai` SDK. */
declare function createGoogleGenAI(options?: GoogleGenAIProviderOptions): GoogleGenAIProvider;

export { type GoogleGenAIProvider, type GoogleGenAIProviderOptions, createGoogleGenAI };
package/dist/index.js ADDED
@@ -0,0 +1,560 @@
1
+ // src/provider.ts
2
+ import { GoogleGenAI } from "@google/genai";
3
+
4
+ // src/chat-model.ts
5
+ import { createStreamResult } from "@core-ai/core-ai";
6
+
7
+ // src/chat-adapter.ts
8
+ import {
9
+ ApiError,
10
+ FunctionCallingConfigMode
11
+ } from "@google/genai";
12
+ import { zodToJsonSchema } from "zod-to-json-schema";
13
+ import { ProviderError } from "@core-ai/core-ai";
14
// Converts provider-agnostic chat messages into the Google GenAI `contents`
// array plus an optional concatenated system instruction.
// - system messages are collected separately (Gemini takes them as config);
// - assistant tool calls become functionCall parts, and the id->name mapping
//   is remembered so later tool-result messages can reference the original
//   function name;
// - consecutive tool-result messages are merged into a single user turn.
// NOTE(review): the final branch assumes any role other than
// system/user/assistant is a tool-result message (it reads
// message.toolCallId); presumably the TS source's union type guarantees
// this — confirm against src/chat-adapter.ts.
function convertMessages(messages) {
  const systemParts = [];
  const contents = [];
  const toolCallNameById = /* @__PURE__ */ new Map();
  for (const message of messages) {
    if (message.role === "system") {
      systemParts.push(message.content);
      continue;
    }
    if (message.role === "user") {
      // String content becomes one text part; array content maps part-by-part.
      const userParts = typeof message.content === "string" ? [{ text: message.content }] : message.content.map(convertUserContentPart);
      contents.push({
        role: "user",
        parts: userParts
      });
      continue;
    }
    if (message.role === "assistant") {
      const assistantParts = [];
      if (message.content) {
        assistantParts.push({ text: message.content });
      }
      for (const toolCall of message.toolCalls ?? []) {
        // Remember the name so tool-result messages can resolve it by id.
        toolCallNameById.set(toolCall.id, toolCall.name);
        assistantParts.push({
          functionCall: {
            id: toolCall.id,
            name: toolCall.name,
            args: toolCall.arguments
          }
        });
      }
      contents.push({
        role: "model",
        // Never emit an empty parts array; fall back to an empty text part.
        parts: assistantParts.length > 0 ? assistantParts : [{ text: "" }]
      });
      continue;
    }
    // Tool-result message: recover the function name recorded when the call
    // was emitted; fall back to the call id if the call isn't in this history.
    const functionName = toolCallNameById.get(message.toolCallId) ?? message.toolCallId;
    const response = message.isError ? { error: message.content } : { output: message.content };
    const toolResponsePart = {
      functionResponse: {
        id: message.toolCallId,
        name: functionName,
        response
      }
    };
    // Append to the previous content when it is already a tool-result turn,
    // so all functionResponse parts for one turn travel together.
    const lastContent = contents.at(-1);
    if (lastContent && isToolResultContent(lastContent)) {
      lastContent.parts?.push(toolResponsePart);
      continue;
    }
    contents.push({
      role: "user",
      parts: [toolResponsePart]
    });
  }
  return {
    contents,
    // `void 0` === undefined: omit the instruction entirely when none given.
    systemInstruction: systemParts.length > 0 ? systemParts.join("\n") : void 0
  };
}
76
// Maps a single structured user content part onto the Google GenAI Part shape.
function convertUserContentPart(part) {
  switch (part.type) {
    case "text":
      return { text: part.text };
    case "image": {
      const source = part.source;
      if (source.type === "url") {
        // Remote images are passed by reference; Gemini wants a mime type,
        // which is inferred from the URL's file extension.
        return {
          fileData: {
            fileUri: source.url,
            mimeType: inferMimeTypeFromUrl(source.url)
          }
        };
      }
      // Base64-encoded image payload.
      return {
        inlineData: {
          data: source.data,
          mimeType: source.mediaType
        }
      };
    }
    default:
      // Any other part kind carries raw inline data and a mime type directly.
      return {
        inlineData: {
          data: part.data,
          mimeType: part.mimeType
        }
      };
  }
}
103
// Translates the tool registry into Gemini functionDeclarations, converting
// each zod parameter schema to JSON Schema (minus the "$schema" marker key).
function convertTools(tools) {
  const functionDeclarations = [];
  for (const tool of Object.values(tools)) {
    const { $schema: _schema, ...parametersJsonSchema } = zodToJsonSchema(tool.parameters);
    functionDeclarations.push({
      name: tool.name,
      description: tool.description,
      parametersJsonSchema
    });
  }
  // No tools registered: return no Tool entries rather than an empty list.
  return functionDeclarations.length === 0 ? [] : [{ functionDeclarations }];
}
124
// Maps the abstract tool-choice setting onto Gemini's functionCallingConfig.
function convertToolChoice(choice) {
  const withMode = (mode) => ({ functionCallingConfig: { mode } });
  switch (choice) {
    case "auto":
      return withMode(FunctionCallingConfigMode.AUTO);
    case "none":
      return withMode(FunctionCallingConfigMode.NONE);
    case "required":
      // "required" means the model must call some function: Gemini's ANY mode.
      return withMode(FunctionCallingConfigMode.ANY);
    default:
      // Object form: force a specific tool by name.
      return {
        functionCallingConfig: {
          mode: FunctionCallingConfigMode.ANY,
          allowedFunctionNames: [choice.toolName]
        }
      };
  }
}
153
// True when a content entry is a user turn consisting solely of
// functionResponse parts (i.e. a mergeable tool-result turn).
function isToolResultContent(content) {
  if (content.role !== "user") {
    return false;
  }
  const parts = content.parts;
  if (!parts || parts.length === 0) {
    return false;
  }
  return parts.every((part) => part.functionResponse);
}
159
// Maps a known image file extension in the URL to a mime type.
// Extensions match case-insensitively, and query strings / fragments are
// ignored, so ".../img.png?sig=..." still resolves to image/png (previously
// such URLs fell through to the octet-stream default).
// Unknown extensions fall back to application/octet-stream.
function inferMimeTypeFromUrl(url) {
  // Drop everything from the first '?' or '#' so it can't hide the extension.
  const path = url.split(/[?#]/, 1)[0].toLowerCase();
  const mimeTypesByExtension = {
    ".png": "image/png",
    ".webp": "image/webp",
    ".gif": "image/gif",
    ".svg": "image/svg+xml",
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg"
  };
  for (const [extension, mimeType] of Object.entries(mimeTypesByExtension)) {
    if (path.endsWith(extension)) {
      return mimeType;
    }
  }
  return "application/octet-stream";
}
178
// Assembles the generateContent request from the generic chat options.
// Generic config values are only included when explicitly set, so Gemini's
// own defaults apply otherwise. providerOptions (if any) are layered on top:
// its top-level keys override the base request, and providerOptions.config
// is shallow-merged over the derived config — provider-specific settings win.
function createGenerateRequest(modelId, options) {
  const convertedMessages = convertMessages(options.messages);
  const baseRequest = {
    model: modelId,
    contents: convertedMessages.contents,
    config: {
      ...convertedMessages.systemInstruction ? { systemInstruction: convertedMessages.systemInstruction } : {},
      // Tools are only attached when the registry is non-empty.
      ...options.tools && Object.keys(options.tools).length > 0 ? { tools: convertTools(options.tools) } : {},
      ...options.toolChoice ? { toolConfig: convertToolChoice(options.toolChoice) } : {},
      // `void 0` === undefined: explicit 0 values still pass through.
      ...options.config?.temperature !== void 0 ? { temperature: options.config.temperature } : {},
      ...options.config?.maxTokens !== void 0 ? { maxOutputTokens: options.config.maxTokens } : {},
      ...options.config?.topP !== void 0 ? { topP: options.config.topP } : {},
      ...options.config?.stopSequences ? { stopSequences: options.config.stopSequences } : {},
      ...options.config?.frequencyPenalty !== void 0 ? { frequencyPenalty: options.config.frequencyPenalty } : {},
      ...options.config?.presencePenalty !== void 0 ? { presencePenalty: options.config.presencePenalty } : {}
    }
  };
  const providerOptions = options.providerOptions;
  if (!providerOptions) {
    return baseRequest;
  }
  // Non-object "config" values are dropped rather than clobbering the config.
  const providerConfig = asObject(providerOptions["config"]);
  return {
    ...baseRequest,
    ...providerOptions,
    config: {
      ...baseRequest.config,
      ...providerConfig
    }
  };
}
209
// Maps a non-streaming generateContent response onto the generic result shape.
function mapGenerateResponse(response) {
  const toolCalls = parseFunctionCalls(response.functionCalls);
  const finishReason = mapFinishReason(
    response.candidates?.[0]?.finishReason ?? void 0
  );
  const hasCandidate = Boolean(response.candidates?.[0]);
  return {
    // Without a candidate there is no text to surface.
    content: hasCandidate ? response.text ?? null : null,
    toolCalls,
    // The presence of tool calls takes precedence over the raw finish reason.
    finishReason: toolCalls.length > 0 ? "tool-calls" : finishReason,
    usage: mapUsage(response)
  };
}
229
// Normalizes the SDK's functionCalls array (possibly absent) into tool calls.
function parseFunctionCalls(calls) {
  const list = calls ?? [];
  return list.map((call, position) => mapFunctionCall(call, position));
}
235
// Converts one SDK FunctionCall into the generic tool-call shape, using an
// index-based placeholder when the SDK omits the id or name.
function mapFunctionCall(toolCall, index) {
  const placeholder = `tool-${index}`;
  return {
    id: toolCall.id ?? placeholder,
    name: toolCall.name ?? placeholder,
    arguments: asObject(toolCall.args)
  };
}
242
// Maps a Gemini candidate finish reason onto the generic finish-reason union.
function mapFinishReason(reason) {
  switch (reason) {
    case "STOP":
      return "stop";
    case "MAX_TOKENS":
      return "length";
    // All moderation-style terminations collapse to "content-filter".
    case "SAFETY":
    case "RECITATION":
    case "BLOCKLIST":
    case "PROHIBITED_CONTENT":
    case "SPII":
    case "IMAGE_SAFETY":
    case "IMAGE_PROHIBITED_CONTENT":
    case "IMAGE_RECITATION":
      return "content-filter";
    default:
      return "unknown";
  }
}
254
// Adapts the SDK's streamed GenerateContentResponse chunks into the generic
// stream-event protocol:
//   content-delta                 -> incremental text
//   tool-call-start/-delta/-end   -> function calls (buffered, deduped by id)
//   finish                        -> final finish reason + accumulated usage
// Gemini delivers each function call's arguments whole rather than as true
// incremental deltas, so a "delta" event here carries the full serialized
// arguments whenever they first appear or change.
async function* transformStream(stream) {
  // Tool calls keyed by id so repeated chunks update rather than duplicate.
  const bufferedToolCalls = /* @__PURE__ */ new Map();
  let finishReason = "unknown";
  let sawToolCalls = false;
  let usage = {
    inputTokens: 0,
    outputTokens: 0,
    reasoningTokens: 0,
    totalTokens: 0
  };
  for await (const chunk of stream) {
    // Carry previous totals forward for chunks without usageMetadata.
    usage = mapUsage(chunk, usage);
    if (chunk.text) {
      yield {
        type: "content-delta",
        text: chunk.text
      };
    }
    const functionCalls = chunk.functionCalls ?? [];
    if (functionCalls.length > 0) {
      sawToolCalls = true;
      for (const [index, functionCall] of functionCalls.entries()) {
        const mappedCall = mapFunctionCall(functionCall, index);
        const existing = bufferedToolCalls.get(mappedCall.id);
        if (!existing) {
          // First sighting: announce the call, then emit its arguments
          // (if any) as one delta.
          bufferedToolCalls.set(mappedCall.id, mappedCall);
          yield {
            type: "tool-call-start",
            toolCallId: mappedCall.id,
            toolName: mappedCall.name
          };
          const serializedArguments = JSON.stringify(mappedCall.arguments);
          if (serializedArguments !== "{}") {
            yield {
              type: "tool-call-delta",
              toolCallId: mappedCall.id,
              argumentsDelta: serializedArguments
            };
          }
          continue;
        }
        // Re-sent call: only emit a delta when the arguments actually changed.
        const serializedExisting = JSON.stringify(existing.arguments);
        const serializedNext = JSON.stringify(mappedCall.arguments);
        if (serializedExisting !== serializedNext) {
          bufferedToolCalls.set(mappedCall.id, mappedCall);
          yield {
            type: "tool-call-delta",
            toolCallId: mappedCall.id,
            argumentsDelta: serializedNext
          };
        }
      }
    }
    // Remember the last definitive finish reason seen in any chunk.
    const candidateFinishReason = mapFinishReason(
      chunk.candidates?.[0]?.finishReason ?? void 0
    );
    if (candidateFinishReason !== "unknown") {
      finishReason = candidateFinishReason;
    }
  }
  // Stream ended: close out every buffered tool call.
  for (const toolCall of bufferedToolCalls.values()) {
    yield {
      type: "tool-call-end",
      toolCall
    };
  }
  // Tool calls dominate the finish reason unless content was filtered.
  if (sawToolCalls && finishReason !== "content-filter") {
    finishReason = "tool-calls";
  }
  yield {
    type: "finish",
    finishReason,
    usage
  };
}
329
// Accumulates token usage from a response or stream chunk.
// `fallback` carries usage from previous stream chunks; each field is kept
// when the current chunk's usageMetadata omits it.
// Bug fix: candidatesTokenCount previously had no fallback, so a chunk
// without usageMetadata collapsed outputTokens to just the reasoning tokens
// while the other fields were preserved. The previous text-token count is
// recovered from the fallback (outputTokens always includes reasoningTokens).
function mapUsage(response, fallback) {
  const metadata = response.usageMetadata;
  const inputTokens = metadata?.promptTokenCount ?? fallback?.inputTokens ?? 0;
  const reasoningTokens = metadata?.thoughtsTokenCount ?? fallback?.reasoningTokens ?? 0;
  const fallbackTextTokens = fallback ? fallback.outputTokens - fallback.reasoningTokens : 0;
  const textTokens = metadata?.candidatesTokenCount ?? fallbackTextTokens;
  // Gemini reports thinking tokens separately; fold them into output.
  const outputTokens = textTokens + reasoningTokens;
  const totalTokens = metadata?.totalTokenCount ?? fallback?.totalTokens ?? inputTokens + outputTokens;
  return {
    inputTokens,
    outputTokens,
    reasoningTokens,
    totalTokens
  };
}
342
// Returns the value when it is a non-null, non-array object; otherwise {}.
function asObject(value) {
  const isObjectLike = typeof value === "object" && value !== null;
  return isObjectLike && !Array.isArray(value) ? value : {};
}
348
// Wraps any thrown value in a ProviderError tagged with this provider.
// SDK ApiErrors keep their HTTP status; everything else gets none.
function wrapError(error) {
  if (error instanceof ApiError) {
    return new ProviderError(error.message, "google", error.status, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError(message, "google", void 0, error);
}
359
+
360
+ // src/chat-model.ts
361
// Builds the ChatModel implementation backed by a shared GoogleGenAI client.
// All SDK/unknown errors are normalized into ProviderError via wrapError.
function createGoogleGenAIChatModel(client, modelId) {
  const generate = async (options) => {
    try {
      const request = createGenerateRequest(modelId, options);
      const response = await client.models.generateContent(request);
      return mapGenerateResponse(response);
    } catch (error) {
      throw wrapError(error);
    }
  };
  const stream = async (options) => {
    try {
      const request = createGenerateRequest(modelId, options);
      const sdkStream = await client.models.generateContentStream(request);
      return createStreamResult(transformStream(sdkStream));
    } catch (error) {
      throw wrapError(error);
    }
  };
  return {
    provider: "google",
    modelId,
    generate,
    stream
  };
}
385
+
386
+ // src/embedding-model.ts
387
+ import { ApiError as ApiError2 } from "@google/genai";
388
+ import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
389
// Builds the EmbeddingModel implementation backed by a shared GoogleGenAI
// client.
function createGoogleGenAIEmbeddingModel(client, modelId) {
  return {
    provider: "google",
    modelId,
    async embed(options) {
      try {
        const baseRequest = {
          model: modelId,
          // embedContent takes an array; wrap a single input value.
          contents: Array.isArray(options.input) ? options.input : [options.input],
          ...options.dimensions !== void 0 ? {
            config: {
              outputDimensionality: options.dimensions
            }
          } : {}
        };
        // providerOptions override the base request at the top level; its
        // "config" is merged over the derived config (same layering as the
        // chat request builder).
        const providerOptions = options.providerOptions;
        const request = providerOptions ? {
          ...baseRequest,
          ...providerOptions,
          config: {
            ...baseRequest.config,
            ...asObject2(providerOptions["config"])
          }
        } : baseRequest;
        const response = await client.models.embedContent(request);
        return {
          // Missing embeddings/values degrade to empty arrays, not errors.
          embeddings: (response.embeddings ?? []).map(
            (item) => item.values ?? []
          ),
          usage: {
            // Sum per-embedding token statistics; absent counts contribute 0.
            inputTokens: (response.embeddings ?? []).reduce(
              (total, item) => total + (item.statistics?.tokenCount ?? 0),
              0
            )
          }
        };
      } catch (error) {
        // Normalize SDK/unknown errors into ProviderError.
        throw wrapError2(error);
      }
    }
  };
}
431
// Wraps any thrown value in a ProviderError tagged with this provider.
// SDK ApiErrors keep their HTTP status; everything else gets none.
function wrapError2(error) {
  if (error instanceof ApiError2) {
    return new ProviderError2(error.message, "google", error.status, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError2(message, "google", void 0, error);
}
442
// Returns the value when it is a non-null, non-array object; otherwise {}.
function asObject2(value) {
  if (value === null || typeof value !== "object" || Array.isArray(value)) {
    return {};
  }
  return value;
}
448
+
449
+ // src/image-model.ts
450
+ import { ApiError as ApiError3 } from "@google/genai";
451
+ import { ProviderError as ProviderError3 } from "@core-ai/core-ai";
452
// Builds the ImageModel implementation backed by a shared GoogleGenAI client.
function createGoogleGenAIImageModel(client, modelId) {
  return {
    provider: "google",
    modelId,
    async generate(options) {
      try {
        const baseRequest = {
          model: modelId,
          prompt: options.prompt,
          config: {
            ...options.n !== void 0 ? { numberOfImages: options.n } : {},
            // "WIDTHxHEIGHT" size strings become aspectRatio/imageSize hints.
            ...mapSizeToImageConfig(options.size)
          }
        };
        // providerOptions layering mirrors the chat/embedding requests:
        // top-level override, "config" shallow-merged over the derived config.
        const providerOptions = options.providerOptions;
        const request = providerOptions ? {
          ...baseRequest,
          ...providerOptions,
          config: {
            ...baseRequest.config,
            ...asObject3(providerOptions["config"])
          }
        } : baseRequest;
        const response = await client.models.generateImages(request);
        return {
          images: (response.generatedImages ?? []).map((image) => ({
            // Inline bytes (base64) or a GCS URL, whichever the API returned.
            base64: image.image?.imageBytes ?? void 0,
            url: image.image?.gcsUri ?? void 0,
            revisedPrompt: image.enhancedPrompt ?? void 0
          }))
        };
      } catch (error) {
        // Normalize SDK/unknown errors into ProviderError.
        throw wrapError3(error);
      }
    }
  };
}
489
// Wraps any thrown value in a ProviderError tagged with this provider.
// SDK ApiErrors keep their HTTP status; everything else gets none.
function wrapError3(error) {
  if (error instanceof ApiError3) {
    return new ProviderError3(error.message, "google", error.status, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError3(message, "google", void 0, error);
}
500
// Translates a "WIDTHxHEIGHT" size string into Gemini image config hints.
// Returns {} for absent or unparseable sizes so the API defaults apply.
// Bug fix: a zero width previously slipped through (only height was checked)
// and produced a nonsense "0:N" aspect ratio; both dimensions must now be
// positive.
function mapSizeToImageConfig(size) {
  if (!size) {
    return {};
  }
  // Case-insensitive so "1024X768" also parses.
  const match = /^(\d+)x(\d+)$/i.exec(size.trim());
  if (!match) {
    return {};
  }
  const width = Number(match[1]);
  const height = Number(match[2]);
  if (!Number.isFinite(width) || !Number.isFinite(height) || width === 0 || height === 0) {
    return {};
  }
  const aspectRatio = simplifyRatio(width, height);
  const largestDimension = Math.max(width, height);
  return {
    aspectRatio,
    // Gemini accepts only coarse size buckets; beyond 2K no hint is sent.
    ...largestDimension <= 1024 ? { imageSize: "1K" } : largestDimension <= 2048 ? { imageSize: "2K" } : {}
  };
}
520
// Reduces width:height to lowest terms, e.g. 1024/768 -> "4:3".
function simplifyRatio(width, height) {
  const divisor = greatestCommonDivisor(width, height);
  const w = Math.round(width / divisor);
  const h = Math.round(height / divisor);
  return `${w}:${h}`;
}
524
// Euclidean GCD on absolute values. Returns 1 for gcd(0, 0) so callers
// never divide by zero.
function greatestCommonDivisor(a, b) {
  let x = Math.abs(a);
  let y = Math.abs(b);
  while (y !== 0) {
    [x, y] = [y, x % y];
  }
  return x === 0 ? 1 : x;
}
534
// Returns the value when it is a non-null, non-array object; otherwise {}.
function asObject3(value) {
  const isPlainObject = Boolean(value) && typeof value === "object" && !Array.isArray(value);
  return isPlainObject ? value : {};
}
540
+
541
+ // src/provider.ts
542
// Creates the Google GenAI provider. A pre-built client takes precedence;
// otherwise one is constructed lazily from apiKey/apiVersion/baseUrl.
function createGoogleGenAI(options = {}) {
  const buildClient = () => {
    const clientOptions = { apiKey: options.apiKey };
    if (options.apiVersion) {
      clientOptions.apiVersion = options.apiVersion;
    }
    if (options.baseUrl) {
      clientOptions.httpOptions = { baseUrl: options.baseUrl };
    }
    return new GoogleGenAI(clientOptions);
  };
  // `??` short-circuits, so no client is constructed when one was supplied.
  const client = options.client ?? buildClient();
  return {
    chatModel: (modelId) => createGoogleGenAIChatModel(client, modelId),
    embeddingModel: (modelId) => createGoogleGenAIEmbeddingModel(client, modelId),
    imageModel: (modelId) => createGoogleGenAIImageModel(client, modelId)
  };
}
export {
  createGoogleGenAI
};
package/package.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "name": "@core-ai/google-genai",
3
+ "version": "0.1.0",
4
+ "description": "Google GenAI provider package for @core-ai/core-ai",
5
+ "license": "MIT",
6
+ "author": "Omnifact (https://omnifact.ai)",
7
+ "repository": {
8
+ "type": "git",
9
+ "url": "git+https://github.com/agdevhq/ai-core.git",
10
+ "directory": "packages/google-genai"
11
+ },
12
+ "keywords": ["llm", "ai", "google", "genai", "provider", "sdk"],
13
+ "type": "module",
14
+ "main": "./dist/index.js",
15
+ "types": "./dist/index.d.ts",
16
+ "exports": {
17
+ ".": {
18
+ "types": "./dist/index.d.ts",
19
+ "import": "./dist/index.js"
20
+ }
21
+ },
22
+ "files": ["dist", "README.md", "LICENSE"],
23
+ "publishConfig": {
24
+ "access": "public"
25
+ },
26
+ "scripts": {
27
+ "build": "tsup",
28
+ "lint": "eslint src/ --max-warnings 0",
29
+ "check-types": "tsc --noEmit",
30
+ "prepublishOnly": "npm run build",
31
+ "test": "vitest run",
32
+ "test:watch": "vitest"
33
+ },
34
+ "dependencies": {
35
+ "@core-ai/core-ai": "^0.1.0",
36
+ "@google/genai": "^1.42.0",
37
+ "zod-to-json-schema": "^3.24.5"
38
+ },
39
+ "peerDependencies": {
40
+ "zod": "^3.25.76"
41
+ },
42
+ "devDependencies": {
43
+ "@core-ai/eslint-config": "^0.0.0",
44
+ "@core-ai/typescript-config": "^0.0.0",
45
+ "typescript": "^5.7.3",
46
+ "vitest": "^3.2.4"
47
+ }
48
+ }