@rlabs-inc/gemini-mcp 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/LICENCE +21 -0
  2. package/README.md +418 -0
  3. package/dist/gemini-client.d.ts +120 -0
  4. package/dist/gemini-client.js +399 -0
  5. package/dist/index.d.ts +8 -0
  6. package/dist/index.js +220 -0
  7. package/dist/tools/analyze.d.ts +10 -0
  8. package/dist/tools/analyze.js +96 -0
  9. package/dist/tools/brainstorm.d.ts +10 -0
  10. package/dist/tools/brainstorm.js +220 -0
  11. package/dist/tools/cache.d.ts +17 -0
  12. package/dist/tools/cache.js +286 -0
  13. package/dist/tools/code-exec.d.ts +17 -0
  14. package/dist/tools/code-exec.js +135 -0
  15. package/dist/tools/document.d.ts +16 -0
  16. package/dist/tools/document.js +333 -0
  17. package/dist/tools/image-edit.d.ts +16 -0
  18. package/dist/tools/image-edit.js +291 -0
  19. package/dist/tools/image-gen.d.ts +17 -0
  20. package/dist/tools/image-gen.js +148 -0
  21. package/dist/tools/query.d.ts +11 -0
  22. package/dist/tools/query.js +63 -0
  23. package/dist/tools/search.d.ts +15 -0
  24. package/dist/tools/search.js +128 -0
  25. package/dist/tools/speech.d.ts +17 -0
  26. package/dist/tools/speech.js +304 -0
  27. package/dist/tools/structured.d.ts +16 -0
  28. package/dist/tools/structured.js +247 -0
  29. package/dist/tools/summarize.d.ts +10 -0
  30. package/dist/tools/summarize.js +77 -0
  31. package/dist/tools/url-context.d.ts +17 -0
  32. package/dist/tools/url-context.js +226 -0
  33. package/dist/tools/video-gen.d.ts +11 -0
  34. package/dist/tools/video-gen.js +136 -0
  35. package/dist/tools/youtube.d.ts +16 -0
  36. package/dist/tools/youtube.js +218 -0
  37. package/dist/utils/logger.d.ts +33 -0
  38. package/dist/utils/logger.js +82 -0
  39. package/package.json +48 -0
@@ -0,0 +1,399 @@
1
+ /**
2
+ * Gemini Client - Provides access to Google's Generative AI models
3
+ *
4
+ * This module initializes and manages the connection to Google's Gemini API.
5
+ * Supports Gemini 3 Pro, Flash, image generation (Nano Banana Pro), and video generation (Veo).
6
+ *
7
+ * Key Gemini 3 Features:
8
+ * - Thinking Levels: Control reasoning depth (minimal, low, medium, high)
9
+ * - 4K Image Generation: Up to 4K resolution with Google Search grounding
10
+ * - Multi-turn Image Editing: Conversational image refinement
11
+ */
12
+ import { GoogleGenAI, Modality } from '@google/genai';
13
+ import { logger } from './utils/logger.js';
14
+ import * as fs from 'fs';
15
+ import * as path from 'path';
16
// Lazily-initialized module state, populated by initGeminiClient().
let genAI;           // GoogleGenAI client instance
let proModelName;    // e.g. 'gemini-3-pro-preview'
let flashModelName;  // e.g. 'gemini-3-flash-preview'
let imageModelName;  // Nano Banana Pro image model
let videoModelName;  // Veo video model
// Directory where generated images / videos are written.
let outputDir;
24
/**
 * Initialize the Gemini client with configured models.
 *
 * Reads GEMINI_API_KEY (required) and optional model/output-dir overrides
 * from the environment, creates the output directory if missing, and
 * verifies connectivity with up to 3 test requests (10 s timeout each).
 *
 * @throws Error when GEMINI_API_KEY is unset or all connection attempts fail.
 *
 * Fix: the per-attempt timeout timer was never cleared, so every attempt
 * left a dangling 10 s timer keeping the event loop alive; it is now
 * cleared in a finally block.
 */
export async function initGeminiClient() {
    const apiKey = process.env.GEMINI_API_KEY;
    if (!apiKey) {
        throw new Error('GEMINI_API_KEY environment variable is required');
    }
    try {
        // Initialize the API client
        genAI = new GoogleGenAI({ apiKey });
        // Set up models - Gemini 3 defaults (latest preview)
        proModelName = process.env.GEMINI_PRO_MODEL || 'gemini-3-pro-preview';
        flashModelName = process.env.GEMINI_FLASH_MODEL || 'gemini-3-flash-preview';
        imageModelName = process.env.GEMINI_IMAGE_MODEL || 'gemini-3-pro-image-preview';
        videoModelName = process.env.GEMINI_VIDEO_MODEL || 'veo-2.0-generate-001';
        // Set up output directory for generated files
        outputDir = process.env.GEMINI_OUTPUT_DIR || path.join(process.cwd(), 'gemini-output');
        if (!fs.existsSync(outputDir)) {
            fs.mkdirSync(outputDir, { recursive: true });
            logger.info(`Created output directory: ${outputDir}`);
        }
        // Use the user's preferred model for init test, fallback to flash (higher free tier limits)
        // This fixes issue #7 - init test was always using pro model causing 429 errors on free tier
        const initModel = process.env.GEMINI_MODEL || flashModelName;
        // Test connection with timeout and retry
        let connected = false;
        let attempts = 0;
        const maxAttempts = 3;
        while (!connected && attempts < maxAttempts) {
            let timeoutId;
            try {
                attempts++;
                logger.info(`Connecting to Gemini API (attempt ${attempts}/${maxAttempts}) using ${initModel}...`);
                // Set up a timeout for the connection test
                const timeoutPromise = new Promise((_, reject) => {
                    timeoutId = setTimeout(() => reject(new Error('Connection timeout')), 10000);
                });
                // Test connection with user's preferred model or flash (better free tier limits)
                const connectionPromise = genAI.models.generateContent({
                    model: initModel,
                    contents: 'Test connection',
                });
                const result = await Promise.race([connectionPromise, timeoutPromise]);
                if (!result) {
                    throw new Error('Failed to connect to Gemini API: empty response');
                }
                connected = true;
                logger.info(`Successfully connected to Gemini API`);
                logger.info(`Pro model: ${proModelName}`);
                logger.info(`Flash model: ${flashModelName}`);
                logger.info(`Image model: ${imageModelName}`);
                logger.info(`Video model: ${videoModelName}`);
                logger.info(`Output directory: ${outputDir}`);
            }
            catch (error) {
                const errorMessage = error instanceof Error ? error.message : String(error);
                logger.warn(`Connection attempt ${attempts} failed: ${errorMessage}`);
                if (attempts >= maxAttempts) {
                    throw new Error(`Failed to connect to Gemini API after ${maxAttempts} attempts: ${errorMessage}`);
                }
                // Wait before retry
                await new Promise((resolve) => setTimeout(resolve, 2000));
            }
            finally {
                // Prevent the 10 s timeout timer from lingering after the
                // race settles (clearTimeout(undefined) is a safe no-op).
                clearTimeout(timeoutId);
            }
        }
    }
    catch (error) {
        logger.error('Failed to initialize Gemini client:', error);
        throw error;
    }
}
94
/**
 * Generate content using the Gemini Pro model.
 *
 * @param prompt - The prompt to send to Gemini.
 * @param options - Generation options; `thinkingLevel` controls reasoning depth.
 * @returns The generated text response ('' when the API returns no text).
 *
 * Gemini 3 Pro supports only 'low' and 'high' thinking levels, so the
 * four public levels are coerced: minimal/low -> 'low', medium/high -> 'high'.
 */
export async function generateWithGeminiPro(prompt, options = {}) {
    try {
        logger.prompt(prompt);
        const { thinkingLevel } = options;
        const requestConfig = {};
        if (thinkingLevel) {
            // Map the requested level onto the two the Pro model accepts.
            let proThinkingLevel;
            if (thinkingLevel === 'minimal' || thinkingLevel === 'low') {
                proThinkingLevel = 'low';
            }
            else {
                proThinkingLevel = 'high';
            }
            requestConfig.thinkingConfig = { thinkingLevel: proThinkingLevel };
            logger.debug(`Using thinking level: ${proThinkingLevel} (requested: ${thinkingLevel})`);
        }
        const hasConfig = Object.keys(requestConfig).length > 0;
        const response = await genAI.models.generateContent({
            model: proModelName,
            contents: prompt,
            config: hasConfig ? requestConfig : undefined,
        });
        const responseText = response.text || '';
        logger.response(responseText);
        return responseText;
    }
    catch (error) {
        logger.error('Error generating content with Gemini Pro:', error);
        throw error;
    }
}
130
/**
 * Generate content using the Gemini Flash model.
 *
 * @param prompt - The prompt to send to Gemini.
 * @param options - Generation options; `thinkingLevel` controls reasoning depth.
 * @returns The generated text response ('' when the API returns no text).
 *
 * Unlike Pro, Gemini 3 Flash accepts every thinking level:
 * minimal, low, medium, high (default).
 */
export async function generateWithGeminiFlash(prompt, options = {}) {
    try {
        logger.prompt(prompt);
        const { thinkingLevel } = options;
        const requestConfig = {};
        if (thinkingLevel) {
            // Flash passes the level through unchanged.
            requestConfig.thinkingConfig = { thinkingLevel };
            logger.debug(`Using thinking level: ${thinkingLevel}`);
        }
        const hasConfig = Object.keys(requestConfig).length > 0;
        const response = await genAI.models.generateContent({
            model: flashModelName,
            contents: prompt,
            config: hasConfig ? requestConfig : undefined,
        });
        const responseText = response.text || '';
        logger.response(responseText);
        return responseText;
    }
    catch (error) {
        logger.error('Error generating content with Gemini Flash:', error);
        throw error;
    }
}
164
/**
 * Generate content with a structured chat history.
 *
 * @param messages - Ordered list of { role, content } turns; any role other
 *   than 'user' is mapped to the API's 'model' role.
 * @param useProModel - When true (default) use the Pro model, else Flash.
 * @returns The model's reply text, or the last message's content when the
 *   history already ends with a model turn.
 * @throws Error when `messages` is empty (previously a raw TypeError from
 *   reading `.role` of undefined).
 */
export async function generateWithChat(messages, useProModel = true) {
    try {
        // Guard: an empty history would crash below on messages[length - 1].
        if (!messages || messages.length === 0) {
            throw new Error('generateWithChat requires at least one message');
        }
        const model = useProModel ? proModelName : flashModelName;
        // Format messages for the Gemini API ({ role, parts } shape).
        const formattedContents = messages.map((msg) => ({
            role: msg.role === 'user' ? 'user' : 'model',
            parts: [{ text: msg.content }],
        }));
        logger.debug('Starting chat with messages:', JSON.stringify(messages, null, 2));
        // Handle the conversation based on the last message
        const lastMessage = messages[messages.length - 1];
        if (lastMessage.role === 'user') {
            logger.prompt(lastMessage.content);
            // Generate content with the conversation history
            const response = await genAI.models.generateContent({
                model: model,
                contents: formattedContents,
            });
            const responseText = response.text || '';
            logger.response(responseText);
            return responseText;
        }
        else {
            // If the last message is from the model, we don't need to send anything
            return lastMessage.content;
        }
    }
    catch (error) {
        logger.error('Error generating content with chat:', error);
        throw error;
    }
}
199
/**
 * Generate an image using Gemini's Nano Banana Pro model
 * (gemini-3-pro-image-preview).
 *
 * Features:
 * - 4K resolution support (1K, 2K, 4K)
 * - 10 aspect ratios (1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9, 21:9)
 * - Google Search grounding for real-world accuracy
 * - High-fidelity text rendering
 *
 * @param prompt - Description of the image to generate.
 * @param options - { aspectRatio, imageSize, style, saveToFile, useGoogleSearch }.
 * @returns { base64, mimeType, filePath, description }.
 */
export async function generateImage(prompt, options = {}) {
    try {
        const {
            aspectRatio = '1:1',
            imageSize = '2K', // Default to 2K for good balance of quality and speed
            style,
            saveToFile = true,
            useGoogleSearch = false,
        } = options;
        // Fold an optional style hint into the prompt text.
        const fullPrompt = style ? `${prompt}, in ${style} style` : prompt;
        logger.prompt(`Image generation: ${fullPrompt}`);
        logger.debug(`Image config: ${aspectRatio}, ${imageSize}, search: ${useGoogleSearch}`);
        // Request both a caption (TEXT) and the image payload (IMAGE).
        const config = {
            responseModalities: [Modality.TEXT, Modality.IMAGE],
            imageConfig: { aspectRatio, imageSize },
        };
        if (useGoogleSearch) {
            config.tools = [{ googleSearch: {} }];
        }
        const response = await genAI.models.generateContent({
            model: imageModelName,
            contents: fullPrompt,
            config,
        });
        // Extract the image from the first candidate's parts.
        const candidates = response.candidates;
        if (!candidates || candidates.length === 0) {
            throw new Error('No candidates in image generation response');
        }
        const parts = candidates[0].content?.parts;
        if (!parts) {
            throw new Error('No parts in image generation response');
        }
        let imageData;
        let mimeType = 'image/png';
        let description;
        for (const part of parts) {
            if (part.inlineData) {
                imageData = part.inlineData.data;
                mimeType = part.inlineData.mimeType || 'image/png';
            }
            else if (part.text) {
                description = part.text;
            }
        }
        if (!imageData) {
            throw new Error('No image data in response');
        }
        // Persist the image under outputDir when requested.
        let filePath = '';
        if (saveToFile) {
            const extension = mimeType.split('/')[1] || 'png';
            const filename = `image-${Date.now()}.${extension}`;
            filePath = path.join(outputDir, filename);
            fs.writeFileSync(filePath, Buffer.from(imageData, 'base64'));
            logger.info(`Image saved to: ${filePath}`);
        }
        logger.response(`Image generated successfully (${mimeType})`);
        return {
            base64: imageData,
            mimeType,
            filePath,
            description,
        };
    }
    catch (error) {
        logger.error('Error generating image:', error);
        throw error;
    }
}
281
// In-memory registry of in-flight video operations, keyed by operation name,
// so checkVideoStatus() can poll them later. Lost on process restart.
const activeVideoOperations = new Map();
/**
 * Start video generation using Gemini's Veo model.
 *
 * @param prompt - Description of the video to generate.
 * @param options - { aspectRatio = '16:9', negativePrompt }.
 * @returns { operationName, status: 'pending' } — poll with checkVideoStatus().
 */
export async function startVideoGeneration(prompt, options = {}) {
    try {
        const { aspectRatio = '16:9', negativePrompt } = options;
        logger.prompt(`Video generation: ${prompt}`);
        // Only include negativePrompt when the caller supplied one.
        const config = negativePrompt
            ? { aspectRatio, negativePrompt }
            : { aspectRatio };
        const operation = await genAI.models.generateVideos({
            model: videoModelName,
            prompt,
            config,
        });
        const operationName = operation.name || `video-${Date.now()}`;
        // Keep the full operation object around for later polling.
        activeVideoOperations.set(operationName, operation);
        logger.info(`Video generation started: ${operationName}`);
        return { operationName, status: 'pending' };
    }
    catch (error) {
        logger.error('Error starting video generation:', error);
        throw error;
    }
}
316
/**
 * Check the status of a video generation operation.
 *
 * @param operationName - Name returned by startVideoGeneration().
 * @returns { operationName, status, videoUri?, filePath?, error? } where
 *   status is 'processing', 'completed', or 'failed'. On completion the
 *   video is downloaded into outputDir when possible.
 *
 * Fix: the failure branch used `String(status.error) || 'Unknown error'`,
 * which renders object errors as "[object Object]" and makes the fallback
 * unreachable (String() never returns ''). We now prefer `.message` and
 * fall back to stringification.
 */
export async function checkVideoStatus(operationName) {
    try {
        logger.debug(`Checking video status: ${operationName}`);
        // Get the stored operation object (in-memory only; see registry above).
        let operation = activeVideoOperations.get(operationName);
        if (!operation) {
            return {
                operationName,
                status: 'failed',
                error: 'Operation not found. It may have expired or the server was restarted.',
            };
        }
        // Poll for updated status
        const status = await genAI.operations.getVideosOperation({
            operation: operation,
        });
        // Update stored operation
        activeVideoOperations.set(operationName, status);
        if (status.done) {
            // Clean up stored operation
            activeVideoOperations.delete(operationName);
            if (status.error) {
                const err = status.error;
                // Prefer a human-readable message; String(object) alone
                // would yield "[object Object]".
                const message = typeof err === 'object' && err !== null && 'message' in err
                    ? String(err.message)
                    : String(err);
                return {
                    operationName,
                    status: 'failed',
                    error: message || 'Unknown error',
                };
            }
            // Video is ready - get the URI
            const videoUri = status.response?.generatedVideos?.[0]?.video?.uri;
            let filePath;
            if (videoUri) {
                // Download and save the video
                const timestamp = Date.now();
                const filename = `video-${timestamp}.mp4`;
                filePath = path.join(outputDir, filename);
                try {
                    // Fetch the video with API key in header
                    const response = await fetch(videoUri, {
                        headers: {
                            'x-goog-api-key': process.env.GEMINI_API_KEY || '',
                        },
                    });
                    if (response.ok) {
                        const buffer = Buffer.from(await response.arrayBuffer());
                        fs.writeFileSync(filePath, buffer);
                        logger.info(`Video saved to: ${filePath}`);
                    }
                    else {
                        logger.warn(`Failed to download video: ${response.status}`);
                        filePath = undefined;
                    }
                }
                catch (downloadError) {
                    // Best-effort download: report completion with the URI even
                    // if saving locally failed.
                    logger.warn('Failed to download video:', downloadError);
                    filePath = undefined;
                }
            }
            return {
                operationName,
                status: 'completed',
                videoUri,
                filePath,
            };
        }
        return {
            operationName,
            status: 'processing',
        };
    }
    catch (error) {
        logger.error('Error checking video status:', error);
        throw error;
    }
}
394
/**
 * Get the path of the directory where generated media files are saved.
 * Populated by initGeminiClient(); undefined before initialization.
 */
export function getOutputDir() {
    return outputDir;
}
@@ -0,0 +1,8 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * MCP Server Gemini - Integrates Google's Gemini models with Claude Code
4
+ *
5
+ * This MCP server provides access to Gemini models for use in Claude Code.
6
+ * Features include direct queries, brainstorming, and analysis tools.
7
+ */
8
+ export {};
package/dist/index.js ADDED
@@ -0,0 +1,220 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * MCP Server Gemini - Integrates Google's Gemini models with Claude Code
4
+ *
5
+ * This MCP server provides access to Gemini models for use in Claude Code.
6
+ * Features include direct queries, brainstorming, and analysis tools.
7
+ */
8
+ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
9
+ import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
10
+ import { parseArgs } from 'node:util';
11
+ // Import tools
12
+ import { registerQueryTool } from './tools/query.js';
13
+ import { registerBrainstormTool } from './tools/brainstorm.js';
14
+ import { registerAnalyzeTool } from './tools/analyze.js';
15
+ import { registerSummarizeTool } from './tools/summarize.js';
16
+ import { registerImageGenTool } from './tools/image-gen.js';
17
+ import { registerImageEditTool } from './tools/image-edit.js';
18
+ import { registerVideoGenTool } from './tools/video-gen.js';
19
+ import { registerCodeExecTool } from './tools/code-exec.js';
20
+ import { registerSearchTool } from './tools/search.js';
21
+ import { registerStructuredTool } from './tools/structured.js';
22
+ import { registerYouTubeTool } from './tools/youtube.js';
23
+ import { registerDocumentTool } from './tools/document.js';
24
+ import { registerUrlContextTool } from './tools/url-context.js';
25
+ import { registerCacheTool } from './tools/cache.js';
26
+ import { registerSpeechTool } from './tools/speech.js';
27
+ // Import Gemini client and logger
28
+ import { initGeminiClient } from './gemini-client.js';
29
+ import { setupLogger, logger } from './utils/logger.js';
30
// ---- CLI argument parsing -------------------------------------------------
const { values } = parseArgs({
    options: {
        verbose: { type: 'boolean', short: 'v', default: false },
        quiet: { type: 'boolean', short: 'q', default: false },
        help: { type: 'boolean', short: 'h', default: false },
    },
});
// Print usage and exit when -h/--help is given.
// NOTE(review): original help-text indentation was lost in extraction;
// reconstructed with conventional alignment.
if (values.help) {
    console.log(`
MCP Server Gemini - Integrates Google's Gemini models with Claude Code

Usage:
  gemini-mcp [options]

Options:
  -v, --verbose    Enable verbose logging (shows all prompts and responses)
  -q, --quiet      Run in quiet mode (minimal logging)
  -h, --help       Show this help message

Environment Variables:
  GEMINI_API_KEY (required)     Your Google Gemini API key
  VERBOSE (optional)            Set to "true" to enable verbose logging
  QUIET (optional)              Set to "true" to enable quiet mode
  GEMINI_MODEL (optional)       Default Gemini model to use
  GEMINI_PRO_MODEL (optional)   Specify Pro model variant
  GEMINI_FLASH_MODEL (optional) Specify Flash model variant
`);
    process.exit(0);
}
// ---- Logging mode: CLI flags take precedence, then environment ------------
let logLevel = 'normal';
if (values.verbose || process.env.VERBOSE === 'true') {
    logLevel = 'verbose';
}
else if (values.quiet || process.env.QUIET === 'true') {
    logLevel = 'quiet';
}
setupLogger(logLevel);
// ---- Required configuration ----------------------------------------------
if (!process.env.GEMINI_API_KEY) {
    logger.error('Error: GEMINI_API_KEY environment variable is required');
    process.exit(1);
}
// Default model: Gemini 3 (latest frontier model), overridable via env.
const defaultModel = 'gemini-3-pro-preview';
const geminiModel = process.env.GEMINI_MODEL || defaultModel;
// Log model configuration for debugging
logger.debug(`Model configuration:
- GEMINI_MODEL: ${process.env.GEMINI_MODEL || '(not set, using default)'}
- GEMINI_PRO_MODEL: ${process.env.GEMINI_PRO_MODEL || '(not set, using default)'}
- GEMINI_FLASH_MODEL: ${process.env.GEMINI_FLASH_MODEL || '(not set, using default)'}`);
96
/**
 * Server entry point: initializes the Gemini client, registers all tools,
 * and serves MCP requests over stdio with reconnect/shutdown handling.
 */
async function main() {
    logger.info(`Starting MCP Gemini Server with model: ${geminiModel}`);
    logger.info(`Logging mode: ${logLevel}`);
    // Log (but never exit on) unexpected stdio errors, since stdio is the
    // MCP transport channel.
    process.stdin.on('error', (err) => {
        logger.error('STDIN error:', err);
    });
    process.stdout.on('error', (err) => {
        logger.error('STDOUT error:', err);
    });
    try {
        // Initialize Gemini client (validates the API key via a test call).
        await initGeminiClient();
        const server = new McpServer({
            name: 'Gemini',
            version: '0.5.0',
        });
        // Register every tool the package ships.
        const registrars = [
            registerQueryTool,
            registerBrainstormTool,
            registerAnalyzeTool,
            registerSummarizeTool,
            registerImageGenTool,
            registerImageEditTool,
            registerVideoGenTool,
            registerCodeExecTool,
            registerSearchTool,
            registerStructuredTool,
            registerYouTubeTool,
            registerDocumentTool,
            registerUrlContextTool,
            registerCacheTool,
            registerSpeechTool,
        ];
        for (const register of registrars) {
            register(server);
        }
        // Serve over stdio with reconnect-on-close and verbose error logging.
        const transport = new StdioServerTransport();
        transport.onclose = () => {
            logger.warn('MCP transport connection closed');
            logger.debug('Connection closed event triggered');
            // Retry with exponential backoff (1.5x, capped at 10 s).
            let reconnectAttempts = 0;
            const maxReconnectAttempts = 5;
            const attemptReconnect = () => {
                if (reconnectAttempts >= maxReconnectAttempts) {
                    logger.error(`Failed to reconnect after ${maxReconnectAttempts} attempts`);
                    return;
                }
                reconnectAttempts++;
                const delay = Math.min(1000 * Math.pow(1.5, reconnectAttempts - 1), 10000);
                logger.info(`Attempting to reconnect (${reconnectAttempts}/${maxReconnectAttempts}) after ${delay}ms...`);
                setTimeout(() => {
                    try {
                        // Reconnection is pointless once stdio is gone.
                        if (process.stdin.destroyed || process.stdout.destroyed) {
                            logger.error('Cannot reconnect: stdin or stdout is destroyed');
                            return;
                        }
                        server.connect(transport)
                            .then(() => {
                            logger.info('Successfully reconnected to MCP transport');
                            reconnectAttempts = 0;
                        })
                            .catch(e => {
                            logger.error('Reconnection failed:', e);
                            attemptReconnect(); // Try again with backoff
                        });
                    }
                    catch (e) {
                        logger.error('Error during reconnection attempt:', e);
                        attemptReconnect(); // Try again with backoff
                    }
                }, delay);
            };
            attemptReconnect();
        };
        transport.onerror = (error) => {
            logger.error('MCP transport error:', error);
            if (error instanceof Error) {
                logger.debug(`Error name: ${error.name}, message: ${error.message}`);
                logger.debug(`Stack trace: ${error.stack}`);
            }
        };
        try {
            // Emit environment diagnostics before connecting.
            logger.debug(`Process details - PID: ${process.pid}, Node version: ${process.version}`);
            logger.debug(`Environment variables: API_KEY=${process.env.GEMINI_API_KEY ? 'SET' : 'NOT_SET'}, VERBOSE=${process.env.VERBOSE || 'not set'}`);
            logger.debug(`Process stdin/stdout state - isTTY: ${process.stdin.isTTY}, ${process.stdout.isTTY}`);
            await server.connect(transport);
            logger.info('MCP Gemini Server running');
        }
        catch (err) {
            logger.error('Failed to connect MCP server transport:', err);
            if (err instanceof Error) {
                logger.debug(`Error stack: ${err.stack}`);
                logger.debug(`Error details: name=${err.name}, message=${err.message}`);
            }
            else {
                logger.debug(`Non-Error object thrown: ${JSON.stringify(err)}`);
            }
            logger.warn('Server will attempt to continue running despite connection error');
        }
        // Graceful shutdown on SIGINT/SIGTERM.
        const shutdown = async () => {
            logger.info('Shutting down MCP Gemini Server');
            await server.close();
            process.exit(0);
        };
        process.on('SIGINT', shutdown);
        process.on('SIGTERM', shutdown);
    }
    catch (error) {
        logger.error('Failed to start MCP Gemini Server:', error);
        process.exit(1);
    }
}
main();
@@ -0,0 +1,10 @@
1
+ /**
2
+ * Analyze Tool - Provides analysis capabilities using Gemini models
3
+ *
4
+ * This tool allows analyzing code, text, or specific content with Gemini.
5
+ */
6
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
7
+ /**
8
+ * Register analysis tools with the MCP server
9
+ */
10
+ export declare function registerAnalyzeTool(server: McpServer): void;