mcp-server-gemini 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +155 -0
- package/LICENSE +21 -0
- package/README.md +180 -0
- package/dist/config/constants.js +71 -0
- package/dist/config/constants.js.map +1 -0
- package/dist/config/models.js +121 -0
- package/dist/config/models.js.map +1 -0
- package/dist/enhanced-stdio-server.js +1164 -0
- package/dist/enhanced-stdio-server.js.map +1 -0
- package/dist/i18n.js +109 -0
- package/dist/i18n.js.map +1 -0
- package/dist/server.js +251 -0
- package/dist/server.js.map +1 -0
- package/dist/tools/analyze-codebase.js +373 -0
- package/dist/tools/analyze-codebase.js.map +1 -0
- package/dist/tools/analyze-content.js +295 -0
- package/dist/tools/analyze-content.js.map +1 -0
- package/dist/tools/brainstorm.js +237 -0
- package/dist/tools/brainstorm.js.map +1 -0
- package/dist/tools/definitions.js +375 -0
- package/dist/tools/definitions.js.map +1 -0
- package/dist/tools/fix-ui.js +262 -0
- package/dist/tools/fix-ui.js.map +1 -0
- package/dist/tools/generate-ui.js +311 -0
- package/dist/tools/generate-ui.js.map +1 -0
- package/dist/tools/index.js +17 -0
- package/dist/tools/index.js.map +1 -0
- package/dist/tools/list-models.js +30 -0
- package/dist/tools/list-models.js.map +1 -0
- package/dist/tools/multimodal-query.js +83 -0
- package/dist/tools/multimodal-query.js.map +1 -0
- package/dist/tools/search.js +94 -0
- package/dist/tools/search.js.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/error-handler.js +69 -0
- package/dist/utils/error-handler.js.map +1 -0
- package/dist/utils/file-reader.js +470 -0
- package/dist/utils/file-reader.js.map +1 -0
- package/dist/utils/gemini-client.js +184 -0
- package/dist/utils/gemini-client.js.map +1 -0
- package/dist/utils/security.js +370 -0
- package/dist/utils/security.js.map +1 -0
- package/dist/utils/validators.js +150 -0
- package/dist/utils/validators.js.map +1 -0
- package/dist/windows-utils.js +175 -0
- package/dist/windows-utils.js.map +1 -0
- package/package.json +69 -0
|
@@ -0,0 +1,1164 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Entry point for the enhanced stdio MCP server that proxies Google Gemini.
import { GoogleGenAI } from '@google/genai';
import { createInterface } from 'readline';
// Increase max buffer size for large images (10MB)
// NOTE(review): setEncoding('utf8') only sets the stream's text encoding; it
// does not actually change any buffer size — confirm whether the 10MB claim
// above is still accurate or a leftover from an earlier implementation.
if (process.stdin.setEncoding) {
    process.stdin.setEncoding('utf8');
}
|
|
8
|
+
// Available Gemini models as of July 2025
//
// Registry of every model this server accepts. Consumed by:
//   - the tool input schemas (enum: Object.keys(GEMINI_MODELS)),
//   - generateText, which rejects names missing from this map,
//   - listModels, which filters entries by capability tags.
// Per entry:
//   description   - human-readable summary surfaced by list_models
//   features      - capability tags checked elsewhere (e.g. 'grounding')
//   contextWindow - context size in tokens
//   thinking      - present and true only on the 2.5 "thinking" series
const GEMINI_MODELS = {
    // Thinking models (2.5 series) - latest and most capable
    'gemini-2.5-pro': {
        description: 'Most capable thinking model, best for complex reasoning and coding',
        features: ['thinking', 'function_calling', 'json_mode', 'grounding', 'system_instructions'],
        contextWindow: 2000000, // 2M tokens
        thinking: true
    },
    'gemini-2.5-flash': {
        description: 'Fast thinking model with best price/performance ratio',
        features: ['thinking', 'function_calling', 'json_mode', 'grounding', 'system_instructions'],
        contextWindow: 1000000, // 1M tokens
        thinking: true
    },
    'gemini-2.5-flash-lite': {
        description: 'Ultra-fast, cost-efficient thinking model for high-throughput tasks',
        features: ['thinking', 'function_calling', 'json_mode', 'system_instructions'],
        contextWindow: 1000000,
        thinking: true
    },
    // 2.0 series
    'gemini-2.0-flash': {
        description: 'Fast, efficient model with 1M context window',
        features: ['function_calling', 'json_mode', 'grounding', 'system_instructions'],
        contextWindow: 1000000
    },
    'gemini-2.0-flash-lite': {
        description: 'Most cost-efficient model for simple tasks',
        features: ['function_calling', 'json_mode', 'system_instructions'],
        contextWindow: 1000000
    },
    'gemini-2.0-pro-experimental': {
        description: 'Experimental model with 2M context, excellent for coding',
        features: ['function_calling', 'json_mode', 'grounding', 'system_instructions'],
        contextWindow: 2000000
    },
    // Legacy models (for compatibility)
    'gemini-1.5-pro': {
        description: 'Previous generation pro model',
        features: ['function_calling', 'json_mode', 'system_instructions'],
        contextWindow: 2000000
    },
    'gemini-1.5-flash': {
        description: 'Previous generation fast model',
        features: ['function_calling', 'json_mode', 'system_instructions'],
        contextWindow: 1000000
    }
};
|
|
57
|
+
/**
 * MCP (Model Context Protocol) server speaking newline-delimited JSON-RPC 2.0
 * over stdio, backed by the @google/genai SDK. All diagnostics go to stderr
 * so stdout remains a clean protocol channel.
 */
class EnhancedStdioMCPServer {
    // GoogleGenAI client instance, created in the constructor.
    genAI;
    // conversationId -> array of content turns ({ parts, role }) used by
    // generate_text to carry context across calls.
    conversations = new Map();
    /**
     * @param {string} apiKey - Google AI API key, passed straight to GoogleGenAI.
     */
    constructor(apiKey) {
        this.genAI = new GoogleGenAI({ apiKey });
        this.setupStdioInterface(); // starts listening on stdin immediately
    }
|
|
64
|
+
setupStdioInterface() {
|
|
65
|
+
const rl = createInterface({
|
|
66
|
+
input: process.stdin,
|
|
67
|
+
output: process.stdout,
|
|
68
|
+
terminal: false,
|
|
69
|
+
// Increase max line length for large image data
|
|
70
|
+
crlfDelay: Infinity
|
|
71
|
+
});
|
|
72
|
+
rl.on('line', (line) => {
|
|
73
|
+
if (line.trim()) {
|
|
74
|
+
try {
|
|
75
|
+
const request = JSON.parse(line);
|
|
76
|
+
this.handleRequest(request);
|
|
77
|
+
}
|
|
78
|
+
catch (error) {
|
|
79
|
+
console.error('Failed to parse message:', error);
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
});
|
|
83
|
+
process.stdin.on('error', (err) => {
|
|
84
|
+
console.error('stdin error:', err);
|
|
85
|
+
});
|
|
86
|
+
}
|
|
87
|
+
async handleRequest(request) {
|
|
88
|
+
console.error('Handling request:', request.method);
|
|
89
|
+
try {
|
|
90
|
+
let response;
|
|
91
|
+
switch (request.method) {
|
|
92
|
+
case 'initialize':
|
|
93
|
+
response = {
|
|
94
|
+
jsonrpc: '2.0',
|
|
95
|
+
id: request.id,
|
|
96
|
+
result: {
|
|
97
|
+
protocolVersion: '2024-11-05',
|
|
98
|
+
serverInfo: {
|
|
99
|
+
name: 'mcp-server-gemini-enhanced',
|
|
100
|
+
version: '4.1.0'
|
|
101
|
+
},
|
|
102
|
+
capabilities: {
|
|
103
|
+
tools: {},
|
|
104
|
+
resources: {},
|
|
105
|
+
prompts: {}
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
};
|
|
109
|
+
break;
|
|
110
|
+
case 'tools/list':
|
|
111
|
+
response = {
|
|
112
|
+
jsonrpc: '2.0',
|
|
113
|
+
id: request.id,
|
|
114
|
+
result: {
|
|
115
|
+
tools: this.getAvailableTools()
|
|
116
|
+
}
|
|
117
|
+
};
|
|
118
|
+
break;
|
|
119
|
+
case 'tools/call':
|
|
120
|
+
response = await this.handleToolCall(request);
|
|
121
|
+
break;
|
|
122
|
+
case 'resources/list':
|
|
123
|
+
response = {
|
|
124
|
+
jsonrpc: '2.0',
|
|
125
|
+
id: request.id,
|
|
126
|
+
result: {
|
|
127
|
+
resources: this.getAvailableResources()
|
|
128
|
+
}
|
|
129
|
+
};
|
|
130
|
+
break;
|
|
131
|
+
case 'resources/read':
|
|
132
|
+
response = await this.handleResourceRead(request);
|
|
133
|
+
break;
|
|
134
|
+
case 'prompts/list':
|
|
135
|
+
response = {
|
|
136
|
+
jsonrpc: '2.0',
|
|
137
|
+
id: request.id,
|
|
138
|
+
result: {
|
|
139
|
+
prompts: this.getAvailablePrompts()
|
|
140
|
+
}
|
|
141
|
+
};
|
|
142
|
+
break;
|
|
143
|
+
default:
|
|
144
|
+
if (!('id' in request)) {
|
|
145
|
+
console.error(`Notification received: ${request.method}`);
|
|
146
|
+
return;
|
|
147
|
+
}
|
|
148
|
+
response = {
|
|
149
|
+
jsonrpc: '2.0',
|
|
150
|
+
id: request.id,
|
|
151
|
+
error: {
|
|
152
|
+
code: -32601,
|
|
153
|
+
message: 'Method not found'
|
|
154
|
+
}
|
|
155
|
+
};
|
|
156
|
+
}
|
|
157
|
+
this.sendResponse(response);
|
|
158
|
+
}
|
|
159
|
+
catch (error) {
|
|
160
|
+
const errorResponse = {
|
|
161
|
+
jsonrpc: '2.0',
|
|
162
|
+
id: request.id,
|
|
163
|
+
error: {
|
|
164
|
+
code: -32603,
|
|
165
|
+
message: error instanceof Error ? error.message : 'Internal error'
|
|
166
|
+
}
|
|
167
|
+
};
|
|
168
|
+
this.sendResponse(errorResponse);
|
|
169
|
+
}
|
|
170
|
+
}
|
|
171
|
+
    /**
     * Tool manifest returned by tools/list. Each inputSchema mirrors what the
     * corresponding handler dispatched from handleToolCall accepts.
     */
    getAvailableTools() {
        return [
            {
                name: 'generate_text',
                description: 'Generate text using Google Gemini with advanced features',
                inputSchema: {
                    type: 'object',
                    properties: {
                        prompt: {
                            type: 'string',
                            description: 'The prompt to send to Gemini'
                        },
                        model: {
                            type: 'string',
                            description: 'Specific Gemini model to use',
                            // Enum tracks the registry so new models appear automatically
                            enum: Object.keys(GEMINI_MODELS),
                            default: 'gemini-2.5-flash'
                        },
                        systemInstruction: {
                            type: 'string',
                            description: 'System instruction to guide model behavior'
                        },
                        temperature: {
                            type: 'number',
                            description: 'Temperature for generation (0-2)',
                            default: 0.7,
                            minimum: 0,
                            maximum: 2
                        },
                        maxTokens: {
                            type: 'number',
                            description: 'Maximum tokens to generate',
                            default: 2048
                        },
                        topK: {
                            type: 'number',
                            description: 'Top-k sampling parameter',
                            default: 40
                        },
                        topP: {
                            type: 'number',
                            description: 'Top-p (nucleus) sampling parameter',
                            default: 0.95
                        },
                        jsonMode: {
                            type: 'boolean',
                            description: 'Enable JSON mode for structured output',
                            default: false
                        },
                        jsonSchema: {
                            type: 'object',
                            description: 'JSON schema for structured output (when jsonMode is true)'
                        },
                        grounding: {
                            type: 'boolean',
                            description: 'Enable Google Search grounding for up-to-date information',
                            default: false
                        },
                        safetySettings: {
                            type: 'array',
                            description: 'Safety settings for content filtering',
                            items: {
                                type: 'object',
                                properties: {
                                    category: {
                                        type: 'string',
                                        enum: ['HARM_CATEGORY_HARASSMENT', 'HARM_CATEGORY_HATE_SPEECH', 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'HARM_CATEGORY_DANGEROUS_CONTENT']
                                    },
                                    threshold: {
                                        type: 'string',
                                        enum: ['BLOCK_NONE', 'BLOCK_ONLY_HIGH', 'BLOCK_MEDIUM_AND_ABOVE', 'BLOCK_LOW_AND_ABOVE']
                                    }
                                }
                            }
                        },
                        conversationId: {
                            type: 'string',
                            description: 'ID for maintaining conversation context'
                        }
                    },
                    required: ['prompt']
                }
            },
            {
                name: 'analyze_image',
                description: 'Analyze images using Gemini vision capabilities',
                inputSchema: {
                    type: 'object',
                    properties: {
                        prompt: {
                            type: 'string',
                            description: 'Question or instruction about the image'
                        },
                        imageUrl: {
                            type: 'string',
                            description: 'URL of the image to analyze'
                        },
                        imageBase64: {
                            type: 'string',
                            description: 'Base64-encoded image data (alternative to URL)'
                        },
                        model: {
                            type: 'string',
                            description: 'Vision-capable Gemini model',
                            enum: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash'],
                            default: 'gemini-2.5-flash'
                        }
                    },
                    required: ['prompt'],
                    // Exactly one image source must accompany the prompt
                    oneOf: [
                        { required: ['imageUrl'] },
                        { required: ['imageBase64'] }
                    ]
                }
            },
            {
                name: 'count_tokens',
                description: 'Count tokens for a given text with a specific model',
                inputSchema: {
                    type: 'object',
                    properties: {
                        text: {
                            type: 'string',
                            description: 'Text to count tokens for'
                        },
                        model: {
                            type: 'string',
                            description: 'Model to use for token counting',
                            enum: Object.keys(GEMINI_MODELS),
                            default: 'gemini-2.5-flash'
                        }
                    },
                    required: ['text']
                }
            },
            {
                name: 'list_models',
                description: 'List all available Gemini models and their capabilities',
                inputSchema: {
                    type: 'object',
                    properties: {
                        filter: {
                            type: 'string',
                            description: 'Filter models by capability',
                            enum: ['all', 'thinking', 'vision', 'grounding', 'json_mode']
                        }
                    }
                }
            },
            {
                name: 'embed_text',
                description: 'Generate embeddings for text using Gemini embedding models',
                inputSchema: {
                    type: 'object',
                    properties: {
                        text: {
                            type: 'string',
                            description: 'Text to generate embeddings for'
                        },
                        model: {
                            type: 'string',
                            description: 'Embedding model to use',
                            enum: ['text-embedding-004', 'text-multilingual-embedding-002'],
                            default: 'text-embedding-004'
                        }
                    },
                    required: ['text']
                }
            },
            {
                name: 'get_help',
                description: 'Get help and usage information for the Gemini MCP server',
                inputSchema: {
                    type: 'object',
                    properties: {
                        topic: {
                            type: 'string',
                            description: 'Help topic to get information about',
                            enum: ['overview', 'tools', 'models', 'parameters', 'examples', 'quick-start'],
                            default: 'overview'
                        }
                    }
                }
            }
        ];
    }
|
|
357
|
+
    /**
     * Resource manifest returned by resources/list. Each uri here is resolved
     * to concrete content by handleResourceRead.
     */
    getAvailableResources() {
        return [
            {
                uri: 'gemini://models',
                name: 'Available Gemini Models',
                description: 'List of all available Gemini models and their capabilities',
                mimeType: 'application/json'
            },
            {
                uri: 'gemini://capabilities',
                name: 'API Capabilities',
                description: 'Detailed information about Gemini API capabilities',
                mimeType: 'text/markdown'
            },
            {
                uri: 'gemini://help/usage',
                name: 'Usage Guide',
                description: 'Complete guide on using all tools and features',
                mimeType: 'text/markdown'
            },
            {
                uri: 'gemini://help/parameters',
                name: 'Parameters Reference',
                description: 'Detailed documentation of all parameters',
                mimeType: 'text/markdown'
            },
            {
                uri: 'gemini://help/examples',
                name: 'Examples',
                description: 'Example usage patterns for common tasks',
                mimeType: 'text/markdown'
            }
        ];
    }
|
|
391
|
+
    /**
     * Prompt templates advertised via prompts/list.
     * NOTE(review): no prompts/get handler is visible in this chunk — confirm
     * these templates are actually resolvable by clients.
     */
    getAvailablePrompts() {
        return [
            {
                name: 'code_review',
                description: 'Comprehensive code review with Gemini 2.5 Pro',
                arguments: [
                    {
                        name: 'code',
                        description: 'Code to review',
                        required: true
                    },
                    {
                        name: 'language',
                        description: 'Programming language',
                        required: false
                    }
                ]
            },
            {
                name: 'explain_with_thinking',
                description: 'Deep explanation using Gemini 2.5 thinking capabilities',
                arguments: [
                    {
                        name: 'topic',
                        description: 'Topic to explain',
                        required: true
                    },
                    {
                        name: 'level',
                        description: 'Explanation level (beginner/intermediate/expert)',
                        required: false
                    }
                ]
            },
            {
                name: 'creative_writing',
                description: 'Creative writing with style control',
                arguments: [
                    {
                        name: 'prompt',
                        description: 'Writing prompt',
                        required: true
                    },
                    {
                        name: 'style',
                        description: 'Writing style',
                        required: false
                    },
                    {
                        name: 'length',
                        description: 'Desired length',
                        required: false
                    }
                ]
            }
        ];
    }
|
|
448
|
+
async handleToolCall(request) {
|
|
449
|
+
const { name, arguments: args } = request.params || {};
|
|
450
|
+
switch (name) {
|
|
451
|
+
case 'generate_text':
|
|
452
|
+
return await this.generateText(request.id, args);
|
|
453
|
+
case 'analyze_image':
|
|
454
|
+
return await this.analyzeImage(request.id, args);
|
|
455
|
+
case 'count_tokens':
|
|
456
|
+
return await this.countTokens(request.id, args);
|
|
457
|
+
case 'list_models':
|
|
458
|
+
return this.listModels(request.id, args);
|
|
459
|
+
case 'embed_text':
|
|
460
|
+
return await this.embedText(request.id, args);
|
|
461
|
+
case 'get_help':
|
|
462
|
+
return this.getHelp(request.id, args);
|
|
463
|
+
default:
|
|
464
|
+
return {
|
|
465
|
+
jsonrpc: '2.0',
|
|
466
|
+
id: request.id,
|
|
467
|
+
error: {
|
|
468
|
+
code: -32601,
|
|
469
|
+
message: `Unknown tool: ${name}`
|
|
470
|
+
}
|
|
471
|
+
};
|
|
472
|
+
}
|
|
473
|
+
}
|
|
474
|
+
async generateText(id, args) {
|
|
475
|
+
try {
|
|
476
|
+
const model = args.model || 'gemini-2.5-flash';
|
|
477
|
+
const modelInfo = GEMINI_MODELS[model];
|
|
478
|
+
if (!modelInfo) {
|
|
479
|
+
throw new Error(`Unknown model: ${model}`);
|
|
480
|
+
}
|
|
481
|
+
// Build generation config
|
|
482
|
+
const generationConfig = {
|
|
483
|
+
temperature: args.temperature || 0.7,
|
|
484
|
+
maxOutputTokens: args.maxTokens || 2048,
|
|
485
|
+
topK: args.topK || 40,
|
|
486
|
+
topP: args.topP || 0.95
|
|
487
|
+
};
|
|
488
|
+
// Add JSON mode if requested
|
|
489
|
+
if (args.jsonMode) {
|
|
490
|
+
generationConfig.responseMimeType = 'application/json';
|
|
491
|
+
if (args.jsonSchema) {
|
|
492
|
+
generationConfig.responseSchema = args.jsonSchema;
|
|
493
|
+
}
|
|
494
|
+
}
|
|
495
|
+
// Build the request
|
|
496
|
+
const requestBody = {
|
|
497
|
+
model,
|
|
498
|
+
contents: [{
|
|
499
|
+
parts: [{
|
|
500
|
+
text: args.prompt
|
|
501
|
+
}],
|
|
502
|
+
role: 'user'
|
|
503
|
+
}],
|
|
504
|
+
generationConfig
|
|
505
|
+
};
|
|
506
|
+
// Add system instruction if provided
|
|
507
|
+
if (args.systemInstruction) {
|
|
508
|
+
requestBody.systemInstruction = {
|
|
509
|
+
parts: [{
|
|
510
|
+
text: args.systemInstruction
|
|
511
|
+
}]
|
|
512
|
+
};
|
|
513
|
+
}
|
|
514
|
+
// Add safety settings if provided
|
|
515
|
+
if (args.safetySettings) {
|
|
516
|
+
requestBody.safetySettings = args.safetySettings;
|
|
517
|
+
}
|
|
518
|
+
// Add grounding if requested and supported
|
|
519
|
+
if (args.grounding && modelInfo.features.includes('grounding')) {
|
|
520
|
+
requestBody.tools = [{
|
|
521
|
+
googleSearch: {}
|
|
522
|
+
}];
|
|
523
|
+
}
|
|
524
|
+
// Handle conversation context
|
|
525
|
+
if (args.conversationId) {
|
|
526
|
+
const history = this.conversations.get(args.conversationId) || [];
|
|
527
|
+
if (history.length > 0) {
|
|
528
|
+
requestBody.contents = [...history, ...requestBody.contents];
|
|
529
|
+
}
|
|
530
|
+
}
|
|
531
|
+
// Call the API using the new SDK format
|
|
532
|
+
const result = await this.genAI.models.generateContent({
|
|
533
|
+
model,
|
|
534
|
+
...requestBody
|
|
535
|
+
});
|
|
536
|
+
const text = result.text || '';
|
|
537
|
+
// Update conversation history if needed
|
|
538
|
+
if (args.conversationId) {
|
|
539
|
+
const history = this.conversations.get(args.conversationId) || [];
|
|
540
|
+
history.push(...requestBody.contents);
|
|
541
|
+
history.push({
|
|
542
|
+
parts: [{
|
|
543
|
+
text: text
|
|
544
|
+
}],
|
|
545
|
+
role: 'model'
|
|
546
|
+
});
|
|
547
|
+
this.conversations.set(args.conversationId, history);
|
|
548
|
+
}
|
|
549
|
+
return {
|
|
550
|
+
jsonrpc: '2.0',
|
|
551
|
+
id,
|
|
552
|
+
result: {
|
|
553
|
+
content: [{
|
|
554
|
+
type: 'text',
|
|
555
|
+
text: text
|
|
556
|
+
}],
|
|
557
|
+
metadata: {
|
|
558
|
+
model,
|
|
559
|
+
tokensUsed: result.usageMetadata?.totalTokenCount,
|
|
560
|
+
candidatesCount: result.candidates?.length || 1,
|
|
561
|
+
finishReason: result.candidates?.[0]?.finishReason
|
|
562
|
+
}
|
|
563
|
+
}
|
|
564
|
+
};
|
|
565
|
+
}
|
|
566
|
+
catch (error) {
|
|
567
|
+
console.error('Error in generateText:', error);
|
|
568
|
+
return {
|
|
569
|
+
jsonrpc: '2.0',
|
|
570
|
+
id,
|
|
571
|
+
error: {
|
|
572
|
+
code: -32603,
|
|
573
|
+
message: error instanceof Error ? error.message : 'Internal error'
|
|
574
|
+
}
|
|
575
|
+
};
|
|
576
|
+
}
|
|
577
|
+
}
|
|
578
|
+
async analyzeImage(id, args) {
|
|
579
|
+
try {
|
|
580
|
+
const model = args.model || 'gemini-2.5-flash';
|
|
581
|
+
// Validate inputs
|
|
582
|
+
if (!args.imageUrl && !args.imageBase64) {
|
|
583
|
+
throw new Error('Either imageUrl or imageBase64 must be provided');
|
|
584
|
+
}
|
|
585
|
+
// Prepare image part
|
|
586
|
+
let imagePart;
|
|
587
|
+
if (args.imageUrl) {
|
|
588
|
+
// For URL, we'd need to fetch and convert to base64
|
|
589
|
+
// For now, we'll just pass the URL as instruction
|
|
590
|
+
imagePart = {
|
|
591
|
+
text: `[Image URL: ${args.imageUrl}]`
|
|
592
|
+
};
|
|
593
|
+
}
|
|
594
|
+
else if (args.imageBase64) {
|
|
595
|
+
// Log base64 data size for debugging
|
|
596
|
+
console.error(`Image base64 length: ${args.imageBase64.length}`);
|
|
597
|
+
// Extract MIME type and data
|
|
598
|
+
const matches = args.imageBase64.match(/^data:(.+);base64,(.+)$/);
|
|
599
|
+
if (matches) {
|
|
600
|
+
console.error(`MIME type: ${matches[1]}, Data length: ${matches[2].length}`);
|
|
601
|
+
imagePart = {
|
|
602
|
+
inlineData: {
|
|
603
|
+
mimeType: matches[1],
|
|
604
|
+
data: matches[2]
|
|
605
|
+
}
|
|
606
|
+
};
|
|
607
|
+
}
|
|
608
|
+
else {
|
|
609
|
+
// If no data URI format, assume raw base64
|
|
610
|
+
console.error('Raw base64 data detected');
|
|
611
|
+
imagePart = {
|
|
612
|
+
inlineData: {
|
|
613
|
+
mimeType: 'image/jpeg',
|
|
614
|
+
data: args.imageBase64
|
|
615
|
+
}
|
|
616
|
+
};
|
|
617
|
+
}
|
|
618
|
+
}
|
|
619
|
+
const result = await this.genAI.models.generateContent({
|
|
620
|
+
model,
|
|
621
|
+
contents: [{
|
|
622
|
+
parts: [
|
|
623
|
+
{ text: args.prompt },
|
|
624
|
+
imagePart
|
|
625
|
+
],
|
|
626
|
+
role: 'user'
|
|
627
|
+
}]
|
|
628
|
+
});
|
|
629
|
+
const text = result.text || '';
|
|
630
|
+
return {
|
|
631
|
+
jsonrpc: '2.0',
|
|
632
|
+
id,
|
|
633
|
+
result: {
|
|
634
|
+
content: [{
|
|
635
|
+
type: 'text',
|
|
636
|
+
text: text
|
|
637
|
+
}]
|
|
638
|
+
}
|
|
639
|
+
};
|
|
640
|
+
}
|
|
641
|
+
catch (error) {
|
|
642
|
+
console.error('Error in analyzeImage:', error);
|
|
643
|
+
return {
|
|
644
|
+
jsonrpc: '2.0',
|
|
645
|
+
id,
|
|
646
|
+
error: {
|
|
647
|
+
code: -32603,
|
|
648
|
+
message: `Image analysis failed: ${error instanceof Error ? error.message : 'Unknown error'}`
|
|
649
|
+
}
|
|
650
|
+
};
|
|
651
|
+
}
|
|
652
|
+
}
|
|
653
|
+
async countTokens(id, args) {
|
|
654
|
+
try {
|
|
655
|
+
const model = args.model || 'gemini-2.5-flash';
|
|
656
|
+
const result = await this.genAI.models.countTokens({
|
|
657
|
+
model,
|
|
658
|
+
contents: [{
|
|
659
|
+
parts: [{
|
|
660
|
+
text: args.text
|
|
661
|
+
}],
|
|
662
|
+
role: 'user'
|
|
663
|
+
}]
|
|
664
|
+
});
|
|
665
|
+
return {
|
|
666
|
+
jsonrpc: '2.0',
|
|
667
|
+
id,
|
|
668
|
+
result: {
|
|
669
|
+
content: [{
|
|
670
|
+
type: 'text',
|
|
671
|
+
text: `Token count: ${result.totalTokens}`
|
|
672
|
+
}],
|
|
673
|
+
metadata: {
|
|
674
|
+
tokenCount: result.totalTokens,
|
|
675
|
+
model
|
|
676
|
+
}
|
|
677
|
+
}
|
|
678
|
+
};
|
|
679
|
+
}
|
|
680
|
+
catch (error) {
|
|
681
|
+
return {
|
|
682
|
+
jsonrpc: '2.0',
|
|
683
|
+
id,
|
|
684
|
+
error: {
|
|
685
|
+
code: -32603,
|
|
686
|
+
message: error instanceof Error ? error.message : 'Internal error'
|
|
687
|
+
}
|
|
688
|
+
};
|
|
689
|
+
}
|
|
690
|
+
}
|
|
691
|
+
listModels(id, args) {
|
|
692
|
+
const filter = args?.filter || 'all';
|
|
693
|
+
let models = Object.entries(GEMINI_MODELS);
|
|
694
|
+
if (filter !== 'all') {
|
|
695
|
+
models = models.filter(([_, info]) => {
|
|
696
|
+
switch (filter) {
|
|
697
|
+
case 'thinking':
|
|
698
|
+
return 'thinking' in info && info.thinking === true;
|
|
699
|
+
case 'vision':
|
|
700
|
+
return info.features.includes('function_calling'); // All current models support vision
|
|
701
|
+
case 'grounding':
|
|
702
|
+
return info.features.includes('grounding');
|
|
703
|
+
case 'json_mode':
|
|
704
|
+
return info.features.includes('json_mode');
|
|
705
|
+
default:
|
|
706
|
+
return true;
|
|
707
|
+
}
|
|
708
|
+
});
|
|
709
|
+
}
|
|
710
|
+
const modelList = models.map(([name, info]) => ({
|
|
711
|
+
name,
|
|
712
|
+
...info
|
|
713
|
+
}));
|
|
714
|
+
return {
|
|
715
|
+
jsonrpc: '2.0',
|
|
716
|
+
id,
|
|
717
|
+
result: {
|
|
718
|
+
content: [{
|
|
719
|
+
type: 'text',
|
|
720
|
+
text: JSON.stringify(modelList, null, 2)
|
|
721
|
+
}],
|
|
722
|
+
metadata: {
|
|
723
|
+
count: modelList.length,
|
|
724
|
+
filter
|
|
725
|
+
}
|
|
726
|
+
}
|
|
727
|
+
};
|
|
728
|
+
}
|
|
729
|
+
async embedText(id, args) {
|
|
730
|
+
try {
|
|
731
|
+
const model = args.model || 'text-embedding-004';
|
|
732
|
+
const result = await this.genAI.models.embedContent({
|
|
733
|
+
model,
|
|
734
|
+
contents: args.text
|
|
735
|
+
});
|
|
736
|
+
return {
|
|
737
|
+
jsonrpc: '2.0',
|
|
738
|
+
id,
|
|
739
|
+
result: {
|
|
740
|
+
content: [{
|
|
741
|
+
type: 'text',
|
|
742
|
+
text: JSON.stringify({
|
|
743
|
+
embedding: result.embeddings?.[0]?.values || [],
|
|
744
|
+
model
|
|
745
|
+
})
|
|
746
|
+
}],
|
|
747
|
+
metadata: {
|
|
748
|
+
model,
|
|
749
|
+
dimensions: result.embeddings?.[0]?.values?.length || 0
|
|
750
|
+
}
|
|
751
|
+
}
|
|
752
|
+
};
|
|
753
|
+
}
|
|
754
|
+
catch (error) {
|
|
755
|
+
return {
|
|
756
|
+
jsonrpc: '2.0',
|
|
757
|
+
id,
|
|
758
|
+
error: {
|
|
759
|
+
code: -32603,
|
|
760
|
+
message: error instanceof Error ? error.message : 'Internal error'
|
|
761
|
+
}
|
|
762
|
+
};
|
|
763
|
+
}
|
|
764
|
+
}
|
|
765
|
+
    /**
     * resources/read handler: resolves a gemini:// URI to static text content.
     * Unknown URIs and a missing uri parameter answer with -32602.
     * @param {object} request - JSON-RPC request; expects params.uri.
     */
    async handleResourceRead(request) {
        const uri = request.params?.uri;
        if (!uri) {
            return {
                jsonrpc: '2.0',
                id: request.id,
                error: {
                    code: -32602,
                    message: 'Missing required parameter: uri'
                }
            };
        }
        let content = '';
        let mimeType = 'text/plain';
        switch (uri) {
            case 'gemini://models':
                // Raw registry dump, same data list_models formats
                content = JSON.stringify(GEMINI_MODELS, null, 2);
                mimeType = 'application/json';
                break;
            case 'gemini://capabilities':
                // Static markdown document; the template literal's continuation
                // lines are intentionally unindented (indent would leak into
                // the resource text).
                content = `# Gemini API Capabilities

## Text Generation
- All models support advanced text generation
- System instructions for behavior control
- Temperature, topK, topP for output control
- Token limits vary by model (1M-2M)

## Thinking Models (2.5 Series)
- Step-by-step reasoning before responding
- Better accuracy for complex problems
- Ideal for coding, analysis, and problem-solving

## JSON Mode
- Structured output with schema validation
- Available on all models
- Ensures consistent response format

## Google Search Grounding
- Real-time web search integration
- Available on select models
- Perfect for current events and facts

## Vision Capabilities
- Image analysis and understanding
- Available on most models
- Supports URLs and base64 images

## Embeddings
- Semantic text embeddings
- Multiple models available
- Multilingual support

## Safety Settings
- Granular content filtering
- Customizable thresholds
- Per-category control

## Conversation Memory
- Context retention across messages
- Session-based conversations
- Ideal for multi-turn interactions`;
                mimeType = 'text/markdown';
                break;
            case 'gemini://help/usage':
                // Usage guide is the overview + tools help topics concatenated
                content = this.getHelpContent('overview') + '\n\n' + this.getHelpContent('tools');
                mimeType = 'text/markdown';
                break;
            case 'gemini://help/parameters':
                content = this.getHelpContent('parameters');
                mimeType = 'text/markdown';
                break;
            case 'gemini://help/examples':
                content = this.getHelpContent('examples');
                mimeType = 'text/markdown';
                break;
            default:
                return {
                    jsonrpc: '2.0',
                    id: request.id,
                    error: {
                        code: -32602,
                        message: `Unknown resource: ${uri}`
                    }
                };
        }
        return {
            jsonrpc: '2.0',
            id: request.id,
            result: {
                contents: [{
                    uri,
                    mimeType,
                    text: content
                }]
            }
        };
    }
|
|
863
|
+
/**
 * Build static Markdown help text for a single help topic.
 *
 * Shared by the `get_help` tool handler and the `gemini://help/*`
 * resource reads, so both surfaces serve identical content.
 *
 * @param {string} topic - Help topic: 'overview', 'tools', 'parameters',
 *   or 'examples'. Anything else falls through to the default branch.
 *   NOTE(review): the tool docs below advertise 'models' and 'quick-start'
 *   as valid topics, but this switch does not handle them (getHelp()
 *   inlines that content instead) — confirm whether they should be
 *   folded in here.
 * @returns {string} Markdown for the topic, or 'Unknown help topic.'
 *   when the topic is not recognized.
 */
getHelpContent(topic) {
  // Extract help content generation to a separate method
  switch (topic) {
    case 'overview':
      // NOTE: template-literal lines are flush-left on purpose — any
      // indentation would become part of the served Markdown.
      return `# Gemini MCP Server Help

Welcome to the Gemini MCP Server v4.1.0! This server provides access to Google's Gemini AI models through Claude Desktop.

## Available Tools
1. **generate_text** - Generate text with advanced features
2. **analyze_image** - Analyze images using vision models
3. **count_tokens** - Count tokens for cost estimation
4. **list_models** - List all available models
5. **embed_text** - Generate text embeddings
6. **get_help** - Get help on using this server

## Quick Start
- "Use Gemini to explain [topic]"
- "Analyze this image with Gemini"
- "List all Gemini models"
- "Get help on parameters"

## Key Features
- Latest Gemini 2.5 models with thinking capabilities
- JSON mode for structured output
- Google Search grounding for current information
- System instructions for behavior control
- Conversation memory for context
- Safety settings customization

Use "get help on tools" for detailed tool information.`;
    case 'tools':
      return `# Available Tools

## 1. generate_text
Generate text using Gemini models with advanced features.

**Parameters:**
- prompt (required): Your text prompt
- model: Choose from gemini-2.5-pro, gemini-2.5-flash, etc.
- temperature: 0-2 (default 0.7)
- maxTokens: Max output tokens (default 2048)
- systemInstruction: Guide model behavior
- jsonMode: Enable JSON output
- grounding: Enable Google Search
- conversationId: Maintain conversation context

**Example:** "Use Gemini 2.5 Pro to explain quantum computing"

## 2. analyze_image
Analyze images using vision-capable models.

**Parameters:**
- prompt (required): Question about the image
- imageUrl OR imageBase64 (required): Image source
- model: Vision-capable model (default gemini-2.5-flash)

**Example:** "Analyze this architecture diagram"

## 3. count_tokens
Count tokens for text with a specific model.

**Parameters:**
- text (required): Text to count
- model: Model for counting (default gemini-2.5-flash)

**Example:** "Count tokens for this paragraph"

## 4. list_models
List available models with optional filtering.

**Parameters:**
- filter: all, thinking, vision, grounding, json_mode

**Example:** "List models with thinking capability"

## 5. embed_text
Generate embeddings for semantic search.

**Parameters:**
- text (required): Text to embed
- model: text-embedding-004 or text-multilingual-embedding-002

**Example:** "Generate embeddings for similarity search"

## 6. get_help
Get help on using this server.

**Parameters:**
- topic: overview, tools, models, parameters, examples, quick-start

**Example:** "Get help on parameters"`;
    case 'parameters':
      return `# Parameter Reference

## generate_text Parameters

**Required:**
- prompt (string): Your text prompt

**Optional:**
- model (string): Model to use (default: gemini-2.5-flash)
- systemInstruction (string): System prompt for behavior
- temperature (0-2): Creativity level (default: 0.7)
- maxTokens (number): Max output tokens (default: 2048)
- topK (number): Top-k sampling (default: 40)
- topP (number): Nucleus sampling (default: 0.95)
- jsonMode (boolean): Enable JSON output
- jsonSchema (object): JSON schema for validation
- grounding (boolean): Enable Google Search
- conversationId (string): Conversation identifier
- safetySettings (array): Content filtering settings

## Temperature Guide
- 0.1-0.3: Precise, factual
- 0.5-0.8: Balanced (default 0.7)
- 1.0-1.5: Creative
- 1.5-2.0: Very creative

## JSON Mode Example
Enable jsonMode and provide jsonSchema:
{
"type": "object",
"properties": {
"sentiment": {"type": "string"},
"score": {"type": "number"}
}
}

## Safety Settings
Categories: HARASSMENT, HATE_SPEECH, SEXUALLY_EXPLICIT, DANGEROUS_CONTENT
Thresholds: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE`;
    case 'examples':
      return `# Usage Examples

## Basic Text Generation
"Use Gemini to explain machine learning"

## With Specific Model
"Use Gemini 2.5 Pro to write a Python sorting function"

## With Temperature
"Use Gemini with temperature 1.5 to write a creative story"

## JSON Mode
"Use Gemini in JSON mode to analyze sentiment and return {sentiment, confidence, keywords}"

## With Grounding
"Use Gemini with grounding to research latest AI developments"

## System Instructions
"Use Gemini as a Python tutor to explain decorators"

## Conversation Context
"Start conversation 'chat-001' about web development"
"Continue chat-001 and ask about React hooks"

## Image Analysis
"Analyze this screenshot and describe the UI elements"

## Token Counting
"Count tokens for this document using gemini-2.5-pro"

## Complex Example
"Use Gemini 2.5 Pro to review this code with:
- System instruction: 'You are a security expert'
- Temperature: 0.3
- JSON mode with schema for findings
- Grounding for latest security practices"`;
    default:
      // Unrecognized topic — callers decide how to surface this.
      return 'Unknown help topic.';
  }
}
|
|
1036
|
+
/**
 * Handle the `get_help` tool call: return topic help as an MCP tool result.
 *
 * Delegates 'overview', 'tools', 'parameters' and 'examples' to
 * getHelpContent(); the 'models' and 'quick-start' topics are inlined here
 * because getHelpContent() has no cases for them.
 *
 * @param {string|number} id - JSON-RPC request id, echoed back in the reply.
 * @param {{topic?: string}} [args] - Tool arguments; a missing topic
 *   defaults to 'overview'.
 *   NOTE(review): `args?.topic || 'overview'` also maps an empty-string
 *   topic to 'overview' (|| treats '' as falsy) — presumably intended,
 *   but confirm; `??` would behave differently.
 * @returns {object} JSON-RPC 2.0 success response whose result.content
 *   holds a single text item with the Markdown help.
 */
getHelp(id, args) {
  const topic = args?.topic || 'overview';
  let helpContent = '';
  switch (topic) {
    case 'overview':
      helpContent = this.getHelpContent('overview');
      break;
    case 'tools':
      helpContent = this.getHelpContent('tools');
      break;
    case 'models':
      // Inlined (not delegated): getHelpContent() has no 'models' case.
      // Template-literal lines are flush-left so no stray indentation
      // leaks into the served Markdown.
      helpContent = `# Available Gemini Models

## Thinking Models (Latest - 2.5 Series)
**gemini-2.5-pro**
- Most capable, best for complex reasoning
- 2M token context window
- Features: thinking, JSON mode, grounding, system instructions

**gemini-2.5-flash** ⭐ Recommended
- Best balance of speed and capability
- 1M token context window
- Features: thinking, JSON mode, grounding, system instructions

**gemini-2.5-flash-lite**
- Ultra-fast, cost-efficient
- 1M token context window
- Features: thinking, JSON mode, system instructions

## Standard Models (2.0 Series)
**gemini-2.0-flash**
- Fast and efficient
- 1M token context window
- Features: JSON mode, grounding, system instructions

**gemini-2.0-flash-lite**
- Most cost-efficient
- 1M token context window
- Features: JSON mode, system instructions

**gemini-2.0-pro-experimental**
- Excellent for coding
- 2M token context window
- Features: JSON mode, grounding, system instructions

## Model Selection Guide
- Complex reasoning: gemini-2.5-pro
- General use: gemini-2.5-flash
- Fast responses: gemini-2.5-flash-lite
- Cost-sensitive: gemini-2.0-flash-lite
- Coding tasks: gemini-2.0-pro-experimental`;
      break;
    case 'parameters':
      helpContent = this.getHelpContent('parameters');
      break;
    case 'examples':
      helpContent = this.getHelpContent('examples');
      break;
    case 'quick-start':
      // Inlined (not delegated): getHelpContent() has no 'quick-start' case.
      helpContent = `# Quick Start Guide

## 1. Basic Usage
Just ask naturally:
- "Use Gemini to [your request]"
- "Ask Gemini about [topic]"

## 2. Common Tasks

**Text Generation:**
"Use Gemini to write a function that sorts arrays"

**Image Analysis:**
"What's in this image?" [attach image]

**Model Info:**
"List all Gemini models"

**Token Counting:**
"Count tokens for my prompt"

## 3. Advanced Features

**JSON Output:**
"Use Gemini in JSON mode to extract key points"

**Current Information:**
"Use Gemini with grounding to get latest news"

**Conversations:**
"Start a chat with Gemini about Python"

## 4. Tips
- Use gemini-2.5-flash for most tasks
- Lower temperature for facts, higher for creativity
- Enable grounding for current information
- Use conversation IDs to maintain context

## Need More Help?
- "Get help on tools" - Detailed tool information
- "Get help on parameters" - All parameters explained
- "Get help on models" - Model selection guide`;
      break;
    default:
      // Unlike getHelpContent(), this fallback lists the valid topics.
      helpContent = 'Unknown help topic. Available topics: overview, tools, models, parameters, examples, quick-start';
  }
  // MCP tool-result envelope: a single text content item.
  return {
    jsonrpc: '2.0',
    id,
    result: {
      content: [{
        type: 'text',
        text: helpContent
      }]
    }
  };
}
|
|
1152
|
+
sendResponse(response) {
|
|
1153
|
+
const responseStr = JSON.stringify(response);
|
|
1154
|
+
process.stdout.write(responseStr + '\n');
|
|
1155
|
+
}
|
|
1156
|
+
}
|
|
1157
|
+
// Entry point: read the mandatory API key from the environment, then boot
// the stdio MCP server. Exiting non-zero on a missing key lets the host
// (e.g. Claude Desktop) surface the misconfiguration immediately.
const geminiApiKey = process.env.GEMINI_API_KEY;
if (!geminiApiKey) {
  console.error('GEMINI_API_KEY environment variable is required');
  process.exit(1);
}
new EnhancedStdioMCPServer(geminiApiKey);
|
|
1164
|
+
//# sourceMappingURL=enhanced-stdio-server.js.map
|