@rlabs-inc/gemini-mcp 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENCE +21 -0
- package/README.md +418 -0
- package/dist/gemini-client.d.ts +120 -0
- package/dist/gemini-client.js +399 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.js +220 -0
- package/dist/tools/analyze.d.ts +10 -0
- package/dist/tools/analyze.js +96 -0
- package/dist/tools/brainstorm.d.ts +10 -0
- package/dist/tools/brainstorm.js +220 -0
- package/dist/tools/cache.d.ts +17 -0
- package/dist/tools/cache.js +286 -0
- package/dist/tools/code-exec.d.ts +17 -0
- package/dist/tools/code-exec.js +135 -0
- package/dist/tools/document.d.ts +16 -0
- package/dist/tools/document.js +333 -0
- package/dist/tools/image-edit.d.ts +16 -0
- package/dist/tools/image-edit.js +291 -0
- package/dist/tools/image-gen.d.ts +17 -0
- package/dist/tools/image-gen.js +148 -0
- package/dist/tools/query.d.ts +11 -0
- package/dist/tools/query.js +63 -0
- package/dist/tools/search.d.ts +15 -0
- package/dist/tools/search.js +128 -0
- package/dist/tools/speech.d.ts +17 -0
- package/dist/tools/speech.js +304 -0
- package/dist/tools/structured.d.ts +16 -0
- package/dist/tools/structured.js +247 -0
- package/dist/tools/summarize.d.ts +10 -0
- package/dist/tools/summarize.js +77 -0
- package/dist/tools/url-context.d.ts +17 -0
- package/dist/tools/url-context.js +226 -0
- package/dist/tools/video-gen.d.ts +11 -0
- package/dist/tools/video-gen.js +136 -0
- package/dist/tools/youtube.d.ts +16 -0
- package/dist/tools/youtube.js +218 -0
- package/dist/utils/logger.d.ts +33 -0
- package/dist/utils/logger.js +82 -0
- package/package.json +48 -0
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Analyze Tool - Provides analysis capabilities using Gemini models
|
|
3
|
+
*
|
|
4
|
+
* This tool allows analyzing code, text, or specific content with Gemini.
|
|
5
|
+
*/
|
|
6
|
+
import { z } from "zod";
|
|
7
|
+
import { generateWithGeminiPro } from "../gemini-client.js";
|
|
8
|
+
/**
|
|
9
|
+
* Register analysis tools with the MCP server
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Focus-specific checklists appended to the gemini-analyze-code prompt.
 * Keys mirror the tool's `focus` enum. (The original inlined one ternary per
 * focus value, which left stray blank lines in the prompt for every
 * non-matching branch.)
 */
const CODE_FOCUS_CHECKLISTS = Object.freeze({
    quality: "1. Code quality assessment\n2. Style and readability review\n3. Maintainability considerations\n4. Suggested improvements",
    security: "1. Security vulnerabilities identification\n2. Potential exploit vectors\n3. Security best practices assessment\n4. Security improvements",
    performance: "1. Performance bottlenecks\n2. Optimization opportunities\n3. Algorithmic complexity analysis\n4. Performance improvement suggestions",
    bugs: "1. Bugs and logical errors\n2. Edge cases that aren't handled\n3. Potential runtime errors\n4. Bug fix suggestions",
    general: "1. Overall code assessment\n2. Strengths and weaknesses\n3. Potential issues (bugs, security, performance)\n4. Suggested improvements"
});
/**
 * Analysis-type checklists appended to the gemini-analyze-text prompt.
 * Keys mirror the tool's `type` enum.
 */
const TEXT_TYPE_CHECKLISTS = Object.freeze({
    sentiment: "1. Overall sentiment (positive, negative, neutral)\n2. Sentiment intensity\n3. Key emotional elements\n4. Sentiment by topic/section if applicable",
    summary: "1. Concise summary of the main points\n2. Key takeaways\n3. Important details\n4. Context and implications",
    entities: "1. People mentioned\n2. Organizations mentioned\n3. Locations mentioned\n4. Other notable entities (products, events, etc.)",
    "key-points": "1. Main arguments or claims\n2. Supporting evidence\n3. Conclusions reached\n4. Logical structure analysis",
    general: "1. Overall summary\n2. Key themes and topics\n3. Tone and style assessment\n4. Notable insights and implications"
});
/**
 * Register analysis tools (gemini-analyze-code, gemini-analyze-text) with the
 * MCP server. Both tools build a checklist-driven prompt and forward it to
 * Gemini via generateWithGeminiPro.
 *
 * @param server - The MCP server instance to register the tools on.
 */
export function registerAnalyzeTool(server) {
    // Code analysis tool
    server.tool("gemini-analyze-code", {
        code: z.string().describe("The code to analyze"),
        language: z.string().optional().describe("The programming language of the code"),
        focus: z.enum(["quality", "security", "performance", "bugs", "general"]).default("general").describe("What aspect to focus the analysis on")
    }, async ({ code, language, focus }) => {
        // Log to stderr: other tools in this package route diagnostics through
        // the shared logger, and stdout is the MCP stdio transport channel —
        // writing with console.log could corrupt the protocol stream.
        console.error(`Analyzing code with focus on ${focus}`);
        try {
            const langText = language ? `${language} code` : "code";
            const prompt = `
Analyze the following ${langText} with a focus on ${focus}:

\`\`\`${language ? language : ""}
${code}
\`\`\`

Please provide:
${CODE_FOCUS_CHECKLISTS[focus]}
`;
            const response = await generateWithGeminiPro(prompt);
            return {
                content: [{
                    type: "text",
                    text: response
                }]
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            console.error(`Error analyzing code: ${errorMessage}`);
            return {
                content: [{
                    type: "text",
                    text: `Error: ${errorMessage}`
                }],
                isError: true
            };
        }
    });
    // Text analysis tool
    server.tool("gemini-analyze-text", {
        text: z.string().describe("The text to analyze"),
        type: z.enum(["sentiment", "summary", "entities", "key-points", "general"]).default("general").describe("Type of analysis to perform")
    }, async ({ text, type }) => {
        // stderr for the same reason as above: keep stdout clean for MCP.
        console.error(`Analyzing text with focus on ${type}`);
        try {
            const prompt = `
Analyze the following text with a focus on ${type}:

"""
${text}
"""

Please provide:
${TEXT_TYPE_CHECKLISTS[type]}
`;
            const response = await generateWithGeminiPro(prompt);
            return {
                content: [{
                    type: "text",
                    text: response
                }]
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            console.error(`Error analyzing text: ${errorMessage}`);
            return {
                content: [{
                    type: "text",
                    text: `Error: ${errorMessage}`
                }],
                isError: true
            };
        }
    });
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Brainstorm Tool - Enables automatic collaborative brainstorming between Claude and Gemini
|
|
3
|
+
*
|
|
4
|
+
* This tool facilitates multi-round collaborative planning until consensus is reached.
|
|
5
|
+
*/
|
|
6
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
7
|
+
/**
 * Register brainstorm tool with the MCP server
 *
 * Registers the `gemini-brainstorm` tool, which runs a multi-round
 * Claude/Gemini exchange (up to a caller-supplied round limit) and returns a
 * synthesized plan once a consensus-score threshold is met.
 *
 * @param server - The MCP server instance to register the tool on
 */
export declare function registerBrainstormTool(server: McpServer): void;
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Brainstorm Tool - Enables automatic collaborative brainstorming between Claude and Gemini
|
|
3
|
+
*
|
|
4
|
+
* This tool facilitates multi-round collaborative planning until consensus is reached.
|
|
5
|
+
*/
|
|
6
|
+
import { z } from "zod";
|
|
7
|
+
import { generateWithGeminiPro } from "../gemini-client.js";
|
|
8
|
+
import { logger } from "../utils/logger.js";
|
|
9
|
+
/**
|
|
10
|
+
* Register brainstorm tool with the MCP server
|
|
11
|
+
*/
|
|
12
|
+
export function registerBrainstormTool(server) {
    server.tool("gemini-brainstorm", {
        prompt: z.string().describe("The problem statement or query to brainstorm about"),
        maxRounds: z.number().int().min(1).max(5).default(3).describe("Maximum number of brainstorming rounds"),
        claudeThoughts: z.string().describe("Claude's initial thoughts on the problem")
    }, async ({ prompt, maxRounds = 3, claudeThoughts }) => {
        logger.info(`Starting consensus-building brainstorm with Gemini: ${prompt.substring(0, 50)}...`);
        try {
            // Set up conversation tracking: one entry per round, each holding
            // both sides of the exchange plus the extracted consensus score.
            const rounds = [];
            let currentRound = 1;
            let consensusReached = false;
            // First round - Gemini responds to Claude's initial thoughts
            const firstRoundPrompt = `
You're collaborating with Claude (another AI assistant) to address this user request:

---
${prompt}
---

Claude has shared these initial thoughts:
${claudeThoughts}

Analyze Claude's thoughts and respond with:
1. Points of agreement
2. Additional insights or perspectives
3. Any modifications to Claude's approach
4. Next steps to implement the solution

End your response with a "Consensus Score" from 1-10 indicating how aligned you are with Claude:
- 1-3: Significant differences in approach
- 4-6: Partial agreement with key differences
- 7-9: Strong alignment with minor refinements
- 10: Complete consensus

Format this as: "Consensus Score: [NUMBER]"
`;
            const geminiFirstResponse = await generateWithGeminiPro(firstRoundPrompt);
            // Extract consensus score (defaults to 0 when the model omits the
            // "Consensus Score: N" line, so a missing score never ends early).
            const consensusMatch = geminiFirstResponse.match(/Consensus Score:\s*(\d+)/i);
            const consensusScore = consensusMatch ? parseInt(consensusMatch[1], 10) : 0;
            // Record first round
            rounds.push({
                number: currentRound,
                claudeThoughts: claudeThoughts,
                geminiResponse: geminiFirstResponse,
                consensusScore: consensusScore
            });
            // Check if we already have consensus (a score of 8+ counts).
            if (consensusScore >= 8) {
                logger.info(`Consensus reached in first round with score ${consensusScore}`);
                consensusReached = true;
            }
            // Continue with additional rounds if needed
            currentRound++;
            let lastClaudeThoughts = claudeThoughts;
            let lastGeminiResponse = geminiFirstResponse;
            while (currentRound <= maxRounds && !consensusReached) {
                // Generate Claude's response to Gemini (using Gemini to simulate)
                // NOTE: "Claude's" turn from round 2 onward is Gemini
                // role-playing Claude — the real Claude only supplies the
                // initial claudeThoughts argument.
                // Prior turns are truncated to 300 chars to keep the prompt small.
                const simulateClaudePrompt = `
You will simulate Claude's response in our brainstorming process about this request:

---
${prompt}
---

Previous exchange:
1. Claude's initial thoughts:
${lastClaudeThoughts.substring(0, 300)}${lastClaudeThoughts.length > 300 ? '...' : ''}

2. Your (Gemini's) response:
${lastGeminiResponse}

As Claude, respond to Gemini's message. Use Claude's communication style:
1. Analytical, thoughtful, and balanced
2. Precise and well-structured
3. Careful about implementation details
4. Focused on understanding nuance

Include areas of agreement, further refinements, and a path forward.

End your response with a "Consensus Score" from 1-10:
- 1-3: Significant differences remain
- 4-6: Closer alignment but key differences exist
- 7-9: Strong alignment with minor refinements
- 10: Complete consensus

Format: "Consensus Score: [NUMBER]"
`;
                const simulatedClaudeResponse = await generateWithGeminiPro(simulateClaudePrompt);
                // Extract consensus score from the simulated-Claude turn.
                const claudeConsensusMatch = simulatedClaudeResponse.match(/Consensus Score:\s*(\d+)/i);
                const claudeConsensusScore = claudeConsensusMatch ? parseInt(claudeConsensusMatch[1], 10) : 0;
                // Generate Gemini's response to Claude
                const geminiFollowUpPrompt = `
Continuing our collaboration with Claude on this request:

---
${prompt}
---

Latest exchange:
1. Your previous response:
${lastGeminiResponse.substring(0, 300)}${lastGeminiResponse.length > 300 ? '...' : ''}

2. Claude's latest thoughts:
${simulatedClaudeResponse}

Respond to Claude's latest message, focusing on:
1. Addressing any remaining differences
2. Building on areas of agreement
3. Moving toward a final consensus solution
4. Clarifying implementation details

End your response with a "Consensus Score" from 1-10:
- 1-3: Significant differences remain
- 4-6: Closer alignment but key differences exist
- 7-9: Strong alignment with minor refinements
- 10: Complete consensus

Format: "Consensus Score: [NUMBER]"
`;
                const geminiResponse = await generateWithGeminiPro(geminiFollowUpPrompt);
                // Extract consensus score from Gemini's follow-up.
                const geminiConsensusMatch = geminiResponse.match(/Consensus Score:\s*(\d+)/i);
                const geminiConsensusScore = geminiConsensusMatch ? parseInt(geminiConsensusMatch[1], 10) : 0;
                // Record this round (only Gemini's score is stored per round).
                rounds.push({
                    number: currentRound,
                    claudeThoughts: simulatedClaudeResponse,
                    geminiResponse: geminiResponse,
                    consensusScore: geminiConsensusScore
                });
                // Check if we've reached consensus — either side scoring 8+
                // ends the loop.
                if (geminiConsensusScore >= 8 || claudeConsensusScore >= 8) {
                    logger.info(`Consensus reached in round ${currentRound} with score ${geminiConsensusScore}`);
                    consensusReached = true;
                }
                // Update for next round
                lastClaudeThoughts = simulatedClaudeResponse;
                lastGeminiResponse = geminiResponse;
                currentRound++;
            }
            // Generate final synthesis of the conversation (runs whether or
            // not consensus was reached; rounds are truncated to 250 chars
            // each in the synthesis prompt).
            const synthesisPrompt = `
You've completed a collaborative brainstorming session with Claude about:

---
${prompt}
---

Here's the conversation history:
${rounds.map(round => `
Round ${round.number}:
- Claude: ${round.claudeThoughts.substring(0, 250)}${round.claudeThoughts.length > 250 ? '...' : ''}
- Gemini: ${round.geminiResponse.substring(0, 250)}${round.geminiResponse.length > 250 ? '...' : ''}
- Consensus Score: ${round.consensusScore}/10
`).join('\n')}

Create a comprehensive synthesis that Claude can implement:
1. A clear summary of the approach both AIs agreed upon
2. A specific, actionable plan addressing the user's request
3. Key implementation details and considerations
4. Any technical requirements or resources needed
5. Next steps for Claude to execute

Your synthesis should be structured, thorough, and ready for Claude to implement.
`;
            const finalSynthesis = await generateWithGeminiPro(synthesisPrompt);
            // Format the conversation history (full, untruncated text here).
            const conversationHistory = rounds.map(round => `
## Round ${round.number}

### Claude's Thoughts
${round.claudeThoughts}

### Gemini's Response
${round.geminiResponse}

**Consensus Score: ${round.consensusScore}/10**
`).join('\n\n---\n\n');
            // Return the final result as a single markdown document.
            return {
                content: [{
                    type: "text",
                    text: `# Collaborative Solution: ${prompt.substring(0, 50)}...

## Final Synthesis
${finalSynthesis}

${consensusReached
                        ? `\n\n*Consensus reached after ${rounds.length} rounds of collaboration.*`
                        : `\n\n*Maximum rounds (${maxRounds}) reached. Providing best synthesis.*`}

## Conversation History
${conversationHistory}`
                }]
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.error(`Error in brainstorming: ${errorMessage}`);
            return {
                content: [{ type: "text", text: `Error: ${errorMessage}` }],
                isError: true
            };
        }
    });
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Context Caching Tool - Cache large content for cost savings
|
|
3
|
+
*
|
|
4
|
+
* When working with large documents or videos repeatedly, caching saves costs
|
|
5
|
+
* by storing the tokenized content and reusing it across requests.
|
|
6
|
+
*
|
|
7
|
+
* Useful for:
|
|
8
|
+
* - Chatbots with extensive system instructions
|
|
9
|
+
* - Repetitive analysis of large files
|
|
10
|
+
* - Recurring queries against document sets
|
|
11
|
+
* - Code repository analysis
|
|
12
|
+
*/
|
|
13
|
+
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
14
|
+
/**
 * Register caching tools with the MCP server
 *
 * Registers gemini-create-cache, gemini-query-cache, gemini-list-caches and
 * gemini-delete-cache, which manage Gemini context caches for cost savings on
 * repeated queries over large content.
 *
 * @param server - The MCP server instance to register the tools on
 */
export declare function registerCacheTool(server: McpServer): void;
|
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Context Caching Tool - Cache large content for cost savings
|
|
3
|
+
*
|
|
4
|
+
* When working with large documents or videos repeatedly, caching saves costs
|
|
5
|
+
* by storing the tokenized content and reusing it across requests.
|
|
6
|
+
*
|
|
7
|
+
* Useful for:
|
|
8
|
+
* - Chatbots with extensive system instructions
|
|
9
|
+
* - Repetitive analysis of large files
|
|
10
|
+
* - Recurring queries against document sets
|
|
11
|
+
* - Code repository analysis
|
|
12
|
+
*/
|
|
13
|
+
import { z } from 'zod';
|
|
14
|
+
import { GoogleGenAI } from '@google/genai';
|
|
15
|
+
import { logger } from '../utils/logger.js';
|
|
16
|
+
import * as fs from 'fs';
|
|
17
|
+
import * as path from 'path';
|
|
18
|
+
// Store active caches for reference.
// Maps displayName -> { name, model, displayName, expireTime } so the query
// and delete tools can resolve a user-friendly display name to the API cache
// resource name. In-memory only: entries are lost on restart and are not
// evicted when the TTL lapses.
const activeCaches = new Map();
|
|
20
|
+
/**
|
|
21
|
+
* Get MIME type from file extension
|
|
22
|
+
*/
|
|
23
|
+
/**
 * Best-effort MIME type lookup based on a file's extension.
 *
 * The extension comparison is case-insensitive. Unknown extensions fall back
 * to the generic binary type 'application/octet-stream'.
 *
 * @param filePath Path whose extension determines the MIME type.
 * @returns The matching MIME type string, or 'application/octet-stream'.
 */
function getMimeType(filePath) {
    const ext = path.extname(filePath).toLowerCase();
    const mimeTypes = {
        '.pdf': 'application/pdf',
        '.txt': 'text/plain',
        '.csv': 'text/csv',
        '.html': 'text/html',
        '.json': 'application/json',
        '.md': 'text/markdown',
        '.mp4': 'video/mp4',
        // Fixed: 'video/mov' is not a registered MIME type; QuickTime movies
        // are 'video/quicktime'.
        '.mov': 'video/quicktime',
        // Fixed: 'video/avi' is not registered; the conventional type is
        // 'video/x-msvideo'.
        '.avi': 'video/x-msvideo',
        '.webm': 'video/webm',
    };
    return mimeTypes[ext] || 'application/octet-stream';
}
|
|
36
|
+
/**
|
|
37
|
+
* Register caching tools with the MCP server
|
|
38
|
+
*/
|
|
39
|
+
export function registerCacheTool(server) {
    // Create a cache from a file: uploads the file via the Files API, then
    // creates a context cache referencing the uploaded file.
    server.tool('gemini-create-cache', {
        filePath: z.string().describe('Path to the file to cache'),
        displayName: z.string().describe('A name to identify this cache'),
        systemInstruction: z
            .string()
            .optional()
            .describe('System instruction to include with the cache'),
        ttlMinutes: z
            .number()
            .min(1)
            .max(1440)
            .default(60)
            .describe('Time to live in minutes (1-1440, default 60)'),
    }, async ({ filePath, displayName, systemInstruction, ttlMinutes }) => {
        logger.info(`Creating cache: ${displayName}`);
        try {
            const apiKey = process.env.GEMINI_API_KEY;
            if (!apiKey) {
                throw new Error('GEMINI_API_KEY not set');
            }
            if (!fs.existsSync(filePath)) {
                throw new Error(`File not found: ${filePath}`);
            }
            const genAI = new GoogleGenAI({ apiKey });
            // Use a specific model version for caching (required)
            const model = 'gemini-2.0-flash-001';
            // Upload the file first.
            // NOTE(review): the whole file is read into memory before upload;
            // very large videos could exhaust memory — consider streaming.
            const fileBuffer = fs.readFileSync(filePath);
            const mimeType = getMimeType(filePath);
            logger.info(`Uploading file: ${filePath} (${mimeType})`);
            const uploadedFile = await genAI.files.upload({
                file: new Blob([fileBuffer], { type: mimeType }),
                config: { mimeType },
            });
            // Create the cache; TTL is expressed in seconds per the API format.
            const cacheConfig = {
                displayName,
                contents: [
                    {
                        parts: [
                            {
                                fileData: {
                                    fileUri: uploadedFile.uri,
                                    mimeType: uploadedFile.mimeType,
                                },
                            },
                        ],
                    },
                ],
                ttl: `${ttlMinutes * 60}s`,
            };
            if (systemInstruction) {
                cacheConfig.systemInstruction = systemInstruction;
            }
            const cache = await genAI.caches.create({
                model,
                config: cacheConfig,
            });
            // Store cache info locally, keyed by displayName, so later tools
            // can resolve the friendly name to the API resource name.
            const expireTime = new Date(Date.now() + ttlMinutes * 60 * 1000);
            activeCaches.set(displayName, {
                name: cache.name || displayName,
                model,
                displayName,
                expireTime,
            });
            logger.info(`Cache created: ${cache.name}`);
            return {
                content: [
                    {
                        type: 'text',
                        text: `Cache created successfully!\n\n**Name:** ${cache.name}\n**Display Name:** ${displayName}\n**Model:** ${model}\n**Expires:** ${expireTime.toISOString()}\n\nUse gemini-query-cache with this cache name to query the cached content.`,
                    },
                ],
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.error(`Error creating cache: ${errorMessage}`);
            return {
                content: [
                    {
                        type: 'text',
                        text: `Error creating cache: ${errorMessage}`,
                    },
                ],
                isError: true,
            };
        }
    });
    // Query using a cached content
    server.tool('gemini-query-cache', {
        cacheName: z.string().describe('The cache name or display name'),
        question: z.string().describe('Question to ask about the cached content'),
    }, async ({ cacheName, question }) => {
        logger.info(`Querying cache: ${cacheName}`);
        try {
            const apiKey = process.env.GEMINI_API_KEY;
            if (!apiKey) {
                throw new Error('GEMINI_API_KEY not set');
            }
            const genAI = new GoogleGenAI({ apiKey });
            // Look up cache by display name or use as-is (the caller may pass
            // the raw API resource name directly).
            const cacheInfo = activeCaches.get(cacheName);
            const actualCacheName = cacheInfo?.name || cacheName;
            // The model must match the one the cache was created with; fall
            // back to the default caching model when the cache is untracked.
            const model = cacheInfo?.model || 'gemini-2.0-flash-001';
            // Query with cached content
            const response = await genAI.models.generateContent({
                model,
                contents: question,
                config: {
                    cachedContent: actualCacheName,
                },
            });
            // Surface token accounting so callers can see the cached vs new
            // prompt token split (the cost-saving rationale for caching).
            const usageMetadata = response.usageMetadata;
            let usageInfo = '';
            if (usageMetadata) {
                usageInfo = `\n\n---\n**Token Usage:**\n`;
                usageInfo += `- Total: ${usageMetadata.totalTokenCount || 0}\n`;
                usageInfo += `- Cached: ${usageMetadata.cachedContentTokenCount || 0}\n`;
                usageInfo += `- New prompt: ${usageMetadata.promptTokenCount || 0}\n`;
                usageInfo += `- Response: ${usageMetadata.candidatesTokenCount || 0}`;
            }
            logger.info('Cache query completed');
            return {
                content: [
                    {
                        type: 'text',
                        text: (response.text || 'No response.') + usageInfo,
                    },
                ],
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.error(`Error querying cache: ${errorMessage}`);
            return {
                content: [
                    {
                        type: 'text',
                        text: `Error querying cache: ${errorMessage}`,
                    },
                ],
                isError: true,
            };
        }
    });
    // List active caches
    server.tool('gemini-list-caches', {}, async () => {
        logger.info('Listing caches');
        try {
            const apiKey = process.env.GEMINI_API_KEY;
            if (!apiKey) {
                throw new Error('GEMINI_API_KEY not set');
            }
            const genAI = new GoogleGenAI({ apiKey });
            // Get caches from API
            const caches = [];
            const cacheList = await genAI.caches.list();
            // NOTE(review): caches.list() in @google/genai may return a pager /
            // async iterable rather than a plain array; if so this branch never
            // runs and the tool always reports "No active caches found" —
            // confirm against the SDK documentation.
            if (cacheList && Array.isArray(cacheList)) {
                for (const cache of cacheList) {
                    caches.push(cache);
                }
            }
            if (caches.length === 0) {
                return {
                    content: [
                        {
                            type: 'text',
                            text: 'No active caches found.\n\nCreate one with gemini-create-cache.',
                        },
                    ],
                };
            }
            // Render one markdown bullet per cache.
            let text = '**Active Caches:**\n\n';
            for (const cache of caches) {
                text += `- **${cache.displayName || cache.name}**\n`;
                text += `  - Name: ${cache.name}\n`;
                text += `  - Model: ${cache.model}\n`;
                text += `  - Expires: ${cache.expireTime}\n\n`;
            }
            return {
                content: [
                    {
                        type: 'text',
                        text,
                    },
                ],
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.error(`Error listing caches: ${errorMessage}`);
            return {
                content: [
                    {
                        type: 'text',
                        text: `Error listing caches: ${errorMessage}`,
                    },
                ],
                isError: true,
            };
        }
    });
    // Delete a cache
    server.tool('gemini-delete-cache', {
        cacheName: z.string().describe('The cache name to delete'),
    }, async ({ cacheName }) => {
        logger.info(`Deleting cache: ${cacheName}`);
        try {
            const apiKey = process.env.GEMINI_API_KEY;
            if (!apiKey) {
                throw new Error('GEMINI_API_KEY not set');
            }
            const genAI = new GoogleGenAI({ apiKey });
            // Look up by display name (falls back to treating the argument as
            // the raw API resource name).
            const cacheInfo = activeCaches.get(cacheName);
            const actualCacheName = cacheInfo?.name || cacheName;
            await genAI.caches.delete({ name: actualCacheName });
            // Remove from local tracking.
            // NOTE(review): this only removes the entry keyed by the argument;
            // if the caller passed the resource name while the map is keyed by
            // displayName, a stale entry remains — verify intended behavior.
            activeCaches.delete(cacheName);
            logger.info('Cache deleted');
            return {
                content: [
                    {
                        type: 'text',
                        text: `Cache "${cacheName}" deleted successfully.`,
                    },
                ],
            };
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.error(`Error deleting cache: ${errorMessage}`);
            return {
                content: [
                    {
                        type: 'text',
                        text: `Error deleting cache: ${errorMessage}`,
                    },
                ],
                isError: true,
            };
        }
    });
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Code Execution Tool - Let Gemini write and run Python code
|
|
3
|
+
*
|
|
4
|
+
* This tool enables Gemini to generate and execute Python code in a sandboxed environment.
|
|
5
|
+
* Useful for:
|
|
6
|
+
* - Data analysis with pandas
|
|
7
|
+
* - Math computations
|
|
8
|
+
* - Chart generation with matplotlib
|
|
9
|
+
* - File processing
|
|
10
|
+
*
|
|
11
|
+
* Supported libraries include: numpy, pandas, matplotlib, scipy, scikit-learn, tensorflow, and more.
|
|
12
|
+
*/
|
|
13
|
+
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
14
|
+
/**
 * Register code execution tools with the MCP server
 *
 * Registers the tool(s) that let Gemini generate and run Python code in its
 * sandboxed execution environment (data analysis, math, chart generation,
 * file processing).
 *
 * @param server - The MCP server instance to register the tools on
 */
export declare function registerCodeExecTool(server: McpServer): void;
|