@teamflojo/floimg-mcp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +171 -0
- package/dist/index.d.ts +43 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +44 -0
- package/dist/index.js.map +1 -0
- package/dist/server.d.ts +3 -0
- package/dist/server.d.ts.map +1 -0
- package/dist/server.js +988 -0
- package/dist/server.js.map +1 -0
- package/package.json +60 -0
package/dist/server.js
ADDED
|
@@ -0,0 +1,988 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
|
|
3
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
4
|
+
import { CallToolRequestSchema, ListToolsRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
|
|
5
|
+
import { readFile, mkdir } from "fs/promises";
|
|
6
|
+
import { existsSync } from "fs";
|
|
7
|
+
import { join } from "path";
|
|
8
|
+
import createClient, { FloimgError } from "@teamflojo/floimg";
|
|
9
|
+
import { loadConfig } from "@teamflojo/floimg/config";
|
|
10
|
+
/**
|
|
11
|
+
* floimg MCP Server v0.1.0 - Smart Image Generation & Workflow Orchestration
|
|
12
|
+
*
|
|
13
|
+
* Key improvements:
|
|
14
|
+
* - Session workspace: Images stored with IDs, no byte passing between tools
|
|
15
|
+
* - File path references: Transform/save can reference any image by path or ID
|
|
16
|
+
* - Pipeline support: Multi-step workflows in a single call
|
|
17
|
+
* - Better intent routing: Recognizes AI image requests properly
|
|
18
|
+
*/
|
|
19
|
+
// Session workspace for storing images between tool calls
// Rooted at the server's CWD so repeated runs from the same project reuse it.
const SESSION_WORKSPACE = join(process.cwd(), ".floimg", "mcp-session");
// Registry mapping imageId -> { path, mime, metadata } for session images.
// In-memory only: entries do not survive a server restart, though the
// files themselves remain on disk in SESSION_WORKSPACE.
const imageRegistry = new Map();
|
|
22
|
+
// Ensure the session workspace directory exists.
// mkdir with { recursive: true } is idempotent (it does not throw when the
// directory already exists), so calling it unconditionally both simplifies
// the code and avoids the check-then-act race of an existsSync pre-check.
async function ensureWorkspace() {
  await mkdir(SESSION_WORKSPACE, { recursive: true });
}
|
|
28
|
+
// Generate a unique ID for a session image.
// Combines a millisecond timestamp with a random base-36 suffix.
// Uses String.prototype.slice instead of the deprecated substr;
// slice(2, 11) yields the same (up to) 9 characters after "0.".
function generateImageId() {
  return `img_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
}
|
|
32
|
+
// Load an image from one of three sources, in priority order:
//   1. imageId    - a session image registered by a previous tool call
//   2. imagePath  - any file on disk (MIME auto-detected from extension)
//   3. imageBytes - base64-encoded bytes (requires an explicit mime)
// Returns { bytes, mime, ...metadata }. Throws when no usable source is
// given, with a specific message when imageBytes is missing its mime.
async function loadImage(imageId, imagePath, imageBytes, mime) {
  // Priority 1: imageId (reference to session image)
  if (imageId) {
    const registered = imageRegistry.get(imageId);
    if (!registered) {
      throw new Error(`Image ID not found: ${imageId}. Use generate_image first to create an image.`);
    }
    const bytes = await readFile(registered.path);
    return {
      bytes,
      mime: registered.mime,
      ...registered.metadata,
    };
  }
  // Priority 2: imagePath (reference to file on disk)
  if (imagePath) {
    const bytes = await readFile(imagePath);
    // Detect MIME type from extension if not provided
    const detectedMime = mime || detectMimeFromPath(imagePath);
    return {
      bytes,
      mime: detectedMime,
    };
  }
  // Priority 3: imageBytes (base64 encoded, for external images)
  if (imageBytes) {
    if (!mime) {
      // Previously this fell through to the generic error below; be specific.
      throw new Error("'mime' is required when providing imageBytes");
    }
    return {
      bytes: Buffer.from(imageBytes, "base64"),
      mime: mime,
    };
  }
  throw new Error("Must provide imageId, imagePath, or imageBytes+mime");
}
|
|
66
|
+
// Detect an image MIME type from a file path's extension.
// Case-insensitive; falls back to image/png for unknown or missing
// extensions (a path without a dot yields its whole name as "ext",
// which simply misses the map and hits the fallback).
function detectMimeFromPath(path) {
  const ext = path.split(".").pop()?.toLowerCase();
  const mimeMap = {
    svg: "image/svg+xml",
    png: "image/png",
    jpg: "image/jpeg",
    jpeg: "image/jpeg",
    webp: "image/webp",
    avif: "image/avif",
    gif: "image/gif", // added: common legacy/animated format
  };
  return mimeMap[ext || ""] || "image/png";
}
|
|
79
|
+
// Plugin auto-discovery
// Attempts to dynamically import each known floimg plugin package and
// register it on the client. Packages that are not installed (or fail to
// import) are logged and skipped. Returns the names of the plugins that
// loaded successfully.
async function loadAvailablePlugins(client) {
  const plugins = [];
  console.error("[floimg-mcp] Starting plugin discovery...");
  const potentialPlugins = [
    // Generator plugins (default export)
    { name: "quickchart", module: "@teamflojo/floimg-quickchart", type: "generator" },
    { name: "d3", module: "@teamflojo/floimg-d3", type: "generator" },
    { name: "mermaid", module: "@teamflojo/floimg-mermaid", type: "generator" },
    { name: "qr", module: "@teamflojo/floimg-qr", type: "generator" },
    { name: "screenshot", module: "@teamflojo/floimg-screenshot", type: "generator" },
    // AI provider plugins (named exports)
    {
      name: "google",
      module: "@teamflojo/floimg-google",
      type: "ai",
      exports: {
        textProvider: "geminiText",
        visionProvider: "geminiVision",
        transformProvider: "geminiTransform",
        generator: "geminiGenerate",
      },
    },
  ];
  // A plugin export may be a factory function or a ready-made instance;
  // call factories, pass instances through unchanged.
  const instantiate = (factory) => (typeof factory === "function" ? factory() : factory);
  for (const pluginDef of potentialPlugins) {
    try {
      console.error(`[floimg-mcp] Attempting to load ${pluginDef.module}...`);
      const plugin = await import(pluginDef.module);
      if (pluginDef.type === "generator") {
        // Generator plugin - use default export
        if (!plugin.default) {
          console.error(`[floimg-mcp] ⚠ ${pluginDef.module} has no default export`);
          continue;
        }
        client.registerGenerator(instantiate(plugin.default));
        plugins.push(pluginDef.name);
        console.error(`[floimg-mcp] ✓ Loaded generator: ${pluginDef.name}`);
      }
      else if (pluginDef.type === "ai") {
        // AI provider plugin - register each named export that exists.
        // Table-driven to avoid four copies of identical load/register logic.
        const registrations = [
          { key: "textProvider", label: "text provider", register: (p) => client.registerTextProvider(p) },
          { key: "visionProvider", label: "vision provider", register: (p) => client.registerVisionProvider(p) },
          { key: "transformProvider", label: "transform provider", register: (p) => client.registerTransformProvider(p) },
          { key: "generator", label: "generator", register: (p) => client.registerGenerator(p) },
        ];
        let loadedAny = false;
        for (const { key, label, register } of registrations) {
          const exportName = pluginDef.exports[key];
          if (exportName && plugin[exportName]) {
            const instance = instantiate(plugin[exportName]);
            register(instance);
            console.error(`[floimg-mcp] ✓ Loaded ${label}: ${instance.name}`);
            loadedAny = true;
          }
        }
        if (loadedAny) {
          plugins.push(pluginDef.name);
        }
      }
    }
    catch (err) {
      const error = err;
      console.error(`[floimg-mcp] ✗ Failed to load ${pluginDef.module}: ${error.message}`);
    }
  }
  console.error(`[floimg-mcp] Plugin discovery complete. Loaded: ${plugins.join(", ") || "none"}`);
  if (plugins.length === 0) {
    console.error("[floimg-mcp] ⚠ No plugins found!");
    console.error("[floimg-mcp] Install with: npm install -g @teamflojo/floimg-quickchart @teamflojo/floimg-mermaid @teamflojo/floimg-qr @teamflojo/floimg-d3 @teamflojo/floimg-screenshot @teamflojo/floimg-google");
    console.error("[floimg-mcp] Only built-in generators (shapes, openai) will be available.");
  }
  return plugins;
}
|
|
172
|
+
// Smart generator selection based on intent - IMPROVED for v0.4.0
// Routes an intent string (plus optional params) to the generator best
// suited to produce it. Specific formats (QR, screenshot, diagram, chart)
// are matched first; then AI-image heuristics; "shapes" is the fallback
// for simple SVG graphics such as gradients and basic shapes.
function selectGenerator(intent, params) {
  const lowered = intent.toLowerCase();
  // True when the intent mentions any of the given substrings.
  const mentions = (...words) => words.some((word) => lowered.includes(word));

  // QR codes
  if (mentions("qr", "barcode")) {
    return "qr";
  }

  // Screenshots
  if (
    mentions("screenshot", "capture", "website", "webpage") ||
    (mentions("url") && params.url)
  ) {
    return "screenshot";
  }

  // Diagrams (Mermaid)
  if (
    mentions(
      "flowchart",
      "diagram",
      "sequence",
      "gantt",
      "class diagram",
      "entity",
      "state",
      "mindmap",
    )
  ) {
    return "mermaid";
  }

  // Charts & Data Visualization (check BEFORE AI detection)
  if (mentions("chart", "graph", "plot", "visualiz")) {
    // D3 for custom/complex visualizations; QuickChart for standard charts.
    const wantsD3 =
      params.render || params.renderString || mentions("custom", "d3");
    return wantsD3 ? "d3" : "quickchart";
  }

  // AI Image Generation - IMPROVED: Better keyword detection
  // Scene descriptions, subjects, art styles, etc.
  const aiKeywords = [
    "photo", "picture", "illustration", "painting", "drawing", "scene",
    "image of", "portrait", "landscape", "artwork", "realistic",
    "photorealistic", "stylized", "artistic", "dall-e", "ai image",
    "ai generated", "generate image", "person", "people", "animal",
    "building", "nature", "stadium", "player", "celebrating", "sunset",
    "dramatic",
  ];
  const hasAIKeyword = aiKeywords.some((keyword) => lowered.includes(keyword));
  const hasPromptParam = params.prompt !== undefined;
  // Route to OpenAI when the intent carries AI keywords, an explicit
  // prompt, or reads like a scene description (more than five words that
  // aren't a simple gradient/shape request).
  const wordCount = intent.trim().split(/\s+/).length;
  const isDescriptiveIntent =
    wordCount > 5 && !mentions("gradient") && !mentions("shape");
  if (hasAIKeyword || hasPromptParam || isDescriptiveIntent) {
    return "openai";
  }

  // Default to shapes for simple SVG graphics (gradients, basic shapes)
  return "shapes";
}
|
|
259
|
+
// Initialize server
// MCP server identity (name/version shown to clients) and advertised
// capabilities. The empty `tools` object declares support for the tools
// capability; the actual catalog is served by the ListTools handler below.
const server = new Server({
  name: "floimg",
  version: "0.1.0",
}, {
  capabilities: {
    tools: {},
  },
});
|
|
268
|
+
// Define available tools
// Static MCP tool catalog returned verbatim by the ListTools handler.
// Each entry follows the MCP tool shape: name, description, and a JSON
// Schema `inputSchema` describing its arguments.
const TOOLS = [
  // generate_image: intent-routed generation; result is registered in the
  // session workspace under an imageId for chaining.
  {
    name: "generate_image",
    description: "Generate any type of image. Routes to the appropriate generator based on intent. " +
      "Supports: AI images (DALL-E), charts (bar, line, pie), diagrams (flowcharts, sequence), " +
      "QR codes, screenshots, data visualizations (D3), and simple shapes/gradients. " +
      "Images are saved to session workspace and assigned an imageId for chaining operations.",
    inputSchema: {
      type: "object",
      properties: {
        intent: {
          type: "string",
          description: "Brief description to route to the right generator and provide simple defaults. " +
            "For AI images: intent becomes the prompt (e.g., 'golden retriever in field'). " +
            "For QR codes: include the URL (e.g., 'QR code for https://example.com'). " +
            "For charts/diagrams: just routing hint (e.g., 'bar chart', 'flowchart') - must provide params with data.",
        },
        params: {
          type: "object",
          description: "Generator-specific parameters. " +
            "AI images & QR codes: Optional (auto-filled from intent). " +
            "Charts & diagrams: REQUIRED - must provide structured data. " +
            "Examples: " +
            "Charts: {type: 'bar', data: {labels: [...], datasets: [...]}}. " +
            "Diagrams: {code: 'graph TD; A-->B'}. " +
            "AI images: {prompt: '...', size: '1024x1024'} (or omit, uses intent). " +
            "QR codes: {text: 'https://...'} (or omit if URL in intent).",
          default: {},
        },
        saveTo: {
          type: "string",
          description: "Optional: Also save to a destination (filesystem or cloud). " +
            "Examples: './output.png', 's3://bucket/key.png', 'r2://bucket/key.png'. " +
            "Image is always saved to session workspace regardless.",
        },
      },
      required: ["intent"],
    },
  },
  // transform_image: deterministic (non-AI) pixel operations; input may be
  // an imageId, a file path, or raw base64 bytes.
  {
    name: "transform_image",
    description: "Apply deterministic, pixel-precise transforms to images. Unlike AI regeneration " +
      "(DALL-E, inpainting), these operations mathematically modify exactly what you specify—" +
      "the rest of your image stays identical. When you say 'resize to 1200x630,' it does " +
      "exactly that, guaranteed. " +
      "Supports: resize, convert, blur, sharpen, grayscale, modulate (brightness/saturation/hue), " +
      "tint, roundCorners, addText, addCaption, and preset filters. " +
      "Reference images by: imageId (from generate_image), imagePath, or imageBytes. " +
      "Each transform creates a new imageId for chaining.",
    inputSchema: {
      type: "object",
      properties: {
        imageId: {
          type: "string",
          description: "ID of image from previous generate_image or transform_image call",
        },
        imagePath: {
          type: "string",
          description: "Path to image file on disk (e.g., './my-image.png', 'generated/abc.png')",
        },
        imageBytes: {
          type: "string",
          description: "Base64-encoded image bytes (for external images not in session)",
        },
        mime: {
          type: "string",
          description: "MIME type (required only if using imageBytes, auto-detected for imagePath/imageId)",
        },
        operation: {
          type: "string",
          description: "Transform operation to apply",
          enum: [
            "convert",
            "resize",
            "composite",
            "optimizeSvg",
            "blur",
            "sharpen",
            "grayscale",
            "negate",
            "normalize",
            "threshold",
            "modulate",
            "tint",
            "extend",
            "extract",
            "roundCorners",
            "addText",
            "addCaption",
            "preset",
          ],
        },
        params: {
          type: "object",
          description: "Parameters for the operation. Examples: " +
            "resize: {width: 800, height: 600}, " +
            "blur: {sigma: 5}, " +
            "modulate: {brightness: 1.2, saturation: 1.3}, " +
            "roundCorners: {radius: 20}, " +
            "addText: {text: 'Hello', x: 100, y: 100, size: 48, color: '#fff', shadow: true}, " +
            "addCaption: {text: 'Caption', position: 'bottom'}, " +
            "preset: {name: 'vintage' | 'vibrant' | 'dramatic' | 'soft'}",
        },
        to: {
          type: "string",
          description: "Target MIME type (for convert operation)",
        },
        saveTo: {
          type: "string",
          description: "Optional: Also save to a destination (filesystem or cloud). " +
            "Image is always saved to session workspace regardless.",
        },
      },
      required: ["operation"],
    },
  },
  // save_image: persist any referenced image to filesystem or cloud storage;
  // the scheme of `destination` selects the provider unless overridden.
  {
    name: "save_image",
    description: "Save an image to filesystem or cloud storage (S3, Tigris, R2, etc.). " +
      "Reference images by: imageId (from previous calls), imagePath (any file), or imageBytes (base64). " +
      "Supports smart destination routing: './output.png' → filesystem, 's3://bucket/key' → S3. " +
      "Returns public URL if saving to cloud storage.",
    inputSchema: {
      type: "object",
      properties: {
        imageId: {
          type: "string",
          description: "ID of image from previous generate_image or transform_image call",
        },
        imagePath: {
          type: "string",
          description: "Path to image file on disk",
        },
        imageBytes: {
          type: "string",
          description: "Base64-encoded image bytes",
        },
        mime: {
          type: "string",
          description: "MIME type (required only if using imageBytes)",
        },
        destination: {
          type: "string",
          description: "Where to save: './output.png', 's3://bucket/key.png', 'r2://bucket/key.png'",
        },
        provider: {
          type: "string",
          description: "Storage provider: 's3' or 'fs' (auto-detected from destination if not specified)",
        },
      },
      required: ["destination"],
    },
  },
  // run_pipeline: multi-step generate → transform → save workflow executed
  // in one call; each step's output auto-chains to the next.
  {
    name: "run_pipeline",
    description: "Execute a complete image workflow as one atomic operation. Combine AI generation " +
      "with deterministic transforms and cloud upload in a single call. What typically " +
      "requires 4+ separate tools becomes one pipeline: generate → transform → save. " +
      "Each step auto-chains to the next. Session state means you can iterate: run a " +
      "pipeline, then refine with additional transforms without starting over.",
    inputSchema: {
      type: "object",
      properties: {
        steps: {
          type: "array",
          description: "Array of steps to execute in order. Each step is an object with one key: " +
            "'generate', 'transform', or 'save'. The value is the parameters for that operation.",
          items: {
            type: "object",
          },
        },
      },
      required: ["steps"],
    },
  },
  // analyze_image: AI vision analysis of a referenced image; text or JSON output.
  {
    name: "analyze_image",
    description: "Analyze an image using AI vision (Claude Vision, GPT-4V, Gemini Vision, Ollama LLaVA). " +
      "Returns text or structured JSON describing the image content. " +
      "Reference images by: imageId (from previous calls), imagePath (any file), or imageBytes (base64). " +
      "Useful for: extracting text, describing contents, detecting objects, answering questions about images.",
    inputSchema: {
      type: "object",
      properties: {
        imageId: {
          type: "string",
          description: "ID of image from previous generate_image or transform_image call",
        },
        imagePath: {
          type: "string",
          description: "Path to image file on disk (e.g., './my-image.png')",
        },
        imageBytes: {
          type: "string",
          description: "Base64-encoded image bytes",
        },
        mime: {
          type: "string",
          description: "MIME type (required only if using imageBytes)",
        },
        prompt: {
          type: "string",
          description: "What to analyze or ask about the image. " +
            "Examples: 'Describe this image', 'What text is visible?', 'List all objects'",
        },
        provider: {
          type: "string",
          description: "Vision provider to use. Defaults to first available. " +
            "Options: 'openai' (GPT-4V), 'anthropic' (Claude), 'ollama' (LLaVA), 'gemini'",
          enum: ["openai", "anthropic", "ollama", "gemini"],
        },
        outputFormat: {
          type: "string",
          description: "Output format: 'text' (default) or 'json' (structured data)",
          enum: ["text", "json"],
        },
      },
      required: ["prompt"],
    },
  },
  // generate_text: plain AI text generation, usable for prompt crafting or
  // chaining with analyze_image output via `context`.
  {
    name: "generate_text",
    description: "Generate text using AI (Claude, GPT-4, Gemini, Ollama Llama/Mistral). " +
      "Useful for: creating prompts, writing descriptions, generating code, expanding on ideas. " +
      "Can optionally take context from previous analysis to chain operations.",
    inputSchema: {
      type: "object",
      properties: {
        prompt: {
          type: "string",
          description: "The prompt or question to generate text for",
        },
        context: {
          type: "string",
          description: "Optional context to include (e.g., output from analyze_image)",
        },
        provider: {
          type: "string",
          description: "Text provider to use. Defaults to first available. " +
            "Options: 'openai' (GPT-4), 'anthropic' (Claude), 'ollama' (Llama/Mistral), 'gemini'",
          enum: ["openai", "anthropic", "ollama", "gemini"],
        },
        systemPrompt: {
          type: "string",
          description: "Optional system prompt to set the AI's behavior/role",
        },
        temperature: {
          type: "number",
          description: "Creativity level (0.0 = deterministic, 1.0 = creative). Default: 0.7",
        },
        maxTokens: {
          type: "number",
          description: "Maximum tokens to generate. Default: 1024",
        },
      },
      required: ["prompt"],
    },
  },
];
|
|
528
|
+
// List tools handler
// Serve the static TOOLS catalog to connecting MCP clients.
server.setRequestHandler(ListToolsRequestSchema, async () => ({ tools: TOOLS }));
|
|
532
|
+
// Call tool handler
|
|
533
|
+
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
534
|
+
const { name, arguments: args } = request.params;
|
|
535
|
+
try {
|
|
536
|
+
await ensureWorkspace();
|
|
537
|
+
// Load configuration (from floimg.config.ts or .floimgrc.json)
|
|
538
|
+
const config = await loadConfig();
|
|
539
|
+
const client = createClient(config);
|
|
540
|
+
// Load available plugins
|
|
541
|
+
const availablePlugins = await loadAvailablePlugins(client);
|
|
542
|
+
console.error(`[floimg-mcp] Available generators: shapes, openai, ${availablePlugins.join(", ")}`);
|
|
543
|
+
switch (name) {
|
|
544
|
+
case "generate_image": {
|
|
545
|
+
const { intent, params = {}, saveTo, } = args;
|
|
546
|
+
if (!intent) {
|
|
547
|
+
throw new Error("'intent' parameter is required");
|
|
548
|
+
}
|
|
549
|
+
// Smart generator selection
|
|
550
|
+
const generator = selectGenerator(intent, params);
|
|
551
|
+
console.error(`[floimg-mcp] Intent: "${intent}" → Generator: ${generator}`);
|
|
552
|
+
// Auto-fill params for simple cases to improve UX
|
|
553
|
+
const finalParams = { ...params };
|
|
554
|
+
if (generator === "openai" && !finalParams.prompt) {
|
|
555
|
+
// For AI images: use intent as prompt
|
|
556
|
+
finalParams.prompt = intent;
|
|
557
|
+
finalParams.size = finalParams.size || "1024x1024";
|
|
558
|
+
console.error(`[floimg-mcp] Auto-filled: prompt="${intent}", size=${finalParams.size}`);
|
|
559
|
+
}
|
|
560
|
+
if (generator === "qr" && !finalParams.text) {
|
|
561
|
+
// For QR codes: extract URL from intent
|
|
562
|
+
const urlMatch = intent.match(/https?:\/\/[^\s]+/);
|
|
563
|
+
if (urlMatch) {
|
|
564
|
+
finalParams.text = urlMatch[0];
|
|
565
|
+
console.error(`[floimg-mcp] Auto-filled: text="${finalParams.text}"`);
|
|
566
|
+
}
|
|
567
|
+
else {
|
|
568
|
+
throw new Error("Could not extract URL from intent for QR code. " +
|
|
569
|
+
"Please provide params.text explicitly. " +
|
|
570
|
+
"Example: { intent: 'qr code', params: { text: 'https://example.com' } }");
|
|
571
|
+
}
|
|
572
|
+
}
|
|
573
|
+
// For charts and diagrams: params always required (too complex to extract)
|
|
574
|
+
if ((generator === "quickchart" || generator === "mermaid" || generator === "d3") &&
|
|
575
|
+
!finalParams.type &&
|
|
576
|
+
!finalParams.data &&
|
|
577
|
+
!finalParams.code &&
|
|
578
|
+
!finalParams.render) {
|
|
579
|
+
throw new Error(`${generator} requires explicit params. Intent is only for routing. ` +
|
|
580
|
+
`Please provide structured data: ` +
|
|
581
|
+
`${generator === "quickchart" ? '{ type: "bar", data: {...} }' : ""}` +
|
|
582
|
+
`${generator === "mermaid" ? '{ code: "graph TD; A-->B" }' : ""}` +
|
|
583
|
+
`${generator === "d3" ? '{ render: "...", data: [...] }' : ""}`);
|
|
584
|
+
}
|
|
585
|
+
const blob = await client.generate({
|
|
586
|
+
generator,
|
|
587
|
+
params: finalParams,
|
|
588
|
+
});
|
|
589
|
+
// Save to session workspace
|
|
590
|
+
const imageId = generateImageId();
|
|
591
|
+
const ext = getExtension(blob.mime);
|
|
592
|
+
const sessionPath = join(SESSION_WORKSPACE, `${imageId}.${ext}`);
|
|
593
|
+
await client.save(blob, sessionPath);
|
|
594
|
+
// Register in session
|
|
595
|
+
imageRegistry.set(imageId, {
|
|
596
|
+
path: sessionPath,
|
|
597
|
+
mime: blob.mime,
|
|
598
|
+
metadata: {
|
|
599
|
+
width: blob.width,
|
|
600
|
+
height: blob.height,
|
|
601
|
+
source: blob.source,
|
|
602
|
+
},
|
|
603
|
+
});
|
|
604
|
+
console.error(`[floimg-mcp] Saved to session: ${imageId} → ${sessionPath}`);
|
|
605
|
+
// Optionally save to additional destination
|
|
606
|
+
let cloudResult = null;
|
|
607
|
+
if (saveTo) {
|
|
608
|
+
cloudResult = await client.save(blob, saveTo);
|
|
609
|
+
console.error(`[floimg-mcp] Also saved to: ${saveTo}`);
|
|
610
|
+
}
|
|
611
|
+
return {
|
|
612
|
+
content: [
|
|
613
|
+
{
|
|
614
|
+
type: "text",
|
|
615
|
+
text: JSON.stringify({
|
|
616
|
+
success: true,
|
|
617
|
+
imageId,
|
|
618
|
+
generator,
|
|
619
|
+
session: {
|
|
620
|
+
path: sessionPath,
|
|
621
|
+
mime: blob.mime,
|
|
622
|
+
width: blob.width,
|
|
623
|
+
height: blob.height,
|
|
624
|
+
},
|
|
625
|
+
...(cloudResult && {
|
|
626
|
+
saved: {
|
|
627
|
+
location: cloudResult.location,
|
|
628
|
+
provider: cloudResult.provider,
|
|
629
|
+
size: cloudResult.size,
|
|
630
|
+
},
|
|
631
|
+
}),
|
|
632
|
+
}, null, 2),
|
|
633
|
+
},
|
|
634
|
+
],
|
|
635
|
+
};
|
|
636
|
+
}
|
|
637
|
+
case "transform_image": {
|
|
638
|
+
const { imageId, imagePath, imageBytes, mime, operation, params = {}, to, saveTo, } = args;
|
|
639
|
+
// Load input image
|
|
640
|
+
const inputBlob = await loadImage(imageId, imagePath, imageBytes, mime);
|
|
641
|
+
let resultBlob;
|
|
642
|
+
// Handle special cases that need specific parameters
|
|
643
|
+
if (operation === "convert") {
|
|
644
|
+
if (!to)
|
|
645
|
+
throw new Error("'to' parameter required for convert operation");
|
|
646
|
+
resultBlob = await client.transform({
|
|
647
|
+
blob: inputBlob,
|
|
648
|
+
op: "convert",
|
|
649
|
+
to: to,
|
|
650
|
+
params,
|
|
651
|
+
});
|
|
652
|
+
}
|
|
653
|
+
else if (operation === "resize") {
|
|
654
|
+
const { width, height } = params;
|
|
655
|
+
if (!width && !height)
|
|
656
|
+
throw new Error("'width' or 'height' required in params for resize");
|
|
657
|
+
resultBlob = await client.transform({
|
|
658
|
+
blob: inputBlob,
|
|
659
|
+
op: "resize",
|
|
660
|
+
params: { width, height, ...params },
|
|
661
|
+
});
|
|
662
|
+
}
|
|
663
|
+
else {
|
|
664
|
+
// All other operations use the generic params approach
|
|
665
|
+
resultBlob = await client.transform({
|
|
666
|
+
blob: inputBlob,
|
|
667
|
+
op: operation,
|
|
668
|
+
params,
|
|
669
|
+
});
|
|
670
|
+
}
|
|
671
|
+
// Save to session workspace
|
|
672
|
+
const newImageId = generateImageId();
|
|
673
|
+
const ext = getExtension(resultBlob.mime);
|
|
674
|
+
const sessionPath = join(SESSION_WORKSPACE, `${newImageId}.${ext}`);
|
|
675
|
+
await client.save(resultBlob, sessionPath);
|
|
676
|
+
// Register in session
|
|
677
|
+
imageRegistry.set(newImageId, {
|
|
678
|
+
path: sessionPath,
|
|
679
|
+
mime: resultBlob.mime,
|
|
680
|
+
metadata: {
|
|
681
|
+
width: resultBlob.width,
|
|
682
|
+
height: resultBlob.height,
|
|
683
|
+
source: resultBlob.source,
|
|
684
|
+
},
|
|
685
|
+
});
|
|
686
|
+
console.error(`[floimg-mcp] Transformed and saved: ${newImageId} → ${sessionPath}`);
|
|
687
|
+
// Optionally save to additional destination
|
|
688
|
+
let cloudResult = null;
|
|
689
|
+
if (saveTo) {
|
|
690
|
+
cloudResult = await client.save(resultBlob, saveTo);
|
|
691
|
+
console.error(`[floimg-mcp] Also saved to: ${saveTo}`);
|
|
692
|
+
}
|
|
693
|
+
return {
|
|
694
|
+
content: [
|
|
695
|
+
{
|
|
696
|
+
type: "text",
|
|
697
|
+
text: JSON.stringify({
|
|
698
|
+
success: true,
|
|
699
|
+
imageId: newImageId,
|
|
700
|
+
operation,
|
|
701
|
+
session: {
|
|
702
|
+
path: sessionPath,
|
|
703
|
+
mime: resultBlob.mime,
|
|
704
|
+
width: resultBlob.width,
|
|
705
|
+
height: resultBlob.height,
|
|
706
|
+
},
|
|
707
|
+
...(cloudResult && {
|
|
708
|
+
saved: {
|
|
709
|
+
location: cloudResult.location,
|
|
710
|
+
provider: cloudResult.provider,
|
|
711
|
+
size: cloudResult.size,
|
|
712
|
+
},
|
|
713
|
+
}),
|
|
714
|
+
}, null, 2),
|
|
715
|
+
},
|
|
716
|
+
],
|
|
717
|
+
};
|
|
718
|
+
}
|
|
719
|
+
case "save_image": {
|
|
720
|
+
const { imageId, imagePath, imageBytes, mime, destination, provider } = args;
|
|
721
|
+
// Load input image
|
|
722
|
+
const inputBlob = await loadImage(imageId, imagePath, imageBytes, mime);
|
|
723
|
+
const result = await client.save(inputBlob, provider ? { path: destination, provider } : destination);
|
|
724
|
+
console.error(`[floimg-mcp] Saved to: ${destination}`);
|
|
725
|
+
return {
|
|
726
|
+
content: [
|
|
727
|
+
{
|
|
728
|
+
type: "text",
|
|
729
|
+
text: JSON.stringify({
|
|
730
|
+
success: true,
|
|
731
|
+
location: result.location,
|
|
732
|
+
provider: result.provider,
|
|
733
|
+
size: result.size,
|
|
734
|
+
mime: result.mime,
|
|
735
|
+
}, null, 2),
|
|
736
|
+
},
|
|
737
|
+
],
|
|
738
|
+
};
|
|
739
|
+
}
|
|
740
|
+
case "run_pipeline": {
|
|
741
|
+
const { steps } = args;
|
|
742
|
+
if (!Array.isArray(steps) || steps.length === 0) {
|
|
743
|
+
throw new Error("'steps' must be a non-empty array");
|
|
744
|
+
}
|
|
745
|
+
let currentImageId;
|
|
746
|
+
const results = [];
|
|
747
|
+
for (let i = 0; i < steps.length; i++) {
|
|
748
|
+
const step = steps[i];
|
|
749
|
+
const stepType = Object.keys(step)[0]; // 'generate', 'transform', or 'save'
|
|
750
|
+
const stepParams = step[stepType];
|
|
751
|
+
console.error(`[floimg-mcp] Pipeline step ${i + 1}/${steps.length}: ${stepType}`);
|
|
752
|
+
if (stepType === "generate") {
|
|
753
|
+
// Generate step
|
|
754
|
+
const { intent, params = {} } = stepParams;
|
|
755
|
+
const generator = selectGenerator(intent, params);
|
|
756
|
+
// Auto-fill params for simple cases (same logic as generate_image tool)
|
|
757
|
+
const finalParams = { ...params };
|
|
758
|
+
if (generator === "openai" && !finalParams.prompt) {
|
|
759
|
+
finalParams.prompt = intent;
|
|
760
|
+
finalParams.size = finalParams.size || "1024x1024";
|
|
761
|
+
}
|
|
762
|
+
if (generator === "qr" && !finalParams.text) {
|
|
763
|
+
const urlMatch = intent.match(/https?:\/\/[^\s]+/);
|
|
764
|
+
if (urlMatch)
|
|
765
|
+
finalParams.text = urlMatch[0];
|
|
766
|
+
}
|
|
767
|
+
const blob = await client.generate({ generator, params: finalParams });
|
|
768
|
+
// Save to session
|
|
769
|
+
const imageId = generateImageId();
|
|
770
|
+
const ext = getExtension(blob.mime);
|
|
771
|
+
const sessionPath = join(SESSION_WORKSPACE, `${imageId}.${ext}`);
|
|
772
|
+
await client.save(blob, sessionPath);
|
|
773
|
+
imageRegistry.set(imageId, {
|
|
774
|
+
path: sessionPath,
|
|
775
|
+
mime: blob.mime,
|
|
776
|
+
metadata: { width: blob.width, height: blob.height, source: blob.source },
|
|
777
|
+
});
|
|
778
|
+
currentImageId = imageId;
|
|
779
|
+
results.push({ step: i + 1, type: "generate", imageId, generator });
|
|
780
|
+
}
|
|
781
|
+
else if (stepType === "transform") {
|
|
782
|
+
// Transform step - uses current image
|
|
783
|
+
if (!currentImageId) {
|
|
784
|
+
throw new Error(`Pipeline step ${i + 1}: transform requires a previous generate step`);
|
|
785
|
+
}
|
|
786
|
+
const { operation, params = {}, to } = stepParams;
|
|
787
|
+
const inputBlob = await loadImage(currentImageId);
|
|
788
|
+
let resultBlob;
|
|
789
|
+
if (operation === "convert") {
|
|
790
|
+
resultBlob = await client.transform({
|
|
791
|
+
blob: inputBlob,
|
|
792
|
+
op: "convert",
|
|
793
|
+
to: to,
|
|
794
|
+
params,
|
|
795
|
+
});
|
|
796
|
+
}
|
|
797
|
+
else if (operation === "resize") {
|
|
798
|
+
resultBlob = await client.transform({
|
|
799
|
+
blob: inputBlob,
|
|
800
|
+
op: "resize",
|
|
801
|
+
params,
|
|
802
|
+
});
|
|
803
|
+
}
|
|
804
|
+
else {
|
|
805
|
+
resultBlob = await client.transform({
|
|
806
|
+
blob: inputBlob,
|
|
807
|
+
op: operation,
|
|
808
|
+
params,
|
|
809
|
+
});
|
|
810
|
+
}
|
|
811
|
+
// Save to session
|
|
812
|
+
const newImageId = generateImageId();
|
|
813
|
+
const ext = getExtension(resultBlob.mime);
|
|
814
|
+
const sessionPath = join(SESSION_WORKSPACE, `${newImageId}.${ext}`);
|
|
815
|
+
await client.save(resultBlob, sessionPath);
|
|
816
|
+
imageRegistry.set(newImageId, {
|
|
817
|
+
path: sessionPath,
|
|
818
|
+
mime: resultBlob.mime,
|
|
819
|
+
metadata: { width: resultBlob.width, height: resultBlob.height },
|
|
820
|
+
});
|
|
821
|
+
currentImageId = newImageId;
|
|
822
|
+
results.push({ step: i + 1, type: "transform", operation, imageId: newImageId });
|
|
823
|
+
}
|
|
824
|
+
else if (stepType === "save") {
|
|
825
|
+
// Save step - saves current image
|
|
826
|
+
if (!currentImageId) {
|
|
827
|
+
throw new Error(`Pipeline step ${i + 1}: save requires a previous generate/transform step`);
|
|
828
|
+
}
|
|
829
|
+
const { destination, provider } = stepParams;
|
|
830
|
+
const inputBlob = await loadImage(currentImageId);
|
|
831
|
+
const result = await client.save(inputBlob, provider ? { path: destination, provider } : destination);
|
|
832
|
+
results.push({
|
|
833
|
+
step: i + 1,
|
|
834
|
+
type: "save",
|
|
835
|
+
location: result.location,
|
|
836
|
+
provider: result.provider,
|
|
837
|
+
size: result.size,
|
|
838
|
+
});
|
|
839
|
+
}
|
|
840
|
+
else {
|
|
841
|
+
throw new Error(`Unknown pipeline step type: ${stepType}. Use 'generate', 'transform', or 'save'.`);
|
|
842
|
+
}
|
|
843
|
+
}
|
|
844
|
+
return {
|
|
845
|
+
content: [
|
|
846
|
+
{
|
|
847
|
+
type: "text",
|
|
848
|
+
text: JSON.stringify({
|
|
849
|
+
success: true,
|
|
850
|
+
pipeline: {
|
|
851
|
+
totalSteps: steps.length,
|
|
852
|
+
finalImageId: currentImageId,
|
|
853
|
+
results,
|
|
854
|
+
},
|
|
855
|
+
}, null, 2),
|
|
856
|
+
},
|
|
857
|
+
],
|
|
858
|
+
};
|
|
859
|
+
}
|
|
860
|
+
case "analyze_image": {
|
|
861
|
+
const { imageId, imagePath, imageBytes, mime, prompt, provider, outputFormat = "text", } = args;
|
|
862
|
+
if (!prompt) {
|
|
863
|
+
throw new Error("'prompt' parameter is required");
|
|
864
|
+
}
|
|
865
|
+
// Load input image
|
|
866
|
+
const inputBlob = await loadImage(imageId, imagePath, imageBytes, mime);
|
|
867
|
+
// Check if vision providers are available
|
|
868
|
+
const capabilities = client.getCapabilities();
|
|
869
|
+
if (!capabilities.visionProviders || capabilities.visionProviders.length === 0) {
|
|
870
|
+
throw new Error("No vision providers configured. " +
|
|
871
|
+
"Configure AI providers in floimg.config.ts or environment variables. " +
|
|
872
|
+
"Supported: OpenAI (OPENAI_API_KEY), Anthropic (ANTHROPIC_API_KEY), " +
|
|
873
|
+
"Ollama (OLLAMA_BASE_URL), Gemini (GOOGLE_AI_API_KEY)");
|
|
874
|
+
}
|
|
875
|
+
// Select provider (use specified or first available)
|
|
876
|
+
const selectedProvider = provider || capabilities.visionProviders[0].name;
|
|
877
|
+
console.error(`[floimg-mcp] Analyzing image with provider: ${selectedProvider}`);
|
|
878
|
+
const result = await client.analyzeImage({
|
|
879
|
+
provider: selectedProvider,
|
|
880
|
+
blob: inputBlob,
|
|
881
|
+
params: {
|
|
882
|
+
prompt,
|
|
883
|
+
outputFormat,
|
|
884
|
+
},
|
|
885
|
+
});
|
|
886
|
+
console.error(`[floimg-mcp] Analysis complete: ${result.content.slice(0, 100)}...`);
|
|
887
|
+
return {
|
|
888
|
+
content: [
|
|
889
|
+
{
|
|
890
|
+
type: "text",
|
|
891
|
+
text: JSON.stringify({
|
|
892
|
+
success: true,
|
|
893
|
+
provider: selectedProvider,
|
|
894
|
+
outputFormat: result.type,
|
|
895
|
+
content: result.content,
|
|
896
|
+
...(result.parsed && { parsed: result.parsed }),
|
|
897
|
+
}, null, 2),
|
|
898
|
+
},
|
|
899
|
+
],
|
|
900
|
+
};
|
|
901
|
+
}
|
|
902
|
+
case "generate_text": {
|
|
903
|
+
const { prompt, context, provider, systemPrompt, temperature, maxTokens } = args;
|
|
904
|
+
if (!prompt) {
|
|
905
|
+
throw new Error("'prompt' parameter is required");
|
|
906
|
+
}
|
|
907
|
+
// Check if text providers are available
|
|
908
|
+
const capabilities = client.getCapabilities();
|
|
909
|
+
if (!capabilities.textProviders || capabilities.textProviders.length === 0) {
|
|
910
|
+
throw new Error("No text providers configured. " +
|
|
911
|
+
"Configure AI providers in floimg.config.ts or environment variables. " +
|
|
912
|
+
"Supported: OpenAI (OPENAI_API_KEY), Anthropic (ANTHROPIC_API_KEY), " +
|
|
913
|
+
"Ollama (OLLAMA_BASE_URL), Gemini (GOOGLE_AI_API_KEY)");
|
|
914
|
+
}
|
|
915
|
+
// Select provider (use specified or first available)
|
|
916
|
+
const selectedProvider = provider || capabilities.textProviders[0].name;
|
|
917
|
+
console.error(`[floimg-mcp] Generating text with provider: ${selectedProvider}`);
|
|
918
|
+
const result = await client.generateText({
|
|
919
|
+
provider: selectedProvider,
|
|
920
|
+
params: {
|
|
921
|
+
prompt,
|
|
922
|
+
...(context && { context }),
|
|
923
|
+
...(systemPrompt && { systemPrompt }),
|
|
924
|
+
...(temperature !== undefined && { temperature }),
|
|
925
|
+
...(maxTokens !== undefined && { maxTokens }),
|
|
926
|
+
},
|
|
927
|
+
});
|
|
928
|
+
console.error(`[floimg-mcp] Text generation complete: ${result.content.slice(0, 100)}...`);
|
|
929
|
+
return {
|
|
930
|
+
content: [
|
|
931
|
+
{
|
|
932
|
+
type: "text",
|
|
933
|
+
text: JSON.stringify({
|
|
934
|
+
success: true,
|
|
935
|
+
provider: selectedProvider,
|
|
936
|
+
content: result.content,
|
|
937
|
+
}, null, 2),
|
|
938
|
+
},
|
|
939
|
+
],
|
|
940
|
+
};
|
|
941
|
+
}
|
|
942
|
+
default:
|
|
943
|
+
throw new Error(`Unknown tool: ${name}`);
|
|
944
|
+
}
|
|
945
|
+
}
|
|
946
|
+
catch (error) {
    // Convert any failure into a structured MCP error payload instead of
    // letting the exception escape the request handler.
    let message;
    if (error instanceof Error) {
        message = error.message;
    } else {
        message = String(error);
    }
    // FloimgError subclasses carry a descriptive name; everything else is "Error".
    const errorType = error instanceof FloimgError ? error.name : "Error";
    const body = JSON.stringify({ success: false, error: errorType, message }, null, 2);
    return {
        content: [{ type: "text", text: body }],
        isError: true,
    };
}
|
|
963
|
+
});
|
|
964
|
+
/**
 * Map an image MIME type to a file extension for session filenames.
 *
 * @param {string} mime - MIME type (e.g. "image/png", "image/svg+xml").
 * @returns {string} Extension without the leading dot; unknown types fall
 *   back to "png" so session files always get a usable extension.
 */
function getExtension(mime) {
    const map = {
        "image/svg+xml": "svg",
        "image/png": "png",
        "image/jpeg": "jpg",
        "image/webp": "webp",
        "image/avif": "avif",
        // Additional common formats so transformed outputs keep a sensible extension.
        "image/gif": "gif",
        "image/bmp": "bmp",
        "image/tiff": "tiff",
        "image/x-icon": "ico",
    };
    return map[mime] || "png";
}
|
|
975
|
+
// Start server with stdio transport
/** Connect the MCP server over stdio and report startup status on stderr. */
async function main() {
    const stdioTransport = new StdioServerTransport();
    await server.connect(stdioTransport);
    // stdout carries the MCP protocol, so all human-readable logging goes to stderr.
    console.error("floimg MCP server v0.1.0 running on stdio");
    console.error("Session workspace:", SESSION_WORKSPACE);
    console.error("Smart routing enabled - will auto-select best generator based on intent");
}
|
|
984
|
+
// Launch the server; any unhandled startup/runtime failure aborts the process.
const handleFatal = (error) => {
    console.error("Fatal error:", error);
    process.exit(1);
};
main().catch(handleFatal);
|
|
988
|
+
//# sourceMappingURL=server.js.map
|