@meshy-ai/meshy-mcp-server 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/.env.example +14 -0
  2. package/LICENSE +21 -0
  3. package/README.md +108 -0
  4. package/dist/constants.d.ts +123 -0
  5. package/dist/constants.js +169 -0
  6. package/dist/index.d.ts +8 -0
  7. package/dist/index.js +130 -0
  8. package/dist/instructions.d.ts +6 -0
  9. package/dist/instructions.js +90 -0
  10. package/dist/schemas/balance.d.ts +11 -0
  11. package/dist/schemas/balance.js +8 -0
  12. package/dist/schemas/common.d.ts +38 -0
  13. package/dist/schemas/common.js +52 -0
  14. package/dist/schemas/generation.d.ts +219 -0
  15. package/dist/schemas/generation.js +217 -0
  16. package/dist/schemas/image.d.ts +55 -0
  17. package/dist/schemas/image.js +46 -0
  18. package/dist/schemas/output.d.ts +75 -0
  19. package/dist/schemas/output.js +41 -0
  20. package/dist/schemas/postprocessing.d.ts +135 -0
  21. package/dist/schemas/postprocessing.js +123 -0
  22. package/dist/schemas/printing.d.ts +63 -0
  23. package/dist/schemas/printing.js +54 -0
  24. package/dist/schemas/tasks.d.ts +123 -0
  25. package/dist/schemas/tasks.js +85 -0
  26. package/dist/services/error-handler.d.ts +32 -0
  27. package/dist/services/error-handler.js +141 -0
  28. package/dist/services/file-utils.d.ts +15 -0
  29. package/dist/services/file-utils.js +55 -0
  30. package/dist/services/meshy-client.d.ts +54 -0
  31. package/dist/services/meshy-client.js +172 -0
  32. package/dist/services/output-manager.d.ts +52 -0
  33. package/dist/services/output-manager.js +284 -0
  34. package/dist/tools/balance.d.ts +9 -0
  35. package/dist/tools/balance.js +61 -0
  36. package/dist/tools/generation.d.ts +9 -0
  37. package/dist/tools/generation.js +419 -0
  38. package/dist/tools/image.d.ts +9 -0
  39. package/dist/tools/image.js +154 -0
  40. package/dist/tools/postprocessing.d.ts +9 -0
  41. package/dist/tools/postprocessing.js +405 -0
  42. package/dist/tools/printing.d.ts +9 -0
  43. package/dist/tools/printing.js +338 -0
  44. package/dist/tools/tasks.d.ts +9 -0
  45. package/dist/tools/tasks.js +1074 -0
  46. package/dist/tools/workspace.d.ts +9 -0
  47. package/dist/tools/workspace.js +161 -0
  48. package/dist/types.d.ts +261 -0
  49. package/dist/types.js +4 -0
  50. package/dist/utils/endpoints.d.ts +16 -0
  51. package/dist/utils/endpoints.js +38 -0
  52. package/dist/utils/request-builder.d.ts +15 -0
  53. package/dist/utils/request-builder.js +24 -0
  54. package/dist/utils/response-formatter.d.ts +27 -0
  55. package/dist/utils/response-formatter.js +37 -0
  56. package/dist/utils/slicer-detector.d.ts +29 -0
  57. package/dist/utils/slicer-detector.js +237 -0
  58. package/package.json +64 -0
@@ -0,0 +1,419 @@
1
+ /**
2
+ * Generation tools (text-to-3d, image-to-3d)
3
+ */
4
+ import { handleMeshyError } from "../services/error-handler.js";
5
+ import { resolveImageSource, fileToDataUri } from "../services/file-utils.js";
6
+ import { TextTo3DInputSchema, ImageTo3DInputSchema, TextTo3DRefineInputSchema, MultiImageTo3DInputSchema } from "../schemas/generation.js";
7
+ import { TaskCreatedOutputSchema } from "../schemas/output.js";
8
+ import { formatTaskCreatedResponse } from "../utils/response-formatter.js";
9
+ /**
10
+ * Register generation tools with the MCP server
11
+ */
12
+ export function registerGenerationTools(server, client) {
13
+ // Text-to-3D tool
14
+ server.registerTool("meshy_text_to_3d", {
15
+ title: "Generate 3D Model from Text",
16
+ description: `Generate a 3D model from a text description using Meshy AI.
17
+
18
+ This tool creates a new 3D generation task and returns a task_id that can be used to poll the status. The generation process is asynchronous and typically takes 2-3 minutes.
19
+
20
+ Args:
21
+ - prompt (string): Text description of the 3D model (2-600 characters)
22
+ - ai_model (enum): AI model - "meshy-5" (previous gen, 5 credits), "meshy-6" (best quality, 20 credits), "latest" (default, resolves to meshy-6). IMPORTANT: Ask the user which model to use before proceeding
23
+ - model_type (enum, optional): "standard" (default) or "lowpoly". When "lowpoly", ai_model/topology/target_polycount/should_remesh are ignored
24
+ - topology (enum, optional): Mesh topology - "quad" or "triangle"
25
+ - target_polycount (number, optional): Target polygon count (100–300,000)
26
+ - symmetry_mode (enum, optional): "off", "auto" (default), or "on"
27
+ - should_remesh (boolean, optional): Whether to remesh. Default false for meshy-6, true for others
28
+ - pose_mode (enum, optional): "a-pose" or "t-pose". IMPORTANT: Use "t-pose" when the user intends to rig or animate the model
29
+ - target_formats (string[], optional): Output formats. Default: all except 3mf. For 3D printing white model, pass ["obj"].
30
+ - auto_size (boolean, optional): AI auto-estimate real-world height. Default false.
31
+ - origin_at (enum, optional): "bottom" or "center". Default "bottom" when auto_size is true.
32
+ - response_format (enum): Output format - "markdown" or "json" (default: "markdown")
33
+
34
+ Workflow: This creates a PREVIEW (untextured mesh). After completion, ask the user if they want to add textures via meshy_text_to_3d_refine. Preview and refine ai_model should match — all models (meshy-5, meshy-6, latest) support both preview and refine.
35
+
36
+ For 3D printing: pass target_formats: ["obj"] to only generate OBJ format (faster).
37
+ For rigging/animation: use pose_mode: "t-pose" for best results.
38
+
39
+ Returns:
40
+ { "task_id": "abc-123-def", "status": "PENDING", "estimated_time": "2-3 minutes" }
41
+
42
+ Next Steps:
43
+ Use meshy_get_task_status with the task_id to wait for completion.
44
+
45
+ Examples:
46
+ - "Create a low-poly dragon" → { prompt: "dragon", model_type: "lowpoly" }
47
+ - "Generate for 3D print" → { prompt: "cat", target_formats: ["obj"] }
48
+ - "Character for animation" → { prompt: "warrior", pose_mode: "t-pose" }`,
49
+ inputSchema: TextTo3DInputSchema,
50
+ outputSchema: TaskCreatedOutputSchema,
51
+ annotations: {
52
+ readOnlyHint: false,
53
+ destructiveHint: false,
54
+ idempotentHint: false,
55
+ openWorldHint: true
56
+ }
57
+ }, async (params) => {
58
+ try {
59
+ // Prepare API request - flat structure matching Meshy API
60
+ const request = {
61
+ mode: "preview",
62
+ prompt: params.prompt,
63
+ ai_model: params.ai_model,
64
+ moderation: false
65
+ };
66
+ if (params.model_type) {
67
+ request.model_type = params.model_type;
68
+ }
69
+ if (params.target_polycount) {
70
+ request.target_polycount = params.target_polycount;
71
+ }
72
+ if (params.topology) {
73
+ request.topology = params.topology;
74
+ }
75
+ if (params.symmetry_mode) {
76
+ request.symmetry_mode = params.symmetry_mode;
77
+ }
78
+ if (params.should_remesh !== undefined) {
79
+ request.should_remesh = params.should_remesh;
80
+ }
81
+ if (params.pose_mode) {
82
+ request.pose_mode = params.pose_mode;
83
+ }
84
+ if (params.target_formats) {
85
+ request.target_formats = params.target_formats;
86
+ }
87
+ if (params.auto_size !== undefined)
88
+ request.auto_size = params.auto_size;
89
+ if (params.origin_at)
90
+ request.origin_at = params.origin_at;
91
+ // Create task via API
92
+ const response = await client.post("/openapi/v2/text-to-3d", request);
93
+ // API returns { "result": "task-id" }
94
+ const taskId = response.result;
95
+ const output = {
96
+ task_id: taskId,
97
+ status: "PENDING",
98
+ message: `3D generation task created successfully. Task ID: ${taskId}`,
99
+ estimated_time: "2-3 minutes"
100
+ };
101
+ return formatTaskCreatedResponse(output, params.response_format, "3D Generation Task Created", `Your 3D model is being generated from the prompt: "${params.prompt}"`);
102
+ }
103
+ catch (error) {
104
+ return {
105
+ isError: true,
106
+ content: [{
107
+ type: "text",
108
+ text: handleMeshyError(error)
109
+ }]
110
+ };
111
+ }
112
+ });
113
+ // Image-to-3D tool
114
+ server.registerTool("meshy_image_to_3d", {
115
+ title: "Generate 3D Model from Image",
116
+ description: `Generate a 3D model from a single image using Meshy AI.
117
+
118
+ This tool creates a new 3D generation task from an image and returns a task_id.
119
+
120
+ IMAGE INPUT (provide ONE, NEVER both):
121
+ - Local file → file_path: "/absolute/path/to/image.jpg" (RECOMMENDED)
122
+ - Remote URL → image_url: "https://example.com/image.jpg"
123
+ - NEVER manually base64-encode. NEVER use both file_path and image_url.
124
+
125
+ Other Args:
126
+ - ai_model: "meshy-5", "meshy-6", or "latest" (default). Ask user which model before proceeding
127
+ - model_type, pose_mode, topology, target_polycount, should_remesh, symmetry_mode
128
+ - should_texture: Whether to generate textures (default true). Set false for untextured mesh
129
+ - enable_pbr: PBR maps (default false). Set true for metallic/roughness/normal maps
130
+ - texture_prompt, texture_image_url: Guide texturing
131
+ - image_enhancement: Optimize input image (default true, meshy-6/latest only)
132
+ - remove_lighting: Remove highlights/shadows from base color texture (default true, meshy-6/latest only)
133
+ - save_pre_remeshed_model, response_format
134
+
135
+ Note: Image-to-3D does NOT have a separate refine step. Texturing is controlled by should_texture (default true).
136
+
137
+ Examples:
138
+ - Local file: { file_path: "/path/to/cat.jpg" }
139
+ - Public URL: { image_url: "https://example.com/cat.jpg" }
140
+ - With pose: { file_path: "/path/to/character.png", pose_mode: "a-pose" }
141
+
142
+ Error Handling:
143
+ - Returns "InvalidImageUrl" if image is not accessible
144
+ - Returns "File not found" if file_path doesn't exist`,
145
+ inputSchema: ImageTo3DInputSchema,
146
+ outputSchema: TaskCreatedOutputSchema,
147
+ annotations: {
148
+ readOnlyHint: false,
149
+ destructiveHint: false,
150
+ idempotentHint: false,
151
+ openWorldHint: true
152
+ }
153
+ }, async (params) => {
154
+ try {
155
+ const imageUrl = await resolveImageSource(params.image_url, params.file_path);
156
+ const request = {
157
+ image_url: imageUrl,
158
+ enable_pbr: params.enable_pbr,
159
+ moderation: false
160
+ };
161
+ if (params.ai_model) {
162
+ request.ai_model = params.ai_model;
163
+ }
164
+ if (params.model_type) {
165
+ request.model_type = params.model_type;
166
+ }
167
+ if (params.pose_mode) {
168
+ request.pose_mode = params.pose_mode;
169
+ }
170
+ if (params.topology) {
171
+ request.topology = params.topology;
172
+ }
173
+ if (params.target_polycount) {
174
+ request.target_polycount = params.target_polycount;
175
+ }
176
+ if (params.should_remesh !== undefined) {
177
+ request.should_remesh = params.should_remesh;
178
+ }
179
+ if (params.symmetry_mode) {
180
+ request.symmetry_mode = params.symmetry_mode;
181
+ }
182
+ if (params.should_texture !== undefined) {
183
+ request.should_texture = params.should_texture;
184
+ }
185
+ if (params.texture_prompt) {
186
+ request.texture_prompt = params.texture_prompt;
187
+ }
188
+ if (params.texture_image_url) {
189
+ request.texture_image_url = params.texture_image_url;
190
+ }
191
+ if (params.image_enhancement !== undefined) {
192
+ request.image_enhancement = params.image_enhancement;
193
+ }
194
+ if (params.remove_lighting !== undefined) {
195
+ request.remove_lighting = params.remove_lighting;
196
+ }
197
+ if (params.save_pre_remeshed_model !== undefined) {
198
+ request.save_pre_remeshed_model = params.save_pre_remeshed_model;
199
+ }
200
+ if (params.target_formats) {
201
+ request.target_formats = params.target_formats;
202
+ }
203
+ if (params.auto_size !== undefined)
204
+ request.auto_size = params.auto_size;
205
+ if (params.origin_at)
206
+ request.origin_at = params.origin_at;
207
+ // Create task via API (image-to-3d uses v1, not v2)
208
+ const response = await client.post("/openapi/v1/image-to-3d", request);
209
+ // API returns { "result": "task-id" }
210
+ const taskId = response.result;
211
+ const output = {
212
+ task_id: taskId,
213
+ status: "PENDING",
214
+ message: `3D generation task created successfully. Task ID: ${taskId}`,
215
+ estimated_time: "2-3 minutes"
216
+ };
217
+ return formatTaskCreatedResponse(output, params.response_format, "3D Generation Task Created (Image-to-3D)", "Your 3D model is being generated from the provided image.", "image-to-3d");
218
+ }
219
+ catch (error) {
220
+ return {
221
+ isError: true,
222
+ content: [{
223
+ type: "text",
224
+ text: handleMeshyError(error, { tool: "meshy_image_to_3d" })
225
+ }]
226
+ };
227
+ }
228
+ });
229
+ // Text-to-3D Refine tool
230
+ server.registerTool("meshy_text_to_3d_refine", {
231
+ title: "Refine Text-to-3D Preview",
232
+ description: `Apply textures to a completed text-to-3D preview mesh using Meshy AI.
233
+
234
+ This tool takes a completed preview task and generates a fully textured model. Run meshy_text_to_3d first to get a preview, then use this tool to add high-quality textures.
235
+
236
+ Args:
237
+ - preview_task_id (string): Task ID of the completed preview task to refine (required)
238
+ - enable_pbr (boolean): Enable physically-based rendering textures (default: false)
239
+ - texture_prompt (string, optional): Text to guide texturing. Max 600 characters
240
+ - texture_image_url (string, optional): Image URL to guide texturing
241
+ - ai_model (enum): AI model - "meshy-5", "meshy-6", or "latest" (default). Should match the preview's ai_model to avoid model mismatch errors.
242
+ - remove_lighting (boolean, optional): Remove highlights/shadows from base color texture. Default true. Only meshy-6/latest
243
+ - target_formats (string[], optional): Output formats. Default: all except 3mf.
244
+ - auto_size (boolean, optional): AI auto-estimate real-world height. Default false.
245
+ - origin_at (enum, optional): "bottom" or "center".
246
+ - response_format (enum): Output format - "markdown" or "json" (default: "markdown")
247
+
248
+ IMPORTANT: The ai_model used for refine should match the preview's ai_model. All models (meshy-5, meshy-6, latest) support refine.
249
+
250
+ Returns:
251
+ { "task_id": "abc-123-def", "status": "PENDING", "estimated_time": "2-3 minutes" }
252
+
253
+ Next Steps:
254
+ Use meshy_get_task_status with task_id and task_type="text-to-3d" to check progress.
255
+
256
+ Examples:
257
+ - Basic refine: { preview_task_id: "abc-123" }
258
+ - With PBR: { preview_task_id: "abc-123", enable_pbr: true }
259
+ - Guided texture: { preview_task_id: "abc-123", texture_prompt: "rusty metal" }`,
260
+ inputSchema: TextTo3DRefineInputSchema,
261
+ outputSchema: TaskCreatedOutputSchema,
262
+ annotations: {
263
+ readOnlyHint: false,
264
+ destructiveHint: false,
265
+ idempotentHint: false,
266
+ openWorldHint: true
267
+ }
268
+ }, async (params) => {
269
+ try {
270
+ const request = {
271
+ mode: "refine",
272
+ preview_task_id: params.preview_task_id,
273
+ enable_pbr: params.enable_pbr,
274
+ ai_model: params.ai_model
275
+ };
276
+ if (params.texture_prompt) {
277
+ request.texture_prompt = params.texture_prompt;
278
+ }
279
+ if (params.texture_image_url) {
280
+ request.texture_image_url = params.texture_image_url;
281
+ }
282
+ if (params.remove_lighting !== undefined) {
283
+ request.remove_lighting = params.remove_lighting;
284
+ }
285
+ if (params.target_formats) {
286
+ request.target_formats = params.target_formats;
287
+ }
288
+ if (params.auto_size !== undefined)
289
+ request.auto_size = params.auto_size;
290
+ if (params.origin_at)
291
+ request.origin_at = params.origin_at;
292
+ const response = await client.post("/openapi/v2/text-to-3d", request);
293
+ const taskId = response.result;
294
+ const output = {
295
+ task_id: taskId,
296
+ status: "PENDING",
297
+ message: `Text-to-3D refine task created successfully. Task ID: ${taskId}`,
298
+ estimated_time: "2-3 minutes"
299
+ };
300
+ return formatTaskCreatedResponse(output, params.response_format, "Text-to-3D Refine Task Created", `Texturing the preview model from task "${params.preview_task_id}".`);
301
+ }
302
+ catch (error) {
303
+ return {
304
+ isError: true,
305
+ content: [{
306
+ type: "text",
307
+ text: handleMeshyError(error)
308
+ }]
309
+ };
310
+ }
311
+ });
312
+ // Multi-image-to-3D tool
313
+ server.registerTool("meshy_multi_image_to_3d", {
314
+ title: "Generate 3D Model from Multiple Images",
315
+ description: `Generate a 3D model from 1–4 images using Meshy AI.
316
+
317
+ Use multiple views of the same object for better 3D reconstruction.
318
+
319
+ Image Input (provide ONE of these):
320
+ - image_urls (array): 1–4 publicly accessible image URLs
321
+ - file_paths (array): 1–4 absolute paths to LOCAL image files. Server reads and encodes them automatically.
322
+
323
+ IMPORTANT: For local files, always use file_paths instead of manually base64-encoding.
324
+
325
+ Other Args:
326
+ - ai_model: "meshy-5", "meshy-6", or "latest" (default). Ask user which model before proceeding
327
+ - model_type, pose_mode, topology, target_polycount, should_remesh, symmetry_mode
328
+ - should_texture: Whether to generate textures (default true)
329
+ - enable_pbr: PBR maps (default false)
330
+ - texture_prompt, texture_image_url: Guide texturing
331
+ - image_enhancement: Optimize input images (default true, meshy-6/latest only)
332
+ - remove_lighting: Remove highlights/shadows from base color texture (default true, meshy-6/latest only)
333
+ - save_pre_remeshed_model, response_format
334
+
335
+ Examples:
336
+ - Local files: { file_paths: ["/path/front.jpg", "/path/side.jpg"] }
337
+ - Public URLs: { image_urls: ["https://example.com/front.jpg", "https://example.com/side.jpg"] }
338
+
339
+ Error Handling:
340
+ - Returns "InvalidImageUrl" if any image is not accessible
341
+ - Returns "File not found" if any file_path doesn't exist`,
342
+ inputSchema: MultiImageTo3DInputSchema,
343
+ outputSchema: TaskCreatedOutputSchema,
344
+ annotations: {
345
+ readOnlyHint: false,
346
+ destructiveHint: false,
347
+ idempotentHint: false,
348
+ openWorldHint: true
349
+ }
350
+ }, async (params) => {
351
+ try {
352
+ let resolvedUrls;
353
+ if (params.file_paths && params.file_paths.length > 0) {
354
+ resolvedUrls = await Promise.all(params.file_paths.map(fp => fileToDataUri(fp)));
355
+ }
356
+ else if (params.image_urls && params.image_urls.length > 0) {
357
+ resolvedUrls = params.image_urls;
358
+ }
359
+ else {
360
+ throw new Error("Either image_urls or file_paths must be provided.");
361
+ }
362
+ const request = {
363
+ image_urls: resolvedUrls,
364
+ enable_pbr: params.enable_pbr,
365
+ moderation: false
366
+ };
367
+ if (params.ai_model)
368
+ request.ai_model = params.ai_model;
369
+ if (params.model_type)
370
+ request.model_type = params.model_type;
371
+ if (params.pose_mode)
372
+ request.pose_mode = params.pose_mode;
373
+ if (params.topology)
374
+ request.topology = params.topology;
375
+ if (params.target_polycount)
376
+ request.target_polycount = params.target_polycount;
377
+ if (params.should_remesh !== undefined)
378
+ request.should_remesh = params.should_remesh;
379
+ if (params.symmetry_mode)
380
+ request.symmetry_mode = params.symmetry_mode;
381
+ if (params.should_texture !== undefined)
382
+ request.should_texture = params.should_texture;
383
+ if (params.texture_prompt)
384
+ request.texture_prompt = params.texture_prompt;
385
+ if (params.texture_image_url)
386
+ request.texture_image_url = params.texture_image_url;
387
+ if (params.image_enhancement !== undefined)
388
+ request.image_enhancement = params.image_enhancement;
389
+ if (params.remove_lighting !== undefined)
390
+ request.remove_lighting = params.remove_lighting;
391
+ if (params.save_pre_remeshed_model !== undefined)
392
+ request.save_pre_remeshed_model = params.save_pre_remeshed_model;
393
+ if (params.target_formats)
394
+ request.target_formats = params.target_formats;
395
+ if (params.auto_size !== undefined)
396
+ request.auto_size = params.auto_size;
397
+ if (params.origin_at)
398
+ request.origin_at = params.origin_at;
399
+ const response = await client.post("/openapi/v1/multi-image-to-3d", request);
400
+ const taskId = response.result;
401
+ const output = {
402
+ task_id: taskId,
403
+ status: "PENDING",
404
+ message: `Multi-image 3D generation task created successfully. Task ID: ${taskId}`,
405
+ estimated_time: "2-3 minutes"
406
+ };
407
+ return formatTaskCreatedResponse(output, params.response_format, "3D Generation Task Created (Multi-Image-to-3D)", `Your 3D model is being generated from ${resolvedUrls.length} image(s).`, "multi-image-to-3d");
408
+ }
409
+ catch (error) {
410
+ return {
411
+ isError: true,
412
+ content: [{
413
+ type: "text",
414
+ text: handleMeshyError(error, { tool: "meshy_multi_image_to_3d" })
415
+ }]
416
+ };
417
+ }
418
+ });
419
+ }
@@ -0,0 +1,9 @@
1
+ /**
2
+ * Image generation tools (text-to-image, image-to-image)
3
+ */
4
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
5
+ import { MeshyClient } from "../services/meshy-client.js";
6
+ /**
7
+ * Register image generation tools with the MCP server
8
+ */
9
+ export declare function registerImageTools(server: McpServer, client: MeshyClient): void;
@@ -0,0 +1,154 @@
1
+ /**
2
+ * Image generation tools (text-to-image, image-to-image)
3
+ */
4
+ import { handleMeshyError } from "../services/error-handler.js";
5
+ import { fileToDataUri } from "../services/file-utils.js";
6
+ import { TextToImageInputSchema, ImageToImageInputSchema } from "../schemas/image.js";
7
+ import { TaskCreatedOutputSchema } from "../schemas/output.js";
8
+ import { formatTaskCreatedResponse } from "../utils/response-formatter.js";
9
+ /**
10
+ * Register image generation tools with the MCP server
11
+ */
12
+ export function registerImageTools(server, client) {
13
+ // Text-to-image tool
14
+ server.registerTool("meshy_text_to_image", {
15
+ title: "Generate 2D Image from Text",
16
+ description: `Generate a 2D image from a text description using Meshy AI.
17
+
18
+ Useful for creating reference images that can then be used with image-to-3d or image-to-image.
19
+
20
+ Args:
21
+ - ai_model (enum): AI model to use - "nano-banana" or "nano-banana-pro" (required)
22
+ - prompt (string): Text description of the image to generate (2-600 characters, required)
23
+ - generate_multi_view (boolean, optional): Generate multiple viewpoint images (default: false)
24
+ - pose_mode (enum, optional): Pose for character images - "a-pose" or "t-pose"
25
+ - aspect_ratio (enum, optional): Image aspect ratio (default: "1:1"). Options: "1:1", "16:9", "9:16", "4:3", "3:4"
26
+ - response_format (enum): Output format - "markdown" or "json" (default: "markdown")
27
+
28
+ Returns:
29
+ {
30
+ "task_id": "abc-123-def",
31
+ "status": "PENDING",
32
+ "message": "Image generation task created...",
33
+ "estimated_time": "1-2 minutes"
34
+ }
35
+
36
+ Next Steps:
37
+ Use meshy_get_task_status with task_id and task_type="text-to-image" to check progress.
38
+
39
+ Examples:
40
+ - Basic image: { ai_model: "nano-banana", prompt: "a cute cartoon cat" }
41
+ - Widescreen: { ai_model: "nano-banana-pro", prompt: "fantasy landscape", aspect_ratio: "16:9" }
42
+ - Multi-view character: { ai_model: "nano-banana-pro", prompt: "game character", generate_multi_view: true }
43
+
44
+ Error Handling:
45
+ - Returns "InsufficientCredits" if account needs upgrade`,
46
+ inputSchema: TextToImageInputSchema,
47
+ outputSchema: TaskCreatedOutputSchema,
48
+ annotations: {
49
+ readOnlyHint: false,
50
+ destructiveHint: false,
51
+ idempotentHint: false,
52
+ openWorldHint: true
53
+ }
54
+ }, async (params) => {
55
+ try {
56
+ const request = {
57
+ ai_model: params.ai_model,
58
+ prompt: params.prompt,
59
+ generate_multi_view: params.generate_multi_view,
60
+ aspect_ratio: params.aspect_ratio
61
+ };
62
+ if (params.pose_mode)
63
+ request.pose_mode = params.pose_mode;
64
+ const response = await client.post("/openapi/v1/text-to-image", request);
65
+ const taskId = response.result;
66
+ const output = {
67
+ task_id: taskId,
68
+ status: "PENDING",
69
+ message: `Text-to-image task created successfully. Task ID: ${taskId}`,
70
+ estimated_time: "1-2 minutes"
71
+ };
72
+ return formatTaskCreatedResponse(output, params.response_format, "Image Generation Task Created", `Generating image from prompt: "${params.prompt}"`, "text-to-image");
73
+ }
74
+ catch (error) {
75
+ return {
76
+ isError: true,
77
+ content: [{
78
+ type: "text",
79
+ text: handleMeshyError(error)
80
+ }]
81
+ };
82
+ }
83
+ });
84
+ // Image-to-image tool
85
+ server.registerTool("meshy_image_to_image", {
86
+ title: "Transform Image using AI",
87
+ description: `Transform images using AI with a text prompt and reference images using Meshy AI.
88
+
89
+ Reference Image Input (provide ONE of these):
90
+ - reference_image_urls (array): 1–5 publicly accessible reference image URLs
91
+ - reference_file_paths (array): 1–5 absolute paths to LOCAL image files. Server reads and encodes them automatically.
92
+
93
+ IMPORTANT: For local files, always use reference_file_paths instead of manually base64-encoding.
94
+
95
+ Other Args:
96
+ - ai_model (enum): "nano-banana" or "nano-banana-pro" (required)
97
+ - prompt (string): Text description guiding the transformation (required)
98
+ - generate_multi_view (boolean, optional): Generate multiple viewpoint images (default: false)
99
+ - response_format (enum): Output format (default: "markdown")
100
+
101
+ Examples:
102
+ - Local file: { ai_model: "nano-banana", prompt: "make it a robot", reference_file_paths: ["/path/to/image.jpg"] }
103
+ - Public URL: { ai_model: "nano-banana", prompt: "make it a robot", reference_image_urls: ["https://..."] }
104
+
105
+ Error Handling:
106
+ - Returns "InvalidImageUrl" if any reference image is not accessible
107
+ - Returns "File not found" if any file_path doesn't exist`,
108
+ inputSchema: ImageToImageInputSchema,
109
+ outputSchema: TaskCreatedOutputSchema,
110
+ annotations: {
111
+ readOnlyHint: false,
112
+ destructiveHint: false,
113
+ idempotentHint: false,
114
+ openWorldHint: true
115
+ }
116
+ }, async (params) => {
117
+ try {
118
+ let resolvedUrls;
119
+ if (params.reference_file_paths && params.reference_file_paths.length > 0) {
120
+ resolvedUrls = await Promise.all(params.reference_file_paths.map(fp => fileToDataUri(fp)));
121
+ }
122
+ else if (params.reference_image_urls && params.reference_image_urls.length > 0) {
123
+ resolvedUrls = params.reference_image_urls;
124
+ }
125
+ else {
126
+ throw new Error("Either reference_image_urls or reference_file_paths must be provided.");
127
+ }
128
+ const request = {
129
+ ai_model: params.ai_model,
130
+ prompt: params.prompt,
131
+ reference_image_urls: resolvedUrls,
132
+ generate_multi_view: params.generate_multi_view
133
+ };
134
+ const response = await client.post("/openapi/v1/image-to-image", request);
135
+ const taskId = response.result;
136
+ const output = {
137
+ task_id: taskId,
138
+ status: "PENDING",
139
+ message: `Image-to-image task created successfully. Task ID: ${taskId}`,
140
+ estimated_time: "1-2 minutes"
141
+ };
142
+ return formatTaskCreatedResponse(output, params.response_format, "Image-to-Image Task Created", `Transforming ${resolvedUrls.length} reference image(s) with AI.`, "image-to-image");
143
+ }
144
+ catch (error) {
145
+ return {
146
+ isError: true,
147
+ content: [{
148
+ type: "text",
149
+ text: handleMeshyError(error)
150
+ }]
151
+ };
152
+ }
153
+ });
154
+ }
@@ -0,0 +1,9 @@
1
+ /**
2
+ * Post-processing tools (remesh, retexture, rig, animate)
3
+ */
4
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
5
+ import { MeshyClient } from "../services/meshy-client.js";
6
+ /**
7
+ * Register post-processing tools with the MCP server
8
+ */
9
+ export declare function registerPostProcessingTools(server: McpServer, client: MeshyClient): void;