@mindstone-engineering/mcp-server-runway 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,138 @@
1
+ import { z } from 'zod';
2
+ import { runwayFetch, resolveMediaInput } from '../client.js';
3
+ import { VOICE_PRESETS, DUBBING_LANGUAGES } from '../types.js';
4
+ import { withErrorHandling } from '../utils.js';
5
/**
 * Registers the audio-generation tools (TTS, sound effects, voice swap,
 * dubbing, voice isolation) on the given MCP server instance.
 */
export function registerAudioTools(server) {
    // ── Text-to-Speech ────────────────────────────────────────────────────
    server.registerTool('generate_speech', {
        description: 'Generate spoken audio from text using ElevenLabs voices via Runway. ' +
            '49 voice presets available. Can also use custom voice IDs. ' +
            'COST: 1 credit per 50 characters. WORKFLOW: Returns task_id → poll or use wait_for_runway_task.',
        inputSchema: z.object({
            text: z.string().describe('Text to speak. Max 1000 characters.'),
            voice: z.string().optional().describe('Voice preset name or custom voice UUID. Default: Maya.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const chosenVoice = args.voice || 'Maya';
        // Custom ElevenLabs voice IDs are long, dash-separated UUID-like strings;
        // preset names are short — distinguish the two heuristically.
        const looksLikeCustomId = chosenVoice.includes('-') && chosenVoice.length > 20;
        const payload = {
            model: 'eleven_multilingual_v2',
            promptText: args.text,
            voice: looksLikeCustomId
                ? { type: 'custom', id: chosenVoice }
                : { type: 'runway-preset', presetId: chosenVoice },
        };
        const task = await runwayFetch('/text_to_speech', { method: 'POST', body: JSON.stringify(payload) });
        // Pricing: 1 credit per 50 characters, rounded up; 1 credit = $0.01.
        const credits = Math.ceil(args.text.length / 50);
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', voice: chosenVoice,
            estimated_credits: credits, estimated_cost: `$${(credits * 0.01).toFixed(2)}`,
            message: `Speech generation started (voice: ${chosenVoice}). Poll with check_runway_task("${task.id}") every 10s, or use wait_for_runway_task.`,
        });
    }));
    // ── Sound Effect ──────────────────────────────────────────────────────
    server.registerTool('generate_sound_effect', {
        description: 'Generate sound effects from a text description. ' +
            'COST: 1 credit per 6 seconds. WORKFLOW: Returns task_id → poll or use wait_for_runway_task.',
        inputSchema: z.object({
            prompt_text: z.string().describe('Describe the sound effect. Max 3000 chars.'),
            duration: z.number().optional().describe('Duration in seconds (0.5-30). Auto-determined if omitted.'),
            loop: z.boolean().optional().describe('If true, output loops seamlessly. Default: false.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        // Only forward optional fields the caller actually supplied.
        const payload = {
            model: 'eleven_text_to_sound_v2',
            promptText: args.prompt_text,
            ...(args.duration !== undefined ? { duration: args.duration } : {}),
            ...(args.loop !== undefined ? { loop: args.loop } : {}),
        };
        const task = await runwayFetch('/sound_effect', { method: 'POST', body: JSON.stringify(payload) });
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING',
            cost_rate: '1 credit per 6s of audio',
            message: `Sound effect generation started. Poll with check_runway_task("${task.id}") every 10s, or use wait_for_runway_task.`,
        });
    }));
    // ── Voice Swap (Speech-to-Speech) ─────────────────────────────────────
    server.registerTool('swap_voice', {
        description: 'Replace the voice in an audio or video file with a different voice, preserving speech content. ' +
            'COST: 1 credit per 3 seconds.',
        inputSchema: z.object({
            media: z.string().describe('Audio or video file. HTTPS URL or local file path.'),
            media_type: z.enum(['audio', 'video']).optional().describe('Whether input is audio or video. Default: audio.'),
            voice: z.enum(VOICE_PRESETS).optional().describe('Target voice preset. Default: Maya.'),
            remove_background_noise: z.boolean().optional().describe('Remove background noise. Default: false.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const mediaKind = args.media_type || 'audio';
        const mediaUri = await resolveMediaInput(args.media, mediaKind);
        const targetVoice = args.voice || 'Maya';
        const payload = {
            model: 'eleven_multilingual_sts_v2',
            media: { type: mediaKind, uri: mediaUri },
            voice: { type: 'runway-preset', presetId: targetVoice },
            ...(args.remove_background_noise !== undefined
                ? { removeBackgroundNoise: args.remove_background_noise }
                : {}),
        };
        const task = await runwayFetch('/speech_to_speech', { method: 'POST', body: JSON.stringify(payload) });
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', voice: targetVoice,
            cost_rate: '1 credit per 3s of audio',
            message: `Voice swap started (target: ${targetVoice}). Poll with check_runway_task("${task.id}") every 15s, or use wait_for_runway_task.`,
        });
    }));
    // ── Voice Dubbing ─────────────────────────────────────────────────────
    server.registerTool('dub_audio', {
        description: 'Translate and dub audio into a different language, cloning the original speaker\'s voice. ' +
            'COST: 1 credit per 2 seconds. WORKFLOW: Returns task_id → poll or use wait_for_runway_task.',
        inputSchema: z.object({
            audio: z.string().describe('Audio file to dub. HTTPS URL or local file path.'),
            target_language: z.enum(DUBBING_LANGUAGES).describe('Target language code (e.g., "es" for Spanish).'),
            disable_voice_cloning: z.boolean().optional().describe('Use generic voice instead of cloning. Default: false.'),
            drop_background_audio: z.boolean().optional().describe('Remove background audio/music. Default: false.'),
            num_speakers: z.number().int().optional().describe('Number of speakers. Auto-detected if not specified.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const audioUri = await resolveMediaInput(args.audio, 'audio');
        const targetLang = args.target_language;
        const payload = {
            model: 'eleven_voice_dubbing', audioUri, targetLang,
            ...(args.disable_voice_cloning !== undefined ? { disableVoiceCloning: args.disable_voice_cloning } : {}),
            ...(args.drop_background_audio !== undefined ? { dropBackgroundAudio: args.drop_background_audio } : {}),
            ...(args.num_speakers !== undefined ? { numSpeakers: args.num_speakers } : {}),
        };
        const task = await runwayFetch('/voice_dubbing', { method: 'POST', body: JSON.stringify(payload) });
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', target_language: targetLang,
            cost_rate: '1 credit per 2s of output audio',
            message: `Voice dubbing started (target: ${targetLang}). Poll with check_runway_task("${task.id}") every 15s, or use wait_for_runway_task.`,
        });
    }));
    // ── Voice Isolation ───────────────────────────────────────────────────
    server.registerTool('isolate_voice', {
        description: 'Isolate voice from background audio. Extracts clean speech. Input must be 4.6s-3600s. ' +
            'COST: 1 credit per 6 seconds.',
        inputSchema: z.object({
            audio: z.string().describe('Audio file with voice + background. HTTPS URL or local file.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const audioUri = await resolveMediaInput(args.audio, 'audio');
        const task = await runwayFetch('/voice_isolation', {
            method: 'POST',
            body: JSON.stringify({ model: 'eleven_voice_isolation', audioUri }),
        });
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING',
            cost_rate: '1 credit per 6s of audio',
            message: `Voice isolation started. Poll with check_runway_task("${task.id}") every 10s, or use wait_for_runway_task.`,
        });
    }));
}
138
+ //# sourceMappingURL=audio.js.map
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerConfigureTools(server: McpServer): void;
3
+ //# sourceMappingURL=configure.d.ts.map
@@ -0,0 +1,46 @@
1
+ import { z } from 'zod';
2
+ import { setApiKey } from '../auth.js';
3
+ import { bridgeRequest, BRIDGE_STATE_PATH } from '../bridge.js';
4
+ import { RunwayError } from '../types.js';
5
+ import { withErrorHandling } from '../utils.js';
6
/**
 * Registers the API-key configuration tool. Persists the key via the host-app
 * bridge when one is available, otherwise keeps it in process memory only.
 */
export function registerConfigureTools(server) {
    server.registerTool('configure_runway_api_key', {
        description: 'Save your Runway API key. Call this when the user provides their key. ' +
            'WHERE TO GET A KEY: Go to https://dev.runwayml.com/ → API Keys. ' +
            'Add credits in Billing (min $10 = 1000 credits).',
        inputSchema: z.object({
            api_key: z.string().min(1).describe('Runway API key (starts with "key_").'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const trimmedKey = args.api_key.trim();
        // No bridge: the key lives only in this process for the session.
        if (!BRIDGE_STATE_PATH) {
            setApiKey(trimmedKey);
            return JSON.stringify({
                ok: true,
                message: 'Runway API key configured for this session.',
            });
        }
        // Bridge available: persist through the host application.
        try {
            const result = await bridgeRequest('/bundled/runway/configure', { apiKey: trimmedKey });
            if (result.success) {
                setApiKey(trimmedKey);
                return JSON.stringify({
                    ok: true,
                    message: result.warning
                        ? `Runway API key configured successfully. Note: ${result.warning}`
                        : 'Runway API key configured! Ready to generate.',
                });
            }
            // Bridge answered but refused — surface as an error, never fall through.
            throw new RunwayError(result.error || 'Bridge configuration failed', 'BRIDGE_ERROR', 'The host app bridge rejected the configuration request. Check the host app logs.');
        }
        catch (error) {
            if (error instanceof RunwayError)
                throw error;
            // Transport-level failure (network, timeout, etc.) — wrap and rethrow.
            throw new RunwayError(`Bridge request failed: ${error instanceof Error ? error.message : String(error)}`, 'BRIDGE_ERROR', 'Could not reach the host app bridge. Ensure the host app is running.');
        }
    }));
}
46
+ //# sourceMappingURL=configure.js.map
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerImageTools(server: McpServer): void;
3
+ //# sourceMappingURL=image.d.ts.map
@@ -0,0 +1,52 @@
1
+ import { z } from 'zod';
2
+ import { runwayFetch, resolveMediaInput, addContentModeration } from '../client.js';
3
+ import { withErrorHandling } from '../utils.js';
4
/**
 * Registers the image-generation tool on the given MCP server instance.
 */
export function registerImageTools(server) {
    server.registerTool('generate_image', {
        description: 'Generate an image from text, optionally with reference images for style/content guidance. ' +
            'MODELS: gen4_image (high quality), gen4_image_turbo (fast/cheap), gemini_2.5_flash (Google Gemini). ' +
            'REFERENCE IMAGES: Tag with short label (e.g., "cat"), use @tag in prompt. ' +
            'WORKFLOW: Returns task_id → poll with check_runway_task or use wait_for_runway_task.',
        inputSchema: z.object({
            prompt_text: z.string().describe('Text description. Use @tag to reference images.'),
            model: z.enum(['gen4_image', 'gen4_image_turbo', 'gemini_2.5_flash']).optional().describe('Default: gen4_image.'),
            ratio: z.enum([
                '1024:1024', '1080:1080', '1168:880', '1360:768', '1440:1080',
                '1080:1440', '1808:768', '1920:1080', '1080:1920', '2112:912',
                '1280:720', '720:1280', '720:720', '960:720', '720:960', '1680:720',
            ]).optional().describe('Output resolution. Default: 1920:1080.'),
            reference_images: z.array(z.object({
                uri: z.string().describe('HTTPS URL, Runway URI, or local file path.'),
                tag: z.string().optional().describe('Tag for @mention in prompt. 3-16 lowercase chars.'),
            })).min(1).max(3).optional().describe('Optional 1-3 reference images.'),
            content_moderation: z.enum(['auto', 'low']).optional().describe('Public figure threshold.'),
            seed: z.number().int().optional().describe('Random seed (0-4294967295).'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const chosenModel = args.model || 'gen4_image';
        const chosenRatio = args.ratio || '1920:1080';
        const payload = { model: chosenModel, promptText: args.prompt_text, ratio: chosenRatio };
        const references = args.reference_images;
        if (references && references.length > 0) {
            // Resolve references one at a time (local paths may require an upload).
            const resolved = [];
            for (const ref of references) {
                const entry = { uri: await resolveMediaInput(ref.uri, 'image') };
                if (ref.tag)
                    entry.tag = ref.tag;
                resolved.push(entry);
            }
            payload.referenceImages = resolved;
        }
        if (args.seed !== undefined)
            payload.seed = args.seed;
        addContentModeration(payload, args.content_moderation);
        const task = await runwayFetch('/text_to_image', { method: 'POST', body: JSON.stringify(payload) });
        // Credit pricing: turbo is flat 2, gemini flat 5;
        // gen4_image costs 8 at 1080p-class ratios, 5 otherwise.
        let credits;
        if (chosenModel === 'gen4_image_turbo') {
            credits = 2;
        }
        else if (chosenModel === 'gemini_2.5_flash') {
            credits = 5;
        }
        else {
            credits = ['1920:1080', '1080:1920', '1440:1080', '1080:1440'].includes(chosenRatio) ? 8 : 5;
        }
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', model: chosenModel,
            estimated_credits: credits, estimated_cost: `$${(credits * 0.01).toFixed(2)}`,
            message: `Image generation started (${chosenModel}). Poll with check_runway_task("${task.id}") every 10s, or use wait_for_runway_task.`,
        });
    }));
}
52
+ //# sourceMappingURL=image.js.map
@@ -0,0 +1,8 @@
1
+ export { registerConfigureTools } from './configure.js';
2
+ export { registerVideoTools } from './video.js';
3
+ export { registerImageTools } from './image.js';
4
+ export { registerAudioTools } from './audio.js';
5
+ export { registerVoiceTools } from './voices.js';
6
+ export { registerTaskTools } from './tasks.js';
7
+ export { registerAccountTools } from './account.js';
8
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1,8 @@
1
+ export { registerConfigureTools } from './configure.js';
2
+ export { registerVideoTools } from './video.js';
3
+ export { registerImageTools } from './image.js';
4
+ export { registerAudioTools } from './audio.js';
5
+ export { registerVoiceTools } from './voices.js';
6
+ export { registerTaskTools } from './tasks.js';
7
+ export { registerAccountTools } from './account.js';
8
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerTaskTools(server: McpServer): void;
3
+ //# sourceMappingURL=tasks.d.ts.map
@@ -0,0 +1,193 @@
1
+ import { z } from 'zod';
2
+ import { runwayFetch, runwayRawFetch, validateDownloadUrl } from '../client.js';
3
+ import { RunwayError } from '../types.js';
4
+ import { withErrorHandling } from '../utils.js';
5
+ const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));
6
/**
 * Registers task-lifecycle tools: status check, blocking wait, cancel/delete,
 * output download, and media upload.
 */
export function registerTaskTools(server) {
    // ── Check Task ────────────────────────────────────────────────────────
    server.registerTool('check_runway_task', {
        description: 'Check the status of any Runway generation task. ' +
            'STATUS VALUES: PENDING → THROTTLED → RUNNING → SUCCEEDED (output URLs) or FAILED. ' +
            'TIP: Use wait_for_runway_task for automatic polling.',
        inputSchema: z.object({
            task_id: z.string().describe('Task ID from any generate_* tool.'),
        }),
        annotations: { readOnlyHint: true },
    }, withErrorHandling(async (args) => {
        const result = await runwayFetch(`/tasks/${args.task_id}`);
        const response = {
            ok: true, task_id: result.id, status: result.status, created_at: result.createdAt,
        };
        if (result.status === 'PENDING' || result.status === 'THROTTLED') {
            response.message = `Task ${result.status.toLowerCase()}. Poll again in 15 seconds.`;
        }
        else if (result.status === 'RUNNING') {
            response.message = 'Generation in progress. Poll again in 20 seconds.';
        }
        else if (result.status === 'SUCCEEDED') {
            // FIX: a SUCCEEDED task with a missing/empty output array previously
            // produced a response with no message at all; now always report.
            if (result.output?.length) {
                response.output = result.output;
                response.message = 'Generation complete! Output URLs are ready.';
            }
            else {
                response.message = 'Generation complete, but no output URLs were returned.';
            }
        }
        else if (result.status === 'FAILED') {
            response.ok = false;
            response.error = result.failure || 'Generation failed.';
            response.failure_code = result.failureCode;
        }
        return JSON.stringify(response);
    }));
    // ── Wait for Task ─────────────────────────────────────────────────────
    server.registerTool('wait_for_runway_task', {
        description: 'Submit a task ID and wait for it to complete. Polls automatically. ' +
            'Returns the final task result including output URLs when successful.',
        inputSchema: z.object({
            task_id: z.string().describe('Task ID from any generate_* tool.'),
            poll_interval: z.number().optional().describe('Seconds between polls. Default: 15. Min: 5.'),
            timeout: z.number().optional().describe('Max seconds to wait. Default: 300 (5 min). Max: 600.'),
        }),
        annotations: { readOnlyHint: true },
    }, withErrorHandling(async (args) => {
        // Clamp user-supplied knobs: poll interval ≥ 5s, timeout in [30, 600]s.
        const pollInterval = Math.max(5, args.poll_interval || 15) * 1000;
        const timeout = Math.min(600, Math.max(30, args.timeout || 300)) * 1000;
        const taskId = args.task_id;
        const startTime = Date.now();
        const maxTransientErrors = 3;
        let transientErrors = 0;
        while (Date.now() - startTime < timeout) {
            let result;
            try {
                result = await runwayFetch(`/tasks/${taskId}`);
                transientErrors = 0; // any successful poll resets the error budget
            }
            catch (pollErr) {
                // Tolerate a few consecutive transient poll failures before giving up.
                transientErrors++;
                if (transientErrors >= maxTransientErrors)
                    throw pollErr;
                await sleep(pollInterval);
                continue;
            }
            if (result.status === 'SUCCEEDED') {
                return JSON.stringify({
                    ok: true, task_id: result.id, status: 'SUCCEEDED',
                    output: result.output, created_at: result.createdAt,
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000),
                    message: 'Generation complete! Output URLs are ready. Use download_runway_output to save locally.',
                });
            }
            if (result.status === 'FAILED') {
                return JSON.stringify({
                    ok: false, task_id: result.id, status: 'FAILED',
                    error: result.failure || 'Generation failed.', failure_code: result.failureCode,
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000),
                });
            }
            await sleep(pollInterval);
        }
        // Timed out — the task may still finish server-side.
        return JSON.stringify({
            ok: false, task_id: taskId, status: 'TIMEOUT',
            error: `Task did not complete within ${timeout / 1000}s. It may still be running — check with check_runway_task.`,
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000),
        });
    }));
    // ── Cancel/Delete Task ────────────────────────────────────────────────
    server.registerTool('cancel_runway_task', {
        description: 'Cancel a pending/running task or delete a completed task. Saves credits if cancelled before completion.',
        inputSchema: z.object({
            task_id: z.string().describe('Task ID to cancel or delete.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: true },
    }, withErrorHandling(async (args) => {
        const cancelRes = await runwayRawFetch(`/tasks/${args.task_id}`, { method: 'DELETE' });
        // res.ok already covers every 2xx status, including 204 No Content.
        if (!cancelRes.ok) {
            throw new RunwayError(`Failed to cancel task (HTTP ${cancelRes.status})`, `HTTP_${cancelRes.status}`, 'Check the task ID and try again.');
        }
        return JSON.stringify({ ok: true, message: `Task ${args.task_id} cancelled/deleted.` });
    }));
    // ── Download Output ───────────────────────────────────────────────────
    server.registerTool('download_runway_output', {
        description: 'Download a Runway output (video, image, audio) to a local file. ' +
            'Use after a task succeeds to save the output locally.',
        inputSchema: z.object({
            url: z.string().describe('Output URL from a completed task.'),
            output_path: z.string().describe('Local file path to save to. Parent directory must exist.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const url = args.url;
        const outputPath = args.output_path;
        // Validate URL (SSRF prevention — blocks private/reserved hosts)
        const urlError = validateDownloadUrl(url);
        if (urlError) {
            return JSON.stringify({ ok: false, error: urlError });
        }
        // Validate output path before fetching anything.
        const fs = await import('fs');
        const pathMod = await import('path');
        const parentDir = pathMod.dirname(outputPath);
        if (!fs.existsSync(parentDir)) {
            return JSON.stringify({ ok: false, error: `Parent directory does not exist: ${parentDir}` });
        }
        const response = await fetch(url);
        if (!response.ok) {
            return JSON.stringify({ ok: false, error: `Download failed (HTTP ${response.status}). The URL may have expired.` });
        }
        if (!response.body) {
            return JSON.stringify({ ok: false, error: 'No response body received.' });
        }
        // FIX: stream via pipeline() instead of the previous manual write loop.
        // The old loop ignored write() backpressure (unbounded memory growth on
        // large videos) and only attached its 'error' listener after writing
        // finished, so an early write error crashed as an unhandled 'error' event.
        const { Readable } = await import('stream');
        const { pipeline } = await import('stream/promises');
        const fileStream = fs.createWriteStream(outputPath);
        try {
            await pipeline(Readable.fromWeb(response.body), fileStream);
        }
        catch (streamErr) {
            try {
                fs.unlinkSync(outputPath); // remove the partial file
            }
            catch { /* cleanup best-effort */ }
            throw streamErr;
        }
        const bytesWritten = fs.statSync(outputPath).size;
        const sizeMB = (bytesWritten / 1_048_576).toFixed(1);
        return JSON.stringify({
            ok: true, path: outputPath, size_mb: sizeMB,
            message: `Downloaded ${sizeMB}MB to ${outputPath}`,
        });
    }));
    // ── Upload Media ──────────────────────────────────────────────────────
    server.registerTool('upload_media', {
        description: 'Upload a local file to Runway\'s ephemeral storage, returning a runway:// URI. ' +
            'Supports files up to 200MB. URI valid for 24 hours.',
        inputSchema: z.object({
            file_path: z.string().describe('Absolute path to the local file to upload.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const fs = await import('fs');
        const filePath = args.file_path;
        if (!fs.existsSync(filePath)) {
            return JSON.stringify({ ok: false, error: `File not found: ${filePath}` });
        }
        // Enforce Runway's documented ephemeral-upload size bounds locally.
        const stats = fs.statSync(filePath);
        if (stats.size > 200 * 1_048_576) {
            return JSON.stringify({ ok: false, error: 'File exceeds 200MB limit.' });
        }
        if (stats.size < 512) {
            return JSON.stringify({ ok: false, error: 'File must be at least 512 bytes.' });
        }
        const { uploadEphemeral } = await import('../client.js');
        const uri = await uploadEphemeral(filePath);
        const sizeMB = (stats.size / 1_048_576).toFixed(1);
        return JSON.stringify({
            ok: true, runway_uri: uri, size_mb: sizeMB,
            message: `Uploaded ${sizeMB}MB. URI valid for 24 hours. Use this URI in any generation tool: ${uri}`,
        });
    }));
}
193
+ //# sourceMappingURL=tasks.js.map
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerVideoTools(server: McpServer): void;
3
+ //# sourceMappingURL=video.d.ts.map
@@ -0,0 +1,168 @@
1
+ import { z } from 'zod';
2
+ import { runwayFetch, resolveMediaInput, costEstimate, addContentModeration } from '../client.js';
3
+ import { withErrorHandling } from '../utils.js';
4
/**
 * Registers the video-generation tools (image-to-video, text-to-video,
 * video-to-video, character performance) on the given MCP server instance.
 */
export function registerVideoTools(server) {
    // ── Image-to-Video ────────────────────────────────────────────────────
    server.registerTool('generate_video_from_image', {
        description: 'Animate a still image into a video. Supports first-frame (and optionally last-frame) keyframe control. ' +
            'MODELS: gen4.5 (flagship), gen4_turbo (fast/cheap), gen3a_turbo (legacy), veo3.1 (best quality + audio), veo3.1_fast, veo3. ' +
            'WORKFLOW: Returns task_id → poll with check_runway_task or use wait_for_runway_task.',
        inputSchema: z.object({
            prompt_image: z.string().describe('First frame image. HTTPS URL, Runway URI, or local file path.'),
            last_frame_image: z.string().optional().describe('Optional last frame image for transition.'),
            prompt_text: z.string().optional().describe('Describe the desired motion.'),
            model: z.enum(['gen4.5', 'gen4_turbo', 'gen3a_turbo', 'veo3.1', 'veo3.1_fast', 'veo3']).optional().describe('Default: gen4_turbo.'),
            ratio: z.enum(['1280:720', '720:1280', '960:960', '1104:832', '832:1104', '1584:672']).optional().describe('Output resolution. Default: 1280:720.'),
            duration: z.number().optional().describe('Seconds. gen4: 2-10 (default 5). veo: 4,6,8 (default 8).'),
            audio: z.boolean().optional().describe('Generate audio (veo models only). Default: true.'),
            content_moderation: z.enum(['auto', 'low']).optional().describe('Public figure threshold.'),
            seed: z.number().int().optional().describe('Random seed (0-4294967295) for reproducibility.'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const chosenModel = args.model || 'gen4_turbo';
        const isRunwayFamily = ['gen4.5', 'gen4_turbo', 'gen3a_turbo'].includes(chosenModel);
        const chosenRatio = args.ratio || '1280:720';
        const clipSeconds = args.duration || (isRunwayFamily ? 5 : 8);
        const firstFrame = await resolveMediaInput(args.prompt_image, 'image');
        // With a last frame, promptImage becomes a keyframe list; otherwise a bare URI.
        let keyframes;
        if (args.last_frame_image) {
            keyframes = [
                { uri: firstFrame, position: 'first' },
                { uri: await resolveMediaInput(args.last_frame_image, 'image'), position: 'last' },
            ];
        }
        else {
            keyframes = firstFrame;
        }
        const payload = { model: chosenModel, promptImage: keyframes, ratio: chosenRatio, duration: clipSeconds };
        if (args.prompt_text)
            payload.promptText = args.prompt_text;
        if (args.audio !== undefined && !isRunwayFamily)
            payload.audio = args.audio;
        if (args.seed !== undefined)
            payload.seed = args.seed;
        addContentModeration(payload, args.content_moderation);
        const task = await runwayFetch('/image_to_video', { method: 'POST', body: JSON.stringify(payload) });
        const est = costEstimate(chosenModel, clipSeconds, args.audio);
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', model: chosenModel, duration: clipSeconds,
            estimated_credits: est.credits, estimated_cost: est.usd,
            keyframes: args.last_frame_image ? 'first+last' : 'first',
            message: `Image-to-video started (${chosenModel}, ${clipSeconds}s). Poll with check_runway_task("${task.id}") every 30s, or use wait_for_runway_task.`,
        });
    }));
    // ── Text-to-Video ─────────────────────────────────────────────────────
    server.registerTool('generate_video_from_text', {
        description: 'Create a video entirely from a text description. ' +
            'MODELS: gen4.5 (flagship), veo3.1 (best + audio), veo3.1_fast, veo3. ' +
            'WORKFLOW: Returns task_id → poll or use wait_for_runway_task.',
        inputSchema: z.object({
            prompt_text: z.string().describe('Detailed video description. Max 1000 chars.'),
            model: z.enum(['gen4.5', 'veo3.1', 'veo3.1_fast', 'veo3']).optional().describe('Default: gen4.5.'),
            ratio: z.enum(['1280:720', '720:1280']).optional().describe('Output resolution. Default: 1280:720.'),
            duration: z.number().optional().describe('Seconds. gen4.5: 2-10 (default 5). veo: 4,6,8 (default 8).'),
            audio: z.boolean().optional().describe('Generate audio (veo models only). Default: true.'),
            content_moderation: z.enum(['auto', 'low']).optional().describe('Public figure threshold.'),
            seed: z.number().int().optional().describe('Random seed (0-4294967295).'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const chosenModel = args.model || 'gen4.5';
        const isFlagship = chosenModel === 'gen4.5';
        const chosenRatio = args.ratio || '1280:720';
        const clipSeconds = args.duration || (isFlagship ? 5 : 8);
        const withAudio = args.audio !== false; // audio defaults on for veo models
        const payload = { model: chosenModel, promptText: args.prompt_text, ratio: chosenRatio, duration: clipSeconds };
        if (!isFlagship)
            payload.audio = withAudio;
        if (args.seed !== undefined)
            payload.seed = args.seed;
        addContentModeration(payload, args.content_moderation);
        const task = await runwayFetch('/text_to_video', { method: 'POST', body: JSON.stringify(payload) });
        const est = costEstimate(chosenModel, clipSeconds, withAudio);
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', model: chosenModel, duration: clipSeconds, audio: withAudio,
            estimated_credits: est.credits, estimated_cost: est.usd,
            message: `Text-to-video started (${chosenModel}, ${clipSeconds}s, audio=${withAudio}). Poll with check_runway_task("${task.id}") every 30s, or use wait_for_runway_task.`,
        });
    }));
    // ── Video-to-Video ────────────────────────────────────────────────────
    server.registerTool('generate_video_from_video', {
        description: 'Re-style or transform an existing video using Gen-4 Aleph. ' +
            'MODEL: gen4_aleph (only option). 15 credits/sec. ' +
            'WORKFLOW: Returns task_id → poll or use wait_for_runway_task.',
        inputSchema: z.object({
            video: z.string().describe('Source video. HTTPS URL, Runway URI, or local file path.'),
            prompt_text: z.string().describe('Describe the transformation.'),
            reference_image: z.string().optional().describe('Optional style reference image.'),
            content_moderation: z.enum(['auto', 'low']).optional().describe('Public figure threshold.'),
            seed: z.number().int().optional().describe('Random seed (0-4294967295).'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const sourceVideo = await resolveMediaInput(args.video, 'video');
        const payload = { model: 'gen4_aleph', videoUri: sourceVideo, promptText: args.prompt_text };
        if (args.seed !== undefined)
            payload.seed = args.seed;
        if (args.reference_image) {
            payload.references = [{ type: 'image', uri: await resolveMediaInput(args.reference_image, 'image') }];
        }
        addContentModeration(payload, args.content_moderation);
        const task = await runwayFetch('/video_to_video', { method: 'POST', body: JSON.stringify(payload) });
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', model: 'gen4_aleph',
            cost_rate: '15 credits/sec ($0.15/sec)',
            message: `Video-to-video started (gen4_aleph). Poll with check_runway_task("${task.id}") every 30s, or use wait_for_runway_task.`,
        });
    }));
    // ── Character Performance ─────────────────────────────────────────────
    server.registerTool('character_performance', {
        description: 'Animate a character with facial expressions and body movements from a reference performance video (Act-Two). ' +
            'MODEL: act_two. 5 credits/sec. Output duration matches reference video length. ' +
            'WORKFLOW: Returns task_id → poll or use wait_for_runway_task.',
        inputSchema: z.object({
            character: z.string().describe('Character to animate. HTTPS URL or local file path.'),
            reference_video: z.string().describe('Performance video (3-30s). HTTPS URL or local file.'),
            character_type: z.enum(['image', 'video']).optional().describe('Whether character input is image or video. Default: image.'),
            body_control: z.boolean().optional().describe('Enable body movement control. Default: false.'),
            expression_intensity: z.number().int().optional().describe('Expression intensity 1-5. Default: 3.'),
            ratio: z.enum(['1280:720', '720:1280', '960:960', '1104:832', '832:1104', '1584:672']).optional().describe('Output resolution.'),
            content_moderation: z.enum(['auto', 'low']).optional().describe('Public figure threshold.'),
            seed: z.number().int().optional().describe('Random seed (0-4294967295).'),
        }),
        annotations: { readOnlyHint: false, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const characterKind = args.character_type || 'image';
        const characterUri = await resolveMediaInput(args.character, characterKind === 'video' ? 'video' : 'image');
        const performanceUri = await resolveMediaInput(args.reference_video, 'video');
        const payload = {
            model: 'act_two',
            character: { type: characterKind, uri: characterUri },
            reference: { type: 'video', uri: performanceUri },
        };
        if (args.body_control !== undefined)
            payload.bodyControl = args.body_control;
        if (args.expression_intensity !== undefined)
            payload.expressionIntensity = args.expression_intensity;
        if (args.ratio)
            payload.ratio = args.ratio;
        if (args.seed !== undefined)
            payload.seed = args.seed;
        addContentModeration(payload, args.content_moderation);
        const task = await runwayFetch('/character_performance', { method: 'POST', body: JSON.stringify(payload) });
        return JSON.stringify({
            ok: true, task_id: task.id, status: 'PENDING', model: 'act_two',
            cost_rate: '5 credits/sec ($0.05/sec)',
            message: `Character performance started (act_two). Poll with check_runway_task("${task.id}") every 30s, or use wait_for_runway_task.`,
        });
    }));
}
168
+ //# sourceMappingURL=video.js.map
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerVoiceTools(server: McpServer): void;
3
+ //# sourceMappingURL=voices.d.ts.map