@mindstone-engineering/mcp-server-elevenlabs 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/auth.d.ts ADDED
@@ -0,0 +1,21 @@
1
+ /**
2
+ * ElevenLabs authentication module.
3
+ *
4
+ * Simple API key management — stored via env var (ELEVENLABS_API_KEY)
5
+ * or configured at runtime via the configure_elevenlabs_api_key tool.
6
+ *
7
+ * Auth: xi-api-key header on all API requests (NOT Bearer).
8
+ */
9
+ /**
10
+ * Get the current API key.
11
+ */
12
+ export declare function getApiKey(): string;
13
+ /**
14
+ * Set the API key at runtime (from configure tool).
15
+ */
16
+ export declare function setApiKey(key: string): void;
17
+ /**
18
+ * Check if an API key is configured.
19
+ */
20
+ export declare function hasApiKey(): boolean;
21
+ //# sourceMappingURL=auth.d.ts.map
package/dist/auth.js ADDED
@@ -0,0 +1,29 @@
1
+ /**
2
+ * ElevenLabs authentication module.
3
+ *
4
+ * Simple API key management — stored via env var (ELEVENLABS_API_KEY)
5
+ * or configured at runtime via the configure_elevenlabs_api_key tool.
6
+ *
7
+ * Auth: xi-api-key header on all API requests (NOT Bearer).
8
+ */
9
+ /** Runtime API key — starts from env, can be updated via configure tool. */
10
+ let apiKey = process.env.ELEVENLABS_API_KEY ?? '';
11
+ /**
12
+ * Get the current API key.
13
+ */
14
+ export function getApiKey() {
15
+ return apiKey;
16
+ }
17
+ /**
18
+ * Set the API key at runtime (from configure tool).
19
+ */
20
+ export function setApiKey(key) {
21
+ apiKey = key;
22
+ }
23
+ /**
24
+ * Check if an API key is configured.
25
+ */
26
+ export function hasApiKey() {
27
+ return apiKey.trim().length > 0;
28
+ }
29
+ //# sourceMappingURL=auth.js.map
package/dist/bridge.d.ts ADDED
@@ -0,0 +1,16 @@
1
+ /**
2
+ * Path to bridge state file, supporting both current and legacy env vars.
3
+ */
4
+ export declare const BRIDGE_STATE_PATH: string;
5
+ /**
6
+ * Send a request to the host app bridge.
7
+ *
8
+ * The bridge is an HTTP server running inside the host app (e.g. Rebel)
9
+ * that handles credential management and other cross-process operations.
10
+ */
11
+ export declare const bridgeRequest: (urlPath: string, body: Record<string, unknown>) => Promise<{
12
+ success: boolean;
13
+ warning?: string;
14
+ error?: string;
15
+ }>;
16
+ //# sourceMappingURL=bridge.d.ts.map
package/dist/bridge.js ADDED
@@ -0,0 +1,43 @@
1
+ import * as fs from 'fs';
2
+ import { REQUEST_TIMEOUT_MS } from './types.js';
3
+ /**
4
+ * Path to bridge state file, supporting both current and legacy env vars.
5
+ */
6
+ export const BRIDGE_STATE_PATH = process.env.MCP_HOST_BRIDGE_STATE || process.env.MINDSTONE_REBEL_BRIDGE_STATE || '';
7
+ const loadBridgeState = () => {
8
+ if (!BRIDGE_STATE_PATH)
9
+ return null;
10
+ try {
11
+ const raw = fs.readFileSync(BRIDGE_STATE_PATH, 'utf8');
12
+ return JSON.parse(raw);
13
+ }
14
+ catch {
15
+ return null;
16
+ }
17
+ };
18
+ /**
19
+ * Send a request to the host app bridge.
20
+ *
21
+ * The bridge is an HTTP server running inside the host app (e.g. Rebel)
22
+ * that handles credential management and other cross-process operations.
23
+ */
24
+ export const bridgeRequest = async (urlPath, body) => {
25
+ const bridge = loadBridgeState();
26
+ if (!bridge) {
27
+ return { success: false, error: 'Bridge not available' };
28
+ }
29
+ const response = await fetch(`http://127.0.0.1:${bridge.port}${urlPath}`, {
30
+ method: 'POST',
31
+ signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS),
32
+ headers: {
33
+ 'Content-Type': 'application/json',
34
+ Authorization: `Bearer ${bridge.token}`,
35
+ },
36
+ body: JSON.stringify(body),
37
+ });
38
+ if (response.status === 401 || response.status === 403) {
39
+ return { success: false, error: `Bridge returned ${response.status}: unauthorized. Check host app authentication.` };
40
+ }
41
+ return response.json();
42
+ };
43
+ //# sourceMappingURL=bridge.js.map
package/dist/client.d.ts ADDED
@@ -0,0 +1,25 @@
1
+ /**
2
+ * ElevenLabs API HTTP client.
3
+ *
4
+ * Centralises xi-api-key header injection, error handling, rate-limit
5
+ * messaging, and timeout handling for all ElevenLabs API calls.
6
+ *
7
+ * Auth: xi-api-key: {key} (NOT Bearer, NOT Basic)
8
+ * Base URL: https://api.elevenlabs.io/v1
9
+ * Voices URL: https://api.elevenlabs.io/v2/voices
10
+ */
11
+ import { type AudioResult } from './types.js';
12
+ /**
13
+ * Make an authenticated request to the ElevenLabs API.
14
+ * Returns a raw Response object.
15
+ */
16
+ export declare function elevenLabsFetch(apiKey: string, urlPath: string, options?: RequestInit): Promise<Response>;
17
+ /**
18
+ * Make a JSON API call and parse the response.
19
+ */
20
+ export declare function elevenLabsJson<T>(apiKey: string, urlPath: string, options?: RequestInit): Promise<T>;
21
+ /**
22
+ * Make an API call that returns raw audio binary. Save to file and return path.
23
+ */
24
+ export declare function elevenLabsAudio(apiKey: string, urlPath: string, options?: RequestInit, fileExtension?: string): Promise<AudioResult>;
25
+ //# sourceMappingURL=client.d.ts.map
package/dist/client.js ADDED
@@ -0,0 +1,108 @@
1
+ /**
2
+ * ElevenLabs API HTTP client.
3
+ *
4
+ * Centralises xi-api-key header injection, error handling, rate-limit
5
+ * messaging, and timeout handling for all ElevenLabs API calls.
6
+ *
7
+ * Auth: xi-api-key: {key} (NOT Bearer, NOT Basic)
8
+ * Base URL: https://api.elevenlabs.io/v1
9
+ * Voices URL: https://api.elevenlabs.io/v2/voices
10
+ */
11
+ import * as fs from 'fs';
12
+ import * as path from 'path';
13
+ import * as os from 'os';
14
+ import * as crypto from 'crypto';
15
+ import { ElevenLabsError, REQUEST_TIMEOUT_MS, getErrorResolution, } from './types.js';
16
+ const ELEVENLABS_API_BASE = 'https://api.elevenlabs.io/v1';
17
+ /**
18
+ * Make an authenticated request to the ElevenLabs API.
19
+ * Returns a raw Response object.
20
+ */
21
+ export async function elevenLabsFetch(apiKey, urlPath, options = {}) {
22
+ if (!apiKey || apiKey.trim().length === 0) {
23
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Configure your ElevenLabs API key in Settings. Get one at https://elevenlabs.io/app/settings/api-keys');
24
+ }
25
+ const url = urlPath.startsWith('https://')
26
+ ? urlPath
27
+ : `${ELEVENLABS_API_BASE}${urlPath}`;
28
+ const headers = {
29
+ 'xi-api-key': apiKey,
30
+ ...(options.headers || {}),
31
+ };
32
+ // Only set Content-Type for JSON bodies (not FormData)
33
+ if (!(options.body instanceof FormData) && !headers['Content-Type']) {
34
+ headers['Content-Type'] = 'application/json';
35
+ }
36
+ console.error(`[ElevenLabs API] ${options.method || 'GET'} ${url}`);
37
+ let response;
38
+ try {
39
+ response = await fetch(url, {
40
+ ...options,
41
+ signal: options.signal ?? AbortSignal.timeout(REQUEST_TIMEOUT_MS),
42
+ headers,
43
+ });
44
+ }
45
+ catch (error) {
46
+ if (error instanceof Error && error.name === 'TimeoutError') {
47
+ throw new ElevenLabsError('Request to ElevenLabs API timed out', 'TIMEOUT', 'The request took too long. Try again or check if the ElevenLabs API is available.');
48
+ }
49
+ throw error;
50
+ }
51
+ // Handle rate limiting
52
+ if (response.status === 429) {
53
+ throw new ElevenLabsError('Rate limited. Please wait a moment before retrying.', 'RATE_LIMITED', getErrorResolution(429));
54
+ }
55
+ // Handle auth errors
56
+ if (response.status === 401) {
57
+ throw new ElevenLabsError('Authentication failed', 'AUTH_FAILED', getErrorResolution(401));
58
+ }
59
+ if (response.status === 403) {
60
+ let detail = '';
61
+ try {
62
+ const errBody = await response.clone().json();
63
+ if (typeof errBody.detail === 'string') {
64
+ detail = errBody.detail;
65
+ }
66
+ else if (errBody.detail?.message) {
67
+ detail = errBody.detail.message;
68
+ }
69
+ }
70
+ catch { /* not JSON */ }
71
+ throw new ElevenLabsError(`Access forbidden: ${detail || 'insufficient permissions or quota'}`, 'AUTH_FAILED', getErrorResolution(403, detail));
72
+ }
73
+ // Handle other errors
74
+ if (!response.ok) {
75
+ let detail = '';
76
+ try {
77
+ const errBody = await response.clone().json();
78
+ if (typeof errBody.detail === 'string') {
79
+ detail = errBody.detail;
80
+ }
81
+ else if (errBody.detail?.message) {
82
+ detail = errBody.detail.message;
83
+ }
84
+ }
85
+ catch { /* not JSON */ }
86
+ throw new ElevenLabsError(`ElevenLabs API error (HTTP ${response.status}): ${detail || response.statusText}`, `HTTP_${response.status}`, getErrorResolution(response.status, detail));
87
+ }
88
+ return response;
89
+ }
90
+ /**
91
+ * Make a JSON API call and parse the response.
92
+ */
93
+ export async function elevenLabsJson(apiKey, urlPath, options = {}) {
94
+ const response = await elevenLabsFetch(apiKey, urlPath, options);
95
+ return (await response.json());
96
+ }
97
+ /**
98
+ * Make an API call that returns raw audio binary. Save to file and return path.
99
+ */
100
+ export async function elevenLabsAudio(apiKey, urlPath, options = {}, fileExtension = 'mp3') {
101
+ const response = await elevenLabsFetch(apiKey, urlPath, options);
102
+ const buffer = Buffer.from(await response.arrayBuffer());
103
+ const fileName = `elevenlabs_${crypto.randomUUID()}.${fileExtension}`;
104
+ const filePath = path.join(os.tmpdir(), fileName);
105
+ fs.writeFileSync(filePath, buffer);
106
+ return { filePath, sizeBytes: buffer.length };
107
+ }
108
+ //# sourceMappingURL=client.js.map
package/dist/index.d.ts ADDED
@@ -0,0 +1,14 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * ElevenLabs MCP Server
4
+ *
5
+ * Provides ElevenLabs audio integration via Model Context Protocol.
6
+ * Music generation, text-to-speech, sound effects, voice browsing, transcription.
7
+ *
8
+ * Environment variables:
9
+ * - ELEVENLABS_API_KEY: ElevenLabs API key (required, starts with sk_)
10
+ * - MCP_HOST_BRIDGE_STATE: Path to host app bridge state file (optional)
11
+ * - MINDSTONE_REBEL_BRIDGE_STATE: Legacy bridge state path (optional)
12
+ */
13
+ export {};
14
+ //# sourceMappingURL=index.d.ts.map
package/dist/index.js ADDED
@@ -0,0 +1,25 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * ElevenLabs MCP Server
4
+ *
5
+ * Provides ElevenLabs audio integration via Model Context Protocol.
6
+ * Music generation, text-to-speech, sound effects, voice browsing, transcription.
7
+ *
8
+ * Environment variables:
9
+ * - ELEVENLABS_API_KEY: ElevenLabs API key (required, starts with sk_)
10
+ * - MCP_HOST_BRIDGE_STATE: Path to host app bridge state file (optional)
11
+ * - MINDSTONE_REBEL_BRIDGE_STATE: Legacy bridge state path (optional)
12
+ */
13
+ import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
14
+ import { createServer } from './server.js';
15
+ async function main() {
16
+ const server = createServer();
17
+ const transport = new StdioServerTransport();
18
+ await server.connect(transport);
19
+ console.error('ElevenLabs MCP server running on stdio');
20
+ }
21
+ main().catch((error) => {
22
+ console.error('Fatal error:', error);
23
+ process.exit(1);
24
+ });
25
+ //# sourceMappingURL=index.js.map
package/dist/server.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function createServer(): McpServer;
3
+ //# sourceMappingURL=server.d.ts.map
package/dist/server.js ADDED
@@ -0,0 +1,15 @@
1
+ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ import { registerConfigureTools, registerMusicTools, registerSpeechTools, registerVoiceTools, registerTranscriptionTools, } from './tools/index.js';
3
+ export function createServer() {
4
+ const server = new McpServer({
5
+ name: 'elevenlabs-mcp-server',
6
+ version: '0.1.0',
7
+ });
8
+ registerConfigureTools(server);
9
+ registerMusicTools(server);
10
+ registerSpeechTools(server);
11
+ registerVoiceTools(server);
12
+ registerTranscriptionTools(server);
13
+ return server;
14
+ }
15
+ //# sourceMappingURL=server.js.map
package/dist/tools/configure.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerConfigureTools(server: McpServer): void;
3
+ //# sourceMappingURL=configure.d.ts.map
package/dist/tools/configure.js ADDED
@@ -0,0 +1,46 @@
1
+ import { z } from 'zod';
2
+ import { setApiKey } from '../auth.js';
3
+ import { bridgeRequest, BRIDGE_STATE_PATH } from '../bridge.js';
4
+ import { ElevenLabsError } from '../types.js';
5
+ import { withErrorHandling } from '../utils.js';
6
+ export function registerConfigureTools(server) {
7
+ server.registerTool('configure_elevenlabs_api_key', {
8
+ description: 'Save your ElevenLabs API key. Call this when the user provides their key. ' +
9
+ 'WHERE TO GET A KEY: Go to https://elevenlabs.io/app/settings/api-keys → Create new API key → Copy the key (starts with "sk_"). ' +
10
+ 'FREE TIER: 10,000 characters/month for TTS. Music generation requires a paid plan.',
11
+ inputSchema: z.object({
12
+ api_key: z.string().min(1).describe('ElevenLabs API key (starts with "sk_").'),
13
+ }),
14
+ annotations: { readOnlyHint: false, destructiveHint: false },
15
+ }, withErrorHandling(async (args) => {
16
+ const key = args.api_key.trim();
17
+ // If bridge is available, persist via bridge
18
+ if (BRIDGE_STATE_PATH) {
19
+ try {
20
+ const result = await bridgeRequest('/bundled/elevenlabs/configure', { apiKey: key });
21
+ if (result.success) {
22
+ setApiKey(key);
23
+ const message = result.warning
24
+ ? `ElevenLabs API key configured successfully. Note: ${result.warning}`
25
+ : 'ElevenLabs API key configured successfully! You can now generate music, speech, sound effects, and more.';
26
+ return JSON.stringify({ ok: true, message });
27
+ }
28
+ // Bridge returned failure — surface as error, do NOT fall through
29
+ throw new ElevenLabsError(result.error || 'Bridge configuration failed', 'BRIDGE_ERROR', 'The host app bridge rejected the configuration request. Check the host app logs.');
30
+ }
31
+ catch (error) {
32
+ if (error instanceof ElevenLabsError)
33
+ throw error;
34
+ // Bridge request failed (network, timeout, etc.) — surface as error
35
+ throw new ElevenLabsError(`Bridge request failed: ${error instanceof Error ? error.message : String(error)}`, 'BRIDGE_ERROR', 'Could not reach the host app bridge. Ensure the host app is running.');
36
+ }
37
+ }
38
+ // No bridge — store in-memory
39
+ setApiKey(key);
40
+ return JSON.stringify({
41
+ ok: true,
42
+ message: 'ElevenLabs API key configured successfully! You can now generate music, speech, sound effects, and more.',
43
+ });
44
+ }));
45
+ }
46
+ //# sourceMappingURL=configure.js.map
package/dist/tools/index.d.ts ADDED
@@ -0,0 +1,6 @@
1
+ export { registerConfigureTools } from './configure.js';
2
+ export { registerMusicTools } from './music.js';
3
+ export { registerSpeechTools } from './speech.js';
4
+ export { registerVoiceTools } from './voices.js';
5
+ export { registerTranscriptionTools } from './transcription.js';
6
+ //# sourceMappingURL=index.d.ts.map
package/dist/tools/index.js ADDED
@@ -0,0 +1,6 @@
1
+ export { registerConfigureTools } from './configure.js';
2
+ export { registerMusicTools } from './music.js';
3
+ export { registerSpeechTools } from './speech.js';
4
+ export { registerVoiceTools } from './voices.js';
5
+ export { registerTranscriptionTools } from './transcription.js';
6
+ //# sourceMappingURL=index.js.map
package/dist/tools/music.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerMusicTools(server: McpServer): void;
3
+ //# sourceMappingURL=music.d.ts.map
package/dist/tools/music.js ADDED
@@ -0,0 +1,134 @@
1
+ import { z } from 'zod';
2
+ import { getApiKey } from '../auth.js';
3
+ import { elevenLabsJson, elevenLabsAudio } from '../client.js';
4
+ import { ElevenLabsError } from '../types.js';
5
+ import { withErrorHandling } from '../utils.js';
6
+ const OUTPUT_FORMAT_ENUM = z.enum([
7
+ 'mp3_44100_128', 'mp3_44100_192',
8
+ 'pcm_16000', 'pcm_22050', 'pcm_24000', 'pcm_44100',
9
+ 'ulaw_8000',
10
+ ]).optional();
11
+ export function registerMusicTools(server) {
12
+ // ── generate_music ────────────────────────────────────────────────────
13
+ server.registerTool('generate_music', {
14
+ description: 'Generate music from a text prompt using ElevenLabs Music API. ' +
15
+ 'Returns a saved audio file path. DURATION: 3-600 seconds (default 30). ' +
16
+ 'COST: Consumes credits based on duration. ' +
17
+ 'PROMPT TIPS: Describe genre, mood, instruments, style, lyrics.',
18
+ inputSchema: z.object({
19
+ prompt: z.string().min(1).describe('Describe the music: genre, mood, instruments, style, lyrics.'),
20
+ duration_seconds: z.number().min(3).max(600).optional().describe('Duration in seconds (3-600). Default: 30.'),
21
+ force_instrumental: z.boolean().optional().describe('Force instrumental-only output (no vocals). Default: false.'),
22
+ output_format: OUTPUT_FORMAT_ENUM.describe('Audio output format. Default: mp3_44100_128.'),
23
+ seed: z.number().int().optional().describe('Random seed for reproducibility.'),
24
+ }),
25
+ annotations: { readOnlyHint: false, destructiveHint: false },
26
+ }, withErrorHandling(async (args) => {
27
+ const apiKey = getApiKey();
28
+ if (!apiKey) {
29
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
30
+ }
31
+ const durationSeconds = args.duration_seconds ?? 30;
32
+ const durationMs = Math.max(3000, Math.min(600000, durationSeconds * 1000));
33
+ const outputFormat = args.output_format ?? 'mp3_44100_128';
34
+ const body = {
35
+ prompt: args.prompt,
36
+ music_length_ms: durationMs,
37
+ model_id: 'music_v1',
38
+ };
39
+ if (args.force_instrumental !== undefined)
40
+ body.force_instrumental = args.force_instrumental;
41
+ if (args.seed !== undefined)
42
+ body.seed = args.seed;
43
+ const ext = outputFormat.startsWith('mp3') ? 'mp3' : 'wav';
44
+ const result = await elevenLabsAudio(apiKey, `/music?output_format=${outputFormat}`, { method: 'POST', body: JSON.stringify(body) }, ext);
45
+ return JSON.stringify({
46
+ ok: true,
47
+ file_path: result.filePath,
48
+ size_bytes: result.sizeBytes,
49
+ duration_seconds: durationSeconds,
50
+ format: outputFormat,
51
+ message: `Music generated and saved to ${result.filePath} (${(result.sizeBytes / 1024).toFixed(1)} KB, ${durationSeconds}s).`,
52
+ });
53
+ }));
54
+ // ── create_music_plan ─────────────────────────────────────────────────
55
+ server.registerTool('create_music_plan', {
56
+ description: 'Create a composition plan for music generation — FREE, no credits consumed. ' +
57
+ 'Returns a structured plan with sections, styles, and lyrics. ' +
58
+ 'Review the plan and pass it to generate_music_from_plan when ready.',
59
+ inputSchema: z.object({
60
+ prompt: z.string().min(1).describe('Describe the music you want.'),
61
+ duration_seconds: z.number().min(3).max(600).optional().describe('Target duration in seconds (3-600). Default: 30.'),
62
+ }),
63
+ annotations: { readOnlyHint: true, destructiveHint: false },
64
+ }, withErrorHandling(async (args) => {
65
+ const apiKey = getApiKey();
66
+ if (!apiKey) {
67
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
68
+ }
69
+ const durationSeconds = args.duration_seconds ?? 30;
70
+ const durationMs = Math.max(3000, Math.min(600000, durationSeconds * 1000));
71
+ const plan = await elevenLabsJson(apiKey, '/music/plan', {
72
+ method: 'POST',
73
+ body: JSON.stringify({
74
+ prompt: args.prompt,
75
+ music_length_ms: durationMs,
76
+ model_id: 'music_v1',
77
+ }),
78
+ });
79
+ const totalDurationMs = plan.sections.reduce((sum, s) => sum + (s.duration_ms || 0), 0);
80
+ return JSON.stringify({
81
+ ok: true,
82
+ composition_plan: plan,
83
+ total_duration_seconds: totalDurationMs / 1000,
84
+ num_sections: plan.sections.length,
85
+ cost: 'FREE — no credits consumed',
86
+ message: `Composition plan created with ${plan.sections.length} sections (${(totalDurationMs / 1000).toFixed(1)}s total). Review the plan and pass it to generate_music_from_plan when ready.`,
87
+ hint: 'You can modify positive_global_styles, negative_global_styles, section styles, lyrics, and durations before generating.',
88
+ });
89
+ }));
90
+ // ── generate_music_from_plan ──────────────────────────────────────────
91
+ server.registerTool('generate_music_from_plan', {
92
+ description: 'Generate music from a composition plan (created by create_music_plan or manually crafted). ' +
93
+ 'The plan must have at least one section. COST: Consumes credits based on duration.',
94
+ inputSchema: z.object({
95
+ composition_plan: z.object({
96
+ positive_global_styles: z.array(z.string()).optional().describe('Styles to apply globally.'),
97
+ negative_global_styles: z.array(z.string()).optional().describe('Styles to avoid globally.'),
98
+ sections: z.array(z.object({
99
+ style: z.string().optional(),
100
+ lyrics: z.string().optional(),
101
+ duration_ms: z.number().optional(),
102
+ })).min(1).describe('Array of sections, each with style, lyrics, and duration_ms.'),
103
+ }).describe('The composition plan object.'),
104
+ seed: z.number().int().optional().describe('Random seed for reproducibility.'),
105
+ output_format: OUTPUT_FORMAT_ENUM.describe('Audio output format. Default: mp3_44100_128.'),
106
+ }),
107
+ annotations: { readOnlyHint: false, destructiveHint: false },
108
+ }, withErrorHandling(async (args) => {
109
+ const apiKey = getApiKey();
110
+ if (!apiKey) {
111
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
112
+ }
113
+ const compositionPlan = args.composition_plan;
114
+ const outputFormat = args.output_format ?? 'mp3_44100_128';
115
+ const body = {
116
+ composition_plan: compositionPlan,
117
+ model_id: 'music_v1',
118
+ };
119
+ if (args.seed !== undefined)
120
+ body.seed = args.seed;
121
+ const ext = outputFormat.startsWith('mp3') ? 'mp3' : 'wav';
122
+ const result = await elevenLabsAudio(apiKey, `/music?output_format=${outputFormat}`, { method: 'POST', body: JSON.stringify(body) }, ext);
123
+ const totalDurationMs = (compositionPlan.sections ?? []).reduce((sum, s) => sum + (s.duration_ms || 0), 0);
124
+ return JSON.stringify({
125
+ ok: true,
126
+ file_path: result.filePath,
127
+ size_bytes: result.sizeBytes,
128
+ duration_seconds: totalDurationMs / 1000,
129
+ format: outputFormat,
130
+ message: `Music generated from plan and saved to ${result.filePath} (${(result.sizeBytes / 1024).toFixed(1)} KB).`,
131
+ });
132
+ }));
133
+ }
134
+ //# sourceMappingURL=music.js.map
package/dist/tools/speech.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerSpeechTools(server: McpServer): void;
3
+ //# sourceMappingURL=speech.d.ts.map
package/dist/tools/speech.js ADDED
@@ -0,0 +1,126 @@
1
+ import { z } from 'zod';
2
+ import { getApiKey } from '../auth.js';
3
+ import { elevenLabsJson, elevenLabsAudio } from '../client.js';
4
+ import { ElevenLabsError } from '../types.js';
5
+ import { withErrorHandling } from '../utils.js';
6
+ /**
7
+ * Look up a voice by name via the ElevenLabs v2 voices API.
8
+ */
9
+ async function lookupVoiceByName(apiKey, name) {
10
+ const data = await elevenLabsJson(apiKey, `https://api.elevenlabs.io/v2/voices?search=${encodeURIComponent(name)}&page_size=5`);
11
+ if (!data.voices || data.voices.length === 0) {
12
+ throw new ElevenLabsError(`No voice found matching "${name}"`, 'VOICE_NOT_FOUND', `No voice matched "${name}". Use list_voices to browse available voices and get the exact voice_id.`);
13
+ }
14
+ return data.voices[0];
15
+ }
16
+ export function registerSpeechTools(server) {
17
+ // ── generate_speech ───────────────────────────────────────────────────
18
+ server.registerTool('generate_speech', {
19
+ description: 'Generate spoken audio from text using ElevenLabs text-to-speech. ' +
20
+ 'Use voice_id (direct) or voice_name (fuzzy search). ' +
21
+ 'Models: eleven_multilingual_v2 (default, 29 languages), eleven_monolingual_v1 (English), eleven_turbo_v2_5 (low latency). ' +
22
+ 'COST: ~1 credit per 100 characters.',
23
+ inputSchema: z.object({
24
+ text: z.string().min(1).describe('Text to speak. Maximum ~5000 characters per request.'),
25
+ voice_id: z.string().optional().describe('Direct voice ID. Takes priority over voice_name.'),
26
+ voice_name: z.string().optional().describe('Voice name for fuzzy search (e.g., "Rachel", "Adam").'),
27
+ model_id: z.enum(['eleven_multilingual_v2', 'eleven_monolingual_v1', 'eleven_turbo_v2_5']).optional()
28
+ .describe('TTS model. Default: eleven_multilingual_v2.'),
29
+ stability: z.number().min(0).max(1).optional().describe('Voice stability 0-1. Default: 0.5.'),
30
+ similarity_boost: z.number().min(0).max(1).optional().describe('Voice similarity 0-1. Default: 0.75.'),
31
+ output_format: z.enum(['mp3_44100_128', 'mp3_44100_192', 'pcm_16000', 'pcm_22050', 'pcm_24000', 'pcm_44100']).optional()
32
+ .describe('Audio output format. Default: mp3_44100_128.'),
33
+ }),
34
+ annotations: { readOnlyHint: false, destructiveHint: false },
35
+ }, withErrorHandling(async (args) => {
36
+ const apiKey = getApiKey();
37
+ if (!apiKey) {
38
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
39
+ }
40
+ let voiceId = args.voice_id;
41
+ const voiceName = args.voice_name;
42
+ const modelId = args.model_id ?? 'eleven_multilingual_v2';
43
+ const stability = args.stability ?? 0.5;
44
+ const similarityBoost = args.similarity_boost ?? 0.75;
45
+ const outputFormat = args.output_format ?? 'mp3_44100_128';
46
+ // Voice lookup
47
+ let resolvedVoiceName = voiceName || 'default';
48
+ if (!voiceId) {
49
+ if (voiceName) {
50
+ const voice = await lookupVoiceByName(apiKey, voiceName);
51
+ voiceId = voice.voice_id;
52
+ resolvedVoiceName = voice.name;
53
+ }
54
+ else {
55
+ // Default to Rachel if no voice specified
56
+ try {
57
+ const voice = await lookupVoiceByName(apiKey, 'Rachel');
58
+ voiceId = voice.voice_id;
59
+ resolvedVoiceName = voice.name;
60
+ }
61
+ catch (err) {
62
+ // Propagate the error so withErrorHandling emits isError: true
63
+ if (err instanceof ElevenLabsError) {
64
+ throw err;
65
+ }
66
+ throw new ElevenLabsError('No voice specified and default voice lookup failed.', 'VOICE_NOT_FOUND', 'Provide a voice_id or voice_name. Use list_voices to find available voices.');
67
+ }
68
+ }
69
+ }
70
+ const body = {
71
+ text: args.text,
72
+ model_id: modelId,
73
+ voice_settings: {
74
+ stability,
75
+ similarity_boost: similarityBoost,
76
+ },
77
+ };
78
+ const ext = outputFormat.startsWith('mp3') ? 'mp3' : 'wav';
79
+ const result = await elevenLabsAudio(apiKey, `/text-to-speech/${voiceId}?output_format=${outputFormat}`, { method: 'POST', body: JSON.stringify(body) }, ext);
80
+ return JSON.stringify({
81
+ ok: true,
82
+ file_path: result.filePath,
83
+ size_bytes: result.sizeBytes,
84
+ voice: resolvedVoiceName,
85
+ voice_id: voiceId,
86
+ model: modelId,
87
+ format: outputFormat,
88
+ message: `Speech generated with voice "${resolvedVoiceName}" and saved to ${result.filePath} (${(result.sizeBytes / 1024).toFixed(1)} KB).`,
89
+ });
90
+ }));
91
+ // ── generate_sound_effect ─────────────────────────────────────────────
92
+ server.registerTool('generate_sound_effect', {
93
+ description: 'Generate sound effects from a text description. ' +
94
+ 'DURATION: 0.5-22 seconds (auto if omitted). ' +
95
+ 'PROMPT INFLUENCE (0-1): How closely to follow the text prompt. Default: 0.3. ' +
96
+ 'COST: Credits based on duration.',
97
+ inputSchema: z.object({
98
+ prompt: z.string().min(1).describe('Describe the sound effect. Be specific about characteristics.'),
99
+ duration_seconds: z.number().min(0.5).max(22).optional().describe('Duration in seconds (0.5-22). Auto if omitted.'),
100
+ prompt_influence: z.number().min(0).max(1).optional().describe('How closely to follow the prompt (0-1). Default: 0.3.'),
101
+ }),
102
+ annotations: { readOnlyHint: false, destructiveHint: false },
103
+ }, withErrorHandling(async (args) => {
104
+ const apiKey = getApiKey();
105
+ if (!apiKey) {
106
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
107
+ }
108
+ const promptInfluence = args.prompt_influence ?? 0.3;
109
+ const body = {
110
+ text: args.prompt,
111
+ prompt_influence: promptInfluence,
112
+ };
113
+ if (args.duration_seconds !== undefined) {
114
+ body.duration_seconds = Math.max(0.5, Math.min(22, args.duration_seconds));
115
+ }
116
+ const result = await elevenLabsAudio(apiKey, '/sound-generation', { method: 'POST', body: JSON.stringify(body) }, 'mp3');
117
+ return JSON.stringify({
118
+ ok: true,
119
+ file_path: result.filePath,
120
+ size_bytes: result.sizeBytes,
121
+ duration_seconds: args.duration_seconds ?? 'auto',
122
+ message: `Sound effect generated and saved to ${result.filePath} (${(result.sizeBytes / 1024).toFixed(1)} KB).`,
123
+ });
124
+ }));
125
+ }
126
+ //# sourceMappingURL=speech.js.map
package/dist/tools/transcription.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
2
+ export declare function registerTranscriptionTools(server: McpServer): void;
3
+ //# sourceMappingURL=transcription.d.ts.map
package/dist/tools/transcription.js ADDED
@@ -0,0 +1,50 @@
1
+ import { z } from 'zod';
2
+ import * as fs from 'fs';
3
+ import * as path from 'path';
4
+ import { getApiKey } from '../auth.js';
5
+ import { elevenLabsJson } from '../client.js';
6
+ import { ElevenLabsError } from '../types.js';
7
+ import { withErrorHandling } from '../utils.js';
8
+ export function registerTranscriptionTools(server) {
9
+ server.registerTool('transcribe_audio', {
10
+ description: 'Transcribe speech from an audio file to text using ElevenLabs Speech-to-Text. ' +
11
+ 'INPUT: Local audio file path (.mp3, .wav, .m4a, .ogg, .flac, .webm, .mp4). ' +
12
+ 'Auto-detects language by default. Specify language_code for better accuracy. ' +
13
+ 'COST: Credits based on audio duration.',
14
+ inputSchema: z.object({
15
+ file_path: z.string().min(1).describe('Absolute path to local audio file to transcribe.'),
16
+ language_code: z.string().optional().describe('Language code (e.g., "en", "es", "fr"). Auto-detected if omitted.'),
17
+ }),
18
+ annotations: { readOnlyHint: true, destructiveHint: false },
19
+ }, withErrorHandling(async (args) => {
20
+ const apiKey = getApiKey();
21
+ if (!apiKey) {
22
+ throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
23
+ }
24
+ const filePath = args.file_path;
25
+ // Read local file
26
+ if (!fs.existsSync(filePath)) {
27
+ throw new ElevenLabsError(`File not found: ${filePath}`, 'FILE_NOT_FOUND', 'Provide an absolute path to an existing audio file.');
28
+ }
29
+ const fileBuffer = fs.readFileSync(filePath);
30
+ const fileName = path.basename(filePath);
31
+ // Use FormData to send as multipart
32
+ const formData = new FormData();
33
+ formData.append('audio', new Blob([fileBuffer]), fileName);
34
+ if (args.language_code) {
35
+ formData.append('language_code', args.language_code);
36
+ }
37
+ const data = await elevenLabsJson(apiKey, '/speech-to-text', {
38
+ method: 'POST',
39
+ body: formData,
40
+ });
41
+ return JSON.stringify({
42
+ ok: true,
43
+ text: data.text,
44
+ word_count: data.words?.length || 0,
45
+ language: args.language_code || 'auto-detected',
46
+ message: `Transcription complete: ${data.text.length} characters, ${data.words?.length || 0} words.`,
47
+ });
48
+ }));
49
+ }
50
+ //# sourceMappingURL=transcription.js.map
@@ -0,0 +1,3 @@
1
import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
/**
 * Registers voice-discovery tools (the `list_voices` tool) on the given
 * MCP server.
 */
export declare function registerVoiceTools(server: McpServer): void;
3
+ //# sourceMappingURL=voices.d.ts.map
@@ -0,0 +1,49 @@
1
+ import { z } from 'zod';
2
+ import { getApiKey } from '../auth.js';
3
+ import { elevenLabsJson } from '../client.js';
4
+ import { ElevenLabsError } from '../types.js';
5
+ import { withErrorHandling } from '../utils.js';
6
/**
 * Registers voice-discovery tools on the MCP server.
 *
 * list_voices — searches the ElevenLabs voice catalogue (read-only, no
 * credits consumed) and returns a compact summary of each voice; the
 * returned voice_id values are intended for use with generate_speech.
 */
export function registerVoiceTools(server) {
    server.registerTool('list_voices', {
        description: 'Search and browse available ElevenLabs voices. FREE — no credits consumed. ' +
            'Returns voice ID, name, description, category, preview URL, and labels. ' +
            'Use voice_id from results with generate_speech.',
        inputSchema: z.object({
            search: z.string().optional().describe('Search query to filter voices by name.'),
            category: z.enum(['premade', 'cloned', 'generated', 'professional']).optional()
                .describe('Filter by voice category.'),
            page_size: z.number().int().min(1).max(100).optional().describe('Number of results (1-100). Default: 20.'),
        }),
        annotations: { readOnlyHint: true, destructiveHint: false },
    }, withErrorHandling(async (args) => {
        const apiKey = getApiKey();
        if (!apiKey) {
            throw new ElevenLabsError('ElevenLabs API key not configured', 'AUTH_REQUIRED', 'Ask the user for their API key, then call configure_elevenlabs_api_key.');
        }
        // Build the query string from the optional filters; page_size is
        // clamped to 100 and defaults to 20.
        const query = new URLSearchParams();
        if (args.search) {
            query.set('search', args.search);
        }
        if (args.category) {
            query.set('category', args.category);
        }
        query.set('page_size', String(Math.min(100, args.page_size ?? 20)));
        const response = await elevenLabsJson(apiKey, `https://api.elevenlabs.io/v2/voices?${query.toString()}`);
        // Project each voice down to the fields callers care about.
        const voices = response.voices.map(({ voice_id, name, category, description, labels, preview_url }) => ({
            voice_id,
            name,
            category,
            description,
            labels,
            preview_url,
        }));
        return JSON.stringify({
            ok: true,
            voices,
            count: voices.length,
            has_more: response.has_more || false,
            cost: 'FREE — no credits consumed',
            message: `Found ${voices.length} voice${voices.length === 1 ? '' : 's'}${args.search ? ` matching "${args.search}"` : ''}.`,
            hint: 'Use voice_id with generate_speech to create audio with a specific voice.',
        });
    }));
}
49
+ //# sourceMappingURL=voices.js.map
@@ -0,0 +1,60 @@
1
/** Default timeout applied to ElevenLabs API requests, in milliseconds. */
export declare const REQUEST_TIMEOUT_MS = 30000;
/** Connection details for the bridge process (see bridge state file helpers). */
export interface BridgeState {
    port: number;
    token: string;
}
/**
 * Error thrown by ElevenLabs tool handlers, carrying a machine-readable
 * code and an actionable resolution hint for the user.
 */
export declare class ElevenLabsError extends Error {
    readonly code: string;
    readonly resolution: string;
    constructor(message: string, code: string, resolution: string);
}
/** Summary of a single voice as returned by the voices API. */
export interface VoiceResult {
    voice_id: string;
    name: string;
    category?: string;
    description?: string;
    preview_url?: string;
    labels?: Record<string, string>;
}
/** Response shape of the list-voices endpoint. */
export interface VoicesResponse {
    voices: VoiceResult[];
    has_more?: boolean;
}
/** One section of a music composition plan (all fields optional on input). */
export interface CompositionSection {
    style?: string;
    lyrics?: string;
    duration_ms?: number;
}
/** User-supplied composition plan for music generation. */
export interface CompositionPlan {
    positive_global_styles?: string[];
    negative_global_styles?: string[];
    sections?: CompositionSection[];
}
/** Composition plan as returned by the music-plan endpoint (fields required). */
export interface MusicPlanResponse {
    positive_global_styles: string[];
    negative_global_styles: string[];
    sections: Array<{
        style: string;
        lyrics: string;
        duration_ms: number;
    }>;
}
/** A single word in a transcription, with start/end timestamps (seconds — TODO confirm units). */
export interface TranscriptionWord {
    text: string;
    start: number;
    end: number;
    type?: string;
}
/** Response shape of the speech-to-text endpoint. */
export interface TranscriptionResponse {
    text: string;
    words?: TranscriptionWord[];
}
/** A generated audio artifact written to disk. */
export interface AudioResult {
    filePath: string;
    sizeBytes: number;
}
/**
 * Resolve an error status code to an actionable resolution string.
 */
export declare function getErrorResolution(status: number, detail?: string): string;
60
+ //# sourceMappingURL=types.d.ts.map
package/dist/types.js ADDED
@@ -0,0 +1,34 @@
1
/** Default timeout applied to ElevenLabs API requests, in milliseconds. */
export const REQUEST_TIMEOUT_MS = 30_000;
/**
 * Error thrown by ElevenLabs tool handlers.
 *
 * Carries a machine-readable `code` plus an actionable `resolution` hint
 * that tool error responses surface to the user.
 */
export class ElevenLabsError extends Error {
    constructor(message, code, resolution) {
        super(message);
        this.name = 'ElevenLabsError';
        this.code = code;
        this.resolution = resolution;
    }
}
12
/**
 * Resolve an error status code (and optional error detail text) to an
 * actionable resolution string.
 *
 * Authoritative HTTP status codes are checked BEFORE keyword heuristics on
 * the detail text: previously a 429 whose detail contained "limit" (e.g.
 * "rate limit exceeded") matched the quota branch first and returned the
 * wrong advice.
 */
export function getErrorResolution(status, detail) {
    const msg = (detail || '').toLowerCase();
    // Definite status codes first — these are unambiguous.
    if (status === 401) {
        return 'Authentication failed. Check your ElevenLabs API key in Settings. Get one at https://elevenlabs.io/app/settings/api-keys';
    }
    if (status === 429) {
        return 'Rate limited. Wait a moment and try again.';
    }
    if (status === 403) {
        return 'Insufficient credits or quota exceeded. Check usage at https://elevenlabs.io/app/usage';
    }
    if (status === 422) {
        return 'Invalid request parameters. Check the input values and try again.';
    }
    // Keyword heuristics for statuses not covered above.
    if (msg.includes('unauthorized') || msg.includes('invalid api key')) {
        return 'Authentication failed. Check your ElevenLabs API key in Settings. Get one at https://elevenlabs.io/app/settings/api-keys';
    }
    if (msg.includes('quota') || msg.includes('limit') || msg.includes('credits')) {
        return 'Insufficient credits or quota exceeded. Check usage at https://elevenlabs.io/app/usage';
    }
    if (msg.includes('validation')) {
        return 'Invalid request parameters. Check the input values and try again.';
    }
    if (msg.includes('content') || msg.includes('policy') || msg.includes('moderation')) {
        return 'Content policy violation. Try a different prompt.';
    }
    return 'Please try again. If the issue persists, check your API key and credits at https://elevenlabs.io/app/settings/api-keys';
}
34
+ //# sourceMappingURL=types.js.map
@@ -0,0 +1,14 @@
1
import type { CallToolResult } from '@modelcontextprotocol/sdk/types.js';
/** Shape of an MCP tool handler: parsed args plus SDK extras in, a tool result out. */
type ToolHandler<T> = (args: T, extra: unknown) => Promise<CallToolResult>;
/**
 * Wraps a tool handler with standard error handling.
 *
 * - On success: returns the string result as a text content block.
 * - On ElevenLabsError: returns a structured JSON error with code and resolution.
 * - On unknown error: returns a generic error message.
 *
 * Secrets are never exposed in error messages.
 */
export declare function withErrorHandling<T>(fn: (args: T, extra: unknown) => Promise<string>): ToolHandler<T>;
export {};
14
+ //# sourceMappingURL=utils.d.ts.map
package/dist/utils.js ADDED
@@ -0,0 +1,42 @@
1
+ import { ElevenLabsError } from './types.js';
2
/**
 * Wraps a tool handler with standard error handling.
 *
 * - On success: returns the string result as a text content block.
 * - On ElevenLabsError: returns a structured JSON error with code and resolution.
 * - On unknown error: returns a generic error message.
 *
 * Secrets are never exposed in error messages.
 */
export function withErrorHandling(fn) {
    // Build a CallToolResult from a text payload; error results carry isError.
    const textResult = (text, isError) => (isError
        ? { content: [{ type: 'text', text }], isError: true }
        : { content: [{ type: 'text', text }] });
    return async (args, extra) => {
        try {
            return textResult(await fn(args, extra), false);
        }
        catch (error) {
            if (error instanceof ElevenLabsError) {
                return textResult(JSON.stringify({
                    ok: false,
                    error: error.message,
                    code: error.code,
                    resolution: error.resolution,
                }), true);
            }
            const message = error instanceof Error ? error.message : String(error);
            return textResult(JSON.stringify({ ok: false, error: message }), true);
        }
    };
}
42
+ //# sourceMappingURL=utils.js.map
package/package.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "name": "@mindstone-engineering/mcp-server-elevenlabs",
3
+ "version": "0.1.0",
4
+ "description": "ElevenLabs MCP server for Model Context Protocol hosts — music, TTS, sound effects, voices, transcription",
5
+ "license": "FSL-1.1-MIT",
6
+ "type": "module",
7
+ "bin": {
8
+ "mcp-server-elevenlabs": "dist/index.js"
9
+ },
10
+ "files": [
11
+ "dist",
12
+ "!dist/**/*.map"
13
+ ],
14
+ "repository": {
15
+ "type": "git",
16
+ "url": "https://github.com/nspr-io/mcp-servers.git",
17
+ "directory": "connectors/elevenlabs"
18
+ },
19
+ "homepage": "https://github.com/nspr-io/mcp-servers/tree/main/connectors/elevenlabs",
20
+ "publishConfig": {
21
+ "access": "public"
22
+ },
23
+ "scripts": {
24
+ "build": "tsc && shx chmod +x dist/index.js",
25
+ "prepare": "npm run build",
26
+ "watch": "tsc --watch",
27
+ "start": "node dist/index.js",
28
+ "test": "vitest run",
29
+ "test:watch": "vitest",
30
+ "test:coverage": "vitest run --coverage"
31
+ },
32
+ "dependencies": {
33
+ "@modelcontextprotocol/sdk": "^1.26.0",
34
+ "zod": "^3.23.0"
35
+ },
36
+ "devDependencies": {
37
+ "@mindstone-engineering/mcp-test-harness": "file:../../test-harness",
38
+ "@types/node": "^22",
39
+ "@vitest/coverage-v8": "^4.1.3",
40
+ "msw": "^2.13.2",
41
+ "shx": "^0.3.4",
42
+ "typescript": "^5.8.2",
43
+ "vitest": "^4.1.3"
44
+ },
45
+ "engines": {
46
+ "node": ">=20"
47
+ }
48
+ }