aniclaude 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,40 @@
1
+ # AniClaude
2
+
3
+ Voice-enabled Claude Code - talk to Claude in your terminal with an animated avatar.
4
+
5
+ ## Requirements
6
+
7
+ - Node.js 18+
8
+ - [Claude Code](https://claude.ai/code) installed (`npm install -g @anthropic-ai/claude-code`)
9
+ - Google Chrome or Safari (for avatar window)
10
+ - Microphone access
11
+
12
+ ## Usage
13
+
14
+ ```bash
15
+ npx aniclaude
16
+ ```
17
+
18
+ This will:
19
+ 1. Start Claude Code with voice input enabled
20
+ 2. Open a small avatar window in your browser
21
+ 3. Click "Start" in the avatar window to begin speaking
22
+
23
+ ## Options
24
+
25
+ ```
26
+ --with-permissions Run Claude with permission prompts (default: skipped)
27
+ --no-browser Don't open the avatar window
28
+ -h, --help Show help
29
+ -v, --version Show version
30
+ ```
31
+
32
+ ## How it works
33
+
34
+ - **Speech-to-Text**: Uses Web Speech API (browser-native, instant)
35
+ - **Text-to-Speech**: Uses ElevenLabs API via hosted server
36
+ - **Avatar**: Animated MP4 that syncs with speech
37
+
38
+ ## License
39
+
40
+ MIT
@@ -0,0 +1,602 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>AniClaude</title>
7
+ <style>
8
+ * { margin: 0; padding: 0; box-sizing: border-box; }
9
+ html, body {
10
+ width: 100%;
11
+ height: 100%;
12
+ overflow: hidden;
13
+ background: transparent;
14
+ }
15
+ body {
16
+ display: flex;
17
+ align-items: center;
18
+ justify-content: center;
19
+ background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%);
20
+ font-family: system-ui, -apple-system, sans-serif;
21
+ }
22
+ #avatar-container {
23
+ width: 100%;
24
+ height: 100%;
25
+ display: flex;
26
+ align-items: center;
27
+ justify-content: center;
28
+ position: relative;
29
+ }
30
+ #avatar {
31
+ max-width: 90%;
32
+ max-height: 90%;
33
+ object-fit: contain;
34
+ border-radius: 8px;
35
+ }
36
+ #status {
37
+ position: absolute;
38
+ bottom: 12px;
39
+ left: 50%;
40
+ transform: translateX(-50%);
41
+ background: rgba(0, 0, 0, 0.7);
42
+ color: white;
43
+ padding: 6px 14px;
44
+ border-radius: 16px;
45
+ font-size: 12px;
46
+ opacity: 0;
47
+ transition: opacity 0.3s ease;
48
+ }
49
+ #status.visible { opacity: 1; }
50
+ #start-overlay {
51
+ position: absolute;
52
+ top: 0;
53
+ left: 0;
54
+ right: 0;
55
+ bottom: 0;
56
+ background: rgba(0, 0, 0, 0.7);
57
+ display: flex;
58
+ align-items: center;
59
+ justify-content: center;
60
+ cursor: pointer;
61
+ z-index: 100;
62
+ }
63
+ #start-overlay.hidden { display: none; }
64
+ #start-btn {
65
+ background: #44ff44;
66
+ color: #000;
67
+ border: none;
68
+ padding: 16px 32px;
69
+ border-radius: 24px;
70
+ font-size: 16px;
71
+ font-weight: bold;
72
+ cursor: pointer;
73
+ transition: transform 0.2s;
74
+ }
75
+ #start-btn:hover { transform: scale(1.05); }
76
+ #connection {
77
+ position: absolute;
78
+ top: 10px;
79
+ right: 10px;
80
+ width: 12px;
81
+ height: 12px;
82
+ border-radius: 50%;
83
+ background: #ff4444;
84
+ transition: background 0.3s ease;
85
+ }
86
+ #connection.connected { background: #44ff44; }
87
+ #connection.connecting { background: #ffaa00; animation: pulse 1s infinite; }
88
+ @keyframes pulse { 0%, 100% { opacity: 1; } 50% { opacity: 0.5; } }
89
+ /* Listening state - green border */
90
+ body.listening { box-shadow: inset 0 0 0 3px #44ff44; }
91
+ body.processing { box-shadow: inset 0 0 0 3px #ffaa00; }
92
+ /* Push to talk button */
93
+ #ptt-btn {
94
+ position: absolute;
95
+ bottom: 10px;
96
+ right: 10px;
97
+ width: 40px;
98
+ height: 40px;
99
+ border-radius: 50%;
100
+ border: none;
101
+ background: rgba(255, 255, 255, 0.15);
102
+ color: white;
103
+ cursor: pointer;
104
+ display: flex;
105
+ align-items: center;
106
+ justify-content: center;
107
+ transition: all 0.2s ease;
108
+ z-index: 50;
109
+ }
110
+ #ptt-btn:hover { background: rgba(255, 255, 255, 0.25); transform: scale(1.05); }
111
+ #ptt-btn:active { transform: scale(0.95); }
112
+ #ptt-btn.listening { background: #44ff44; color: #000; }
113
+ #ptt-btn svg { width: 20px; height: 20px; }
114
+ </style>
115
+ </head>
116
+ <body>
117
+ <div id="avatar-container">
118
+ <div id="connection" class="connecting"></div>
119
+ <video id="avatar" loop autoplay muted playsinline></video>
120
+ <div id="status"></div>
121
+ <div id="start-overlay">
122
+ <button id="start-btn">Click to Start</button>
123
+ </div>
124
+ <button id="ptt-btn" title="Push to Talk">
125
+ <svg viewBox="0 0 24 24" fill="currentColor">
126
+ <path d="M12 14c1.66 0 3-1.34 3-3V5c0-1.66-1.34-3-3-3S9 3.34 9 5v6c0 1.66 1.34 3 3 3zm-1-9c0-.55.45-1 1-1s1 .45 1 1v6c0 .55-.45 1-1 1s-1-.45-1-1V5zm6 6c0 2.76-2.24 5-5 5s-5-2.24-5-5H5c0 3.53 2.61 6.43 6 6.92V21h2v-3.08c3.39-.49 6-3.39 6-6.92h-2z"/>
127
+ </svg>
128
+ </button>
129
+ </div>
130
+
131
+ <script>
132
// Config from URL params (the CLI opening this window is expected to pass
// ?port= and ?server= — TODO confirm against the CLI launcher).
const params = new URLSearchParams(window.location.search);
// NOTE(review): this fallback '3457' equals config.js HTTP_PORT, while
// config.js WS_PORT defaults to 3458 — confirm which port the CLI sends.
const WS_PORT = params.get('port') || '3457';
// NOTE(review): fallback differs from config.js TTS_SERVER_URL
// ('https://api.aniclaude.com') — presumably the CLI always supplies
// ?server=; verify.
const SERVER_URL = params.get('server') || 'http://localhost:3456';

// Elements
const avatar = document.getElementById('avatar');           // looping avatar <video>
const status = document.getElementById('status');           // status pill at bottom
const connection = document.getElementById('connection');   // connection dot, top-right
const startOverlay = document.getElementById('start-overlay');
const startBtn = document.getElementById('start-btn');
const pttBtn = document.getElementById('ptt-btn');          // push-to-talk toggle

// State
let ws = null;              // WebSocket to the local CLI bridge (see connectWebSocket)
let isConnected = false;    // true while the WebSocket is open
let isSpeaking = false;     // true while TTS audio is being fetched/played
let audioQueue = [];        // preprocessed text chunks waiting to be spoken
let currentAudio = null;    // HTMLAudioElement currently playing, if any
let isStarted = false;      // set once the user clicks Start (user-gesture gate)

// Video URLs (served from local server)
const IDLE_VIDEO = '/idle.mp4';
const TALKING_VIDEO = '/speak.mp4';

// Initialize avatar with the idle loop.
avatar.src = IDLE_VIDEO;
159
+
160
// Switch the avatar video between the idle and talking loops.
// No-op when the requested clip is already loaded, so the loop
// is not restarted mid-playback.
function setAvatarState(state) {
  let desired;
  if (state === 'talking') {
    desired = TALKING_VIDEO;
  } else {
    desired = IDLE_VIDEO;
  }
  // avatar.src reads back as an absolute URL, so compare by suffix.
  const alreadyShowing = avatar.src.endsWith(desired);
  if (alreadyShowing) return;
  avatar.src = desired;
  avatar.play().catch(() => {});
}
168
+
169
// Pending auto-hide timer for showStatus; cleared whenever a new status
// is shown so a stale timeout cannot hide a newer message early.
let statusHideTimer = null;

/**
 * Show a message in the status pill at the bottom of the window.
 * @param {string} text - message to display
 * @param {number} duration - auto-hide after this many ms; 0 = persistent
 */
function showStatus(text, duration = 0) {
  // Bug fix: a previous timed status left a setTimeout pending; if a new
  // (possibly persistent) status was shown before it fired, the old timer
  // hid the new message. Cancel it before showing anything.
  if (statusHideTimer !== null) {
    clearTimeout(statusHideTimer);
    statusHideTimer = null;
  }
  status.textContent = text;
  status.classList.add('visible');
  if (duration > 0) {
    statusHideTimer = setTimeout(() => {
      status.classList.remove('visible');
      statusHideTimer = null;
    }, duration);
  }
}
176
+
177
// Hide the status pill (text is kept; only visibility is toggled off).
function hideStatus() {
  status.classList.toggle('visible', false);
}
180
+
181
// ========== Web Speech STT ==========
let recognition = null;             // lazily-created SpeechRecognition instance
let isListening = false;            // true between onstart and onend/onerror
// Guards against calling recognition.start() a second time before the
// asynchronous onstart callback has fired.
let isStartingRecognition = false;

// Create and wire the SpeechRecognition instance. Single-utterance mode
// (continuous = false) with interim results shown live in the status pill.
// Safe to call more than once; each call replaces `recognition`.
function initSpeechRecognition() {
  // webkit prefix covers Chrome/Safari; plain name for other engines.
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  if (!SpeechRecognition) {
    console.error('Web Speech API not supported');
    showStatus('Speech not supported', 3000);
    return;
  }

  recognition = new SpeechRecognition();
  recognition.continuous = false;       // stop automatically after one utterance
  recognition.interimResults = true;    // stream partial transcripts
  recognition.lang = 'en-US';

  // Mic is live: reflect it in the UI (green border + green PTT button).
  recognition.onstart = () => {
    console.log('[STT] Listening...');
    isListening = true;
    isStartingRecognition = false;
    document.body.classList.add('listening');
    pttBtn.classList.add('listening');
    showStatus('Listening...');
  };

  // Accumulate interim vs. final transcript text from the new results
  // (starting at event.resultIndex, per the Web Speech API contract).
  recognition.onresult = (event) => {
    let finalTranscript = '';
    let interimTranscript = '';

    for (let i = event.resultIndex; i < event.results.length; i++) {
      const transcript = event.results[i][0].transcript;
      if (event.results[i].isFinal) {
        finalTranscript += transcript;
      } else {
        interimTranscript += transcript;
      }
    }

    // Live-preview partial speech in the status pill.
    if (interimTranscript) {
      showStatus(interimTranscript);
    }

    // Final transcript goes to the CLI over the WebSocket bridge.
    if (finalTranscript) {
      console.log('[STT] Final:', finalTranscript);
      sendVoiceInput(finalTranscript);
    }
  };

  // Fires after every session (normal end, stop(), or following onerror):
  // reset flags and UI so startListening() can run again.
  recognition.onend = () => {
    console.log('[STT] Ended');
    isListening = false;
    isStartingRecognition = false;
    document.body.classList.remove('listening');
    pttBtn.classList.remove('listening');
    hideStatus();
  };

  // 'no-speech' is routine (silence timeout) and not surfaced to the user;
  // anything else is shown briefly in the status pill.
  recognition.onerror = (event) => {
    console.error('[STT] Error:', event.error);
    isListening = false;
    isStartingRecognition = false;
    document.body.classList.remove('listening');
    pttBtn.classList.remove('listening');
    if (event.error !== 'no-speech') {
      showStatus('Error: ' + event.error, 2000);
    }
  };
}
251
+
252
// Begin a speech-recognition session unless the user hasn't clicked Start,
// a session is already active/starting, or TTS audio is playing (so the
// mic doesn't transcribe our own speech).
function startListening() {
  const busy = isListening || isSpeaking || isStartingRecognition;
  if (!isStarted || busy) return;

  if (!recognition) {
    initSpeechRecognition();
  }
  if (!recognition) return; // Web Speech API unavailable

  isStartingRecognition = true;
  try {
    recognition.start();
  } catch (e) {
    // start() throws if the recognizer is in a bad state; clear the
    // guard so a later attempt can retry.
    isStartingRecognition = false;
    console.error('[STT] Start error:', e);
  }
}
265
+
266
// One-time entry point for the user's first click. Browsers gate mic
// capture and audio playback behind a user gesture, hence the overlay.
function handleStart() {
  if (!isStarted) {
    isStarted = true;
    startOverlay.classList.add('hidden');
    console.log('[AniClaude] Started by user');
    startListening();
  }
}
273
+
274
// Wire up the start gesture — clicking either the button or the dimmed
// overlay behind it begins the session.
for (const el of [startBtn, startOverlay]) {
  el.addEventListener('click', handleStart);
}

// Push-to-talk button toggles listening; before the session has started
// it doubles as the Start gesture. Ignored while TTS is speaking.
pttBtn.addEventListener('click', () => {
  if (!isStarted) {
    handleStart();
  } else if (isListening) {
    stopListening();
  } else if (!isSpeaking) {
    startListening();
  }
});
290
+
291
// Ask the recognizer to finish; flag/UI cleanup happens asynchronously
// in the recognizer's onend handler.
function stopListening() {
  if (!recognition) return;
  if (!isListening) return;
  recognition.stop();
}
296
+
297
// ========== Text Preprocessing for TTS ==========
/**
 * Strip markdown and code artifacts from a response so it reads naturally
 * when spoken aloud.
 *
 * Transformations, in order: code fences/backticks, bold/italic markers,
 * markdown headers, bullet and numbered lists (turned into pauses), URLs
 * (replaced with "link"), file paths (basename only), HTML tags, a few
 * HTML entities and symbols verbalized, arrows read as words, then
 * newline -> pause conversion and whitespace/period normalization.
 *
 * @param {string} text - raw response text
 * @returns {string} cleaned, speakable text
 */
function preprocessForTTS(text) {
  let processed = text;

  // Remove code fences and inline backticks but keep their content.
  processed = processed.replace(/```/g, ' ');
  processed = processed.replace(/`/g, '');

  // Strip markdown bold/italic markers, keeping the wrapped text.
  processed = processed.replace(/\*\*([^*]+)\*\*/g, '$1');
  processed = processed.replace(/\*([^*]+)\*/g, '$1');
  processed = processed.replace(/__([^_]+)__/g, '$1');
  processed = processed.replace(/_([^_]+)_/g, '$1');

  // Drop markdown header markers, keep the heading text.
  processed = processed.replace(/^#{1,6}\s+/gm, '');

  // Bullet points become a short pause.
  processed = processed.replace(/^[\s]*[-*•]\s+/gm, '. ');

  // Numbered lists: pause, then speak the number.
  processed = processed.replace(/^[\s]*(\d+)\.\s+/gm, '. $1, ');

  // URLs are unpronounceable — just say "link".
  processed = processed.replace(/https?:\/\/[^\s]+/g, 'link');

  // File paths: speak only the basename.
  processed = processed.replace(/[\/\\][\w\-\.\/\\]+\.\w+/g, (match) => {
    const filename = match.split(/[\/\\]/).pop();
    return filename;
  });

  // Remove HTML tags.
  processed = processed.replace(/<[^>]+>/g, '');

  // Verbalize common HTML entities and symbols.
  // Bug fix: handle &amp; LAST (standard decode-last convention), so the
  // '&' of &lt;/&gt; is never consumed before those entities are matched.
  processed = processed.replace(/&lt;/g, 'less than');
  processed = processed.replace(/&gt;/g, 'greater than');
  processed = processed.replace(/&amp;/g, 'and');
  processed = processed.replace(/\$/g, 'dollar ');
  processed = processed.replace(/[{}\[\]|]/g, ' ');

  // Arrows read as words.
  processed = processed.replace(/->/g, ' to ');
  processed = processed.replace(/=>/g, ' becomes ');
  processed = processed.replace(/→/g, ' to ');
  processed = processed.replace(/←/g, ' from ');
  processed = processed.replace(/↔/g, ' both ways ');
  // (Removed: a replace of '...' with '...' — a literal no-op; runs of
  // periods are collapsed below anyway.)

  // Newlines become pauses: blank line = sentence break, single = comma.
  processed = processed.replace(/\n{2,}/g, '. ');
  processed = processed.replace(/\n/g, ', ');

  // Normalize whitespace, then collapse runs of periods.
  processed = processed.replace(/\s+/g, ' ');
  processed = processed.replace(/\.+/g, '.');
  processed = processed.replace(/\.\s*\./g, '.');

  return processed.trim();
}
364
+
365
// ========== TTS via Server (Streaming) ==========

// Check once whether MediaSource can stream MP3; decides which playback
// path speak() uses below.
const supportsStreamingMP3 = window.MediaSource &&
  MediaSource.isTypeSupported('audio/mpeg');

/**
 * Fetch TTS audio for `text` from the server and play it.
 * Sets the speaking flag and talking avatar for the duration, and always
 * restores idle state and advances the queue afterwards — even on error.
 * @param {string} text - preprocessed text to speak (skipped if < 5 chars)
 */
async function speak(text) {
  // Skip empty/trivially short chunks.
  if (!text || text.length < 5) return;

  console.log('[TTS] Speaking:', text.substring(0, 50) + '...');
  isSpeaking = true;
  setAvatarState('talking');

  try {
    const response = await fetch(SERVER_URL + '/tts', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ text })
    });

    if (!response.ok) {
      throw new Error('TTS request failed: ' + response.status);
    }

    // Use streaming playback if supported, otherwise fall back to buffered
    // (download the whole MP3 first).
    if (supportsStreamingMP3) {
      await playAudioStreaming(response);
    } else {
      const audioData = await response.arrayBuffer();
      await playAudioBuffered(audioData);
    }
  } catch (error) {
    // Best-effort: a failed TTS request is logged, not surfaced; the
    // finally block still advances the queue.
    console.error('[TTS] Error:', error);
  } finally {
    isSpeaking = false;
    setAvatarState('idle');
    // Speak the next queued chunk (or resume listening when empty).
    processAudioQueue();
  }
}
404
+
405
// Streaming playback using MediaSource API.
// Reads the fetch response body chunk-by-chunk, appending each chunk to a
// SourceBuffer so audio starts as soon as the first chunk arrives instead
// of after the full download.
//
// NOTE(review): the Promise executor is async (an antipattern — a throw
// before the onended/onerror handlers are assigned would leave the promise
// unsettled). In practice errors are caught inside processChunk, which
// resolves explicitly; confirm before restructuring.
async function playAudioStreaming(response) {
  return new Promise(async (resolve) => {
    const mediaSource = new MediaSource();
    const audio = new Audio();
    currentAudio = audio;
    audio.src = URL.createObjectURL(mediaSource);

    mediaSource.addEventListener('sourceopen', async () => {
      const sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg');
      const reader = response.body.getReader();
      let hasStartedPlaying = false;

      // Recursively pump one chunk at a time from the network into the
      // SourceBuffer, waiting out any in-progress buffer update.
      const processChunk = async () => {
        try {
          const { done, value } = await reader.read();

          if (done) {
            // Signal end of stream when buffer is done updating; the
            // promise itself resolves later via audio.onended.
            if (!sourceBuffer.updating) {
              mediaSource.endOfStream();
            } else {
              sourceBuffer.addEventListener('updateend', () => {
                if (mediaSource.readyState === 'open') {
                  mediaSource.endOfStream();
                }
              }, { once: true });
            }
            return;
          }

          // Wait for any in-flight append to finish — appendBuffer throws
          // while sourceBuffer.updating is true.
          if (sourceBuffer.updating) {
            await new Promise(r => sourceBuffer.addEventListener('updateend', r, { once: true }));
          }

          // Append chunk
          sourceBuffer.appendBuffer(value);

          // Start playing after first chunk
          if (!hasStartedPlaying) {
            hasStartedPlaying = true;
            audio.play().catch(console.error);
            console.log('[TTS] Streaming playback started');
          }

          // Process next chunk
          await processChunk();
        } catch (e) {
          // Any network/append failure: log and settle the promise so
          // speak()'s finally block can run.
          console.error('[TTS] Streaming error:', e);
          resolve();
        }
      };

      await processChunk();
    });

    // Normal completion path: release the object URL and settle.
    audio.onended = () => {
      URL.revokeObjectURL(audio.src);
      currentAudio = null;
      resolve();
    };

    // Decode/playback failure path: same cleanup, still resolves (never
    // rejects) so the caller's queue keeps moving.
    audio.onerror = () => {
      URL.revokeObjectURL(audio.src);
      currentAudio = null;
      resolve();
    };
  });
}
475
+
476
// Fallback playback path for browsers without MP3 MediaSource streaming:
// takes the fully-downloaded MP3 bytes and plays them from an object URL.
// Resolves (never rejects) on end, decode error, or play() rejection so
// the caller's queue always advances.
function playAudioBuffered(arrayBuffer) {
  return new Promise((resolve) => {
    const objectUrl = URL.createObjectURL(
      new Blob([arrayBuffer], { type: 'audio/mpeg' })
    );
    const player = new Audio(objectUrl);
    currentAudio = player;

    // Shared cleanup for both completion and failure.
    const finish = () => {
      URL.revokeObjectURL(objectUrl);
      currentAudio = null;
      resolve();
    };
    player.onended = finish;
    player.onerror = finish;

    player.play().catch(resolve);
  });
}
499
+
500
// Clean a response chunk for speech and enqueue it; kicks the queue
// immediately when nothing is currently being spoken.
function queueSpeech(text) {
  const speakable = preprocessForTTS(text);
  // Skip fragments that are too short to be worth voicing.
  if (speakable.length >= 5) {
    audioQueue.push(speakable);
    if (!isSpeaking) {
      processAudioQueue();
    }
  }
}
510
+
511
// Speak the next queued chunk; once the queue drains, resume listening
// after a short grace period so the mic doesn't catch trailing audio.
function processAudioQueue() {
  const next = audioQueue.shift();
  if (next === undefined) {
    setTimeout(startListening, 500);
  } else {
    speak(next);
  }
}
521
+
522
// ========== WebSocket Connection ==========
// Connect to the local CLI bridge and keep the connection dot in sync.
// Reconnects forever with a fixed 2-second delay after any close.
function connectWebSocket() {
  console.log('[WS] Connecting to localhost:' + WS_PORT);
  connection.className = 'connecting';

  ws = new WebSocket(`ws://localhost:${WS_PORT}`);

  ws.onopen = () => {
    console.log('[WS] Connected');
    isConnected = true;
    connection.className = 'connected';
    showStatus('Connected', 2000);
    // On a reconnect after the user already clicked Start, resume
    // listening automatically.
    if (isStarted) {
      setTimeout(startListening, 500);
    }
  };

  ws.onmessage = (event) => {
    // Malformed frames (and handler failures) are logged, not fatal.
    try {
      handleMessage(JSON.parse(event.data));
    } catch (e) {
      console.error('[WS] Parse error:', e);
    }
  };

  ws.onclose = () => {
    console.log('[WS] Disconnected');
    isConnected = false;
    connection.className = '';
    // Retry after 2 seconds.
    setTimeout(connectWebSocket, 2000);
  };

  ws.onerror = (error) => console.error('[WS] Error:', error);
}
563
+
564
// Dispatch one parsed message from the CLI bridge.
function handleMessage(data) {
  console.log('[WS] Received:', data.type);

  if (data.type === 'claude_response') {
    if (data.text) {
      // Stop the mic first so we don't transcribe our own TTS output.
      stopListening();
      queueSpeech(data.text);
    }
  } else if (data.type === 'status') {
    showStatus(data.message, 2000);
  }
  // Unknown message types are logged above and otherwise ignored.
}
580
+
581
// Forward a final speech transcript to the CLI over the WebSocket bridge.
// Silently dropped when the bridge is not connected.
function sendVoiceInput(text) {
  if (!ws || !isConnected) return;

  const payload = { type: 'voice_input', text: text };
  ws.send(JSON.stringify(payload));
  showStatus('Sent: ' + text.substring(0, 30) + '...', 1500);
}
590
+
591
// ---- Boot ----
initSpeechRecognition();
connectWebSocket();

// Tidy up the socket and recognizer when the window closes.
window.addEventListener('beforeunload', () => {
  ws?.close();
  recognition?.stop();
});
600
+ </script>
601
+ </body>
602
+ </html>
Binary file
Binary file
@@ -0,0 +1,9 @@
1
/**
 * AniClaude CLI Configuration
 *
 * Declaration file for dist/config.js.
 */
/** Remote TTS server base URL (env override: ANICLAUDE_SERVER). */
export declare const TTS_SERVER_URL: string;
/** Local HTTP port (env override: ANICLAUDE_HTTP_PORT; default 3457). */
export declare const HTTP_PORT: number;
/** Local WebSocket port (env override: ANICLAUDE_WS_PORT; default 3458). */
export declare const WS_PORT: number;
/** Avatar window width in pixels. */
export declare const WINDOW_WIDTH = 300;
/** Avatar window height in pixels. */
export declare const WINDOW_HEIGHT = 350;
/** ANSI-colored ASCII-art startup banner. */
export declare const BANNER = "\n\u001B[36m _ _ ____ _ _\n / \\ _ __ (_) ___| | __ _ _ _ __| | ___\n / _ \\ | '_ \\| | | | |/ _` | | | |/ _` |/ _ \\\n / ___ \\| | | | | |___| | (_| | |_| | (_| | __/\n/_/ \\_\\_| |_|_|\\____|_|\\__,_|\\__,_|\\__,_|\\___|\n\u001B[0m";
package/dist/config.js ADDED
@@ -0,0 +1,22 @@
1
"use strict";
/**
 * AniClaude CLI Configuration
 *
 * Compiled CommonJS output; each value can be overridden via environment
 * variables when the CLI starts.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.BANNER = exports.WINDOW_HEIGHT = exports.WINDOW_WIDTH = exports.WS_PORT = exports.HTTP_PORT = exports.TTS_SERVER_URL = void 0;
// Production TTS server URL (deployed on Render)
exports.TTS_SERVER_URL = process.env.ANICLAUDE_SERVER || 'https://api.aniclaude.com';
// Local server ports
// NOTE(review): index.html falls back to WebSocket port '3457' and server
// 'http://localhost:3456', which do not match these defaults (HTTP 3457 /
// WS 3458) — presumably the CLI always passes ?port=/?server=; verify.
exports.HTTP_PORT = parseInt(process.env.ANICLAUDE_HTTP_PORT || '3457', 10);
exports.WS_PORT = parseInt(process.env.ANICLAUDE_WS_PORT || '3458', 10);
// Avatar window size
exports.WINDOW_WIDTH = 300;
exports.WINDOW_HEIGHT = 350;
// CLI branding
exports.BANNER = `
\x1b[36m _ _ ____ _ _
 / \\ _ __ (_) ___| | __ _ _ _ __| | ___
 / _ \\ | '_ \\| | | | |/ _\` | | | |/ _\` |/ _ \\
 / ___ \\| | | | | |___| | (_| | |_| | (_| | __/
 /_/ \\_\\_| |_|_|\\____|_|\\__,_|\\__,_|\\__,_|\\___|
 \x1b[0m`;
@@ -0,0 +1,6 @@
1
#!/usr/bin/env node
/**
 * AniClaude CLI
 * Voice-enabled Claude Code - talk to Claude in your terminal
 */
// Declaration stub for the CLI entry point: it exports nothing; the empty
// export marks the file as a module.
export {};