echo-ai-agent 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,63 @@
1
const { execSync } = require('child_process'); // kept for the commented-out `npm test` hook below
const chalk = require('chalk');
const fs = require('fs');
const path = require('path');

const packageJson = require('../package.json');

// Pre-publish sanity script: secret hygiene, (stubbed) tests, entry-point
// existence, and CLI executable permissions. Exits non-zero on any failure.
console.log(chalk.cyan.bold(`Preparing to publish Echo AI Agent v${packageJson.version}...\n`));

try {
  // 1. Check for sensitive files
  console.log(chalk.yellow('Checking for sensitive files...'));
  if (fs.existsSync(path.join(__dirname, '..', '.env'))) {
    console.log(chalk.red('⚠️ WARNING: .env file found. Make sure it is in .gitignore and .npmignore!'));
    console.log(chalk.gray('Checking .npmignore...'));
    const npmIgnorePath = path.join(__dirname, '..', '.npmignore');
    // FIX: the original read .npmignore unconditionally; a missing file threw
    // a confusing ENOENT instead of the explicit abort below. A missing
    // .npmignore is treated the same as ".env not ignored".
    const npmIgnore = fs.existsSync(npmIgnorePath)
      ? fs.readFileSync(npmIgnorePath, 'utf8')
      : '';
    if (!npmIgnore.includes('.env')) {
      console.error(chalk.bgRed.white(' CRITICAL: .env is NOT in .npmignore. Aborting publish to protect secrets. '));
      process.exit(1);
    }
  }
  console.log(chalk.green('✓ Security check passed.'));

  // 2. Run tests (simulated)
  console.log(chalk.yellow('\nRunning tests...'));
  // execSync('npm test', { stdio: 'inherit' }); // Uncomment when tests are real
  console.log(chalk.green('✓ Tests passed (simulated).'));

  // 3. Verify main entry point
  console.log(chalk.yellow('\nVerifying entry point...'));
  if (!fs.existsSync(path.join(__dirname, '..', packageJson.main))) {
    console.error(chalk.red(`❌ Error: Main file ${packageJson.main} not found!`));
    process.exit(1);
  }
  console.log(chalk.green('✓ Entry point found.'));

  // 4. Verify CLI entry point
  console.log(chalk.yellow('\nVerifying CLI bin...'));
  // FIX: npm allows "bin" to be either a string or an object map; the
  // original crashed with a TypeError when it was a string.
  const binPath = typeof packageJson.bin === 'string'
    ? packageJson.bin
    : packageJson.bin.echo;
  if (!fs.existsSync(path.join(__dirname, '..', binPath))) {
    console.error(chalk.red(`❌ Error: CLI file ${binPath} not found!`));
    process.exit(1);
  }

  // Make CLI executable
  try {
    if (process.platform !== 'win32') {
      // FIX: use fs.chmodSync instead of interpolating the path into a shell
      // command — execSync(`chmod +x ${p}`) broke on paths containing spaces
      // and was shell-injectable.
      fs.chmodSync(path.join(__dirname, '..', binPath), 0o755);
      console.log(chalk.green('✓ CLI executable permissions set.'));
    }
  } catch (e) {
    console.log(chalk.yellow('⚠️ Could not set executable permissions (might be Windows).'));
  }

  console.log(chalk.green('\n✅ Project is ready for publishing!'));
  console.log(chalk.white('\nTo publish, run:'));
  console.log(chalk.cyan('npm login'));
  console.log(chalk.cyan('npm publish --access public'));

} catch (error) {
  console.error(chalk.red('\n❌ Pre-publish check failed:'), error.message);
  process.exit(1);
}
@@ -0,0 +1,97 @@
1
const inquirer = require('inquirer');
const chalk = require('chalk');
const fs = require('fs');
const path = require('path');
const ConfigManager = require('./config-manager');

const config = new ConfigManager();

/**
 * Interactive first-run setup wizard.
 *
 * Prompts for the Google Gemini API key plus UI preferences, persists them
 * through ConfigManager, and writes the key into a project-local .env file.
 *
 * @returns {Promise<void>} resolves once configuration has been saved.
 */
async function run() {
  console.log(chalk.cyan.bold('\n🔧 Echo Setup Wizard\n'));
  console.log(chalk.gray('Let\'s configure your AI assistant...\n'));

  const answers = await inquirer.prompt([
    {
      type: 'input',
      name: 'apiKey',
      message: 'Enter your Google Gemini API Key:',
      validate: (input) => {
        if (!input || input.trim().length === 0) {
          return 'API Key is required. Get one from https://aistudio.google.com/';
        }
        return true;
      }
    },
    {
      type: 'list',
      name: 'theme',
      message: 'Choose your theme:',
      choices: [
        { name: chalk.cyan('● Cyan (Classic JARVIS)'), value: 'cyan' },
        { name: chalk.hex('#a855f7')('● Purple (Royal)'), value: 'purple' },
        { name: chalk.green('● Green (Matrix)'), value: 'green' },
        { name: chalk.hex('#ffd700')('● Gold (Iron Man)'), value: 'gold' },
        { name: chalk.red('● Red (Cyberpunk)'), value: 'red' },
        { name: chalk.blue('● Blue (Ocean)'), value: 'blue' }
      ],
      default: 'cyan'
    },
    {
      type: 'list',
      name: 'position',
      message: 'Where should Echo appear on your screen?',
      choices: [
        { name: 'Bottom Right (Recommended)', value: 'bottom-right' },
        { name: 'Bottom Left', value: 'bottom-left' },
        { name: 'Top Right', value: 'top-right' },
        { name: 'Top Left', value: 'top-left' },
        { name: 'Center', value: 'center' }
      ],
      default: 'bottom-right'
    },
    {
      type: 'list',
      name: 'size',
      message: 'Choose window size:',
      choices: [
        { name: 'Small (250x350)', value: 'small' },
        { name: 'Medium (350x450) - Recommended', value: 'medium' },
        { name: 'Large (450x550)', value: 'large' }
      ],
      default: 'medium'
    },
    {
      type: 'confirm',
      name: 'alwaysOnTop',
      message: 'Keep Echo always on top of other windows?',
      default: true
    },
    {
      type: 'confirm',
      name: 'startOnBoot',
      message: 'Start Echo automatically when you log in?',
      default: false
    }
  ]);

  // Save configuration
  config.set('apiKey', answers.apiKey);
  config.set('theme', answers.theme);
  config.set('position', answers.position);
  config.set('size', answers.size);
  config.set('alwaysOnTop', answers.alwaysOnTop);
  config.set('startOnBoot', answers.startOnBoot);
  config.set('configured', true);

  // Create/update .env file holding the API key.
  const envPath = path.join(__dirname, '..', '.env');
  const envContent = `GOOGLE_AI_API_KEY=${answers.apiKey}\n`;
  // FIX: the key is a secret — create the file owner-read/write only.
  // `mode` applies only on creation, so chmod as well in case the file
  // already existed with looser permissions (no-op effect on Windows).
  fs.writeFileSync(envPath, envContent, { mode: 0o600 });
  fs.chmodSync(envPath, 0o600);

  console.log(chalk.green('\n✓ Configuration saved successfully!'));
  console.log(chalk.gray('\nYou can change these settings anytime with:'));
  console.log(chalk.cyan('  echo config --list'));
  console.log(chalk.cyan('  echo setup\n'));
}

module.exports = { run };
@@ -0,0 +1,36 @@
1
const ConfigManager = require('./config-manager');
const chalk = require('chalk');
const inquirer = require('inquirer');

const config = new ConfigManager();

/**
 * Interactive uninstall helper.
 *
 * Asks whether to wipe the saved configuration, clears it on confirmation,
 * then prints the command needed to remove the globally installed package.
 *
 * @returns {Promise<void>}
 */
async function uninstall() {
  console.log(chalk.red('⚠️ Uninstalling Echo AI Agent Configuration'));

  const answers = await inquirer.prompt([
    {
      type: 'confirm',
      name: 'deleteConfig',
      message: 'Do you want to delete all configuration files and preferences?',
      default: false
    }
  ]);

  if (answers.deleteConfig) {
    config.clear();
    console.log(chalk.green('✓ Configuration cleared.'));
  } else {
    console.log(chalk.yellow('ℹ️ Configuration preserved.'));
  }

  console.log(chalk.cyan('\nEcho AI Agent has been uninstalled from your system.'));
  console.log(chalk.gray('If you installed it globally, you can remove the package with:'));
  console.log(chalk.white('npm uninstall -g echo-ai-agent'));
}

// Only run if called directly
if (require.main === module) {
  // FIX: uninstall() is async — the bare call left a floating promise, so a
  // prompt failure surfaced as an unhandled rejection instead of a clean exit.
  uninstall().catch((err) => {
    console.error(chalk.red('Uninstall failed:'), err.message);
    process.exit(1);
  });
}

module.exports = uninstall;
@@ -0,0 +1,79 @@
1
const { GoogleGenerativeAI } = require("@google/generative-ai");

/**
 * Thin wrapper around the Gemini SDK.
 *
 * Owns the model handle, the merged tool declarations (one built-in system
 * command tool plus any plugin-supplied tools), and the translation of model
 * output into the app's {type, command, args, text} result objects.
 */
class GeminiBrain {
  /**
   * @param {string} apiKey - Google Gemini API key.
   * @param {Array<Object>} [customTools] - plugin tools in Google Generative
   *   AI function-declaration format; merged after the base tools.
   */
  constructor(apiKey, customTools = []) {
    this.genAI = new GoogleGenerativeAI(apiKey);

    // The single built-in tool: arbitrary system action / shell execution.
    this.baseTools = [
      {
        name: "execute_system_command",
        description: "Execute a system action or shell command on the user's computer",
        parameters: {
          type: "OBJECT",
          properties: {
            command: { type: "STRING", description: "The command to run (e.g., 'start chrome', 'mkdir test', 'screenshot')" },
            args: { type: "ARRAY", items: { type: "STRING" }, description: "Arguments for the command" }
          },
          required: ["command"]
        }
      }
    ];

    // Base tools first, then plugin tools — order preserved.
    this.allTools = this.baseTools.concat(customTools);

    // Persona/system prompt, assembled from the same fragments for readability.
    const persona = [
      "You are Echo, a highly sophisticated, JARVIS-inspired AI agent. ",
      "Your personality is professional, slightly witty, and exceptionally helpful. ",
      "You have control over the user's system and can perform various tasks like opening apps, managing files, and searching the web. ",
      "You also have access to external plugins if available. ",
      "Always refer to the user as 'Sir' (or their preferred title) unless told otherwise. ",
      "Keep your spoken responses concise, elegant, and ready for text-to-speech. ",
      "If a user asks for something outside your direct control, check if a plugin tool is available."
    ].join("");

    this.model = this.genAI.getGenerativeModel({
      model: "gemini-2.0-flash-lite",
      systemInstruction: persona
    });
  }

  /**
   * Send one user utterance through a fresh chat session with all tools
   * attached and classify the model's answer.
   *
   * @param {string} userInput - raw transcript / typed command.
   * @returns {Promise<Object>} one of:
   *   {type:'action'|'plugin_action', command, args, text} when a tool was
   *   requested, or {type:'speech', text} for a plain reply (also used as the
   *   apologetic fallback when the API call fails).
   */
  async processCommand(userInput) {
    const chat = this.model.startChat({
      tools: [{ functionDeclarations: this.allTools }]
    });

    try {
      const result = await chat.sendMessage(userInput);
      const response = result.response;

      const calls = response.functionCalls();
      if (!calls || calls.length === 0) {
        // No tool requested — plain conversational reply.
        return {
          type: 'speech',
          text: response.text()
        };
      }

      // Only the first tool call is honored.
      const call = calls[0];
      const isBaseTool = call.name === "execute_system_command";

      return {
        type: isBaseTool ? 'action' : 'plugin_action',
        command: isBaseTool ? call.args.command : call.name,
        args: call.args.args || call.args, // base tool nests args; plugins pass the whole object
        text: "Processing your request, sir."
      };
    } catch (error) {
      console.error("Gemini Processing Error:", error);
      return {
        type: 'speech',
        text: "I apologize, sir. I'm having trouble connecting to my neural network right now."
      };
    }
  }
}

module.exports = GeminiBrain;
@@ -0,0 +1,181 @@
1
const { exec } = require('child_process');
const os = require('os');
const fs = require('fs');
const path = require('path');

const platform = os.platform();

// Cross-platform system helpers for the agent's "execute_system_command" tool.
// Convention: every async helper RESOLVES (never rejects) with
// { success: boolean, ... } so callers branch without try/catch.
const SystemActions = {
  // Launch an application by name.
  openApp: (appName) => {
    return new Promise((resolve) => {
      let command;

      if (platform === 'win32') {
        // FIX: Windows `start` treats its first *quoted* argument as a window
        // title, so pass an explicit empty title and quote the app name
        // (names with spaces previously split into separate arguments).
        command = `start "" "${appName}"`;
      } else if (platform === 'darwin') {
        command = `open -a "${appName}"`;
      } else {
        // Linux — FIX: quote the name so spaces don't split the argument.
        command = `xdg-open "${appName}" || gnome-open "${appName}"`;
      }

      exec(command, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true });
      });
    });
  },

  // Open a Google search for `query` in the default (or Chrome) browser.
  searchWeb: (query) => {
    const url = `https://www.google.com/search?q=${encodeURIComponent(query)}`;
    return new Promise((resolve) => {
      let command;

      if (platform === 'win32') {
        // FIX: the fallback `start "${url}"` used the URL as a window title
        // and opened an empty console instead of the browser — give `start`
        // an explicit empty title.
        command = `start chrome "${url}" || start "" "${url}"`;
      } else if (platform === 'darwin') {
        command = `open "${url}"`;
      } else {
        command = `xdg-open "${url}" || gnome-open "${url}"`;
      }

      exec(command, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true });
      });
    });
  },

  // Create a folder; relative names land on the user's Desktop.
  createFolder: (pathName) => {
    return new Promise((resolve) => {
      // Use Node's fs instead of shell commands for better cross-platform support
      const fullPath = path.isAbsolute(pathName)
        ? pathName
        : path.join(os.homedir(), 'Desktop', pathName);

      fs.mkdir(fullPath, { recursive: true }, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true, path: fullPath });
      });
    });
  },

  // Copy a file (destination is overwritten if it exists — fs.copyFile default).
  copyFile: (source, destination) => {
    return new Promise((resolve) => {
      fs.copyFile(source, destination, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true });
      });
    });
  },

  // Delete a single file (not directories).
  deleteFile: (filePath) => {
    return new Promise((resolve) => {
      fs.unlink(filePath, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true });
      });
    });
  },

  // Snapshot of host hardware/OS facts (synchronous, no shell involved).
  getSystemInfo: () => {
    return {
      success: true,
      platform: os.platform(),
      arch: os.arch(),
      hostname: os.hostname(),
      cpus: os.cpus().length,
      totalMemory: `${(os.totalmem() / 1024 / 1024 / 1024).toFixed(2)} GB`,
      freeMemory: `${(os.freemem() / 1024 / 1024 / 1024).toFixed(2)} GB`,
      uptime: `${(os.uptime() / 3600).toFixed(2)} hours`
    };
  },

  // Open a URL in the default browser.
  openUrl: (url) => {
    return new Promise((resolve) => {
      let command;

      if (platform === 'win32') {
        // FIX: the URL was interpolated unquoted — any '&' in the query
        // string was interpreted by cmd.exe as a command separator. Quote it
        // and supply the empty window title `start` expects.
        command = `start "" "${url}"`;
      } else if (platform === 'darwin') {
        command = `open "${url}"`;
      } else {
        command = `xdg-open "${url}"`;
      }

      exec(command, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true });
      });
    });
  },

  // Execute custom shell command (use with caution).
  // SECURITY NOTE: `command` reaches the shell verbatim by design; callers
  // are responsible for never passing untrusted input here.
  executeCommand: (command) => {
    return new Promise((resolve) => {
      exec(command, (err, stdout, stderr) => {
        if (err) {
          resolve({ success: false, error: err.message, stderr });
        } else {
          resolve({ success: true, output: stdout });
        }
      });
    });
  },

  // Current local date/time plus an ISO timestamp.
  getDateTime: () => {
    const now = new Date();
    return {
      success: true,
      date: now.toLocaleDateString(),
      time: now.toLocaleTimeString(),
      timestamp: now.toISOString()
    };
  },

  // List directory entries; defaults to the user's home directory.
  listFiles: (dirPath) => {
    return new Promise((resolve) => {
      const targetPath = dirPath || os.homedir();

      fs.readdir(targetPath, (err, files) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true, files, path: targetPath });
      });
    });
  },

  // Capture the primary screen to a PNG on the Desktop.
  takeScreenshot: () => {
    return new Promise((resolve) => {
      const screenshotPath = path.join(os.homedir(), 'Desktop', `screenshot-${Date.now()}.png`);
      let command;

      if (platform === 'win32') {
        // FIX: the previous SendKeys('%{PRTSC}') approach only copied the
        // active window to the clipboard — no file was ever written at the
        // reported path. Capture and save the primary screen explicitly.
        command = `powershell -command "Add-Type -AssemblyName System.Windows.Forms; Add-Type -AssemblyName System.Drawing; $b=[System.Windows.Forms.Screen]::PrimaryScreen.Bounds; $bmp=New-Object System.Drawing.Bitmap $b.Width,$b.Height; $g=[System.Drawing.Graphics]::FromImage($bmp); $g.CopyFromScreen($b.Location,[System.Drawing.Point]::Empty,$b.Size); $bmp.Save('${screenshotPath}')"`;
      } else if (platform === 'darwin') {
        // macOS: Use screencapture
        command = `screencapture "${screenshotPath}"`;
      } else {
        // Linux: Use scrot or gnome-screenshot
        command = `scrot "${screenshotPath}" || gnome-screenshot -f "${screenshotPath}"`;
      }

      exec(command, (err) => {
        if (err) resolve({ success: false, error: err.message });
        else resolve({ success: true, path: screenshotPath });
      });
    });
  }
};

module.exports = SystemActions;
181
+
@@ -0,0 +1,24 @@
1
// Local speech-to-text service (Whisper) — currently a stub.
// Note: a real implementation requires whisper.cpp or a similar local engine.
const { spawn } = require('child_process');
const path = require('path');

class WhisperService {
  constructor() {
    // True while the (future) audio-capture pipeline is active.
    this.isRecording = false;
  }

  // Begin capturing audio. The prototype only logs and flips the flag; a real
  // implementation would stream mic audio (e.g. node-record-lpcm16) into a
  // local Whisper instance and invoke `onTranscript` with recognized text.
  startListening(onTranscript) {
    console.log("Starting voice listener...");
    this.isRecording = true;
  }

  // Stop capturing audio.
  stopListening() {
    this.isRecording = false;
  }
}

module.exports = WhisperService;
package/ui/index.html ADDED
@@ -0,0 +1,40 @@
1
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Echo AI</title>
  <link rel="stylesheet" href="style.css">
</head>
<body>
  <!-- Root of the animated "oracle" widget; renderer.js drives its classes
       (processing/talking) and inline transforms from mic volume. -->
  <div class="echo-container">
    <div class="oracle-wrapper">
      <!-- Wavy Organic Core -->
      <div class="blobs-container">
        <div class="blob blob-1"></div>
        <div class="blob blob-2"></div>
        <div class="blob blob-3"></div>
        <div class="sphere-inner"></div>
      </div>

      <!-- Rotating Rings -->
      <div class="ring ring-1"></div>
      <div class="ring ring-2"></div>

      <!-- SVG Filter for the "Liquid/Wave" effect -->
      <svg xmlns="http://www.w3.org/2000/svg" version="1.1" style="display:none;">
        <defs>
          <filter id="goo">
            <feGaussianBlur in="SourceGraphic" stdDeviation="15" result="blur" />
            <feColorMatrix in="blur" mode="matrix" values="1 0 0 0 0  0 1 0 0 0  0 0 1 0 0  0 0 0 18 -7" result="goo" />
            <feBlend in="SourceGraphic" in2="goo" />
          </filter>
        </defs>
      </svg>
    </div>

    <!-- Live status / transcript readout updated by renderer.js -->
    <div id="status-text" class="status-text">LISTENING...</div>
  </div>
  <!-- Renderer logic: speech recognition, TTS, and audio-reactive visuals. -->
  <script src="renderer.js"></script>
</body>
</html>
package/ui/renderer.js ADDED
@@ -0,0 +1,151 @@
1
// Cached DOM handles for the animated oracle UI (see ui/index.html).
const statusText = document.getElementById('status-text');
const blobsContainer = document.querySelector('.blobs-container');
const blobs = document.querySelectorAll('.blob');
const innerSphere = document.querySelector('.sphere-inner');

// Load and apply theme.
// NOTE(review): window.electronAPI is presumably exposed by an Electron
// preload script via contextBridge — confirm it provides onApplyTheme and
// processInput (used in handleVoiceCommand below).
window.electronAPI.onApplyTheme((themeColors) => {
  document.documentElement.style.setProperty('--core-color', themeColors.core);
  document.documentElement.style.setProperty('--glow-color', themeColors.glow);
});

// Web Audio state — created lazily on the first user click (bottom of file),
// since AudioContext/mic access require a user gesture.
let audioContext;
let analyser;
let microphone;
let javascriptNode;

// Browser speech recognition (vendor-prefixed in Chromium-based shells).
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
let recognition;

if (SpeechRecognition) {
  recognition = new SpeechRecognition();
  recognition.continuous = true;      // keep listening across utterances
  recognition.interimResults = true;  // stream partials for the live readout
  recognition.lang = 'en-US';

  recognition.onstart = () => {
    statusText.innerText = "LISTENING...";
  };

  recognition.onresult = (event) => {
    // Join the top hypothesis of every result segment into one transcript.
    const transcript = Array.from(event.results)
      .map(result => result[0])
      .map(result => result.transcript)
      .join('');

    statusText.innerText = transcript.toUpperCase();

    // Dispatch only once the latest segment is final.
    // NOTE(review): no onerror/onend handler is installed here — continuous
    // recognition that stops on silence/error is never restarted except via
    // speakResponse; consider adding one.
    if (event.results[event.results.length - 1].isFinal) {
      handleVoiceCommand(transcript);
    }
  };
}
43
+
44
// Forward a finalized transcript to the main process, display the reply, and
// voice it. On failure, show and speak a fixed apology instead.
async function handleVoiceCommand(text) {
  statusText.innerText = "THINKING...";
  blobsContainer.classList.add('processing');

  try {
    const reply = await window.electronAPI.processInput(text);
    statusText.innerText = reply.text.toUpperCase();

    // Voice the response as well as displaying it.
    speakResponse(reply.text);
  } catch (err) {
    console.error("Processing Error:", err);
    statusText.innerText = "ERROR IN BRAIN";
    speakResponse("I'm sorry sir, I encountered a critical error in my processing pathways.");
  } finally {
    // Always drop the visual "processing" state, success or failure.
    blobsContainer.classList.remove('processing');
  }
}
63
+
64
// Speak `text` via the Web Speech synthesis API with a JARVIS-flavored voice,
// animating the blobs while talking and resuming recognition afterwards.
function speakResponse(text) {
  // Cancel any ongoing speech
  window.speechSynthesis.cancel();

  const utterance = new SpeechSynthesisUtterance(text);

  // NOTE(review): getVoices() can legitimately return [] before the browser
  // fires 'voiceschanged'; we then silently fall back to the default voice —
  // consider caching voices on that event.
  const voices = window.speechSynthesis.getVoices();
  // Try to find a professional sounding male voice or a high-quality neutral one
  const preferredVoice = voices.find(v => v.name.includes('Google UK English Male') || v.name.includes('Microsoft David'));
  if (preferredVoice) utterance.voice = preferredVoice;

  utterance.pitch = 0.9; // Slightly lower pitch for that JARVIS feel
  utterance.rate = 1.0;  // Natural speed

  // Visual feedback while talking
  utterance.onstart = () => {
    blobsContainer.classList.add('talking');
  };
  utterance.onend = () => {
    blobsContainer.classList.remove('talking');
    // Resume listening automatically after Echo finishes talking.
    // FIX: SpeechRecognition.start() throws an InvalidStateError when
    // recognition is already running (continuous mode never stopped it);
    // swallow that specific race instead of surfacing an uncaught error.
    if (recognition) {
      try {
        recognition.start();
      } catch (e) {
        // Already listening — nothing to do.
      }
    }
  };

  window.speechSynthesis.speak(utterance);
}
91
+
92
// Request mic access, wire a Web Audio analyser that drives the blob visuals
// from average input volume, then start speech recognition. On denial, the
// status line shows "MIC ACCESS DENIED" and nothing is started.
async function startVisualizer() {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    audioContext = new AudioContext();
    analyser = audioContext.createAnalyser();
    microphone = audioContext.createMediaStreamSource(stream);
    // NOTE: createScriptProcessor is deprecated in favor of AudioWorklet;
    // kept for now since it still works and needs no extra module file.
    javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);

    analyser.smoothingTimeConstant = 0.8;
    analyser.fftSize = 256;

    microphone.connect(analyser);
    analyser.connect(javascriptNode);
    javascriptNode.connect(audioContext.destination);

    javascriptNode.onaudioprocess = () => {
      const array = new Uint8Array(analyser.frequencyBinCount);
      analyser.getByteFrequencyData(array);
      let values = 0;

      for (let i = 0; i < array.length; i++) {
        values += array[i];
      }

      // Average bin magnitude (0-255) is the single "volume" signal.
      const average = values / array.length;
      updateWavyVisuals(average);
    };

    if (recognition) {
      // FIX: start() throws InvalidStateError if recognition is already
      // running (e.g. restarted by speakResponse) — don't let that abort
      // the visualizer setup.
      try {
        recognition.start();
      } catch (e) {
        // Already listening — nothing to do.
      }
    }
  } catch (err) {
    console.error("Mic access denied:", err);
    statusText.innerText = "MIC ACCESS DENIED";
  }
}
126
+
127
// Map the current mic volume (average frequency magnitude, 0-255) onto the
// blob container scale, per-blob scale/morph speed, and core glow intensity.
function updateWavyVisuals(volume) {
  // (FIX: removed unused `scale` and `speed` locals from the original.)

  // Animate the whole container scale
  blobsContainer.style.transform = `scale(${1 + (volume / 200)})`;

  // Animate individual blobs speed and size
  blobs.forEach((blob, index) => {
    const s = 1 + (volume / (100 + index * 50));
    blob.style.transform = `scale(${s})`;
    // Speed up morphing animation with volume.
    // FIX: clamp to a positive minimum — the raw formula goes negative for
    // volume > 160, which is an invalid animation-duration and froze the morph.
    blob.style.animationDuration = `${Math.max(0.5, 4 - (volume / 40))}s`;
  });

  // Inner sphere intensity
  innerSphere.style.boxShadow = `0 0 ${30 + volume}px var(--glow-color)`;
}
145
+
146
// The first click on the oracle bootstraps the mic/visualizer pipeline —
// browsers require a user gesture before AudioContext and mic access.
document.querySelector('.oracle-wrapper').addEventListener('click', () => {
  if (audioContext) return; // already initialized — ignore further clicks
  startVisualizer();
});