@openpalm/channel-voice 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/web/app.js ADDED
@@ -0,0 +1,520 @@
1
/* ================================================================
   OpenPalm Voice — App
   State machine: idle → recording → processing → idle
   Falls back to browser Speech APIs when server STT/TTS unavailable.
   ================================================================ */

;(function () {
'use strict'

// --- DOM refs ---
var recordBtn = document.getElementById('record-btn')            // mic toggle button
var log = document.getElementById('log')                         // conversation log container
var statusEl = document.getElementById('status')                 // header status text
var settingsBtn = document.getElementById('settings-btn')
var settingsDialog = document.getElementById('settings-dialog')
var settingsForm = document.getElementById('settings-form')
var announcer = document.getElementById('announcer')             // visually-hidden aria-live region
var inputVoice = document.getElementById('setting-voice')
var inputHaptic = document.getElementById('setting-haptic')
var inputWakelock = document.getElementById('setting-wakelock')
var inputContinuous = document.getElementById('setting-continuous')
var continuousBtn = document.getElementById('continuous-btn')

// --- State ---
var state = 'idle'        // 'idle' | 'recording' | 'processing'
var continuous = false    // auto-restart listening after each response
var recorder = null       // active MediaRecorder (server STT path only)
var chunks = []           // captured audio chunks pending upload
var wakeLock = null       // held screen WakeLockSentinel, if any
var audioCtx = null       // lazily-created AudioContext for TTS playback

// --- Capabilities (populated on init from /api/health) ---
var caps = {
  serverStt: false,
  serverTts: false,
  browserStt: !!(window.SpeechRecognition || window.webkitSpeechRecognition),
  browserTts: 'speechSynthesis' in window
}
39
+
40
+ // --- Settings ---
41
/*
 * Populate the settings form from localStorage ('voice-settings').
 * Defaults: empty voice, haptics on, wake lock on, continuous off.
 */
function loadSettings() {
  try {
    var s = JSON.parse(localStorage.getItem('voice-settings') || '{}')
    inputVoice.value = s.voice || ''
    inputHaptic.checked = s.haptic !== false      // default true
    inputWakelock.checked = s.wakelock !== false  // default true
    inputContinuous.checked = !!s.continuous      // default false
  } catch (_) {
    // Corrupt JSON in storage: reset every field to its default.
    // (Fix: the voice field was previously left stale here.)
    inputVoice.value = ''
    inputHaptic.checked = true
    inputWakelock.checked = true
    inputContinuous.checked = false
  }
}
54
+
55
// Persist the settings form to localStorage and immediately apply the
// continuous-listening preference.
function saveSettings() {
  var settings = {
    voice: inputVoice.value,
    haptic: inputHaptic.checked,
    wakelock: inputWakelock.checked,
    continuous: inputContinuous.checked
  }
  localStorage.setItem('voice-settings', JSON.stringify(settings))
  setContinuous(settings.continuous)
}
64
+
65
/*
 * Read one setting from localStorage with per-key defaults:
 * haptic/wakelock default to true, continuous to false, and string
 * keys (e.g. 'voice') to ''. Corrupt storage yields the defaults.
 */
function getSetting(key) {
  var s
  try {
    s = JSON.parse(localStorage.getItem('voice-settings') || '{}')
  } catch (_) {
    s = {}  // corrupt storage behaves like empty settings
  }
  if (key === 'haptic') return s.haptic !== false
  if (key === 'wakelock') return s.wakelock !== false
  if (key === 'continuous') return !!s.continuous
  // String-valued keys fall back to '' on both paths. (Fix: the old
  // catch branch returned true for any key other than voice/continuous,
  // disagreeing with the non-error path, which returned ''.)
  return s[key] || ''
}
76
+
77
// Toggle continuous-listening mode, reflect it on the toggle button
// for assistive tech, and kick off a recording when enabling from idle.
function setContinuous(enabled) {
  continuous = enabled
  continuousBtn.setAttribute('aria-pressed', String(enabled))
  if (!enabled) return
  if (state !== 'idle') return
  startRecording()
}
84
+
85
+ // --- Utilities ---
86
// HTML-escape untrusted text by round-tripping it through a detached
// DOM node: textContent assignment encodes &, < and >.
function escapeHtml(text) {
  var scratch = document.createElement('span')
  scratch.textContent = text
  return scratch.innerHTML
}
91
+
92
// Mirror a status message into the visually-hidden aria-live region so
// screen readers announce state changes.
function announce(msg) {
  announcer.textContent = msg
}
95
+
96
// Pick the first recording container this browser's MediaRecorder
// supports, preferring mp4 (Safari) then webm/opus then plain webm.
// Returns '' when support cannot be determined.
function pickMimeType() {
  var candidates = ['audio/mp4', 'audio/webm;codecs=opus', 'audio/webm']
  if (typeof MediaRecorder.isTypeSupported !== 'function') return ''
  var match = candidates.find(function (t) {
    return MediaRecorder.isTypeSupported(t)
  })
  return match || ''
}
105
+
106
// Vibrate the device for tactile feedback, honouring the user's
// haptics preference and the availability of the Vibration API.
function haptic(pattern) {
  if (!getSetting('haptic')) return
  if (!navigator.vibrate) return
  navigator.vibrate(pattern)
}
111
+
112
+ // --- UI Updates ---
113
// Transition the UI state machine (idle | recording | processing):
// update the record button's data-state, the status indicator, and the
// screen-reader announcer. statusMsg overrides the displayed text.
function setState(newState, statusMsg) {
  var display = statusMsg || newState
  state = newState
  recordBtn.setAttribute('data-state', newState)
  statusEl.textContent = display
  statusEl.setAttribute('aria-label', 'Status: ' + display)
  announce(display)

  var labels = {
    idle: 'Start recording',
    recording: 'Stop recording',
    processing: 'Processing, please wait'
  }
  if (labels[newState]) {
    recordBtn.setAttribute('aria-label', labels[newState])
  }
}
128
+
129
+ // --- Simple markdown rendering (bold, italic, code, code blocks, lists) ---
130
/*
 * Minimal markdown renderer for AI log entries: fenced code blocks,
 * inline code, bold, italic, and line breaks. Input is HTML-escaped
 * first, so output is safe to assign to innerHTML.
 *
 * Code spans are extracted into placeholder slots before the inline
 * formatting passes run, so **...** / *...* inside code are left
 * verbatim and newlines inside <pre> are not turned into <br>.
 * (Previously the bold/italic/<br> passes ran over already-emitted
 * <code>/<pre> content and corrupted it.)
 */
function renderMarkdown(text) {
  var escaped = escapeHtml(text)
  var slots = []
  function stash(html) {
    slots.push(html)
    // \u0000 cannot appear in escaped user text, so it is a safe marker.
    return '\u0000' + (slots.length - 1) + '\u0000'
  }
  // Code blocks: ```...```
  escaped = escaped.replace(/```(\w*)\n?([\s\S]*?)```/g, function (_, lang, code) {
    return stash('<pre><code>' + code + '</code></pre>')
  })
  // Inline code: `...`
  escaped = escaped.replace(/`([^`]+)`/g, function (_, code) {
    return stash('<code>' + code + '</code>')
  })
  // Bold: **...**
  escaped = escaped.replace(/\*\*([^*]+)\*\*/g, '<strong>$1</strong>')
  // Italic: *...*
  escaped = escaped.replace(/(?<!\*)\*([^*]+)\*(?!\*)/g, '<em>$1</em>')
  // Line breaks (code spans are still stashed, so <pre> is unaffected)
  escaped = escaped.replace(/\n/g, '<br>')
  // Restore the protected code spans
  escaped = escaped.replace(/\u0000(\d+)\u0000/g, function (_, i) {
    return slots[Number(i)]
  })
  return escaped
}
144
+
145
// Append one entry to the conversation log and keep it scrolled to the
// bottom. AI messages get lightweight markdown rendering; every other
// level is escaped and shown verbatim.
function addLog(level, message) {
  var body = level === 'AI' ? renderMarkdown(message) : escapeHtml(message)
  var entry = document.createElement('div')
  entry.className = 'log-entry'
  entry.setAttribute('data-level', level)
  entry.innerHTML = '<span class="log-label">' + escapeHtml(level) + '</span>' + body
  log.appendChild(entry)
  log.scrollTop = log.scrollHeight
}
154
+
155
+ // --- Wake Lock ---
156
// Request a screen wake lock while recording, when the user enabled it
// and the browser supports the Wake Lock API. Failure is non-fatal.
async function acquireWakeLock() {
  var wanted = getSetting('wakelock') && 'wakeLock' in navigator
  if (!wanted) return
  try {
    wakeLock = await navigator.wakeLock.request('screen')
  } catch (_) {
    // Lock denied (e.g. page hidden, low battery) — proceed without it.
  }
}
164
+
165
// Release the held wake lock, if any; release errors are ignored.
function releaseWakeLock() {
  if (!wakeLock) return
  wakeLock.release().catch(function () {})
  wakeLock = null
}
171
+
172
+ // --- Audio Playback ---
173
/*
 * Decode a base64-encoded audio payload (server TTS response) and play
 * it through a shared, lazily-created AudioContext. Resolves when
 * playback ends; rejects on base64/decode failure so the caller can
 * fall back to browser TTS.
 */
function playBase64Audio(base64) {
  return new Promise(function (resolve, reject) {
    try {
      var binary = atob(base64)
      var bytes = new Uint8Array(binary.length)
      for (var i = 0; i < binary.length; i++) {
        bytes[i] = binary.charCodeAt(i)
      }
      if (!audioCtx) {
        audioCtx = new (window.AudioContext || window.webkitAudioContext)()
      }
      // Autoplay policies can leave a context 'suspended' until a user
      // gesture; without this resume, playback silently never starts.
      if (audioCtx.state === 'suspended') {
        audioCtx.resume().catch(function () {})
      }
      audioCtx.decodeAudioData(bytes.buffer, function (buffer) {
        var source = audioCtx.createBufferSource()
        source.buffer = buffer
        source.connect(audioCtx.destination)
        source.onended = resolve
        source.start(0)
      }, function (err) {
        reject(err)
      })
    } catch (err) {
      // atob throws on malformed base64; AudioContext may be unavailable.
      reject(err)
    }
  })
}
198
+
199
+ // --- Strip markdown for TTS ---
200
// Reduce markdown to plain speakable text for TTS: drop fenced code
// blocks entirely, unwrap inline code / bold / italic / links, strip
// heading and list markers, and collapse runs of blank lines.
function stripMarkdownForSpeech(text) {
  var rules = [
    [/```[\s\S]*?```/g, ''],            // fenced code blocks: removed
    [/`([^`]+)`/g, '$1'],               // inline code: keep content
    [/\*\*([^*]+)\*\*/g, '$1'],         // bold
    [/\*([^*]+)\*/g, '$1'],             // italic
    [/^#{1,6}\s+/gm, ''],               // heading markers
    [/^\s*[-*+]\s+/gm, ''],             // bullet list markers
    [/^\s*\d+\.\s+/gm, ''],             // ordered list markers
    [/\[([^\]]+)\]\([^)]+\)/g, '$1'],   // links: keep label, drop URL
    [/\n{3,}/g, '\n\n']                 // collapse blank-line runs
  ]
  var out = text
  for (var i = 0; i < rules.length; i++) {
    out = out.replace(rules[i][0], rules[i][1])
  }
  return out.trim()
}
213
+
214
+ // --- Browser TTS fallback ---
215
// Speak text via the Web Speech synthesis API, honouring the user's
// preferred voice (matched case-insensitively by name substring).
// Resolves when speech finishes or errors; resolves immediately when
// browser TTS is unavailable.
function speakWithBrowser(text) {
  return new Promise(function (resolve) {
    if (!caps.browserTts) {
      resolve()
      return
    }
    var utterance = new SpeechSynthesisUtterance(stripMarkdownForSpeech(text))
    var wanted = getSetting('voice')
    if (wanted) {
      var needle = wanted.toLowerCase()
      var match = speechSynthesis.getVoices().find(function (v) {
        return v.name.toLowerCase().indexOf(needle) !== -1
      })
      if (match) utterance.voice = match
    }
    utterance.onend = resolve
    utterance.onerror = resolve  // never leave the caller hanging
    speechSynthesis.speak(utterance)
  })
}
232
+
233
+ // --- Browser STT ---
234
/*
 * Capture a single utterance with the browser SpeechRecognition API.
 * Resolves with the transcript, or '' when no speech was captured
 * (no-speech / aborted errors, or recognition ended without a result).
 * Rejects only on unsupported API or unexpected recognition errors.
 */
function transcribeWithBrowser() {
  return new Promise(function (resolve, reject) {
    var SR = window.SpeechRecognition || window.webkitSpeechRecognition
    if (!SR) { reject(new Error('Browser speech recognition not supported')); return }
    var recognition = new SR()
    recognition.lang = navigator.language || 'en-US'
    recognition.interimResults = false  // only final results wanted
    recognition.maxAlternatives = 1
    var gotResult = false               // guards the onend fallback below
    recognition.onresult = function (event) {
      gotResult = true
      var text = event.results[0][0].transcript
      resolve(text)
    }
    recognition.onerror = function (event) {
      // no-speech and aborted are normal in continuous mode — treat as empty
      if (event.error === 'no-speech' || event.error === 'aborted') {
        resolve('')
      } else {
        reject(new Error('Speech recognition error: ' + event.error))
      }
    }
    recognition.onend = function () {
      // Recognition can end without firing onresult/onerror; settle as
      // "no speech" here. If the promise already settled, this resolve
      // is a harmless no-op.
      if (!gotResult) resolve('')
    }
    recognition.start()
  })
}
262
+
263
+ // --- Recording (server STT path) ---
264
/*
 * Acquire the microphone and start a MediaRecorder capture for the
 * server-STT path. Transitions the UI to 'recording' on success; on
 * failure logs an error and returns to idle.
 */
async function startRecordingAudio() {
  try {
    var stream = await navigator.mediaDevices.getUserMedia({ audio: true })
    chunks = []
    var mimeType = pickMimeType()
    // Omit the options object when no supported type was detected,
    // letting the browser choose its default container.
    recorder = new MediaRecorder(stream, mimeType ? { mimeType: mimeType } : undefined)

    recorder.ondataavailable = function (e) {
      if (e.data.size > 0) chunks.push(e.data)
    }

    recorder.start()
    setState('recording', 'recording')
    haptic(50)
    await acquireWakeLock()
  } catch (err) {
    // NOTE(review): any error in the whole block lands here and is
    // reported as a microphone problem — confirm that is intended for
    // MediaRecorder construction failures too.
    addLog('ERR', 'Microphone access denied: ' + err.message)
    setState('idle', 'ready')
  }
}
284
+
285
/*
 * Stop the active MediaRecorder, release the mic tracks and wake lock,
 * package the captured chunks into a blob, and POST it to the server
 * pipeline as multipart form data.
 */
async function stopRecordingAndSendAudio() {
  setState('processing', 'transcribing')
  haptic([30, 50, 30])
  releaseWakeLock()

  await new Promise(function (resolve) {
    recorder.onstop = function () {
      // Stop tracks so the browser drops its mic-in-use indicator.
      recorder.stream.getTracks().forEach(function (t) { t.stop() })
      resolve()
    }
    recorder.stop()
  })

  if (chunks.length === 0) {
    addLog('ERR', 'No audio recorded')
    setState('idle', 'ready')
    return
  }

  // Use the mime type the recorder actually negotiated rather than
  // re-probing isTypeSupported (the two can differ when the browser
  // falls back to another codec/container).
  var mime = recorder.mimeType || pickMimeType() || 'audio/webm'
  var blob = new Blob(chunks, { type: mime })
  chunks = []
  recorder = null

  var ext = mime.indexOf('mp4') !== -1 ? 'm4a' : 'webm'
  var form = new FormData()
  form.append('audio', blob, 'recording.' + ext)
  await sendToServer(form)
}
314
+
315
+ // --- Recording (browser STT path) ---
316
/*
 * Capture one utterance via browser speech recognition, then either
 * forward the text to the server pipeline, or — in continuous mode —
 * silently re-arm listening when nothing was heard.
 */
async function startBrowserSTT() {
  setState('recording', 'listening')
  haptic(50)
  await acquireWakeLock()

  try {
    var text = await transcribeWithBrowser()
    releaseWakeLock()

    if (!text || !text.trim()) {
      if (continuous) {
        // Empty results are expected in continuous mode; restart after
        // a short pause instead of surfacing an error.
        setState('idle', 'listening...')
        setTimeout(function () { startRecording() }, 300)
      } else {
        addLog('SYS', 'No speech detected')
        setState('idle', 'ready')
      }
      return
    }

    setState('processing', 'processing')
    haptic([30, 50, 30])

    // Text-only payload: the server skips STT and goes straight to the LLM.
    var form = new FormData()
    form.append('text', text.trim())
    await sendToServer(form)
  } catch (err) {
    releaseWakeLock()
    addLog('ERR', err.message)
    setState('idle', 'ready')
  }
}
348
+
349
+ // --- Send to server and handle response ---
350
/*
 * POST a FormData payload (an 'audio' blob or a 'text' field) to
 * /api/pipeline and render the result: user transcript, AI reply, then
 * spoken audio (server-provided TTS when present, browser TTS as the
 * fallback). STT-specific server errors downgrade caps.serverStt so
 * later recordings use browser recognition. Re-arms listening when
 * continuous mode is on.
 */
async function sendToServer(form) {
  try {
    addLog('TX', 'sending...')
    var response = await fetch('/api/pipeline', {
      method: 'POST',
      body: form
    })

    if (!response.ok) {
      // Error body is optional JSON: { error, code }
      var errBody = null
      try { errBody = await response.json() } catch (_) {}
      var errMsg = (errBody && errBody.error) || ('Server error ' + response.status)

      // If server STT failed for any reason, switch to browser STT for future recordings
      if (errBody && (errBody.code === 'stt_not_configured' || errBody.code === 'stt_error')) {
        caps.serverStt = false
        if (caps.browserStt) {
          addLog('SYS', 'Server STT unavailable, switching to browser speech recognition')
          addLog('SYS', 'Tap the microphone again to retry')
        } else {
          addLog('ERR', 'Server STT failed and browser speech recognition not available')
        }
        setState('idle', 'ready')
        return
      }

      addLog('ERR', errMsg)
      setState('idle', 'error')
      return
    }

    var data = await response.json()

    if (data.transcript) {
      addLog('YOU', data.transcript)  // what the user said (server STT path)
    }
    if (data.response) {
      addLog('AI', data.response)     // rendered as markdown in the log
    }

    // Play audio: server TTS if available, otherwise browser TTS
    if (data.audio) {
      try {
        await playBase64Audio(data.audio)
      } catch (err) {
        addLog('SYS', 'Audio decode failed, using browser voice')
        if (data.response) await speakWithBrowser(data.response)
      }
    } else if (data.response && caps.browserTts) {
      await speakWithBrowser(data.response)
    }

    setState('idle', 'ready')
    haptic(30)

    // Auto-restart if continuous listening is on
    if (continuous) {
      setTimeout(function () { startRecording() }, 300)
    }
  } catch (err) {
    // Network-level failure (fetch itself rejected): treat as offline.
    addLog('ERR', 'Request failed: ' + err.message)
    setState('idle', 'offline')
  }
}
414
+
415
+ // --- Toggle recording ---
416
// Kick off a capture using the best available STT path: server-side
// transcription when configured, otherwise the browser
// SpeechRecognition API. No-op unless the state machine is idle.
function startRecording() {
  if (state !== 'idle') return
  if (caps.serverStt) {
    startRecordingAudio()
    return
  }
  if (caps.browserStt) {
    startBrowserSTT()
    return
  }
  addLog('ERR', 'No speech recognition available (server STT not configured, browser API not supported)')
}
426
+
427
// Stop an in-progress recording. A manual stop always disables
// continuous mode; only the MediaRecorder path needs an explicit stop
// (browser SpeechRecognition ends on its own).
function stopRecording() {
  if (state !== 'recording') return
  if (continuous) setContinuous(false)
  if (recorder) stopRecordingAndSendAudio()
}
436
+
437
// Single tap target: idle starts a capture, recording stops it, and
// processing ignores input entirely.
function toggleRecording() {
  if (state === 'idle') startRecording()
  else if (state === 'recording') stopRecording()
}
444
+
445
// --- Event Handlers ---
recordBtn.addEventListener('click', toggleRecording)

// Spacebar toggles recording, but only when focus is on the page body
// (so it doesn't fire while typing in the settings dialog).
document.addEventListener('keydown', function (e) {
  if (e.code === 'Space' && e.target === document.body) {
    e.preventDefault()
    toggleRecording()
  }
})

continuousBtn.addEventListener('click', function () {
  setContinuous(!continuous)
  // Persist to settings
  try {
    var s = JSON.parse(localStorage.getItem('voice-settings') || '{}')
    s.continuous = continuous
    localStorage.setItem('voice-settings', JSON.stringify(s))
  } catch (_) {}
})

// Re-read stored settings each time the dialog opens so the form
// reflects the currently persisted values.
settingsBtn.addEventListener('click', function () {
  loadSettings()
  settingsDialog.showModal()
})

// method="dialog" closes the dialog on submit; we only persist here.
settingsForm.addEventListener('submit', function () {
  saveSettings()
})

// --- Online / Offline ---
window.addEventListener('online', function () {
  if (state === 'idle') setState('idle', 'ready')
  addLog('SYS', 'Connection restored')
  // The server may have come back with STT/TTS — re-probe.
  checkCapabilities()
})

window.addEventListener('offline', function () {
  setState('idle', 'offline')
  addLog('SYS', 'Connection lost')
})
485
+
486
+ // --- Check server capabilities ---
487
// Probe /api/health for server STT/TTS support, record it in caps, and
// log the resulting capability sources. A network failure degrades to
// browser-only speech APIs.
function checkCapabilities() {
  function describe(serverOk, model, browserOk) {
    if (serverOk) return 'server (' + model + ')'
    return browserOk ? 'browser' : 'none'
  }

  fetch('/api/health')
    .then(function (res) { return res.json() })
    .then(function (data) {
      caps.serverStt = !!(data.stt && data.stt.configured)
      caps.serverTts = !!(data.tts && data.tts.configured)

      var sttSource = describe(caps.serverStt, caps.serverStt ? data.stt.model : '', caps.browserStt)
      var ttsSource = describe(caps.serverTts, caps.serverTts ? data.tts.model : '', caps.browserTts)
      addLog('SYS', 'STT: ' + sttSource + ' | TTS: ' + ttsSource)

      if (!caps.serverStt && !caps.browserStt) {
        addLog('ERR', 'No speech recognition available')
      }
    })
    .catch(function () {
      addLog('SYS', 'Server unreachable, using browser APIs')
      caps.serverStt = false
      caps.serverTts = false
    })
}
507
+
508
// --- Service Worker ---
// Registration is best-effort; offline caching is optional.
if ('serviceWorker' in navigator) {
navigator.serviceWorker.register('/sw.js').catch(function () {})
}

// --- Init ---
loadSettings()
continuous = getSetting('continuous')
continuousBtn.setAttribute('aria-pressed', String(continuous))
setState('idle', navigator.onLine ? 'ready' : 'offline')
addLog('SYS', 'Voice channel ready. Tap the microphone or press Space to begin.')
checkCapabilities()
})()
package/web/index.html ADDED
@@ -0,0 +1,85 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="utf-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1, viewport-fit=cover">
6
+ <meta name="theme-color" content="#ff9d00">
7
+ <meta name="apple-mobile-web-app-capable" content="yes">
8
+ <meta name="apple-mobile-web-app-status-bar-style" content="default">
9
+ <meta name="description" content="Voice-driven AI assistant">
10
+ <title>OpenPalm Voice</title>
11
+ <link rel="stylesheet" href="/styles.css">
12
+ <link rel="manifest" href="/manifest.webmanifest">
13
+ </head>
14
+ <body>
15
+ <div class="app">
16
+ <header class="header">
17
+ <div class="header-brand">
18
+ <span class="brand-slash">/</span><span class="brand-name">voice</span>
19
+ </div>
20
+ <div class="header-right">
21
+ <span id="status" class="status-indicator" aria-label="Status: ready">ready</span>
22
+ <button id="settings-btn" class="icon-btn" aria-label="Open settings">
23
+ <svg width="20" height="20" viewBox="0 0 20 20" fill="none" aria-hidden="true">
24
+ <path d="M10 12.5a2.5 2.5 0 100-5 2.5 2.5 0 000 5z" stroke="currentColor" stroke-width="1.5"/>
25
+ <path d="M16.2 12.2a1.4 1.4 0 00.28 1.54l.05.05a1.7 1.7 0 11-2.4 2.4l-.05-.05a1.4 1.4 0 00-1.54-.28 1.4 1.4 0 00-.85 1.28v.15a1.7 1.7 0 11-3.4 0v-.08a1.4 1.4 0 00-.91-1.28 1.4 1.4 0 00-1.54.28l-.05.05a1.7 1.7 0 11-2.4-2.4l.05-.05a1.4 1.4 0 00.28-1.54 1.4 1.4 0 00-1.28-.85H2.3a1.7 1.7 0 110-3.4h.08a1.4 1.4 0 001.28-.91 1.4 1.4 0 00-.28-1.54l-.05-.05a1.7 1.7 0 112.4-2.4l.05.05a1.4 1.4 0 001.54.28h.07a1.4 1.4 0 00.85-1.28V2.3a1.7 1.7 0 113.4 0v.08a1.4 1.4 0 00.85 1.28 1.4 1.4 0 001.54-.28l.05-.05a1.7 1.7 0 112.4 2.4l-.05.05a1.4 1.4 0 00-.28 1.54v.07a1.4 1.4 0 001.28.85h.15a1.7 1.7 0 110 3.4h-.08a1.4 1.4 0 00-1.28.85z" stroke="currentColor" stroke-width="1.5"/>
26
+ </svg>
27
+ </button>
28
+ </div>
29
+ </header>
30
+
31
+ <main id="log" class="log" role="log" aria-label="Conversation log"></main>
32
+
33
+ <div class="controls">
34
+ <button id="record-btn" class="record-btn" data-state="idle" aria-label="Start recording">
35
+ <svg class="mic-icon" width="28" height="28" viewBox="0 0 24 24" fill="none" aria-hidden="true">
36
+ <path d="M12 1a3 3 0 00-3 3v8a3 3 0 006 0V4a3 3 0 00-3-3z" fill="currentColor"/>
37
+ <path d="M19 10v2a7 7 0 01-14 0v-2" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
38
+ <line x1="12" y1="19" x2="12" y2="23" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
39
+ <line x1="8" y1="23" x2="16" y2="23" stroke="currentColor" stroke-width="2" stroke-linecap="round"/>
40
+ </svg>
41
+ <span class="spinner" aria-hidden="true"></span>
42
+ </button>
43
+ <button id="continuous-btn" class="continuous-btn" aria-label="Enable continuous listening" aria-pressed="false">
44
+ <svg width="18" height="18" viewBox="0 0 24 24" fill="none" aria-hidden="true">
45
+ <path d="M1 4v6h6" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
46
+ <path d="M3.51 15a9 9 0 1 0 2.13-9.36L1 10" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
47
+ </svg>
48
+ </button>
49
+ </div>
50
+
51
+ <footer class="footer">
52
+ <p>mic &rarr; STT &rarr; LLM &rarr; TTS &rarr; speaker</p>
53
+ </footer>
54
+ </div>
55
+
56
+ <dialog id="settings-dialog" class="settings-dialog" aria-label="Settings">
57
+ <h2>Settings</h2>
58
+ <form method="dialog" id="settings-form">
59
+ <label class="field">
60
+ <span class="field-label">TTS voice</span>
61
+ <input type="text" id="setting-voice" class="field-input" placeholder="default">
62
+ </label>
63
+ <label class="field checkbox-field">
64
+ <input type="checkbox" id="setting-haptic">
65
+ <span class="field-label">Haptic feedback</span>
66
+ </label>
67
+ <label class="field checkbox-field">
68
+ <input type="checkbox" id="setting-wakelock">
69
+ <span class="field-label">Keep screen on while recording</span>
70
+ </label>
71
+ <label class="field checkbox-field">
72
+ <input type="checkbox" id="setting-continuous">
73
+ <span class="field-label">Continuous listening (auto-restart after response)</span>
74
+ </label>
75
+ <div class="dialog-actions">
76
+ <button type="submit" class="btn btn-primary">Done</button>
77
+ </div>
78
+ </form>
79
+ </dialog>
80
+
81
+ <div id="announcer" class="sr-only" aria-live="polite" aria-atomic="true"></div>
82
+
83
+ <script src="/app.js"></script>
84
+ </body>
85
+ </html>
@@ -0,0 +1,9 @@
1
+ {
2
+ "name": "OpenPalm Voice",
3
+ "short_name": "Voice",
4
+ "start_url": "/",
5
+ "display": "standalone",
6
+ "background_color": "#ffffff",
7
+ "theme_color": "#ff9d00",
8
+ "description": "Voice-driven AI assistant"
9
+ }