@makemore/agent-frontend 1.5.0 → 1.6.1
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/README.md +68 -5
- package/dist/chat-widget.js +68 -16
- package/package.json +1 -1
package/README.md
CHANGED
@@ -131,6 +131,9 @@ See `django-tts-example.py` for the complete Django backend implementation.
       anonymousSession: '/api/auth/session/',
       runs: '/api/chat/runs/',
       runEvents: '/api/chat/runs/{runId}/events/',
+      simulateCustomer: '/api/chat/simulate-customer/',
+      ttsVoices: '/api/tts/voices/', // For voice settings UI (proxy mode)
+      ttsSetVoice: '/api/tts/set-voice/', // For voice settings UI (proxy mode)
     },
   });
 </script>
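The hunk above shows only the inside of the `apiPaths` block. A minimal embed sketch of where the two new keys sit, assuming the widget is initialized with a `ChatWidget.init({ ... })` call (the call itself is outside this hunk, so its name, the `backendUrl` value, and the `ttsProxyUrl` wiring are assumptions; only the `apiPaths` keys and the `enableTTS` / `ttsProxyUrl` / `showVoiceSettings` option names are confirmed elsewhere in this diff):

```javascript
// Sketch only — ChatWidget.init, the backendUrl value, and the ttsProxyUrl
// wiring are assumptions; the diff confirms only the apiPaths keys and the
// option names used elsewhere in this release.
ChatWidget.init({
  backendUrl: 'https://example.com',   // placeholder host
  enableTTS: true,
  ttsProxyUrl: '/api/tts/speak/',      // assumption: proxy mode pointing at the Django speak route
  showVoiceSettings: true,             // 🎙️ button, now usable in proxy mode too
  apiPaths: {
    anonymousSession: '/api/auth/session/',
    runs: '/api/chat/runs/',
    runEvents: '/api/chat/runs/{runId}/events/',
    simulateCustomer: '/api/chat/simulate-customer/',
    ttsVoices: '/api/tts/voices/',      // new: voice settings UI (proxy mode)
    ttsSetVoice: '/api/tts/set-voice/', // new: voice settings UI (proxy mode)
  },
});
```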
@@ -169,7 +172,7 @@ See `django-tts-example.py` for the complete Django backend implementation.
 | `showClearButton` | boolean | `true` | Show clear conversation button in header |
 | `showDebugButton` | boolean | `true` | Show debug mode toggle button in header |
 | `showTTSButton` | boolean | `true` | Show TTS toggle button in header |
-| `showVoiceSettings` | boolean | `true` | Show voice settings button in header (direct API
+| `showVoiceSettings` | boolean | `true` | Show voice settings button in header (works with proxy and direct API) |
 | `showExpandButton` | boolean | `true` | Show expand/minimize button in header |
 
 ### Text-to-Speech (ElevenLabs)
@@ -201,8 +204,66 @@ ELEVENLABS_VOICES = {
     'user': 'pNInz6obpgDQGcFmaJgB',  # Adam
 }
 ```
-3. Add
-4. Add URL
+3. Add views from `django-tts-example.py` to your Django app
+4. Add URL routes:
+```python
+path('api/tts/speak/', views.text_to_speech),
+path('api/tts/voices/', views.get_voices),      # For voice settings UI
+path('api/tts/set-voice/', views.set_voice),    # For voice settings UI
+```
+
+**Voice Settings Support:**
+
+The widget now supports voice settings UI in proxy mode! Add these endpoints to enable the voice picker:
+
+```python
+# Get available voices
+@api_view(['GET'])
+def get_voices(request):
+    """Fetch available voices from ElevenLabs"""
+    try:
+        response = requests.get(
+            'https://api.elevenlabs.io/v1/voices',
+            headers={'xi-api-key': settings.ELEVENLABS_API_KEY}
+        )
+        return JsonResponse(response.json())
+    except Exception as e:
+        return JsonResponse({'error': str(e)}, status=500)
+
+# Set voice for user session
+@api_view(['POST'])
+def set_voice(request):
+    """Update voice preference for user's session"""
+    role = request.data.get('role')  # 'assistant' or 'user'
+    voice_id = request.data.get('voice_id')
+
+    # Store in session or database
+    if not hasattr(request, 'session'):
+        return JsonResponse({'error': 'Session not available'}, status=400)
+
+    if role not in ['assistant', 'user']:
+        return JsonResponse({'error': 'Invalid role'}, status=400)
+
+    # Store voice preference in session
+    if 'tts_voices' not in request.session:
+        request.session['tts_voices'] = {}
+    request.session['tts_voices'][role] = voice_id
+    request.session.modified = True
+
+    return JsonResponse({'success': True, 'role': role, 'voice_id': voice_id})
+
+# Update text_to_speech view to use session voices
+@api_view(['POST'])
+def text_to_speech(request):
+    text = request.data.get('text', '')
+    role = request.data.get('role', 'assistant')
+
+    # Get voice from session or fall back to settings
+    session_voices = request.session.get('tts_voices', {})
+    voice_id = session_voices.get(role) or settings.ELEVENLABS_VOICES.get(role)
+
+    # ... rest of TTS implementation
+```
 
 #### Option 2: Direct API (Client-Side)
 
@@ -249,10 +310,12 @@ ChatWidget.setVoice('user', 'voice_id'); // Change user voice
 
 **Voice Settings UI:**
 
-
+A voice settings button (🎙️) appears in the header when TTS is enabled. Click it to:
 - Select assistant voice from dropdown
 - Select customer voice for demo mode
-- Voices are automatically fetched from your ElevenLabs account
+- Voices are automatically fetched from your ElevenLabs account (direct API) or Django backend (proxy mode)
+
+**Works with both proxy and direct API modes!** Just implement the `/api/tts/voices/` and `/api/tts/set-voice/` endpoints in your Django backend (see above).
 
 **Customize Header Buttons:**
 ```javascript
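For reference, the two proxy-mode requests the widget makes against these endpoints (matching the `fetchAvailableVoices` and `setVoice` changes in `dist/chat-widget.js` below) look roughly like this. The `X-Anon-Token` header name is a placeholder — the widget reads the real name from `config.anonymousTokenHeader`, which is not shown in this diff — and the voice ID is the "Adam" ID from the README's `ELEVENLABS_VOICES` example.

```javascript
// Sketch of the proxy-mode voice calls (run inside an async context).
// Header name and voice ID are placeholders, not values taken from the package.
const headers = { 'X-Anon-Token': '<session-token>' };

// 1. Populate the voice picker: GET ttsVoices → { voices: [...] }
//    (the Django get_voices view relays the ElevenLabs /v1/voices payload)
const voices = await fetch('/api/tts/voices/', { headers })
  .then((res) => res.json())
  .then((data) => data.voices || []);

// 2. Persist a selection: POST ttsSetVoice with { role, voice_id }
//    The Django set_voice view above replies { success: true, role, voice_id }.
await fetch('/api/tts/set-voice/', {
  method: 'POST',
  headers: { ...headers, 'Content-Type': 'application/json' },
  body: JSON.stringify({ role: 'user', voice_id: 'pNInz6obpgDQGcFmaJgB' }),
});
```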
package/dist/chat-widget.js
CHANGED
@@ -42,6 +42,8 @@
       runs: '/api/agent-runtime/runs/',
       runEvents: '/api/agent-runtime/runs/{runId}/events/',
       simulateCustomer: '/api/agent-runtime/simulate-customer/',
+      ttsVoices: '/api/tts/voices/', // For fetching available voices (proxy mode)
+      ttsSetVoice: '/api/tts/set-voice/', // For setting voice (proxy mode)
     },
     // Demo flow control
     autoRunDelay: 1000, // Delay in ms before auto-generating next message
@@ -298,25 +300,69 @@
     render();
   }
 
-  function setVoice(role, voiceId) {
+  async function setVoice(role, voiceId) {
     config.ttsVoices[role] = voiceId;
+
+    // If using proxy, notify backend of voice change
+    if (config.ttsProxyUrl) {
+      try {
+        const token = await getOrCreateSession();
+        const headers = {
+          'Content-Type': 'application/json',
+        };
+        if (token) {
+          headers[config.anonymousTokenHeader] = token;
+        }
+
+        await fetch(`${config.backendUrl}${config.apiPaths.ttsSetVoice}`, {
+          method: 'POST',
+          headers,
+          body: JSON.stringify({ role, voice_id: voiceId }),
+        });
+      } catch (err) {
+        console.error('[ChatWidget] Failed to set voice on backend:', err);
+      }
+    }
+
     render();
   }
 
   async function fetchAvailableVoices() {
-    if (!config.elevenLabsApiKey) return;
-
     try {
-
-        headers: {
-          'xi-api-key': config.elevenLabsApiKey,
-        },
-      });
+      let voices = [];
 
-      if (
-
-
+      if (config.ttsProxyUrl) {
+        // Fetch voices from Django backend
+        const token = await getOrCreateSession();
+        const headers = {};
+        if (token) {
+          headers[config.anonymousTokenHeader] = token;
+        }
+
+        const response = await fetch(`${config.backendUrl}${config.apiPaths.ttsVoices}`, {
+          headers,
+        });
+
+        if (response.ok) {
+          const data = await response.json();
+          voices = data.voices || [];
+        }
+      } else if (config.elevenLabsApiKey) {
+        // Fetch voices directly from ElevenLabs
+        const response = await fetch('https://api.elevenlabs.io/v1/voices', {
+          headers: {
+            'xi-api-key': config.elevenLabsApiKey,
+          },
+        });
+
+        if (response.ok) {
+          const data = await response.json();
+          voices = data.voices || [];
+        }
       }
+
+      config.availableVoices = voices;
+      render(); // Re-render to update dropdowns
     } catch (err) {
       console.error('[ChatWidget] Failed to fetch voices:', err);
     }
@@ -599,7 +645,7 @@
     state.isSimulating = false;
 
     // Speak simulated user message if TTS enabled
-    if (config.enableTTS
+    if (config.enableTTS) {
       await speakText(data.response, 'user');
     }
 
@@ -624,9 +670,15 @@
 
     const journey = config.journeyTypes[journeyType];
     if (journey?.initialMessage) {
-      setTimeout(() => {
+      setTimeout(async () => {
         state.isSimulating = true;
         render();
+
+        // Speak initial message if TTS enabled
+        if (config.enableTTS) {
+          await speakText(journey.initialMessage, 'user');
+        }
+
         sendMessage(journey.initialMessage).then(() => {
           state.isSimulating = false;
           render();
@@ -922,7 +974,7 @@
           ${state.isSpeaking ? '🔊' : (config.enableTTS ? '🔉' : '🔇')}
         </button>
       ` : ''}
-      ${config.showVoiceSettings && config.elevenLabsApiKey
+      ${config.showVoiceSettings && (config.elevenLabsApiKey || config.ttsProxyUrl) ? `
         <button class="cw-header-btn ${state.voiceSettingsOpen ? 'cw-btn-active' : ''}" data-action="toggle-voice-settings" title="Voice Settings">
           🎙️
         </button>
@@ -1055,8 +1107,8 @@
   // Initial render
   render();
 
-  // Fetch available voices if
-  if (config.elevenLabsApiKey
+  // Fetch available voices if TTS is configured
+  if (config.elevenLabsApiKey || config.ttsProxyUrl) {
    fetchAvailableVoices();
   }
 
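Net effect for embedders: the documented `ChatWidget.setVoice(role, voiceId)` call now also persists the choice to the Django session when `ttsProxyUrl` is set, and available voices are fetched at startup in both proxy and direct-API modes. A usage sketch — whether the public wrapper forwards the promise from the now-async internal `setVoice` is not visible in this diff, so the `await` is an assumption, and the voice ID is again the README's Adam example:

```javascript
// Assumption: the public ChatWidget.setVoice wrapper returns the promise from
// the now-async internal setVoice; 'pNInz6obpgDQGcFmaJgB' is the Adam voice ID
// used as an example in the README, not a required value.
await ChatWidget.setVoice('user', 'pNInz6obpgDQGcFmaJgB');

// In proxy mode (config.ttsProxyUrl set) this also issues
//   POST `${backendUrl}${apiPaths.ttsSetVoice}` with body { role: 'user', voice_id: '...' }
// so later proxy TTS calls pick up the session-stored voice on the backend.
```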
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@makemore/agent-frontend",
-  "version": "1.5.0",
+  "version": "1.6.1",
   "description": "A standalone, zero-dependency chat widget for AI agents. Embed conversational AI into any website with a single script tag.",
   "main": "dist/chat-widget.js",
   "files": [