openvoiceui 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +104 -0
- package/Dockerfile +30 -0
- package/LICENSE +21 -0
- package/README.md +638 -0
- package/SETUP.md +360 -0
- package/app.py +232 -0
- package/auto-approve-devices.js +111 -0
- package/cli/index.js +372 -0
- package/config/__init__.py +4 -0
- package/config/default.yaml +43 -0
- package/config/flags.yaml +67 -0
- package/config/loader.py +203 -0
- package/config/providers.yaml +71 -0
- package/config/speech_normalization.yaml +182 -0
- package/config/theme.json +4 -0
- package/data/greetings.json +25 -0
- package/default-pages/ai-image-creator.html +915 -0
- package/default-pages/bulk-image-uploader.html +492 -0
- package/default-pages/desktop.html +2865 -0
- package/default-pages/file-explorer.html +854 -0
- package/default-pages/interactive-map.html +655 -0
- package/default-pages/style-guide.html +1005 -0
- package/default-pages/website-setup.html +1623 -0
- package/deploy/openclaw/Dockerfile +46 -0
- package/deploy/openvoiceui.service +30 -0
- package/deploy/setup-nginx.sh +50 -0
- package/deploy/setup-sudo.sh +306 -0
- package/deploy/skill-runner/Dockerfile +19 -0
- package/deploy/skill-runner/requirements.txt +14 -0
- package/deploy/skill-runner/server.py +269 -0
- package/deploy/supertonic/Dockerfile +22 -0
- package/deploy/supertonic/server.py +79 -0
- package/docker-compose.pinokio.yml +11 -0
- package/docker-compose.yml +59 -0
- package/greetings.json +25 -0
- package/index.html +65 -0
- package/inject-device-identity.js +142 -0
- package/package.json +82 -0
- package/profiles/default.json +114 -0
- package/profiles/manager.py +354 -0
- package/profiles/schema.json +337 -0
- package/prompts/voice-system-prompt.md +149 -0
- package/providers/__init__.py +39 -0
- package/providers/base.py +63 -0
- package/providers/llm/__init__.py +12 -0
- package/providers/llm/base.py +71 -0
- package/providers/llm/clawdbot_provider.py +112 -0
- package/providers/llm/zai_provider.py +115 -0
- package/providers/registry.py +320 -0
- package/providers/stt/__init__.py +12 -0
- package/providers/stt/base.py +58 -0
- package/providers/stt/webspeech_provider.py +49 -0
- package/providers/stt/whisper_provider.py +100 -0
- package/providers/tts/__init__.py +20 -0
- package/providers/tts/base.py +91 -0
- package/providers/tts/groq_provider.py +74 -0
- package/providers/tts/supertonic_provider.py +72 -0
- package/requirements.txt +38 -0
- package/routes/__init__.py +10 -0
- package/routes/admin.py +515 -0
- package/routes/canvas.py +1315 -0
- package/routes/chat.py +51 -0
- package/routes/conversation.py +2158 -0
- package/routes/elevenlabs_hybrid.py +306 -0
- package/routes/greetings.py +98 -0
- package/routes/icons.py +279 -0
- package/routes/image_gen.py +364 -0
- package/routes/instructions.py +190 -0
- package/routes/music.py +838 -0
- package/routes/onboarding.py +43 -0
- package/routes/pi.py +62 -0
- package/routes/profiles.py +215 -0
- package/routes/report_issue.py +68 -0
- package/routes/static_files.py +533 -0
- package/routes/suno.py +664 -0
- package/routes/theme.py +81 -0
- package/routes/transcripts.py +199 -0
- package/routes/vision.py +348 -0
- package/routes/workspace.py +288 -0
- package/server.py +1510 -0
- package/services/__init__.py +1 -0
- package/services/auth.py +143 -0
- package/services/canvas_versioning.py +239 -0
- package/services/db_pool.py +107 -0
- package/services/gateway.py +16 -0
- package/services/gateway_manager.py +333 -0
- package/services/gateways/__init__.py +12 -0
- package/services/gateways/base.py +110 -0
- package/services/gateways/compat.py +264 -0
- package/services/gateways/openclaw.py +1134 -0
- package/services/health.py +100 -0
- package/services/memory_client.py +455 -0
- package/services/paths.py +26 -0
- package/services/speech_normalizer.py +285 -0
- package/services/tts.py +270 -0
- package/setup-config.js +262 -0
- package/sounds/air_horn.mp3 +0 -0
- package/sounds/bruh.mp3 +0 -0
- package/sounds/crowd_cheer.mp3 +0 -0
- package/sounds/gunshot.mp3 +0 -0
- package/sounds/impact.mp3 +0 -0
- package/sounds/lets_go.mp3 +0 -0
- package/sounds/record_stop.mp3 +0 -0
- package/sounds/rewind.mp3 +0 -0
- package/sounds/sad_trombone.mp3 +0 -0
- package/sounds/scratch_long.mp3 +0 -0
- package/sounds/yeah.mp3 +0 -0
- package/src/adapters/ClawdBotAdapter.js +264 -0
- package/src/adapters/_template.js +133 -0
- package/src/adapters/elevenlabs-classic.js +841 -0
- package/src/adapters/elevenlabs-hybrid.js +812 -0
- package/src/adapters/hume-evi.js +676 -0
- package/src/admin.html +1339 -0
- package/src/app.js +8802 -0
- package/src/core/Config.js +173 -0
- package/src/core/EmotionEngine.js +307 -0
- package/src/core/EventBridge.js +180 -0
- package/src/core/EventBus.js +117 -0
- package/src/core/VoiceSession.js +607 -0
- package/src/face/BaseFace.js +259 -0
- package/src/face/EyeFace.js +208 -0
- package/src/face/HaloSmokeFace.js +509 -0
- package/src/face/manifest.json +27 -0
- package/src/face/previews/eyes.svg +16 -0
- package/src/face/previews/orb.svg +29 -0
- package/src/features/MusicPlayer.js +620 -0
- package/src/features/Soundboard.js +128 -0
- package/src/providers/DeepgramSTT.js +472 -0
- package/src/providers/DeepgramStreamingSTT.js +766 -0
- package/src/providers/GroqSTT.js +559 -0
- package/src/providers/TTSPlayer.js +323 -0
- package/src/providers/WebSpeechSTT.js +479 -0
- package/src/providers/tts/BaseTTSProvider.js +81 -0
- package/src/providers/tts/HumeProvider.js +77 -0
- package/src/providers/tts/SupertonicProvider.js +174 -0
- package/src/providers/tts/index.js +140 -0
- package/src/shell/adapter-registry.js +154 -0
- package/src/shell/caller-bridge.js +35 -0
- package/src/shell/camera-bridge.js +28 -0
- package/src/shell/canvas-bridge.js +32 -0
- package/src/shell/commercial-bridge.js +44 -0
- package/src/shell/face-bridge.js +44 -0
- package/src/shell/music-bridge.js +60 -0
- package/src/shell/orchestrator.js +233 -0
- package/src/shell/profile-discovery.js +303 -0
- package/src/shell/sounds-bridge.js +28 -0
- package/src/shell/transcript-bridge.js +61 -0
- package/src/shell/waveform-bridge.js +33 -0
- package/src/styles/base.css +2862 -0
- package/src/styles/face.css +417 -0
- package/src/styles/pi-overrides.css +89 -0
- package/src/styles/theme-dark.css +67 -0
- package/src/test-tts.html +175 -0
- package/src/ui/AppShell.js +544 -0
- package/src/ui/ProfileSwitcher.js +228 -0
- package/src/ui/SessionControl.js +240 -0
- package/src/ui/face/FacePicker.js +195 -0
- package/src/ui/face/FaceRenderer.js +309 -0
- package/src/ui/settings/PlaylistEditor.js +366 -0
- package/src/ui/settings/SettingsPanel.css +684 -0
- package/src/ui/settings/SettingsPanel.js +419 -0
- package/src/ui/settings/TTSVoicePreview.js +210 -0
- package/src/ui/themes/ThemeManager.js +213 -0
- package/src/ui/visualizers/BaseVisualizer.js +29 -0
- package/src/ui/visualizers/PartyFXVisualizer.css +291 -0
- package/src/ui/visualizers/PartyFXVisualizer.js +637 -0
- package/static/emulators/jsdos/js-dos.css +1 -0
- package/static/emulators/jsdos/js-dos.js +22 -0
- package/static/favicon.svg +55 -0
- package/static/icons/apple-touch-icon.png +0 -0
- package/static/icons/favicon-32.png +0 -0
- package/static/icons/icon-192.png +0 -0
- package/static/icons/icon-512.png +0 -0
- package/static/install.html +449 -0
- package/static/manifest.json +26 -0
- package/static/sw.js +21 -0
- package/tts_providers/__init__.py +136 -0
- package/tts_providers/base_provider.py +319 -0
- package/tts_providers/groq_provider.py +155 -0
- package/tts_providers/hume_provider.py +226 -0
- package/tts_providers/providers_config.json +119 -0
- package/tts_providers/qwen3_provider.py +371 -0
- package/tts_providers/resemble_provider.py +315 -0
- package/tts_providers/supertonic_provider.py +557 -0
- package/tts_providers/supertonic_tts.py +399 -0
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
"""
|
|
2
|
+
routes/elevenlabs_hybrid.py — ElevenLabs + OpenClaw Hybrid Blueprint (P7-T5)
|
|
3
|
+
|
|
4
|
+
Provides two endpoints that form the bridge between ElevenLabs Conversational
|
|
5
|
+
AI (voice layer) and OpenClaw / Clawdbot Gateway (brain layer):
|
|
6
|
+
|
|
7
|
+
POST /api/elevenlabs-llm
|
|
8
|
+
Custom LLM endpoint configured in the ElevenLabs hybrid agent.
|
|
9
|
+
Receives the conversation context, extracts the latest user message,
|
|
10
|
+
forwards it to the Clawdbot Gateway using the persistent WebSocket
|
|
11
|
+
connection, strips canvas/HTML markers from the response, then
|
|
12
|
+
returns clean text to ElevenLabs in OpenAI-compatible SSE format
|
|
13
|
+
so ElevenLabs TTS can begin speaking as the first tokens arrive.
|
|
14
|
+
|
|
15
|
+
GET /api/canvas-pending
|
|
16
|
+
Side-channel for canvas commands extracted from OpenClaw responses.
|
|
17
|
+
Returns and clears the pending canvas command queue so the frontend
|
|
18
|
+
adapter (ElevenLabsHybridAdapter._startCanvasPolling) can load the
|
|
19
|
+
correct iframe without the agent reading HTML aloud.
|
|
20
|
+
|
|
21
|
+
Architecture:
|
|
22
|
+
Browser (ElevenLabs SDK)
|
|
23
|
+
→ POST /api/elevenlabs-llm (this module)
|
|
24
|
+
→ gateway_connection.stream_to_queue(session='voice-elevenlabs-hybrid')
|
|
25
|
+
← streaming text chunks
|
|
26
|
+
→ SSE to ElevenLabs TTS
|
|
27
|
+
|
|
28
|
+
OpenClaw response: "Dashboard ready! {canvas:present,url:/pages/stats.html} Check it out."
|
|
29
|
+
Spoken text: "Dashboard ready! Check it out."
|
|
30
|
+
Canvas queue: [{"action": "present", "url": "/pages/stats.html"}]
|
|
31
|
+
GET /api/canvas-pending returns → {"commands": [{"action": "present", "url": "..."}]}
|
|
32
|
+
|
|
33
|
+
Ref: future-dev-plans/16-ELEVENLABS-OPENCLAW-HYBRID.md
|
|
34
|
+
Ref: ADR-008 (Fallback chains — graceful degradation on Gateway unavailability)
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
import json
|
|
38
|
+
import logging
|
|
39
|
+
import os
|
|
40
|
+
import queue
|
|
41
|
+
import re
|
|
42
|
+
import threading
|
|
43
|
+
from collections import deque
|
|
44
|
+
|
|
45
|
+
from flask import Blueprint, Response, jsonify, request
|
|
46
|
+
|
|
47
|
+
from services.gateway import gateway_connection
|
|
48
|
+
|
|
49
|
+
logger = logging.getLogger(__name__)
|
|
50
|
+
|
|
51
|
+
# ---------------------------------------------------------------------------
# Blueprint
# ---------------------------------------------------------------------------

elevenlabs_hybrid_bp = Blueprint('elevenlabs_hybrid', __name__)

# ---------------------------------------------------------------------------
# Session key
# Separate from ClawdBot's voice-main-N key so histories don't collide.
# ---------------------------------------------------------------------------

HYBRID_SESSION_KEY = os.getenv('ELEVENLABS_HYBRID_SESSION_KEY', 'voice-elevenlabs-hybrid')

# Optional shared secret for validating requests from ElevenLabs
# (empty string disables the Bearer-token check in elevenlabs_custom_llm).
HYBRID_LLM_SECRET = os.getenv('ELEVENLABS_HYBRID_LLM_SECRET', '')

# ---------------------------------------------------------------------------
# Canvas command side-channel
# Thread-safe deque; items are dicts: {"action": "present"|"close", "url": str}
# Producers: the /api/elevenlabs-llm stream handler. Consumer: the
# GET /api/canvas-pending endpoint, which drains the queue. All access
# is guarded by _canvas_lock.
# ---------------------------------------------------------------------------

_canvas_pending: deque = deque()
_canvas_lock = threading.Lock()

# ---------------------------------------------------------------------------
# Canvas extraction helpers
# _CANVAS_PATTERN matches inline {canvas:<action>,url:<path>} markers;
#   group 1 is the action word, group 2 is everything up to the closing brace.
# _HTML_BLOCK_PATTERN matches fenced ```html ... ``` blocks (non-greedy,
#   case-insensitive) so raw markup is never read aloud by TTS.
# ---------------------------------------------------------------------------

_CANVAS_PATTERN = re.compile(r'\{canvas:(\w+),url:([^}]+)\}')
_HTML_BLOCK_PATTERN = re.compile(r'```html[\s\S]*?```', re.IGNORECASE)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _extract_canvas_commands(text: str) -> list:
    """Collect every {canvas:action,url:path} marker embedded in *text*.

    Returns a list of {"action": str, "url": str} dicts in document order;
    each url has surrounding whitespace trimmed.
    """
    return [
        {'action': action, 'url': url.strip()}
        for action, url in _CANVAS_PATTERN.findall(text)
    ]
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def _strip_canvas_markers(text: str) -> str:
    """Scrub *text* for speech: drop {canvas:...} markers and fenced
    ```html``` blocks so ElevenLabs TTS never reads them aloud.
    """
    spoken = _CANVAS_PATTERN.sub('', text)
    spoken = _HTML_BLOCK_PATTERN.sub('', spoken)
    return spoken.strip()
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _queue_canvas_commands(commands: list) -> None:
    """Append extracted canvas commands to the shared pending queue.

    No-op for an empty list; the lock guards against concurrent
    request handlers touching the deque.
    """
    if commands:
        with _canvas_lock:
            _canvas_pending.extend(commands)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
# ---------------------------------------------------------------------------
|
|
118
|
+
# POST /api/elevenlabs-llm — custom LLM endpoint
|
|
119
|
+
# ---------------------------------------------------------------------------
|
|
120
|
+
|
|
121
|
+
@elevenlabs_hybrid_bp.route('/api/elevenlabs-llm', methods=['POST'])
def elevenlabs_custom_llm():
    """
    Bridge ElevenLabs voice to OpenClaw brain.

    ElevenLabs sends the full conversation context in OpenAI chat format:
        {"messages": [{"role": "system"/"user"/"assistant", "content": "..."}]}

    We extract the latest user message, forward to Gateway, stream the
    response back as OpenAI-compatible SSE so ElevenLabs TTS can start
    speaking on the first sentence rather than waiting for the full reply.

    Fallback (ADR-008): if Gateway is unreachable, returns a graceful
    error response so ElevenLabs speaks an apology rather than hanging.
    """

    # ── Optional shared-secret auth ──────────────────────────────────────────
    # Skipped entirely when HYBRID_LLM_SECRET is unset (empty string).
    if HYBRID_LLM_SECRET:
        auth_header = request.headers.get('Authorization', '')
        if auth_header != f'Bearer {HYBRID_LLM_SECRET}':
            return jsonify({'error': 'Unauthorized'}), 401

    # ── Parse request ─────────────────────────────────────────────────────────
    data = request.get_json(silent=True) or {}
    messages = data.get('messages', [])

    if not messages:
        return _openai_error_response("No messages provided"), 400

    # Extract the latest user turn (last message with role 'user')
    user_message = ''
    for msg in reversed(messages):
        if msg.get('role') == 'user':
            user_message = msg.get('content', '')
            break

    if not user_message:
        return _openai_error_response("No user message found in context"), 400

    logger.info(f"[ElevenLabsHybrid] Custom LLM request: {user_message[:80]!r}")

    # ── Check Gateway availability ────────────────────────────────────────────
    # Fallback text is spoken verbatim by ElevenLabs (ADR-008).
    if not gateway_connection.is_configured():
        logger.warning('[ElevenLabsHybrid] Gateway not configured — returning fallback response')
        return _openai_sync_response(
            "Sorry, my connection to the server brain is not configured right now. "
            "Please check the CLAWDBOT_AUTH_TOKEN environment variable."
        )

    # ── Stream from Gateway → SSE to ElevenLabs ──────────────────────────────

    def generate():
        """Generator: reads Gateway events, yields OpenAI SSE chunks."""
        event_queue: queue.Queue = queue.Queue()
        captured_actions = []       # populated by stream_to_queue (side channel)
        full_response_parts = []    # delta texts, joined for canvas extraction

        # Run stream_to_queue in a background thread so we can yield from
        # the main thread (Flask's response generator must be synchronous).
        stream_thread = threading.Thread(
            target=gateway_connection.stream_to_queue,
            args=(event_queue, user_message, HYBRID_SESSION_KEY, captured_actions),
            daemon=True,
        )
        stream_thread.start()

        try:
            while True:
                try:
                    # 60s of silence from the Gateway ends the stream.
                    event = event_queue.get(timeout=60)
                except queue.Empty:
                    logger.warning('[ElevenLabsHybrid] Gateway stream timeout')
                    break

                etype = event.get('type')

                if etype == 'delta':
                    chunk_text = event.get('text', '')
                    if chunk_text:
                        full_response_parts.append(chunk_text)
                        # Strip canvas markers from streaming chunks
                        # NOTE(review): a marker split across two delta chunks
                        # would slip through this per-chunk strip — confirm
                        # the Gateway never splits markers mid-chunk.
                        clean_chunk = _strip_canvas_markers(chunk_text)
                        if clean_chunk:
                            yield _sse_delta(clean_chunk)

                elif etype == 'text_done':
                    # Prefer the Gateway's own final text; fall back to the
                    # concatenation of the deltas we saw.
                    full_response = event.get('response') or ''.join(full_response_parts)
                    # Extract canvas commands from the full response
                    canvas_cmds = _extract_canvas_commands(full_response)
                    _queue_canvas_commands(canvas_cmds)
                    if canvas_cmds:
                        logger.info(f'[ElevenLabsHybrid] Queued {len(canvas_cmds)} canvas command(s)')
                    break

                elif etype == 'error':
                    error_msg = event.get('error', 'Unknown Gateway error')
                    logger.error(f'[ElevenLabsHybrid] Gateway error: {error_msg}')
                    # Speak an apology instead of going silent (ADR-008).
                    yield _sse_delta("I'm having trouble connecting right now. Please try again.")
                    break

                elif etype == 'handshake':
                    # Connection established — nothing to yield to ElevenLabs
                    continue

                # 'action' events: tool use / lifecycle events — log only
                elif etype == 'action':
                    action_name = event.get('action', {}).get('type', 'unknown')
                    logger.debug(f'[ElevenLabsHybrid] Gateway action: {action_name}')

        finally:
            # Always terminate the SSE stream, even on timeout or error,
            # so the ElevenLabs client knows the turn is over.
            yield 'data: [DONE]\n\n'

            stream_thread.join(timeout=5)

    return Response(
        generate(),
        mimetype='text/event-stream',
        headers={
            'Cache-Control': 'no-cache',
            'X-Accel-Buffering': 'no',  # disable nginx buffering for SSE
        },
    )
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
# ---------------------------------------------------------------------------
|
|
246
|
+
# GET /api/canvas-pending — canvas command side-channel
|
|
247
|
+
# ---------------------------------------------------------------------------
|
|
248
|
+
|
|
249
|
+
@elevenlabs_hybrid_bp.route('/api/canvas-pending', methods=['GET'])
def canvas_pending():
    """
    Drain and return the pending canvas command queue.

    The ElevenLabsHybridAdapter frontend polls this endpoint every second
    during a hybrid conversation. When OpenClaw creates a canvas page, the
    command appears here; the frontend then loads the iframe.

    Response:
        {"commands": [{"action": "present", "url": "/pages/stats.html"}, ...]}

    Commands are consumed (cleared) on each call.
    """
    drained = []
    with _canvas_lock:
        while _canvas_pending:
            drained.append(_canvas_pending.popleft())

    return jsonify({'commands': drained})
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
# ---------------------------------------------------------------------------
|
|
271
|
+
# Helpers
|
|
272
|
+
# ---------------------------------------------------------------------------
|
|
273
|
+
|
|
274
|
+
def _sse_delta(text: str) -> str:
|
|
275
|
+
"""Format a text chunk as an OpenAI-compatible SSE delta event."""
|
|
276
|
+
payload = json.dumps({
|
|
277
|
+
'choices': [{
|
|
278
|
+
'delta': {
|
|
279
|
+
'role': 'assistant',
|
|
280
|
+
'content': text,
|
|
281
|
+
},
|
|
282
|
+
'finish_reason': None,
|
|
283
|
+
}]
|
|
284
|
+
})
|
|
285
|
+
return f'data: {payload}\n\n'
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
def _openai_sync_response(text: str):
    """Build a complete (non-streaming) OpenAI-compatible chat response.

    Used on fallback / error paths where the whole reply is known up front.
    """
    message = {
        'role': 'assistant',
        'content': text,
    }
    body = {'choices': [{'message': message, 'finish_reason': 'stop'}]}
    return jsonify(body)
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
def _openai_error_response(message: str):
    """Build an OpenAI-compatible error body for a malformed request."""
    error = {'message': message, 'type': 'invalid_request_error'}
    return jsonify({'error': error})
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
"""
|
|
2
|
+
routes/greetings.py — Greetings API
|
|
3
|
+
|
|
4
|
+
GET /api/greetings — return full greetings.json
|
|
5
|
+
GET /api/greetings/random — return a single random greeting (optional ?user=mike)
|
|
6
|
+
POST /api/greetings/add — append a contextual greeting (agent use)
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import random
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from flask import Blueprint, jsonify, request
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
greetings_bp = Blueprint('greetings', __name__)
|
|
19
|
+
|
|
20
|
+
# greetings.json lives at the package root, one level above routes/.
GREETINGS_PATH = Path(__file__).parent.parent / 'greetings.json'
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _load() -> dict:
    """Read greetings.json; on any failure return a minimal built-in fallback."""
    try:
        return json.loads(GREETINGS_PATH.read_text())
    except Exception as exc:  # deliberate broad catch: a missing/corrupt file must not 500
        logger.error(f'Failed to load greetings.json: {exc}')
        return {
            'greetings': {
                'generic': {'classic_annoyed': ['What do you want?']},
                'mike': {},
                'contextual': [],
            }
        }
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _save(data: dict) -> None:
    """Write greetings.json via a temp file + rename so readers never
    observe a partially written file.
    """
    serialized = json.dumps(data, indent=2)
    scratch = GREETINGS_PATH.with_suffix('.tmp')
    scratch.write_text(serialized)
    scratch.replace(GREETINGS_PATH)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@greetings_bp.route('/api/greetings', methods=['GET'])
def get_greetings():
    """Return the entire greetings.json document as JSON."""
    data = _load()
    return jsonify(data)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@greetings_bp.route('/api/greetings/random', methods=['GET'])
def random_greeting():
    """Pick one greeting at random. Pass ?user=mike for Mike-specific categories.

    A queued ``next_greeting`` (set by the agent) is consumed first if present;
    otherwise contextual greetings carry triple weight against the other pools.
    """
    who = request.args.get('user', '').lower().strip()
    data = _load()
    greetings = data.get('greetings', {})

    # A one-shot queued greeting wins over any random selection.
    queued = data.get('next_greeting')
    if queued:
        data['next_greeting'] = None
        _save(data)
        return jsonify({'greeting': queued, 'category': 'queued', 'user': who})

    candidates = []

    # Contextual greetings: highest priority, triple weight.
    candidates += greetings.get('contextual', []) * 3

    # User-specific categories when the caller is recognized.
    if who == 'mike':
        for entries in greetings.get('mike', {}).values():
            candidates += entries

    # Generic greetings are always in the pool.
    for entries in greetings.get('generic', {}).values():
        candidates += entries

    if not candidates:
        return jsonify({'greeting': 'What do you want?', 'category': 'fallback', 'user': who})

    chosen = random.choice(candidates)
    return jsonify({'greeting': chosen, 'category': 'random', 'user': who})
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@greetings_bp.route('/api/greetings/add', methods=['POST'])
def add_greeting():
    """Agent can queue a contextual greeting for the next session start.

    Body: {"greeting": "<text, max 300 chars>"}
    Returns {"ok": true, "total_contextual": N}, or 400 on a missing /
    over-long greeting.
    """
    body = request.get_json(silent=True) or {}
    greeting = (body.get('greeting') or '').strip()
    if not greeting:
        return jsonify({'ok': False, 'error': 'Missing greeting'}), 400
    if len(greeting) > 300:
        return jsonify({'ok': False, 'error': 'Greeting too long (max 300 chars)'}), 400

    data = _load()
    # Robustness fix: a valid greetings.json without a top-level 'greetings'
    # key previously raised KeyError (500). setdefault tolerates it.
    buckets = data.setdefault('greetings', {})
    contextual = buckets.get('contextual', [])
    contextual.append(greeting)
    buckets['contextual'] = contextual[-20:]  # keep last 20
    _save(data)
    # NOTE(review): load → mutate → save is not atomic across workers; a
    # concurrent add could be lost. Acceptable for this low-traffic endpoint.
    logger.info(f'Contextual greeting added: {greeting[:80]}')
    return jsonify({'ok': True, 'total_contextual': len(buckets['contextual'])})
|
package/routes/icons.py
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Icon library & AI icon generation.
|
|
3
|
+
|
|
4
|
+
Static icons:
|
|
5
|
+
GET /api/icons/library → list all icon names
|
|
6
|
+
GET /api/icons/library/search?q=<term> → search icons by name
|
|
7
|
+
GET /api/icons/library/<name>.svg → serve a Lucide SVG icon
|
|
8
|
+
|
|
9
|
+
Generated icons:
|
|
10
|
+
POST /api/icons/generate → generate icon via Gemini
|
|
11
|
+
GET /api/icons/generated → list user's generated icons
|
|
12
|
+
GET /api/icons/generated/<filename> → serve a generated icon
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import os
|
|
16
|
+
import re
|
|
17
|
+
import json
|
|
18
|
+
import base64
|
|
19
|
+
import hashlib
|
|
20
|
+
import time
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
import requests
|
|
24
|
+
from flask import Blueprint, jsonify, request, send_file, Response
|
|
25
|
+
|
|
26
|
+
from services.paths import RUNTIME_DIR
|
|
27
|
+
|
|
28
|
+
icons_bp = Blueprint('icons', __name__)
|
|
29
|
+
|
|
30
|
+
# ── Static icon library (Lucide SVGs, shared across all clients) ──
# NOTE(review): hard-coded absolute path — presumably provisioned by the
# deploy scripts; _get_icon_list degrades to an empty list if it's absent.
LUCIDE_DIR = Path('/mnt/system/base/icons/lucide')

# ── Per-user generated icons ──
GENERATED_DIR = RUNTIME_DIR / 'icons' / 'generated'

# ── Gemini config ──
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', '')
GEMINI_MODEL = 'gemini-2.5-flash-image'
GEMINI_URL = f'https://generativelanguage.googleapis.com/v1beta/models/{GEMINI_MODEL}:generateContent'

# Cache icon list (rebuilt on first request)
# Never invalidated at runtime: adding SVGs requires a process restart.
_icon_list_cache = None
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _get_icon_list():
    """Return the sorted Lucide icon names, computed once and memoized.

    Yields an empty list when the library directory does not exist.
    """
    global _icon_list_cache
    if _icon_list_cache is None:
        names = []
        if LUCIDE_DIR.exists():
            names = [svg.stem for svg in LUCIDE_DIR.glob('*.svg')]
            names.sort()
        _icon_list_cache = names
    return _icon_list_cache
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def _ensure_generated_dir():
    """Ensure the generated-icons directory exists and return it."""
    target = GENERATED_DIR
    target.mkdir(parents=True, exist_ok=True)
    return target
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# ══════════════════════════════════════════════════════════════
|
|
65
|
+
# STATIC ICON LIBRARY
|
|
66
|
+
# ══════════════════════════════════════════════════════════════
|
|
67
|
+
|
|
68
|
+
@icons_bp.route('/api/icons/library')
def list_icons():
    """Return every available Lucide icon name plus a count."""
    names = _get_icon_list()
    return jsonify({
        'count': len(names),
        'icons': names,
    })
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@icons_bp.route('/api/icons/library/search')
def search_icons():
    """Search icons by name. ?q=folder&limit=20

    Exact-prefix matches are ranked before plain substring matches.
    ``limit`` is clamped to 0..200 and falls back to 50 when it is not
    a valid integer (previously a non-numeric limit raised ValueError → 500,
    and a negative limit silently dropped results via a negative slice).
    """
    q = request.args.get('q', '').lower().strip()
    try:
        limit = int(request.args.get('limit', 50))
    except (TypeError, ValueError):
        limit = 50  # robustness: ignore a malformed limit instead of 500-ing
    limit = max(0, min(limit, 200))

    if not q:
        return jsonify({'error': 'Missing ?q= parameter'}), 400

    icons = _get_icon_list()
    # Exact prefix matches first, then contains. Using startswith() avoids the
    # old O(n^2) `n not in prefix` list-membership scan; result set identical.
    prefix = [n for n in icons if n.startswith(q)]
    contains = [n for n in icons if q in n and not n.startswith(q)]
    results = (prefix + contains)[:limit]

    return jsonify({
        'query': q,
        'count': len(results),
        'icons': results,
    })
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
@icons_bp.route('/api/icons/library/<name>.svg')
def serve_icon(name):
    """Serve a Lucide SVG icon by name.

    The name is sanitized to [a-z0-9-] before touching the filesystem,
    so path traversal via the URL segment is not possible.
    """
    slug = re.sub(r'[^a-z0-9\-]', '', name.lower())
    icon_path = LUCIDE_DIR / f'{slug}.svg'

    if not icon_path.exists():
        return Response('<!-- icon not found -->', status=404, mimetype='image/svg+xml')

    # Library icons are immutable, so let clients cache for a day.
    return send_file(str(icon_path), mimetype='image/svg+xml', max_age=86400)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# ══════════════════════════════════════════════════════════════
|
|
115
|
+
# AI ICON GENERATION (Gemini)
|
|
116
|
+
# ══════════════════════════════════════════════════════════════
|
|
117
|
+
|
|
118
|
+
@icons_bp.route('/api/icons/generate', methods=['POST'])
def generate_icon():
    """
    Generate a custom icon via Gemini image generation.

    POST body:
      { "prompt": "description of icon",
        "name": "optional-filename-slug",
        "style": "optional style override" }

    Returns:
      { "url": "/api/icons/generated/my-icon.png",
        "name": "my-icon",
        "filename": "my-icon.png",
        "prompt": "...",
        "size": <bytes> }

    Errors: 400 on missing prompt, 500 when GEMINI_API_KEY is unset,
    502 when the upstream call fails or returns no image.
    """
    if not GEMINI_API_KEY:
        return jsonify({'error': 'GEMINI_API_KEY not configured'}), 500

    data = request.get_json(silent=True) or {}
    user_prompt = data.get('prompt', '').strip()
    if not user_prompt:
        return jsonify({'error': 'Missing "prompt" field'}), 400

    name_slug = data.get('name', '').strip()
    style = data.get('style', '').strip()

    # Build the generation prompt
    style_instruction = style or (
        'Windows XP style icon, clean vector art, vibrant colors, '
        'slight 3D shading, white or transparent background'
    )
    full_prompt = (
        f'Generate a single app icon: {user_prompt}. '
        f'Style: {style_instruction}. '
        f'The icon should be simple, recognizable at 48x48 pixels, centered on the canvas, '
        f'with no text or labels. Square aspect ratio. Professional quality.'
    )

    # Generate filename: caller-supplied slug, else derived from the prompt,
    # else a hash-based fallback so we always have a non-empty name.
    if not name_slug:
        # Derive from prompt
        name_slug = re.sub(r'[^a-z0-9]+', '-', user_prompt.lower())[:40].strip('-')
    safe_name = re.sub(r'[^a-z0-9\-]', '', name_slug)
    if not safe_name:
        safe_name = 'icon-' + hashlib.md5(user_prompt.encode()).hexdigest()[:8]

    # Call Gemini API
    try:
        resp = requests.post(
            f'{GEMINI_URL}?key={GEMINI_API_KEY}',
            json={
                'contents': [{'parts': [{'text': full_prompt}]}],
                'generationConfig': {
                    'responseModalities': ['IMAGE', 'TEXT'],
                },
            },
            timeout=60,
        )
        resp.raise_for_status()
        result = resp.json()
    except requests.RequestException as e:
        return jsonify({'error': f'Gemini API error: {str(e)}'}), 502

    # Extract the first inline image part from the response.
    image_data = None
    mime_type = 'image/png'
    try:
        for candidate in result.get('candidates', []):
            for part in candidate.get('content', {}).get('parts', []):
                if 'inlineData' in part:
                    image_data = base64.b64decode(part['inlineData']['data'])
                    mime_type = part['inlineData'].get('mimeType', 'image/png')
                    break
            if image_data:
                break
    except (KeyError, TypeError):
        pass

    if not image_data:
        return jsonify({
            'error': 'Gemini did not return an image',
            'raw': result.get('candidates', [{}])[0].get('content', {}).get('parts', []),
        }), 502

    # Determine extension from the reported MIME type (default .png)
    ext = '.png'
    if 'jpeg' in mime_type:
        ext = '.jpg'
    elif 'webp' in mime_type:
        ext = '.webp'

    # Save to server immediately (NEVER lose generated content)
    out_dir = _ensure_generated_dir()
    filename = f'{safe_name}{ext}'
    out_path = out_dir / filename

    # Don't overwrite — add timestamp suffix
    if out_path.exists():
        filename = f'{safe_name}-{int(time.time())}{ext}'
        out_path = out_dir / filename

    out_path.write_bytes(image_data)

    # Save metadata alongside, keyed by the image filename so that
    # list_generated() can find it as '<filename>.meta.json'.
    # (Previously this wrote a fixed '(unknown).meta.json', which was
    # clobbered on every generation and never matched by the listing.)
    meta_path = out_dir / f'{filename}.meta.json'
    meta_path.write_text(json.dumps({
        'prompt': user_prompt,
        'full_prompt': full_prompt,
        'style': style_instruction,
        'generated_at': time.strftime('%Y-%m-%d %H:%M:%S'),
        'size': len(image_data),
        'mime': mime_type,
    }, indent=2))

    # URL must reference the actual saved file (was a literal
    # '/api/icons/generated/(unknown)', a dead link).
    url = f'/api/icons/generated/{filename}'

    return jsonify({
        'url': url,
        'name': safe_name,
        'filename': filename,
        'prompt': user_prompt,
        'size': len(image_data),
    })
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# ══════════════════════════════════════════════════════════════
|
|
244
|
+
# GENERATED ICONS — LIST & SERVE
|
|
245
|
+
# ══════════════════════════════════════════════════════════════
|
|
246
|
+
|
|
247
|
+
@icons_bp.route('/api/icons/generated')
def list_generated():
    """List the user's generated icons, newest first.

    Response JSON: {"count", "icons": [{name, filename, url, size,
    prompt, generated_at}, ...]}.
    """
    out_dir = _ensure_generated_dir()
    newest_first = sorted(
        out_dir.iterdir(), key=lambda f: f.stat().st_mtime, reverse=True
    )

    entries = []
    for entry in newest_first:
        # Only image files; skip metadata sidecars.
        if entry.suffix not in ('.png', '.jpg', '.jpeg', '.webp'):
            continue
        if entry.name.endswith('.meta.json'):
            continue

        meta = {}
        sidecar = out_dir / f'{entry.name}.meta.json'
        if sidecar.exists():
            try:
                meta = json.loads(sidecar.read_text())
            except Exception:
                # Corrupt sidecar: fall back to empty metadata.
                meta = {}

        entries.append({
            'name': entry.stem,
            'filename': entry.name,
            'url': f'/api/icons/generated/{entry.name}',
            'size': entry.stat().st_size,
            'prompt': meta.get('prompt', ''),
            'generated_at': meta.get('generated_at', ''),
        })

    return jsonify({'count': len(entries), 'icons': entries})
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
@icons_bp.route('/api/icons/generated/<filename>')
def serve_generated(filename):
    """Serve a generated icon."""
    # Keep word characters, dots and hyphens only; path separators are
    # stripped so the request cannot point outside the icons directory.
    cleaned = re.sub(r'[^\w.\-]', '', filename)
    target = _ensure_generated_dir() / cleaned
    if target.exists():
        return send_file(str(target), max_age=3600)
    return jsonify({'error': 'Not found'}), 404
|