@agentunion/kite 1.0.7 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core/event_hub/entry.py +305 -26
- package/core/event_hub/hub.py +8 -0
- package/core/event_hub/server.py +80 -17
- package/core/kite_log.py +241 -0
- package/core/launcher/entry.py +978 -284
- package/core/launcher/process_manager.py +456 -46
- package/core/registry/entry.py +272 -3
- package/core/registry/server.py +339 -289
- package/core/registry/store.py +10 -4
- package/extensions/agents/__init__.py +1 -0
- package/extensions/agents/assistant/__init__.py +1 -0
- package/extensions/agents/assistant/entry.py +380 -0
- package/extensions/agents/assistant/module.md +22 -0
- package/extensions/agents/assistant/server.py +236 -0
- package/extensions/channels/__init__.py +1 -0
- package/extensions/channels/acp_channel/__init__.py +1 -0
- package/extensions/channels/acp_channel/entry.py +380 -0
- package/extensions/channels/acp_channel/module.md +22 -0
- package/extensions/channels/acp_channel/server.py +236 -0
- package/extensions/event_hub_bench/entry.py +664 -379
- package/extensions/event_hub_bench/module.md +2 -1
- package/extensions/services/backup/__init__.py +1 -0
- package/extensions/services/backup/entry.py +380 -0
- package/extensions/services/backup/module.md +22 -0
- package/extensions/services/backup/server.py +244 -0
- package/extensions/services/model_service/__init__.py +1 -0
- package/extensions/services/model_service/entry.py +380 -0
- package/extensions/services/model_service/module.md +22 -0
- package/extensions/services/model_service/server.py +236 -0
- package/extensions/services/watchdog/entry.py +460 -147
- package/extensions/services/watchdog/module.md +3 -0
- package/extensions/services/watchdog/monitor.py +128 -13
- package/extensions/services/watchdog/server.py +75 -13
- package/extensions/services/web/__init__.py +1 -0
- package/extensions/services/web/config.yaml +149 -0
- package/extensions/services/web/entry.py +487 -0
- package/extensions/services/web/module.md +24 -0
- package/extensions/services/web/routes/__init__.py +1 -0
- package/extensions/services/web/routes/routes_call.py +189 -0
- package/extensions/services/web/routes/routes_config.py +512 -0
- package/extensions/services/web/routes/routes_contacts.py +98 -0
- package/extensions/services/web/routes/routes_devlog.py +99 -0
- package/extensions/services/web/routes/routes_phone.py +81 -0
- package/extensions/services/web/routes/routes_sms.py +48 -0
- package/extensions/services/web/routes/routes_stats.py +17 -0
- package/extensions/services/web/routes/routes_voicechat.py +554 -0
- package/extensions/services/web/routes/schemas.py +216 -0
- package/extensions/services/web/server.py +332 -0
- package/extensions/services/web/static/css/style.css +1064 -0
- package/extensions/services/web/static/index.html +1445 -0
- package/extensions/services/web/static/js/app.js +4671 -0
- package/extensions/services/web/vendor/__init__.py +1 -0
- package/extensions/services/web/vendor/bluetooth/__init__.py +0 -0
- package/extensions/services/web/vendor/bluetooth/audio.py +348 -0
- package/extensions/services/web/vendor/bluetooth/contacts.py +251 -0
- package/extensions/services/web/vendor/bluetooth/manager.py +395 -0
- package/extensions/services/web/vendor/bluetooth/sms.py +290 -0
- package/extensions/services/web/vendor/bluetooth/telephony.py +274 -0
- package/extensions/services/web/vendor/config.py +139 -0
- package/extensions/services/web/vendor/conversation/__init__.py +0 -0
- package/extensions/services/web/vendor/conversation/asr.py +936 -0
- package/extensions/services/web/vendor/conversation/engine.py +548 -0
- package/extensions/services/web/vendor/conversation/llm.py +534 -0
- package/extensions/services/web/vendor/conversation/mcp_tools.py +190 -0
- package/extensions/services/web/vendor/conversation/tts.py +322 -0
- package/extensions/services/web/vendor/conversation/vad.py +138 -0
- package/extensions/services/web/vendor/storage/__init__.py +1 -0
- package/extensions/services/web/vendor/storage/identity.py +312 -0
- package/extensions/services/web/vendor/storage/store.py +507 -0
- package/extensions/services/web/vendor/task/__init__.py +0 -0
- package/extensions/services/web/vendor/task/manager.py +864 -0
- package/extensions/services/web/vendor/task/models.py +45 -0
- package/extensions/services/web/vendor/task/webhook.py +263 -0
- package/extensions/services/web/vendor/tools/__init__.py +0 -0
- package/extensions/services/web/vendor/tools/registry.py +321 -0
- package/main.py +230 -90
- package/package.json +1 -1
|
@@ -0,0 +1,512 @@
|
|
|
1
|
+
"""Routes for configuration and LLM model listing."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import copy
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
import httpx
|
|
12
|
+
from fastapi import APIRouter, HTTPException, Query, WebSocket, WebSocketDisconnect
|
|
13
|
+
from pydantic import BaseModel
|
|
14
|
+
|
|
15
|
+
from vendor import config as cfg
|
|
16
|
+
from routes.schemas import ConfigUpdate
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
router = APIRouter(tags=["config"])
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# ---------------------------------------------------------------------------
|
|
24
|
+
# Helpers
|
|
25
|
+
# ---------------------------------------------------------------------------
|
|
26
|
+
|
|
27
|
+
def _mask_keys(obj: Any, depth: int = 0) -> Any:
|
|
28
|
+
"""Recursively mask values whose key looks like a secret."""
|
|
29
|
+
if isinstance(obj, dict):
|
|
30
|
+
masked: dict[str, Any] = {}
|
|
31
|
+
for k, v in obj.items():
|
|
32
|
+
if any(s in k.lower() for s in ("api_key", "secret", "password", "token")):
|
|
33
|
+
if isinstance(v, str) and len(v) > 4:
|
|
34
|
+
masked[k] = "****" + v[-4:]
|
|
35
|
+
elif isinstance(v, str) and v:
|
|
36
|
+
masked[k] = "****"
|
|
37
|
+
else:
|
|
38
|
+
masked[k] = v
|
|
39
|
+
else:
|
|
40
|
+
masked[k] = _mask_keys(v, depth + 1)
|
|
41
|
+
return masked
|
|
42
|
+
if isinstance(obj, list):
|
|
43
|
+
return [_mask_keys(item, depth + 1) for item in obj]
|
|
44
|
+
return obj
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# ---------------------------------------------------------------------------
|
|
48
|
+
# Config endpoints
|
|
49
|
+
# ---------------------------------------------------------------------------
|
|
50
|
+
|
|
51
|
+
@router.get("/config")
async def get_config():
    """Return the current configuration (keys shown in plain text)."""
    # Deep-copy the snapshot so callers cannot mutate the live config object.
    snapshot = cfg.get_config()
    return copy.deepcopy(snapshot)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@router.put("/config")
async def update_config(updates: ConfigUpdate):
    """Merge updates into the runtime configuration and persist to disk.

    Raises:
        HTTPException 400: the request body contained no fields to apply.
        HTTPException 500: the configuration could not be updated or persisted.
    """
    update_data = updates.model_dump(exclude_none=True)
    if not update_data:
        raise HTTPException(status_code=400, detail="No updates provided")
    try:
        new_config = cfg.update_config(update_data)
    except Exception as exc:
        logger.exception("Failed to update config")
        # Chain the cause so the original traceback is preserved (PEP 3134).
        raise HTTPException(status_code=500, detail=str(exc)) from exc
    # Deep-copy so callers cannot mutate the live config object.
    return copy.deepcopy(new_config)
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
# ---------------------------------------------------------------------------
|
|
72
|
+
# Model listing
|
|
73
|
+
# ---------------------------------------------------------------------------
|
|
74
|
+
|
|
75
|
+
@router.get("/models")
async def list_models(provider: str = Query(..., description="LLM provider: openai, claude, gemini")):
    """Query a provider's API for available models and return ``[{id, name}]``.

    Raises:
        HTTPException 400: unknown/unsupported provider or missing API key.
        HTTPException 502: the upstream provider request failed.
    """
    provider = provider.lower()
    provider_cfg = cfg.get(f"llm.providers.{provider}")
    if not provider_cfg:
        raise HTTPException(status_code=400, detail=f"Unknown provider: {provider}")

    base_url: str = provider_cfg.get("base_url", "")
    api_key: str = provider_cfg.get("api_key", "")

    if not api_key:
        raise HTTPException(status_code=400, detail=f"API key not configured for {provider}")

    # Dispatch table instead of an if/elif chain; the "unsupported" check is
    # hoisted out of the try so it can never be confused with a network error.
    fetchers = {
        "openai": _fetch_openai_models,
        "claude": _fetch_claude_models,
        "gemini": _fetch_gemini_models,
    }
    fetcher = fetchers.get(provider)
    if fetcher is None:
        raise HTTPException(status_code=400, detail=f"Unsupported provider: {provider}")

    try:
        async with httpx.AsyncClient(timeout=30) as client:
            return await fetcher(client, base_url, api_key)
    except httpx.HTTPError as exc:
        logger.exception("Failed to fetch models from %s", provider)
        # Chain the cause so the original traceback is preserved.
        raise HTTPException(status_code=502, detail=f"Failed to fetch models from {provider}: {exc}") from exc
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
async def _fetch_openai_models(
|
|
105
|
+
client: httpx.AsyncClient, base_url: str, api_key: str
|
|
106
|
+
) -> list[dict[str, str]]:
|
|
107
|
+
resp = await client.get(
|
|
108
|
+
f"{base_url.rstrip('/')}/models",
|
|
109
|
+
headers={"Authorization": f"Bearer {api_key}"},
|
|
110
|
+
)
|
|
111
|
+
resp.raise_for_status()
|
|
112
|
+
data = resp.json()
|
|
113
|
+
models: list[dict[str, str]] = []
|
|
114
|
+
for m in data.get("data", []):
|
|
115
|
+
model_id: str = m.get("id", "")
|
|
116
|
+
if model_id:
|
|
117
|
+
models.append({"id": model_id, "name": model_id})
|
|
118
|
+
models.sort(key=lambda x: x["id"])
|
|
119
|
+
return models
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
async def _fetch_claude_models(
|
|
123
|
+
client: httpx.AsyncClient, base_url: str, api_key: str
|
|
124
|
+
) -> list[dict[str, str]]:
|
|
125
|
+
resp = await client.get(
|
|
126
|
+
f"{base_url.rstrip('/')}/models",
|
|
127
|
+
headers={
|
|
128
|
+
"x-api-key": api_key,
|
|
129
|
+
"anthropic-version": "2023-06-01",
|
|
130
|
+
},
|
|
131
|
+
)
|
|
132
|
+
resp.raise_for_status()
|
|
133
|
+
data = resp.json()
|
|
134
|
+
models: list[dict[str, str]] = []
|
|
135
|
+
for m in data.get("data", []):
|
|
136
|
+
model_id = m.get("id", "")
|
|
137
|
+
display_name = m.get("display_name", model_id)
|
|
138
|
+
models.append({"id": model_id, "name": display_name})
|
|
139
|
+
models.sort(key=lambda x: x["id"])
|
|
140
|
+
return models
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
async def _fetch_gemini_models(
|
|
144
|
+
client: httpx.AsyncClient, base_url: str, api_key: str
|
|
145
|
+
) -> list[dict[str, str]]:
|
|
146
|
+
resp = await client.get(
|
|
147
|
+
f"{base_url.rstrip('/')}/models",
|
|
148
|
+
params={"key": api_key},
|
|
149
|
+
)
|
|
150
|
+
resp.raise_for_status()
|
|
151
|
+
data = resp.json()
|
|
152
|
+
models: list[dict[str, str]] = []
|
|
153
|
+
for m in data.get("models", []):
|
|
154
|
+
model_name: str = m.get("name", "")
|
|
155
|
+
display_name: str = m.get("displayName", model_name)
|
|
156
|
+
model_id = model_name.removeprefix("models/")
|
|
157
|
+
models.append({"id": model_id, "name": display_name})
|
|
158
|
+
models.sort(key=lambda x: x["id"])
|
|
159
|
+
return models
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
# ---------------------------------------------------------------------------
|
|
163
|
+
# Chat test
|
|
164
|
+
# ---------------------------------------------------------------------------
|
|
165
|
+
|
|
166
|
+
class ChatRequest(BaseModel):
    """Request body for the /chat test endpoint."""

    provider: str = "openai"  # one of: openai, claude, gemini
    base_url: str = ""  # blank -> the provider's default base URL is used
    api_key: str = ""  # required; the endpoint rejects an empty key with 400
    model: str = ""  # blank -> the provider's default model is used
    temperature: float = 0.7
    max_tokens: int = 1024
    messages: list[dict[str, Any]] = []  # chat history; must be non-empty
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
@router.post("/chat")
async def chat_completion(req: ChatRequest):
    """Send messages to an LLM and return the response. Used for model testing.

    Raises:
        HTTPException 400: missing API key, empty messages, or unknown provider.
        HTTPException 502: the upstream LLM call failed.
    """
    if not req.api_key:
        raise HTTPException(status_code=400, detail="API key is required")
    if not req.messages:
        raise HTTPException(status_code=400, detail="Messages cannot be empty")

    provider = req.provider.lower()

    try:
        from conversation.llm import OpenAILLM, ClaudeLLM, GeminiLLM

        # One registry entry per provider: (class, default base_url, default
        # model). Collapses three near-identical constructor calls so the
        # shared keyword arguments are written once.
        registry = {
            "openai": (OpenAILLM, "https://api.openai.com/v1", "gpt-4o"),
            "claude": (ClaudeLLM, "https://api.anthropic.com/v1", "claude-sonnet-4-20250514"),
            "gemini": (GeminiLLM, "https://generativelanguage.googleapis.com/v1beta", "gemini-2.0-flash"),
        }
        if provider not in registry:
            raise HTTPException(status_code=400, detail=f"Unsupported provider: {provider}")
        llm_cls, default_url, default_model = registry[provider]
        llm = llm_cls(
            base_url=req.base_url or default_url,
            api_key=req.api_key,
            model=req.model or default_model,
            temperature=req.temperature,
            max_tokens=req.max_tokens,
        )

        result = await llm.generate(req.messages)
        return {"content": result.get("content", "")}

    except HTTPException:
        raise
    except Exception as exc:
        logger.exception("Chat completion failed")
        # Chain the cause so the upstream failure shows up in tracebacks.
        raise HTTPException(status_code=502, detail=str(exc)) from exc
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
# ---------------------------------------------------------------------------
|
|
227
|
+
# ASR real-time test via WebSocket
|
|
228
|
+
# ---------------------------------------------------------------------------
|
|
229
|
+
|
|
230
|
+
@router.websocket("/ws/asr-test")
async def asr_test_websocket(ws: WebSocket):
    """WebSocket endpoint for real-time ASR testing.

    Protocol (as implemented below):
      1. Client may send an optional JSON ``{"type": "config", ...}`` text
         frame first (provider / resource_id / sample_rate overrides).
      2. Server connects to the ASR backend (up to 3 attempts) and replies
         with a ``ready`` frame, or an ``error`` frame and closes.
      3. Client streams binary PCM frames; server forwards them to the ASR,
         emits ``interim`` transcripts and periodic ``stats`` frames.
      4. A ``{"type": "stop"}`` text frame triggers the ``final`` transcript
         and ends the session.
    """
    from conversation.asr import create_asr_provider

    await ws.accept()

    asr = None
    # Counters reported back to the browser; "asr_responses" is declared but
    # never incremented in this version.
    stats = {"audio_bytes": 0, "audio_chunks": 0, "asr_packets_sent": 0, "asr_responses": 0}

    try:
        test_provider = ""
        test_resource_id = ""
        test_sample_rate = 0
        # Wait briefly for an optional config frame; a non-config or binary
        # first frame simply falls through with the defaults.
        first_msg = await asyncio.wait_for(ws.receive(), timeout=5.0)
        if "text" in first_msg and first_msg["text"]:
            try:
                config_data = json.loads(first_msg["text"])
                if config_data.get("type") == "config":
                    test_provider = config_data.get("provider", "")
                    test_resource_id = config_data.get("resource_id", "")
                    test_sample_rate = int(config_data.get("sample_rate", 0))
                    logger.info("ASR test config: provider=%s, resource_id=%s, sample_rate=%s",
                                test_provider, test_resource_id, test_sample_rate)
            except (json.JSONDecodeError, ValueError):
                # Malformed config is ignored; defaults apply.
                pass

        asr = create_asr_provider(
            provider_override=test_provider or None,
            resource_id_override=test_resource_id or None,
            sample_rate_override=test_sample_rate or None,
        )
        actual_provider = test_provider or cfg.get("asr.provider", "unknown")
        language = cfg.get("asr.whisper.language", "zh")

        # Retry the backend connection with linear backoff; a fresh provider
        # instance is created for each retry in case the old one is tainted.
        max_retries = 3
        last_err = None
        for attempt in range(1, max_retries + 1):
            try:
                await asr.start_stream(language)
                last_err = None
                break
            except Exception as exc:
                last_err = exc
                logger.warning("ASR connect attempt %d/%d failed: %s", attempt, max_retries, exc)
                if attempt < max_retries:
                    await asyncio.sleep(0.5 * attempt)
                    asr = create_asr_provider(
                        provider_override=test_provider or None,
                        resource_id_override=test_resource_id or None,
                        sample_rate_override=test_sample_rate or None,
                    )
        if last_err:
            await ws.send_json({"type": "error", "message": str(last_err)})
            return

        actual_sr = getattr(asr, "SAMPLE_RATE", 16000)

        await ws.send_json({
            "type": "ready",
            "provider": actual_provider,
            "sample_rate": actual_sr,
            "resource_id": getattr(asr, "resource_id", ""),
        })

        last_interim = ""
        last_stats_time = asyncio.get_event_loop().time()

        while True:
            message = await ws.receive()

            if message["type"] == "websocket.disconnect":
                break

            if "bytes" in message and message["bytes"]:
                chunk = message["bytes"]
                stats["audio_bytes"] += len(chunk)
                stats["audio_chunks"] += 1

                # Estimate how many full frames the provider consumed by
                # watching its internal buffer shrink around feed_audio().
                # NOTE(review): relies on the private ``_buffer`` attribute
                # of the ASR provider — confirm all providers expose it.
                prev_buf_len = len(getattr(asr, "_buffer", b""))
                await asr.feed_audio(chunk)
                new_buf_len = len(getattr(asr, "_buffer", b""))
                if new_buf_len < prev_buf_len + len(chunk):
                    bytes_consumed = prev_buf_len + len(chunk) - new_buf_len
                    frame_size = getattr(asr, "BYTES_PER_FRAME", 6400)
                    packets = bytes_consumed // frame_size if frame_size else 0
                    stats["asr_packets_sent"] += packets

                interim = await asr.get_interim_result()
                if interim and interim != last_interim:
                    last_interim = interim
                    await ws.send_json({"type": "interim", "text": interim})

                # Throttled stats push, at most every 0.5 s.
                now = asyncio.get_event_loop().time()
                if now - last_stats_time >= 0.5:
                    last_stats_time = now
                    # Duration assumes 16-bit mono PCM (2 bytes per sample).
                    duration_s = stats["audio_bytes"] / (actual_sr * 2)
                    await ws.send_json({
                        "type": "stats",
                        "audio_bytes": stats["audio_bytes"],
                        "audio_chunks": stats["audio_chunks"],
                        "audio_duration": round(duration_s, 1),
                        "asr_packets_sent": stats["asr_packets_sent"],
                        "has_interim": bool(interim),
                    })

            elif "text" in message and message["text"]:
                try:
                    data = json.loads(message["text"])
                except json.JSONDecodeError:
                    continue

                if data.get("type") == "stop":
                    # Final stats snapshot before the (possibly slow) call
                    # that retrieves the final transcript.
                    duration_s = stats["audio_bytes"] / (actual_sr * 2)
                    await ws.send_json({
                        "type": "stats",
                        "audio_bytes": stats["audio_bytes"],
                        "audio_chunks": stats["audio_chunks"],
                        "audio_duration": round(duration_s, 1),
                        "asr_packets_sent": stats["asr_packets_sent"],
                        "has_interim": bool(last_interim),
                        "getting_final": True,
                    })
                    final_text = await asr.get_result()
                    await ws.send_json({"type": "final", "text": final_text})
                    break

    except WebSocketDisconnect:
        logger.info("ASR test WebSocket disconnected")
    except Exception as exc:
        logger.exception("ASR test WebSocket error")
        try:
            # Best-effort error report; the socket may already be gone.
            await ws.send_json({"type": "error", "message": str(exc)})
        except Exception:
            pass
    finally:
        # Always release the ASR stream and close the socket, ignoring
        # secondary failures during teardown.
        if asr:
            try:
                await asr.stop_stream()
            except Exception:
                pass
        try:
            await ws.close()
        except Exception:
            pass
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
# ---------------------------------------------------------------------------
|
|
378
|
+
# Audio diagnostic log upload
|
|
379
|
+
# ---------------------------------------------------------------------------
|
|
380
|
+
|
|
381
|
+
class AudioDiagLog(BaseModel):
    """Payload for browser-submitted audio diagnostic logs."""

    log: str = ""  # raw diagnostic text collected in the browser
    timestamp: str = ""  # client-side timestamp string (format set by the browser)
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
@router.post("/audio-diag")
async def upload_audio_diag(data: AudioDiagLog):
    """Receive browser audio diagnostic logs."""
    import os

    log_dir = str(cfg.data_dir())
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, "audio_diag.log")
    # Each upload replaces the previous log file ("w" mode).
    content = f"=== Audio Diagnostic Log ===\nTimestamp: {data.timestamp}\n\n{data.log}"
    with open(log_path, "w", encoding="utf-8") as fh:
        fh.write(content)
    logger.info("Audio diagnostic log saved to %s (%d bytes)", log_path, len(data.log))
    return {"status": "ok", "path": log_path}
|
|
399
|
+
|
|
400
|
+
|
|
401
|
+
# ---------------------------------------------------------------------------
|
|
402
|
+
# TTS diagnostic log
|
|
403
|
+
# ---------------------------------------------------------------------------
|
|
404
|
+
|
|
405
|
+
class TTSDiagLog(BaseModel):
    """Payload for browser-submitted TTS test diagnostic logs."""

    log: str = ""  # raw diagnostic text collected in the browser
    provider: str = ""  # TTS provider the test used
    error: str = ""  # error message, if the test failed
    timestamp: str = ""  # client-side timestamp string (format set by the browser)
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
@router.post("/tts-diag")
async def upload_tts_diag(data: TTSDiagLog):
    """Receive TTS test diagnostic logs from the browser."""
    import os

    log_dir = str(cfg.data_dir())
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, "tts_diag.log")
    # Assemble the entry first, then append in one call ("a" mode keeps history).
    entry = [f"\n=== TTS Diag [{data.timestamp}] provider={data.provider} ===\n"]
    if data.error:
        entry.append(f"Error: {data.error}\n")
    entry.append(data.log + "\n")
    with open(log_path, "a", encoding="utf-8") as fh:
        fh.writelines(entry)
    logger.info("TTS diag log appended to %s (provider=%s, error=%s)",
                log_path, data.provider, data.error[:80] if data.error else "")
    return {"status": "ok", "path": log_path}
|
|
427
|
+
|
|
428
|
+
class TTSTestRequest(BaseModel):
    """Request body for the /tts-test synthesis endpoint."""

    provider: str = "edge-tts"  # volcengine | tencent | edge-tts
    voice: str = ""  # provider-specific voice id (Tencent expects an integer string)
    speed: float = 1.0  # 1.0 = normal speed
    volume: float = 1.0  # 1.0 = normal volume
    text: str = ""  # text to synthesize; must be non-empty
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
@router.post("/tts-test")
async def tts_test(req: TTSTestRequest):
    """Synthesize text and return base64-encoded audio for playback testing.

    Raises:
        HTTPException 400: empty text, missing provider credentials, or a
            non-numeric Tencent voice id.
        HTTPException 500: the provider returned empty audio.
        HTTPException 502: synthesis failed upstream.
    """
    if not req.text or not req.text.strip():
        raise HTTPException(status_code=400, detail="Text cannot be empty")

    from conversation.tts import create_tts_provider

    overrides: dict[str, Any] = {}
    provider = req.provider.lower()

    if provider == "volcengine":
        # Credentials may live under either the TTS or the ASR section.
        app_id = cfg.get("tts.volcengine.app_id") or cfg.get("asr.volcengine.app_id", "")
        token = cfg.get("tts.volcengine.access_token") or cfg.get("asr.volcengine.access_token", "")
        if not app_id or not token:
            raise HTTPException(
                status_code=400,
                detail=f"火山引擎 TTS 缺少认证配置 (app_id={'有' if app_id else '空'}, access_token={'有' if token else '空'}). "
                f"请在 ASR 或 TTS 配置中填写 app_id 和 access_token.",
            )
        if req.voice:
            overrides["voice_type"] = req.voice
        overrides["speed_ratio"] = req.speed
        overrides["volume_ratio"] = req.volume

    elif provider == "tencent":
        sid = cfg.get("tts.tencent.secret_id") or cfg.get("asr.tencent.secret_id", "")
        skey = cfg.get("tts.tencent.secret_key") or cfg.get("asr.tencent.secret_key", "")
        if not sid or not skey:
            raise HTTPException(
                status_code=400,
                detail=f"腾讯云 TTS 缺少认证配置 (secret_id={'有' if sid else '空'}, secret_key={'有' if skey else '空'}). "
                f"请在 ASR 或 TTS 配置中填写 secret_id 和 secret_key.",
            )
        if req.voice:
            # Fix: a non-numeric voice id previously raised an uncaught
            # ValueError (HTTP 500); report it as a client error instead.
            try:
                overrides["voice_type"] = int(req.voice)
            except ValueError as exc:
                raise HTTPException(
                    status_code=400,
                    detail=f"Tencent voice type must be an integer, got: {req.voice!r}",
                ) from exc
        overrides["speed"] = req.speed
        overrides["volume"] = req.volume

    elif provider == "edge-tts":
        if req.voice:
            overrides["voice"] = req.voice
        # edge-tts expects signed percentage strings, e.g. "+20%" / "-10%".
        pct = int((req.speed - 1.0) * 100)
        overrides["rate"] = f"{pct:+d}%"
        vol_pct = int((req.volume - 1.0) * 100)
        overrides["volume"] = f"{vol_pct:+d}%"

    logger.info("TTS test: provider=%s, voice=%s, speed=%s, overrides=%s",
                provider, req.voice, req.speed, list(overrides.keys()))

    try:
        tts = create_tts_provider(provider=provider, **overrides)
        audio_data = await tts.synthesize(req.text)
        if not audio_data:
            raise HTTPException(status_code=500, detail="TTS returned empty audio")

        import base64 as b64
        audio_b64 = b64.b64encode(audio_data).decode()

        # All providers here emit MP3 except Tencent when configured for PCM.
        content_type = "audio/mpeg"
        if provider == "tencent":
            codec = cfg.get("tts.tencent.codec", "pcm")
            if codec == "pcm":
                content_type = "audio/pcm"

        return {
            "audio": audio_b64,
            "content_type": content_type,
            "size": len(audio_data),
        }
    except HTTPException:
        raise
    except Exception as exc:
        logger.exception("TTS test failed for provider=%s", provider)
        detail = str(exc)
        exc_type = type(exc).__name__
        # Chain the cause so the upstream failure shows up in tracebacks.
        raise HTTPException(status_code=502, detail=f"[{exc_type}] {detail}") from exc
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
"""Routes for contact management."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
|
|
7
|
+
from fastapi import APIRouter, HTTPException, Query, Request
|
|
8
|
+
|
|
9
|
+
from routes.schemas import (
|
|
10
|
+
ContactCreate,
|
|
11
|
+
ContactRecord,
|
|
12
|
+
ContactUpdate,
|
|
13
|
+
PaginatedResponse,
|
|
14
|
+
)
|
|
15
|
+
from vendor.storage import store
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
router = APIRouter(tags=["contacts"])
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@router.get("/contacts", response_model=PaginatedResponse)
async def list_contacts(
    page: int = Query(1, ge=1),
    page_size: int = Query(50, ge=1, le=200),
    query: str | None = Query(None),
):
    """Return one page of contacts, optionally filtered by a search string."""
    records, count = await store.list_contacts(page=page, page_size=page_size, query=query)
    return PaginatedResponse(items=records, total=count, page=page, page_size=page_size)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@router.post("/contacts", response_model=ContactRecord)
async def add_contact(contact: ContactCreate):
    """Add a new contact."""
    # Drop unset fields so the store only receives provided values.
    payload = contact.model_dump(exclude_none=True)
    created = await store.add_contact(payload)
    return ContactRecord(**created)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@router.get("/contacts/{contact_id}", response_model=ContactRecord)
async def get_contact(contact_id: str):
    """Retrieve a single contact by ID."""
    found = await store.get_contact(contact_id)
    if found is None:
        raise HTTPException(status_code=404, detail="Contact not found")
    return ContactRecord(**found)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@router.put("/contacts/{contact_id}", response_model=ContactRecord)
async def update_contact(contact_id: str, updates: ContactUpdate):
    """Update an existing contact."""
    # Guard: the contact must exist before we attempt any changes.
    current = await store.get_contact(contact_id)
    if current is None:
        raise HTTPException(status_code=404, detail="Contact not found")

    fields = updates.model_dump(exclude_none=True)
    if not fields:
        raise HTTPException(status_code=400, detail="No fields to update")

    ok = await store.update_contact(contact_id, fields)
    if not ok:
        raise HTTPException(status_code=500, detail="Failed to update contact")

    # Re-read so the response reflects whatever the store persisted.
    refreshed = await store.get_contact(contact_id)
    return ContactRecord(**refreshed)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@router.delete("/contacts/{contact_id}")
async def delete_contact(contact_id: str):
    """Delete a contact by ID."""
    removed = await store.delete_contact(contact_id)
    if not removed:
        raise HTTPException(status_code=404, detail="Contact not found")
    return {"status": "deleted", "contact_id": contact_id}
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@router.post("/contacts/sync")
async def sync_contacts(request: Request):
    """Sync contacts from the connected phone via Bluetooth.

    Each synced entry is matched against the store by phone number:
    existing records are updated, new ones are added with
    ``source="phone_sync"``.

    Raises:
        HTTPException 500: the Bluetooth sync or store operation failed.
    """
    bt = request.app.state.bt_manager
    try:
        contacts = await bt.sync_contacts()
        added = 0
        updated = 0
        for c in contacts:
            existing = await store.find_contact_by_phone(c.get("phone", ""))
            if existing:
                await store.update_contact(existing["id"], c)
                updated += 1
            else:
                c["source"] = "phone_sync"
                await store.add_contact(c)
                added += 1
        return {"status": "synced", "added": added, "updated": updated, "total": len(contacts)}
    except Exception as exc:
        logger.exception("Contact sync failed")
        # Chain the cause so the original traceback is preserved (PEP 3134).
        raise HTTPException(status_code=500, detail=str(exc)) from exc
|