openspeechapi 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openspeech/__init__.py +75 -0
- openspeech/__main__.py +5 -0
- openspeech/cli.py +413 -0
- openspeech/client/__init__.py +4 -0
- openspeech/client/client.py +145 -0
- openspeech/config.py +212 -0
- openspeech/core/__init__.py +0 -0
- openspeech/core/base.py +75 -0
- openspeech/core/enums.py +39 -0
- openspeech/core/models.py +61 -0
- openspeech/core/registry.py +37 -0
- openspeech/core/settings.py +8 -0
- openspeech/demo.py +675 -0
- openspeech/dispatch/__init__.py +0 -0
- openspeech/dispatch/context.py +34 -0
- openspeech/dispatch/dispatcher.py +661 -0
- openspeech/dispatch/executors/__init__.py +0 -0
- openspeech/dispatch/executors/base.py +34 -0
- openspeech/dispatch/executors/in_process.py +66 -0
- openspeech/dispatch/executors/remote.py +64 -0
- openspeech/dispatch/executors/subprocess_exec.py +446 -0
- openspeech/dispatch/fanout.py +95 -0
- openspeech/dispatch/filters.py +73 -0
- openspeech/dispatch/lifecycle.py +178 -0
- openspeech/dispatch/watcher.py +82 -0
- openspeech/engine_catalog.py +236 -0
- openspeech/engine_registry.yaml +347 -0
- openspeech/exceptions.py +51 -0
- openspeech/factory.py +325 -0
- openspeech/local_engines/__init__.py +12 -0
- openspeech/local_engines/aim_resolver.py +91 -0
- openspeech/local_engines/backends/__init__.py +1 -0
- openspeech/local_engines/backends/docker_backend.py +490 -0
- openspeech/local_engines/backends/native_backend.py +902 -0
- openspeech/local_engines/base.py +30 -0
- openspeech/local_engines/engines/__init__.py +1 -0
- openspeech/local_engines/engines/faster_whisper.py +36 -0
- openspeech/local_engines/engines/fish_speech.py +33 -0
- openspeech/local_engines/engines/sherpa_onnx.py +56 -0
- openspeech/local_engines/engines/whisper.py +41 -0
- openspeech/local_engines/engines/whisperlivekit.py +60 -0
- openspeech/local_engines/manager.py +208 -0
- openspeech/local_engines/models.py +50 -0
- openspeech/local_engines/progress.py +69 -0
- openspeech/local_engines/registry.py +19 -0
- openspeech/local_engines/task_store.py +52 -0
- openspeech/local_engines/tasks.py +71 -0
- openspeech/logging_config.py +607 -0
- openspeech/observe/__init__.py +0 -0
- openspeech/observe/base.py +79 -0
- openspeech/observe/debug.py +44 -0
- openspeech/observe/latency.py +19 -0
- openspeech/observe/metrics.py +47 -0
- openspeech/observe/tracing.py +44 -0
- openspeech/observe/usage.py +27 -0
- openspeech/providers/__init__.py +0 -0
- openspeech/providers/_template.py +101 -0
- openspeech/providers/stt/__init__.py +0 -0
- openspeech/providers/stt/alibaba.py +86 -0
- openspeech/providers/stt/assemblyai.py +135 -0
- openspeech/providers/stt/azure_speech.py +99 -0
- openspeech/providers/stt/baidu.py +135 -0
- openspeech/providers/stt/deepgram.py +311 -0
- openspeech/providers/stt/elevenlabs.py +385 -0
- openspeech/providers/stt/faster_whisper.py +211 -0
- openspeech/providers/stt/google_cloud.py +106 -0
- openspeech/providers/stt/iflytek.py +427 -0
- openspeech/providers/stt/macos_speech.py +226 -0
- openspeech/providers/stt/openai.py +84 -0
- openspeech/providers/stt/sherpa_onnx.py +353 -0
- openspeech/providers/stt/tencent.py +212 -0
- openspeech/providers/stt/volcengine.py +107 -0
- openspeech/providers/stt/whisper.py +153 -0
- openspeech/providers/stt/whisperlivekit.py +530 -0
- openspeech/providers/stt/windows_speech.py +249 -0
- openspeech/providers/tts/__init__.py +0 -0
- openspeech/providers/tts/alibaba.py +95 -0
- openspeech/providers/tts/azure_speech.py +123 -0
- openspeech/providers/tts/baidu.py +143 -0
- openspeech/providers/tts/coqui.py +64 -0
- openspeech/providers/tts/cosyvoice.py +90 -0
- openspeech/providers/tts/deepgram.py +174 -0
- openspeech/providers/tts/elevenlabs.py +311 -0
- openspeech/providers/tts/fish_speech.py +158 -0
- openspeech/providers/tts/google_cloud.py +107 -0
- openspeech/providers/tts/iflytek.py +209 -0
- openspeech/providers/tts/macos_say.py +251 -0
- openspeech/providers/tts/minimax.py +122 -0
- openspeech/providers/tts/openai.py +104 -0
- openspeech/providers/tts/piper.py +104 -0
- openspeech/providers/tts/tencent.py +189 -0
- openspeech/providers/tts/volcengine.py +117 -0
- openspeech/providers/tts/windows_sapi.py +234 -0
- openspeech/server/__init__.py +1 -0
- openspeech/server/app.py +72 -0
- openspeech/server/auth.py +42 -0
- openspeech/server/middleware.py +75 -0
- openspeech/server/routes/__init__.py +1 -0
- openspeech/server/routes/management.py +848 -0
- openspeech/server/routes/stt.py +121 -0
- openspeech/server/routes/tts.py +159 -0
- openspeech/server/routes/webui.py +29 -0
- openspeech/server/webui/app.js +2649 -0
- openspeech/server/webui/index.html +216 -0
- openspeech/server/webui/styles.css +617 -0
- openspeech/server/ws/__init__.py +1 -0
- openspeech/server/ws/stt_stream.py +263 -0
- openspeech/server/ws/tts_stream.py +207 -0
- openspeech/telemetry/__init__.py +21 -0
- openspeech/telemetry/perf.py +307 -0
- openspeech/utils/__init__.py +5 -0
- openspeech/utils/audio_converter.py +406 -0
- openspeech/utils/audio_playback.py +156 -0
- openspeech/vendor_registry.yaml +74 -0
- openspeechapi-0.1.0.dist-info/METADATA +101 -0
- openspeechapi-0.1.0.dist-info/RECORD +118 -0
- openspeechapi-0.1.0.dist-info/WHEEL +4 -0
- openspeechapi-0.1.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,490 @@
|
|
|
1
|
+
"""Docker runtime backend for local engine lifecycle."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import re
|
|
6
|
+
import shutil
|
|
7
|
+
import subprocess
|
|
8
|
+
import time
|
|
9
|
+
import urllib.error
|
|
10
|
+
import urllib.request
|
|
11
|
+
from collections import deque
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from openspeech.local_engines.base import RuntimeBackend
|
|
15
|
+
from openspeech.local_engines.models import EngineSpec, EngineStatus, RuntimeConfig
|
|
16
|
+
|
|
17
|
+
# Matches the "<current><unit>/<total><unit>" fragment of a `docker pull`
# progress line, e.g. "12.3MB/456.7MB".  Units may be decimal (kB/MB/...) or
# binary (KiB/MiB/...), in any letter case.
_PULL_PROGRESS_RE = re.compile(
    r"([0-9]+(?:\.[0-9]+)?)\s*([kKmMgGtTpP]?i?[bB])\s*/\s*([0-9]+(?:\.[0-9]+)?)\s*([kKmMgGtTpP]?i?[bB])"
)
# Matches the leading layer-digest prefix of a pull line: "<hex-id>: <rest>".
# Accepts both short (12-char) and full (64-char) hex ids.
_LAYER_PREFIX_RE = re.compile(r"^([a-f0-9]{6,64}):\s*(.*)$")
# Byte multipliers keyed by upper-cased unit: decimal units use powers of
# 1000, binary ("...IB") units powers of 1024.  Used by DockerBackend._to_bytes.
_UNIT_MULTIPLIER = {
    "B": 1,
    "KB": 1000,
    "MB": 1000**2,
    "GB": 1000**3,
    "TB": 1000**4,
    "KIB": 1024,
    "MIB": 1024**2,
    "GIB": 1024**3,
    "TIB": 1024**4,
}
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class DockerBackend(RuntimeBackend):
    """Runtime backend that runs a local engine as a Docker container.

    Implements install/update/start/stop/status/logs by shelling out to the
    ``docker`` CLI, reporting image-pull progress at layer granularity.
    """

    # Backend identifier string; presumably the key used to select this
    # backend in the runtime registry — TODO confirm against RuntimeBackend.
    runtime_name = "docker"
|
|
36
|
+
|
|
37
|
+
def _require_docker(self) -> None:
|
|
38
|
+
if shutil.which("docker") is None:
|
|
39
|
+
raise RuntimeError("Docker is not installed or not found in PATH")
|
|
40
|
+
|
|
41
|
+
def _run(self, cmd: list[str], check: bool = True) -> subprocess.CompletedProcess:
|
|
42
|
+
try:
|
|
43
|
+
return subprocess.run(cmd, capture_output=True, text=True, check=check)
|
|
44
|
+
except subprocess.CalledProcessError as exc:
|
|
45
|
+
out = (exc.stdout or "").strip()
|
|
46
|
+
err = (exc.stderr or "").strip()
|
|
47
|
+
detail = "\n".join(x for x in [out, err] if x)
|
|
48
|
+
if detail:
|
|
49
|
+
raise RuntimeError(
|
|
50
|
+
f"Command failed: {' '.join(cmd)}\n{detail}"
|
|
51
|
+
) from exc
|
|
52
|
+
raise RuntimeError(f"Command failed: {' '.join(cmd)} (exit={exc.returncode})") from exc
|
|
53
|
+
|
|
54
|
+
def _image_exists(self, image: str) -> bool:
|
|
55
|
+
result = self._run(["docker", "image", "inspect", image], check=False)
|
|
56
|
+
return result.returncode == 0
|
|
57
|
+
|
|
58
|
+
@staticmethod
|
|
59
|
+
def _to_bytes(value: float, unit: str) -> int:
|
|
60
|
+
unit_norm = unit.strip().upper().replace("IB", "IB").replace("B", "B")
|
|
61
|
+
if unit_norm not in _UNIT_MULTIPLIER:
|
|
62
|
+
unit_norm = unit_norm.replace("I", "")
|
|
63
|
+
mul = _UNIT_MULTIPLIER.get(unit_norm, 1)
|
|
64
|
+
return int(value * mul)
|
|
65
|
+
|
|
66
|
+
@staticmethod
|
|
67
|
+
def _extract_pull_progress(line: str) -> tuple[str, int, int] | None:
|
|
68
|
+
m_layer = _LAYER_PREFIX_RE.match(line.strip())
|
|
69
|
+
if not m_layer:
|
|
70
|
+
return None
|
|
71
|
+
layer, payload = m_layer.group(1), m_layer.group(2)
|
|
72
|
+
m = _PULL_PROGRESS_RE.search(payload)
|
|
73
|
+
if not m:
|
|
74
|
+
return None
|
|
75
|
+
cur = DockerBackend._to_bytes(float(m.group(1)), m.group(2))
|
|
76
|
+
total = DockerBackend._to_bytes(float(m.group(3)), m.group(4))
|
|
77
|
+
if total <= 0:
|
|
78
|
+
return None
|
|
79
|
+
return layer, min(cur, total), total
|
|
80
|
+
|
|
81
|
+
@staticmethod
|
|
82
|
+
def _extract_pull_status(line: str) -> tuple[str, str] | None:
|
|
83
|
+
m = _LAYER_PREFIX_RE.match(line.strip())
|
|
84
|
+
if not m:
|
|
85
|
+
return None
|
|
86
|
+
return m.group(1), m.group(2)
|
|
87
|
+
|
|
88
|
+
@staticmethod
|
|
89
|
+
def _parse_platform(platform: str) -> tuple[str, str]:
|
|
90
|
+
p = (platform or "").strip()
|
|
91
|
+
if "/" in p:
|
|
92
|
+
os_name, arch = p.split("/", 1)
|
|
93
|
+
return os_name.strip() or "linux", arch.strip() or "amd64"
|
|
94
|
+
return "linux", "amd64"
|
|
95
|
+
|
|
96
|
+
def _fetch_layer_sizes(self, image: str, platform: str) -> dict[str, int]:
|
|
97
|
+
"""Fetch compressed layer sizes from manifest for progress estimation."""
|
|
98
|
+
os_name, arch = self._parse_platform(platform)
|
|
99
|
+
cmd = ["docker", "manifest", "inspect", "--verbose", image]
|
|
100
|
+
proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
|
|
101
|
+
if proc.returncode != 0 or not (proc.stdout or "").strip():
|
|
102
|
+
return {}
|
|
103
|
+
try:
|
|
104
|
+
manifests = json.loads(proc.stdout)
|
|
105
|
+
except json.JSONDecodeError:
|
|
106
|
+
return {}
|
|
107
|
+
if not isinstance(manifests, list):
|
|
108
|
+
return {}
|
|
109
|
+
for entry in manifests:
|
|
110
|
+
desc = entry.get("Descriptor", {})
|
|
111
|
+
plat = desc.get("platform", {})
|
|
112
|
+
if plat.get("os") == os_name and plat.get("architecture") == arch:
|
|
113
|
+
oci = entry.get("OCIManifest", {})
|
|
114
|
+
layers = oci.get("layers", [])
|
|
115
|
+
result: dict[str, int] = {}
|
|
116
|
+
for layer in layers:
|
|
117
|
+
digest = str(layer.get("digest", ""))
|
|
118
|
+
size = int(layer.get("size", 0) or 0)
|
|
119
|
+
if not digest.startswith("sha256:") or size <= 0:
|
|
120
|
+
continue
|
|
121
|
+
key = digest.split(":", 1)[1][:12]
|
|
122
|
+
result[key] = size
|
|
123
|
+
return result
|
|
124
|
+
return {}
|
|
125
|
+
|
|
126
|
+
    def _pull_with_progress(self, image: str, platform: str, report) -> None:
        """Run ``docker pull`` for *image* and stream progress to *report*.

        *report* is invoked as ``report(step, message, progress, eta_seconds=...)``
        where *progress* is a percentage float or None for text-only updates
        (usage grounded in the call sites below).  Aggregate progress is
        mapped into the 35..90 band, so callers can use the rest of the scale
        for surrounding steps.

        Raises RuntimeError when stdout cannot be captured or the pull exits
        non-zero (the message includes the last output lines).  On any
        exception the pull subprocess is killed before re-raising.
        """
        cmd = ["docker", "pull"]
        if platform:
            cmd.extend(["--platform", platform])
        cmd.append(image)

        # Unbuffered byte stream: docker rewrites progress lines with "\r",
        # which the read loop below normalizes to newlines.
        proc = subprocess.Popen(  # noqa: S603
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=False,
            bufsize=0,
        )
        if proc.stdout is None:
            raise RuntimeError("Failed to capture docker pull output")

        # Manifest-derived sizes (may be {}) give a stable total for ETA math.
        layer_sizes = self._fetch_layer_sizes(image, platform)
        layer_progress: dict[str, tuple[int, int]] = {}  # layer -> (cur, total) bytes
        layer_state: dict[str, str] = {}  # last status string emitted per layer
        layer_last_emit_at: dict[str, float] = {}  # per-layer report throttle
        last_emit = 0.0  # aggregate-report throttle timestamp
        speed_window: deque[tuple[float, int]] = deque(maxlen=12)  # (time, done-bytes) samples
        transfer_started_at = time.monotonic()
        tail = deque(maxlen=40)  # retained for the failure message
        try:
            pending = ""

            def _handle_line(raw: str) -> None:
                # Process one normalized output line: update per-layer state
                # and emit throttled layer-level and aggregate reports.
                nonlocal last_emit
                line = raw.strip()
                if not line:
                    return
                tail.append(line)
                parsed = self._extract_pull_progress(line)
                status_line = self._extract_pull_status(line)
                now = time.monotonic()

                def _current_transfer_stats() -> tuple[float, int | None]:
                    # Recompute (bytes/sec, eta) from the sliding sample
                    # window.  NOTE(review): each call appends a sample, so
                    # multiple calls per line add same-timestamp samples —
                    # harmless for the windowed rate, but worth knowing.
                    done = sum(c for c, t in layer_progress.values())
                    known_total = sum(t for c, t in layer_progress.values())
                    manifest_total = sum(layer_sizes.values()) if layer_sizes else 0
                    # Prefer the manifest total; fall back to totals seen so far.
                    all_total = manifest_total if manifest_total > 0 else known_total

                    speed_window.append((now, done))
                    speed_bps = 0.0
                    eta_seconds: int | None = None
                    if len(speed_window) >= 2:
                        t0, b0 = speed_window[0]
                        t1, b1 = speed_window[-1]
                        dt = max(0.001, t1 - t0)
                        db = max(0, b1 - b0)
                        speed_bps = db / dt
                        remaining = max(0, all_total - done)
                        if speed_bps > 1 and all_total > 0:
                            eta_seconds = int(remaining / speed_bps)
                    return speed_bps, eta_seconds

                if status_line is not None:
                    layer_id, status = status_line
                    total_size = layer_sizes.get(layer_id)
                    # Seed a zero-progress entry so the layer counts toward totals.
                    if total_size is not None and layer_id not in layer_progress:
                        layer_progress[layer_id] = (0, total_size)
                    if "Already exists" in status or "Download complete" in status:
                        if total_size is not None:
                            layer_progress[layer_id] = (total_size, total_size)
                    # Only report a layer's status when it changes.
                    if layer_state.get(layer_id) != status:
                        layer_state[layer_id] = status
                        speed_bps, eta_seconds = _current_transfer_stats()
                        report(
                            "pull_image",
                            (
                                f"layer {layer_id}: {status} "
                                f"({self._format_speed(speed_bps)}, ETA {self._format_eta(eta_seconds)})"
                            ),
                            None,
                            eta_seconds=eta_seconds,
                        )

                if parsed is not None:
                    layer, cur, total = parsed
                    layer_progress[layer] = (cur, total)
                    # Throttle per-layer download reports to one per 1.2 s.
                    prev = layer_last_emit_at.get(layer, 0.0)
                    if (now - prev) >= 1.2:
                        speed_bps, eta_seconds = _current_transfer_stats()
                        ratio = (cur / total) if total > 0 else 0.0
                        report(
                            "pull_image",
                            (
                                f"layer {layer}: downloading "
                                f"{self._format_bytes(cur)}/{self._format_bytes(total)} "
                                f"({ratio * 100:.1f}%) at {self._format_speed(speed_bps)}, "
                                f"ETA {self._format_eta(eta_seconds)}"
                            ),
                            None,
                            eta_seconds=eta_seconds,
                        )
                        layer_last_emit_at[layer] = now

                # Aggregate report, throttled to one per 0.4 s.
                if (now - last_emit) >= 0.4 and layer_progress:
                    done = sum(c for c, t in layer_progress.values())
                    known_total = sum(t for c, t in layer_progress.values())
                    manifest_total = sum(layer_sizes.values()) if layer_sizes else 0
                    all_total = manifest_total if manifest_total > 0 else known_total
                    ratio = (done / all_total) if all_total > 0 else 0.0
                    # Map overall pull progress into the 35..90 band.
                    progress = 35.0 + min(55.0, ratio * 55.0)
                    speed_bps, eta_seconds = _current_transfer_stats()
                    completed = sum(1 for c, t in layer_progress.values() if c >= t > 0)
                    speed_txt = self._format_speed(speed_bps)
                    eta_txt = self._format_eta(eta_seconds)
                    report(
                        "pull_image",
                        (
                            f"Pulling {image} ... {ratio * 100:.1f}% "
                            f"({completed}/{len(layer_progress)} layers, {speed_txt}, ETA {eta_txt})"
                        ),
                        progress,
                        eta_seconds=eta_seconds,
                    )
                    last_emit = now

            # Read raw chunks, fold "\r" rewrites into newlines, and feed
            # complete lines to the handler; the last partial stays pending.
            while True:
                chunk = proc.stdout.read(4096)
                if not chunk:
                    break
                text = chunk.decode("utf-8", errors="ignore").replace("\r", "\n")
                pending += text
                lines = pending.split("\n")
                pending = lines.pop() if lines else ""
                for raw_line in lines:
                    _handle_line(raw_line)

            if pending.strip():
                _handle_line(pending)

            returncode = proc.wait()
            if returncode != 0:
                detail = "\n".join(tail)
                raise RuntimeError(
                    f"Command failed: {' '.join(cmd)}\n{detail}"
                )
            # Final summary with whole-transfer average speed; 90% leaves
            # headroom for the caller's own completion step.
            elapsed = max(0.001, time.monotonic() - transfer_started_at)
            done = sum(c for c, t in layer_progress.values())
            speed = done / elapsed if done > 0 else 0.0
            report(
                "pull_image",
                f"Image {image} is ready. Avg speed {self._format_speed(speed)}.",
                90.0,
                eta_seconds=0,
            )
        except Exception:
            # Never leave an orphaned `docker pull` running.
            try:
                proc.kill()
            except Exception:
                pass
            raise
|
|
281
|
+
|
|
282
|
+
@staticmethod
|
|
283
|
+
def _format_speed(speed_bps: float) -> str:
|
|
284
|
+
if speed_bps <= 0:
|
|
285
|
+
return "-- B/s"
|
|
286
|
+
units = ["B/s", "KB/s", "MB/s", "GB/s"]
|
|
287
|
+
v = float(speed_bps)
|
|
288
|
+
idx = 0
|
|
289
|
+
while v >= 1024 and idx < len(units) - 1:
|
|
290
|
+
v /= 1024
|
|
291
|
+
idx += 1
|
|
292
|
+
return f"{v:.1f} {units[idx]}"
|
|
293
|
+
|
|
294
|
+
@staticmethod
|
|
295
|
+
def _format_eta(eta_seconds: int | None) -> str:
|
|
296
|
+
if eta_seconds is None:
|
|
297
|
+
return "--:--"
|
|
298
|
+
if eta_seconds <= 0:
|
|
299
|
+
return "00:00"
|
|
300
|
+
mins, sec = divmod(eta_seconds, 60)
|
|
301
|
+
hours, mins = divmod(mins, 60)
|
|
302
|
+
if hours > 0:
|
|
303
|
+
return f"{hours:02d}:{mins:02d}:{sec:02d}"
|
|
304
|
+
return f"{mins:02d}:{sec:02d}"
|
|
305
|
+
|
|
306
|
+
@staticmethod
|
|
307
|
+
def _format_bytes(size: int) -> str:
|
|
308
|
+
if size <= 0:
|
|
309
|
+
return "0 B"
|
|
310
|
+
units = ["B", "KB", "MB", "GB", "TB"]
|
|
311
|
+
value = float(size)
|
|
312
|
+
idx = 0
|
|
313
|
+
while value >= 1024 and idx < len(units) - 1:
|
|
314
|
+
value /= 1024
|
|
315
|
+
idx += 1
|
|
316
|
+
return f"{value:.1f} {units[idx]}"
|
|
317
|
+
|
|
318
|
+
def _effective_options(self, spec: EngineSpec, cfg: RuntimeConfig) -> dict:
|
|
319
|
+
opts = dict(spec.options)
|
|
320
|
+
opts.update(cfg.options)
|
|
321
|
+
return opts
|
|
322
|
+
|
|
323
|
+
def _ensure_workdir(self, cfg: RuntimeConfig) -> Path:
|
|
324
|
+
p = Path(cfg.work_dir).expanduser().resolve()
|
|
325
|
+
p.mkdir(parents=True, exist_ok=True)
|
|
326
|
+
return p
|
|
327
|
+
|
|
328
|
+
def _health_url(self, opts: dict, cfg: RuntimeConfig) -> str:
|
|
329
|
+
host_port = int(opts.get("host_port", 8080))
|
|
330
|
+
health_path = str(opts.get("health_path", "/health"))
|
|
331
|
+
return cfg.options.get("health_url", f"http://127.0.0.1:{host_port}{health_path}")
|
|
332
|
+
|
|
333
|
+
def install(self, spec: EngineSpec, cfg: RuntimeConfig, report) -> None:
|
|
334
|
+
opts = self._effective_options(spec, cfg)
|
|
335
|
+
image = str(opts["docker_image"])
|
|
336
|
+
platform = str(opts.get("platform", "")).strip()
|
|
337
|
+
report("check_runtime", "Checking docker runtime...", 5)
|
|
338
|
+
self._require_docker()
|
|
339
|
+
|
|
340
|
+
report("pull_image", f"Pulling image {image} ...", 20)
|
|
341
|
+
self._pull_with_progress(image, platform, report)
|
|
342
|
+
report("done", "Image pull completed.", 100)
|
|
343
|
+
|
|
344
|
+
def update(self, spec: EngineSpec, cfg: RuntimeConfig, report) -> None:
|
|
345
|
+
opts = self._effective_options(spec, cfg)
|
|
346
|
+
image = str(opts["docker_image"])
|
|
347
|
+
platform = str(opts.get("platform", "")).strip()
|
|
348
|
+
report("check_runtime", "Checking docker runtime...", 5)
|
|
349
|
+
self._require_docker()
|
|
350
|
+
report("pull_image", f"Updating image {image} ...", 30)
|
|
351
|
+
self._pull_with_progress(image, platform, report)
|
|
352
|
+
report("done", "Image update completed.", 100)
|
|
353
|
+
|
|
354
|
+
    def start(self, spec: EngineSpec, cfg: RuntimeConfig, report) -> None:
        """Start the engine container and wait until its health URL answers.

        Steps (each reported with a step name and a progress percentage):
        check docker, prepare the per-engine cache dir, remove any stale
        container, ensure the image is present (pull policy below), run the
        container, then poll the health endpoint until it responds or
        ``cfg.timeout_s`` elapses.

        Raises RuntimeError (via helpers or directly) on a missing docker
        binary, a failed ``docker run``, or a health-check timeout.
        """
        opts = self._effective_options(spec, cfg)
        image = str(opts["docker_image"])
        container = str(opts["container_name"])
        platform = str(opts.get("platform", "")).strip()
        host_port = int(opts.get("host_port", 8080))
        container_port = int(opts.get("container_port", 8080))
        start_command = str(opts.get("start_command", "")).strip()

        report("check_runtime", "Checking docker runtime...", 5)
        self._require_docker()

        report("prepare_dirs", "Preparing local workspace for engine...", 15)
        work_dir = self._ensure_workdir(cfg)
        cache_dir = work_dir / spec.name / "cache"
        cache_dir.mkdir(parents=True, exist_ok=True)

        # Force-remove any leftover container with the same name so the
        # subsequent `docker run --name` cannot collide (missing is fine).
        report("cleanup", "Removing stale container if present...", 25)
        self._run(["docker", "rm", "-f", container], check=False)

        # Pull policy: always pull when pull_on_start (default), otherwise
        # reuse a local image and pull only when none exists.
        pull_on_start = bool(opts.get("pull_on_start", True))
        if pull_on_start:
            report("pull_image", f"Ensuring image {image} is present...", 35)
            self._pull_with_progress(image, platform, report)
        else:
            if self._image_exists(image):
                report("pull_image", f"Skip pull (check updates off): using local image {image}.", 35)
            else:
                report(
                    "pull_image",
                    f"Local image not found, pulling once: {image} ...",
                    35,
                )
                self._pull_with_progress(image, platform, report)

        report("start_container", f"Starting container {container} ...", 40)
        # Detached container with the host cache mounted at /data.
        cmd = [
            "docker",
            "run",
            "-d",
            "--name",
            container,
            "-p",
            f"{host_port}:{container_port}",
            "-v",
            f"{cache_dir}:/data",
        ]
        if platform:
            cmd.extend(["--platform", platform])
        cmd.append(image)
        if start_command:
            # Optional override of the image's default command, run via shell.
            cmd.extend(["sh", "-lc", start_command])
        self._run(cmd, check=True)

        report("wait_health", "Waiting for engine health check...", 70)
        health_url = self._health_url(opts, cfg)
        deadline = time.monotonic() + max(cfg.timeout_s, 1.0)
        next_heartbeat = 0.0
        while time.monotonic() < deadline:
            now = time.monotonic()
            # Emit a heartbeat at most once a second, scaling progress from
            # 70 toward 95 as the timeout budget is consumed.
            if now >= next_heartbeat:
                total = max(cfg.timeout_s, 1.0)
                elapsed = total - max(0.0, deadline - now)
                ratio = min(1.0, max(0.0, elapsed / total))
                progress = 70.0 + ratio * 25.0
                report("wait_health", f"Waiting for health endpoint {health_url} ...", progress)
                next_heartbeat = now + 1.0
            try:
                with urllib.request.urlopen(health_url, timeout=3) as resp:
                    # Any non-5xx response counts as healthy.
                    if resp.status < 500:
                        report("ready", f"Engine is healthy: {health_url}", 100)
                        return
            except urllib.error.URLError:
                # Not up yet (connection refused etc.); keep polling.
                pass
            time.sleep(1.0)

        raise RuntimeError(
            f"Timed out waiting for health check at {health_url}. "
            f"Check logs via `openspeech engine logs --name {spec.name}`."
        )
|
|
434
|
+
|
|
435
|
+
def stop(self, spec: EngineSpec, cfg: RuntimeConfig, report) -> None:
|
|
436
|
+
opts = self._effective_options(spec, cfg)
|
|
437
|
+
container = str(opts["container_name"])
|
|
438
|
+
report("check_runtime", "Checking docker runtime...", 10)
|
|
439
|
+
self._require_docker()
|
|
440
|
+
report("stop_container", f"Stopping container {container} ...", 60)
|
|
441
|
+
self._run(["docker", "rm", "-f", container], check=False)
|
|
442
|
+
report("done", "Engine stopped.", 100)
|
|
443
|
+
|
|
444
|
+
def status(self, spec: EngineSpec, cfg: RuntimeConfig) -> EngineStatus:
|
|
445
|
+
opts = self._effective_options(spec, cfg)
|
|
446
|
+
container = str(opts["container_name"])
|
|
447
|
+
self._require_docker()
|
|
448
|
+
|
|
449
|
+
result = self._run(
|
|
450
|
+
[
|
|
451
|
+
"docker",
|
|
452
|
+
"ps",
|
|
453
|
+
"-a",
|
|
454
|
+
"--filter",
|
|
455
|
+
f"name=^{container}$",
|
|
456
|
+
"--format",
|
|
457
|
+
"{{.Status}}",
|
|
458
|
+
],
|
|
459
|
+
check=False,
|
|
460
|
+
)
|
|
461
|
+
raw = (result.stdout or "").strip()
|
|
462
|
+
running = raw.lower().startswith("up")
|
|
463
|
+
healthy = False
|
|
464
|
+
detail = raw or "not found"
|
|
465
|
+
if running:
|
|
466
|
+
health_url = self._health_url(opts, cfg)
|
|
467
|
+
try:
|
|
468
|
+
with urllib.request.urlopen(health_url, timeout=3) as resp:
|
|
469
|
+
healthy = resp.status < 500
|
|
470
|
+
except urllib.error.URLError:
|
|
471
|
+
healthy = False
|
|
472
|
+
return EngineStatus(
|
|
473
|
+
engine=spec.name,
|
|
474
|
+
runtime=self.runtime_name,
|
|
475
|
+
running=running,
|
|
476
|
+
healthy=healthy,
|
|
477
|
+
detail=detail,
|
|
478
|
+
metadata={"container_name": container},
|
|
479
|
+
)
|
|
480
|
+
|
|
481
|
+
def logs(self, spec: EngineSpec, cfg: RuntimeConfig, lines: int = 100) -> str:
|
|
482
|
+
opts = self._effective_options(spec, cfg)
|
|
483
|
+
container = str(opts["container_name"])
|
|
484
|
+
self._require_docker()
|
|
485
|
+
result = self._run(
|
|
486
|
+
["docker", "logs", "--tail", str(lines), container],
|
|
487
|
+
check=False,
|
|
488
|
+
)
|
|
489
|
+
output = (result.stdout or "") + (result.stderr or "")
|
|
490
|
+
return output.strip()
|