pactown 0.1.4__py3-none-any.whl → 0.1.47__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pactown/__init__.py +178 -4
- pactown/cli.py +539 -37
- pactown/config.py +12 -11
- pactown/deploy/__init__.py +17 -3
- pactown/deploy/base.py +35 -33
- pactown/deploy/compose.py +59 -58
- pactown/deploy/docker.py +40 -41
- pactown/deploy/kubernetes.py +43 -42
- pactown/deploy/podman.py +55 -56
- pactown/deploy/quadlet.py +1021 -0
- pactown/deploy/quadlet_api.py +533 -0
- pactown/deploy/quadlet_shell.py +557 -0
- pactown/events.py +1066 -0
- pactown/fast_start.py +514 -0
- pactown/generator.py +31 -30
- pactown/llm.py +450 -0
- pactown/markpact_blocks.py +50 -0
- pactown/network.py +59 -38
- pactown/orchestrator.py +90 -93
- pactown/parallel.py +40 -40
- pactown/platform.py +146 -0
- pactown/registry/__init__.py +1 -1
- pactown/registry/client.py +45 -46
- pactown/registry/models.py +25 -25
- pactown/registry/server.py +24 -24
- pactown/resolver.py +30 -30
- pactown/runner_api.py +458 -0
- pactown/sandbox_manager.py +480 -79
- pactown/security.py +682 -0
- pactown/service_runner.py +1201 -0
- pactown/user_isolation.py +458 -0
- {pactown-0.1.4.dist-info → pactown-0.1.47.dist-info}/METADATA +65 -9
- pactown-0.1.47.dist-info/RECORD +36 -0
- pactown-0.1.47.dist-info/entry_points.txt +5 -0
- pactown-0.1.4.dist-info/RECORD +0 -24
- pactown-0.1.4.dist-info/entry_points.txt +0 -3
- {pactown-0.1.4.dist-info → pactown-0.1.47.dist-info}/WHEEL +0 -0
- {pactown-0.1.4.dist-info → pactown-0.1.47.dist-info}/licenses/LICENSE +0 -0
pactown/fast_start.py
ADDED
@@ -0,0 +1,514 @@
+"""
+Fast startup module for pactown.
+
+Provides optimizations for rapid service startup:
+- Dependency caching (reuse venvs with same deps)
+- Pre-warmed sandbox pool
+- Parallel service startup
+- Async health checks
+- Hot reload without reinstall
+"""
+
+import asyncio
+import hashlib
+import os
+import shutil
+import subprocess
+import time
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass, field
+from pathlib import Path
+from threading import Lock
+from typing import Any, Callable, Dict, List, Optional, Set
+
+from .markpact_blocks import parse_blocks
+
+
+@dataclass
+class CachedVenv:
+    """Cached virtual environment for a specific dependency set."""
+    deps_hash: str
+    path: Path
+    created_at: float
+    last_used: float
+    deps: List[str]
+
+    def is_valid(self) -> bool:
+        """Check if venv still exists and is valid."""
+        return (self.path / "bin" / "python").exists()
+
+
+@dataclass
+class PrewarmedSandbox:
+    """Pre-created sandbox ready for immediate use."""
+    path: Path
+    venv_path: Optional[Path]
+    deps_hash: str
+    created_at: float
+    in_use: bool = False
+
+
+@dataclass
+class FastStartResult:
+    """Result of fast startup."""
+    success: bool
+    startup_time_ms: float
+    cache_hit: bool
+    message: str
+    sandbox_path: Optional[Path] = None
+
+
+class DependencyCache:
+    """
+    Caches virtual environments by dependency hash.
+
+    Instead of creating a new venv for each service, reuses existing venvs
+    that have the same dependencies installed.
+    """
+
+    def __init__(
+        self,
+        cache_root: Path,
+        max_cache_size: int = 20,
+        max_age_hours: int = 24,
+    ):
+        self.cache_root = cache_root
+        self.cache_root.mkdir(parents=True, exist_ok=True)
+        self.max_cache_size = max_cache_size
+        self.max_age_seconds = max_age_hours * 3600
+        self._cache: Dict[str, CachedVenv] = {}
+        self._lock = Lock()
+        self._load_existing()
+
+    def _load_existing(self):
+        """Load existing cached venvs from disk."""
+        for venv_dir in self.cache_root.iterdir():
+            if venv_dir.is_dir() and (venv_dir / "bin" / "python").exists():
+                deps_file = venv_dir / ".deps"
+                if deps_file.exists():
+                    deps = deps_file.read_text().strip().split("\n")
+                    deps_hash = self._hash_deps(deps)
+                    self._cache[deps_hash] = CachedVenv(
+                        deps_hash=deps_hash,
+                        path=venv_dir,
+                        created_at=venv_dir.stat().st_ctime,
+                        last_used=time.time(),
+                        deps=deps,
+                    )
+
+    def _hash_deps(self, deps: List[str]) -> str:
+        """Create hash of dependencies for cache key."""
+        # Normalize and sort deps for consistent hashing
+        normalized = sorted([d.strip().lower() for d in deps if d.strip()])
+        deps_str = "\n".join(normalized)
+        return hashlib.sha256(deps_str.encode()).hexdigest()[:16]
+
+    def get_cached_venv(self, deps: List[str]) -> Optional[CachedVenv]:
+        """Get a cached venv for the given dependencies."""
+        deps_hash = self._hash_deps(deps)
+
+        with self._lock:
+            cached = self._cache.get(deps_hash)
+            if cached and cached.is_valid():
+                cached.last_used = time.time()
+                return cached
+            elif cached:
+                # Invalid cache entry, remove it
+                del self._cache[deps_hash]
+                if cached.path.exists():
+                    shutil.rmtree(cached.path)
+
+        return None
+
+    def create_and_cache(self, deps: List[str], on_progress: Optional[Callable[[str], None]] = None) -> CachedVenv:
+        """Create a new venv with deps and cache it."""
+        deps_hash = self._hash_deps(deps)
+        venv_path = self.cache_root / f"venv_{deps_hash}"
+
+        if on_progress:
+            on_progress(f"Creating cached venv for {len(deps)} deps...")
+
+        # Create venv
+        if venv_path.exists():
+            shutil.rmtree(venv_path)
+
+        subprocess.run(
+            ["python3", "-m", "venv", str(venv_path)],
+            capture_output=True,
+            check=True,
+        )
+
+        # Install deps
+        if deps:
+            pip_path = venv_path / "bin" / "pip"
+            subprocess.run(
+                [str(pip_path), "install", "-q", "--disable-pip-version-check"] + deps,
+                capture_output=True,
+                check=True,
+            )
+
+        # Save deps list
+        (venv_path / ".deps").write_text("\n".join(deps))
+
+        cached = CachedVenv(
+            deps_hash=deps_hash,
+            path=venv_path,
+            created_at=time.time(),
+            last_used=time.time(),
+            deps=deps,
+        )
+
+        with self._lock:
+            self._cache[deps_hash] = cached
+            self._cleanup_old()
+
+        if on_progress:
+            on_progress(f"Cached venv created: {deps_hash}")
+
+        return cached
+
+    def _cleanup_old(self):
+        """Remove old cache entries."""
+        now = time.time()
+        to_remove = []
+
+        for deps_hash, cached in self._cache.items():
+            if now - cached.last_used > self.max_age_seconds:
+                to_remove.append(deps_hash)
+
+        # Also remove if over max size (LRU)
+        if len(self._cache) > self.max_cache_size:
+            sorted_by_use = sorted(
+                self._cache.items(),
+                key=lambda x: x[1].last_used
+            )
+            to_remove.extend([h for h, _ in sorted_by_use[:len(self._cache) - self.max_cache_size]])
+
+        for deps_hash in set(to_remove):
+            if deps_hash in self._cache:
+                cached = self._cache[deps_hash]
+                if cached.path.exists():
+                    shutil.rmtree(cached.path)
+                del self._cache[deps_hash]
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get cache statistics."""
+        with self._lock:
+            return {
+                "cached_venvs": len(self._cache),
+                "max_size": self.max_cache_size,
+                "entries": [
+                    {
+                        "hash": c.deps_hash,
+                        "deps_count": len(c.deps),
+                        "age_hours": (time.time() - c.created_at) / 3600,
+                    }
+                    for c in self._cache.values()
+                ]
+            }
+
+
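
DependencyCache keys venvs by a normalized hash, so dependency lists that differ only in order, case, or surrounding whitespace resolve to the same cached venv. A minimal usage sketch (illustrative, not part of the diff; the cache path is arbitrary):

    from pathlib import Path
    from pactown.fast_start import DependencyCache

    cache = DependencyCache(Path("/tmp/pactown-cache/venvs"))

    # First request takes the slow path: python3 -m venv + pip install.
    venv = cache.create_and_cache(["fastapi", "uvicorn"], on_progress=print)

    # Reordered and re-cased deps hash to the same key, so this is a cache hit.
    hit = cache.get_cached_venv(["Uvicorn", "fastapi"])
    assert hit is not None and hit.path == venv.path
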
+class SandboxPool:
+    """
+    Pool of pre-warmed sandboxes for instant startup.
+
+    Keeps a pool of ready-to-use sandboxes with common dependency sets
+    pre-installed. When a service needs to start, it can grab a pre-warmed
+    sandbox instead of creating one from scratch.
+    """
+
+    COMMON_STACKS = [
+        ["fastapi", "uvicorn"],  # Basic FastAPI
+        ["fastapi", "uvicorn", "pydantic"],  # FastAPI with Pydantic
+        ["fastapi", "uvicorn", "sqlalchemy"],  # FastAPI with DB
+        ["flask", "gunicorn"],  # Flask
+    ]
+
+    def __init__(
+        self,
+        pool_root: Path,
+        dep_cache: DependencyCache,
+        pool_size_per_stack: int = 2,
+    ):
+        self.pool_root = pool_root
+        self.pool_root.mkdir(parents=True, exist_ok=True)
+        self.dep_cache = dep_cache
+        self.pool_size = pool_size_per_stack
+        self._pool: Dict[str, List[PrewarmedSandbox]] = {}
+        self._lock = Lock()
+
+    def _hash_deps(self, deps: List[str]) -> str:
+        """Hash deps for pool key."""
+        return self.dep_cache._hash_deps(deps)
+
+    def warm_pool(self, on_progress: Optional[Callable[[str], None]] = None):
+        """Pre-warm the sandbox pool with common stacks."""
+        for stack in self.COMMON_STACKS:
+            deps_hash = self._hash_deps(stack)
+
+            # Ensure we have a cached venv
+            if not self.dep_cache.get_cached_venv(stack):
+                if on_progress:
+                    on_progress(f"Warming cache for: {', '.join(stack)}")
+                self.dep_cache.create_and_cache(stack, on_progress)
+
+    def get_prewarmed(self, deps: List[str]) -> Optional[PrewarmedSandbox]:
+        """Get a pre-warmed sandbox for the given deps if available."""
+        deps_hash = self._hash_deps(deps)
+
+        with self._lock:
+            if deps_hash in self._pool:
+                for sandbox in self._pool[deps_hash]:
+                    if not sandbox.in_use:
+                        sandbox.in_use = True
+                        return sandbox
+
+        return None
+
+    def release(self, sandbox: PrewarmedSandbox):
+        """Release a sandbox back to the pool."""
+        with self._lock:
+            sandbox.in_use = False
+
+
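
Note that in this version warm_pool only pre-builds cached venvs for COMMON_STACKS; nothing in this file inserts into _pool, so get_prewarmed returns None until the pool is filled elsewhere. A sketch of warming at process start (illustrative, not part of the diff; paths are arbitrary):

    from pathlib import Path
    from pactown.fast_start import DependencyCache, SandboxPool

    dep_cache = DependencyCache(Path("/tmp/pactown-cache/venvs"))
    pool = SandboxPool(Path("/tmp/pactown-cache/pool"), dep_cache)

    # Builds venvs for the common FastAPI/Flask stacks so later starts hit the cache.
    pool.warm_pool(on_progress=print)
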
+class FastServiceStarter:
+    """
+    Optimized service starter with caching and parallel execution.
+
+    Provides millisecond startup times by:
+    1. Caching dependency venvs
+    2. Reusing sandboxes with same deps
+    3. Async health checks (optional)
+    4. Parallel file writing
+    """
+
+    def __init__(
+        self,
+        sandbox_root: Path,
+        cache_root: Optional[Path] = None,
+        enable_caching: bool = True,
+        enable_pool: bool = True,
+    ):
+        self.sandbox_root = sandbox_root
+        self.sandbox_root.mkdir(parents=True, exist_ok=True)
+
+        self.cache_root = cache_root or (sandbox_root / ".cache")
+        self.enable_caching = enable_caching
+        self.enable_pool = enable_pool
+
+        if enable_caching:
+            self.dep_cache = DependencyCache(self.cache_root / "venvs")
+        else:
+            self.dep_cache = None
+
+        if enable_pool:
+            self.sandbox_pool = SandboxPool(
+                self.cache_root / "pool",
+                self.dep_cache,
+            )
+        else:
+            self.sandbox_pool = None
+
+        self._executor = ThreadPoolExecutor(max_workers=4)
+
+    async def fast_create_sandbox(
+        self,
+        service_name: str,
+        content: str,
+        on_log: Optional[Callable[[str], None]] = None,
+    ) -> FastStartResult:
+        """
+        Create a sandbox as fast as possible.
+
+        Uses caching and optimizations to minimize startup time.
+        Returns in milliseconds for cached deps.
+        """
+        start_time = time.time()
+        cache_hit = False
+
+        def log(msg: str):
+            if on_log:
+                on_log(msg)
+
+        # Parse content
+        try:
+            blocks = parse_blocks(content)
+        except Exception as e:
+            return FastStartResult(
+                success=False,
+                startup_time_ms=(time.time() - start_time) * 1000,
+                cache_hit=False,
+                message=f"Parse error: {e}",
+            )
+
+        # Extract deps and files
+        deps: List[str] = []
+        files: Dict[str, str] = {}
+        run_cmd: Optional[str] = None
+
+        for block in blocks:
+            if block.kind == "deps":
+                deps.extend([d.strip() for d in block.body.strip().split("\n") if d.strip()])
+            elif block.kind == "file":
+                file_path = block.get_path() or "main.py"
+                files[file_path] = block.body
+            elif block.kind == "run":
+                run_cmd = block.body.strip()
+
+        # Create sandbox directory
+        sandbox_path = self.sandbox_root / service_name
+        if sandbox_path.exists():
+            shutil.rmtree(sandbox_path)
+        sandbox_path.mkdir(parents=True)
+
+        # Write files in parallel
+        write_start = time.time()
+        await asyncio.get_event_loop().run_in_executor(
+            self._executor,
+            self._write_files_parallel,
+            sandbox_path,
+            files,
+        )
+        log(f"⚡ Files written in {(time.time() - write_start) * 1000:.0f}ms")
+
+        # Handle dependencies with caching
+        venv_path = None
+        if deps and self.enable_caching and self.dep_cache:
+            cached = self.dep_cache.get_cached_venv(deps)
+
+            if cached:
+                cache_hit = True
+                venv_path = cached.path
+                log(f"⚡ Cache hit! Reusing venv ({cached.deps_hash})")
+
+                # Symlink to cached venv instead of copying
+                venv_link = sandbox_path / ".venv"
+                venv_link.symlink_to(cached.path)
+            else:
+                # Create and cache new venv
+                log(f"📦 Cache miss, installing {len(deps)} deps...")
+                cached = await asyncio.get_event_loop().run_in_executor(
+                    self._executor,
+                    self.dep_cache.create_and_cache,
+                    deps,
+                    log,
+                )
+                venv_path = cached.path
+                venv_link = sandbox_path / ".venv"
+                venv_link.symlink_to(cached.path)
+        elif deps:
+            # No caching, install directly
+            log(f"📦 Installing {len(deps)} deps (no cache)...")
+            await asyncio.get_event_loop().run_in_executor(
+                self._executor,
+                self._install_deps_direct,
+                sandbox_path,
+                deps,
+            )
+            venv_path = sandbox_path / ".venv"
+
+        # Write requirements.txt
+        if deps:
+            (sandbox_path / "requirements.txt").write_text("\n".join(deps))
+
+        total_time_ms = (time.time() - start_time) * 1000
+
+        return FastStartResult(
+            success=True,
+            startup_time_ms=total_time_ms,
+            cache_hit=cache_hit,
+            message=f"Sandbox ready in {total_time_ms:.0f}ms" + (" (cached)" if cache_hit else ""),
+            sandbox_path=sandbox_path,
+        )
+
+    def _write_files_parallel(self, sandbox_path: Path, files: Dict[str, str]):
+        """Write multiple files in parallel."""
+        def write_file(item):
+            path, content = item
+            file_path = sandbox_path / path
+            file_path.parent.mkdir(parents=True, exist_ok=True)
+            file_path.write_text(content)
+
+        with ThreadPoolExecutor(max_workers=4) as executor:
+            list(executor.map(write_file, files.items()))
+
+    def _install_deps_direct(self, sandbox_path: Path, deps: List[str]):
+        """Install deps directly without caching."""
+        venv_path = sandbox_path / ".venv"
+        subprocess.run(
+            ["python3", "-m", "venv", str(venv_path)],
+            capture_output=True,
+            check=True,
+        )
+        pip_path = venv_path / "bin" / "pip"
+        subprocess.run(
+            [str(pip_path), "install", "-q", "--disable-pip-version-check"] + deps,
+            capture_output=True,
+            check=True,
+        )
+
+    def get_cache_stats(self) -> Dict[str, Any]:
+        """Get caching statistics."""
+        stats = {
+            "caching_enabled": self.enable_caching,
+            "pool_enabled": self.enable_pool,
+        }
+        if self.dep_cache:
+            stats["dep_cache"] = self.dep_cache.get_stats()
+        return stats
+
+
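
fast_create_sandbox is a coroutine: it parses the markpact content, writes the declared files through a thread pool, then either symlinks a cached venv into the sandbox or installs the deps directly. A minimal driver sketch (illustrative, not part of the diff; `doc` stands for markpact text with deps/file/run blocks, whose exact syntax comes from pactown.markpact_blocks and is not shown here):

    import asyncio
    from pathlib import Path
    from pactown.fast_start import FastServiceStarter

    async def main(doc: str) -> None:
        starter = FastServiceStarter(Path("/tmp/pactown-sandboxes"))
        result = await starter.fast_create_sandbox("demo", doc, on_log=print)
        print(result.message, "| cache hit:", result.cache_hit)

    # asyncio.run(main(doc))  # run with your markpact document
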
+class ParallelServiceRunner:
+    """
+    Run multiple services in parallel with optimized startup.
+    """
+
+    def __init__(self, fast_starter: FastServiceStarter, max_parallel: int = 4):
+        self.fast_starter = fast_starter
+        self.max_parallel = max_parallel
+        self._semaphore = asyncio.Semaphore(max_parallel)
+
+    async def run_parallel(
+        self,
+        services: List[Dict[str, Any]],
+        on_service_log: Optional[Callable[[str, str], None]] = None,
+    ) -> List[FastStartResult]:
+        """
+        Run multiple services in parallel.
+
+        Args:
+            services: List of dicts with {service_id, content, port}
+            on_service_log: Callback (service_id, message)
+
+        Returns:
+            List of FastStartResult for each service
+        """
+        async def run_one(svc: Dict[str, Any]) -> FastStartResult:
+            async with self._semaphore:
+                service_id = svc["service_id"]
+
+                def log(msg: str):
+                    if on_service_log:
+                        on_service_log(service_id, msg)
+
+                return await self.fast_starter.fast_create_sandbox(
+                    service_name=f"service_{service_id}",
+                    content=svc["content"],
+                    on_log=log,
+                )
+
+        results = await asyncio.gather(*[run_one(s) for s in services])
+        return list(results)
+
+
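
ParallelServiceRunner bounds concurrency with an asyncio.Semaphore, so at most max_parallel sandboxes are created at once while the rest queue. A fan-out sketch (illustrative, not part of the diff; service contents are placeholders):

    import asyncio
    from pathlib import Path
    from typing import Dict
    from pactown.fast_start import FastServiceStarter, ParallelServiceRunner

    async def start_all(contents: Dict[str, str]) -> None:
        starter = FastServiceStarter(Path("/tmp/pactown-sandboxes"))
        runner = ParallelServiceRunner(starter, max_parallel=4)
        services = [{"service_id": sid, "content": doc} for sid, doc in contents.items()]
        results = await runner.run_parallel(services, on_service_log=lambda sid, msg: print(sid, msg))
        for r in results:
            print(f"{r.startup_time_ms:.0f}ms cache_hit={r.cache_hit}")
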
+# Global fast starter instance
+_fast_starter: Optional[FastServiceStarter] = None
+
+
+def get_fast_starter(sandbox_root: Optional[Path] = None) -> FastServiceStarter:
+    """Get or create the global fast starter instance."""
+    global _fast_starter
+    if _fast_starter is None:
+        root = sandbox_root or Path("/tmp/pactown-sandboxes")
+        _fast_starter = FastServiceStarter(root)
+    return _fast_starter
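
get_fast_starter is a process-wide singleton accessor: the first call fixes the sandbox root (defaulting to /tmp/pactown-sandboxes) and later calls ignore the argument. A short sketch (illustrative, not part of the diff):

    from pathlib import Path
    from pactown.fast_start import get_fast_starter

    starter = get_fast_starter(Path("/tmp/my-sandboxes"))  # first call wins
    assert get_fast_starter() is starter                   # later calls reuse it
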