pactown 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pactown/__init__.py +23 -0
- pactown/cli.py +347 -0
- pactown/config.py +158 -0
- pactown/deploy/__init__.py +17 -0
- pactown/deploy/base.py +263 -0
- pactown/deploy/compose.py +359 -0
- pactown/deploy/docker.py +299 -0
- pactown/deploy/kubernetes.py +449 -0
- pactown/deploy/podman.py +400 -0
- pactown/generator.py +212 -0
- pactown/network.py +245 -0
- pactown/orchestrator.py +455 -0
- pactown/parallel.py +268 -0
- pactown/registry/__init__.py +12 -0
- pactown/registry/client.py +253 -0
- pactown/registry/models.py +150 -0
- pactown/registry/server.py +207 -0
- pactown/resolver.py +160 -0
- pactown/sandbox_manager.py +328 -0
- pactown-0.1.4.dist-info/METADATA +308 -0
- pactown-0.1.4.dist-info/RECORD +24 -0
- pactown-0.1.4.dist-info/WHEEL +4 -0
- pactown-0.1.4.dist-info/entry_points.txt +3 -0
- pactown-0.1.4.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
"""FastAPI server for pactown registry."""
|
|
2
|
+
|
|
3
|
+
import hashlib
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Optional
|
|
6
|
+
|
|
7
|
+
import click
|
|
8
|
+
import uvicorn
|
|
9
|
+
from fastapi import FastAPI, HTTPException, Query
|
|
10
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
11
|
+
from pydantic import BaseModel
|
|
12
|
+
|
|
13
|
+
from .models import Artifact, ArtifactVersion, RegistryStorage
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class PublishRequest(BaseModel):
    """Request body for POST /v1/publish: one artifact version to store."""

    name: str  # artifact name
    version: str  # version string for this release
    readme_content: str  # full README (markpact module) payload being published
    namespace: str = "default"  # registry namespace; "default" when unspecified
    description: str = ""  # optional human-readable description
    # NOTE: mutable defaults are safe on pydantic models — pydantic copies
    # field defaults per instance, unlike plain class attributes.
    tags: list[str] = []
    metadata: dict = {}
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class PublishResponse(BaseModel):
    """Response body for a successful publish."""

    success: bool  # always True on the success path (errors raise HTTPException)
    artifact: str  # fully-qualified artifact name (namespace/name)
    version: str  # version that was published
    checksum: str  # sha256 hex digest of the published README content
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class ArtifactInfo(BaseModel):
    """Public view of an artifact returned by the list/get endpoints."""

    name: str  # artifact name
    namespace: str  # owning namespace
    description: str  # human-readable description
    latest_version: Optional[str]  # None when no version has been published yet
    versions: list[str]  # all known version strings
    tags: list[str]  # free-form tags attached to the artifact
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class VersionInfo(BaseModel):
    """Public view of a single artifact version."""

    version: str  # version string
    readme_content: str  # stored README payload
    checksum: str  # sha256 hex digest of readme_content
    published_at: str  # ISO-8601 timestamp of publication
    metadata: dict  # arbitrary publisher-supplied metadata
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def create_app(storage_path: str = "./.pactown-registry") -> FastAPI:
    """Create the registry FastAPI application.

    Args:
        storage_path: Directory used as the on-disk artifact store.

    Returns:
        A configured FastAPI app. All routes close over a single
        RegistryStorage instance rooted at ``storage_path``.
    """

    app = FastAPI(
        title="Pactown Registry",
        description="Local artifact registry for markpact modules",
        version="0.1.0",
    )

    # CORS is wide open ("*" origins + credentials). Fine for a local
    # registry; NOTE(review): confirm before exposing beyond localhost.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Shared storage backend; every route below closes over this instance.
    storage = RegistryStorage(Path(storage_path))

    @app.get("/health")
    def health():
        """Liveness probe."""
        return {"status": "ok", "service": "pactown-registry"}

    @app.get("/v1/artifacts", response_model=list[ArtifactInfo])
    def list_artifacts(
        namespace: Optional[str] = Query(None, description="Filter by namespace"),
        search: Optional[str] = Query(None, description="Search query"),
    ):
        """List artifacts; ``search`` takes precedence over ``namespace``."""
        if search:
            artifacts = storage.search(search)
        else:
            artifacts = storage.list(namespace)

        return [
            ArtifactInfo(
                name=a.name,
                namespace=a.namespace,
                description=a.description,
                latest_version=a.latest_version,
                versions=list(a.versions.keys()),
                tags=a.tags,
            )
            for a in artifacts
        ]

    @app.get("/v1/artifacts/{namespace}/{name}", response_model=ArtifactInfo)
    def get_artifact(namespace: str, name: str):
        """Return metadata for one artifact, or 404 if unknown."""
        artifact = storage.get(namespace, name)
        if not artifact:
            raise HTTPException(status_code=404, detail="Artifact not found")

        return ArtifactInfo(
            name=artifact.name,
            namespace=artifact.namespace,
            description=artifact.description,
            latest_version=artifact.latest_version,
            versions=list(artifact.versions.keys()),
            tags=artifact.tags,
        )

    @app.get("/v1/artifacts/{namespace}/{name}/{version}", response_model=VersionInfo)
    def get_version(namespace: str, name: str, version: str):
        """Return one published version, or 404 if artifact/version unknown."""
        artifact = storage.get(namespace, name)
        if not artifact:
            raise HTTPException(status_code=404, detail="Artifact not found")

        ver = artifact.get_version(version)
        if not ver:
            raise HTTPException(status_code=404, detail="Version not found")

        return VersionInfo(
            version=ver.version,
            readme_content=ver.readme_content,
            checksum=ver.checksum,
            published_at=ver.published_at.isoformat(),
            metadata=ver.metadata,
        )

    @app.get("/v1/artifacts/{namespace}/{name}/{version}/readme")
    def get_readme(namespace: str, name: str, version: str):
        """Return just the README payload of a version (for download)."""
        artifact = storage.get(namespace, name)
        if not artifact:
            raise HTTPException(status_code=404, detail="Artifact not found")

        ver = artifact.get_version(version)
        if not ver:
            raise HTTPException(status_code=404, detail="Version not found")

        return {"content": ver.readme_content}

    @app.post("/v1/publish", response_model=PublishResponse)
    def publish(req: PublishRequest):
        """Publish a version; creates the artifact on first publish."""
        # Content checksum identifies the payload for later verification.
        checksum = hashlib.sha256(req.readme_content.encode()).hexdigest()

        artifact = storage.get(req.namespace, req.name)
        if not artifact:
            artifact = Artifact(
                name=req.name,
                namespace=req.namespace,
                description=req.description,
                tags=req.tags,
            )

        version = ArtifactVersion(
            version=req.version,
            readme_content=req.readme_content,
            checksum=checksum,
            metadata=req.metadata,
        )

        artifact.add_version(version)
        # Re-publishing may refresh the description and extend the tag set.
        if req.description:
            artifact.description = req.description
        if req.tags:
            artifact.tags = list(set(artifact.tags + req.tags))

        storage.save_artifact(artifact)

        return PublishResponse(
            success=True,
            artifact=artifact.full_name,
            version=req.version,
            checksum=checksum,
        )

    @app.delete("/v1/artifacts/{namespace}/{name}")
    def delete_artifact(namespace: str, name: str):
        """Delete an artifact and all of its versions, or 404 if unknown."""
        if storage.delete(namespace, name):
            return {"success": True, "message": f"Deleted {namespace}/{name}"}
        raise HTTPException(status_code=404, detail="Artifact not found")

    @app.get("/v1/namespaces")
    def list_namespaces():
        """Return the sorted set of namespaces that currently hold artifacts."""
        namespaces = set(a.namespace for a in storage.list())
        return {"namespaces": sorted(namespaces)}

    return app
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
@click.command()
@click.option("--host", default="0.0.0.0", help="Host to bind to")
@click.option("--port", default=8800, help="Port to bind to")
@click.option("--storage", default="./.pactown-registry", help="Storage path")
@click.option("--reload", is_flag=True, help="Enable auto-reload")
def main(host: str, port: int, storage: str, reload: bool):
    """Start the pactown registry server.

    Without --reload the app is built here with the requested --storage path
    and handed to uvicorn directly. (The original built the app and then
    discarded it, always serving the factory's default storage path.)
    """
    if reload:
        # Auto-reload requires an import string: uvicorn re-imports the app
        # in a subprocess, so the factory is referenced by name and called
        # with no arguments — a non-default --storage cannot be honoured
        # in reload mode.
        uvicorn.run(
            "pactown.registry.server:create_app",
            host=host,
            port=port,
            reload=True,
            factory=True,
        )
    else:
        # Pass the constructed app so --storage actually takes effect.
        uvicorn.run(create_app(storage), host=host, port=port)


if __name__ == "__main__":
    main()
|
pactown/resolver.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
"""Dependency resolver for pactown ecosystems."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from typing import Optional
|
|
5
|
+
from collections import deque
|
|
6
|
+
|
|
7
|
+
from .config import EcosystemConfig, ServiceConfig
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class ResolvedDependency:
    """A resolved dependency with endpoint information."""
    name: str  # dependency service/artifact name
    version: str  # requested version of the dependency
    endpoint: str  # URL at which the dependency can be reached
    env_var: str  # env-var name that exposes the endpoint to the dependent
    # Set only when the dependency is defined locally in the ecosystem;
    # None for registry-backed dependencies.
    service: Optional[ServiceConfig] = None
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class DependencyResolver:
    """Resolves dependencies between services in an ecosystem.

    Builds a graph of local (same-ecosystem) dependencies and answers
    ordering, endpoint-resolution, and validation queries over it.
    """

    def __init__(self, config: EcosystemConfig):
        self.config = config
        # _graph[name] = names of local services that `name` depends on.
        self._graph: dict[str, list[str]] = {}
        self._build_graph()

    def _build_graph(self) -> None:
        """Build dependency graph from configuration.

        Dependencies not defined locally (registry artifacts) are excluded:
        they impose no startup-ordering constraint.
        """
        for name, service in self.config.services.items():
            self._graph[name] = []
            for dep in service.depends_on:
                if dep.name in self.config.services:
                    self._graph[name].append(dep.name)

    def get_startup_order(self) -> list[str]:
        """
        Get services in topological order for startup (Kahn's algorithm).
        Services with no dependencies start first.

        Raises:
            ValueError: if a dependency cycle makes ordering impossible.
        """
        # in_degree[X] = number of dependencies X has
        in_degree = {name: len(deps) for name, deps in self._graph.items()}

        # Reverse adjacency built once: dependents[X] = services that depend
        # on X. Keeps the loop below O(V + E) instead of rescanning the whole
        # graph for every node popped from the queue (the original was O(V*E)).
        dependents: dict[str, list[str]] = {name: [] for name in self._graph}
        for name, deps in self._graph.items():
            for dep in set(deps):  # set(): decrement once even if listed twice
                dependents[dep].append(name)

        # Start with services that have no dependencies
        queue = deque(name for name, degree in in_degree.items() if degree == 0)
        order: list[str] = []

        while queue:
            current = queue.popleft()
            order.append(current)

            # Each service waiting on `current` has one fewer unmet dependency.
            for name in dependents[current]:
                in_degree[name] -= 1
                if in_degree[name] == 0:
                    queue.append(name)

        if len(order) != len(self._graph):
            missing = set(self._graph.keys()) - set(order)
            raise ValueError(f"Circular dependency detected involving: {missing}")

        return order

    def get_shutdown_order(self) -> list[str]:
        """Get services in reverse startup order for shutdown."""
        return list(reversed(self.get_startup_order()))

    def resolve_service_deps(self, service_name: str) -> list[ResolvedDependency]:
        """Resolve all dependencies for a service.

        Local dependencies resolve to the dependency service's own port;
        non-local ones fall back to the default registry endpoint. An
        explicit ``dep.endpoint`` always wins.

        Raises:
            ValueError: if ``service_name`` is not defined in the ecosystem.
        """
        if service_name not in self.config.services:
            raise ValueError(f"Unknown service: {service_name}")

        service = self.config.services[service_name]
        resolved = []

        for dep in service.depends_on:
            # Env-var name defaults to e.g. "my-db" -> "MY_DB_URL".
            env_var = dep.env_var or f"{dep.name.upper().replace('-', '_')}_URL"
            dep_service = self.config.services.get(dep.name)
            if dep_service is not None:
                endpoint = dep.endpoint or f"http://localhost:{dep_service.port}"
            else:
                # Not defined locally: assume the registry serves it.
                endpoint = dep.endpoint or f"http://localhost:8800/v1/{dep.name}"

            resolved.append(ResolvedDependency(
                name=dep.name,
                version=dep.version,
                endpoint=endpoint,
                env_var=env_var,
                service=dep_service,  # None for registry-backed deps
            ))

        return resolved

    def get_environment(self, service_name: str) -> dict[str, str]:
        """Get environment variables for a service including dependency endpoints.

        Raises:
            ValueError: if ``service_name`` is not defined in the ecosystem.
        """
        if service_name not in self.config.services:
            raise ValueError(f"Unknown service: {service_name}")

        service = self.config.services[service_name]
        env = dict(service.env)  # copy: never mutate the config's mapping

        for dep in self.resolve_service_deps(service_name):
            env[dep.env_var] = dep.endpoint

        env["PACTOWN_SERVICE_NAME"] = service_name
        env["PACTOWN_ECOSYSTEM"] = self.config.name
        if service.port:
            env["MARKPACT_PORT"] = str(service.port)

        return env

    def validate(self) -> list[str]:
        """Validate the dependency graph and return any issues (empty = OK)."""
        issues = []

        try:
            self.get_startup_order()
        except ValueError as e:
            # Cycle errors surface as validation issues rather than raising.
            issues.append(str(e))

        for name, service in self.config.services.items():
            for dep in service.depends_on:
                if dep.name not in self.config.services:
                    if dep.registry == "local":
                        issues.append(
                            f"Service '{name}' depends on '{dep.name}' which is not "
                            f"defined locally and no registry is configured"
                        )

        return issues

    def print_graph(self) -> str:
        """Return ASCII representation of dependency graph."""
        lines = [f"Ecosystem: {self.config.name}", ""]

        try:
            order = self.get_startup_order()
        except ValueError:
            # Cyclic graph: fall back to declaration order so we still print.
            order = list(self._graph.keys())

        for name in order:
            service = self.config.services[name]
            deps = [d.name for d in service.depends_on]
            port = f":{service.port}" if service.port else ""

            if deps:
                lines.append(f" [{name}{port}] → {', '.join(deps)}")
            else:
                lines.append(f" [{name}{port}] (no deps)")

        return "\n".join(lines)
|
|
@@ -0,0 +1,328 @@
|
|
|
1
|
+
"""Sandbox manager for pactown services."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import shutil
|
|
5
|
+
import subprocess
|
|
6
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Optional, Any, Callable
|
|
10
|
+
import signal
|
|
11
|
+
import time
|
|
12
|
+
from threading import Lock
|
|
13
|
+
|
|
14
|
+
from markpact import Sandbox, parse_blocks, run_cmd, ensure_venv
|
|
15
|
+
from markpact.runner import install_deps
|
|
16
|
+
|
|
17
|
+
from .config import ServiceConfig
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class ServiceProcess:
    """Handle for a launched service: identity, location, and liveness."""
    name: str
    pid: int
    port: Optional[int]
    sandbox_path: Path
    process: Optional[subprocess.Popen] = None
    started_at: float = field(default_factory=time.time)

    @property
    def is_running(self) -> bool:
        """Whether the underlying OS process is still alive."""
        if self.process is not None:
            # We own the Popen handle: poll() is None while it runs.
            return self.process.poll() is None
        # No handle (e.g. reattached by PID): probe with signal 0,
        # which performs only an existence/permission check.
        try:
            os.kill(self.pid, 0)
        except OSError:
            return False
        return True
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class SandboxManager:
    """Manages sandboxes for multiple services.

    Each service gets its own directory under ``sandbox_root``; processes
    started here are tracked so they can be queried and stopped later.
    """

    def __init__(self, sandbox_root: str | Path):
        self.sandbox_root = Path(sandbox_root)
        self.sandbox_root.mkdir(parents=True, exist_ok=True)
        # Processes started by this manager, keyed by service name.
        self._processes: dict[str, ServiceProcess] = {}

    def get_sandbox_path(self, service_name: str) -> Path:
        """Get sandbox path for a service."""
        return self.sandbox_root / service_name

    def create_sandbox(self, service: ServiceConfig, readme_path: Path) -> Sandbox:
        """Create a sandbox for a service from its README.

        Always builds from scratch: any existing sandbox directory is
        removed first. Writes the README's file blocks into the sandbox and
        installs dependencies into a fresh venv when a deps block exists.
        """
        sandbox_path = self.get_sandbox_path(service.name)

        if sandbox_path.exists():
            shutil.rmtree(sandbox_path)
        sandbox_path.mkdir(parents=True)

        sandbox = Sandbox(sandbox_path)

        readme_content = readme_path.read_text()
        blocks = parse_blocks(readme_content)

        deps = []
        for block in blocks:
            if block.kind == "deps":
                deps.extend(block.body.strip().split("\n"))
            elif block.kind == "file":
                file_path = block.get_path() or "main.py"
                sandbox.write_file(file_path, block.body)
            # "run" blocks are intentionally ignored here: start_service
            # re-parses the README to find the run command. (The original
            # collected it into a dead local.)

        if deps:
            ensure_venv(sandbox, verbose=False)
            install_deps([d for d in deps if d.strip()], sandbox, verbose=False)

        return sandbox

    def start_service(
        self,
        service: ServiceConfig,
        readme_path: Path,
        env: dict[str, str],
        verbose: bool = True,
    ) -> ServiceProcess:
        """Start a service in its sandbox.

        Rebuilds the sandbox, then launches the README's first run block as
        a shell command inside it.

        Raises:
            RuntimeError: if the service is already running.
            ValueError: if the README contains no run block.
        """
        if service.name in self._processes:
            existing = self._processes[service.name]
            if existing.is_running:
                raise RuntimeError(f"Service {service.name} is already running")

        sandbox = self.create_sandbox(service, readme_path)

        readme_content = readme_path.read_text()
        blocks = parse_blocks(readme_content)

        # First run block wins.
        run_command = None
        for block in blocks:
            if block.kind == "run":
                run_command = block.body.strip()
                break

        if not run_command:
            raise ValueError(f"No run command found in {readme_path}")

        full_env = os.environ.copy()
        full_env.update(env)

        # Put the sandbox venv first on PATH so the run command picks it up.
        if sandbox.has_venv:
            venv_bin = str(sandbox.venv_bin)
            full_env["PATH"] = f"{venv_bin}:{full_env.get('PATH', '')}"
            full_env["VIRTUAL_ENV"] = str(sandbox.path / ".venv")

        if verbose:
            print(f"Starting {service.name} on port {service.port}...")

        # setsid puts the child in its own process group so stop_service can
        # signal the whole group (POSIX only).
        process = subprocess.Popen(
            run_command,
            shell=True,
            cwd=str(sandbox.path),
            env=full_env,
            stdout=subprocess.PIPE if not verbose else None,
            stderr=subprocess.PIPE if not verbose else None,
            preexec_fn=os.setsid,
        )

        svc_process = ServiceProcess(
            name=service.name,
            pid=process.pid,
            port=service.port,
            sandbox_path=sandbox.path,
            process=process,
        )

        self._processes[service.name] = svc_process
        return svc_process

    def stop_service(self, service_name: str, timeout: int = 10) -> bool:
        """Stop a running service.

        Sends SIGTERM to the process group, waits up to ``timeout`` seconds,
        then escalates to SIGKILL. Returns False only when the service was
        never registered with this manager.
        """
        if service_name not in self._processes:
            return False

        svc = self._processes[service_name]

        if not svc.is_running:
            del self._processes[service_name]
            return True

        try:
            os.killpg(os.getpgid(svc.pid), signal.SIGTERM)
        except ProcessLookupError:
            # Died between the is_running check and the signal.
            del self._processes[service_name]
            return True

        # Poll for graceful exit before escalating.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if not svc.is_running:
                break
            time.sleep(0.1)

        if svc.is_running:
            try:
                os.killpg(os.getpgid(svc.pid), signal.SIGKILL)
            except ProcessLookupError:
                pass

        del self._processes[service_name]
        return True

    def stop_all(self, timeout: int = 10) -> None:
        """Stop all running services."""
        # list() snapshot: stop_service mutates the dict while we iterate.
        for name in list(self._processes.keys()):
            self.stop_service(name, timeout)

    def get_status(self, service_name: str) -> Optional[dict]:
        """Get status of a service, or None if it is not registered."""
        if service_name not in self._processes:
            return None

        svc = self._processes[service_name]
        return {
            "name": svc.name,
            "pid": svc.pid,
            "port": svc.port,
            "running": svc.is_running,
            "uptime": time.time() - svc.started_at,
            "sandbox": str(svc.sandbox_path),
        }

    def get_all_status(self) -> list[dict]:
        """Get status of all services."""
        # Build each status once (the original called get_status twice
        # per service name).
        statuses = (self.get_status(name) for name in self._processes)
        return [status for status in statuses if status]

    def clean_sandbox(self, service_name: str) -> None:
        """Remove sandbox directory for a service."""
        sandbox_path = self.get_sandbox_path(service_name)
        if sandbox_path.exists():
            shutil.rmtree(sandbox_path)

    def clean_all(self) -> None:
        """Remove all sandbox directories and recreate the empty root."""
        if self.sandbox_root.exists():
            shutil.rmtree(self.sandbox_root)
        self.sandbox_root.mkdir(parents=True)

    def create_sandboxes_parallel(
        self,
        services: list[tuple[ServiceConfig, Path]],
        max_workers: int = 4,
        on_complete: Optional[Callable[[str, bool, float], None]] = None,
    ) -> dict[str, Sandbox]:
        """
        Create sandboxes for multiple services in parallel.

        Args:
            services: List of (ServiceConfig, readme_path) tuples
            max_workers: Maximum parallel workers
            on_complete: Callback(name, success, duration)

        Returns:
            Dict of {service_name: Sandbox}

        Raises:
            RuntimeError: if any sandbox failed to build (raised only after
            all builds have finished, so successes remain on disk).
        """
        results: dict[str, Sandbox] = {}
        errors: dict[str, str] = {}
        lock = Lock()

        def create_one(service: ServiceConfig, readme_path: Path) -> tuple[str, Sandbox]:
            sandbox = self.create_sandbox(service, readme_path)
            return service.name, sandbox

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {}
            start_times = {}

            for service, readme_path in services:
                start_times[service.name] = time.time()
                future = executor.submit(create_one, service, readme_path)
                futures[future] = service.name

            for future in as_completed(futures):
                name = futures[future]
                duration = time.time() - start_times[name]

                try:
                    _, sandbox = future.result()
                    with lock:
                        results[name] = sandbox
                    if on_complete:
                        on_complete(name, True, duration)
                except Exception as e:
                    with lock:
                        errors[name] = str(e)
                    if on_complete:
                        on_complete(name, False, duration)

        if errors:
            error_msg = "; ".join(f"{k}: {v}" for k, v in errors.items())
            raise RuntimeError(f"Failed to create sandboxes: {error_msg}")

        return results

    def start_services_parallel(
        self,
        services: list[tuple[ServiceConfig, Path, dict[str, str]]],
        max_workers: int = 4,
        on_complete: Optional[Callable[[str, bool, float], None]] = None,
    ) -> tuple[dict[str, ServiceProcess], dict[str, str]]:
        """
        Start multiple services in parallel.

        Note: Should only be used for services with no inter-dependencies.
        For dependent services, use the orchestrator's wave-based approach.

        Args:
            services: List of (ServiceConfig, readme_path, env) tuples
            max_workers: Maximum parallel workers
            on_complete: Callback(name, success, duration)

        Returns:
            Tuple of ({service_name: ServiceProcess} for successful starts,
            {service_name: error message} for failures). Unlike
            create_sandboxes_parallel, failures are returned rather than
            raised so already-started services stay tracked. (The original
            annotation claimed a bare dict; the code has always returned
            this tuple.)
        """
        results: dict[str, ServiceProcess] = {}
        errors: dict[str, str] = {}
        lock = Lock()

        def start_one(
            service: ServiceConfig,
            readme_path: Path,
            env: dict[str, str]
        ) -> tuple[str, ServiceProcess]:
            proc = self.start_service(service, readme_path, env, verbose=False)
            return service.name, proc

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {}
            start_times = {}

            for service, readme_path, env in services:
                start_times[service.name] = time.time()
                future = executor.submit(start_one, service, readme_path, env)
                futures[future] = service.name

            for future in as_completed(futures):
                name = futures[future]
                duration = time.time() - start_times[name]

                try:
                    _, proc = future.result()
                    with lock:
                        results[name] = proc
                    if on_complete:
                        on_complete(name, True, duration)
                except Exception as e:
                    with lock:
                        errors[name] = str(e)
                    if on_complete:
                        on_complete(name, False, duration)

        return results, errors
|