pactown 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pactown/deploy/base.py ADDED
@@ -0,0 +1,263 @@
1
+ """Base classes for deployment backends."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from abc import ABC, abstractmethod
6
+ from dataclasses import dataclass, field
7
+ from enum import Enum
8
+ from pathlib import Path
9
+ from typing import Optional, Any
10
+
11
+
12
class RuntimeType(Enum):
    """Enumeration of the runtimes a service can be deployed onto."""

    # Plain local process — used during development only.
    LOCAL = "local"
    # Docker engine.
    DOCKER = "docker"
    # Podman (supports rootless containers).
    PODMAN = "podman"
    # Kubernetes cluster.
    KUBERNETES = "kubernetes"
    # docker compose / podman-compose multi-service runs.
    COMPOSE = "compose"
19
+
20
+
21
class DeploymentMode(Enum):
    """Enumeration of target environments a deployment can run in."""

    DEVELOPMENT = "development"
    STAGING = "staging"
    PRODUCTION = "production"
26
+
27
+
28
@dataclass
class DeploymentConfig:
    """Tunable settings controlling how an ecosystem is deployed.

    Plain data object; use the :meth:`for_production` and
    :meth:`for_development` presets rather than hand-picking values.
    """

    # --- Runtime selection ---
    runtime: RuntimeType = RuntimeType.LOCAL
    mode: DeploymentMode = DeploymentMode.DEVELOPMENT

    # --- Container / image naming ---
    registry: str = ""               # container registry URL ("" = none)
    namespace: str = "default"       # K8s namespace or project name
    image_prefix: str = "pactown"    # prefix for built image names

    # --- Networking ---
    network_name: str = "pactown-net"  # container network to attach to
    expose_ports: bool = True          # publish service ports on the host
    use_internal_dns: bool = True      # resolve peers via container DNS

    # --- Security hardening ---
    rootless: bool = True              # prefer rootless containers (Podman)
    read_only_fs: bool = False         # mount root filesystem read-only
    no_new_privileges: bool = True     # forbid privilege escalation
    drop_capabilities: list[str] = field(default_factory=lambda: ["ALL"])
    add_capabilities: list[str] = field(default_factory=list)

    # --- Per-service resource limits ---
    memory_limit: str = "512m"
    cpu_limit: str = "0.5"

    # --- Health checking ---
    health_check_interval: str = "30s"
    health_check_timeout: str = "10s"
    health_check_retries: int = 3

    # --- Persistence ---
    volumes_path: Optional[Path] = None  # host path for persistent volumes

    # --- Metadata attached to containers ---
    labels: dict[str, str] = field(default_factory=dict)
    annotations: dict[str, str] = field(default_factory=dict)

    @classmethod
    def for_production(cls) -> "DeploymentConfig":
        """Hardened preset: locked-down containers, tighter health checks."""
        overrides = dict(
            mode=DeploymentMode.PRODUCTION,
            rootless=True,
            read_only_fs=True,
            no_new_privileges=True,
            drop_capabilities=["ALL"],
            memory_limit="1g",
            cpu_limit="1.0",
            health_check_interval="10s",
            health_check_retries=5,
        )
        return cls(**overrides)

    @classmethod
    def for_development(cls) -> "DeploymentConfig":
        """Relaxed preset: no hardening, ports published for easy access."""
        return cls(
            mode=DeploymentMode.DEVELOPMENT,
            rootless=False,
            read_only_fs=False,
            expose_ports=True,
        )
93
+
94
+
95
@dataclass
class DeploymentResult:
    """Outcome of one backend operation (build, push, deploy, stop)."""

    success: bool                       # whether the operation succeeded
    service_name: str                   # service the operation targeted
    runtime: RuntimeType                # runtime that performed the operation
    container_id: Optional[str] = None  # id of the created container, if any
    image_name: Optional[str] = None    # image that was built or used
    endpoint: Optional[str] = None      # reachable URL of the service
    error: Optional[str] = None         # error message on failure
    logs: Optional[str] = None          # captured logs, if collected
106
+
107
+
108
class DeploymentBackend(ABC):
    """Abstract base class for deployment backends.

    Concrete subclasses implement the runtime-specific operations
    (build, push, deploy, stop, logs, status); the Dockerfile
    generation helpers defined here are shared by all backends.
    """

    def __init__(self, config: DeploymentConfig):
        # Deployment settings consulted by the concrete backend.
        self.config = config

    @property
    @abstractmethod
    def runtime_type(self) -> RuntimeType:
        """Return the runtime type this backend drives."""
        pass

    @abstractmethod
    def is_available(self) -> bool:
        """Check if the runtime is available on this host."""
        pass

    @abstractmethod
    def build_image(
        self,
        service_name: str,
        dockerfile_path: Path,
        context_path: Path,
        tag: Optional[str] = None,
    ) -> DeploymentResult:
        """Build a container image from *dockerfile_path* in *context_path*."""
        pass

    @abstractmethod
    def push_image(
        self,
        image_name: str,
        registry: Optional[str] = None,
    ) -> DeploymentResult:
        """Push *image_name* to a registry (default registry if None)."""
        pass

    @abstractmethod
    def deploy(
        self,
        service_name: str,
        image_name: str,
        port: int,
        env: dict[str, str],
        health_check: Optional[str] = None,
    ) -> DeploymentResult:
        """Deploy a service from *image_name* with the given port and env."""
        pass

    @abstractmethod
    def stop(self, service_name: str) -> DeploymentResult:
        """Stop a deployed service."""
        pass

    @abstractmethod
    def logs(self, service_name: str, tail: int = 100) -> str:
        """Get the last *tail* log lines from a service."""
        pass

    @abstractmethod
    def status(self, service_name: str) -> dict[str, Any]:
        """Get status of a service as a backend-specific dict."""
        pass

    def generate_dockerfile(
        self,
        service_name: str,
        sandbox_path: Path,
        base_image: str = "python:3.12-slim",
    ) -> Path:
        """Generate a Dockerfile for a service and return its path.

        Writes ``Dockerfile`` into *sandbox_path*.
        NOTE(review): *service_name* is currently unused here — confirm
        whether it was intended to influence the generated content.
        """
        dockerfile_content = self._create_dockerfile(
            sandbox_path, base_image
        )

        dockerfile_path = sandbox_path / "Dockerfile"
        dockerfile_path.write_text(dockerfile_content)
        return dockerfile_path

    def _create_dockerfile(
        self,
        sandbox_path: Path,
        base_image: str,
    ) -> str:
        """Create Dockerfile content.

        Emits a Node.js template when ``package.json`` exists in the
        sandbox, otherwise a Python Dockerfile based on *base_image*.
        """
        # Check for requirements.txt
        has_requirements = (sandbox_path / "requirements.txt").exists()

        # Check for package.json (Node.js)
        has_package_json = (sandbox_path / "package.json").exists()

        if has_package_json:
            return self._create_node_dockerfile(sandbox_path)

        # Default Python Dockerfile
        lines = [
            f"FROM {base_image}",
            "",
            "WORKDIR /app",
            "",
            "# Security: run as non-root user",
            "RUN useradd -m -u 1000 appuser",
            "",
        ]

        if has_requirements:
            lines.extend([
                "# Install dependencies",
                "COPY requirements.txt .",
                "RUN pip install --no-cache-dir -r requirements.txt",
                "",
            ])

        lines.extend([
            "# Copy application",
            "COPY . .",
            "",
            "# Switch to non-root user",
            "USER appuser",
            "",
            "# Health check",
            # NOTE(review): this health check shells out to curl, which is
            # not guaranteed to be present in slim base images — confirm.
            'HEALTHCHECK --interval=30s --timeout=10s --retries=3 \\',
            ' CMD curl -f http://localhost:${PORT:-8000}/health || exit 1',
            "",
            "# Default command",
            'CMD ["python", "main.py"]',
        ])

        return "\n".join(lines)

    def _create_node_dockerfile(self, sandbox_path: Path) -> str:
        """Create Dockerfile content for a Node.js service.

        NOTE(review): *sandbox_path* is not used — the template is fully
        static; confirm whether per-service customization was intended.
        """
        return """FROM node:20-slim

WORKDIR /app

# Security: run as non-root user
RUN useradd -m -u 1000 appuser

# Install dependencies
COPY package*.json ./
RUN npm ci --only=production

# Copy application
COPY . .

# Switch to non-root user
USER appuser

# Health check
HEALTHCHECK --interval=30s --timeout=10s --retries=3 \\
CMD curl -f http://localhost:${PORT:-3000}/health || exit 1

# Default command
CMD ["node", "server.js"]
"""
@@ -0,0 +1,359 @@
1
+ """Docker Compose / Podman Compose generator for multi-service deployment."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import yaml
6
+ from pathlib import Path
7
+ from typing import Optional
8
+ from dataclasses import dataclass
9
+
10
+ from ..config import EcosystemConfig, ServiceConfig
11
+ from .base import DeploymentConfig, DeploymentMode
12
+
13
+
14
@dataclass
class ComposeService:
    """Represents a service entry in docker-compose.yaml.

    Pure data carrier mirroring the keys of a compose service block.
    """

    name: str                         # service key in the compose file
    build_context: str                # build.context directory
    dockerfile: str                   # build.dockerfile filename
    image: str                        # image tag to build/run
    ports: list[str]                  # "host:container" port mappings
    environment: dict[str, str]       # environment variables
    depends_on: list[str]             # names of services this one needs
    health_check: Optional[dict] = None
    deploy: Optional[dict] = None
    # Fixed: these two were annotated `list[str]` with a `None` default,
    # which mistyped the fields. `Optional[...]` keeps the same runtime
    # default (None), so existing callers are unaffected.
    networks: Optional[list[str]] = None
    volumes: Optional[list[str]] = None
28
+
29
+
30
class ComposeGenerator:
    """
    Generate Docker Compose / Podman Compose files for pactown ecosystems.

    Supports:
    - Docker Compose v3.8+
    - Podman Compose
    - Docker Swarm mode
    """

    def __init__(
        self,
        ecosystem: EcosystemConfig,
        deploy_config: DeploymentConfig,
        base_path: Path,
    ):
        self.ecosystem = ecosystem
        self.deploy_config = deploy_config
        # Normalize so callers may pass a plain string.
        self.base_path = Path(base_path)

    def generate(
        self,
        output_path: Optional[Path] = None,
        include_registry: bool = False,
    ) -> dict:
        """
        Generate docker-compose.yaml content.

        Args:
            output_path: Optional path to write the file
            include_registry: Include pactown registry service

        Returns:
            Compose file as dict
        """
        compose = {
            "version": "3.8",
            "name": self.ecosystem.name,
            "services": {},
            "networks": {
                self.deploy_config.network_name: {
                    "driver": "bridge",
                },
            },
        }

        # Named volumes, collected while walking the services.
        volumes = {}

        for name, service in self.ecosystem.services.items():
            compose["services"][name] = self._create_service(name, service)

            # One local named volume per service when persistence is enabled.
            if self.deploy_config.volumes_path:
                volume_name = f"{name}-data"
                volumes[volume_name] = {"driver": "local"}

        if volumes:
            compose["volumes"] = volumes

        # Add registry service if requested
        if include_registry:
            compose["services"]["registry"] = self._create_registry_service()

        # Write to file if path provided
        if output_path:
            output_path = Path(output_path)
            with open(output_path, "w") as f:
                yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

        return compose

    def _create_service(self, name: str, service: ServiceConfig) -> dict:
        """Create the compose service definition for one ecosystem service.

        Note: an unused ``readme_path`` computation was removed (dead code).
        """
        sandbox_path = self.base_path / self.ecosystem.sandbox_root / name

        svc = {
            "build": {
                "context": str(sandbox_path),
                "dockerfile": "Dockerfile",
            },
            "image": f"{self.deploy_config.image_prefix}/{name}:latest",
            "container_name": f"{self.ecosystem.name}-{name}",
            "restart": "unless-stopped",
            "networks": [self.deploy_config.network_name],
        }

        # Ports: only published when host exposure is enabled.
        if service.port and self.deploy_config.expose_ports:
            svc["ports"] = [f"{service.port}:{service.port}"]

        # Environment
        env = {"SERVICE_NAME": name}
        if service.port:
            env["MARKPACT_PORT"] = str(service.port)

        # Inject URLs for declared dependencies using container DNS names
        # (e.g. FOO_BAR_URL=http://foo-bar:8001 for dependency "foo-bar").
        for dep in service.depends_on:
            dep_service = self.ecosystem.services.get(dep.name)
            if dep_service:
                env_key = dep.name.upper().replace("-", "_")
                env[f"{env_key}_URL"] = f"http://{dep.name}:{dep_service.port}"

        # Service-declared env wins over the generated values.
        env.update(service.env)
        svc["environment"] = env

        # Dependencies: wait for health when the dependency has a check,
        # otherwise just for the container to start.
        if service.depends_on:
            svc["depends_on"] = {}
            for dep in service.depends_on:
                if dep.name in self.ecosystem.services:
                    svc["depends_on"][dep.name] = {
                        "condition": "service_healthy" if self.ecosystem.services[dep.name].health_check else "service_started"
                    }

        # Health check: curl against the service's declared endpoint.
        if service.health_check:
            svc["healthcheck"] = {
                "test": ["CMD", "curl", "-f", f"http://localhost:{service.port}{service.health_check}"],
                "interval": self.deploy_config.health_check_interval,
                "timeout": self.deploy_config.health_check_timeout,
                "retries": self.deploy_config.health_check_retries,
                "start_period": "10s",
            }

        # Production-only: resource limits and a restart policy.
        if self.deploy_config.mode == DeploymentMode.PRODUCTION:
            svc["deploy"] = {
                "resources": {
                    "limits": {
                        "memory": self.deploy_config.memory_limit,
                        "cpus": self.deploy_config.cpu_limit,
                    },
                    "reservations": {
                        "memory": "128M",
                        "cpus": "0.1",
                    },
                },
                "restart_policy": {
                    "condition": "on-failure",
                    "delay": "5s",
                    "max_attempts": 3,
                    "window": "120s",
                },
            }

        # Security options applied to every service.
        svc["security_opt"] = ["no-new-privileges:true"]

        if self.deploy_config.read_only_fs:
            svc["read_only"] = True
            # Writable scratch space even with a read-only root fs.
            svc["tmpfs"] = ["/tmp"]

        svc["cap_drop"] = self.deploy_config.drop_capabilities

        if self.deploy_config.add_capabilities:
            svc["cap_add"] = self.deploy_config.add_capabilities

        # Labels for discovery/filtering, merged with user-supplied ones.
        svc["labels"] = {
            "pactown.ecosystem": self.ecosystem.name,
            "pactown.service": name,
            **self.deploy_config.labels,
        }

        return svc

    def _create_registry_service(self) -> dict:
        """Create the optional pactown registry service definition."""
        return {
            "image": "pactown/registry:latest",
            "container_name": f"{self.ecosystem.name}-registry",
            "restart": "unless-stopped",
            "ports": ["8800:8800"],
            "networks": [self.deploy_config.network_name],
            "volumes": ["registry-data:/data"],
            "environment": {
                "REGISTRY_PORT": "8800",
                "REGISTRY_DATA_DIR": "/data",
            },
            "healthcheck": {
                "test": ["CMD", "curl", "-f", "http://localhost:8800/health"],
                "interval": "30s",
                "timeout": "10s",
                "retries": 3,
            },
        }

    def generate_override(
        self,
        output_path: Optional[Path] = None,
        dev_mode: bool = True,
    ) -> dict:
        """
        Generate docker-compose.override.yaml for development.

        This file is automatically merged with docker-compose.yaml
        and provides development-specific settings.
        """
        override = {
            "version": "3.8",
            "services": {},
        }

        for name, service in self.ecosystem.services.items():
            sandbox_path = self.base_path / self.ecosystem.sandbox_root / name

            svc = {}

            if dev_mode:
                # Mount source code for hot reload (:z for SELinux hosts).
                svc["volumes"] = [
                    f"{sandbox_path}:/app:z",
                ]

                # Enable debug mode
                svc["environment"] = {
                    "DEBUG": "true",
                    "LOG_LEVEL": "debug",
                }

                # Remove resource limits for development.
                # NOTE(review): this serializes as `deploy: null`; confirm
                # the compose merge actually drops limits in that case.
                svc["deploy"] = None

            if svc:
                override["services"][name] = svc

        if output_path:
            output_path = Path(output_path)
            with open(output_path, "w") as f:
                yaml.dump(override, f, default_flow_style=False, sort_keys=False)

        return override

    def generate_production(
        self,
        output_path: Optional[Path] = None,
        replicas: int = 2,
    ) -> dict:
        """
        Generate docker-compose.prod.yaml for production/swarm deployment.

        Starts from :meth:`generate` output and overwrites each service's
        ``deploy`` section with swarm-style replication and rollout policy.
        """
        compose = self.generate()

        for name in compose["services"]:
            svc = compose["services"][name]

            # Add swarm-specific deploy config
            svc["deploy"] = {
                "mode": "replicated",
                "replicas": replicas,
                "update_config": {
                    "parallelism": 1,
                    "delay": "10s",
                    "failure_action": "rollback",
                    "order": "start-first",
                },
                "rollback_config": {
                    "parallelism": 1,
                    "delay": "10s",
                },
                "resources": {
                    "limits": {
                        "memory": self.deploy_config.memory_limit,
                        "cpus": self.deploy_config.cpu_limit,
                    },
                },
            }

        if output_path:
            output_path = Path(output_path)
            with open(output_path, "w") as f:
                yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

        return compose
308
+
309
+
310
def generate_compose_from_config(
    config_path: Path,
    output_dir: Optional[Path] = None,
    production: bool = False,
) -> dict:
    """
    Convenience function to generate compose files from pactown config.

    Args:
        config_path: Path to saas.pactown.yaml
        output_dir: Directory to write compose files
        production: Generate production configuration

    Returns:
        Generated compose dict
    """
    # Local import to avoid a circular dependency with ..config.
    from ..config import load_config

    config_path = Path(config_path)
    ecosystem = load_config(config_path)

    # Pick the deployment preset matching the requested environment.
    if production:
        deploy_config = DeploymentConfig.for_production()
    else:
        deploy_config = DeploymentConfig.for_development()

    generator = ComposeGenerator(
        ecosystem=ecosystem,
        deploy_config=deploy_config,
        base_path=config_path.parent,
    )

    # Default to writing next to the config file.
    target_dir = output_dir or config_path.parent

    # The main compose file is always produced.
    compose = generator.generate(
        output_path=target_dir / "docker-compose.yaml"
    )

    if production:
        # Swarm/production overlay.
        generator.generate_production(
            output_path=target_dir / "docker-compose.prod.yaml"
        )
    else:
        # Dev override, auto-merged by `docker compose`.
        generator.generate_override(
            output_path=target_dir / "docker-compose.override.yaml"
        )

    return compose