pactown 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pactown/__init__.py +23 -0
- pactown/cli.py +347 -0
- pactown/config.py +158 -0
- pactown/deploy/__init__.py +17 -0
- pactown/deploy/base.py +263 -0
- pactown/deploy/compose.py +359 -0
- pactown/deploy/docker.py +299 -0
- pactown/deploy/kubernetes.py +449 -0
- pactown/deploy/podman.py +400 -0
- pactown/generator.py +212 -0
- pactown/network.py +245 -0
- pactown/orchestrator.py +455 -0
- pactown/parallel.py +268 -0
- pactown/registry/__init__.py +12 -0
- pactown/registry/client.py +253 -0
- pactown/registry/models.py +150 -0
- pactown/registry/server.py +207 -0
- pactown/resolver.py +160 -0
- pactown/sandbox_manager.py +328 -0
- pactown-0.1.4.dist-info/METADATA +308 -0
- pactown-0.1.4.dist-info/RECORD +24 -0
- pactown-0.1.4.dist-info/WHEEL +4 -0
- pactown-0.1.4.dist-info/entry_points.txt +3 -0
- pactown-0.1.4.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,449 @@
|
|
|
1
|
+
"""Kubernetes deployment backend - production-grade orchestration."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import subprocess
|
|
6
|
+
import json
|
|
7
|
+
import yaml
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Optional, Any
|
|
10
|
+
|
|
11
|
+
from .base import (
|
|
12
|
+
DeploymentBackend,
|
|
13
|
+
DeploymentConfig,
|
|
14
|
+
DeploymentResult,
|
|
15
|
+
RuntimeType,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class KubernetesBackend(DeploymentBackend):
    """
    Kubernetes deployment backend for production environments.

    Generates and applies Kubernetes manifests for:
    - Deployments with rolling updates
    - Services for internal/external access
    - ConfigMaps for configuration
    - Secrets for sensitive data
    - HorizontalPodAutoscaler for scaling
    - NetworkPolicies for security
    """

    def __init__(self, config: DeploymentConfig, kubeconfig: Optional[str] = None):
        """Initialize the backend.

        Args:
            config: Deployment configuration (namespace, registry, limits, ...).
            kubeconfig: Optional path to a kubeconfig file; when set it is
                passed to every kubectl invocation via ``--kubeconfig``.
        """
        super().__init__(config)
        self.kubeconfig = kubeconfig

    @property
    def runtime_type(self) -> RuntimeType:
        """This backend always reports the Kubernetes runtime."""
        return RuntimeType.KUBERNETES

    def _kubectl(self, *args: str, input_data: Optional[str] = None) -> subprocess.CompletedProcess:
        """Run a kubectl command and return the completed process.

        Args:
            *args: kubectl arguments (appended after any ``--kubeconfig``).
            input_data: Optional text piped to kubectl's stdin (used for
                ``kubectl apply -f -``).

        Raises:
            subprocess.TimeoutExpired: If kubectl does not finish within 60 s.
            FileNotFoundError: If kubectl is not installed.
        """
        cmd = ["kubectl"]
        if self.kubeconfig:
            cmd.extend(["--kubeconfig", self.kubeconfig])
        cmd.extend(args)

        return subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            input=input_data,
            timeout=60,
        )

    def is_available(self) -> bool:
        """Check if kubectl is available and cluster is reachable."""
        try:
            result = self._kubectl("cluster-info")
            return result.returncode == 0
        except (subprocess.TimeoutExpired, FileNotFoundError):
            return False

    def build_image(
        self,
        service_name: str,
        dockerfile_path: Path,
        context_path: Path,
        tag: Optional[str] = None,
    ) -> DeploymentResult:
        """Build image (delegates to Docker/Podman).

        Kubernetes itself does not build images, so this tries podman first,
        then docker. The first runtime that succeeds wins.

        Returns:
            DeploymentResult with ``success=True`` and the tagged image name,
            or ``success=False`` with the last build error (or a "no runtime"
            message if neither podman nor docker is installed).
        """
        # K8s doesn't build images directly, use Docker or Podman
        image_name = f"{self.config.registry}/{self.config.image_prefix}/{service_name}"
        image_name = f"{image_name}:{tag}" if tag else f"{image_name}:latest"

        # Try podman first, then docker. Remember the last real failure so we
        # don't report "no runtime available" when a runtime existed but the
        # build itself failed.
        last_error: Optional[str] = None
        for runtime in ["podman", "docker"]:
            try:
                result = subprocess.run(
                    [runtime, "build", "-t", image_name, "-f", str(dockerfile_path), str(context_path)],
                    capture_output=True,
                    text=True,
                    timeout=300,
                )
            except FileNotFoundError:
                # Runtime binary not installed; try the next one.
                continue
            except subprocess.TimeoutExpired:
                last_error = f"{runtime} build timed out after 300s"
                continue

            if result.returncode == 0:
                return DeploymentResult(
                    success=True,
                    service_name=service_name,
                    runtime=self.runtime_type,
                    image_name=image_name,
                )
            # Build ran but failed: keep stderr for the caller.
            last_error = result.stderr

        return DeploymentResult(
            success=False,
            service_name=service_name,
            runtime=self.runtime_type,
            error=last_error or "No container runtime (docker/podman) available",
        )

    def push_image(
        self,
        image_name: str,
        registry: Optional[str] = None,
    ) -> DeploymentResult:
        """Push image to registry.

        Tries podman then docker; the first runtime that succeeds wins.

        Args:
            image_name: Fully qualified image name (registry/prefix/name:tag).
            registry: Unused here; the registry is part of ``image_name``.
                Kept for interface compatibility with other backends.
        """
        last_error: Optional[str] = None
        for runtime in ["podman", "docker"]:
            try:
                result = subprocess.run(
                    [runtime, "push", image_name],
                    capture_output=True,
                    text=True,
                    timeout=300,
                )
            except FileNotFoundError:
                continue
            except subprocess.TimeoutExpired:
                last_error = f"{runtime} push timed out after 300s"
                continue

            if result.returncode == 0:
                return DeploymentResult(
                    success=True,
                    # Derive the bare service name from "registry/prefix/name:tag".
                    service_name=image_name.split("/")[-1].split(":")[0],
                    runtime=self.runtime_type,
                    image_name=image_name,
                )
            last_error = result.stderr

        return DeploymentResult(
            success=False,
            service_name=image_name,
            runtime=self.runtime_type,
            error=last_error or "Failed to push image",
        )

    def deploy(
        self,
        service_name: str,
        image_name: str,
        port: int,
        env: dict[str, str],
        health_check: Optional[str] = None,
    ) -> DeploymentResult:
        """Deploy to Kubernetes.

        Generates manifests (namespace, ConfigMap, Deployment, Service,
        NetworkPolicy) and applies them one by one via ``kubectl apply``.
        Stops at the first manifest that fails to apply.

        Returns:
            DeploymentResult whose ``endpoint`` is the LoadBalancer ingress IP
            when one is assigned, otherwise the in-cluster DNS name.
        """
        manifests = self.generate_manifests(
            service_name=service_name,
            image_name=image_name,
            port=port,
            env=env,
            health_check=health_check,
        )

        # Apply all manifests
        for manifest in manifests:
            manifest_yaml = yaml.dump(manifest, default_flow_style=False)
            result = self._kubectl("apply", "-f", "-", input_data=manifest_yaml)

            if result.returncode != 0:
                return DeploymentResult(
                    success=False,
                    service_name=service_name,
                    runtime=self.runtime_type,
                    error=result.stderr,
                )

        # Get service endpoint
        result = self._kubectl(
            "get", "service", service_name,
            "-n", self.config.namespace,
            "-o", "jsonpath={.status.loadBalancer.ingress[0].ip}",
        )

        # Fall back to the cluster-internal DNS name when no external IP exists
        # (e.g. ClusterIP services, or LoadBalancer still pending).
        endpoint = f"http://{result.stdout}:{port}" if result.stdout else f"http://{service_name}.{self.config.namespace}.svc.cluster.local:{port}"

        return DeploymentResult(
            success=True,
            service_name=service_name,
            runtime=self.runtime_type,
            image_name=image_name,
            endpoint=endpoint,
        )

    def stop(self, service_name: str) -> DeploymentResult:
        """Delete Kubernetes resources.

        Removes the Deployment, Service, and ConfigMap selected by the
        ``app=<service_name>`` label in the configured namespace.
        """
        result = self._kubectl(
            "delete", "deployment,service,configmap",
            "-l", f"app={service_name}",
            "-n", self.config.namespace,
        )

        return DeploymentResult(
            success=result.returncode == 0,
            service_name=service_name,
            runtime=self.runtime_type,
            error=result.stderr if result.returncode != 0 else None,
        )

    def logs(self, service_name: str, tail: int = 100) -> str:
        """Get pod logs.

        Args:
            service_name: Value of the ``app`` label selecting the pods.
            tail: Number of trailing log lines to fetch per pod.

        Returns:
            Combined stdout + stderr of the kubectl invocation (stderr is
            included so selector/namespace errors are visible to the caller).
        """
        result = self._kubectl(
            "logs",
            "-l", f"app={service_name}",
            "-n", self.config.namespace,
            "--tail", str(tail),
        )
        return result.stdout + result.stderr

    def status(self, service_name: str) -> dict[str, Any]:
        """Get deployment status.

        Returns:
            A dict with ``running`` plus replica counts, or
            ``{"running": False, "error": ...}`` when the deployment is
            missing or its status cannot be parsed.
        """
        result = self._kubectl(
            "get", "deployment", service_name,
            "-n", self.config.namespace,
            "-o", "json",
        )

        if result.returncode != 0:
            return {"running": False, "error": "Deployment not found"}

        try:
            data = json.loads(result.stdout)
            status = data.get("status", {})
            return {
                "running": status.get("availableReplicas", 0) > 0,
                "replicas": status.get("replicas", 0),
                "available": status.get("availableReplicas", 0),
                "ready": status.get("readyReplicas", 0),
                "updated": status.get("updatedReplicas", 0),
            }
        except json.JSONDecodeError:
            return {"running": False, "error": "Failed to parse status"}

    def generate_manifests(
        self,
        service_name: str,
        image_name: str,
        port: int,
        env: dict[str, str],
        health_check: Optional[str] = None,
        replicas: int = 2,
    ) -> list[dict]:
        """Generate Kubernetes manifests for a service.

        Produces, in apply order: Namespace, ConfigMap (only when ``env`` is
        non-empty), Deployment (rolling update, hardened securityContext),
        Service (ClusterIP), and a NetworkPolicy restricting traffic to
        pactown-managed namespaces.

        Args:
            service_name: Name used for all resources and the ``app`` label.
            image_name: Container image reference.
            port: Container/service port.
            env: Environment variables; published via a ConfigMap.
            health_check: Optional HTTP path; enables liveness/readiness probes.
            replicas: Desired replica count for the Deployment.

        Returns:
            List of manifest dicts ready for ``yaml.dump`` + ``kubectl apply``.
        """
        labels = {
            "app": service_name,
            "managed-by": "pactown",
            **self.config.labels,
        }

        manifests = []

        # Namespace
        manifests.append({
            "apiVersion": "v1",
            "kind": "Namespace",
            "metadata": {
                "name": self.config.namespace,
                "labels": {"managed-by": "pactown"},
            },
        })

        # ConfigMap for environment variables
        if env:
            manifests.append({
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": f"{service_name}-config",
                    "namespace": self.config.namespace,
                    "labels": labels,
                },
                "data": env,
            })

        # Deployment
        container_spec = {
            "name": service_name,
            "image": image_name,
            "ports": [{"containerPort": port}],
            "resources": {
                "limits": {
                    "memory": self.config.memory_limit,
                    "cpu": self.config.cpu_limit,
                },
                "requests": {
                    "memory": "128Mi",
                    "cpu": "100m",
                },
            },
            # Hardened defaults: non-root, no privilege escalation, dropped caps.
            "securityContext": {
                "runAsNonRoot": True,
                "runAsUser": 1000,
                "readOnlyRootFilesystem": self.config.read_only_fs,
                "allowPrivilegeEscalation": False,
                "capabilities": {
                    "drop": self.config.drop_capabilities,
                },
            },
        }

        if env:
            # Inject all env vars from the ConfigMap generated above.
            container_spec["envFrom"] = [
                {"configMapRef": {"name": f"{service_name}-config"}}
            ]

        if health_check:
            container_spec["livenessProbe"] = {
                "httpGet": {"path": health_check, "port": port},
                "initialDelaySeconds": 10,
                "periodSeconds": 10,
                "timeoutSeconds": 5,
                "failureThreshold": 3,
            }
            container_spec["readinessProbe"] = {
                "httpGet": {"path": health_check, "port": port},
                "initialDelaySeconds": 5,
                "periodSeconds": 5,
                "timeoutSeconds": 3,
                "failureThreshold": 3,
            }

        deployment = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "name": service_name,
                "namespace": self.config.namespace,
                "labels": labels,
                "annotations": self.config.annotations,
            },
            "spec": {
                "replicas": replicas,
                "selector": {"matchLabels": {"app": service_name}},
                # Zero-downtime rollout: never drop below the desired count.
                "strategy": {
                    "type": "RollingUpdate",
                    "rollingUpdate": {
                        "maxUnavailable": 0,
                        "maxSurge": 1,
                    },
                },
                "template": {
                    "metadata": {"labels": labels},
                    "spec": {
                        "securityContext": {
                            "runAsNonRoot": True,
                            "seccompProfile": {"type": "RuntimeDefault"},
                        },
                        "containers": [container_spec],
                        "serviceAccountName": "default",
                        # Pods don't call the K8s API; don't mount a token.
                        "automountServiceAccountToken": False,
                    },
                },
            },
        }
        manifests.append(deployment)

        # Service
        service = {
            "apiVersion": "v1",
            "kind": "Service",
            "metadata": {
                "name": service_name,
                "namespace": self.config.namespace,
                "labels": labels,
            },
            "spec": {
                "selector": {"app": service_name},
                "ports": [{"port": port, "targetPort": port}],
                "type": "ClusterIP",
            },
        }
        manifests.append(service)

        # NetworkPolicy for security: only pactown-managed namespaces may talk
        # to this pod, and it may only talk back to them.
        network_policy = {
            "apiVersion": "networking.k8s.io/v1",
            "kind": "NetworkPolicy",
            "metadata": {
                "name": f"{service_name}-network-policy",
                "namespace": self.config.namespace,
            },
            "spec": {
                "podSelector": {"matchLabels": {"app": service_name}},
                "policyTypes": ["Ingress", "Egress"],
                "ingress": [{
                    "from": [{"namespaceSelector": {"matchLabels": {"managed-by": "pactown"}}}],
                    "ports": [{"port": port}],
                }],
                "egress": [{
                    "to": [{"namespaceSelector": {"matchLabels": {"managed-by": "pactown"}}}],
                }],
            },
        }
        manifests.append(network_policy)

        return manifests

    def generate_hpa(
        self,
        service_name: str,
        min_replicas: int = 2,
        max_replicas: int = 10,
        target_cpu: int = 70,
    ) -> dict:
        """Generate HorizontalPodAutoscaler manifest.

        Args:
            service_name: Target Deployment name.
            min_replicas: Lower bound for the autoscaler.
            max_replicas: Upper bound for the autoscaler.
            target_cpu: Target average CPU utilization percentage.
        """
        return {
            "apiVersion": "autoscaling/v2",
            "kind": "HorizontalPodAutoscaler",
            "metadata": {
                "name": f"{service_name}-hpa",
                "namespace": self.config.namespace,
            },
            "spec": {
                "scaleTargetRef": {
                    "apiVersion": "apps/v1",
                    "kind": "Deployment",
                    "name": service_name,
                },
                "minReplicas": min_replicas,
                "maxReplicas": max_replicas,
                "metrics": [{
                    "type": "Resource",
                    "resource": {
                        "name": "cpu",
                        "target": {
                            "type": "Utilization",
                            "averageUtilization": target_cpu,
                        },
                    },
                }],
            },
        }

    def save_manifests(
        self,
        service_name: str,
        manifests: list[dict],
        output_dir: Path,
    ) -> Path:
        """Save manifests to files.

        Writes all manifests into a single multi-document YAML file named
        ``<service_name>.yaml`` under ``output_dir`` (created if missing).

        Returns:
            Path to the written YAML file.
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        output_file = output_dir / f"{service_name}.yaml"

        with open(output_file, "w") as f:
            for i, manifest in enumerate(manifests):
                # Separate YAML documents so kubectl can apply them in one file.
                if i > 0:
                    f.write("---\n")
                yaml.dump(manifest, f, default_flow_style=False)

        return output_file