raijin-server 0.1.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- raijin_server/__init__.py +5 -0
- raijin_server/cli.py +447 -0
- raijin_server/config.py +139 -0
- raijin_server/healthchecks.py +296 -0
- raijin_server/modules/__init__.py +26 -0
- raijin_server/modules/bootstrap.py +224 -0
- raijin_server/modules/calico.py +36 -0
- raijin_server/modules/essentials.py +29 -0
- raijin_server/modules/firewall.py +27 -0
- raijin_server/modules/full_install.py +131 -0
- raijin_server/modules/grafana.py +69 -0
- raijin_server/modules/hardening.py +47 -0
- raijin_server/modules/harness.py +47 -0
- raijin_server/modules/istio.py +13 -0
- raijin_server/modules/kafka.py +34 -0
- raijin_server/modules/kong.py +19 -0
- raijin_server/modules/kubernetes.py +187 -0
- raijin_server/modules/loki.py +27 -0
- raijin_server/modules/minio.py +19 -0
- raijin_server/modules/network.py +57 -0
- raijin_server/modules/prometheus.py +30 -0
- raijin_server/modules/traefik.py +40 -0
- raijin_server/modules/velero.py +47 -0
- raijin_server/modules/vpn.py +152 -0
- raijin_server/utils.py +241 -0
- raijin_server/validators.py +230 -0
- raijin_server-0.1.0.dist-info/METADATA +219 -0
- raijin_server-0.1.0.dist-info/RECORD +32 -0
- raijin_server-0.1.0.dist-info/WHEEL +5 -0
- raijin_server-0.1.0.dist-info/entry_points.txt +2 -0
- raijin_server-0.1.0.dist-info/licenses/LICENSE +21 -0
- raijin_server-0.1.0.dist-info/top_level.txt +1 -0
raijin_server/healthchecks.py
@@ -0,0 +1,296 @@
"""Health checks to validate service state after installation."""

from __future__ import annotations

import subprocess
import time
from typing import Callable, Tuple

import typer

from raijin_server.utils import ExecutionContext, logger


def wait_for_condition(
    check_fn: Callable[[], bool],
    description: str,
    timeout: int = 300,
    interval: int = 10,
) -> bool:
    """Waits until a condition is met or the timeout expires.

    Args:
        check_fn: Function that returns True when the condition is met
        description: Description of the condition being waited for
        timeout: Maximum wait time in seconds
        interval: Interval between checks in seconds

    Returns:
        True if the condition was met, False on timeout
    """
    elapsed = 0
    typer.echo(f"Waiting for: {description}...")

    while elapsed < timeout:
        if check_fn():
            typer.secho(f"✓ {description} [OK]", fg=typer.colors.GREEN)
            return True

        time.sleep(interval)
        elapsed += interval
        typer.echo(f"  ... still waiting ({elapsed}/{timeout}s)")

    typer.secho(f"✗ {description} [TIMEOUT]", fg=typer.colors.RED)
    return False


def check_systemd_service(service: str, ctx: ExecutionContext) -> Tuple[bool, str]:
    """Checks whether a systemd service is active."""
    if ctx.dry_run:
        return True, "dry-run mode"

    try:
        result = subprocess.run(
            ["systemctl", "is-active", service],
            capture_output=True,
            text=True,
            timeout=10,
        )
        status = result.stdout.strip()
        if result.returncode == 0 and status == "active":
            return True, "active"
        return False, status
    except Exception as e:
        return False, f"error: {e}"


def check_k8s_node_ready(ctx: ExecutionContext, timeout: int = 300) -> bool:
    """Checks whether the Kubernetes node is Ready."""
    if ctx.dry_run:
        typer.echo("[dry-run] Skipping Kubernetes node check")
        return True

    def check():
        try:
            result = subprocess.run(
                ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].status.conditions[?(@.type=='Ready')].status}"],
                capture_output=True,
                text=True,
                timeout=10,
            )
            return result.returncode == 0 and "True" in result.stdout
        except Exception:
            return False

    return wait_for_condition(check, "Kubernetes node Ready", timeout=timeout)


def check_k8s_pods_in_namespace(namespace: str, ctx: ExecutionContext, timeout: int = 300) -> bool:
    """Checks whether all pods in a namespace are Running."""
    if ctx.dry_run:
        typer.echo(f"[dry-run] Skipping pod check in namespace {namespace}")
        return True

    def check():
        try:
            result = subprocess.run(
                [
                    "kubectl", "get", "pods", "-n", namespace,
                    "-o", "jsonpath={.items[*].status.phase}"
                ],
                capture_output=True,
                text=True,
                timeout=10,
            )
            if result.returncode != 0:
                return False

            phases = result.stdout.strip().split()
            if not phases:
                return False

            return all(phase in ["Running", "Succeeded"] for phase in phases)
        except Exception:
            return False

    return wait_for_condition(
        check,
        f"Pods in namespace '{namespace}' Running",
        timeout=timeout
    )


def check_helm_release(release: str, namespace: str, ctx: ExecutionContext) -> Tuple[bool, str]:
    """Checks the status of a Helm release."""
    if ctx.dry_run:
        return True, "dry-run mode"

    try:
        result = subprocess.run(
            ["helm", "status", release, "-n", namespace, "-o", "json"],
            capture_output=True,
            text=True,
            timeout=15,
        )
        if result.returncode == 0:
            import json
            data = json.loads(result.stdout)
            status = data.get("info", {}).get("status", "unknown")
            return status == "deployed", status
        return False, "not found"
    except Exception as e:
        return False, f"error: {e}"


def check_port_listening(port: int, ctx: ExecutionContext) -> Tuple[bool, str]:
    """Checks whether a port is listening."""
    if ctx.dry_run:
        return True, "dry-run mode"

    try:
        result = subprocess.run(
            ["ss", "-tuln"],
            capture_output=True,
            text=True,
            timeout=5,
        )
        if result.returncode == 0:
            listening = any(f":{port}" in line for line in result.stdout.split("\n"))
            return listening, "listening" if listening else "not listening"
        return False, "ss command failed"
    except Exception as e:
        return False, f"error: {e}"


def verify_essentials(ctx: ExecutionContext) -> bool:
    """Health check for the essentials module."""
    logger.info("Running health check: essentials")
    typer.secho("\n=== Health Check: Essentials ===", fg=typer.colors.CYAN)

    checks = [
        ("timedatectl NTP", lambda: subprocess.run(["timedatectl", "show", "-p", "NTP", "--value"], capture_output=True).stdout.strip() == b"yes"),
    ]

    all_ok = True
    for name, check_fn in checks:
        try:
            if ctx.dry_run or check_fn():
                typer.secho(f"  ✓ {name}", fg=typer.colors.GREEN)
            else:
                typer.secho(f"  ✗ {name}", fg=typer.colors.YELLOW)
                all_ok = False
        except Exception as e:
            typer.secho(f"  ✗ {name}: {e}", fg=typer.colors.YELLOW)
            all_ok = False

    return all_ok


def verify_hardening(ctx: ExecutionContext) -> bool:
    """Health check for the hardening module."""
    logger.info("Running health check: hardening")
    typer.secho("\n=== Health Check: Hardening ===", fg=typer.colors.CYAN)

    services = ["fail2ban"]
    all_ok = True

    for service in services:
        ok, status = check_systemd_service(service, ctx)
        if ok:
            typer.secho(f"  ✓ {service}: {status}", fg=typer.colors.GREEN)
        else:
            typer.secho(f"  ✗ {service}: {status}", fg=typer.colors.YELLOW)
            all_ok = False

    return all_ok


def verify_kubernetes(ctx: ExecutionContext) -> bool:
    """Health check for the kubernetes module."""
    logger.info("Running health check: kubernetes")
    typer.secho("\n=== Health Check: Kubernetes ===", fg=typer.colors.CYAN)

    services = ["kubelet", "containerd"]
    all_ok = True

    for service in services:
        ok, status = check_systemd_service(service, ctx)
        if ok:
            typer.secho(f"  ✓ {service}: {status}", fg=typer.colors.GREEN)
        else:
            typer.secho(f"  ✗ {service}: {status}", fg=typer.colors.RED)
            all_ok = False

    # Check the API server
    ok, msg = check_port_listening(6443, ctx)
    if ok:
        typer.secho(f"  ✓ API Server (6443): {msg}", fg=typer.colors.GREEN)
    else:
        typer.secho(f"  ✗ API Server (6443): {msg}", fg=typer.colors.RED)
        all_ok = False

    # Check that the node is Ready
    if not check_k8s_node_ready(ctx, timeout=180):
        all_ok = False

    return all_ok


def verify_calico(ctx: ExecutionContext) -> bool:
    """Health check for the calico module."""
    logger.info("Running health check: calico")
    typer.secho("\n=== Health Check: Calico ===", fg=typer.colors.CYAN)

    return check_k8s_pods_in_namespace("kube-system", ctx, timeout=180)


def verify_helm_chart(release: str, namespace: str, ctx: ExecutionContext) -> bool:
    """Generic health check for Helm charts."""
    logger.info(f"Running health check: {release} in namespace {namespace}")
    typer.secho(f"\n=== Health Check: {release} ===", fg=typer.colors.CYAN)

    ok, status = check_helm_release(release, namespace, ctx)
    if ok:
        typer.secho(f"  ✓ Release {release}: {status}", fg=typer.colors.GREEN)
    else:
        typer.secho(f"  ✗ Release {release}: {status}", fg=typer.colors.RED)
        return False

    return check_k8s_pods_in_namespace(namespace, ctx, timeout=180)


# Mapping of modules to health check functions
HEALTH_CHECKS = {
    "essentials": verify_essentials,
    "hardening": verify_hardening,
    "kubernetes": verify_kubernetes,
    "calico": verify_calico,
    "prometheus": lambda ctx: verify_helm_chart("kube-prometheus-stack", "observability", ctx),
    "grafana": lambda ctx: verify_helm_chart("grafana", "observability", ctx),
    "loki": lambda ctx: verify_helm_chart("loki", "observability", ctx),
    "traefik": lambda ctx: verify_helm_chart("traefik", "traefik", ctx),
    "kong": lambda ctx: verify_helm_chart("kong", "kong", ctx),
    "minio": lambda ctx: verify_helm_chart("minio", "minio", ctx),
    "velero": lambda ctx: verify_helm_chart("velero", "velero", ctx),
    "kafka": lambda ctx: verify_helm_chart("kafka", "kafka", ctx),
}


def run_health_check(module: str, ctx: ExecutionContext) -> bool:
    """Runs the health check for a specific module."""
    if module not in HEALTH_CHECKS:
        logger.warning(f"No health check defined for module '{module}'")
        return True

    try:
        result = HEALTH_CHECKS[module](ctx)
        if result:
            logger.info(f"Health check '{module}': PASS")
        else:
            logger.warning(f"Health check '{module}': FAIL")
            ctx.warnings.append(f"Health check failed for module '{module}'")
        return result
    except Exception as e:
        logger.error(f"Error during health check '{module}': {e}")
        ctx.errors.append(f"Error in health check '{module}': {e}")
        return False
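
For orientation, here is a minimal usage sketch of the module above, driving run_health_check from calling code. It is illustrative only: the ExecutionContext constructor shown is an assumption, since that class is defined in raijin_server/utils.py, which is not part of this hunk.

    # Hypothetical usage sketch -- assumes ExecutionContext accepts a dry_run flag
    # and carries the warnings/errors lists that run_health_check appends to.
    from raijin_server.healthchecks import run_health_check
    from raijin_server.utils import ExecutionContext

    ctx = ExecutionContext(dry_run=True)      # assumed constructor signature
    for module in ("kubernetes", "calico", "prometheus"):
        ok = run_health_check(module, ctx)    # unknown modules return True and only log a warning
        print(f"{module}: {'PASS' if ok else 'FAIL'}")
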
raijin_server/modules/__init__.py
@@ -0,0 +1,26 @@
"""Collection of modules supported by the CLI."""

__all__ = [
    "hardening",
    "network",
    "essentials",
    "firewall",
    "kubernetes",
    "calico",
    "istio",
    "traefik",
    "kong",
    "minio",
    "prometheus",
    "grafana",
    "loki",
    "harness",
    "velero",
    "kafka",
    "bootstrap",
    "full_install",
]

from raijin_server.modules import calico, essentials, firewall, grafana, harness, hardening, istio
from raijin_server.modules import kafka, kong, kubernetes, loki, minio, network, prometheus, traefik
from raijin_server.modules import velero, bootstrap, full_install
raijin_server/modules/bootstrap.py
@@ -0,0 +1,224 @@
"""Automatic installation of the tools required by raijin-server."""

import shutil
from pathlib import Path

import typer

from raijin_server.utils import ExecutionContext, apt_install, apt_update, require_root, run_cmd, write_file


# Tool versions
HELM_VERSION = "3.14.0"
KUBECTL_VERSION = "1.30.0"
ISTIOCTL_VERSION = "1.21.0"
VELERO_VERSION = "1.13.0"


def _install_helm(ctx: ExecutionContext) -> None:
    """Installs Helm via the official script."""
    if shutil.which("helm") and not ctx.dry_run:
        typer.echo("Helm already installed, skipping...")
        return

    typer.echo(f"Installing Helm v{HELM_VERSION}...")
    run_cmd(
        "curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash",
        ctx,
        use_shell=True,
    )


def _install_kubectl(ctx: ExecutionContext) -> None:
    """Installs kubectl from the release binary."""
    if shutil.which("kubectl") and not ctx.dry_run:
        typer.echo("kubectl already installed, skipping...")
        return

    typer.echo(f"Installing kubectl v{KUBECTL_VERSION}...")
    run_cmd(
        [
            "curl",
            "-fsSL",
            "-o",
            "/tmp/kubectl",
            f"https://dl.k8s.io/release/v{KUBECTL_VERSION}/bin/linux/amd64/kubectl",
        ],
        ctx,
    )
    run_cmd(["chmod", "+x", "/tmp/kubectl"], ctx)
    run_cmd(["mv", "/tmp/kubectl", "/usr/local/bin/kubectl"], ctx)


def _install_istioctl(ctx: ExecutionContext) -> None:
    """Installs istioctl."""
    if shutil.which("istioctl") and not ctx.dry_run:
        typer.echo("istioctl already installed, skipping...")
        return

    typer.echo(f"Installing istioctl v{ISTIOCTL_VERSION}...")
    run_cmd(
        f"curl -L https://istio.io/downloadIstio | ISTIO_VERSION={ISTIOCTL_VERSION} sh -",
        ctx,
        use_shell=True,
        cwd="/tmp",
    )
    run_cmd(
        ["mv", f"/tmp/istio-{ISTIOCTL_VERSION}/bin/istioctl", "/usr/local/bin/istioctl"],
        ctx,
    )
    run_cmd(["chmod", "+x", "/usr/local/bin/istioctl"], ctx)


def _install_velero(ctx: ExecutionContext) -> None:
    """Installs the Velero CLI."""
    if shutil.which("velero") and not ctx.dry_run:
        typer.echo("Velero CLI already installed, skipping...")
        return

    typer.echo(f"Installing Velero CLI v{VELERO_VERSION}...")
    tarball = f"velero-v{VELERO_VERSION}-linux-amd64.tar.gz"
    url = f"https://github.com/vmware-tanzu/velero/releases/download/v{VELERO_VERSION}/{tarball}"

    run_cmd(["curl", "-fsSL", "-o", f"/tmp/{tarball}", url], ctx)
    run_cmd(["tar", "-xzf", f"/tmp/{tarball}", "-C", "/tmp"], ctx)
    run_cmd(
        ["mv", f"/tmp/velero-v{VELERO_VERSION}-linux-amd64/velero", "/usr/local/bin/velero"],
        ctx,
    )
    run_cmd(["chmod", "+x", "/usr/local/bin/velero"], ctx)


def _install_containerd(ctx: ExecutionContext) -> None:
    """Configures containerd as the container runtime."""
    typer.echo("Configuring containerd...")

    # Load the required kernel modules
    modules_conf = """overlay
br_netfilter
"""
    write_file(Path("/etc/modules-load.d/k8s.conf"), modules_conf, ctx)

    run_cmd(["modprobe", "overlay"], ctx, check=False)
    run_cmd(["modprobe", "br_netfilter"], ctx, check=False)

    # Sysctl settings for Kubernetes
    sysctl_conf = """net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
"""
    write_file(Path("/etc/sysctl.d/k8s.conf"), sysctl_conf, ctx)
    run_cmd(["sysctl", "--system"], ctx, check=False)

    apt_install(["containerd"], ctx)

    # Generate the default config
    run_cmd(["mkdir", "-p", "/etc/containerd"], ctx)
    run_cmd("containerd config default > /etc/containerd/config.toml", ctx, use_shell=True)

    # Enable SystemdCgroup
    run_cmd(
        "sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml",
        ctx,
        use_shell=True,
    )

    run_cmd(["systemctl", "restart", "containerd"], ctx, check=False)
    run_cmd(["systemctl", "enable", "containerd"], ctx, check=False)


def _setup_swap(ctx: ExecutionContext) -> None:
    """Disables swap (a Kubernetes requirement)."""
    typer.echo("Disabling swap (Kubernetes requirement)...")
    run_cmd(["swapoff", "-a"], ctx, check=False)
    # Remove swap from fstab
    run_cmd(
        "sed -i '/swap/d' /etc/fstab",
        ctx,
        use_shell=True,
        check=False,
    )


def _install_cert_manager(ctx: ExecutionContext) -> None:
    """Installs cert-manager for TLS certificate management."""
    typer.echo("Installing cert-manager...")
    run_cmd(
        [
            "kubectl",
            "apply",
            "-f",
            "https://github.com/cert-manager/cert-manager/releases/download/v1.14.0/cert-manager.yaml",
        ],
        ctx,
    )


def run(ctx: ExecutionContext) -> None:
    """Installs all tools required for the production environment."""
    require_root(ctx)

    typer.secho("\n=== Bootstrap: Installing Tools ===", fg=typer.colors.CYAN, bold=True)

    # Update the system
    typer.echo("\n[1/8] Updating system...")
    apt_update(ctx)

    # Base packages
    typer.echo("\n[2/8] Installing essential packages...")
    apt_install(
        [
            "curl",
            "wget",
            "git",
            "gnupg",
            "lsb-release",
            "ca-certificates",
            "apt-transport-https",
            "software-properties-common",
            "htop",
            "net-tools",
            "vim",
            "jq",
            "unzip",
            "nfs-common",  # For NFS storage
            "open-iscsi",  # For iSCSI storage
        ],
        ctx,
    )

    # Disable swap
    typer.echo("\n[3/8] Configuring system for Kubernetes...")
    _setup_swap(ctx)

    # Containerd
    typer.echo("\n[4/8] Configuring container runtime...")
    _install_containerd(ctx)

    # Helm
    typer.echo("\n[5/8] Installing Helm...")
    _install_helm(ctx)

    # kubectl
    typer.echo("\n[6/8] Installing kubectl...")
    _install_kubectl(ctx)

    # istioctl
    typer.echo("\n[7/8] Installing istioctl...")
    _install_istioctl(ctx)

    # velero
    typer.echo("\n[8/8] Installing Velero CLI...")
    _install_velero(ctx)

    typer.secho("\n✓ Bootstrap complete! Tools installed.", fg=typer.colors.GREEN, bold=True)

    # Summary
    typer.echo("\nInstalled tools:")
    tools = ["helm", "kubectl", "istioctl", "velero", "containerd"]
    for tool in tools:
        path = shutil.which(tool)
        if path or ctx.dry_run:
            typer.secho(f"  ✓ {tool}", fg=typer.colors.GREEN)
        else:
            typer.secho(f"  ✗ {tool} (not found)", fg=typer.colors.YELLOW)
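
The module above guards each step with shutil.which plus the dry_run flag, so re-running it only reinstalls what is missing. A rough invocation sketch follows; the ExecutionContext keyword is an assumption, and the real entry point is the Typer CLI in raijin_server/cli.py, which is not shown in this hunk.

    # Hypothetical invocation sketch -- normally reached through the raijin_server CLI.
    from raijin_server.modules import bootstrap
    from raijin_server.utils import ExecutionContext

    ctx = ExecutionContext(dry_run=True)  # assumed: dry_run makes run_cmd/apt_install only log commands
    bootstrap.run(ctx)                    # walks steps [1/8]..[8/8]; on a real run, tools already on PATH are skipped
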
raijin_server/modules/calico.py
@@ -0,0 +1,36 @@
"""Configuration of Calico as the CNI with a custom CIDR and a default-deny policy."""

from pathlib import Path

import typer

from raijin_server.utils import ExecutionContext, ensure_tool, kubectl_apply, require_root, run_cmd, write_file


def run(ctx: ExecutionContext) -> None:
    require_root(ctx)
    ensure_tool("kubectl", ctx, install_hint="Install kubectl or enable dry-run.")
    ensure_tool("curl", ctx, install_hint="Install curl.")

    typer.echo("Applying Calico as the CNI...")
    pod_cidr = typer.prompt("Pod CIDR (Calico)", default="10.244.0.0/16")

    manifest_url = "https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/calico.yaml"
    cmd = f"curl -s {manifest_url} | sed 's#192.168.0.0/16#{pod_cidr}#' | kubectl apply -f -"
    run_cmd(cmd, ctx, use_shell=True)

    # Default-deny NetworkPolicy for workloads (except kube-system).
    default_deny = """apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: default
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
"""
    policy_path = Path("/tmp/raijin-default-deny.yaml")
    write_file(policy_path, default_deny, ctx)
    kubectl_apply(str(policy_path), ctx)
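
This module substitutes Calico's stock 192.168.0.0/16 pod CIDR with the value prompted at runtime and then applies a default-deny NetworkPolicy that only covers the default namespace. As a hedged follow-up sketch, the health-check helpers shipped in this same release could be reused to confirm the rollout; the ExecutionContext construction is again an assumption.

    # Hypothetical post-install verification -- reuses helpers from raijin_server/healthchecks.py.
    from raijin_server.healthchecks import verify_calico
    from raijin_server.utils import ExecutionContext

    ctx = ExecutionContext(dry_run=False)  # assumed constructor signature
    if verify_calico(ctx):                 # waits for kube-system pods (calico-node etc.) to be Running
        print("Calico is up; default-deny-all applies to the 'default' namespace only")
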
raijin_server/modules/essentials.py
@@ -0,0 +1,29 @@
"""Installation of basic system tools."""

import typer

from raijin_server.utils import ExecutionContext, apt_install, apt_update, require_root, run_cmd


def run(ctx: ExecutionContext) -> None:
    require_root(ctx)
    typer.echo("Installing essential tools...")
    apt_update(ctx)
    apt_install(
        [
            "curl",
            "wget",
            "git",
            "gnupg",
            "lsb-release",
            "ca-certificates",
            "apt-transport-https",
            "htop",
            "net-tools",
            "vim",
            "jq",
            "unzip",
        ],
        ctx,
    )
    run_cmd(["timedatectl", "set-ntp", "true"], ctx)
raijin_server/modules/firewall.py
@@ -0,0 +1,27 @@
"""Firewall management with UFW."""

import typer

from raijin_server.utils import ExecutionContext, apt_install, require_root, run_cmd


def run(ctx: ExecutionContext) -> None:
    require_root(ctx)
    typer.echo("Configuring UFW...")

    apt_install(["ufw"], ctx)
    run_cmd(["ufw", "--force", "reset"], ctx)

    regras = [
        ["ufw", "allow", "22"],
        ["ufw", "allow", "80"],
        ["ufw", "allow", "443"],
        ["ufw", "allow", "6443"],  # Kubernetes API server
        ["ufw", "allow", "2379:2380/tcp"],  # etcd
        ["ufw", "allow", "10250"],  # kubelet
    ]
    for regra in regras:
        run_cmd(regra, ctx, check=False)

    run_cmd(["ufw", "--force", "enable"], ctx)
    run_cmd(["ufw", "status", "numbered"], ctx)