raijin-server 0.2.17__tar.gz → 0.2.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {raijin_server-0.2.17/src/raijin_server.egg-info → raijin_server-0.2.19}/PKG-INFO +1 -1
- {raijin_server-0.2.17 → raijin_server-0.2.19}/setup.cfg +1 -1
- raijin_server-0.2.19/src/raijin_server/__init__.py +5 -0
- raijin_server-0.2.19/src/raijin_server/modules/metallb.py +265 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/traefik.py +41 -1
- {raijin_server-0.2.17 → raijin_server-0.2.19/src/raijin_server.egg-info}/PKG-INFO +1 -1
- raijin_server-0.2.17/src/raijin_server/__init__.py +0 -5
- raijin_server-0.2.17/src/raijin_server/modules/metallb.py +0 -141
- {raijin_server-0.2.17 → raijin_server-0.2.19}/LICENSE +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/README.md +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/pyproject.toml +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/cli.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/config.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/healthchecks.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/__init__.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/apokolips_demo.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/bootstrap.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/calico.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/cert_manager.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/essentials.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/firewall.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/full_install.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/grafana.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/hardening.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/harness.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/istio.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/kafka.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/kong.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/kubernetes.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/loki.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/minio.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/network.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/observability_dashboards.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/observability_ingress.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/prometheus.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/sanitize.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/secrets.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/ssh_hardening.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/velero.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/vpn.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/scripts/__init__.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/scripts/checklist.sh +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/scripts/install.sh +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/scripts/log_size_metric.sh +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/scripts/pre-deploy-check.sh +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/utils.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/validators.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server.egg-info/SOURCES.txt +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server.egg-info/dependency_links.txt +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server.egg-info/entry_points.txt +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server.egg-info/requires.txt +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server.egg-info/top_level.txt +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/tests/test_full_install_sequence.py +0 -0
- {raijin_server-0.2.17 → raijin_server-0.2.19}/tests/test_registry.py +0 -0
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
"""Provisiona MetalLB (L2) com pool de IPs para LoadBalancer em ambientes bare metal."""
|
|
2
|
+
|
|
3
|
+
import socket
|
|
4
|
+
import time
|
|
5
|
+
|
|
6
|
+
import typer
|
|
7
|
+
|
|
8
|
+
from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _detect_node_name(ctx: ExecutionContext) -> str:
    """Return the first cluster node name via kubectl, or the local hostname.

    Falls back to ``socket.gethostname()`` whenever kubectl fails or prints
    nothing, so callers always receive a usable node name.
    """
    query = [
        "kubectl",
        "get",
        "nodes",
        "-o",
        "jsonpath={.items[0].metadata.name}",
    ]
    result = run_cmd(query, ctx, check=False)
    if result.returncode == 0:
        detected = (result.stdout or "").strip()
        if detected:
            return detected
    # kubectl unavailable or returned nothing: use the local hostname.
    return socket.gethostname()
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _uninstall_metallb(ctx: ExecutionContext) -> None:
    """Fully tear down a previous MetalLB install: release, CRDs and namespace."""
    typer.echo("Removendo instalacao anterior do MetalLB...")

    # Best-effort helm removal; failures are ignored if the release is gone.
    run_cmd(["helm", "uninstall", "metallb", "-n", "metallb-system"], ctx, check=False)

    # helm uninstall leaves MetalLB CRDs behind; delete them explicitly.
    orphan_crds = [
        "ipaddresspools.metallb.io",
        "l2advertisements.metallb.io",
        "bgpadvertisements.metallb.io",
        "bgppeers.metallb.io",
        "bfdprofiles.metallb.io",
        "communities.metallb.io",
        "servicel2statuses.metallb.io",
    ]
    run_cmd(["kubectl", "delete", "crd", *orphan_crds, "--ignore-not-found"], ctx, check=False)

    # Drop the namespace if it is still around.
    run_cmd(
        ["kubectl", "delete", "namespace", "metallb-system", "--ignore-not-found"],
        ctx,
        check=False,
    )

    # Give the API server a moment to finish the deletions.
    time.sleep(5)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _check_existing_metallb(ctx: ExecutionContext) -> bool:
    """Report whether a MetalLB helm release already exists in metallb-system."""
    status = run_cmd(
        ["helm", "status", "metallb", "-n", "metallb-system"],
        ctx,
        check=False,
    )
    # helm status exits 0 only when the release is present.
    return status.returncode == 0
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _wait_for_pods_running(ctx: ExecutionContext, timeout: int = 180) -> bool:
    """Poll until every MetalLB pod reports phase Running, or the timeout hits.

    Returns True once all pods are Running; on timeout prints diagnostics
    (pod list plus recent events) and returns False.
    """
    typer.echo("Aguardando pods do MetalLB ficarem Running...")
    deadline = time.time() + timeout
    list_pods = [
        "kubectl", "-n", "metallb-system", "get", "pods",
        "-o", "jsonpath={range .items[*]}{.metadata.name}:{.status.phase}\\n{end}",
    ]

    while time.time() < deadline:
        result = run_cmd(list_pods, ctx, check=False)
        output = (result.stdout or "").strip() if result.returncode == 0 else ""
        if not output:
            # kubectl failed or no pods scheduled yet; retry shortly.
            time.sleep(5)
            continue

        pods = []
        for line in output.split("\n"):
            if not line:
                continue
            # rsplit on the last ":" so a ":" inside a pod name cannot break parsing.
            pieces = line.rsplit(":", 1)
            if len(pieces) == 2:
                pods.append((pieces[0], pieces[1]))

        if pods and all(phase == "Running" for _, phase in pods):
            typer.secho(f" Todos os {len(pods)} pods Running.", fg=typer.colors.GREEN)
            return True

        # Show which pods are still not Running (at most three names).
        not_ready = [name for name, phase in pods if phase != "Running"]
        if not_ready:
            typer.echo(f" Aguardando: {', '.join(not_ready[:3])}...")

        time.sleep(10)

    # Deadline reached: dump cluster state to help the operator debug.
    typer.secho(" Timeout esperando pods. Diagnostico:", fg=typer.colors.YELLOW)
    run_cmd(["kubectl", "-n", "metallb-system", "get", "pods", "-o", "wide"], ctx, check=False)
    run_cmd(["kubectl", "-n", "metallb-system", "get", "events", "--sort-by=.lastTimestamp"], ctx, check=False)
    return False
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def _wait_for_webhook_ready(ctx: ExecutionContext, timeout: int = 120) -> bool:
    """Poll the MetalLB webhook service endpoints until an address is published."""
    typer.echo("Aguardando webhook do MetalLB...")
    deadline = time.time() + timeout
    probe = [
        "kubectl", "-n", "metallb-system", "get", "endpoints",
        "metallb-webhook-service", "-o", "jsonpath={.subsets[0].addresses[0].ip}",
    ]

    while time.time() < deadline:
        result = run_cmd(probe, ctx, check=False)
        # An endpoint IP in stdout means the webhook pod is reachable.
        if result.returncode == 0 and (result.stdout or "").strip():
            typer.secho(" Webhook disponivel.", fg=typer.colors.GREEN)
            return True
        time.sleep(5)

    typer.secho(" Webhook nao ficou disponivel.", fg=typer.colors.YELLOW)
    return False
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _apply_pool_with_retry(manifest: str, ctx: ExecutionContext, max_attempts: int = 12) -> bool:
    """Apply the IPAddressPool/L2Advertisement manifest, retrying on webhook errors.

    Retries up to ``max_attempts`` times (10s apart) only while stderr suggests
    the admission webhook is not reachable yet; any other error aborts at once.
    """
    typer.echo("Aplicando IPAddressPool e L2Advertisement...")

    # NOTE(review): the manifest is spliced into a single-quoted shell string;
    # this assumes it never contains a single quote — confirm upstream inputs.
    apply_cmd = f"echo '{manifest}' | kubectl apply -f -"

    attempt = 0
    while attempt < max_attempts:
        attempt += 1
        result = run_cmd(apply_cmd, ctx, use_shell=True, check=False)
        if result.returncode == 0:
            typer.secho(" Pool e L2Advertisement aplicados.", fg=typer.colors.GREEN)
            return True

        err_text = (result.stderr or "").lower()
        webhook_issue = "webhook" in err_text or "connection refused" in err_text
        if not webhook_issue:
            # Unrelated failure: surface it and stop retrying.
            typer.secho(f" Erro: {result.stderr}", fg=typer.colors.RED)
            return False
        typer.echo(f" Webhook nao pronto, tentativa {attempt}/{max_attempts}...")
        time.sleep(10)

    # Exhausted every attempt while the webhook never came up.
    return False
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def run(ctx: ExecutionContext) -> None:
    """Install MetalLB (L2 mode) via Helm and configure its address pool.

    Flow: optional cleanup of a prior release, prompt for the IP pool,
    helm install pinned to the detected node, wait for pods and webhook,
    then apply the IPAddressPool/L2Advertisement manifest with retries.
    """
    require_root(ctx)
    typer.echo("Instalando MetalLB via Helm...")

    # Offer to wipe a previous install before proceeding.
    if _check_existing_metallb(ctx):
        if typer.confirm(
            "Instalacao anterior do MetalLB detectada. Limpar antes de reinstalar?",
            default=False,
        ):
            _uninstall_metallb(ctx)

    pool = typer.prompt(
        "Pool de IPs (range ou CIDR) para services LoadBalancer",
        default="192.168.1.100-192.168.1.250",
    )

    node_name = _detect_node_name(ctx)

    # Tolerate control-plane/master taints (single-node clusters) and pin
    # both components to the detected node. The dotted hostname key is
    # backslash-escaped so helm --set does not parse it as nested maps.
    values = []
    taints = (
        "node-role.kubernetes.io/control-plane",
        "node-role.kubernetes.io/master",
    )
    for component in ("controller", "speaker"):
        for idx, taint in enumerate(taints):
            values.append(f"{component}.tolerations[{idx}].key={taint}")
            values.append(f"{component}.tolerations[{idx}].operator=Exists")
            values.append(f"{component}.tolerations[{idx}].effect=NoSchedule")
    values.append(f"controller.nodeSelector.kubernetes\\.io/hostname={node_name}")
    values.append(f"speaker.nodeSelector.kubernetes\\.io/hostname={node_name}")

    # Fresh install (or upgrade of an existing release).
    helm_upgrade_install(
        release="metallb",
        chart="metallb",
        namespace="metallb-system",
        repo="metallb",
        repo_url="https://metallb.github.io/metallb",
        ctx=ctx,
        values=values,
    )

    # Controller and speaker must be Running before the CRs can be applied.
    if not _wait_for_pods_running(ctx):
        ctx.errors.append("Pods do MetalLB nao subiram - verifique taints/recursos do cluster")
        return

    # Webhook readiness is best-effort; the apply step retries on its own.
    if not _wait_for_webhook_ready(ctx):
        typer.secho("Continuando mesmo sem confirmacao do webhook...", fg=typer.colors.YELLOW)

    manifest = f"""
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: raijin-pool
  namespace: metallb-system
spec:
  addresses:
  - {pool}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: raijin-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - raijin-pool
"""

    if not _apply_pool_with_retry(manifest, ctx):
        ctx.errors.append("Falha ao aplicar IPAddressPool/L2Advertisement")
        return

    typer.secho("\n✓ MetalLB instalado. Services LoadBalancer usarao o pool informado.", fg=typer.colors.GREEN, bold=True)
|
|
@@ -7,6 +7,36 @@ import typer
|
|
|
7
7
|
from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
|
|
8
8
|
|
|
9
9
|
|
|
10
|
+
def _check_existing_traefik(ctx: ExecutionContext) -> bool:
    """Report whether a Traefik helm release already exists in its namespace."""
    status = run_cmd(
        ["helm", "status", "traefik", "-n", "traefik"],
        ctx,
        check=False,
    )
    # helm status exits 0 only when the release is present.
    return status.returncode == 0
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _uninstall_traefik(ctx: ExecutionContext) -> None:
    """Tear down a previous Traefik installation (helm release and namespace)."""
    # Local import kept to avoid touching this module's top-level imports.
    import time

    typer.echo("Removendo instalacao anterior do Traefik...")

    # Best-effort removal; failures are ignored if release/namespace are absent.
    run_cmd(["helm", "uninstall", "traefik", "-n", "traefik"], ctx, check=False)
    run_cmd(
        ["kubectl", "delete", "namespace", "traefik", "--ignore-not-found"],
        ctx,
        check=False,
    )

    # Give the API server a moment to finish the deletions.
    time.sleep(5)
|
|
38
|
+
|
|
39
|
+
|
|
10
40
|
def _detect_node_name(ctx: ExecutionContext) -> str:
|
|
11
41
|
"""Tenta obter o nome do node via kubectl; fallback para hostname local.
|
|
12
42
|
|
|
@@ -35,6 +65,15 @@ def run(ctx: ExecutionContext) -> None:
|
|
|
35
65
|
require_root(ctx)
|
|
36
66
|
typer.echo("Instalando Traefik via Helm...")
|
|
37
67
|
|
|
68
|
+
# Prompt opcional de limpeza
|
|
69
|
+
if _check_existing_traefik(ctx):
|
|
70
|
+
cleanup = typer.confirm(
|
|
71
|
+
"Instalacao anterior do Traefik detectada. Limpar antes de reinstalar?",
|
|
72
|
+
default=False,
|
|
73
|
+
)
|
|
74
|
+
if cleanup:
|
|
75
|
+
_uninstall_traefik(ctx)
|
|
76
|
+
|
|
38
77
|
acme_email = typer.prompt("Email para ACME/Let's Encrypt", default="admin@example.com")
|
|
39
78
|
dashboard_host = typer.prompt("Host para dashboard (opcional)", default="traefik.local")
|
|
40
79
|
|
|
@@ -56,7 +95,8 @@ def run(ctx: ExecutionContext) -> None:
|
|
|
56
95
|
"tolerations[1].key=node-role.kubernetes.io/master",
|
|
57
96
|
"tolerations[1].operator=Exists",
|
|
58
97
|
"tolerations[1].effect=NoSchedule",
|
|
59
|
-
|
|
98
|
+
# Escapa chave com ponto para evitar parsing incorreto
|
|
99
|
+
f"nodeSelector.kubernetes\\.io/hostname={node_name}",
|
|
60
100
|
]
|
|
61
101
|
|
|
62
102
|
if dashboard_host:
|
|
@@ -1,141 +0,0 @@
|
|
|
1
|
-
"""Provisiona MetalLB (L2) com pool de IPs para LoadBalancer em ambientes bare metal."""
|
|
2
|
-
|
|
3
|
-
import socket
|
|
4
|
-
|
|
5
|
-
import typer
|
|
6
|
-
|
|
7
|
-
from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
def _detect_node_name(ctx: ExecutionContext) -> str:
|
|
11
|
-
"""Tenta obter o nome do node via kubectl; fallback para hostname local."""
|
|
12
|
-
|
|
13
|
-
result = run_cmd(
|
|
14
|
-
[
|
|
15
|
-
"kubectl",
|
|
16
|
-
"get",
|
|
17
|
-
"nodes",
|
|
18
|
-
"-o",
|
|
19
|
-
"jsonpath={.items[0].metadata.name}",
|
|
20
|
-
],
|
|
21
|
-
ctx,
|
|
22
|
-
check=False,
|
|
23
|
-
)
|
|
24
|
-
if result.returncode == 0:
|
|
25
|
-
node_name = (result.stdout or "").strip()
|
|
26
|
-
if node_name:
|
|
27
|
-
return node_name
|
|
28
|
-
return socket.gethostname()
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
def _rollout_wait(kind: str, name: str, ctx: ExecutionContext) -> None:
|
|
32
|
-
run_cmd([
|
|
33
|
-
"kubectl",
|
|
34
|
-
"-n",
|
|
35
|
-
"metallb-system",
|
|
36
|
-
"rollout",
|
|
37
|
-
"status",
|
|
38
|
-
f"{kind}/{name}",
|
|
39
|
-
"--timeout",
|
|
40
|
-
"180s",
|
|
41
|
-
], ctx, check=False)
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
def _wait_webhook(ctx: ExecutionContext) -> None:
|
|
45
|
-
# Descobre o nome do deployment do webhook (varia conforme chart), entao aguarda disponibilidade
|
|
46
|
-
result = run_cmd(
|
|
47
|
-
[
|
|
48
|
-
"kubectl",
|
|
49
|
-
"-n",
|
|
50
|
-
"metallb-system",
|
|
51
|
-
"get",
|
|
52
|
-
"deploy",
|
|
53
|
-
"-l",
|
|
54
|
-
"app.kubernetes.io/component=webhook",
|
|
55
|
-
"-o",
|
|
56
|
-
"jsonpath={.items[0].metadata.name}",
|
|
57
|
-
],
|
|
58
|
-
ctx,
|
|
59
|
-
check=False,
|
|
60
|
-
)
|
|
61
|
-
if result.returncode == 0:
|
|
62
|
-
name = (result.stdout or "").strip()
|
|
63
|
-
if name:
|
|
64
|
-
_rollout_wait("deployment", name, ctx)
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
def run(ctx: ExecutionContext) -> None:
|
|
68
|
-
require_root(ctx)
|
|
69
|
-
typer.echo("Instalando MetalLB via Helm...")
|
|
70
|
-
|
|
71
|
-
pool = typer.prompt(
|
|
72
|
-
"Pool de IPs (range ou CIDR) para services LoadBalancer",
|
|
73
|
-
default="192.168.1.100-192.168.1.250",
|
|
74
|
-
)
|
|
75
|
-
|
|
76
|
-
node_name = _detect_node_name(ctx)
|
|
77
|
-
|
|
78
|
-
values = [
|
|
79
|
-
# Permite agendar em control-plane de cluster single-node
|
|
80
|
-
"controller.tolerations[0].key=node-role.kubernetes.io/control-plane",
|
|
81
|
-
"controller.tolerations[0].operator=Exists",
|
|
82
|
-
"controller.tolerations[0].effect=NoSchedule",
|
|
83
|
-
"controller.tolerations[1].key=node-role.kubernetes.io/master",
|
|
84
|
-
"controller.tolerations[1].operator=Exists",
|
|
85
|
-
"controller.tolerations[1].effect=NoSchedule",
|
|
86
|
-
"speaker.tolerations[0].key=node-role.kubernetes.io/control-plane",
|
|
87
|
-
"speaker.tolerations[0].operator=Exists",
|
|
88
|
-
"speaker.tolerations[0].effect=NoSchedule",
|
|
89
|
-
"speaker.tolerations[1].key=node-role.kubernetes.io/master",
|
|
90
|
-
"speaker.tolerations[1].operator=Exists",
|
|
91
|
-
"speaker.tolerations[1].effect=NoSchedule",
|
|
92
|
-
# Escapa a chave com ponto; evita map literal que quebra o schema do chart
|
|
93
|
-
f"controller.nodeSelector.kubernetes\\.io/hostname={node_name}",
|
|
94
|
-
f"speaker.nodeSelector.kubernetes\\.io/hostname={node_name}",
|
|
95
|
-
]
|
|
96
|
-
|
|
97
|
-
# Instala control-plane + speaker
|
|
98
|
-
helm_upgrade_install(
|
|
99
|
-
release="metallb",
|
|
100
|
-
chart="metallb",
|
|
101
|
-
namespace="metallb-system",
|
|
102
|
-
repo="metallb",
|
|
103
|
-
repo_url="https://metallb.github.io/metallb",
|
|
104
|
-
ctx=ctx,
|
|
105
|
-
values=values,
|
|
106
|
-
)
|
|
107
|
-
|
|
108
|
-
# Espera recursos principais ficarem prontos
|
|
109
|
-
_rollout_wait("deployment", "controller", ctx)
|
|
110
|
-
_rollout_wait("daemonset", "speaker", ctx)
|
|
111
|
-
_wait_webhook(ctx)
|
|
112
|
-
run_cmd(["sleep", "5"], ctx, check=False) # pequeno buffer para webhook responder
|
|
113
|
-
|
|
114
|
-
# Aplica IPAddressPool + L2Advertisement
|
|
115
|
-
manifest = f"""
|
|
116
|
-
apiVersion: metallb.io/v1beta1
|
|
117
|
-
kind: IPAddressPool
|
|
118
|
-
metadata:
|
|
119
|
-
name: raijin-pool
|
|
120
|
-
namespace: metallb-system
|
|
121
|
-
spec:
|
|
122
|
-
addresses:
|
|
123
|
-
- {pool}
|
|
124
|
-
---
|
|
125
|
-
apiVersion: metallb.io/v1beta1
|
|
126
|
-
kind: L2Advertisement
|
|
127
|
-
metadata:
|
|
128
|
-
name: raijin-l2
|
|
129
|
-
namespace: metallb-system
|
|
130
|
-
spec:
|
|
131
|
-
ipAddressPools:
|
|
132
|
-
- raijin-pool
|
|
133
|
-
"""
|
|
134
|
-
|
|
135
|
-
run_cmd(
|
|
136
|
-
f"echo '{manifest}' | kubectl apply -f -",
|
|
137
|
-
ctx,
|
|
138
|
-
use_shell=True,
|
|
139
|
-
)
|
|
140
|
-
|
|
141
|
-
typer.secho("\n✓ MetalLB aplicado. Services LoadBalancer usarao o pool informado.", fg=typer.colors.GREEN, bold=True)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/observability_dashboards.py
RENAMED
|
File without changes
|
{raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server/modules/observability_ingress.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{raijin_server-0.2.17 → raijin_server-0.2.19}/src/raijin_server.egg-info/dependency_links.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|