raijin-server 0.2.16__tar.gz → 0.2.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {raijin_server-0.2.16/src/raijin_server.egg-info → raijin_server-0.2.18}/PKG-INFO +1 -1
  2. {raijin_server-0.2.16 → raijin_server-0.2.18}/setup.cfg +1 -1
  3. raijin_server-0.2.18/src/raijin_server/__init__.py +5 -0
  4. raijin_server-0.2.18/src/raijin_server/modules/metallb.py +258 -0
  5. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/traefik.py +41 -1
  6. {raijin_server-0.2.16 → raijin_server-0.2.18/src/raijin_server.egg-info}/PKG-INFO +1 -1
  7. raijin_server-0.2.16/src/raijin_server/__init__.py +0 -5
  8. raijin_server-0.2.16/src/raijin_server/modules/metallb.py +0 -141
  9. {raijin_server-0.2.16 → raijin_server-0.2.18}/LICENSE +0 -0
  10. {raijin_server-0.2.16 → raijin_server-0.2.18}/README.md +0 -0
  11. {raijin_server-0.2.16 → raijin_server-0.2.18}/pyproject.toml +0 -0
  12. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/cli.py +0 -0
  13. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/config.py +0 -0
  14. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/healthchecks.py +0 -0
  15. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/__init__.py +0 -0
  16. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/apokolips_demo.py +0 -0
  17. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/bootstrap.py +0 -0
  18. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/calico.py +0 -0
  19. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/cert_manager.py +0 -0
  20. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/essentials.py +0 -0
  21. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/firewall.py +0 -0
  22. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/full_install.py +0 -0
  23. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/grafana.py +0 -0
  24. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/hardening.py +0 -0
  25. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/harness.py +0 -0
  26. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/istio.py +0 -0
  27. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/kafka.py +0 -0
  28. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/kong.py +0 -0
  29. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/kubernetes.py +0 -0
  30. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/loki.py +0 -0
  31. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/minio.py +0 -0
  32. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/network.py +0 -0
  33. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/observability_dashboards.py +0 -0
  34. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/observability_ingress.py +0 -0
  35. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/prometheus.py +0 -0
  36. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/sanitize.py +0 -0
  37. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/secrets.py +0 -0
  38. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/ssh_hardening.py +0 -0
  39. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/velero.py +0 -0
  40. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/modules/vpn.py +0 -0
  41. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/scripts/__init__.py +0 -0
  42. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/scripts/checklist.sh +0 -0
  43. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/scripts/install.sh +0 -0
  44. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/scripts/log_size_metric.sh +0 -0
  45. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/scripts/pre-deploy-check.sh +0 -0
  46. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/utils.py +0 -0
  47. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server/validators.py +0 -0
  48. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server.egg-info/SOURCES.txt +0 -0
  49. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server.egg-info/dependency_links.txt +0 -0
  50. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server.egg-info/entry_points.txt +0 -0
  51. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server.egg-info/requires.txt +0 -0
  52. {raijin_server-0.2.16 → raijin_server-0.2.18}/src/raijin_server.egg-info/top_level.txt +0 -0
  53. {raijin_server-0.2.16 → raijin_server-0.2.18}/tests/test_full_install_sequence.py +0 -0
  54. {raijin_server-0.2.16 → raijin_server-0.2.18}/tests/test_registry.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: raijin-server
3
- Version: 0.2.16
3
+ Version: 0.2.18
4
4
  Summary: CLI para automacao de setup e hardening de servidores Ubuntu Server.
5
5
  Home-page: https://example.com/raijin-server
6
6
  Author: Equipe Raijin
@@ -1,6 +1,6 @@
1
1
  [metadata]
2
2
  name = raijin-server
3
- version = 0.2.16
3
+ version = 0.2.18
4
4
  description = CLI para automacao de setup e hardening de servidores Ubuntu Server.
5
5
  long_description = file: README.md
6
6
  long_description_content_type = text/markdown
@@ -0,0 +1,5 @@
1
+ """Pacote principal do CLI Raijin Server."""
2
+
3
+ __version__ = "0.2.18"
4
+
5
+ __all__ = ["__version__"]
@@ -0,0 +1,258 @@
1
+ """Provisiona MetalLB (L2) com pool de IPs para LoadBalancer em ambientes bare metal."""
2
+
3
+ import socket
4
+ import time
5
+
6
+ import typer
7
+
8
+ from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
9
+
10
+
11
def _detect_node_name(ctx: ExecutionContext) -> str:
    """Resolve the cluster node name via kubectl, falling back to the local hostname."""
    query = [
        "kubectl",
        "get",
        "nodes",
        "-o",
        "jsonpath={.items[0].metadata.name}",
    ]
    outcome = run_cmd(query, ctx, check=False)
    if outcome.returncode == 0:
        candidate = (outcome.stdout or "").strip()
        if candidate:
            return candidate
    # kubectl failed or printed nothing: assume single-node and use the hostname.
    return socket.gethostname()
30
+
31
+
32
def _uninstall_metallb(ctx: ExecutionContext) -> None:
    """Tear down a previous MetalLB install: helm release, CRDs and namespace."""
    typer.echo("Removendo instalacao anterior do MetalLB...")

    # Best-effort helm uninstall; ignore failures (release may be half-removed).
    run_cmd(["helm", "uninstall", "metallb", "-n", "metallb-system"], ctx, check=False)

    # Helm does not delete CRDs, so they would be orphaned otherwise.
    crds = [
        "ipaddresspools.metallb.io",
        "l2advertisements.metallb.io",
        "bgpadvertisements.metallb.io",
        "bgppeers.metallb.io",
        "bfdprofiles.metallb.io",
        "communities.metallb.io",
        "servicel2statuses.metallb.io",
    ]
    run_cmd(["kubectl", "delete", "crd", *crds, "--ignore-not-found"], ctx, check=False)

    # Drop the namespace if it is still around.
    run_cmd(
        ["kubectl", "delete", "namespace", "metallb-system", "--ignore-not-found"],
        ctx,
        check=False,
    )

    # Give the API server a moment to finish the deletions.
    time.sleep(5)
67
+
68
+
69
def _check_existing_metallb(ctx: ExecutionContext) -> bool:
    """Return True when a MetalLB helm release exists in metallb-system."""
    status = run_cmd(
        ["helm", "status", "metallb", "-n", "metallb-system"],
        ctx,
        check=False,
    )
    return status.returncode == 0
77
+
78
+
79
def _wait_for_pods_running(ctx: ExecutionContext, timeout: int = 180) -> bool:
    """Poll until every pod in metallb-system reports phase Running.

    Args:
        ctx: Execution context forwarded to run_cmd.
        timeout: Maximum seconds to wait before giving up.

    Returns:
        True when all pods are Running; False on timeout (after printing
        pod and event diagnostics to help the operator).
    """
    typer.echo("Aguardando pods do MetalLB ficarem Running...")
    deadline = time.time() + timeout

    while time.time() < deadline:
        result = run_cmd(
            [
                "kubectl", "-n", "metallb-system", "get", "pods",
                "-o", "jsonpath={range .items[*]}{.metadata.name}:{.status.phase}\\n{end}",
            ],
            ctx,
            check=False,
        )

        if result.returncode != 0:
            time.sleep(5)
            continue

        output = (result.stdout or "").strip()
        if not output:
            # Namespace may exist before any pod has been scheduled.
            time.sleep(5)
            continue

        # split(":", 1) guards the 2-tuple unpacking below against any
        # unexpected extra colon in the phase field (pod names cannot
        # contain ":", but a bare split would raise ValueError).
        pods = [line.split(":", 1) for line in output.split("\n") if line and ":" in line]

        if pods and all(phase == "Running" for _, phase in pods):
            typer.secho(f" Todos os {len(pods)} pods Running.", fg=typer.colors.GREEN)
            return True

        # Show a short sample of what is still pending.
        pending = [name for name, phase in pods if phase != "Running"]
        if pending:
            typer.echo(f" Aguardando: {', '.join(pending[:3])}...")

        time.sleep(10)

    # Timeout: surface pod and event state so the operator can diagnose.
    typer.secho(" Timeout esperando pods. Diagnostico:", fg=typer.colors.YELLOW)
    run_cmd(["kubectl", "-n", "metallb-system", "get", "pods", "-o", "wide"], ctx, check=False)
    run_cmd(["kubectl", "-n", "metallb-system", "get", "events", "--sort-by=.lastTimestamp"], ctx, check=False)
    return False
123
+
124
+
125
def _wait_for_webhook_ready(ctx: ExecutionContext, timeout: int = 120) -> bool:
    """Poll the webhook service endpoints until at least one address shows up."""
    typer.echo("Aguardando webhook do MetalLB...")
    deadline = time.time() + timeout

    while time.time() < deadline:
        probe = run_cmd(
            [
                "kubectl", "-n", "metallb-system", "get", "endpoints",
                "metallb-webhook-service", "-o", "jsonpath={.subsets[0].addresses[0].ip}",
            ],
            ctx,
            check=False,
        )
        has_address = probe.returncode == 0 and bool((probe.stdout or "").strip())
        if has_address:
            typer.secho(" Webhook disponivel.", fg=typer.colors.GREEN)
            return True
        time.sleep(5)

    typer.secho(" Webhook nao ficou disponivel.", fg=typer.colors.YELLOW)
    return False
146
+
147
+
148
def _apply_pool_with_retry(manifest: str, ctx: ExecutionContext, max_attempts: int = 12) -> bool:
    """Apply the IPAddressPool/L2Advertisement manifest, retrying while the
    MetalLB admission webhook is still coming up.

    Args:
        manifest: YAML manifest text to pipe into ``kubectl apply -f -``.
        ctx: Execution context forwarded to run_cmd.
        max_attempts: Retries before giving up on webhook-related failures.

    Returns:
        True on successful apply, False on a hard error or after exhausting
        all attempts.
    """
    typer.echo("Aplicando IPAddressPool e L2Advertisement...")

    # Escape single quotes so the (partly user-supplied) manifest cannot break
    # out of the shell quoting below — previously a "'" in the pool prompt
    # corrupted the command line (shell-injection risk).
    safe_manifest = manifest.replace("'", "'\"'\"'")

    for attempt in range(1, max_attempts + 1):
        result = run_cmd(
            f"echo '{safe_manifest}' | kubectl apply -f -",
            ctx,
            use_shell=True,
            check=False,
        )
        if result.returncode == 0:
            typer.secho(" Pool e L2Advertisement aplicados.", fg=typer.colors.GREEN)
            return True

        stderr = (result.stderr or "").lower()
        if "webhook" in stderr or "connection refused" in stderr:
            # Webhook not serving yet — wait and retry.
            typer.echo(f" Webhook nao pronto, tentativa {attempt}/{max_attempts}...")
            time.sleep(10)
        else:
            # Non-webhook failure: retrying will not help, bail out.
            typer.secho(f" Erro: {result.stderr}", fg=typer.colors.RED)
            return False

    return False
172
+
173
+
174
def run(ctx: ExecutionContext) -> None:
    """Install MetalLB via Helm and configure an L2 address pool."""
    require_root(ctx)
    typer.echo("Instalando MetalLB via Helm...")

    # Offer to wipe a previous release before reinstalling.
    if _check_existing_metallb(ctx):
        if typer.confirm(
            "Instalacao anterior do MetalLB detectada. Limpar antes de reinstalar?",
            default=False,
        ):
            _uninstall_metallb(ctx)

    pool = typer.prompt(
        "Pool de IPs (range ou CIDR) para services LoadBalancer",
        default="192.168.1.100-192.168.1.250",
    )

    node_name = _detect_node_name(ctx)

    # Tolerate control-plane taints so a single-node cluster can schedule
    # both the controller deployment and the speaker daemonset.
    values = []
    for component in ("controller", "speaker"):
        for idx, role in enumerate(("control-plane", "master")):
            values += [
                f"{component}.tolerations[{idx}].key=node-role.kubernetes.io/{role}",
                f"{component}.tolerations[{idx}].operator=Exists",
                f"{component}.tolerations[{idx}].effect=NoSchedule",
            ]
    values += [
        # The dot inside the label key must be escaped for helm --set parsing.
        f"controller.nodeSelector.kubernetes\\.io/hostname={node_name}",
        f"speaker.nodeSelector.kubernetes\\.io/hostname={node_name}",
    ]

    # Fresh install (or upgrade in place).
    helm_upgrade_install(
        release="metallb",
        chart="metallb",
        namespace="metallb-system",
        repo="metallb",
        repo_url="https://metallb.github.io/metallb",
        ctx=ctx,
        values=values,
    )

    # Pods must be Running before the CRs can be admitted.
    if not _wait_for_pods_running(ctx):
        ctx.errors.append("Pods do MetalLB nao subiram - verifique taints/recursos do cluster")
        return

    # Webhook readiness is best-effort; the apply step retries anyway.
    if not _wait_for_webhook_ready(ctx):
        typer.secho("Continuando mesmo sem confirmacao do webhook...", fg=typer.colors.YELLOW)

    manifest = f"""
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: raijin-pool
  namespace: metallb-system
spec:
  addresses:
  - {pool}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: raijin-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - raijin-pool
"""

    if not _apply_pool_with_retry(manifest, ctx):
        ctx.errors.append("Falha ao aplicar IPAddressPool/L2Advertisement")
        return

    typer.secho("\n✓ MetalLB instalado. Services LoadBalancer usarao o pool informado.", fg=typer.colors.GREEN, bold=True)
@@ -7,6 +7,36 @@ import typer
7
7
  from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
8
8
 
9
9
 
10
def _check_existing_traefik(ctx: ExecutionContext) -> bool:
    """Return True when a Traefik helm release is present in the traefik namespace."""
    status = run_cmd(
        ["helm", "status", "traefik", "-n", "traefik"],
        ctx,
        check=False,
    )
    return status.returncode == 0
18
+
19
+
20
def _uninstall_traefik(ctx: ExecutionContext) -> None:
    """Tear down a previous Traefik install (helm release plus namespace)."""
    # Local import: `time` is not imported at this module's top level.
    import time
    typer.echo("Removendo instalacao anterior do Traefik...")

    # Best-effort removal; ignore errors from a partially removed release.
    run_cmd(["helm", "uninstall", "traefik", "-n", "traefik"], ctx, check=False)
    run_cmd(
        ["kubectl", "delete", "namespace", "traefik", "--ignore-not-found"],
        ctx,
        check=False,
    )

    # Allow the API server to finish namespace deletion.
    time.sleep(5)
38
+
39
+
10
40
  def _detect_node_name(ctx: ExecutionContext) -> str:
11
41
  """Tenta obter o nome do node via kubectl; fallback para hostname local.
12
42
 
@@ -35,6 +65,15 @@ def run(ctx: ExecutionContext) -> None:
35
65
  require_root(ctx)
36
66
  typer.echo("Instalando Traefik via Helm...")
37
67
 
68
+ # Prompt opcional de limpeza
69
+ if _check_existing_traefik(ctx):
70
+ cleanup = typer.confirm(
71
+ "Instalacao anterior do Traefik detectada. Limpar antes de reinstalar?",
72
+ default=False,
73
+ )
74
+ if cleanup:
75
+ _uninstall_traefik(ctx)
76
+
38
77
  acme_email = typer.prompt("Email para ACME/Let's Encrypt", default="admin@example.com")
39
78
  dashboard_host = typer.prompt("Host para dashboard (opcional)", default="traefik.local")
40
79
 
@@ -56,7 +95,8 @@ def run(ctx: ExecutionContext) -> None:
56
95
  "tolerations[1].key=node-role.kubernetes.io/master",
57
96
  "tolerations[1].operator=Exists",
58
97
  "tolerations[1].effect=NoSchedule",
59
- f"nodeSelector.kubernetes.io/hostname={node_name}",
98
+ # Escapa chave com ponto para evitar parsing incorreto
99
+ f"nodeSelector.kubernetes\\.io/hostname={node_name}",
60
100
  ]
61
101
 
62
102
  if dashboard_host:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: raijin-server
3
- Version: 0.2.16
3
+ Version: 0.2.18
4
4
  Summary: CLI para automacao de setup e hardening de servidores Ubuntu Server.
5
5
  Home-page: https://example.com/raijin-server
6
6
  Author: Equipe Raijin
@@ -1,5 +0,0 @@
1
- """Pacote principal do CLI Raijin Server."""
2
-
3
- __version__ = "0.2.16"
4
-
5
- __all__ = ["__version__"]
@@ -1,141 +0,0 @@
1
- """Provisiona MetalLB (L2) com pool de IPs para LoadBalancer em ambientes bare metal."""
2
-
3
- import socket
4
-
5
- import typer
6
-
7
- from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
8
-
9
-
10
- def _detect_node_name(ctx: ExecutionContext) -> str:
11
- """Tenta obter o nome do node via kubectl; fallback para hostname local."""
12
-
13
- result = run_cmd(
14
- [
15
- "kubectl",
16
- "get",
17
- "nodes",
18
- "-o",
19
- "jsonpath={.items[0].metadata.name}",
20
- ],
21
- ctx,
22
- check=False,
23
- )
24
- if result.returncode == 0:
25
- node_name = (result.stdout or "").strip()
26
- if node_name:
27
- return node_name
28
- return socket.gethostname()
29
-
30
-
31
- def _rollout_wait(kind: str, name: str, ctx: ExecutionContext) -> None:
32
- run_cmd([
33
- "kubectl",
34
- "-n",
35
- "metallb-system",
36
- "rollout",
37
- "status",
38
- f"{kind}/{name}",
39
- "--timeout",
40
- "180s",
41
- ], ctx, check=False)
42
-
43
-
44
- def _wait_webhook(ctx: ExecutionContext) -> None:
45
- # Descobre o nome do deployment do webhook (varia conforme chart), entao aguarda disponibilidade
46
- result = run_cmd(
47
- [
48
- "kubectl",
49
- "-n",
50
- "metallb-system",
51
- "get",
52
- "deploy",
53
- "-l",
54
- "app.kubernetes.io/component=webhook",
55
- "-o",
56
- "jsonpath={.items[0].metadata.name}",
57
- ],
58
- ctx,
59
- check=False,
60
- )
61
- if result.returncode == 0:
62
- name = (result.stdout or "").strip()
63
- if name:
64
- _rollout_wait("deployment", name, ctx)
65
-
66
-
67
- def run(ctx: ExecutionContext) -> None:
68
- require_root(ctx)
69
- typer.echo("Instalando MetalLB via Helm...")
70
-
71
- pool = typer.prompt(
72
- "Pool de IPs (range ou CIDR) para services LoadBalancer",
73
- default="192.168.1.100-192.168.1.250",
74
- )
75
-
76
- node_name = _detect_node_name(ctx)
77
-
78
- values = [
79
- # Permite agendar em control-plane de cluster single-node
80
- "controller.tolerations[0].key=node-role.kubernetes.io/control-plane",
81
- "controller.tolerations[0].operator=Exists",
82
- "controller.tolerations[0].effect=NoSchedule",
83
- "controller.tolerations[1].key=node-role.kubernetes.io/master",
84
- "controller.tolerations[1].operator=Exists",
85
- "controller.tolerations[1].effect=NoSchedule",
86
- "speaker.tolerations[0].key=node-role.kubernetes.io/control-plane",
87
- "speaker.tolerations[0].operator=Exists",
88
- "speaker.tolerations[0].effect=NoSchedule",
89
- "speaker.tolerations[1].key=node-role.kubernetes.io/master",
90
- "speaker.tolerations[1].operator=Exists",
91
- "speaker.tolerations[1].effect=NoSchedule",
92
- # Usa set em formato de mapa para preservar a chave com ponto (kubernetes.io/hostname)
93
- f"controller.nodeSelector={{\"kubernetes.io/hostname\":\"{node_name}\"}}",
94
- f"speaker.nodeSelector={{\"kubernetes.io/hostname\":\"{node_name}\"}}",
95
- ]
96
-
97
- # Instala control-plane + speaker
98
- helm_upgrade_install(
99
- release="metallb",
100
- chart="metallb",
101
- namespace="metallb-system",
102
- repo="metallb",
103
- repo_url="https://metallb.github.io/metallb",
104
- ctx=ctx,
105
- values=values,
106
- )
107
-
108
- # Espera recursos principais ficarem prontos
109
- _rollout_wait("deployment", "controller", ctx)
110
- _rollout_wait("daemonset", "speaker", ctx)
111
- _wait_webhook(ctx)
112
- run_cmd(["sleep", "5"], ctx, check=False) # pequeno buffer para webhook responder
113
-
114
- # Aplica IPAddressPool + L2Advertisement
115
- manifest = f"""
116
- apiVersion: metallb.io/v1beta1
117
- kind: IPAddressPool
118
- metadata:
119
- name: raijin-pool
120
- namespace: metallb-system
121
- spec:
122
- addresses:
123
- - {pool}
124
- ---
125
- apiVersion: metallb.io/v1beta1
126
- kind: L2Advertisement
127
- metadata:
128
- name: raijin-l2
129
- namespace: metallb-system
130
- spec:
131
- ipAddressPools:
132
- - raijin-pool
133
- """
134
-
135
- run_cmd(
136
- f"echo '{manifest}' | kubectl apply -f -",
137
- ctx,
138
- use_shell=True,
139
- )
140
-
141
- typer.secho("\n✓ MetalLB aplicado. Services LoadBalancer usarao o pool informado.", fg=typer.colors.GREEN, bold=True)
File without changes
File without changes