raijin-server 0.2.41__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of raijin-server might be problematic. Click here for more details.
- raijin_server/__init__.py +1 -1
- raijin_server/cli.py +6 -0
- raijin_server/modules/__init__.py +3 -1
- raijin_server/modules/grafana.py +365 -16
- raijin_server/modules/internal_dns.py +446 -0
- raijin_server/modules/kong.py +8 -4
- raijin_server/modules/minio.py +15 -5
- raijin_server/modules/observability_ingress.py +29 -1
- raijin_server/modules/prometheus.py +266 -3
- raijin_server/modules/traefik.py +35 -1
- raijin_server/modules/vpn_client.py +438 -0
- raijin_server-0.3.0.dist-info/METADATA +361 -0
- {raijin_server-0.2.41.dist-info → raijin_server-0.3.0.dist-info}/RECORD +17 -15
- raijin_server-0.2.41.dist-info/METADATA +0 -564
- {raijin_server-0.2.41.dist-info → raijin_server-0.3.0.dist-info}/WHEEL +0 -0
- {raijin_server-0.2.41.dist-info → raijin_server-0.3.0.dist-info}/entry_points.txt +0 -0
- {raijin_server-0.2.41.dist-info → raijin_server-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {raijin_server-0.2.41.dist-info → raijin_server-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -2,8 +2,12 @@
|
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
|
+
import json
|
|
5
6
|
import socket
|
|
7
|
+
import tempfile
|
|
8
|
+
import textwrap
|
|
6
9
|
import time
|
|
10
|
+
from pathlib import Path
|
|
7
11
|
|
|
8
12
|
import typer
|
|
9
13
|
|
|
@@ -16,6 +20,10 @@ from raijin_server.utils import (
|
|
|
16
20
|
)
|
|
17
21
|
|
|
18
22
|
DEFAULT_NAMESPACE = "observability"
|
|
23
|
+
LOCAL_PATH_PROVISIONER_URL = (
|
|
24
|
+
"https://raw.githubusercontent.com/rancher/local-path-provisioner/"
|
|
25
|
+
"v0.0.30/deploy/local-path-storage.yaml"
|
|
26
|
+
)
|
|
19
27
|
|
|
20
28
|
|
|
21
29
|
def _detect_node_name(ctx: ExecutionContext) -> str:
|
|
@@ -31,6 +39,7 @@ def _detect_node_name(ctx: ExecutionContext) -> str:
|
|
|
31
39
|
|
|
32
40
|
|
|
33
41
|
def _get_default_storage_class(ctx: ExecutionContext) -> str:
|
|
42
|
+
"""Retorna o nome da StorageClass default do cluster, se existir."""
|
|
34
43
|
if ctx.dry_run:
|
|
35
44
|
return ""
|
|
36
45
|
result = run_cmd(
|
|
@@ -39,12 +48,260 @@ def _get_default_storage_class(ctx: ExecutionContext) -> str:
|
|
|
39
48
|
"get",
|
|
40
49
|
"storageclass",
|
|
41
50
|
"-o",
|
|
42
|
-
"jsonpath={.items[?(@.metadata.annotations
|
|
51
|
+
"jsonpath={.items[?(@.metadata.annotations.storageclass\\.kubernetes\\.io/is-default-class=='true')].metadata.name}",
|
|
43
52
|
],
|
|
44
53
|
ctx,
|
|
45
54
|
check=False,
|
|
46
55
|
)
|
|
47
|
-
|
|
56
|
+
if result.returncode == 0 and (result.stdout or "").strip():
|
|
57
|
+
return (result.stdout or "").strip()
|
|
58
|
+
return ""
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _list_storage_classes(ctx: ExecutionContext) -> list:
    """Return the names of every StorageClass available in the cluster."""
    cmd = ["kubectl", "get", "storageclass", "-o", "jsonpath={.items[*].metadata.name}"]
    proc = run_cmd(cmd, ctx, check=False)
    names = (proc.stdout or "").strip()
    # A non-zero exit or empty jsonpath output both mean "no classes found".
    if proc.returncode != 0 or not names:
        return []
    return names.split()
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def _apply_manifest(ctx: ExecutionContext, manifest: str, description: str) -> bool:
    """Write *manifest* to a temporary YAML file and `kubectl apply` it.

    Returns True on success; prints a colored status line either way.
    """
    tmp_path = None
    try:
        # kubectl needs a file path, so persist the manifest first.
        with tempfile.NamedTemporaryFile("w", delete=False, suffix=".yaml") as tmp:
            tmp.write(manifest)
            tmp.flush()
            tmp_path = Path(tmp.name)
        apply_result = run_cmd(["kubectl", "apply", "-f", str(tmp_path)], ctx, check=False)
        if apply_result.returncode == 0:
            typer.secho(f" ✓ {description} aplicado.", fg=typer.colors.GREEN)
            return True
        typer.secho(f" Falha ao aplicar {description}.", fg=typer.colors.RED)
        return False
    finally:
        # Best-effort cleanup of the temporary file.
        if tmp_path and tmp_path.exists():
            tmp_path.unlink(missing_ok=True)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def _patch_local_path_provisioner_tolerations(ctx: ExecutionContext) -> None:
    """Add control-plane tolerations to the local-path-provisioner.

    Patches both the provisioner Deployment and the helper-pod template
    (stored as ``config.json`` in the ``local-path-config`` ConfigMap) so
    that PVC provisioning also works on single-node clusters where the
    control-plane node is tainted. Every kubectl call uses ``check=False``,
    so this whole routine is best-effort and never raises on failure.
    """
    typer.echo(" Configurando tolerations no local-path-provisioner...")

    # Strategic-merge patch: allow the Deployment's pods to schedule onto
    # nodes tainted as control-plane (new label) or master (legacy label).
    patch_deployment = textwrap.dedent(
        """
        spec:
          template:
            spec:
              tolerations:
              - key: node-role.kubernetes.io/control-plane
                operator: Exists
                effect: NoSchedule
              - key: node-role.kubernetes.io/master
                operator: Exists
                effect: NoSchedule
        """
    ).strip()

    result = run_cmd(
        [
            "kubectl", "-n", "local-path-storage", "patch", "deployment",
            "local-path-provisioner", "--patch", patch_deployment,
        ],
        ctx,
        check=False,
    )
    if result.returncode == 0:
        typer.secho(" ✓ Deployment patched com tolerations.", fg=typer.colors.GREEN)

    # The provisioner also spawns short-lived helper pods (they create and
    # remove the hostPath directories on the node). Their template lives in
    # the ConfigMap, so it needs the same tolerations.
    helper_pod_config = {
        "nodePathMap": [
            {
                "node": "DEFAULT_PATH_FOR_NON_LISTED_NODES",
                "paths": ["/opt/local-path-provisioner"]
            }
        ],
        "setupCommand": None,
        "teardownCommand": None,
        "helperPod": {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {},
            "spec": {
                "tolerations": [
                    {"key": "node-role.kubernetes.io/control-plane", "operator": "Exists", "effect": "NoSchedule"},
                    {"key": "node-role.kubernetes.io/master", "operator": "Exists", "effect": "NoSchedule"}
                ],
                "containers": [
                    {
                        "name": "helper-pod",
                        "image": "busybox:stable",
                        "imagePullPolicy": "IfNotPresent"
                    }
                ]
            }
        }
    }

    # config.json is stored as a *string* value inside the ConfigMap, hence
    # the double json.dumps: once for the config itself, once for the patch.
    config_json_str = json.dumps(helper_pod_config)
    patch_data = json.dumps({"data": {"config.json": config_json_str}})

    # Merge-patch the ConfigMap with the new helper-pod template.
    result = run_cmd(
        [
            "kubectl", "-n", "local-path-storage", "patch", "configmap",
            "local-path-config", "--type=merge", "-p", patch_data,
        ],
        ctx,
        check=False,
    )
    if result.returncode == 0:
        typer.secho(" ✓ ConfigMap patched para helper pods.", fg=typer.colors.GREEN)

    # Restart the Deployment so the patched spec takes effect.
    run_cmd(
        ["kubectl", "-n", "local-path-storage", "rollout", "restart", "deployment/local-path-provisioner"],
        ctx,
        check=False,
    )

    # Wait for the rollout to settle (bounded at 60s, best-effort).
    run_cmd(
        [
            "kubectl", "-n", "local-path-storage", "rollout", "status",
            "deployment/local-path-provisioner", "--timeout=60s",
        ],
        ctx,
        check=False,
    )
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def _install_local_path_provisioner(ctx: ExecutionContext) -> bool:
    """Install the local-path-provisioner so PVCs use local disk (NVMe/SSD).

    Returns True when the manifest applied successfully, False otherwise.
    """
    typer.echo("Instalando local-path-provisioner para storage local...")

    apply_result = run_cmd(["kubectl", "apply", "-f", LOCAL_PATH_PROVISIONER_URL], ctx, check=False)
    if apply_result.returncode != 0:
        typer.secho(" Falha ao instalar local-path-provisioner.", fg=typer.colors.RED)
        return False

    # Best-effort wait for the provisioner Deployment to become Ready.
    typer.echo(" Aguardando local-path-provisioner ficar Ready...")
    rollout_status = [
        "kubectl", "-n", "local-path-storage", "rollout", "status",
        "deployment/local-path-provisioner", "--timeout=60s",
    ]
    run_cmd(rollout_status, ctx, check=False)

    # Tolerations so it also runs on tainted control-plane (single-node) clusters.
    _patch_local_path_provisioner_tolerations(ctx)

    typer.secho(" ✓ local-path-provisioner instalado e configurado.", fg=typer.colors.GREEN)
    return True
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def _set_default_storage_class(ctx: ExecutionContext, name: str) -> None:
    """Mark *name* as the cluster's default StorageClass, clearing all others."""
    # Remove the default annotation from every other class first so the
    # cluster never ends up with two defaults. The trailing '-' deletes the
    # annotation; failures are ignored (the class may not carry it at all).
    for other in _list_storage_classes(ctx):
        if other == name:
            continue
        clear_cmd = [
            "kubectl", "annotate", "storageclass", other,
            "storageclass.kubernetes.io/is-default-class-",
            "--overwrite",
        ]
        run_cmd(clear_cmd, ctx, check=False)

    # Promote the requested class; this call must succeed (check=True).
    promote_cmd = [
        "kubectl", "annotate", "storageclass", name,
        "storageclass.kubernetes.io/is-default-class=true",
        "--overwrite",
    ]
    run_cmd(promote_cmd, ctx, check=True)
    typer.secho(f" ✓ StorageClass '{name}' definida como default.", fg=typer.colors.GREEN)
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
def _ensure_storage_class(ctx: ExecutionContext) -> str:
    """Ensure a usable StorageClass exists, installing local-path if needed.

    Returns the name of the StorageClass to use. May prompt the user, and
    raises ``typer.Exit(1)`` when no class exists and the user declines the
    local-path-provisioner install (or the install itself fails).
    """
    if ctx.dry_run:
        return "local-path"  # Dummy value for dry-run

    default_sc = _get_default_storage_class(ctx)
    available = _list_storage_classes(ctx)

    # If a default already exists (any class), use it as-is.
    if default_sc:
        typer.echo(f"StorageClass default detectada: {default_sc}")
        # If local-path is involved, make sure its provisioner tolerates
        # tainted control-plane nodes (single-node clusters).
        if default_sc == "local-path" or "local-path" in available:
            _patch_local_path_provisioner_tolerations(ctx)
        return default_sc

    # local-path exists but is not default: promote it to default.
    if "local-path" in available:
        typer.echo("StorageClass 'local-path' detectada.")
        _patch_local_path_provisioner_tolerations(ctx)
        _set_default_storage_class(ctx, "local-path")
        return "local-path"

    # Other classes exist but none is default: ask the user which to use.
    if available:
        typer.echo(f"StorageClasses disponiveis (sem default): {', '.join(available)}")
        choice = typer.prompt(
            f"Qual StorageClass usar? ({'/'.join(available)})",
            default=available[0],
        )
        return choice

    # No StorageClass at all: offer to install local-path automatically.
    typer.secho(
        "Nenhuma StorageClass encontrada no cluster.",
        fg=typer.colors.YELLOW,
    )
    install = typer.confirm(
        "Instalar local-path-provisioner para usar armazenamento local (NVMe/SSD)?",
        default=True,
    )
    if not install:
        typer.secho(
            "Abortando: Prometheus com PVC requer uma StorageClass.",
            fg=typer.colors.RED,
        )
        raise typer.Exit(1)

    if not _install_local_path_provisioner(ctx):
        raise typer.Exit(1)

    _set_default_storage_class(ctx, "local-path")
    return "local-path"
|
|
48
305
|
|
|
49
306
|
|
|
50
307
|
def _ensure_cluster_access(ctx: ExecutionContext) -> None:
|
|
@@ -165,11 +422,17 @@ def run(ctx: ExecutionContext) -> None:
|
|
|
165
422
|
|
|
166
423
|
kubectl_create_ns(namespace, ctx)
|
|
167
424
|
|
|
425
|
+
# Verifica se existe StorageClass default para sugerir no prompt
|
|
168
426
|
default_sc = _get_default_storage_class(ctx)
|
|
427
|
+
|
|
169
428
|
enable_persistence = typer.confirm(
|
|
170
429
|
"Habilitar PVC para Prometheus e Alertmanager?", default=bool(default_sc)
|
|
171
430
|
)
|
|
172
431
|
|
|
432
|
+
# Se habilitou PVC, garante que existe StorageClass disponivel
|
|
433
|
+
if enable_persistence:
|
|
434
|
+
default_sc = _ensure_storage_class(ctx)
|
|
435
|
+
|
|
173
436
|
node_name = _detect_node_name(ctx)
|
|
174
437
|
|
|
175
438
|
values = [
|
|
@@ -216,7 +479,7 @@ def run(ctx: ExecutionContext) -> None:
|
|
|
216
479
|
f"prometheusOperator.nodeSelector.kubernetes\\.io/hostname={node_name}",
|
|
217
480
|
]
|
|
218
481
|
|
|
219
|
-
extra_args = ["--wait", "--timeout", "
|
|
482
|
+
extra_args = ["--wait", "--timeout", "10m", "--atomic"]
|
|
220
483
|
|
|
221
484
|
chart_version = typer.prompt(
|
|
222
485
|
"Versao do chart (vazio para latest)",
|
raijin_server/modules/traefik.py
CHANGED
|
@@ -75,7 +75,25 @@ def run(ctx: ExecutionContext) -> None:
|
|
|
75
75
|
_uninstall_traefik(ctx)
|
|
76
76
|
|
|
77
77
|
acme_email = typer.prompt("Email para ACME/Let's Encrypt", default="admin@example.com")
|
|
78
|
-
|
|
78
|
+
|
|
79
|
+
# Dashboard do Traefik deve ser acessado via VPN, não publicamente
|
|
80
|
+
enable_dashboard = typer.confirm(
|
|
81
|
+
"Habilitar dashboard público? (NÃO recomendado - use VPN + port-forward)",
|
|
82
|
+
default=False
|
|
83
|
+
)
|
|
84
|
+
|
|
85
|
+
dashboard_host = ""
|
|
86
|
+
if enable_dashboard:
|
|
87
|
+
typer.secho(
|
|
88
|
+
"\n⚠️ ATENÇÃO: Expor dashboard do Traefik publicamente é um risco de segurança!",
|
|
89
|
+
fg=typer.colors.YELLOW,
|
|
90
|
+
bold=True,
|
|
91
|
+
)
|
|
92
|
+
typer.secho(
|
|
93
|
+
"Recomendação: Acesse via VPN com port-forward.\n",
|
|
94
|
+
fg=typer.colors.YELLOW,
|
|
95
|
+
)
|
|
96
|
+
dashboard_host = typer.prompt("Host para dashboard", default="traefik.local")
|
|
79
97
|
|
|
80
98
|
node_name = _detect_node_name(ctx)
|
|
81
99
|
|
|
@@ -112,3 +130,19 @@ def run(ctx: ExecutionContext) -> None:
|
|
|
112
130
|
ctx=ctx,
|
|
113
131
|
values=values,
|
|
114
132
|
)
|
|
133
|
+
|
|
134
|
+
typer.secho("\n✓ Traefik instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
|
|
135
|
+
|
|
136
|
+
if enable_dashboard and dashboard_host:
|
|
137
|
+
typer.echo(f"\nDashboard público: https://{dashboard_host}/dashboard/")
|
|
138
|
+
else:
|
|
139
|
+
typer.secho("\n🔒 Acesso Seguro ao Dashboard via VPN:", fg=typer.colors.CYAN, bold=True)
|
|
140
|
+
typer.echo("\n1. Configure VPN (se ainda não tiver):")
|
|
141
|
+
typer.echo(" sudo raijin vpn")
|
|
142
|
+
typer.echo("\n2. Conecte via WireGuard")
|
|
143
|
+
typer.echo("\n3. Acesse via NodePort ou port-forward:")
|
|
144
|
+
typer.echo(" kubectl -n traefik port-forward deployment/traefik 9000:9000")
|
|
145
|
+
typer.echo("\n4. Abra no navegador:")
|
|
146
|
+
typer.echo(" http://localhost:9000/dashboard/")
|
|
147
|
+
typer.echo("\nOu via service direto (se LoadBalancer/NodePort):")
|
|
148
|
+
typer.echo(" kubectl -n traefik get svc traefik")
|