raijin-server 0.2.20__py3-none-any.whl → 0.2.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- raijin_server/__init__.py +1 -1
- raijin_server/modules/cert_manager.py +84 -1
- raijin_server/modules/grafana.py +138 -2
- raijin_server/modules/harness.py +127 -12
- raijin_server/modules/istio.py +149 -5
- raijin_server/modules/kafka.py +166 -12
- raijin_server/modules/kong.py +148 -4
- raijin_server/modules/loki.py +155 -9
- raijin_server/modules/minio.py +181 -4
- raijin_server/modules/prometheus.py +160 -1
- raijin_server/modules/secrets.py +189 -5
- raijin_server/modules/velero.py +141 -22
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/METADATA +1 -1
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/RECORD +18 -18
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/WHEEL +0 -0
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/entry_points.txt +0 -0
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/licenses/LICENSE +0 -0
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/top_level.txt +0 -0
raijin_server/modules/minio.py
CHANGED
@@ -1,13 +1,174 @@
-"""Deploy do MinIO via Helm."""
+"""Deploy do MinIO via Helm com configuracoes production-ready."""
+
+import secrets
+import socket
+import time
 
 import typer
 
-from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root
+from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _generate_secret(length: int = 32) -> str:
+    """Gera secret aleatório seguro."""
+    return secrets.token_urlsafe(length)[:length]
+
+
+def _check_existing_minio(ctx: ExecutionContext) -> bool:
+    """Verifica se existe instalacao do MinIO."""
+    result = run_cmd(
+        ["helm", "status", "minio", "-n", "minio"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_minio(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do MinIO."""
+    typer.echo("Removendo instalacao anterior do MinIO...")
+
+    run_cmd(
+        ["helm", "uninstall", "minio", "-n", "minio"],
+        ctx,
+        check=False,
+    )
+
+    # Pergunta se quer remover PVCs (dados)
+    remove_data = typer.confirm(
+        "Remover PVCs (dados persistentes)?",
+        default=False,
+    )
+    if remove_data:
+        run_cmd(
+            ["kubectl", "-n", "minio", "delete", "pvc", "--all"],
+            ctx,
+            check=False,
+        )
+
+    run_cmd(
+        ["kubectl", "delete", "namespace", "minio", "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_minio_ready(ctx: ExecutionContext, timeout: int = 180) -> bool:
+    """Aguarda pods do MinIO ficarem Ready."""
+    typer.echo("Aguardando pods do MinIO ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "minio", "get", "pods",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(f" Todos os {len(pods)} pods Running.", fg=typer.colors.GREEN)
+                    return True
+
+                pending = [name for name, phase in pods if phase != "Running"]
+                if pending:
+                    typer.echo(f" Aguardando: {', '.join(pending[:3])}...")
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando pods do MinIO.", fg=typer.colors.YELLOW)
+    return False
 
 
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
-    typer.echo("Instalando MinIO via Helm
+    typer.echo("Instalando MinIO via Helm...")
+
+    # Prompt opcional de limpeza
+    if _check_existing_minio(ctx):
+        cleanup = typer.confirm(
+            "Instalacao anterior do MinIO detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_minio(ctx)
+
+    # Configuracoes interativas
+    mode = typer.prompt(
+        "Modo de operacao (standalone/distributed)",
+        default="standalone",
+    )
+
+    root_user = typer.prompt("Root user (admin)", default="minio-admin")
+    root_password = typer.prompt(
+        "Root password (deixe vazio para gerar)",
+        default="",
+        hide_input=True,
+    )
+    if not root_password:
+        root_password = _generate_secret(24)
+        typer.secho(f" Password gerado: {root_password}", fg=typer.colors.CYAN)
+
+    persistence_size = typer.prompt("Tamanho do storage (ex: 10Gi, 50Gi)", default="10Gi")
+    enable_console = typer.confirm("Habilitar Console Web?", default=True)
+
+    node_name = _detect_node_name(ctx)
+
+    values = [
+        f"mode={mode}",
+        f"rootUser={root_user}",
+        f"rootPassword={root_password}",
+        # Persistence
+        "persistence.enabled=true",
+        f"persistence.size={persistence_size}",
+        # Resources (production defaults)
+        "resources.requests.memory=512Mi",
+        "resources.requests.cpu=250m",
+        "resources.limits.memory=1Gi",
+        # Tolerations para control-plane
+        "tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "tolerations[0].operator=Exists",
+        "tolerations[0].effect=NoSchedule",
+        "tolerations[1].key=node-role.kubernetes.io/master",
+        "tolerations[1].operator=Exists",
+        "tolerations[1].effect=NoSchedule",
+        # NodeSelector
+        f"nodeSelector.kubernetes\\.io/hostname={node_name}",
+    ]
+
+    # Console
+    if enable_console:
+        values.extend([
+            "consoleService.type=ClusterIP",
+            "consoleIngress.enabled=false",
+        ])
+
     helm_upgrade_install(
         release="minio",
         chart="minio",
@@ -15,5 +176,21 @@ def run(ctx: ExecutionContext) -> None:
         repo="minio",
         repo_url="https://charts.min.io/",
         ctx=ctx,
-        values=
+        values=values,
     )
+
+    # Aguarda pods ficarem prontos
+    if not ctx.dry_run:
+        _wait_for_minio_ready(ctx)
+
+    # Mostra informacoes uteis
+    typer.secho("\n✓ MinIO instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
+    typer.echo("\nCredenciais:")
+    typer.echo(f" Root User: {root_user}")
+    typer.echo(f" Root Password: {root_password}")
+    typer.echo("\nPara acessar a API (port-forward):")
+    typer.echo(" kubectl -n minio port-forward svc/minio 9000:9000")
+    if enable_console:
+        typer.echo("\nPara acessar o Console Web (port-forward):")
+        typer.echo(" kubectl -n minio port-forward svc/minio-console 9001:9001")
+        typer.echo(" Acesse: http://localhost:9001")
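
For reference, a minimal standalone sketch of the pod-phase parsing that the new _wait_for_minio_ready helper performs; the sample kubectl jsonpath output below is hypothetical, not taken from the package.

# Sketch only: parse "name=phase" pairs like the readiness loop above does.
sample_output = "minio-0=Running minio-console-7c9d=Pending"  # hypothetical kubectl output

pods = []
for item in sample_output.split():
    if "=" in item:
        name, phase = item.rsplit("=", 1)  # split on the last '=' only
        pods.append((name, phase))

all_running = bool(pods) and all(phase == "Running" for _, phase in pods)
pending = [name for name, phase in pods if phase != "Running"]
print(all_running)  # False
print(pending)      # ['minio-console-7c9d']
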
raijin_server/modules/prometheus.py
CHANGED
@@ -2,6 +2,9 @@
 
 from __future__ import annotations
 
+import socket
+import time
+
 import typer
 
 from raijin_server.utils import (
@@ -15,6 +18,18 @@ from raijin_server.utils import (
 DEFAULT_NAMESPACE = "observability"
 
 
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
 def _get_default_storage_class(ctx: ExecutionContext) -> str:
     if ctx.dry_run:
         return ""
@@ -41,6 +56,96 @@ def _ensure_cluster_access(ctx: ExecutionContext) -> None:
         raise typer.Exit(code=1)
 
 
+def _check_existing_prometheus(ctx: ExecutionContext, namespace: str) -> bool:
+    """Verifica se existe instalacao do Prometheus Stack."""
+    result = run_cmd(
+        ["helm", "status", "kube-prometheus-stack", "-n", namespace],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_prometheus(ctx: ExecutionContext, namespace: str) -> None:
+    """Remove instalacao anterior do Prometheus Stack."""
+    typer.echo("Removendo instalacao anterior do kube-prometheus-stack...")
+
+    run_cmd(
+        ["helm", "uninstall", "kube-prometheus-stack", "-n", namespace],
+        ctx,
+        check=False,
+    )
+
+    # Remove CRDs if requested
+    remove_crds = typer.confirm("Remover CRDs do Prometheus (pode afetar outros operadores)?", default=False)
+    if remove_crds:
+        crds = [
+            "alertmanagerconfigs.monitoring.coreos.com",
+            "alertmanagers.monitoring.coreos.com",
+            "podmonitors.monitoring.coreos.com",
+            "probes.monitoring.coreos.com",
+            "prometheusagents.monitoring.coreos.com",
+            "prometheuses.monitoring.coreos.com",
+            "prometheusrules.monitoring.coreos.com",
+            "scrapeconfigs.monitoring.coreos.com",
+            "servicemonitors.monitoring.coreos.com",
+            "thanosrulers.monitoring.coreos.com",
+        ]
+        for crd in crds:
+            run_cmd(["kubectl", "delete", "crd", crd], ctx, check=False)
+
+    remove_data = typer.confirm("Remover PVCs (dados persistentes)?", default=False)
+    if remove_data:
+        run_cmd(
+            ["kubectl", "-n", namespace, "delete", "pvc", "-l", "app.kubernetes.io/name=prometheus"],
+            ctx,
+            check=False,
+        )
+        run_cmd(
+            ["kubectl", "-n", namespace, "delete", "pvc", "-l", "app.kubernetes.io/name=alertmanager"],
+            ctx,
+            check=False,
+        )
+
+    time.sleep(5)
+
+
+def _wait_for_prometheus_ready(ctx: ExecutionContext, namespace: str, timeout: int = 300) -> bool:
+    """Aguarda pods do Prometheus Stack ficarem Ready."""
+    typer.echo("Aguardando pods do Prometheus Stack ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", namespace, "get", "pods",
+                "-l", "app.kubernetes.io/instance=kube-prometheus-stack",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Prometheus Stack Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando Prometheus Stack.", fg=typer.colors.YELLOW)
+    return False
+
+
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
     _ensure_cluster_access(ctx)
@@ -48,6 +153,16 @@ def run(ctx: ExecutionContext) -> None:
     typer.echo("Instalando kube-prometheus-stack via Helm...")
 
     namespace = typer.prompt("Namespace destino", default=DEFAULT_NAMESPACE)
+
+    # Prompt opcional de limpeza
+    if _check_existing_prometheus(ctx, namespace):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Prometheus Stack detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_prometheus(ctx, namespace)
+
     kubectl_create_ns(namespace, ctx)
 
     default_sc = _get_default_storage_class(ctx)
@@ -55,6 +170,8 @@ def run(ctx: ExecutionContext) -> None:
         "Habilitar PVC para Prometheus e Alertmanager?", default=bool(default_sc)
     )
 
+    node_name = _detect_node_name(ctx)
+
     values = [
         "grafana.enabled=false",
         "prometheus.prometheusSpec.retention=15d",
@@ -62,6 +179,41 @@ def run(ctx: ExecutionContext) -> None:
         "prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false",
         "prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false",
         "defaultRules.create=true",
+        # Tolerations for control-plane nodes
+        "prometheus.prometheusSpec.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "prometheus.prometheusSpec.tolerations[0].operator=Exists",
+        "prometheus.prometheusSpec.tolerations[0].effect=NoSchedule",
+        "prometheus.prometheusSpec.tolerations[1].key=node-role.kubernetes.io/master",
+        "prometheus.prometheusSpec.tolerations[1].operator=Exists",
+        "prometheus.prometheusSpec.tolerations[1].effect=NoSchedule",
+        "alertmanager.alertmanagerSpec.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "alertmanager.alertmanagerSpec.tolerations[0].operator=Exists",
+        "alertmanager.alertmanagerSpec.tolerations[0].effect=NoSchedule",
+        "alertmanager.alertmanagerSpec.tolerations[1].key=node-role.kubernetes.io/master",
+        "alertmanager.alertmanagerSpec.tolerations[1].operator=Exists",
+        "alertmanager.alertmanagerSpec.tolerations[1].effect=NoSchedule",
+        "prometheusOperator.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "prometheusOperator.tolerations[0].operator=Exists",
+        "prometheusOperator.tolerations[0].effect=NoSchedule",
+        "prometheusOperator.tolerations[1].key=node-role.kubernetes.io/master",
+        "prometheusOperator.tolerations[1].operator=Exists",
+        "prometheusOperator.tolerations[1].effect=NoSchedule",
+        "kube-state-metrics.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "kube-state-metrics.tolerations[0].operator=Exists",
+        "kube-state-metrics.tolerations[0].effect=NoSchedule",
+        "kube-state-metrics.tolerations[1].key=node-role.kubernetes.io/master",
+        "kube-state-metrics.tolerations[1].operator=Exists",
+        "kube-state-metrics.tolerations[1].effect=NoSchedule",
+        "prometheus-node-exporter.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "prometheus-node-exporter.tolerations[0].operator=Exists",
+        "prometheus-node-exporter.tolerations[0].effect=NoSchedule",
+        "prometheus-node-exporter.tolerations[1].key=node-role.kubernetes.io/master",
+        "prometheus-node-exporter.tolerations[1].operator=Exists",
+        "prometheus-node-exporter.tolerations[1].effect=NoSchedule",
+        # NodeSelector
+        f"prometheus.prometheusSpec.nodeSelector.kubernetes\\.io/hostname={node_name}",
+        f"alertmanager.alertmanagerSpec.nodeSelector.kubernetes\\.io/hostname={node_name}",
+        f"prometheusOperator.nodeSelector.kubernetes\\.io/hostname={node_name}",
     ]
 
     extra_args = ["--wait", "--timeout", "5m", "--atomic"]
@@ -112,4 +264,11 @@ def run(ctx: ExecutionContext) -> None:
         extra_args=extra_args,
     )
 
-
+    if not ctx.dry_run:
+        _wait_for_prometheus_ready(ctx, namespace)
+
+    typer.secho("\n✓ kube-prometheus-stack instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
+    typer.echo("\nPara acessar Prometheus via port-forward:")
+    typer.echo(f" kubectl -n {namespace} port-forward svc/kube-prometheus-stack-prometheus 9090:9090")
+    typer.echo("\nPara acessar Alertmanager via port-forward:")
+    typer.echo(f" kubectl -n {namespace} port-forward svc/kube-prometheus-stack-alertmanager 9093:9093")
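
The escaped dot in kubernetes\.io/hostname only matters if each entry in `values` is forwarded to Helm as a --set flag; that mapping, and the chart reference and namespace used below, are assumptions not shown in this diff. A rough sketch of the command such values would translate into:

# Hedged sketch: how --set-style values with an escaped dot might be assembled.
node_name = "node-01"  # hypothetical stand-in for _detect_node_name(ctx)

values = [
    "grafana.enabled=false",
    "prometheus.prometheusSpec.retention=15d",
    f"prometheus.prometheusSpec.nodeSelector.kubernetes\\.io/hostname={node_name}",
]

# Chart reference and namespace are illustrative, not confirmed by the diff.
cmd = ["helm", "upgrade", "--install", "kube-prometheus-stack",
       "prometheus-community/kube-prometheus-stack", "-n", "observability"]
for item in values:
    cmd += ["--set", item]
cmd += ["--wait", "--timeout", "5m", "--atomic"]

print(" ".join(cmd))
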
raijin_server/modules/secrets.py
CHANGED
@@ -1,10 +1,12 @@
-"""Automacao de sealed-secrets e external-secrets via Helm.
+"""Automacao de sealed-secrets e external-secrets via Helm (production-ready).
 
 Instala os controladores necessários para criptografar e consumir segredos
 em clusters Kubernetes. Inclui opcionalmente a exportacao do certificado
 publico do sealed-secrets para permitir geracao de manifests lacrados.
 """
 
+import socket
+import time
 from pathlib import Path
 
 import typer
@@ -15,12 +17,107 @@ from raijin_server.utils import (
     helm_upgrade_install,
     require_root,
     run_cmd,
+    write_file,
 )
 
 SEALED_NAMESPACE = "kube-system"
 ESO_NAMESPACE = "external-secrets"
 
 
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_sealed_secrets(ctx: ExecutionContext, namespace: str) -> bool:
+    """Verifica se existe instalacao do Sealed Secrets."""
+    result = run_cmd(
+        ["helm", "status", "sealed-secrets", "-n", namespace],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _check_existing_external_secrets(ctx: ExecutionContext, namespace: str) -> bool:
+    """Verifica se existe instalacao do External Secrets."""
+    result = run_cmd(
+        ["helm", "status", "external-secrets", "-n", namespace],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_sealed_secrets(ctx: ExecutionContext, namespace: str) -> None:
+    """Remove instalacao anterior do Sealed Secrets."""
+    typer.echo("Removendo instalacao anterior do Sealed Secrets...")
+
+    run_cmd(
+        ["helm", "uninstall", "sealed-secrets", "-n", namespace],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _uninstall_external_secrets(ctx: ExecutionContext, namespace: str) -> None:
+    """Remove instalacao anterior do External Secrets."""
+    typer.echo("Removendo instalacao anterior do External Secrets...")
+
+    run_cmd(
+        ["helm", "uninstall", "external-secrets", "-n", namespace],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_sealed_secrets_ready(ctx: ExecutionContext, namespace: str, timeout: int = 120) -> bool:
+    """Aguarda pods do Sealed Secrets ficarem Ready."""
+    typer.echo("Aguardando pods do Sealed Secrets ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", namespace, "get", "pods",
+                "-l", "app.kubernetes.io/name=sealed-secrets",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Sealed Secrets Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(5)
+
+    typer.secho(" Timeout aguardando Sealed Secrets.", fg=typer.colors.YELLOW)
+    return False
+
+
 def _export_sealed_cert(namespace: str, ctx: ExecutionContext) -> None:
     """Exporta o certificado publico do sealed-secrets para um caminho local."""
 
@@ -67,8 +164,40 @@ def run(ctx: ExecutionContext) -> None:
     sealed_ns = typer.prompt("Namespace para sealed-secrets", default=SEALED_NAMESPACE)
     eso_ns = typer.prompt("Namespace para external-secrets", default=ESO_NAMESPACE)
 
+    node_name = _detect_node_name(ctx)
+
     # sealed-secrets
     typer.secho("\n== Sealed Secrets ==", fg=typer.colors.CYAN, bold=True)
+
+    # Prompt opcional de limpeza
+    if _check_existing_sealed_secrets(ctx, sealed_ns):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Sealed Secrets detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_sealed_secrets(ctx, sealed_ns)
+
+    sealed_values_yaml = f"""tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+resources:
+  requests:
+    memory: 64Mi
+    cpu: 50m
+  limits:
+    memory: 128Mi
+"""
+
+    sealed_values_path = Path("/tmp/raijin-sealed-secrets-values.yaml")
+    write_file(sealed_values_path, sealed_values_yaml, ctx)
+
     helm_upgrade_install(
         "sealed-secrets",
         "sealed-secrets",
@@ -77,8 +206,12 @@ def run(ctx: ExecutionContext) -> None:
         repo="bitnami-labs",
         repo_url="https://bitnami-labs.github.io/sealed-secrets",
         create_namespace=True,
+        extra_args=["-f", str(sealed_values_path)],
     )
 
+    if not ctx.dry_run:
+        _wait_for_sealed_secrets_ready(ctx, sealed_ns)
+
     typer.echo(
         "Para criar sealed-secrets a partir do seu desktop, exporte o certificado publico e use kubeseal."
     )
@@ -87,7 +220,57 @@ def run(ctx: ExecutionContext) -> None:
 
     # external-secrets
     typer.secho("\n== External Secrets Operator ==", fg=typer.colors.CYAN, bold=True)
-
+
+    # Prompt opcional de limpeza
+    if _check_existing_external_secrets(ctx, eso_ns):
+        cleanup = typer.confirm(
+            "Instalacao anterior do External Secrets detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_external_secrets(ctx, eso_ns)
+
+    eso_values_yaml = f"""installCRDs: true
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+webhook:
+  tolerations:
+    - key: node-role.kubernetes.io/control-plane
+      operator: Exists
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/master
+      operator: Exists
+      effect: NoSchedule
+  nodeSelector:
+    kubernetes.io/hostname: {node_name}
+certController:
+  tolerations:
+    - key: node-role.kubernetes.io/control-plane
+      operator: Exists
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/master
+      operator: Exists
+      effect: NoSchedule
+  nodeSelector:
+    kubernetes.io/hostname: {node_name}
+resources:
+  requests:
+    memory: 64Mi
+    cpu: 50m
+  limits:
+    memory: 128Mi
+"""
+
+    eso_values_path = Path("/tmp/raijin-external-secrets-values.yaml")
+    write_file(eso_values_path, eso_values_yaml, ctx)
+
     helm_upgrade_install(
         "external-secrets",
         "external-secrets",
@@ -96,14 +279,15 @@ def run(ctx: ExecutionContext) -> None:
         repo="external-secrets",
         repo_url="https://charts.external-secrets.io",
         create_namespace=True,
-        extra_args=
+        extra_args=["-f", str(eso_values_path)],
     )
 
+    typer.secho("\n✓ Secrets management instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
     typer.echo(
-        "
+        "\nExternal Secrets Operator instalado. Configure um SecretStore/ClusterSecretStore conforme seu provedor (AWS/GCP/Vault)."
     )
 
     typer.secho("\nDicas rapidas:", fg=typer.colors.GREEN)
-    typer.echo("- Gere sealed-secrets localmente: kubeseal --controller-namespace
+    typer.echo(f"- Gere sealed-secrets localmente: kubeseal --controller-namespace {sealed_ns} --controller-name sealed-secrets < secret.yaml > sealed.yaml")
     typer.echo("- Para ESO: crie um SecretStore apontando para seu backend e um ExternalSecret referenciando os keys.")
 
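
A minimal sketch of what the new sealed-secrets flow produces on disk, assuming write_file simply writes the rendered YAML and the "-f" extra args point Helm at that file; the node name and the use of pathlib's write_text below are illustrative stand-ins for the package's own helpers.

# Sketch only: render a values file like the one sealed_values_yaml builds,
# then produce the extra args that would be appended to the Helm invocation.
from pathlib import Path

node_name = "node-01"  # stand-in for _detect_node_name(ctx)

sealed_values_yaml = f"""tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
nodeSelector:
  kubernetes.io/hostname: {node_name}
"""

sealed_values_path = Path("/tmp/raijin-sealed-secrets-values.yaml")
sealed_values_path.write_text(sealed_values_yaml)  # write_file(..., ctx) in the package

extra_args = ["-f", str(sealed_values_path)]
print(extra_args)  # ['-f', '/tmp/raijin-sealed-secrets-values.yaml']
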