raijin-server 0.2.20-py3-none-any.whl → 0.2.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- raijin_server/__init__.py +1 -1
- raijin_server/modules/cert_manager.py +84 -1
- raijin_server/modules/grafana.py +138 -2
- raijin_server/modules/harness.py +127 -12
- raijin_server/modules/istio.py +149 -5
- raijin_server/modules/kafka.py +166 -12
- raijin_server/modules/kong.py +148 -4
- raijin_server/modules/loki.py +155 -9
- raijin_server/modules/minio.py +181 -4
- raijin_server/modules/prometheus.py +160 -1
- raijin_server/modules/secrets.py +189 -5
- raijin_server/modules/velero.py +141 -22
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/METADATA +1 -1
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/RECORD +18 -18
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/WHEEL +0 -0
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/entry_points.txt +0 -0
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/licenses/LICENSE +0 -0
- {raijin_server-0.2.20.dist-info → raijin_server-0.2.22.dist-info}/top_level.txt +0 -0
raijin_server/modules/kafka.py
CHANGED
@@ -1,28 +1,170 @@
-"""Deploy Apache Kafka via Helm (Bitnami OCI)."""
+"""Deploy Apache Kafka via Helm (Bitnami OCI) - production-ready."""
+
+import socket
+import time
+from pathlib import Path
 
 import typer
 
-from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root
+from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd, write_file
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detect the node name for the nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_kafka(ctx: ExecutionContext) -> bool:
+    """Check whether a Kafka installation already exists."""
+    result = run_cmd(
+        ["helm", "status", "kafka", "-n", "kafka"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_kafka(ctx: ExecutionContext) -> None:
+    """Remove a previous Kafka installation."""
+    typer.echo("Removing previous Kafka installation...")
+
+    run_cmd(
+        ["helm", "uninstall", "kafka", "-n", "kafka"],
+        ctx,
+        check=False,
+    )
+
+    remove_data = typer.confirm("Remove PVCs (persistent data)?", default=False)
+    if remove_data:
+        run_cmd(
+            ["kubectl", "-n", "kafka", "delete", "pvc", "-l", "app.kubernetes.io/name=kafka"],
+            ctx,
+            check=False,
+        )
+        run_cmd(
+            ["kubectl", "-n", "kafka", "delete", "pvc", "-l", "app.kubernetes.io/name=zookeeper"],
+            ctx,
+            check=False,
+        )
+
+    time.sleep(5)
+
+
+def _wait_for_kafka_ready(ctx: ExecutionContext, timeout: int = 300) -> bool:
+    """Wait for the Kafka pods to become Ready."""
+    typer.echo("Waiting for Kafka pods to become Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "kafka", "get", "pods",
+                "-l", "app.kubernetes.io/name=kafka",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Kafka Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timed out waiting for Kafka.", fg=typer.colors.YELLOW)
+    return False
 
 
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
     typer.echo("Installing Kafka (Bitnami) via Helm OCI...")
 
-
+    # Optional cleanup prompt
+    if _check_existing_kafka(ctx):
+        cleanup = typer.confirm(
+            "Previous Kafka installation detected. Clean it up before reinstalling?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_kafka(ctx)
+
+    replicas = typer.prompt("Number of Kafka brokers", default="1")
+    zk_replicas = typer.prompt("Number of Zookeeper replicas", default="1")
     disk_size = typer.prompt("Storage per broker", default="20Gi")
+    zk_disk_size = typer.prompt("Storage per Zookeeper node", default="10Gi")
+
+    node_name = _detect_node_name(ctx)
+
+    values_yaml = f"""replicaCount: {replicas}
+zookeeper:
+  enabled: true
+  replicaCount: {zk_replicas}
+  persistence:
+    enabled: true
+    size: {zk_disk_size}
+  tolerations:
+    - key: node-role.kubernetes.io/control-plane
+      operator: Exists
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/master
+      operator: Exists
+      effect: NoSchedule
+  nodeSelector:
+    kubernetes.io/hostname: {node_name}
+persistence:
+  enabled: true
+  size: {disk_size}
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+metrics:
+  kafka:
+    enabled: true
+  jmx:
+    enabled: true
+  serviceMonitor:
+    enabled: true
+    namespace: kafka
+resources:
+  requests:
+    memory: 512Mi
+    cpu: 250m
+  limits:
+    memory: 2Gi
+"""
+
+    values_path = Path("/tmp/raijin-kafka-values.yaml")
+    write_file(values_path, values_yaml, ctx)
+
+    run_cmd(["kubectl", "create", "namespace", "kafka"], ctx, check=False)
 
     # Bitnami charts migrated to OCI. We use the OCI reference directly.
     chart_ref = "oci://registry-1.docker.io/bitnamicharts/kafka"
 
-    values = [
-        f"replicaCount={replicas}",
-        "zookeeper.enabled=true",
-        f"persistence.size={disk_size}",
-        "metrics.kafka.enabled=true",
-        "metrics.jmx.enabled=true",
-    ]
-
     helm_upgrade_install(
         release="kafka",
         chart=chart_ref,
@@ -30,5 +172,17 @@ def run(ctx: ExecutionContext) -> None:
         repo=None,
         repo_url=None,
         ctx=ctx,
-        values=values,
+        values=[],
+        extra_args=["-f", str(values_path)],
     )
+
+    if not ctx.dry_run:
+        _wait_for_kafka_ready(ctx)
+
+    typer.secho("\n✓ Kafka installed successfully.", fg=typer.colors.GREEN, bold=True)
+    typer.echo("\nTo connect to Kafka from inside the cluster:")
+    typer.echo("  kafka.kafka.svc.cluster.local:9092")
+    typer.echo("\nFor a local port-forward:")
+    typer.echo("  kubectl -n kafka port-forward svc/kafka 9092:9092")
+    typer.echo("\nTo create a test topic:")
+    typer.echo("  kubectl -n kafka exec -it kafka-0 -- kafka-topics.sh --create --topic test --bootstrap-server localhost:9092")
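
Worth noting when reading the new `_wait_for_*_ready` helpers: they poll pod *phase*, and a pod reports `Running` even while its containers are still failing readiness probes. A stricter standalone sketch would poll the pods' `Ready` condition instead (this assumes only `kubectl` on the PATH; `wait_ready` is a hypothetical helper, not part of the package):

    import subprocess
    import time

    def wait_ready(namespace: str, selector: str, timeout: int = 300) -> bool:
        """Poll the pods' Ready condition (not just phase) via kubectl."""
        jsonpath = (
            '{range .items[*]}{.metadata.name}='
            '{.status.conditions[?(@.type=="Ready")].status} {end}'
        )
        deadline = time.time() + timeout
        while time.time() < deadline:
            proc = subprocess.run(
                ["kubectl", "-n", namespace, "get", "pods",
                 "-l", selector, "-o", f"jsonpath={jsonpath}"],
                capture_output=True,
                text=True,
            )
            # Each token is "<pod-name>=<True|False>"; all must be True.
            pairs = [p.rsplit("=", 1) for p in proc.stdout.split() if "=" in p]
            if proc.returncode == 0 and pairs and all(s == "True" for _, s in pairs):
                return True
            time.sleep(10)
        return False

    # Example: wait_ready("kafka", "app.kubernetes.io/name=kafka")
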
raijin_server/modules/kong.py
CHANGED
@@ -1,13 +1,145 @@
-"""Kong configuration via Helm."""
+"""Kong Gateway configuration via Helm with production-ready settings."""
+
+import socket
+import time
 
 import typer
 
-from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root
+from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detect the node name for the nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_kong(ctx: ExecutionContext) -> bool:
+    """Check whether a Kong installation already exists."""
+    result = run_cmd(
+        ["helm", "status", "kong", "-n", "kong"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_kong(ctx: ExecutionContext) -> None:
+    """Remove a previous Kong installation."""
+    typer.echo("Removing previous Kong installation...")
+
+    run_cmd(
+        ["helm", "uninstall", "kong", "-n", "kong"],
+        ctx,
+        check=False,
+    )
+
+    run_cmd(
+        ["kubectl", "delete", "namespace", "kong", "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_kong_ready(ctx: ExecutionContext, timeout: int = 180) -> bool:
+    """Wait for the Kong pods to become Ready."""
+    typer.echo("Waiting for Kong pods to become Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "kong", "get", "pods",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(f" All {len(pods)} pods Running.", fg=typer.colors.GREEN)
+                    return True
+
+                pending = [name for name, phase in pods if phase != "Running"]
+                if pending:
+                    typer.echo(f" Waiting for: {', '.join(pending[:3])}...")
+
+        time.sleep(10)
+
+    typer.secho(" Timed out waiting for Kong pods.", fg=typer.colors.YELLOW)
+    return False
 
 
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
-    typer.echo("Installing Kong via Helm...")
+    typer.echo("Installing Kong Gateway via Helm...")
+
+    # Optional cleanup prompt
+    if _check_existing_kong(ctx):
+        cleanup = typer.confirm(
+            "Previous Kong installation detected. Clean it up before reinstalling?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_kong(ctx)
+
+    # Interactive settings
+    enable_admin = typer.confirm("Enable the Admin API (for management)?", default=True)
+    db_mode = typer.prompt(
+        "Database mode (dbless/postgres)",
+        default="dbless",
+    )
+
+    node_name = _detect_node_name(ctx)
+
+    values = [
+        # Operating mode
+        f"env.database={db_mode}",
+        # Ingress controller
+        "ingressController.installCRDs=true",
+        "ingressController.enabled=true",
+        # Proxy service
+        "proxy.enabled=true",
+        "proxy.type=LoadBalancer",
+        # Tolerations for control-plane nodes
+        "tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "tolerations[0].operator=Exists",
+        "tolerations[0].effect=NoSchedule",
+        "tolerations[1].key=node-role.kubernetes.io/master",
+        "tolerations[1].operator=Exists",
+        "tolerations[1].effect=NoSchedule",
+        # NodeSelector
+        f"nodeSelector.kubernetes\\.io/hostname={node_name}",
+    ]
+
+    # Admin API
+    if enable_admin:
+        values.extend([
+            "admin.enabled=true",
+            "admin.type=ClusterIP",
+            "admin.http.enabled=true",
+        ])
+    else:
+        values.append("admin.enabled=false")
+
     helm_upgrade_install(
         release="kong",
         chart="kong",
@@ -15,5 +147,17 @@ def run(ctx: ExecutionContext) -> None:
         repo="kong",
         repo_url="https://charts.konghq.com",
         ctx=ctx,
-        values=
+        values=values,
     )
+
+    # Wait for the pods to become ready
+    if not ctx.dry_run:
+        _wait_for_kong_ready(ctx)
+
+    # Show useful information
+    typer.secho("\n✓ Kong installed successfully.", fg=typer.colors.GREEN, bold=True)
+    typer.echo("\nTo check the service:")
+    typer.echo("  kubectl -n kong get svc kong-kong-proxy")
+    if enable_admin:
+        typer.echo("\nTo access the Admin API (port-forward):")
+        typer.echo("  kubectl -n kong port-forward svc/kong-kong-admin 8001:8001")
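
One detail in the kong values list: Helm `--set` treats dots as key-path separators, so the literal dots in the `kubernetes.io/hostname` label key are backslash-escaped (`nodeSelector.kubernetes\.io/hostname`). The diff does not show `helm_upgrade_install` itself; as a rough, hypothetical sketch of the command such a values list presumably expands to:

    import subprocess

    node_name = "node-1"  # placeholder for _detect_node_name(ctx)
    values = [
        "env.database=dbless",
        # Escaped dots keep Helm from splitting the literal label key
        # into nested keys.
        f"nodeSelector.kubernetes\\.io/hostname={node_name}",
    ]

    cmd = [
        "helm", "upgrade", "--install", "kong", "kong",
        "--repo", "https://charts.konghq.com",
        "--namespace", "kong", "--create-namespace",
    ]
    for item in values:
        cmd += ["--set", item]

    print(" ".join(cmd))  # inspect before running
    # subprocess.run(cmd, check=True)
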
raijin_server/modules/loki.py
CHANGED
@@ -1,20 +1,156 @@
-"""Loki configuration via Helm."""
+"""Loki Stack configuration via Helm (production-ready)."""
+
+import socket
+import time
+from pathlib import Path
 
 import typer
 
-from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root
+from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd, write_file
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detect the node name for the nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_loki(ctx: ExecutionContext) -> bool:
+    """Check whether a Loki installation already exists."""
+    result = run_cmd(
+        ["helm", "status", "loki", "-n", "observability"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_loki(ctx: ExecutionContext) -> None:
+    """Remove a previous Loki installation."""
+    typer.echo("Removing previous Loki installation...")
+
+    run_cmd(
+        ["helm", "uninstall", "loki", "-n", "observability"],
+        ctx,
+        check=False,
+    )
+
+    remove_data = typer.confirm("Remove PVCs (persistent data)?", default=False)
+    if remove_data:
+        run_cmd(
+            ["kubectl", "-n", "observability", "delete", "pvc", "-l", "app=loki"],
+            ctx,
+            check=False,
+        )
+
+    time.sleep(5)
+
+
+def _wait_for_loki_ready(ctx: ExecutionContext, timeout: int = 180) -> bool:
+    """Wait for the Loki pods to become Ready."""
+    typer.echo("Waiting for Loki pods to become Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "observability", "get", "pods",
+                "-l", "app=loki",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Loki Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timed out waiting for Loki.", fg=typer.colors.YELLOW)
+    return False
 
 
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
     typer.echo("Installing Loki Stack via Helm...")
 
-
-
-
-
-
-
+    # Optional cleanup prompt
+    if _check_existing_loki(ctx):
+        cleanup = typer.confirm(
+            "Previous Loki installation detected. Clean it up before reinstalling?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_loki(ctx)
+
+    retention_hours = typer.prompt("Log retention in hours", default="168")
+    persistence_size = typer.prompt("Storage size", default="20Gi")
+
+    node_name = _detect_node_name(ctx)
+
+    values_yaml = f"""loki:
+  persistence:
+    enabled: true
+    size: {persistence_size}
+  config:
+    table_manager:
+      retention_deletes_enabled: true
+      retention_period: {retention_hours}h
+  tolerations:
+    - key: node-role.kubernetes.io/control-plane
+      operator: Exists
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/master
+      operator: Exists
+      effect: NoSchedule
+  nodeSelector:
+    kubernetes.io/hostname: {node_name}
+  resources:
+    requests:
+      memory: 256Mi
+      cpu: 100m
+    limits:
+      memory: 512Mi
+
+promtail:
+  enabled: true
+  tolerations:
+    - key: node-role.kubernetes.io/control-plane
+      operator: Exists
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/master
+      operator: Exists
+      effect: NoSchedule
+  resources:
+    requests:
+      memory: 128Mi
+      cpu: 50m
+    limits:
+      memory: 256Mi
+"""
+
+    values_path = Path("/tmp/raijin-loki-values.yaml")
+    write_file(values_path, values_yaml, ctx)
+
+    run_cmd(["kubectl", "create", "namespace", "observability"], ctx, check=False)
 
     helm_upgrade_install(
         release="loki",
@@ -23,5 +159,15 @@ def run(ctx: ExecutionContext) -> None:
         repo="grafana",
         repo_url="https://grafana.github.io/helm-charts",
         ctx=ctx,
-        values=
+        values=[],
+        extra_args=["-f", str(values_path)],
     )
+
+    if not ctx.dry_run:
+        _wait_for_loki_ready(ctx)
+
+    typer.secho("\n✓ Loki Stack installed successfully.", fg=typer.colors.GREEN, bold=True)
+    typer.echo("\nTo access Loki via port-forward:")
+    typer.echo("  kubectl -n observability port-forward svc/loki 3100:3100")
+    typer.echo("\nTo check logs:")
+    typer.echo("  curl http://localhost:3100/ready")
|