raijin-server 0.2.21-py3-none-any.whl → 0.2.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- raijin_server/__init__.py +1 -1
- raijin_server/modules/cert_manager.py +83 -0
- raijin_server/modules/grafana.py +138 -2
- raijin_server/modules/harness.py +127 -12
- raijin_server/modules/istio.py +149 -5
- raijin_server/modules/kafka.py +166 -12
- raijin_server/modules/kong.py +148 -4
- raijin_server/modules/loki.py +155 -9
- raijin_server/modules/minio.py +181 -4
- raijin_server/modules/prometheus.py +160 -1
- raijin_server/modules/secrets.py +189 -5
- raijin_server/modules/velero.py +141 -22
- {raijin_server-0.2.21.dist-info → raijin_server-0.2.22.dist-info}/METADATA +1 -1
- {raijin_server-0.2.21.dist-info → raijin_server-0.2.22.dist-info}/RECORD +18 -18
- {raijin_server-0.2.21.dist-info → raijin_server-0.2.22.dist-info}/WHEEL +0 -0
- {raijin_server-0.2.21.dist-info → raijin_server-0.2.22.dist-info}/entry_points.txt +0 -0
- {raijin_server-0.2.21.dist-info → raijin_server-0.2.22.dist-info}/licenses/LICENSE +0 -0
- {raijin_server-0.2.21.dist-info → raijin_server-0.2.22.dist-info}/top_level.txt +0 -0
raijin_server/modules/cert_manager.py
CHANGED
```diff
@@ -601,6 +601,31 @@ def _run_helm_install(ctx: ExecutionContext, attempt: int = 1) -> bool:
         "--set", "startupapicheck.enabled=true",
         "--set", "webhook.replicaCount=1",
         "--set", "cainjector.replicaCount=1",
+        # Tolerations para control-plane (single-node clusters)
+        "--set", "tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "tolerations[0].operator=Exists",
+        "--set", "tolerations[0].effect=NoSchedule",
+        "--set", "tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "tolerations[1].operator=Exists",
+        "--set", "tolerations[1].effect=NoSchedule",
+        "--set", "webhook.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "webhook.tolerations[0].operator=Exists",
+        "--set", "webhook.tolerations[0].effect=NoSchedule",
+        "--set", "webhook.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "webhook.tolerations[1].operator=Exists",
+        "--set", "webhook.tolerations[1].effect=NoSchedule",
+        "--set", "cainjector.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "cainjector.tolerations[0].operator=Exists",
+        "--set", "cainjector.tolerations[0].effect=NoSchedule",
+        "--set", "cainjector.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "cainjector.tolerations[1].operator=Exists",
+        "--set", "cainjector.tolerations[1].effect=NoSchedule",
+        "--set", "startupapicheck.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "startupapicheck.tolerations[0].operator=Exists",
+        "--set", "startupapicheck.tolerations[0].effect=NoSchedule",
+        "--set", "startupapicheck.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "startupapicheck.tolerations[1].operator=Exists",
+        "--set", "startupapicheck.tolerations[1].effect=NoSchedule",
         "--wait",
         "--timeout", "15m",
         "--debug",  # Mais logs
```
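The 24 `--set` pairs above apply the same two control-plane tolerations to the controller, webhook, cainjector, and startupapicheck components. The repetition is mechanical; a sketch of a generator for it (hypothetical helper, not in the package):

```python
# Hypothetical helper (not in raijin-server) that expands the same two
# control-plane tolerations into indexed Helm --set flags per component.
def toleration_set_flags(components: list[str]) -> list[str]:
    keys = [
        "node-role.kubernetes.io/control-plane",
        "node-role.kubernetes.io/master",
    ]
    flags: list[str] = []
    for component in components:
        # An empty component name targets the chart's top-level tolerations.
        prefix = f"{component}.tolerations" if component else "tolerations"
        for i, key in enumerate(keys):
            flags += ["--set", f"{prefix}[{i}].key={key}"]
            flags += ["--set", f"{prefix}[{i}].operator=Exists"]
            flags += ["--set", f"{prefix}[{i}].effect=NoSchedule"]
    return flags

# toleration_set_flags(["", "webhook", "cainjector", "startupapicheck"])
# reproduces the 24 "--set" pairs added in this hunk.
```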
```diff
@@ -1234,6 +1259,55 @@ def _diagnose_problems(ctx: ExecutionContext) -> None:
         typer.secho("\n Nenhum problema óbvio detectado", fg=typer.colors.GREEN)
 
 
+def _check_existing_cert_manager() -> bool:
+    """Verifica se existe instalacao do cert-manager."""
+    try:
+        result = subprocess.run(
+            ["helm", "status", "cert-manager", "-n", NAMESPACE],
+            capture_output=True,
+            text=True,
+            timeout=15,
+            env=_helm_env(),
+        )
+        return result.returncode == 0
+    except Exception:
+        return False
+
+
+def _uninstall_cert_manager(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do cert-manager."""
+    typer.echo("Removendo instalacao anterior do cert-manager...")
+
+    run_cmd(
+        ["helm", "uninstall", "cert-manager", "-n", NAMESPACE],
+        ctx,
+        check=False,
+    )
+
+    # Remove CRDs
+    run_cmd(
+        ["kubectl", "delete", "crd",
+         "certificaterequests.cert-manager.io",
+         "certificates.cert-manager.io",
+         "challenges.acme.cert-manager.io",
+         "clusterissuers.cert-manager.io",
+         "issuers.cert-manager.io",
+         "orders.acme.cert-manager.io",
+         "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    # Remove namespace
+    run_cmd(
+        ["kubectl", "delete", "namespace", NAMESPACE, "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
 # =============================================================================
 # Entry Points
 # =============================================================================
```
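`helm uninstall` never deletes CRDs, which is why the hunk removes the six cert-manager CRDs by hand after uninstalling the release. A sketch that checks for leftovers (assumed standalone helper, not from the module):

```python
import subprocess

def leftover_cert_manager_crds() -> list[str]:
    """List cert-manager CRDs still registered in the cluster."""
    result = subprocess.run(
        ["kubectl", "get", "crd", "-o", "name"],
        capture_output=True, text=True, timeout=15,
    )
    if result.returncode != 0:
        return []
    return [line for line in result.stdout.splitlines() if "cert-manager.io" in line]
```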
```diff
@@ -1253,6 +1327,15 @@ def run(ctx: ExecutionContext) -> None:
         ctx.errors.append("cert-manager: cluster não acessível")
         raise typer.Exit(code=1)
 
+    # Prompt opcional de limpeza
+    if _check_existing_cert_manager():
+        cleanup = typer.confirm(
+            "Instalacao anterior do cert-manager detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_cert_manager(ctx)
+
     # Mostra status atual
     status = _get_cert_manager_status(ctx)
     _print_status(status)
```
raijin_server/modules/grafana.py
CHANGED
```diff
@@ -1,20 +1,113 @@
 """Configuracao do Grafana via Helm com datasource e dashboards provisionados."""
 
+import socket
+import time
 from pathlib import Path
 
 import typer
 
-from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, write_file
+from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd, write_file
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_grafana(ctx: ExecutionContext) -> bool:
+    """Verifica se existe instalacao do Grafana."""
+    result = run_cmd(
+        ["helm", "status", "grafana", "-n", "observability"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_grafana(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do Grafana."""
+    typer.echo("Removendo instalacao anterior do Grafana...")
+
+    run_cmd(
+        ["helm", "uninstall", "grafana", "-n", "observability"],
+        ctx,
+        check=False,
+    )
+
+    remove_data = typer.confirm("Remover PVCs (dados persistentes)?", default=False)
+    if remove_data:
+        run_cmd(
+            ["kubectl", "-n", "observability", "delete", "pvc", "-l", "app.kubernetes.io/name=grafana"],
+            ctx,
+            check=False,
+        )
+
+    time.sleep(5)
+
+
+def _wait_for_grafana_ready(ctx: ExecutionContext, timeout: int = 180) -> bool:
+    """Aguarda pods do Grafana ficarem Ready."""
+    typer.echo("Aguardando pods do Grafana ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "observability", "get", "pods",
+                "-l", "app.kubernetes.io/name=grafana",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Grafana Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando Grafana.", fg=typer.colors.YELLOW)
+    return False
 
 
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
     typer.echo("Instalando Grafana via Helm...")
 
+    # Prompt opcional de limpeza
+    if _check_existing_grafana(ctx):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Grafana detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_grafana(ctx)
+
     admin_password = typer.prompt("Senha admin do Grafana", default="admin")
     ingress_host = typer.prompt("Host para acessar o Grafana", default="grafana.local")
     ingress_class = typer.prompt("IngressClass", default="traefik")
     tls_secret = typer.prompt("Secret TLS (cert-manager)", default="grafana-tls")
+    persistence_size = typer.prompt("Tamanho do storage", default="10Gi")
+
+    node_name = _detect_node_name(ctx)
 
     values_yaml = f"""adminPassword: {admin_password}
 service:
```
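The readiness loop parses kubectl's jsonpath output of the form `pod-a=Running pod-b=Pending `; note it checks pod *phase*, so a pod can report `Running` before its containers pass readiness probes. The parsing itself is easy to lift into a pure function for testing (hypothetical helper, same logic as the loop):

```python
def parse_pod_phases(output: str) -> list[tuple[str, str]]:
    """Turn 'name=phase name=phase ' into (name, phase) pairs."""
    pods = []
    for item in output.split():
        name, sep, phase = item.rpartition("=")
        if sep:  # skip tokens without '='
            pods.append((name, phase))
    return pods

assert parse_pod_phases("grafana-abc=Running ") == [("grafana-abc", "Running")]
assert not all(p == "Running" for _, p in parse_pod_phases("a=Running b=Pending"))
```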
```diff
@@ -30,7 +123,22 @@ ingress:
     - {ingress_host}
 persistence:
   enabled: true
-  size:
+  size: {persistence_size}
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 100m
+  limits:
+    memory: 512Mi
 datasources:
   datasources.yaml:
     apiVersion: 1
```
```diff
@@ -42,6 +150,22 @@ datasources:
         isDefault: true
         jsonData:
           timeInterval: 30s
+      - name: Loki
+        type: loki
+        access: proxy
+        url: http://loki.observability.svc:3100
+dashboardProviders:
+  dashboardproviders.yaml:
+    apiVersion: 1
+    providers:
+      - name: 'default'
+        orgId: 1
+        folder: ''
+        type: file
+        disableDeletion: false
+        editable: true
+        options:
+          path: /var/lib/grafana/dashboards/default
 dashboards:
   default:
     kubernetes:
```
```diff
@@ -57,6 +181,8 @@ dashboards:
     values_path = Path("/tmp/raijin-grafana-values.yaml")
     write_file(values_path, values_yaml, ctx)
 
+    run_cmd(["kubectl", "create", "namespace", "observability"], ctx, check=False)
+
     helm_upgrade_install(
         release="grafana",
         chart="grafana",
```
```diff
@@ -67,3 +193,13 @@ dashboards:
         values=[],
         extra_args=["-f", str(values_path)],
     )
+
+    if not ctx.dry_run:
+        _wait_for_grafana_ready(ctx)
+
+    typer.secho("\n✓ Grafana instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
+    typer.echo(f"\nAcesse: https://{ingress_host}")
+    typer.echo("Usuario: admin")
+    typer.echo(f"Senha: {admin_password}")
+    typer.echo("\nPara port-forward local:")
+    typer.echo("  kubectl -n observability port-forward svc/grafana 3000:80")
```
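After the port-forward suggested above, the provisioned Prometheus and Loki datasources can be confirmed through Grafana's standard HTTP API. A sketch (the base URL assumes the port-forward; credentials are the module defaults and should be changed):

```python
import base64
import json
import urllib.request

def list_datasources(base: str = "http://localhost:3000",
                     user: str = "admin", password: str = "admin") -> list[dict]:
    """Fetch provisioned datasources via Grafana's /api/datasources endpoint."""
    req = urllib.request.Request(f"{base}/api/datasources")
    token = base64.b64encode(f"{user}:{password}".encode()).decode()
    req.add_header("Authorization", f"Basic {token}")
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

# [ds["name"] for ds in list_datasources()] should include "Prometheus" and "Loki".
```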
raijin_server/modules/harness.py
CHANGED
```diff
@@ -1,8 +1,83 @@
-"""Instalacao do Harness Delegate via Helm."""
+"""Instalacao do Harness Delegate via Helm (production-ready)."""
+
+import socket
+import time
+from pathlib import Path
 
 import typer
 
-from raijin_server.utils import ExecutionContext, ensure_tool, require_root, run_cmd
+from raijin_server.utils import ExecutionContext, ensure_tool, require_root, run_cmd, write_file
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_delegate(ctx: ExecutionContext, namespace: str, delegate_name: str) -> bool:
+    """Verifica se existe instalacao do Harness Delegate."""
+    result = run_cmd(
+        ["helm", "status", delegate_name, "-n", namespace],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_delegate(ctx: ExecutionContext, namespace: str, delegate_name: str) -> None:
+    """Remove instalacao anterior do Harness Delegate."""
+    typer.echo("Removendo instalacao anterior do Harness Delegate...")
+
+    run_cmd(
+        ["helm", "uninstall", delegate_name, "-n", namespace],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_delegate_ready(ctx: ExecutionContext, namespace: str, delegate_name: str, timeout: int = 180) -> bool:
+    """Aguarda pods do Harness Delegate ficarem Ready."""
+    typer.echo("Aguardando pods do Harness Delegate ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", namespace, "get", "pods",
+                "-l", f"app.kubernetes.io/name={delegate_name}",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Harness Delegate Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando Harness Delegate.", fg=typer.colors.YELLOW)
+    return False
 
 
 def run(ctx: ExecutionContext) -> None:
```
```diff
@@ -16,6 +91,18 @@ def run(ctx: ExecutionContext) -> None:
     delegate_name = typer.prompt("Delegate name", default="raijin-delegate")
     namespace = typer.prompt("Namespace", default="harness-delegate")
     delegate_token = typer.prompt("Delegate token", hide_input=True)
+    replicas = typer.prompt("Numero de replicas", default="1")
+
+    # Prompt opcional de limpeza
+    if _check_existing_delegate(ctx, namespace, delegate_name):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Harness Delegate detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_delegate(ctx, namespace, delegate_name)
+
+    node_name = _detect_node_name(ctx)
 
     run_cmd(
         ["helm", "repo", "add", "harness", "https://app.harness.io/storage/harness-download/delegate-helm-chart/"],
```
```diff
@@ -23,6 +110,33 @@ def run(ctx: ExecutionContext) -> None:
     )
     run_cmd(["helm", "repo", "update"], ctx)
 
+    # Create values file with tolerations
+    values_yaml = f"""delegateName: {delegate_name}
+accountId: {account_id}
+delegateToken: {delegate_token}
+orgId: {org_id}
+projectId: {project_id}
+replicaCount: {replicas}
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 100m
+  limits:
+    memory: 1Gi
+"""
+
+    values_path = Path("/tmp/raijin-harness-values.yaml")
+    write_file(values_path, values_yaml, ctx)
+
     cmd = [
         "helm",
         "upgrade",
```
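A side effect of moving the token from `--set` flags to a values file is that it now sits in a predictable, typically world-readable path under /tmp. If that matters in a given environment, `tempfile.mkstemp` creates the file with owner-only permissions; a sketch of the alternative (not what the module does):

```python
import os
import tempfile
from pathlib import Path

def write_private_values(content: str) -> Path:
    """Write Helm values to a file only the current user can read."""
    fd, path = tempfile.mkstemp(prefix="raijin-harness-", suffix=".yaml")
    with os.fdopen(fd, "w") as fh:  # mkstemp creates the file with mode 0o600
        fh.write(content)
    return Path(path)
```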
```diff
@@ -32,16 +146,17 @@ def run(ctx: ExecutionContext) -> None:
         "-n",
         namespace,
         "--create-namespace",
-        "
-
-        "--set",
-        f"accountId={account_id}",
-        "--set",
-        f"delegateToken={delegate_token}",
-        "--set",
-        f"orgId={org_id}",
-        "--set",
-        f"projectId={project_id}",
+        "-f",
+        str(values_path),
     ]
 
     run_cmd(cmd, ctx, mask_output=True, display_override="helm upgrade --install <delegate> harness/harness-delegate-ng ...")
+
+    if not ctx.dry_run:
+        _wait_for_delegate_ready(ctx, namespace, delegate_name)
+
+    typer.secho("\n✓ Harness Delegate instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
+    typer.echo(f"\nO delegate '{delegate_name}' deve aparecer no Harness em alguns minutos.")
+    typer.echo("\nPara verificar status:")
+    typer.echo(f"  kubectl -n {namespace} get pods")
+    typer.echo(f"  kubectl -n {namespace} logs -l app.kubernetes.io/name={delegate_name}")
```
raijin_server/modules/istio.py
CHANGED
```diff
@@ -1,13 +1,157 @@
-"""Instalacao do Istio usando istioctl."""
+"""Instalacao do Istio usando istioctl com configuracoes production-ready."""
+
+import socket
+import time
 
 import typer
 
 from raijin_server.utils import ExecutionContext, ensure_tool, require_root, run_cmd
 
 
+ISTIO_PROFILES = ["default", "demo", "minimal", "ambient", "empty"]
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_istio(ctx: ExecutionContext) -> bool:
+    """Verifica se existe instalacao do Istio."""
+    result = run_cmd(
+        ["kubectl", "get", "namespace", "istio-system"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_istio(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do Istio."""
+    typer.echo("Removendo instalacao anterior do Istio...")
+
+    run_cmd(
+        ["istioctl", "uninstall", "--purge", "-y"],
+        ctx,
+        check=False,
+    )
+
+    run_cmd(
+        ["kubectl", "delete", "namespace", "istio-system", "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_istio_ready(ctx: ExecutionContext, timeout: int = 300) -> bool:
+    """Aguarda pods do Istio ficarem Ready."""
+    typer.echo("Aguardando pods do Istio ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "istio-system", "get", "pods",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase in ("Running", "Succeeded") for _, phase in pods):
+                    typer.secho(f" Todos os {len(pods)} pods Ready.", fg=typer.colors.GREEN)
+                    return True
+
+                pending = [name for name, phase in pods if phase not in ("Running", "Succeeded")]
+                if pending:
+                    typer.echo(f" Aguardando: {', '.join(pending[:3])}...")
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando pods do Istio.", fg=typer.colors.YELLOW)
+    return False
+
+
 def run(ctx: ExecutionContext) -> None:
     require_root(ctx)
-    ensure_tool("istioctl", ctx, install_hint="
-    typer.echo("Instalando Istio
-
-
+    ensure_tool("istioctl", ctx, install_hint="Baixe em https://istio.io/latest/docs/setup/getting-started/")
+    typer.echo("Instalando Istio...")
+
+    # Prompt opcional de limpeza
+    if _check_existing_istio(ctx):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Istio detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_istio(ctx)
+
+    # Selecao de perfil
+    typer.echo(f"\nPerfis disponiveis: {', '.join(ISTIO_PROFILES)}")
+    profile = typer.prompt("Perfil do Istio", default="default")
+    if profile not in ISTIO_PROFILES:
+        typer.secho(f"Perfil '{profile}' invalido. Usando 'default'.", fg=typer.colors.YELLOW)
+        profile = "default"
+
+    node_name = _detect_node_name(ctx)
+
+    # Instala com tolerations para control-plane
+    install_cmd = [
+        "istioctl", "install",
+        "--set", f"profile={profile}",
+        # Tolerations para istiod (control plane)
+        "--set", "components.pilot.k8s.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "components.pilot.k8s.tolerations[0].operator=Exists",
+        "--set", "components.pilot.k8s.tolerations[0].effect=NoSchedule",
+        "--set", "components.pilot.k8s.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "components.pilot.k8s.tolerations[1].operator=Exists",
+        "--set", "components.pilot.k8s.tolerations[1].effect=NoSchedule",
+        # Tolerations para ingress gateway
+        "--set", "components.ingressGateways[0].k8s.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "components.ingressGateways[0].k8s.tolerations[0].operator=Exists",
+        "--set", "components.ingressGateways[0].k8s.tolerations[0].effect=NoSchedule",
+        "--set", "components.ingressGateways[0].k8s.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "components.ingressGateways[0].k8s.tolerations[1].operator=Exists",
+        "--set", "components.ingressGateways[0].k8s.tolerations[1].effect=NoSchedule",
+        # NodeSelector
+        "--set", f"components.pilot.k8s.nodeSelector.kubernetes\\.io/hostname={node_name}",
+        "-y",
+    ]
+
+    run_cmd(install_cmd, ctx)
+
+    # Aguarda pods ficarem prontos
+    if not ctx.dry_run:
+        _wait_for_istio_ready(ctx)
+
+    # Pergunta sobre injection
+    enable_injection = typer.confirm(
+        "Habilitar sidecar injection automatico no namespace 'default'?",
+        default=True,
+    )
+    if enable_injection:
+        run_cmd(
+            ["kubectl", "label", "namespace", "default", "istio-injection=enabled", "--overwrite"],
+            ctx,
+        )
+
+    typer.secho("\n✓ Istio instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
```