raijin-server 0.2.21__tar.gz → 0.2.22__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. {raijin_server-0.2.21/src/raijin_server.egg-info → raijin_server-0.2.22}/PKG-INFO +1 -1
  2. {raijin_server-0.2.21 → raijin_server-0.2.22}/setup.cfg +1 -1
  3. raijin_server-0.2.22/src/raijin_server/__init__.py +5 -0
  4. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/cert_manager.py +83 -0
  5. raijin_server-0.2.22/src/raijin_server/modules/grafana.py +205 -0
  6. raijin_server-0.2.22/src/raijin_server/modules/harness.py +162 -0
  7. raijin_server-0.2.22/src/raijin_server/modules/istio.py +157 -0
  8. raijin_server-0.2.22/src/raijin_server/modules/kafka.py +188 -0
  9. raijin_server-0.2.22/src/raijin_server/modules/kong.py +163 -0
  10. raijin_server-0.2.22/src/raijin_server/modules/loki.py +173 -0
  11. raijin_server-0.2.22/src/raijin_server/modules/minio.py +196 -0
  12. raijin_server-0.2.22/src/raijin_server/modules/prometheus.py +274 -0
  13. raijin_server-0.2.22/src/raijin_server/modules/secrets.py +293 -0
  14. raijin_server-0.2.22/src/raijin_server/modules/velero.py +166 -0
  15. {raijin_server-0.2.21 → raijin_server-0.2.22/src/raijin_server.egg-info}/PKG-INFO +1 -1
  16. raijin_server-0.2.21/src/raijin_server/__init__.py +0 -5
  17. raijin_server-0.2.21/src/raijin_server/modules/grafana.py +0 -69
  18. raijin_server-0.2.21/src/raijin_server/modules/harness.py +0 -47
  19. raijin_server-0.2.21/src/raijin_server/modules/istio.py +0 -13
  20. raijin_server-0.2.21/src/raijin_server/modules/kafka.py +0 -34
  21. raijin_server-0.2.21/src/raijin_server/modules/kong.py +0 -19
  22. raijin_server-0.2.21/src/raijin_server/modules/loki.py +0 -27
  23. raijin_server-0.2.21/src/raijin_server/modules/minio.py +0 -19
  24. raijin_server-0.2.21/src/raijin_server/modules/prometheus.py +0 -115
  25. raijin_server-0.2.21/src/raijin_server/modules/secrets.py +0 -109
  26. raijin_server-0.2.21/src/raijin_server/modules/velero.py +0 -47
  27. {raijin_server-0.2.21 → raijin_server-0.2.22}/LICENSE +0 -0
  28. {raijin_server-0.2.21 → raijin_server-0.2.22}/README.md +0 -0
  29. {raijin_server-0.2.21 → raijin_server-0.2.22}/pyproject.toml +0 -0
  30. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/cli.py +0 -0
  31. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/config.py +0 -0
  32. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/healthchecks.py +0 -0
  33. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/__init__.py +0 -0
  34. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/apokolips_demo.py +0 -0
  35. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/bootstrap.py +0 -0
  36. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/calico.py +0 -0
  37. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/essentials.py +0 -0
  38. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/firewall.py +0 -0
  39. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/full_install.py +0 -0
  40. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/hardening.py +0 -0
  41. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/kubernetes.py +0 -0
  42. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/metallb.py +0 -0
  43. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/network.py +0 -0
  44. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/observability_dashboards.py +0 -0
  45. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/observability_ingress.py +0 -0
  46. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/sanitize.py +0 -0
  47. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/ssh_hardening.py +0 -0
  48. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/traefik.py +0 -0
  49. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/vpn.py +0 -0
  50. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/scripts/__init__.py +0 -0
  51. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/scripts/checklist.sh +0 -0
  52. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/scripts/install.sh +0 -0
  53. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/scripts/log_size_metric.sh +0 -0
  54. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/scripts/pre-deploy-check.sh +0 -0
  55. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/utils.py +0 -0
  56. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/validators.py +0 -0
  57. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server.egg-info/SOURCES.txt +0 -0
  58. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server.egg-info/dependency_links.txt +0 -0
  59. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server.egg-info/entry_points.txt +0 -0
  60. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server.egg-info/requires.txt +0 -0
  61. {raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server.egg-info/top_level.txt +0 -0
  62. {raijin_server-0.2.21 → raijin_server-0.2.22}/tests/test_full_install_sequence.py +0 -0
  63. {raijin_server-0.2.21 → raijin_server-0.2.22}/tests/test_registry.py +0 -0
{raijin_server-0.2.21/src/raijin_server.egg-info → raijin_server-0.2.22}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: raijin-server
-Version: 0.2.21
+Version: 0.2.22
 Summary: CLI para automacao de setup e hardening de servidores Ubuntu Server.
 Home-page: https://example.com/raijin-server
 Author: Equipe Raijin
{raijin_server-0.2.21 → raijin_server-0.2.22}/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = raijin-server
-version = 0.2.21
+version = 0.2.22
 description = CLI para automacao de setup e hardening de servidores Ubuntu Server.
 long_description = file: README.md
 long_description_content_type = text/markdown
raijin_server-0.2.22/src/raijin_server/__init__.py
@@ -0,0 +1,5 @@
+"""Pacote principal do CLI Raijin Server."""
+
+__version__ = "0.2.22"
+
+__all__ = ["__version__"]
{raijin_server-0.2.21 → raijin_server-0.2.22}/src/raijin_server/modules/cert_manager.py
@@ -601,6 +601,31 @@ def _run_helm_install(ctx: ExecutionContext, attempt: int = 1) -> bool:
         "--set", "startupapicheck.enabled=true",
         "--set", "webhook.replicaCount=1",
         "--set", "cainjector.replicaCount=1",
+        # Tolerations para control-plane (single-node clusters)
+        "--set", "tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "tolerations[0].operator=Exists",
+        "--set", "tolerations[0].effect=NoSchedule",
+        "--set", "tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "tolerations[1].operator=Exists",
+        "--set", "tolerations[1].effect=NoSchedule",
+        "--set", "webhook.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "webhook.tolerations[0].operator=Exists",
+        "--set", "webhook.tolerations[0].effect=NoSchedule",
+        "--set", "webhook.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "webhook.tolerations[1].operator=Exists",
+        "--set", "webhook.tolerations[1].effect=NoSchedule",
+        "--set", "cainjector.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "cainjector.tolerations[0].operator=Exists",
+        "--set", "cainjector.tolerations[0].effect=NoSchedule",
+        "--set", "cainjector.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "cainjector.tolerations[1].operator=Exists",
+        "--set", "cainjector.tolerations[1].effect=NoSchedule",
+        "--set", "startupapicheck.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "startupapicheck.tolerations[0].operator=Exists",
+        "--set", "startupapicheck.tolerations[0].effect=NoSchedule",
+        "--set", "startupapicheck.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "startupapicheck.tolerations[1].operator=Exists",
+        "--set", "startupapicheck.tolerations[1].effect=NoSchedule",
         "--wait",
         "--timeout", "15m",
         "--debug", # Mais logs
@@ -1234,6 +1259,55 @@ def _diagnose_problems(ctx: ExecutionContext) -> None:
     typer.secho("\n Nenhum problema óbvio detectado", fg=typer.colors.GREEN)
 
 
+def _check_existing_cert_manager() -> bool:
+    """Verifica se existe instalacao do cert-manager."""
+    try:
+        result = subprocess.run(
+            ["helm", "status", "cert-manager", "-n", NAMESPACE],
+            capture_output=True,
+            text=True,
+            timeout=15,
+            env=_helm_env(),
+        )
+        return result.returncode == 0
+    except Exception:
+        return False
+
+
+def _uninstall_cert_manager(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do cert-manager."""
+    typer.echo("Removendo instalacao anterior do cert-manager...")
+
+    run_cmd(
+        ["helm", "uninstall", "cert-manager", "-n", NAMESPACE],
+        ctx,
+        check=False,
+    )
+
+    # Remove CRDs
+    run_cmd(
+        ["kubectl", "delete", "crd",
+         "certificaterequests.cert-manager.io",
+         "certificates.cert-manager.io",
+         "challenges.acme.cert-manager.io",
+         "clusterissuers.cert-manager.io",
+         "issuers.cert-manager.io",
+         "orders.acme.cert-manager.io",
+         "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    # Remove namespace
+    run_cmd(
+        ["kubectl", "delete", "namespace", NAMESPACE, "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
 # =============================================================================
 # Entry Points
 # =============================================================================
@@ -1253,6 +1327,15 @@ def run(ctx: ExecutionContext) -> None:
        ctx.errors.append("cert-manager: cluster não acessível")
        raise typer.Exit(code=1)
 
+    # Prompt opcional de limpeza
+    if _check_existing_cert_manager():
+        cleanup = typer.confirm(
+            "Instalacao anterior do cert-manager detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_cert_manager(ctx)
+
     # Mostra status atual
     status = _get_cert_manager_status(ctx)
     _print_status(status)
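The detect-confirm-uninstall sequence added here recurs almost verbatim in the grafana, harness, and istio modules below. A minimal sketch of how it could be factored into a shared helper (confirm_cleanup and its parameters are hypothetical, not part of the package):

from typing import Callable

import typer

def confirm_cleanup(name: str, exists: Callable[[], bool], uninstall: Callable[[], None]) -> None:
    """If a previous install is detected, optionally remove it before reinstalling."""
    if not exists():
        return
    if typer.confirm(f"Instalacao anterior do {name} detectada. Limpar antes de reinstalar?", default=False):
        uninstall()

# Usage sketch:
# confirm_cleanup("cert-manager", _check_existing_cert_manager, lambda: _uninstall_cert_manager(ctx))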
raijin_server-0.2.22/src/raijin_server/modules/grafana.py
@@ -0,0 +1,205 @@
+"""Configuracao do Grafana via Helm com datasource e dashboards provisionados."""
+
+import socket
+import time
+from pathlib import Path
+
+import typer
+
+from raijin_server.utils import ExecutionContext, helm_upgrade_install, require_root, run_cmd, write_file
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_grafana(ctx: ExecutionContext) -> bool:
+    """Verifica se existe instalacao do Grafana."""
+    result = run_cmd(
+        ["helm", "status", "grafana", "-n", "observability"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_grafana(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do Grafana."""
+    typer.echo("Removendo instalacao anterior do Grafana...")
+
+    run_cmd(
+        ["helm", "uninstall", "grafana", "-n", "observability"],
+        ctx,
+        check=False,
+    )
+
+    remove_data = typer.confirm("Remover PVCs (dados persistentes)?", default=False)
+    if remove_data:
+        run_cmd(
+            ["kubectl", "-n", "observability", "delete", "pvc", "-l", "app.kubernetes.io/name=grafana"],
+            ctx,
+            check=False,
+        )
+
+    time.sleep(5)
+
+
+def _wait_for_grafana_ready(ctx: ExecutionContext, timeout: int = 180) -> bool:
+    """Aguarda pods do Grafana ficarem Ready."""
+    typer.echo("Aguardando pods do Grafana ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "observability", "get", "pods",
+                "-l", "app.kubernetes.io/name=grafana",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Grafana Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando Grafana.", fg=typer.colors.YELLOW)
+    return False
+
+
+def run(ctx: ExecutionContext) -> None:
+    require_root(ctx)
+    typer.echo("Instalando Grafana via Helm...")
+
+    # Prompt opcional de limpeza
+    if _check_existing_grafana(ctx):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Grafana detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_grafana(ctx)
+
+    admin_password = typer.prompt("Senha admin do Grafana", default="admin")
+    ingress_host = typer.prompt("Host para acessar o Grafana", default="grafana.local")
+    ingress_class = typer.prompt("IngressClass", default="traefik")
+    tls_secret = typer.prompt("Secret TLS (cert-manager)", default="grafana-tls")
+    persistence_size = typer.prompt("Tamanho do storage", default="10Gi")
+
+    node_name = _detect_node_name(ctx)
+
+    values_yaml = f"""adminPassword: {admin_password}
+service:
+  type: ClusterIP
+ingress:
+  enabled: true
+  ingressClassName: {ingress_class}
+  hosts:
+    - {ingress_host}
+  tls:
+    - secretName: {tls_secret}
+      hosts:
+        - {ingress_host}
+persistence:
+  enabled: true
+  size: {persistence_size}
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 100m
+  limits:
+    memory: 512Mi
+datasources:
+  datasources.yaml:
+    apiVersion: 1
+    datasources:
+      - name: Prometheus
+        type: prometheus
+        access: proxy
+        url: http://kube-prometheus-stack-prometheus.observability.svc:9090
+        isDefault: true
+        jsonData:
+          timeInterval: 30s
+      - name: Loki
+        type: loki
+        access: proxy
+        url: http://loki.observability.svc:3100
+dashboardProviders:
+  dashboardproviders.yaml:
+    apiVersion: 1
+    providers:
+      - name: 'default'
+        orgId: 1
+        folder: ''
+        type: file
+        disableDeletion: false
+        editable: true
+        options:
+          path: /var/lib/grafana/dashboards/default
+dashboards:
+  default:
+    kubernetes:
+      gnetId: 6417
+      revision: 1
+      datasource: Prometheus
+    node-exporter:
+      gnetId: 1860
+      revision: 27
+      datasource: Prometheus
+"""
+
+    values_path = Path("/tmp/raijin-grafana-values.yaml")
+    write_file(values_path, values_yaml, ctx)
+
+    run_cmd(["kubectl", "create", "namespace", "observability"], ctx, check=False)
+
+    helm_upgrade_install(
+        release="grafana",
+        chart="grafana",
+        namespace="observability",
+        repo="grafana",
+        repo_url="https://grafana.github.io/helm-charts",
+        ctx=ctx,
+        values=[],
+        extra_args=["-f", str(values_path)],
+    )
+
+    if not ctx.dry_run:
+        _wait_for_grafana_ready(ctx)
+
+    typer.secho("\n✓ Grafana instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
+    typer.echo(f"\nAcesse: https://{ingress_host}")
+    typer.echo("Usuario: admin")
+    typer.echo(f"Senha: {admin_password}")
+    typer.echo("\nPara port-forward local:")
+    typer.echo(" kubectl -n observability port-forward svc/grafana 3000:80")
raijin_server-0.2.22/src/raijin_server/modules/harness.py
@@ -0,0 +1,162 @@
+"""Instalacao do Harness Delegate via Helm (production-ready)."""
+
+import socket
+import time
+from pathlib import Path
+
+import typer
+
+from raijin_server.utils import ExecutionContext, ensure_tool, require_root, run_cmd, write_file
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_delegate(ctx: ExecutionContext, namespace: str, delegate_name: str) -> bool:
+    """Verifica se existe instalacao do Harness Delegate."""
+    result = run_cmd(
+        ["helm", "status", delegate_name, "-n", namespace],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_delegate(ctx: ExecutionContext, namespace: str, delegate_name: str) -> None:
+    """Remove instalacao anterior do Harness Delegate."""
+    typer.echo("Removendo instalacao anterior do Harness Delegate...")
+
+    run_cmd(
+        ["helm", "uninstall", delegate_name, "-n", namespace],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_delegate_ready(ctx: ExecutionContext, namespace: str, delegate_name: str, timeout: int = 180) -> bool:
+    """Aguarda pods do Harness Delegate ficarem Ready."""
+    typer.echo("Aguardando pods do Harness Delegate ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", namespace, "get", "pods",
+                "-l", f"app.kubernetes.io/name={delegate_name}",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase == "Running" for _, phase in pods):
+                    typer.secho(" Harness Delegate Ready.", fg=typer.colors.GREEN)
+                    return True
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando Harness Delegate.", fg=typer.colors.YELLOW)
+    return False
+
+
+def run(ctx: ExecutionContext) -> None:
+    require_root(ctx)
+    ensure_tool("helm", ctx, install_hint="Instale helm para implantar o delegate.")
+
+    typer.echo("Instalando Harness Delegate via Helm...")
+    account_id = typer.prompt("Harness accountId")
+    org_id = typer.prompt("Org ID", default="default")
+    project_id = typer.prompt("Project ID", default="default")
+    delegate_name = typer.prompt("Delegate name", default="raijin-delegate")
+    namespace = typer.prompt("Namespace", default="harness-delegate")
+    delegate_token = typer.prompt("Delegate token", hide_input=True)
+    replicas = typer.prompt("Numero de replicas", default="1")
+
+    # Prompt opcional de limpeza
+    if _check_existing_delegate(ctx, namespace, delegate_name):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Harness Delegate detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_delegate(ctx, namespace, delegate_name)
+
+    node_name = _detect_node_name(ctx)
+
+    run_cmd(
+        ["helm", "repo", "add", "harness", "https://app.harness.io/storage/harness-download/delegate-helm-chart/"],
+        ctx,
+    )
+    run_cmd(["helm", "repo", "update"], ctx)
+
+    # Create values file with tolerations
+    values_yaml = f"""delegateName: {delegate_name}
+accountId: {account_id}
+delegateToken: {delegate_token}
+orgId: {org_id}
+projectId: {project_id}
+replicaCount: {replicas}
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    operator: Exists
+    effect: NoSchedule
+nodeSelector:
+  kubernetes.io/hostname: {node_name}
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 100m
+  limits:
+    memory: 1Gi
+"""
+
+    values_path = Path("/tmp/raijin-harness-values.yaml")
+    write_file(values_path, values_yaml, ctx)
+
+    cmd = [
+        "helm",
+        "upgrade",
+        "--install",
+        delegate_name,
+        "harness/harness-delegate-ng",
+        "-n",
+        namespace,
+        "--create-namespace",
+        "-f",
+        str(values_path),
+    ]
+
+    run_cmd(cmd, ctx, mask_output=True, display_override="helm upgrade --install <delegate> harness/harness-delegate-ng ...")
+
+    if not ctx.dry_run:
+        _wait_for_delegate_ready(ctx, namespace, delegate_name)
+
+    typer.secho("\n✓ Harness Delegate instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
+    typer.echo(f"\nO delegate '{delegate_name}' deve aparecer no Harness em alguns minutos.")
+    typer.echo("\nPara verificar status:")
+    typer.echo(f" kubectl -n {namespace} get pods")
+    typer.echo(f" kubectl -n {namespace} logs -l app.kubernetes.io/name={delegate_name}")
raijin_server-0.2.22/src/raijin_server/modules/istio.py
@@ -0,0 +1,157 @@
+"""Instalacao do Istio usando istioctl com configuracoes production-ready."""
+
+import socket
+import time
+
+import typer
+
+from raijin_server.utils import ExecutionContext, ensure_tool, require_root, run_cmd
+
+
+ISTIO_PROFILES = ["default", "demo", "minimal", "ambient", "empty"]
+
+
+def _detect_node_name(ctx: ExecutionContext) -> str:
+    """Detecta nome do node para nodeSelector."""
+    result = run_cmd(
+        ["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].metadata.name}"],
+        ctx,
+        check=False,
+    )
+    if result.returncode == 0 and (result.stdout or "").strip():
+        return (result.stdout or "").strip()
+    return socket.gethostname()
+
+
+def _check_existing_istio(ctx: ExecutionContext) -> bool:
+    """Verifica se existe instalacao do Istio."""
+    result = run_cmd(
+        ["kubectl", "get", "namespace", "istio-system"],
+        ctx,
+        check=False,
+    )
+    return result.returncode == 0
+
+
+def _uninstall_istio(ctx: ExecutionContext) -> None:
+    """Remove instalacao anterior do Istio."""
+    typer.echo("Removendo instalacao anterior do Istio...")
+
+    run_cmd(
+        ["istioctl", "uninstall", "--purge", "-y"],
+        ctx,
+        check=False,
+    )
+
+    run_cmd(
+        ["kubectl", "delete", "namespace", "istio-system", "--ignore-not-found"],
+        ctx,
+        check=False,
+    )
+
+    time.sleep(5)
+
+
+def _wait_for_istio_ready(ctx: ExecutionContext, timeout: int = 300) -> bool:
+    """Aguarda pods do Istio ficarem Ready."""
+    typer.echo("Aguardando pods do Istio ficarem Ready...")
+    deadline = time.time() + timeout
+
+    while time.time() < deadline:
+        result = run_cmd(
+            [
+                "kubectl", "-n", "istio-system", "get", "pods",
+                "-o", "jsonpath={range .items[*]}{.metadata.name}={.status.phase} {end}",
+            ],
+            ctx,
+            check=False,
+        )
+
+        if result.returncode == 0:
+            output = (result.stdout or "").strip()
+            if output:
+                pods = []
+                for item in output.split():
+                    if "=" in item:
+                        parts = item.rsplit("=", 1)
+                        if len(parts) == 2:
+                            pods.append((parts[0], parts[1]))
+
+                if pods and all(phase in ("Running", "Succeeded") for _, phase in pods):
+                    typer.secho(f" Todos os {len(pods)} pods Ready.", fg=typer.colors.GREEN)
+                    return True
+
+                pending = [name for name, phase in pods if phase not in ("Running", "Succeeded")]
+                if pending:
+                    typer.echo(f" Aguardando: {', '.join(pending[:3])}...")
+
+        time.sleep(10)
+
+    typer.secho(" Timeout aguardando pods do Istio.", fg=typer.colors.YELLOW)
+    return False
+
+
+def run(ctx: ExecutionContext) -> None:
+    require_root(ctx)
+    ensure_tool("istioctl", ctx, install_hint="Baixe em https://istio.io/latest/docs/setup/getting-started/")
+    typer.echo("Instalando Istio...")
+
+    # Prompt opcional de limpeza
+    if _check_existing_istio(ctx):
+        cleanup = typer.confirm(
+            "Instalacao anterior do Istio detectada. Limpar antes de reinstalar?",
+            default=False,
+        )
+        if cleanup:
+            _uninstall_istio(ctx)
+
+    # Selecao de perfil
+    typer.echo(f"\nPerfis disponiveis: {', '.join(ISTIO_PROFILES)}")
+    profile = typer.prompt("Perfil do Istio", default="default")
+    if profile not in ISTIO_PROFILES:
+        typer.secho(f"Perfil '{profile}' invalido. Usando 'default'.", fg=typer.colors.YELLOW)
+        profile = "default"
+
+    node_name = _detect_node_name(ctx)
+
+    # Instala com tolerations para control-plane
+    install_cmd = [
+        "istioctl", "install",
+        "--set", f"profile={profile}",
+        # Tolerations para istiod (control plane)
+        "--set", "components.pilot.k8s.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "components.pilot.k8s.tolerations[0].operator=Exists",
+        "--set", "components.pilot.k8s.tolerations[0].effect=NoSchedule",
+        "--set", "components.pilot.k8s.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "components.pilot.k8s.tolerations[1].operator=Exists",
+        "--set", "components.pilot.k8s.tolerations[1].effect=NoSchedule",
+        # Tolerations para ingress gateway
+        "--set", "components.ingressGateways[0].k8s.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "--set", "components.ingressGateways[0].k8s.tolerations[0].operator=Exists",
+        "--set", "components.ingressGateways[0].k8s.tolerations[0].effect=NoSchedule",
+        "--set", "components.ingressGateways[0].k8s.tolerations[1].key=node-role.kubernetes.io/master",
+        "--set", "components.ingressGateways[0].k8s.tolerations[1].operator=Exists",
+        "--set", "components.ingressGateways[0].k8s.tolerations[1].effect=NoSchedule",
+        # NodeSelector
+        "--set", f"components.pilot.k8s.nodeSelector.kubernetes\\.io/hostname={node_name}",
+        "-y",
+    ]
+
+    run_cmd(install_cmd, ctx)
+
+    # Aguarda pods ficarem prontos
+    if not ctx.dry_run:
+        _wait_for_istio_ready(ctx)
+
+    # Pergunta sobre injection
+    enable_injection = typer.confirm(
+        "Habilitar sidecar injection automatico no namespace 'default'?",
+        default=True,
+    )
+    if enable_injection:
+        run_cmd(
+            ["kubectl", "label", "namespace", "default", "istio-injection=enabled", "--overwrite"],
+            ctx,
+        )
+
+    typer.secho("\n✓ Istio instalado com sucesso.", fg=typer.colors.GREEN, bold=True)