raijin-server 0.3.7-py3-none-any.whl → 0.3.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
raijin_server/__init__.py CHANGED
@@ -1,5 +1,5 @@
  """Pacote principal do CLI Raijin Server."""

- __version__ = "0.3.7"
+ __version__ = "0.3.9"

  __all__ = ["__version__"]
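The only functional change here is the version constant. A quick way to confirm which version is active in an environment (a minimal sketch; assumes the wheel is already installed):

    # __version__ is exported via __all__ above.
    import raijin_server
    print(raijin_server.__version__)  # expected: 0.3.9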
raijin_server/cli.py CHANGED
@@ -47,7 +47,7 @@ from raijin_server.modules import (
  )
  from raijin_server.utils import ExecutionContext, logger, active_log_file, available_log_files, page_text, ensure_tool
  from raijin_server.validators import validate_system_requirements, check_module_dependencies, MODULE_DEPENDENCIES
- from raijin_server.healthchecks import run_health_check
+ from raijin_server.healthchecks import run_health_check, validate_module_status, get_all_module_statuses
  from raijin_server.config import ConfigManager
  from raijin_server import module_manager

@@ -212,7 +212,7 @@ def _run_module(ctx: typer.Context, name: str, skip_validation: bool = False) ->

  def _print_banner() -> None:
  console.print(Panel.fit(BANNER, style="bold blue"))
- console.print("[bright_white]Automacao de setup e hardening para Ubuntu Server[/bright_white]\n")
+ console.print(f"[bright_white]Automacao de setup e hardening para Ubuntu Server[/bright_white] [dim]v{__version__}[/dim]\n")


  def _select_state_dir() -> Path:
@@ -337,7 +337,7 @@ def _rollback_module(
  typer.secho(f"Rollback finalizado (best-effort) para {name}\n", fg=typer.colors.GREEN)


- def _render_menu(dry_run: bool) -> int:
+ def _render_menu(dry_run: bool, live_status: bool = True) -> int:
  table = Table(
  title="Selecione um modulo para executar",
  header_style="bold white",
@@ -348,9 +348,29 @@ def _render_menu(dry_run: bool) -> int:
  table.add_column("Status", style="green", no_wrap=True)
  table.add_column("Modulo", style="bold green")
  table.add_column("Descricao", style="white")
+
+ # Obtém status em tempo real se solicitado
+ if live_status:
+ console.print("[dim]Validando status dos módulos...[/dim]")
+ statuses = get_all_module_statuses()
+ else:
+ statuses = {}
+
  for idx, name in enumerate(MODULES.keys(), start=1):
  desc = MODULE_DESCRIPTIONS.get(name, "")
- status = "[green]✔[/green]" if _is_completed(name) else "[dim]-[/dim]"
+
+ if live_status:
+ status_val = statuses.get(name, "not_installed")
+ if status_val == "ok":
+ status = "[green]✔[/green]"
+ elif status_val == "error":
+ status = "[red]✗[/red]"
+ else:
+ status = "[dim]-[/dim]"
+ else:
+ # Fallback para arquivo .done
+ status = "[green]✔[/green]" if _is_completed(name) else "[dim]-[/dim]"
+
  table.add_row(f"{idx}", status, name, desc)

  exit_idx = len(MODULES) + 1
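With live_status enabled (the new default), each menu render probes the cluster and maps the returned value to an icon; the .done marker file is only consulted as a fallback. A standalone sketch of that mapping (the sample statuses below are made up; in the CLI they come from get_all_module_statuses()):

    # Hypothetical sample data; real values are produced by validate_module_status().
    statuses = {"kubernetes": "ok", "traefik": "error", "harbor": "not_installed"}
    for name, status_val in statuses.items():
        icon = {"ok": "✔", "error": "✗"}.get(status_val, "-")
        print(f"{icon} {name}")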
raijin_server/healthchecks.py CHANGED
@@ -375,10 +375,372 @@ HEALTH_CHECKS = {
  "kafka": lambda ctx: verify_helm_chart("kafka", "kafka", ctx),
  "cert_manager": verify_cert_manager,
  "secrets": verify_secrets,
-
  }


+ # =============================================================================
+ # STATUS VALIDATION - Validação em tempo real para o menu interativo
+ # =============================================================================
+
+ def _quick_cmd(cmd: list[str], timeout: int = 5) -> tuple[bool, str]:
+ """Executa comando rápido e retorna (sucesso, output)."""
+ try:
+ result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
+ return result.returncode == 0, result.stdout.strip()
+ except Exception as e:
+ return False, str(e)
+
+
+ def _check_namespace_exists(ns: str) -> bool:
+ """Verifica se namespace existe."""
+ ok, _ = _quick_cmd(["kubectl", "get", "ns", ns])
+ return ok
+
+
+ def _check_pods_running(ns: str) -> tuple[bool, bool]:
+ """Retorna (existe, todos_running)."""
+ if not _check_namespace_exists(ns):
+ return False, False
+ ok, out = _quick_cmd(["kubectl", "get", "pods", "-n", ns, "-o", "jsonpath={.items[*].status.phase}"])
+ if not ok or not out:
+ return True, False # ns existe mas sem pods ou erro
+ phases = out.split()
+ all_ok = all(p in ("Running", "Succeeded") for p in phases)
+ return True, all_ok
+
+
+ def _check_helm_deployed(release: str, ns: str) -> tuple[bool, bool]:
+ """Retorna (existe, deployed)."""
+ ok, out = _quick_cmd(["helm", "status", release, "-n", ns, "--output", "json"], timeout=10)
+ if not ok:
+ return False, False
+ try:
+ import json
+ data = json.loads(out)
+ status = data.get("info", {}).get("status", "")
+ return True, status == "deployed"
+ except Exception:
+ return False, False
+
+
+ def _check_systemd_active(service: str) -> bool:
+ """Verifica se serviço systemd está ativo."""
+ ok, out = _quick_cmd(["systemctl", "is-active", service])
+ return ok and out == "active"
+
+
+ def _check_crd_exists(crd: str) -> bool:
+ """Verifica se CRD existe."""
+ ok, _ = _quick_cmd(["kubectl", "get", "crd", crd])
+ return ok
+
+
+ def _check_cluster_secret_store() -> tuple[bool, bool]:
+ """Verifica ClusterSecretStore. Retorna (existe, ready)."""
+ ok, out = _quick_cmd(["kubectl", "get", "clustersecretstore", "-o", "jsonpath={.items[*].status.conditions[?(@.type=='Ready')].status}"])
+ if not ok:
+ return False, False
+ return True, "True" in out
+
+
+ # Status: "ok" = ✓, "error" = ✗, "not_installed" = -
+ ModuleStatus = str # "ok" | "error" | "not_installed"
+
+
+ def validate_module_status(module: str) -> ModuleStatus:
+ """Valida status de um módulo em tempo real."""
+ validators = {
+ "sanitize": _validate_sanitize,
+ "bootstrap": _validate_bootstrap,
+ "ssh_hardening": _validate_ssh_hardening,
+ "hardening": _validate_hardening,
+ "network": _validate_network,
+ "essentials": _validate_essentials,
+ "firewall": _validate_firewall,
+ "vpn": _validate_vpn,
+ "vpn_client": _validate_vpn_client,
+ "internal_dns": _validate_internal_dns,
+ "kubernetes": _validate_kubernetes,
+ "calico": _validate_calico,
+ "metallb": _validate_metallb,
+ "traefik": _validate_traefik,
+ "cert_manager": _validate_cert_manager,
+ "istio": _validate_istio,
+ "kong": _validate_kong,
+ "minio": _validate_minio,
+ "prometheus": _validate_prometheus,
+ "grafana": _validate_grafana,
+ "secrets": _validate_secrets,
+ "loki": _validate_loki,
+ "harbor": _validate_harbor,
+ "harness": _validate_harness,
+ "velero": _validate_velero,
+ "kafka": _validate_kafka,
+ "full_install": _validate_full_install,
+ }
+
+ validator = validators.get(module)
+ if validator:
+ try:
+ return validator()
+ except Exception:
+ return "error"
+ return "not_installed"
+
+
+ def _validate_sanitize() -> ModuleStatus:
+ # Sanitize é idempotente, consideramos OK se bootstrap/k8s estiver funcionando
+ return "ok"
+
+
+ def _validate_bootstrap() -> ModuleStatus:
+ # Verifica se ferramentas estão instaladas
+ tools = ["helm", "kubectl", "containerd"]
+ for tool in tools:
+ ok, _ = _quick_cmd(["which", tool])
+ if not ok:
+ return "not_installed"
+ return "ok"
+
+
+ def _validate_ssh_hardening() -> ModuleStatus:
+ # Verifica se SSH está rodando
+ if _check_systemd_active("ssh") or _check_systemd_active("sshd"):
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_hardening() -> ModuleStatus:
+ if _check_systemd_active("fail2ban"):
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_network() -> ModuleStatus:
+ # Verifica se hostname está configurado
+ ok, hostname = _quick_cmd(["hostname"])
+ if ok and hostname:
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_essentials() -> ModuleStatus:
+ # Verifica NTP
+ ok, out = _quick_cmd(["timedatectl", "show", "-p", "NTP", "--value"])
+ if ok and out == "yes":
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_firewall() -> ModuleStatus:
+ if _check_systemd_active("ufw"):
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_vpn() -> ModuleStatus:
+ if _check_systemd_active("wg-quick@wg0"):
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_vpn_client() -> ModuleStatus:
+ # VPN client é gerenciado pelo VPN module
+ if _check_systemd_active("wg-quick@wg0"):
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_internal_dns() -> ModuleStatus:
+ # Verifica se CoreDNS custom config existe
+ ok, _ = _quick_cmd(["kubectl", "get", "configmap", "coredns-custom", "-n", "kube-system"])
+ if ok:
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_kubernetes() -> ModuleStatus:
+ if not _check_systemd_active("kubelet"):
+ return "not_installed"
+ if not _check_systemd_active("containerd"):
+ return "error"
+ # Verifica se node está ready
+ ok, out = _quick_cmd(["kubectl", "get", "nodes", "-o", "jsonpath={.items[0].status.conditions[?(@.type=='Ready')].status}"])
+ if ok and "True" in out:
+ return "ok"
+ return "error"
+
+
+ def _validate_calico() -> ModuleStatus:
+ exists, running = _check_pods_running("kube-system")
+ if not exists:
+ return "not_installed"
+ # Verifica se calico-node está rodando
+ ok, out = _quick_cmd(["kubectl", "get", "pods", "-n", "kube-system", "-l", "k8s-app=calico-node", "-o", "jsonpath={.items[*].status.phase}"])
+ if ok and out and "Running" in out:
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_metallb() -> ModuleStatus:
+ exists, running = _check_pods_running("metallb-system")
+ if not exists:
+ return "not_installed"
+ if running:
+ return "ok"
+ return "error"
+
+
+ def _validate_traefik() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("traefik", "traefik")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("traefik")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_cert_manager() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("cert-manager", "cert-manager")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("cert-manager")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_istio() -> ModuleStatus:
+ exists, running = _check_pods_running("istio-system")
+ if not exists:
+ return "not_installed"
+ if running:
+ return "ok"
+ return "error"
+
+
+ def _validate_kong() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("kong", "kong")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("kong")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_minio() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("minio", "minio")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("minio")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_prometheus() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("kube-prometheus-stack", "observability")
+ if not exists:
+ return "not_installed"
+ ok, out = _quick_cmd(["kubectl", "get", "pods", "-n", "observability", "-l", "app.kubernetes.io/name=prometheus", "-o", "jsonpath={.items[*].status.phase}"])
+ if ok and out and "Running" in out:
+ return "ok"
+ if exists:
+ return "error"
+ return "not_installed"
+
+
+ def _validate_grafana() -> ModuleStatus:
+ ok, out = _quick_cmd(["kubectl", "get", "pods", "-n", "observability", "-l", "app.kubernetes.io/name=grafana", "-o", "jsonpath={.items[*].status.phase}"])
+ if ok and out and "Running" in out:
+ return "ok"
+ return "not_installed"
+
+
+ def _validate_secrets() -> ModuleStatus:
+ # Verifica Vault
+ exists_vault, running_vault = _check_pods_running("vault")
+ # Verifica External Secrets
+ exists_eso, running_eso = _check_pods_running("external-secrets")
+ # Verifica ClusterSecretStore
+ css_exists, css_ready = _check_cluster_secret_store()
+
+ if not exists_vault and not exists_eso:
+ return "not_installed"
+
+ if exists_vault and exists_eso and css_exists:
+ if running_vault and running_eso and css_ready:
+ return "ok"
+ return "error"
+
+ return "not_installed"
+
+
+ def _validate_loki() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("loki", "observability")
+ if not exists:
+ return "not_installed"
+ ok, out = _quick_cmd(["kubectl", "get", "pods", "-n", "observability", "-l", "app.kubernetes.io/name=loki", "-o", "jsonpath={.items[*].status.phase}"])
+ if ok and out and "Running" in out:
+ return "ok"
+ return "error"
+
+
+ def _validate_harbor() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("harbor", "harbor")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("harbor")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_harness() -> ModuleStatus:
+ exists, running = _check_pods_running("harness")
+ if not exists:
+ return "not_installed"
+ if running:
+ return "ok"
+ return "error"
+
+
+ def _validate_velero() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("velero", "velero")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("velero")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_kafka() -> ModuleStatus:
+ exists, deployed = _check_helm_deployed("kafka", "kafka")
+ if not exists:
+ return "not_installed"
+ _, running = _check_pods_running("kafka")
+ if deployed and running:
+ return "ok"
+ return "error"
+
+
+ def _validate_full_install() -> ModuleStatus:
+ # Full install é um meta-módulo
+ return "ok"
+
+
+ def get_all_module_statuses() -> dict[str, ModuleStatus]:
+ """Retorna o status de todos os módulos."""
+ from raijin_server.cli import MODULES
+ statuses = {}
+ for module in MODULES.keys():
+ statuses[module] = validate_module_status(module)
+ return statuses
+
+
  def run_health_check(module: str, ctx: ExecutionContext) -> bool:
  """Executa health check para um modulo especifico."""
  if module not in HEALTH_CHECKS:
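These are the helpers the CLI now imports for the interactive menu. A minimal usage sketch (assumes kubectl, helm and systemctl are on PATH; results depend on the host's actual state):

    from raijin_server.healthchecks import validate_module_status, get_all_module_statuses

    # Single module: returns "ok", "error" or "not_installed".
    print(validate_module_status("kubernetes"))

    # All modules at once; each entry runs quick kubectl/helm/systemctl probes.
    for module, status in get_all_module_statuses().items():
        print(f"{module}: {status}")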
raijin_server/modules/secrets.py CHANGED
@@ -138,12 +138,17 @@ def _get_minio_credentials(ctx: ExecutionContext) -> tuple[str, str]:
  )


- def _initialize_vault(ctx: ExecutionContext, vault_ns: str, node_ip: str) -> tuple[str, list[str]]:
- """Inicializa o Vault e retorna root token e unseal keys."""
- typer.echo("\n Inicializando Vault...")
+ def _initialize_vault(ctx: ExecutionContext, vault_ns: str, node_ip: str) -> tuple[str, str]:
+ """Inicializa o Vault com 1 key/1 threshold e retorna root token e unseal key."""
+ typer.echo("\nInicializando Vault...")

+ # Usa 1 key com threshold 1 para simplificar (produção pode usar 5/3)
  result = run_cmd(
- ["kubectl", "-n", vault_ns, "exec", "vault-0", "--", "vault", "operator", "init", "-format=json"],
+ [
+ "kubectl", "-n", vault_ns, "exec", "vault-0", "--",
+ "vault", "operator", "init",
+ "-key-shares=1", "-key-threshold=1", "-format=json"
+ ],
  ctx,
  check=False,
  )
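With -key-shares=1 and -key-threshold=1, the init output contains a single entry in unseal_keys_b64, which is what lets the new code return one unseal key instead of a list. A standalone sketch of the same call outside the CLI (assumes the release runs as pod vault-0 in a namespace named vault):

    # Hypothetical standalone equivalent of the init call above.
    import json
    import subprocess

    out = subprocess.run(
        ["kubectl", "-n", "vault", "exec", "vault-0", "--",
         "vault", "operator", "init",
         "-key-shares=1", "-key-threshold=1", "-format=json"],
        capture_output=True, text=True, check=True,
    ).stdout
    init_data = json.loads(out)
    root_token = init_data["root_token"]
    unseal_key = init_data["unseal_keys_b64"][0]  # single share, threshold 1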
@@ -155,28 +160,60 @@ def _initialize_vault(ctx: ExecutionContext, vault_ns: str, node_ip: str) -> tup
  import json
  init_data = json.loads(result.stdout)
  root_token = init_data["root_token"]
- unseal_keys = init_data["unseal_keys_b64"]
+ unseal_key = init_data["unseal_keys_b64"][0]

  # Salva keys localmente
  vault_keys_path = Path("/etc/vault/keys.json")
  vault_keys_path.parent.mkdir(parents=True, exist_ok=True)
  vault_keys_path.write_text(json.dumps(init_data, indent=2))
  typer.secho(f"\n✓ Vault keys salvas em {vault_keys_path}", fg=typer.colors.GREEN)
+
+ # Salva credenciais em secret K8s para uso do ESO
+ _save_vault_credentials_to_k8s(ctx, vault_ns, root_token, unseal_key)
+
  typer.secho("⚠️ IMPORTANTE: Guarde essas keys em local seguro!", fg=typer.colors.YELLOW, bold=True)

- return root_token, unseal_keys
+ return root_token, unseal_key


- def _unseal_vault(ctx: ExecutionContext, vault_ns: str, unseal_keys: list[str]) -> None:
- """Destrava o Vault usando as unseal keys."""
+ def _save_vault_credentials_to_k8s(ctx: ExecutionContext, vault_ns: str, root_token: str, unseal_key: str) -> None:
+ """Salva credenciais do Vault em secret K8s."""
+ typer.echo("Salvando credenciais do Vault em secret K8s...")
+
+ # Codifica em base64
+ token_b64 = base64.b64encode(root_token.encode()).decode()
+ key_b64 = base64.b64encode(unseal_key.encode()).decode()
+
+ secret_yaml = f"""apiVersion: v1
+ kind: Secret
+ metadata:
+ name: vault-init-credentials
+ namespace: {vault_ns}
+ type: Opaque
+ data:
+ root-token: {token_b64}
+ unseal-key: {key_b64}
+ """
+
+ secret_path = Path("/tmp/raijin-vault-credentials.yaml")
+ write_file(secret_path, secret_yaml, ctx)
+
+ run_cmd(
+ ["kubectl", "apply", "-f", str(secret_path)],
+ ctx,
+ )
+
+ typer.secho("✓ Credenciais salvas em secret vault-init-credentials.", fg=typer.colors.GREEN)
+
+
+ def _unseal_vault(ctx: ExecutionContext, vault_ns: str, unseal_key: str) -> None:
+ """Destrava o Vault usando a unseal key."""
  typer.echo("\nDesbloqueando Vault...")

- # Precisa de 3 keys das 5 geradas (threshold padrão)
- for i in range(3):
- run_cmd(
- ["kubectl", "-n", vault_ns, "exec", "vault-0", "--", "vault", "operator", "unseal", unseal_keys[i]],
- ctx,
- )
+ run_cmd(
+ ["kubectl", "-n", vault_ns, "exec", "vault-0", "--", "vault", "operator", "unseal", unseal_key],
+ ctx,
+ )

  typer.secho("✓ Vault desbloqueado.", fg=typer.colors.GREEN)

@@ -262,23 +299,21 @@ def _create_secretstore_example(ctx: ExecutionContext, vault_ns: str, eso_ns: st
  """Cria exemplo de ClusterSecretStore e ExternalSecret."""
  typer.echo("\nCriando exemplo de ClusterSecretStore...")

- secretstore_yaml = f"""apiVersion: external-secrets.io/v1beta1
+ secretstore_yaml = f"""apiVersion: external-secrets.io/v1
  kind: ClusterSecretStore
  metadata:
  name: vault-backend
  spec:
  provider:
  vault:
- server: "http://vault.{vault_ns}.svc.cluster.local:8200"
+ server: "http://vault.{vault_ns}.svc:8200"
  path: "secret"
  version: "v2"
  auth:
- kubernetes:
- mountPath: "kubernetes"
- role: "eso-role"
- serviceAccountRef:
- name: "external-secrets"
- namespace: "{eso_ns}"
+ tokenSecretRef:
+ namespace: "{vault_ns}"
+ name: "vault-init-credentials"
+ key: "root-token"
  """

  secretstore_path = Path("/tmp/raijin-vault-secretstore.yaml")
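The store now authenticates with the root token kept in the vault-init-credentials secret instead of Kubernetes auth. A small post-install check that the applied ClusterSecretStore reports Ready (a sketch; mirrors _check_cluster_secret_store() from healthchecks.py):

    # Hypothetical verification; requires kubectl access to the cluster.
    import subprocess

    result = subprocess.run(
        ["kubectl", "get", "clustersecretstore", "vault-backend",
         "-o", "jsonpath={.status.conditions[?(@.type=='Ready')].status}"],
        capture_output=True, text=True,
    )
    print("Ready" if result.stdout.strip() == "True" else "Not ready")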
@@ -310,7 +345,7 @@ def _create_example_secret(ctx: ExecutionContext, vault_ns: str, root_token: str
  typer.secho("✓ Secret 'secret/example' criado no Vault.", fg=typer.colors.GREEN)

  # Cria ExternalSecret de exemplo
- external_secret_yaml = """apiVersion: external-secrets.io/v1beta1
+ external_secret_yaml = """apiVersion: external-secrets.io/v1
  kind: ExternalSecret
  metadata:
  name: example-secret
@@ -379,7 +414,7 @@ def run(ctx: ExecutionContext) -> None:
  )
  node_ip = result.stdout.strip() if result.returncode == 0 else "192.168.1.81"

- minio_host = typer.prompt("MinIO host", default=f"{node_ip}:30900")
+ minio_host = typer.prompt("MinIO host (interno)", default="minio.minio.svc:9000")
  access_key, secret_key = _get_minio_credentials(ctx)

  # ========== HashiCorp Vault ==========
@@ -469,15 +504,14 @@ injector:
  if not ctx.dry_run:
  _wait_for_pods_ready(ctx, vault_ns, "app.kubernetes.io/name=vault", timeout=180)

- # Inicializa Vault
- root_token, unseal_keys = _initialize_vault(ctx, vault_ns, node_ip)
+ # Inicializa Vault (retorna root_token e unseal_key)
+ root_token, unseal_key = _initialize_vault(ctx, vault_ns, node_ip)

  # Destrava Vault
- _unseal_vault(ctx, vault_ns, unseal_keys)
+ _unseal_vault(ctx, vault_ns, unseal_key)

  # Configura Vault
  _enable_kv_secrets(ctx, vault_ns, root_token)
- _configure_kubernetes_auth(ctx, vault_ns, root_token)

  # ========== External Secrets Operator ==========
  typer.secho("\n== External Secrets Operator ==", fg=typer.colors.CYAN, bold=True)
@@ -545,8 +579,7 @@ resources:
  if not ctx.dry_run:
  _wait_for_pods_ready(ctx, eso_ns, "app.kubernetes.io/name=external-secrets", timeout=120)

- # Configura integração Vault + ESO
- _create_eso_policy_and_role(ctx, vault_ns, root_token, eso_ns)
+ # Cria ClusterSecretStore (usa tokenSecretRef, não precisa de Kubernetes auth)
  _create_secretstore_example(ctx, vault_ns, eso_ns, node_ip)
  _create_example_secret(ctx, vault_ns, root_token)

@@ -562,7 +595,7 @@ resources:

  typer.echo("\n2. Criar ExternalSecret:")
  typer.echo(" kubectl apply -f - <<EOF")
- typer.echo(" apiVersion: external-secrets.io/v1beta1")
+ typer.echo(" apiVersion: external-secrets.io/v1")
  typer.echo(" kind: ExternalSecret")
  typer.echo(" metadata:")
  typer.echo(" name: myapp-secret")
@@ -582,8 +615,14 @@ resources:
  typer.echo("\n3. Secret será sincronizado automaticamente!")
  typer.echo(" kubectl get secret myapp-secret -o yaml")

+ typer.secho("\n=== Recuperar Credenciais ===", fg=typer.colors.CYAN)
+ typer.echo("Via arquivo local:")
+ typer.echo(" cat /etc/vault/keys.json")
+ typer.echo("\nVia Kubernetes Secret:")
+ typer.echo(f" kubectl -n {vault_ns} get secret vault-init-credentials -o jsonpath='{{.data.root-token}}' | base64 -d")
+ typer.echo(f" kubectl -n {vault_ns} get secret vault-init-credentials -o jsonpath='{{.data.unseal-key}}' | base64 -d")
+
  typer.secho("\n⚠️ IMPORTANTE:", fg=typer.colors.YELLOW, bold=True)
- typer.echo(f"- Root token e unseal keys salvos em: /etc/vault/keys.json")
- typer.echo("- Faça backup dessas keys em local seguro!")
- typer.echo("- Após reboot do Vault, use: kubectl -n vault exec vault-0 -- vault operator unseal")
+ typer.echo("- Faça backup das credenciais em local seguro!")
+ typer.echo(f"- Após reboot do Vault, use: kubectl -n {vault_ns} exec vault-0 -- vault operator unseal <unseal-key>")

raijin_server-0.3.7.dist-info/METADATA → raijin_server-0.3.9.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: raijin-server
- Version: 0.3.7
+ Version: 0.3.9
  Summary: CLI para automacao de setup e hardening de servidores Ubuntu Server.
  Home-page: https://example.com/raijin-server
  Author: Equipe Raijin
raijin_server-0.3.7.dist-info/RECORD → raijin_server-0.3.9.dist-info/RECORD RENAMED
@@ -1,7 +1,7 @@
- raijin_server/__init__.py,sha256=kD4ksBx35-1QYm5EkRuSYWXJAwwJx_-sM9cp92bOhh4,94
- raijin_server/cli.py,sha256=WvZaPJ5AVjhzzs_jLLe2QGvVEH_VphRwnUkTMEgycbI,37320
+ raijin_server/__init__.py,sha256=fs3Gv4GHIXMcXgGtp4XuiBBUAmcJ5tMKl9xefHaKW1k,94
+ raijin_server/cli.py,sha256=IKakZrKe9dYdxWVqboK6f5Zu_ZcpiAZd8UJ4r8HWVlM,38031
  raijin_server/config.py,sha256=QNiEVvrbW56XgvNn5-h3bkJm46Xc8mjNqPbvixXD8N0,4829
- raijin_server/healthchecks.py,sha256=UHSRyeKTsCGeL_4dxDSGZ1t8164Q7wYTi1c3ZiU0cro,13536
+ raijin_server/healthchecks.py,sha256=46s260-Of0GbPqaZFkPrkx93vJ0dYvzAf8wJ2bJI0J8,24753
  raijin_server/minio_utils.py,sha256=NQxIGoVf4-eM8rNwEHdd4QFnEIh2OxY3DyOiFkznsYs,18299
  raijin_server/module_manager.py,sha256=Wmhj603CN0XGUVr7_Fo8CHzKd9yIbS9x5BJLqDj78kw,10259
  raijin_server/utils.py,sha256=9RnGnPoUTYOpMVRLNa4P4lIQrJNQLkSkPUxycZRGv78,20827
@@ -28,7 +28,7 @@ raijin_server/modules/minio.py,sha256=ZoxugJvvuGLzViDfEzrVCRZUevoiFwcEy0PNyn0My4
  raijin_server/modules/network.py,sha256=QRlYdcryCCPAWG3QQ_W7ld9gJgETI7H8gwntOU7UqFE,4818
  raijin_server/modules/prometheus.py,sha256=lyhaqLIfMl0GtQ2b2Hre7_A47HrHBB5gspmnWtwXZ4Y,21880
  raijin_server/modules/sanitize.py,sha256=_RnWn1DUuNrzx3NnKEbMvf5iicgjiN_ubwT59e0rYWY,6040
- raijin_server/modules/secrets.py,sha256=HOFk57LFyzW4XJ3c8uEEPRd5Dj_OYDI1NBVLzJMp0vY,18562
+ raijin_server/modules/secrets.py,sha256=3QzvFd4qH1hyOtbu3Cxyu4JUaYgWssxI-oZ4gS3HIP4,19924
  raijin_server/modules/ssh_hardening.py,sha256=Zd0dlylUBr01SkrI1CS05-0DB9xIto5rWH1bUVs80ow,5422
  raijin_server/modules/traefik.py,sha256=omziywss4o-8t64Kj-upLqbXdFYm2JwqOoOukDUmqxY,5008
  raijin_server/modules/velero.py,sha256=nH7WI145OOK-DZo_ZjNegEnwkppi8h98DeQaB5A_kVg,7161
@@ -39,9 +39,9 @@ raijin_server/scripts/checklist.sh,sha256=j6E0Kmk1EfjLvKK1VpCqzXJAXI_7Bm67LK4ndy
  raijin_server/scripts/install.sh,sha256=Y1ickbQ4siQ0NIPs6UgrqUr8WWy7U0LHmaTQbEgavoI,3949
  raijin_server/scripts/log_size_metric.sh,sha256=Iv4SsX8AuCYRou-klYn32mX41xB6j0xJGLBO6riw4rU,1208
  raijin_server/scripts/pre-deploy-check.sh,sha256=XqMo7IMIpwUHF17YEmU0-cVmTDMoCGMBFnmS39FidI4,4912
- raijin_server-0.3.7.dist-info/licenses/LICENSE,sha256=kJsMCjOiRZE0AQNtxWqBa32z9kMAaF4EUxyHj3hKaJo,1105
- raijin_server-0.3.7.dist-info/METADATA,sha256=KGvWKBpPSa4-6oSXFHJ0rykLQ-_VFftLobt0mFm-Co0,8829
- raijin_server-0.3.7.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- raijin_server-0.3.7.dist-info/entry_points.txt,sha256=3ZvxDX4pvcjkIRsXAJ69wIfVmKa78LKo-C3QhqN2KVM,56
- raijin_server-0.3.7.dist-info/top_level.txt,sha256=Yz1xneCRtsZOzbPIcTAcrSxd-1p80pohMXYAZ74dpok,14
- raijin_server-0.3.7.dist-info/RECORD,,
+ raijin_server-0.3.9.dist-info/licenses/LICENSE,sha256=kJsMCjOiRZE0AQNtxWqBa32z9kMAaF4EUxyHj3hKaJo,1105
+ raijin_server-0.3.9.dist-info/METADATA,sha256=dHrI-6NR6o_J1xfn84-92o7ZfeeH6bZYrt9b6wIiTQ0,8829
+ raijin_server-0.3.9.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ raijin_server-0.3.9.dist-info/entry_points.txt,sha256=3ZvxDX4pvcjkIRsXAJ69wIfVmKa78LKo-C3QhqN2KVM,56
+ raijin_server-0.3.9.dist-info/top_level.txt,sha256=Yz1xneCRtsZOzbPIcTAcrSxd-1p80pohMXYAZ74dpok,14
+ raijin_server-0.3.9.dist-info/RECORD,,