raijin-server 0.3.8__py3-none-any.whl → 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of raijin-server might be problematic. Click here for more details.
- raijin_server/__init__.py +1 -1
- raijin_server/cli.py +24 -4
- raijin_server/healthchecks.py +363 -1
- {raijin_server-0.3.8.dist-info → raijin_server-0.3.9.dist-info}/METADATA +1 -1
- {raijin_server-0.3.8.dist-info → raijin_server-0.3.9.dist-info}/RECORD +9 -9
- {raijin_server-0.3.8.dist-info → raijin_server-0.3.9.dist-info}/WHEEL +0 -0
- {raijin_server-0.3.8.dist-info → raijin_server-0.3.9.dist-info}/entry_points.txt +0 -0
- {raijin_server-0.3.8.dist-info → raijin_server-0.3.9.dist-info}/licenses/LICENSE +0 -0
- {raijin_server-0.3.8.dist-info → raijin_server-0.3.9.dist-info}/top_level.txt +0 -0
raijin_server/__init__.py
CHANGED
raijin_server/cli.py
CHANGED
|
@@ -47,7 +47,7 @@ from raijin_server.modules import (
|
|
|
47
47
|
)
|
|
48
48
|
from raijin_server.utils import ExecutionContext, logger, active_log_file, available_log_files, page_text, ensure_tool
|
|
49
49
|
from raijin_server.validators import validate_system_requirements, check_module_dependencies, MODULE_DEPENDENCIES
|
|
50
|
-
from raijin_server.healthchecks import run_health_check
|
|
50
|
+
from raijin_server.healthchecks import run_health_check, validate_module_status, get_all_module_statuses
|
|
51
51
|
from raijin_server.config import ConfigManager
|
|
52
52
|
from raijin_server import module_manager
|
|
53
53
|
|
|
@@ -212,7 +212,7 @@ def _run_module(ctx: typer.Context, name: str, skip_validation: bool = False) ->
|
|
|
212
212
|
|
|
213
213
|
def _print_banner() -> None:
|
|
214
214
|
console.print(Panel.fit(BANNER, style="bold blue"))
|
|
215
|
-
console.print("[bright_white]Automacao de setup e hardening para Ubuntu Server[/bright_white]\n")
|
|
215
|
+
console.print(f"[bright_white]Automacao de setup e hardening para Ubuntu Server[/bright_white] [dim]v{__version__}[/dim]\n")
|
|
216
216
|
|
|
217
217
|
|
|
218
218
|
def _select_state_dir() -> Path:
|
|
@@ -337,7 +337,7 @@ def _rollback_module(
|
|
|
337
337
|
typer.secho(f"Rollback finalizado (best-effort) para {name}\n", fg=typer.colors.GREEN)
|
|
338
338
|
|
|
339
339
|
|
|
340
|
-
def _render_menu(dry_run: bool) -> int:
|
|
340
|
+
def _render_menu(dry_run: bool, live_status: bool = True) -> int:
|
|
341
341
|
table = Table(
|
|
342
342
|
title="Selecione um modulo para executar",
|
|
343
343
|
header_style="bold white",
|
|
@@ -348,9 +348,29 @@ def _render_menu(dry_run: bool) -> int:
|
|
|
348
348
|
table.add_column("Status", style="green", no_wrap=True)
|
|
349
349
|
table.add_column("Modulo", style="bold green")
|
|
350
350
|
table.add_column("Descricao", style="white")
|
|
351
|
+
|
|
352
|
+
# Obtém status em tempo real se solicitado
|
|
353
|
+
if live_status:
|
|
354
|
+
console.print("[dim]Validando status dos módulos...[/dim]")
|
|
355
|
+
statuses = get_all_module_statuses()
|
|
356
|
+
else:
|
|
357
|
+
statuses = {}
|
|
358
|
+
|
|
351
359
|
for idx, name in enumerate(MODULES.keys(), start=1):
|
|
352
360
|
desc = MODULE_DESCRIPTIONS.get(name, "")
|
|
353
|
-
|
|
361
|
+
|
|
362
|
+
if live_status:
|
|
363
|
+
status_val = statuses.get(name, "not_installed")
|
|
364
|
+
if status_val == "ok":
|
|
365
|
+
status = "[green]✔[/green]"
|
|
366
|
+
elif status_val == "error":
|
|
367
|
+
status = "[red]✗[/red]"
|
|
368
|
+
else:
|
|
369
|
+
status = "[dim]-[/dim]"
|
|
370
|
+
else:
|
|
371
|
+
# Fallback para arquivo .done
|
|
372
|
+
status = "[green]✔[/green]" if _is_completed(name) else "[dim]-[/dim]"
|
|
373
|
+
|
|
354
374
|
table.add_row(f"{idx}", status, name, desc)
|
|
355
375
|
|
|
356
376
|
exit_idx = len(MODULES) + 1
|
raijin_server/healthchecks.py
CHANGED
|
@@ -375,10 +375,372 @@ HEALTH_CHECKS = {
|
|
|
375
375
|
"kafka": lambda ctx: verify_helm_chart("kafka", "kafka", ctx),
|
|
376
376
|
"cert_manager": verify_cert_manager,
|
|
377
377
|
"secrets": verify_secrets,
|
|
378
|
-
|
|
379
378
|
}
|
|
380
379
|
|
|
381
380
|
|
|
381
|
+
# =============================================================================
|
|
382
|
+
# STATUS VALIDATION - Validação em tempo real para o menu interativo
|
|
383
|
+
# =============================================================================
|
|
384
|
+
|
|
385
|
+
def _quick_cmd(cmd: list[str], timeout: int = 5) -> tuple[bool, str]:
|
|
386
|
+
"""Executa comando rápido e retorna (sucesso, output)."""
|
|
387
|
+
try:
|
|
388
|
+
result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
|
|
389
|
+
return result.returncode == 0, result.stdout.strip()
|
|
390
|
+
except Exception as e:
|
|
391
|
+
return False, str(e)
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def _check_namespace_exists(ns: str) -> bool:
    """Return True when ``kubectl get ns <ns>`` exits successfully."""
    succeeded, _ = _quick_cmd(["kubectl", "get", "ns", ns])
    return succeeded
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
def _check_pods_running(ns: str) -> tuple[bool, bool]:
    """Return ``(namespace_exists, all_pods_running_or_succeeded)``."""
    if not _check_namespace_exists(ns):
        return False, False
    succeeded, output = _quick_cmd(
        ["kubectl", "get", "pods", "-n", ns, "-o", "jsonpath={.items[*].status.phase}"]
    )
    if not (succeeded and output):
        # Namespace exists but has no pods, or the query itself failed.
        return True, False
    healthy = ("Running", "Succeeded")
    return True, all(phase in healthy for phase in output.split())
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
def _check_helm_deployed(release: str, ns: str) -> tuple[bool, bool]:
    """Return ``(release_exists, status == "deployed")`` for a Helm release."""
    import json

    succeeded, output = _quick_cmd(
        ["helm", "status", release, "-n", ns, "--output", "json"], timeout=10
    )
    if not succeeded:
        return False, False
    try:
        # Extraction stays inside the try: malformed JSON (or a non-dict
        # payload) is reported as "not found" rather than crashing.
        status_str = json.loads(output).get("info", {}).get("status", "")
    except Exception:
        return False, False
    return True, status_str == "deployed"
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def _check_systemd_active(service: str) -> bool:
    """Return True when systemd reports the unit state as "active"."""
    succeeded, state = _quick_cmd(["systemctl", "is-active", service])
    return succeeded and state == "active"
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
def _check_crd_exists(crd: str) -> bool:
    """Return True when the given CustomResourceDefinition is present."""
    succeeded, _ = _quick_cmd(["kubectl", "get", "crd", crd])
    return succeeded
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
def _check_cluster_secret_store() -> tuple[bool, bool]:
    """Return ``(query_succeeded, some ClusterSecretStore is Ready)``."""
    succeeded, output = _quick_cmd([
        "kubectl", "get", "clustersecretstore", "-o",
        "jsonpath={.items[*].status.conditions[?(@.type=='Ready')].status}",
    ])
    if not succeeded:
        return False, False
    return True, "True" in output
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
# Status values rendered in the interactive menu: "ok" = ✓, "error" = ✗,
# "not_installed" = -.  Kept as plain strings (not an Enum) so values stay
# trivially printable/serializable.
ModuleStatus = str  # "ok" | "error" | "not_installed"
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
def validate_module_status(module: str) -> ModuleStatus:
    """Validate the live status of a single module.

    Dispatches to a per-module probe.  Returns "ok" / "error" from the probe,
    "error" if the probe itself raises, and "not_installed" for unknown
    module names.  The table is built lazily inside the function because the
    ``_validate_*`` probes are defined further down in this file.
    """
    validators = {
        "sanitize": _validate_sanitize,
        "bootstrap": _validate_bootstrap,
        "ssh_hardening": _validate_ssh_hardening,
        "hardening": _validate_hardening,
        "network": _validate_network,
        "essentials": _validate_essentials,
        "firewall": _validate_firewall,
        "vpn": _validate_vpn,
        "vpn_client": _validate_vpn_client,
        "internal_dns": _validate_internal_dns,
        "kubernetes": _validate_kubernetes,
        "calico": _validate_calico,
        "metallb": _validate_metallb,
        "traefik": _validate_traefik,
        "cert_manager": _validate_cert_manager,
        "istio": _validate_istio,
        "kong": _validate_kong,
        "minio": _validate_minio,
        "prometheus": _validate_prometheus,
        "grafana": _validate_grafana,
        "secrets": _validate_secrets,
        "loki": _validate_loki,
        "harbor": _validate_harbor,
        "harness": _validate_harness,
        "velero": _validate_velero,
        "kafka": _validate_kafka,
        "full_install": _validate_full_install,
    }

    validator = validators.get(module)
    if validator:
        try:
            return validator()
        except Exception:
            # A crashing probe is reported as a module error, never raised
            # into the interactive menu.
            return "error"
    return "not_installed"
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def _validate_sanitize() -> ModuleStatus:
    """Sanitize is idempotent, so it is always reported as OK."""
    return "ok"
|
|
494
|
+
|
|
495
|
+
|
|
496
|
+
def _validate_bootstrap() -> ModuleStatus:
|
|
497
|
+
# Verifica se ferramentas estão instaladas
|
|
498
|
+
tools = ["helm", "kubectl", "containerd"]
|
|
499
|
+
for tool in tools:
|
|
500
|
+
ok, _ = _quick_cmd(["which", tool])
|
|
501
|
+
if not ok:
|
|
502
|
+
return "not_installed"
|
|
503
|
+
return "ok"
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
def _validate_ssh_hardening() -> ModuleStatus:
    """Report "ok" when either the "ssh" or "sshd" unit is active."""
    active = _check_systemd_active("ssh") or _check_systemd_active("sshd")
    return "ok" if active else "not_installed"
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
def _validate_hardening() -> ModuleStatus:
    """Report "ok" when fail2ban is active, else "not_installed"."""
    return "ok" if _check_systemd_active("fail2ban") else "not_installed"
|
|
517
|
+
|
|
518
|
+
|
|
519
|
+
def _validate_network() -> ModuleStatus:
    """Report "ok" when the host reports a non-empty hostname."""
    succeeded, name = _quick_cmd(["hostname"])
    return "ok" if succeeded and name else "not_installed"
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
def _validate_essentials() -> ModuleStatus:
    """Report "ok" when systemd-timesyncd NTP synchronisation is enabled."""
    succeeded, ntp = _quick_cmd(["timedatectl", "show", "-p", "NTP", "--value"])
    return "ok" if succeeded and ntp == "yes" else "not_installed"
|
|
533
|
+
|
|
534
|
+
|
|
535
|
+
def _validate_firewall() -> ModuleStatus:
    """Report "ok" when the ufw unit is active, else "not_installed"."""
    return "ok" if _check_systemd_active("ufw") else "not_installed"
|
|
539
|
+
|
|
540
|
+
|
|
541
|
+
def _validate_vpn() -> ModuleStatus:
    """Report "ok" when the wg0 WireGuard interface unit is active."""
    return "ok" if _check_systemd_active("wg-quick@wg0") else "not_installed"
|
|
545
|
+
|
|
546
|
+
|
|
547
|
+
def _validate_vpn_client() -> ModuleStatus:
    """Same probe as the VPN module: the client shares the wg0 unit."""
    return "ok" if _check_systemd_active("wg-quick@wg0") else "not_installed"
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
def _validate_internal_dns() -> ModuleStatus:
    """Report "ok" when the custom CoreDNS ConfigMap is present."""
    found, _ = _quick_cmd(
        ["kubectl", "get", "configmap", "coredns-custom", "-n", "kube-system"]
    )
    return "ok" if found else "not_installed"
|
|
560
|
+
|
|
561
|
+
|
|
562
|
+
def _validate_kubernetes() -> ModuleStatus:
    """Check kubelet/containerd services and the node Ready condition."""
    if not _check_systemd_active("kubelet"):
        return "not_installed"
    if not _check_systemd_active("containerd"):
        return "error"
    # The first node must report the Ready condition as "True".
    succeeded, output = _quick_cmd(
        ["kubectl", "get", "nodes", "-o",
         "jsonpath={.items[0].status.conditions[?(@.type=='Ready')].status}"]
    )
    return "ok" if succeeded and "True" in output else "error"
|
|
572
|
+
|
|
573
|
+
|
|
574
|
+
def _validate_calico() -> ModuleStatus:
    """Report "ok" when calico-node pods are Running in kube-system."""
    ns_exists, _ = _check_pods_running("kube-system")
    if not ns_exists:
        return "not_installed"
    succeeded, phases = _quick_cmd(
        ["kubectl", "get", "pods", "-n", "kube-system", "-l",
         "k8s-app=calico-node", "-o", "jsonpath={.items[*].status.phase}"]
    )
    return "ok" if succeeded and phases and "Running" in phases else "not_installed"
|
|
583
|
+
|
|
584
|
+
|
|
585
|
+
def _validate_metallb() -> ModuleStatus:
    """Check the metallb-system namespace and its pod health."""
    found, healthy = _check_pods_running("metallb-system")
    if not found:
        return "not_installed"
    return "ok" if healthy else "error"
|
|
592
|
+
|
|
593
|
+
|
|
594
|
+
def _validate_traefik() -> ModuleStatus:
    """Check the traefik Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("traefik", "traefik")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("traefik")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
602
|
+
|
|
603
|
+
|
|
604
|
+
def _validate_cert_manager() -> ModuleStatus:
    """Check the cert-manager Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("cert-manager", "cert-manager")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("cert-manager")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
612
|
+
|
|
613
|
+
|
|
614
|
+
def _validate_istio() -> ModuleStatus:
    """Check the istio-system namespace and its pod health."""
    found, healthy = _check_pods_running("istio-system")
    if not found:
        return "not_installed"
    return "ok" if healthy else "error"
|
|
621
|
+
|
|
622
|
+
|
|
623
|
+
def _validate_kong() -> ModuleStatus:
    """Check the kong Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("kong", "kong")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("kong")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
def _validate_minio() -> ModuleStatus:
    """Check the minio Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("minio", "minio")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("minio")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
641
|
+
|
|
642
|
+
|
|
643
|
+
def _validate_prometheus() -> ModuleStatus:
    """Check the kube-prometheus-stack release and its Prometheus pods.

    Returns "not_installed" when the Helm release is absent, "ok" when a
    Prometheus pod is Running, otherwise "error".
    """
    exists, _deployed = _check_helm_deployed("kube-prometheus-stack", "observability")
    if not exists:
        return "not_installed"
    ok, out = _quick_cmd(
        ["kubectl", "get", "pods", "-n", "observability", "-l",
         "app.kubernetes.io/name=prometheus", "-o",
         "jsonpath={.items[*].status.phase}"]
    )
    if ok and out and "Running" in out:
        return "ok"
    # `exists` is necessarily True here, so the original trailing
    # `return "not_installed"` branch was unreachable dead code.
    return "error"
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
def _validate_grafana() -> ModuleStatus:
    """Report "ok" when a Grafana pod is Running in observability."""
    succeeded, phases = _quick_cmd(
        ["kubectl", "get", "pods", "-n", "observability", "-l",
         "app.kubernetes.io/name=grafana", "-o",
         "jsonpath={.items[*].status.phase}"]
    )
    return "ok" if succeeded and phases and "Running" in phases else "not_installed"
|
|
660
|
+
|
|
661
|
+
|
|
662
|
+
def _validate_secrets() -> ModuleStatus:
    """Check Vault, External Secrets Operator and the ClusterSecretStore.

    "ok" only when all three components exist and are healthy; "error" when
    they all exist but any is unhealthy; "not_installed" otherwise.
    """
    vault_ns, vault_healthy = _check_pods_running("vault")
    eso_ns, eso_healthy = _check_pods_running("external-secrets")
    store_found, store_ready = _check_cluster_secret_store()

    if not (vault_ns or eso_ns):
        return "not_installed"

    if vault_ns and eso_ns and store_found:
        all_healthy = vault_healthy and eso_healthy and store_ready
        return "ok" if all_healthy else "error"

    return "not_installed"
|
|
679
|
+
|
|
680
|
+
|
|
681
|
+
def _validate_loki() -> ModuleStatus:
    """Check the loki Helm release and its pods in observability."""
    found = _check_helm_deployed("loki", "observability")[0]
    if not found:
        return "not_installed"
    succeeded, phases = _quick_cmd(
        ["kubectl", "get", "pods", "-n", "observability", "-l",
         "app.kubernetes.io/name=loki", "-o",
         "jsonpath={.items[*].status.phase}"]
    )
    return "ok" if succeeded and phases and "Running" in phases else "error"
|
|
689
|
+
|
|
690
|
+
|
|
691
|
+
def _validate_harbor() -> ModuleStatus:
    """Check the harbor Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("harbor", "harbor")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("harbor")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
699
|
+
|
|
700
|
+
|
|
701
|
+
def _validate_harness() -> ModuleStatus:
    """Check the harness namespace and its pod health."""
    found, healthy = _check_pods_running("harness")
    if not found:
        return "not_installed"
    return "ok" if healthy else "error"
|
|
708
|
+
|
|
709
|
+
|
|
710
|
+
def _validate_velero() -> ModuleStatus:
    """Check the velero Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("velero", "velero")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("velero")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
718
|
+
|
|
719
|
+
|
|
720
|
+
def _validate_kafka() -> ModuleStatus:
    """Check the kafka Helm release and its pods."""
    found, is_deployed = _check_helm_deployed("kafka", "kafka")
    if not found:
        return "not_installed"
    pods_healthy = _check_pods_running("kafka")[1]
    return "ok" if is_deployed and pods_healthy else "error"
|
|
728
|
+
|
|
729
|
+
|
|
730
|
+
def _validate_full_install() -> ModuleStatus:
    """full_install is a meta-module; it is always reported as OK."""
    return "ok"
|
|
733
|
+
|
|
734
|
+
|
|
735
|
+
def get_all_module_statuses() -> dict[str, ModuleStatus]:
    """Validate every registered module and map its name to a live status."""
    # Imported locally to avoid a circular import with the CLI module.
    from raijin_server.cli import MODULES

    return {name: validate_module_status(name) for name in MODULES.keys()}
|
|
742
|
+
|
|
743
|
+
|
|
382
744
|
def run_health_check(module: str, ctx: ExecutionContext) -> bool:
|
|
383
745
|
"""Executa health check para um modulo especifico."""
|
|
384
746
|
if module not in HEALTH_CHECKS:
|
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
raijin_server/__init__.py,sha256=
|
|
2
|
-
raijin_server/cli.py,sha256=
|
|
1
|
+
raijin_server/__init__.py,sha256=fs3Gv4GHIXMcXgGtp4XuiBBUAmcJ5tMKl9xefHaKW1k,94
|
|
2
|
+
raijin_server/cli.py,sha256=IKakZrKe9dYdxWVqboK6f5Zu_ZcpiAZd8UJ4r8HWVlM,38031
|
|
3
3
|
raijin_server/config.py,sha256=QNiEVvrbW56XgvNn5-h3bkJm46Xc8mjNqPbvixXD8N0,4829
|
|
4
|
-
raijin_server/healthchecks.py,sha256=
|
|
4
|
+
raijin_server/healthchecks.py,sha256=46s260-Of0GbPqaZFkPrkx93vJ0dYvzAf8wJ2bJI0J8,24753
|
|
5
5
|
raijin_server/minio_utils.py,sha256=NQxIGoVf4-eM8rNwEHdd4QFnEIh2OxY3DyOiFkznsYs,18299
|
|
6
6
|
raijin_server/module_manager.py,sha256=Wmhj603CN0XGUVr7_Fo8CHzKd9yIbS9x5BJLqDj78kw,10259
|
|
7
7
|
raijin_server/utils.py,sha256=9RnGnPoUTYOpMVRLNa4P4lIQrJNQLkSkPUxycZRGv78,20827
|
|
@@ -39,9 +39,9 @@ raijin_server/scripts/checklist.sh,sha256=j6E0Kmk1EfjLvKK1VpCqzXJAXI_7Bm67LK4ndy
|
|
|
39
39
|
raijin_server/scripts/install.sh,sha256=Y1ickbQ4siQ0NIPs6UgrqUr8WWy7U0LHmaTQbEgavoI,3949
|
|
40
40
|
raijin_server/scripts/log_size_metric.sh,sha256=Iv4SsX8AuCYRou-klYn32mX41xB6j0xJGLBO6riw4rU,1208
|
|
41
41
|
raijin_server/scripts/pre-deploy-check.sh,sha256=XqMo7IMIpwUHF17YEmU0-cVmTDMoCGMBFnmS39FidI4,4912
|
|
42
|
-
raijin_server-0.3.
|
|
43
|
-
raijin_server-0.3.
|
|
44
|
-
raijin_server-0.3.
|
|
45
|
-
raijin_server-0.3.
|
|
46
|
-
raijin_server-0.3.
|
|
47
|
-
raijin_server-0.3.
|
|
42
|
+
raijin_server-0.3.9.dist-info/licenses/LICENSE,sha256=kJsMCjOiRZE0AQNtxWqBa32z9kMAaF4EUxyHj3hKaJo,1105
|
|
43
|
+
raijin_server-0.3.9.dist-info/METADATA,sha256=dHrI-6NR6o_J1xfn84-92o7ZfeeH6bZYrt9b6wIiTQ0,8829
|
|
44
|
+
raijin_server-0.3.9.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
45
|
+
raijin_server-0.3.9.dist-info/entry_points.txt,sha256=3ZvxDX4pvcjkIRsXAJ69wIfVmKa78LKo-C3QhqN2KVM,56
|
|
46
|
+
raijin_server-0.3.9.dist-info/top_level.txt,sha256=Yz1xneCRtsZOzbPIcTAcrSxd-1p80pohMXYAZ74dpok,14
|
|
47
|
+
raijin_server-0.3.9.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|