raijin-server 0.3.3-py3-none-any.whl → 0.3.6-py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

This version of raijin-server has been flagged as a potentially problematic release.

@@ -103,6 +103,15 @@ def run(ctx: ExecutionContext) -> None:
 
     retention_hours = typer.prompt("Retencao de logs em horas", default="168")
     persistence_size = typer.prompt("Tamanho do storage", default="20Gi")
+
+    # NodePort para acesso via VPN
+    enable_nodeport = typer.confirm(
+        "Habilitar NodePort para acesso via VPN?",
+        default=True
+    )
+    nodeport_port = 30310
+    if enable_nodeport:
+        nodeport_port = int(typer.prompt("Porta NodePort", default="30310"))
 
     node_name = _detect_node_name(ctx)
 
@@ -147,6 +156,15 @@ promtail:
       memory: 256Mi
 """
 
+    # Adiciona NodePort se habilitado
+    if enable_nodeport:
+        values_yaml += f"""
+loki:
+  service:
+    type: NodePort
+    nodePort: {nodeport_port}
+"""
+
     values_path = Path("/tmp/raijin-loki-values.yaml")
     write_file(values_path, values_yaml, ctx)
 
@@ -167,7 +185,13 @@ promtail:
     _wait_for_loki_ready(ctx)
 
     typer.secho("\n✓ Loki Stack instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
-    typer.echo("\nPara acessar Loki via port-forward:")
-    typer.echo("  kubectl -n observability port-forward svc/loki 3100:3100")
-    typer.echo("\nPara verificar logs:")
-    typer.echo("  curl http://localhost:3100/ready")
+
+    if enable_nodeport:
+        typer.secho("\n🔒 Acesso via VPN + NodePort:", fg=typer.colors.CYAN, bold=True)
+        typer.echo(f"\n  curl http://<VPN_SERVER_IP>:{nodeport_port}/ready")
+        typer.echo(f"\n  Exemplo: curl http://10.8.0.1:{nodeport_port}/ready")
+    else:
+        typer.echo("\nPara acessar Loki via port-forward:")
+        typer.echo("  kubectl -n observability port-forward svc/loki 3100:3100")
+        typer.echo("\nPara verificar logs:")
+        typer.echo("  curl http://localhost:3100/ready")
@@ -449,6 +449,18 @@ def run(ctx: ExecutionContext) -> None:
 
     enable_console = typer.confirm("Habilitar Console Web?", default=True)
 
+    # NodePort para acesso via VPN
+    enable_nodeport = typer.confirm(
+        "Habilitar NodePort para acesso via VPN?",
+        default=True
+    )
+    api_nodeport = 30900
+    console_nodeport = 30901
+    if enable_nodeport:
+        api_nodeport = int(typer.prompt("Porta NodePort para API S3", default="30900"))
+        if enable_console:
+            console_nodeport = int(typer.prompt("Porta NodePort para Console", default="30901"))
+
     node_name = _detect_node_name(ctx)
 
     values = [
@@ -486,13 +498,27 @@ def run(ctx: ExecutionContext) -> None:
     if is_distributed:
         values.append(f"replicas={replicas}")
 
-    # Console
-    if enable_console:
+    # Service type (NodePort ou ClusterIP)
+    if enable_nodeport:
         values.extend([
-            "consoleService.type=ClusterIP",
-            "consoleIngress.enabled=false",
+            "service.type=NodePort",
+            f"service.nodePort={api_nodeport}",
         ])
 
+    # Console
+    if enable_console:
+        if enable_nodeport:
+            values.extend([
+                "consoleService.type=NodePort",
+                f"consoleService.nodePort={console_nodeport}",
+                "consoleIngress.enabled=false",
+            ])
+        else:
+            values.extend([
+                "consoleService.type=ClusterIP",
+                "consoleIngress.enabled=false",
+            ])
+
     helm_upgrade_install(
         release="minio",
         chart="minio",
@@ -514,14 +540,21 @@ def run(ctx: ExecutionContext) -> None:
     typer.echo(f"  Root Password: {root_password}")
 
     if enable_console:
-        typer.secho("\n🔒 Acesso Seguro ao MinIO Console via VPN:", fg=typer.colors.CYAN, bold=True)
-        typer.echo("\n1. Configure VPN (se ainda não tiver):")
-        typer.echo("  sudo raijin vpn")
-        typer.echo("\n2. Conecte via WireGuard")
-        typer.echo("\n3. Faça port-forward:")
-        typer.echo("  kubectl -n minio port-forward svc/minio-console 9001:9001")
-        typer.echo("\n4. Acesse no navegador:")
-        typer.echo("  http://localhost:9001")
-
-    typer.echo("\nPara acessar a API S3 (port-forward):")
-    typer.echo("  kubectl -n minio port-forward svc/minio 9000:9000")
+        if enable_nodeport:
+            typer.secho("\n🔒 Acesso ao MinIO Console via VPN:", fg=typer.colors.CYAN, bold=True)
+            typer.echo("\n1. Configure VPN (se ainda não tiver):")
+            typer.echo("  sudo raijin vpn")
+            typer.echo("\n2. Conecte via WireGuard")
+            typer.echo("\n3. Acesse no navegador (IP da VPN):")
+            typer.echo(f"  http://<VPN_SERVER_IP>:{console_nodeport}")
+            typer.echo("\n  Exemplo: http://10.8.0.1:{}".format(console_nodeport))
+        else:
+            typer.secho("\n🔒 Acesso via Port-Forward:", fg=typer.colors.CYAN, bold=True)
+            typer.echo("\n  kubectl -n minio port-forward svc/minio-console 9001:9001")
+            typer.echo("\n  Acesse: http://localhost:9001")
+
+    if enable_nodeport:
+        typer.echo(f"\nAPI S3 via VPN: http://<VPN_SERVER_IP>:{api_nodeport}")
+    else:
+        typer.echo("\nPara acessar a API S3 (port-forward):")
+        typer.echo("  kubectl -n minio port-forward svc/minio 9000:9000")
@@ -428,6 +428,17 @@ def run(ctx: ExecutionContext) -> None:
     enable_persistence = typer.confirm(
         "Habilitar PVC para Prometheus e Alertmanager?", default=bool(default_sc)
     )
+
+    # NodePort para acesso via VPN
+    enable_nodeport = typer.confirm(
+        "Habilitar NodePort para acesso via VPN?",
+        default=True
+    )
+    prometheus_nodeport = 30090
+    alertmanager_nodeport = 30093
+    if enable_nodeport:
+        prometheus_nodeport = int(typer.prompt("Porta NodePort para Prometheus", default="30090"))
+        alertmanager_nodeport = int(typer.prompt("Porta NodePort para Alertmanager", default="30093"))
 
     # Se habilitou PVC, garante que existe StorageClass disponivel
     if enable_persistence:
@@ -442,31 +453,42 @@ def run(ctx: ExecutionContext) -> None:
         "prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false",
         "prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false",
         "defaultRules.create=true",
-        # Tolerations for control-plane nodes
+        # Tolerations for control-plane nodes - Prometheus
         "prometheus.prometheusSpec.tolerations[0].key=node-role.kubernetes.io/control-plane",
         "prometheus.prometheusSpec.tolerations[0].operator=Exists",
         "prometheus.prometheusSpec.tolerations[0].effect=NoSchedule",
         "prometheus.prometheusSpec.tolerations[1].key=node-role.kubernetes.io/master",
         "prometheus.prometheusSpec.tolerations[1].operator=Exists",
         "prometheus.prometheusSpec.tolerations[1].effect=NoSchedule",
+        # Tolerations - Alertmanager
         "alertmanager.alertmanagerSpec.tolerations[0].key=node-role.kubernetes.io/control-plane",
         "alertmanager.alertmanagerSpec.tolerations[0].operator=Exists",
         "alertmanager.alertmanagerSpec.tolerations[0].effect=NoSchedule",
         "alertmanager.alertmanagerSpec.tolerations[1].key=node-role.kubernetes.io/master",
         "alertmanager.alertmanagerSpec.tolerations[1].operator=Exists",
         "alertmanager.alertmanagerSpec.tolerations[1].effect=NoSchedule",
+        # Tolerations - Prometheus Operator
         "prometheusOperator.tolerations[0].key=node-role.kubernetes.io/control-plane",
         "prometheusOperator.tolerations[0].operator=Exists",
         "prometheusOperator.tolerations[0].effect=NoSchedule",
         "prometheusOperator.tolerations[1].key=node-role.kubernetes.io/master",
         "prometheusOperator.tolerations[1].operator=Exists",
         "prometheusOperator.tolerations[1].effect=NoSchedule",
+        # Tolerations - Admission Webhooks (Jobs que criam/atualizam webhooks)
+        "prometheusOperator.admissionWebhooks.patch.tolerations[0].key=node-role.kubernetes.io/control-plane",
+        "prometheusOperator.admissionWebhooks.patch.tolerations[0].operator=Exists",
+        "prometheusOperator.admissionWebhooks.patch.tolerations[0].effect=NoSchedule",
+        "prometheusOperator.admissionWebhooks.patch.tolerations[1].key=node-role.kubernetes.io/master",
+        "prometheusOperator.admissionWebhooks.patch.tolerations[1].operator=Exists",
+        "prometheusOperator.admissionWebhooks.patch.tolerations[1].effect=NoSchedule",
+        # Tolerations - kube-state-metrics
         "kube-state-metrics.tolerations[0].key=node-role.kubernetes.io/control-plane",
         "kube-state-metrics.tolerations[0].operator=Exists",
         "kube-state-metrics.tolerations[0].effect=NoSchedule",
         "kube-state-metrics.tolerations[1].key=node-role.kubernetes.io/master",
         "kube-state-metrics.tolerations[1].operator=Exists",
         "kube-state-metrics.tolerations[1].effect=NoSchedule",
+        # Tolerations - node-exporter
         "prometheus-node-exporter.tolerations[0].key=node-role.kubernetes.io/control-plane",
         "prometheus-node-exporter.tolerations[0].operator=Exists",
         "prometheus-node-exporter.tolerations[0].effect=NoSchedule",
@@ -477,8 +499,15 @@ def run(ctx: ExecutionContext) -> None:
         f"prometheus.prometheusSpec.nodeSelector.kubernetes\\.io/hostname={node_name}",
         f"alertmanager.alertmanagerSpec.nodeSelector.kubernetes\\.io/hostname={node_name}",
         f"prometheusOperator.nodeSelector.kubernetes\\.io/hostname={node_name}",
-    ]
-
+    ]
+    # NodePort para acesso via VPN
+    if enable_nodeport:
+        values.extend([
+            "prometheus.service.type=NodePort",
+            f"prometheus.service.nodePort={prometheus_nodeport}",
+            "alertmanager.service.type=NodePort",
+            f"alertmanager.service.nodePort={alertmanager_nodeport}",
+        ])
     extra_args = ["--wait", "--timeout", "10m", "--atomic"]
 
     chart_version = typer.prompt(
@@ -531,7 +560,18 @@ def run(ctx: ExecutionContext) -> None:
     _wait_for_prometheus_ready(ctx, namespace)
 
     typer.secho("\n✓ kube-prometheus-stack instalado com sucesso.", fg=typer.colors.GREEN, bold=True)
-    typer.echo("\nPara acessar Prometheus via port-forward:")
-    typer.echo(f"  kubectl -n {namespace} port-forward svc/kube-prometheus-stack-prometheus 9090:9090")
-    typer.echo("\nPara acessar Alertmanager via port-forward:")
-    typer.echo(f"  kubectl -n {namespace} port-forward svc/kube-prometheus-stack-alertmanager 9093:9093")
+
+    if enable_nodeport:
+        typer.secho("\n🔒 Acesso via VPN + NodePort:", fg=typer.colors.CYAN, bold=True)
+        typer.echo("\n1. Configure VPN: sudo raijin vpn")
+        typer.echo("2. Conecte via WireGuard")
+        typer.echo("\nPrometheus:")
+        typer.echo(f"  http://<VPN_SERVER_IP>:{prometheus_nodeport}")
+        typer.echo("\nAlertmanager:")
+        typer.echo(f"  http://<VPN_SERVER_IP>:{alertmanager_nodeport}")
+        typer.echo("\nExemplo: http://10.8.0.1:{} (Prometheus)".format(prometheus_nodeport))
+    else:
+        typer.echo("\nPara acessar Prometheus via port-forward:")
+        typer.echo(f"  kubectl -n {namespace} port-forward svc/kube-prometheus-stack-prometheus 9090:9090")
+        typer.echo("\nPara acessar Alertmanager via port-forward:")
+        typer.echo(f"  kubectl -n {namespace} port-forward svc/kube-prometheus-stack-alertmanager 9093:9093")