k8s-helper-cli 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- k8s_helper/__init__.py +1 -1
- k8s_helper/cli.py +480 -6
- k8s_helper/core.py +680 -4
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/METADATA +123 -3
- k8s_helper_cli-0.2.2.dist-info/RECORD +11 -0
- k8s_helper_cli-0.2.0.dist-info/RECORD +0 -11
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/WHEEL +0 -0
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/entry_points.txt +0 -0
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/top_level.txt +0 -0
k8s_helper/__init__.py
CHANGED
k8s_helper/cli.py
CHANGED
@@ -8,6 +8,8 @@ from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
 from rich.text import Text
+import time
+import time
 
 from .core import K8sClient
 from .config import get_config
@@ -21,7 +23,8 @@ from .utils import (
     validate_name,
     validate_image,
     parse_env_vars,
-    parse_labels
+    parse_labels,
+    format_age
 )
 from . import __version__
 
@@ -451,10 +454,15 @@ def apply(
     service_type: str = typer.Option("ClusterIP", help="Service type"),
     env: Optional[str] = typer.Option(None, "--env", "-e", help="Environment variables"),
     labels: Optional[str] = typer.Option(None, "--labels", "-l", help="Labels"),
+    init_container: Optional[str] = typer.Option(None, "--init-container", help="Init container (name:image:command)"),
+    init_env: Optional[str] = typer.Option(None, "--init-env", help="Init container environment variables"),
+    pvc: Optional[str] = typer.Option(None, "--pvc", help="PVC to mount (name:mount_path)"),
+    secret: Optional[str] = typer.Option(None, "--secret", help="Secret to mount (name:mount_path)"),
     namespace: Optional[str] = namespace_option,
-    wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for deployment to be ready")
+    wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for deployment to be ready"),
+    show_url: bool = typer.Option(True, "--show-url/--no-show-url", help="Show service URL after deployment")
 ):
-    """Deploy an application (deployment + service)"""
+    """Deploy an application (deployment + service) with advanced features"""
     if not validate_name(name):
         console.print(f"❌ Invalid application name: {name}")
         return
@@ -471,6 +479,80 @@ def apply(
 
     console.print(f"🚀 Deploying application: {name}")
 
+    # Prepare init containers
+    init_containers = []
+    if init_container:
+        try:
+            parts = init_container.split(":")
+            if len(parts) >= 2:
+                init_name, init_image = parts[0], parts[1]
+                init_command = parts[2].split(" ") if len(parts) > 2 else None
+
+                init_env_vars = parse_env_vars(init_env) if init_env else None
+
+                init_containers.append({
+                    'name': init_name,
+                    'image': init_image,
+                    'command': init_command,
+                    'env_vars': init_env_vars
+                })
+
+                console.print(f"🔧 Init container: {init_name} ({init_image})")
+            else:
+                console.print(f"❌ Invalid init container format: {init_container}")
+                return
+        except Exception as e:
+            console.print(f"❌ Error parsing init container: {e}")
+            return
+
+    # Prepare volumes and volume mounts
+    volumes = []
+    volume_mounts = []
+
+    if pvc:
+        try:
+            pvc_parts = pvc.split(":")
+            if len(pvc_parts) == 2:
+                pvc_name, mount_path = pvc_parts
+                volumes.append({
+                    'name': f"{pvc_name}-volume",
+                    'type': 'pvc',
+                    'claim_name': pvc_name
+                })
+                volume_mounts.append({
+                    'name': f"{pvc_name}-volume",
+                    'mount_path': mount_path
+                })
+                console.print(f"💾 PVC mount: {pvc_name} → {mount_path}")
+            else:
+                console.print(f"❌ Invalid PVC format: {pvc}")
+                return
+        except Exception as e:
+            console.print(f"❌ Error parsing PVC: {e}")
+            return
+
+    if secret:
+        try:
+            secret_parts = secret.split(":")
+            if len(secret_parts) == 2:
+                secret_name, mount_path = secret_parts
+                volumes.append({
+                    'name': f"{secret_name}-volume",
+                    'type': 'secret',
+                    'secret_name': secret_name
+                })
+                volume_mounts.append({
+                    'name': f"{secret_name}-volume",
+                    'mount_path': mount_path
+                })
+                console.print(f"🔐 Secret mount: {secret_name} → {mount_path}")
+            else:
+                console.print(f"❌ Invalid secret format: {secret}")
+                return
+        except Exception as e:
+            console.print(f"❌ Error parsing secret: {e}")
+            return
+
     # Create deployment
     with console.status(f"Creating deployment {name}..."):
         deployment_result = client.create_deployment(
@@ -479,7 +561,10 @@ def apply(
             replicas=replicas,
             container_port=port,
             env_vars=env_vars,
-            labels=label_dict
+            labels=label_dict,
+            init_containers=init_containers if init_containers else None,
+            volume_mounts=volume_mounts if volume_mounts else None,
+            volumes=volumes if volumes else None
         )
 
     if not deployment_result:
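For context, the sketch below shows how the same structures could be passed to K8sClient.create_deployment directly, outside the CLI. The dict keys (name, image, command, env_vars, type, claim_name, mount_path) are copied from the parsing code above; the name= and image= parameters and all example values are assumptions, since the authoritative signature lives in core.py, which this diff view does not expand.

```python
# Hedged sketch: calling the library API with the same shapes the CLI assembles.
# Assumes a reachable cluster via the current kubeconfig; values are illustrative.
from k8s_helper.core import K8sClient

client = K8sClient(namespace="demo")
client.create_deployment(
    name="web",                           # assumed parameter name
    image="nginx:1.27",                   # assumed parameter name
    replicas=2,
    container_port=80,
    env_vars={"APP_ENV": "demo"},
    labels={"app": "web"},
    init_containers=[{                    # mirrors --init-container name:image:command
        "name": "init-db",
        "image": "busybox",
        "command": ["sh", "-c", "echo ready"],
        "env_vars": {"STAGE": "init"},    # mirrors --init-env
    }],
    volumes=[{"name": "data-volume", "type": "pvc", "claim_name": "data"}],   # --pvc data:/data
    volume_mounts=[{"name": "data-volume", "mount_path": "/data"}],
)
```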
@@ -508,6 +593,36 @@ def apply(
             console.print(f"✅ Application {name} is ready")
         else:
             console.print(f"❌ Application {name} failed to become ready")
+
+    # Show service URL if requested
+    if show_url:
+        console.print(f"\n🔗 Service URL Information:")
+
+        # Wait a moment for service to be ready
+        time.sleep(2)
+
+        url_info = client.get_service_url(f"{name}-service", ns)
+        if url_info:
+            console.print(f"🔧 Service Type: {url_info['type']}")
+            console.print(f"🖥️ Cluster IP: {url_info['cluster_ip']}")
+
+            if url_info['type'] == 'LoadBalancer':
+                if url_info.get('aws_elb'):
+                    console.print(f"🌐 AWS ELB DNS: [green]{url_info['elb_dns_name']}[/green]")
+                    console.print(f"🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+                elif url_info.get('external_url'):
+                    console.print(f"🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+                else:
+                    console.print(f"⏳ LoadBalancer provisioning... Use 'k8s-helper service-url {name}-service' to check status")
+
+            elif url_info['type'] == 'NodePort':
+                if url_info.get('external_url'):
+                    console.print(f"🔗 NodePort URL: [blue]{url_info['external_url']}[/blue]")
+
+            elif url_info['type'] == 'ClusterIP':
+                console.print(f"💡 ClusterIP service - accessible within cluster at {url_info['cluster_ip']}:{port}")
+        else:
+            console.print("❌ Could not retrieve service URL information")
 
 
 @app.command()
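The URL display above relies on K8sClient.get_service_url, which, judging from the keys referenced in this hunk (type, cluster_ip, ports, aws_elb, elb_dns_name, external_url), returns a plain dict. A minimal sketch of reusing it outside the CLI follows; treat the key set as inferred rather than documented.

```python
# Hedged sketch: consuming get_service_url outside the CLI.
# Key names are inferred from the cli.py references above.
from k8s_helper.core import K8sClient

client = K8sClient(namespace="demo")
info = client.get_service_url("web-service", "demo")
if not info:
    print("service not found")
elif info["type"] == "LoadBalancer" and info.get("aws_elb"):
    print("ELB DNS:", info["elb_dns_name"])
elif info.get("external_url"):
    print("External URL:", info["external_url"])
else:
    print("In-cluster only at", info["cluster_ip"])
```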
@@ -536,5 +651,364 @@ def cleanup(
         console.print(f"⚠️ Partial cleanup completed for application {name}")
 
 
-
-
+# ======================
+# EKS COMMANDS
+# ======================
+@app.command()
+def create_eks_cluster(
+    name: str = typer.Argument(..., help="Cluster name"),
+    region: str = typer.Option("us-west-2", "--region", "-r", help="AWS region"),
+    version: str = typer.Option("1.29", "--version", "-v", help="Kubernetes version"),
+    node_group: str = typer.Option(None, "--node-group", help="Node group name"),
+    instance_types: str = typer.Option("t3.medium", "--instance-types", help="EC2 instance types (comma-separated)"),
+    min_size: int = typer.Option(1, "--min-size", help="Minimum number of nodes"),
+    max_size: int = typer.Option(3, "--max-size", help="Maximum number of nodes"),
+    desired_size: int = typer.Option(2, "--desired-size", help="Desired number of nodes"),
+    wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for cluster to be ready")
+):
+    """Create an AWS EKS cluster"""
+    if not validate_name(name):
+        console.print(f"❌ Invalid cluster name: {name}")
+        return
+
+    try:
+        from .core import EKSClient
+
+        eks_client = EKSClient(region=region)
+
+        # Parse instance types
+        instance_type_list = [t.strip() for t in instance_types.split(",")]
+
+        scaling_config = {
+            "minSize": min_size,
+            "maxSize": max_size,
+            "desiredSize": desired_size
+        }
+
+        console.print(f"🚀 Creating EKS cluster: {name}")
+        console.print(f"📍 Region: {region}")
+        console.print(f"🎯 Version: {version}")
+        console.print(f"💻 Instance types: {instance_type_list}")
+        console.print(f"📊 Scaling: {min_size}-{max_size} nodes (desired: {desired_size})")
+
+        # Show what will be created
+        console.print("\n🔧 EKS Requirements:")
+        console.print(" • IAM roles for cluster and node groups")
+        console.print(" • VPC subnets in at least 2 availability zones")
+        console.print(" • Security groups for cluster communication")
+        console.print(" • EKS cluster control plane")
+        if node_group:
+            console.print(" • Managed node group with EC2 instances")
+
+        with console.status("Creating EKS cluster and required resources..."):
+            cluster_info = eks_client.create_cluster(
+                cluster_name=name,
+                version=version,
+                node_group_name=node_group,
+                instance_types=instance_type_list,
+                scaling_config=scaling_config
+            )
+
+        console.print(f"✅ EKS cluster creation initiated")
+        console.print(f"📋 Cluster ARN: {cluster_info['cluster_arn']}")
+        console.print(f"🕐 Created at: {cluster_info['created_at']}")
+
+        if 'subnets' in cluster_info:
+            console.print(f"🌐 Subnets: {cluster_info['subnets']}")
+
+        if wait:
+            console.print("⏳ Waiting for cluster to become active...")
+            with console.status("Waiting for cluster to be ready..."):
+                if eks_client.wait_for_cluster_active(name):
+                    console.print("✅ EKS cluster is now active!")
+
+                    # Show cluster status
+                    status = eks_client.get_cluster_status(name)
+                    console.print(f"🔗 Endpoint: {status['endpoint']}")
+
+                    # Show next steps
+                    console.print(f"\n🚀 Next steps:")
+                    console.print(f"   1. Configure kubectl: aws eks update-kubeconfig --name {name} --region {region}")
+                    console.print(f"   2. Verify connection: kubectl get svc")
+                    console.print(f"   3. Deploy applications: k8s-helper apply <app-name> <image>")
+                else:
+                    console.print("❌ Timeout waiting for cluster to become active")
+        else:
+            console.print(f"💡 Use 'aws eks update-kubeconfig --name {name} --region {region}' to configure kubectl")
+
+    except Exception as e:
+        error_message = str(e)
+        console.print(f"❌ Failed to create EKS cluster: {error_message}")
+
+        # Provide helpful guidance based on error type
+        if "Need at least 2 subnets" in error_message:
+            console.print("\n🛠️ Troubleshooting:")
+            console.print(" • EKS requires subnets in at least 2 availability zones")
+            console.print(" • Check your VPC configuration in the AWS Console")
+            console.print(" • Ensure you have subnets in different AZs")
+            console.print(" • The tool will attempt to create subnets if none exist")
+        elif "credentials not found" in error_message:
+            console.print("\n🛠️ Troubleshooting:")
+            console.print(" • Configure AWS credentials: aws configure")
+            console.print(" • Or set environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY")
+            console.print(" • Ensure you have EKS permissions")
+        elif "VPC" in error_message:
+            console.print("\n🛠️ Troubleshooting:")
+            console.print(" • Check your VPC configuration")
+            console.print(" • Ensure you have a default VPC or create one")
+            console.print(" • Verify subnet CIDR ranges don't overlap")
+
+
+# ======================
+# SECRET COMMANDS
+# ======================
+@app.command()
+def create_secret(
+    name: str = typer.Argument(..., help="Secret name"),
+    data: str = typer.Option(..., "--data", "-d", help="Secret data (key1=value1,key2=value2)"),
+    secret_type: str = typer.Option("Opaque", "--type", "-t", help="Secret type"),
+    namespace: Optional[str] = namespace_option
+):
+    """Create a Kubernetes secret"""
+    if not validate_name(name):
+        console.print(f"❌ Invalid secret name: {name}")
+        return
+
+    # Parse data
+    try:
+        data_dict = {}
+        for pair in data.split(","):
+            if "=" in pair:
+                key, value = pair.split("=", 1)
+                data_dict[key.strip()] = value.strip()
+            else:
+                console.print(f"❌ Invalid data format: {pair}")
+                return
+
+        if not data_dict:
+            console.print("❌ No valid data provided")
+            return
+
+        ns = namespace or get_config().get_namespace()
+        client = K8sClient(namespace=ns)
+
+        with console.status(f"Creating secret {name}..."):
+            result = client.create_secret(name, data_dict, secret_type, ns)
+
+        if result:
+            console.print(f"✅ Secret {name} created successfully")
+            console.print(f"📋 Type: {secret_type}")
+            console.print(f"🔑 Keys: {list(data_dict.keys())}")
+        else:
+            console.print(f"❌ Failed to create secret {name}")
+
+    except Exception as e:
+        console.print(f"❌ Error creating secret: {e}")
+
+
+@app.command()
+def list_secrets(
+    namespace: Optional[str] = namespace_option,
+    output: str = output_option
+):
+    """List secrets"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    secrets = client.list_secrets(ns)
+
+    if output == "table":
+        table = Table(title=f"Secrets in {ns}")
+        table.add_column("Name", style="cyan")
+        table.add_column("Type", style="magenta")
+        table.add_column("Keys", style="green")
+        table.add_column("Age", style="blue")
+
+        for secret in secrets:
+            age = format_age(secret['created_at'])
+            keys = ", ".join(secret['data_keys'])
+            table.add_row(secret['name'], secret['type'], keys, age)
+
+        console.print(table)
+    elif output == "yaml":
+        console.print(format_yaml_output(secrets))
+    elif output == "json":
+        console.print(format_json_output(secrets))
+
+
+@app.command()
+def delete_secret(
+    name: str = typer.Argument(..., help="Secret name"),
+    namespace: Optional[str] = namespace_option
+):
+    """Delete a secret"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    if typer.confirm(f"Are you sure you want to delete secret {name}?"):
+        with console.status(f"Deleting secret {name}..."):
+            if client.delete_secret(name, ns):
+                console.print(f"✅ Secret {name} deleted successfully")
+            else:
+                console.print(f"❌ Failed to delete secret {name}")
+
+
+# ======================
+# PVC COMMANDS
+# ======================
+@app.command()
+def create_pvc(
+    name: str = typer.Argument(..., help="PVC name"),
+    size: str = typer.Argument(..., help="Storage size (e.g., 10Gi, 100Mi)"),
+    access_modes: str = typer.Option("ReadWriteOnce", "--access-modes", "-a", help="Access modes (comma-separated)"),
+    storage_class: Optional[str] = typer.Option(None, "--storage-class", "-s", help="Storage class"),
+    namespace: Optional[str] = namespace_option
+):
+    """Create a Persistent Volume Claim"""
+    if not validate_name(name):
+        console.print(f"❌ Invalid PVC name: {name}")
+        return
+
+    # Parse access modes
+    access_modes_list = [mode.strip() for mode in access_modes.split(",")]
+
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    with console.status(f"Creating PVC {name}..."):
+        result = client.create_pvc(
+            name=name,
+            size=size,
+            access_modes=access_modes_list,
+            storage_class=storage_class,
+            namespace=ns
+        )
+
+    if result:
+        console.print(f"✅ PVC {name} created successfully")
+        console.print(f"💾 Size: {size}")
+        console.print(f"🔐 Access modes: {access_modes_list}")
+        if storage_class:
+            console.print(f"📦 Storage class: {storage_class}")
+    else:
+        console.print(f"❌ Failed to create PVC {name}")
+
+
+@app.command()
+def list_pvcs(
+    namespace: Optional[str] = namespace_option,
+    output: str = output_option
+):
+    """List Persistent Volume Claims"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    pvcs = client.list_pvcs(ns)
+
+    if output == "table":
+        table = Table(title=f"PVCs in {ns}")
+        table.add_column("Name", style="cyan")
+        table.add_column("Status", style="magenta")
+        table.add_column("Volume", style="green")
+        table.add_column("Size", style="blue")
+        table.add_column("Access Modes", style="yellow")
+        table.add_column("Storage Class", style="red")
+        table.add_column("Age", style="blue")
+
+        for pvc in pvcs:
+            age = format_age(pvc['created_at'])
+            status_color = "green" if pvc['status'] == 'Bound' else "yellow"
+            table.add_row(
+                pvc['name'],
+                f"[{status_color}]{pvc['status']}[/{status_color}]",
+                pvc['volume_name'] or "N/A",
+                pvc['size'],
+                ", ".join(pvc['access_modes']),
+                pvc['storage_class'] or "N/A",
+                age
+            )
+
+        console.print(table)
+    elif output == "yaml":
+        console.print(format_yaml_output(pvcs))
+    elif output == "json":
+        console.print(format_json_output(pvcs))
+
+
+@app.command()
+def delete_pvc(
+    name: str = typer.Argument(..., help="PVC name"),
+    namespace: Optional[str] = namespace_option
+):
+    """Delete a Persistent Volume Claim"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    if typer.confirm(f"Are you sure you want to delete PVC {name}?"):
+        with console.status(f"Deleting PVC {name}..."):
+            if client.delete_pvc(name, ns):
+                console.print(f"✅ PVC {name} deleted successfully")
+            else:
+                console.print(f"❌ Failed to delete PVC {name}")
+
+
+# ======================
+# SERVICE URL COMMAND
+# ======================
+@app.command()
+def service_url(
+    name: str = typer.Argument(..., help="Service name"),
+    namespace: Optional[str] = namespace_option,
+    watch: bool = typer.Option(False, "--watch", "-w", help="Watch for URL changes")
+):
+    """Get service URL including AWS ELB URLs"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    def show_service_url():
+        url_info = client.get_service_url(name, ns)
+        if not url_info:
+            console.print(f"❌ Service {name} not found")
+            return False
+
+        console.print(f"\n🔗 Service URL Information for [cyan]{name}[/cyan]")
+        console.print(f"📍 Namespace: {url_info['namespace']}")
+        console.print(f"🔧 Type: {url_info['type']}")
+        console.print(f"🖥️ Cluster IP: {url_info['cluster_ip']}")
+
+        # Show ports
+        console.print("\n📋 Ports:")
+        for port in url_info['ports']:
+            console.print(f" • {port['port']}/{port['protocol']} → {port['target_port']}")
+
+        # Show external access
+        if url_info['type'] == 'LoadBalancer':
+            if url_info.get('aws_elb'):
+                console.print(f"\n🌐 AWS ELB DNS: [green]{url_info['elb_dns_name']}[/green]")
+                console.print(f"🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+            elif url_info.get('external_url'):
+                console.print(f"\n🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+            elif url_info.get('status'):
+                console.print(f"\n⏳ Status: {url_info['status']}")
+
+        elif url_info['type'] == 'NodePort':
+            if url_info.get('external_url'):
+                console.print(f"\n🔗 NodePort URL: [blue]{url_info['external_url']}[/blue]")
+                console.print(f"🖥️ Node IP: {url_info['node_ip']}")
+                console.print(f"🚪 Node Port: {url_info['node_port']}")
+
+        elif url_info['type'] == 'ClusterIP':
+            console.print(f"\n💡 ClusterIP service - only accessible within cluster")
+
+        return True
+
+    if watch:
+        console.print("👁️ Watching for service URL changes (Ctrl+C to stop)")
+        try:
+            while True:
+                show_service_url()
+                time.sleep(5)
+        except KeyboardInterrupt:
+            console.print("\n👋 Stopped watching")
+    else:
+        show_service_url()