k8s-helper-cli 0.1.2-py3-none-any.whl → 0.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- k8s_helper/__init__.py +1 -1
- k8s_helper/cli.py +457 -6
- k8s_helper/core.py +592 -4
- {k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/METADATA +126 -3
- k8s_helper_cli-0.2.1.dist-info/RECORD +11 -0
- k8s_helper_cli-0.1.2.dist-info/RECORD +0 -11
- {k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/WHEEL +0 -0
- {k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/entry_points.txt +0 -0
- {k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/top_level.txt +0 -0
k8s_helper/__init__.py
CHANGED
k8s_helper/cli.py
CHANGED
@@ -8,6 +8,8 @@ from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
 from rich.text import Text
+import time
+import time
 
 from .core import K8sClient
 from .config import get_config
@@ -21,12 +23,27 @@ from .utils import (
     validate_name,
     validate_image,
     parse_env_vars,
-    parse_labels
+    parse_labels,
+    format_age
 )
+from . import __version__
+
+def version_callback(value: bool):
+    """Version callback for the CLI"""
+    if value:
+        typer.echo(f"k8s-helper-cli version {__version__}")
+        raise typer.Exit()
 
 app = typer.Typer(help="k8s-helper: Simplified Kubernetes operations")
 console = Console()
 
+@app.callback()
+def main(
+    version: Optional[bool] = typer.Option(None, "--version", callback=version_callback, is_eager=True, help="Show version and exit")
+):
+    """Main callback to handle global options"""
+    return
+
 # Global options
 namespace_option = typer.Option(None, "--namespace", "-n", help="Kubernetes namespace")
 output_option = typer.Option("table", "--output", "-o", help="Output format: table, yaml, json")
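The `--version` flag added above uses Typer's eager-callback pattern: `is_eager=True` makes the option run before any subcommand is resolved, and `typer.Exit()` stops processing once the version is printed. As a quick illustration, the flag can be exercised without installing the wheel via Typer's bundled test runner (a sketch; it assumes the packaged `app` object imports cleanly):

```python
# Sketch: invoking the new --version flag through typer.testing.CliRunner.
from typer.testing import CliRunner
from k8s_helper.cli import app

runner = CliRunner()
result = runner.invoke(app, ["--version"])
print(result.output)  # expected: "k8s-helper-cli version 0.2.1"
```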
@@ -437,10 +454,15 @@ def apply(
     service_type: str = typer.Option("ClusterIP", help="Service type"),
     env: Optional[str] = typer.Option(None, "--env", "-e", help="Environment variables"),
     labels: Optional[str] = typer.Option(None, "--labels", "-l", help="Labels"),
+    init_container: Optional[str] = typer.Option(None, "--init-container", help="Init container (name:image:command)"),
+    init_env: Optional[str] = typer.Option(None, "--init-env", help="Init container environment variables"),
+    pvc: Optional[str] = typer.Option(None, "--pvc", help="PVC to mount (name:mount_path)"),
+    secret: Optional[str] = typer.Option(None, "--secret", help="Secret to mount (name:mount_path)"),
     namespace: Optional[str] = namespace_option,
-    wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for deployment to be ready")
+    wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for deployment to be ready"),
+    show_url: bool = typer.Option(True, "--show-url/--no-show-url", help="Show service URL after deployment")
 ):
-    """Deploy an application (deployment + service)"""
+    """Deploy an application (deployment + service) with advanced features"""
     if not validate_name(name):
         console.print(f"❌ Invalid application name: {name}")
         return
@@ -457,6 +479,80 @@
 
     console.print(f"🚀 Deploying application: {name}")
 
+    # Prepare init containers
+    init_containers = []
+    if init_container:
+        try:
+            parts = init_container.split(":")
+            if len(parts) >= 2:
+                init_name, init_image = parts[0], parts[1]
+                init_command = parts[2].split(" ") if len(parts) > 2 else None
+
+                init_env_vars = parse_env_vars(init_env) if init_env else None
+
+                init_containers.append({
+                    'name': init_name,
+                    'image': init_image,
+                    'command': init_command,
+                    'env_vars': init_env_vars
+                })
+
+                console.print(f"🔧 Init container: {init_name} ({init_image})")
+            else:
+                console.print(f"❌ Invalid init container format: {init_container}")
+                return
+        except Exception as e:
+            console.print(f"❌ Error parsing init container: {e}")
+            return
+
+    # Prepare volumes and volume mounts
+    volumes = []
+    volume_mounts = []
+
+    if pvc:
+        try:
+            pvc_parts = pvc.split(":")
+            if len(pvc_parts) == 2:
+                pvc_name, mount_path = pvc_parts
+                volumes.append({
+                    'name': f"{pvc_name}-volume",
+                    'type': 'pvc',
+                    'claim_name': pvc_name
+                })
+                volume_mounts.append({
+                    'name': f"{pvc_name}-volume",
+                    'mount_path': mount_path
+                })
+                console.print(f"💾 PVC mount: {pvc_name} → {mount_path}")
+            else:
+                console.print(f"❌ Invalid PVC format: {pvc}")
+                return
+        except Exception as e:
+            console.print(f"❌ Error parsing PVC: {e}")
+            return
+
+    if secret:
+        try:
+            secret_parts = secret.split(":")
+            if len(secret_parts) == 2:
+                secret_name, mount_path = secret_parts
+                volumes.append({
+                    'name': f"{secret_name}-volume",
+                    'type': 'secret',
+                    'secret_name': secret_name
+                })
+                volume_mounts.append({
+                    'name': f"{secret_name}-volume",
+                    'mount_path': mount_path
+                })
+                console.print(f"🔐 Secret mount: {secret_name} → {mount_path}")
+            else:
+                console.print(f"❌ Invalid secret format: {secret}")
+                return
+        except Exception as e:
+            console.print(f"❌ Error parsing secret: {e}")
+            return
+
     # Create deployment
     with console.status(f"Creating deployment {name}..."):
         deployment_result = client.create_deployment(
@@ -465,7 +561,10 @@
             replicas=replicas,
             container_port=port,
             env_vars=env_vars,
-            labels=label_dict
+            labels=label_dict,
+            init_containers=init_containers if init_containers else None,
+            volume_mounts=volume_mounts if volume_mounts else None,
+            volumes=volumes if volumes else None
         )
 
     if not deployment_result:
@@ -494,6 +593,36 @@
         console.print(f"✅ Application {name} is ready")
     else:
         console.print(f"❌ Application {name} failed to become ready")
+
+    # Show service URL if requested
+    if show_url:
+        console.print(f"\n🔗 Service URL Information:")
+
+        # Wait a moment for service to be ready
+        time.sleep(2)
+
+        url_info = client.get_service_url(f"{name}-service", ns)
+        if url_info:
+            console.print(f"🔧 Service Type: {url_info['type']}")
+            console.print(f"🖥️ Cluster IP: {url_info['cluster_ip']}")
+
+            if url_info['type'] == 'LoadBalancer':
+                if url_info.get('aws_elb'):
+                    console.print(f"🌐 AWS ELB DNS: [green]{url_info['elb_dns_name']}[/green]")
+                    console.print(f"🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+                elif url_info.get('external_url'):
+                    console.print(f"🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+                else:
+                    console.print(f"⏳ LoadBalancer provisioning... Use 'k8s-helper service-url {name}-service' to check status")
+
+            elif url_info['type'] == 'NodePort':
+                if url_info.get('external_url'):
+                    console.print(f"🔗 NodePort URL: [blue]{url_info['external_url']}[/blue]")
+
+            elif url_info['type'] == 'ClusterIP':
+                console.print(f"💡 ClusterIP service - accessible within cluster at {url_info['cluster_ip']}:{port}")
+        else:
+            console.print("❌ Could not retrieve service URL information")
 
 
 @app.command()
@@ -522,5 +651,327 @@ def cleanup(
         console.print(f"⚠️ Partial cleanup completed for application {name}")
 
 
-
-
+# ======================
+# EKS COMMANDS
+# ======================
+@app.command()
+def create_eks_cluster(
+    name: str = typer.Argument(..., help="Cluster name"),
+    region: str = typer.Option("us-west-2", "--region", "-r", help="AWS region"),
+    version: str = typer.Option("1.29", "--version", "-v", help="Kubernetes version"),
+    node_group: str = typer.Option(None, "--node-group", help="Node group name"),
+    instance_types: str = typer.Option("t3.medium", "--instance-types", help="EC2 instance types (comma-separated)"),
+    min_size: int = typer.Option(1, "--min-size", help="Minimum number of nodes"),
+    max_size: int = typer.Option(3, "--max-size", help="Maximum number of nodes"),
+    desired_size: int = typer.Option(2, "--desired-size", help="Desired number of nodes"),
+    wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for cluster to be ready")
+):
+    """Create an AWS EKS cluster"""
+    if not validate_name(name):
+        console.print(f"❌ Invalid cluster name: {name}")
+        return
+
+    try:
+        from .core import EKSClient
+
+        eks_client = EKSClient(region=region)
+
+        # Parse instance types
+        instance_type_list = [t.strip() for t in instance_types.split(",")]
+
+        scaling_config = {
+            "minSize": min_size,
+            "maxSize": max_size,
+            "desiredSize": desired_size
+        }
+
+        console.print(f"🚀 Creating EKS cluster: {name}")
+        console.print(f"📍 Region: {region}")
+        console.print(f"🎯 Version: {version}")
+        console.print(f"💻 Instance types: {instance_type_list}")
+        console.print(f"📊 Scaling: {min_size}-{max_size} nodes (desired: {desired_size})")
+
+        with console.status("Creating EKS cluster..."):
+            cluster_info = eks_client.create_cluster(
+                cluster_name=name,
+                version=version,
+                node_group_name=node_group,
+                instance_types=instance_type_list,
+                scaling_config=scaling_config
+            )
+
+        console.print(f"✅ EKS cluster creation initiated")
+        console.print(f"📋 Cluster ARN: {cluster_info['cluster_arn']}")
+        console.print(f"🕐 Created at: {cluster_info['created_at']}")
+
+        if wait:
+            console.print("⏳ Waiting for cluster to become active...")
+            with console.status("Waiting for cluster to be ready..."):
+                if eks_client.wait_for_cluster_active(name):
+                    console.print("✅ EKS cluster is now active!")
+
+                    # Show cluster status
+                    status = eks_client.get_cluster_status(name)
+                    console.print(f"🔗 Endpoint: {status['endpoint']}")
+                else:
+                    console.print("❌ Timeout waiting for cluster to become active")
+        else:
+            console.print("💡 Use 'aws eks update-kubeconfig --name {name} --region {region}' to configure kubectl")
+
+    except Exception as e:
+        console.print(f"❌ Failed to create EKS cluster: {e}")
+
+
+# ======================
+# SECRET COMMANDS
+# ======================
+@app.command()
+def create_secret(
+    name: str = typer.Argument(..., help="Secret name"),
+    data: str = typer.Option(..., "--data", "-d", help="Secret data (key1=value1,key2=value2)"),
+    secret_type: str = typer.Option("Opaque", "--type", "-t", help="Secret type"),
+    namespace: Optional[str] = namespace_option
+):
+    """Create a Kubernetes secret"""
+    if not validate_name(name):
+        console.print(f"❌ Invalid secret name: {name}")
+        return
+
+    # Parse data
+    try:
+        data_dict = {}
+        for pair in data.split(","):
+            if "=" in pair:
+                key, value = pair.split("=", 1)
+                data_dict[key.strip()] = value.strip()
+            else:
+                console.print(f"❌ Invalid data format: {pair}")
+                return
+
+        if not data_dict:
+            console.print("❌ No valid data provided")
+            return
+
+        ns = namespace or get_config().get_namespace()
+        client = K8sClient(namespace=ns)
+
+        with console.status(f"Creating secret {name}..."):
+            result = client.create_secret(name, data_dict, secret_type, ns)
+
+        if result:
+            console.print(f"✅ Secret {name} created successfully")
+            console.print(f"📋 Type: {secret_type}")
+            console.print(f"🔑 Keys: {list(data_dict.keys())}")
+        else:
+            console.print(f"❌ Failed to create secret {name}")
+
+    except Exception as e:
+        console.print(f"❌ Error creating secret: {e}")
+
+
+@app.command()
+def list_secrets(
+    namespace: Optional[str] = namespace_option,
+    output: str = output_option
+):
+    """List secrets"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    secrets = client.list_secrets(ns)
+
+    if output == "table":
+        table = Table(title=f"Secrets in {ns}")
+        table.add_column("Name", style="cyan")
+        table.add_column("Type", style="magenta")
+        table.add_column("Keys", style="green")
+        table.add_column("Age", style="blue")
+
+        for secret in secrets:
+            age = format_age(secret['created_at'])
+            keys = ", ".join(secret['data_keys'])
+            table.add_row(secret['name'], secret['type'], keys, age)
+
+        console.print(table)
+    elif output == "yaml":
+        console.print(format_yaml_output(secrets))
+    elif output == "json":
+        console.print(format_json_output(secrets))
+
+
+@app.command()
+def delete_secret(
+    name: str = typer.Argument(..., help="Secret name"),
+    namespace: Optional[str] = namespace_option
+):
+    """Delete a secret"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    if typer.confirm(f"Are you sure you want to delete secret {name}?"):
+        with console.status(f"Deleting secret {name}..."):
+            if client.delete_secret(name, ns):
+                console.print(f"✅ Secret {name} deleted successfully")
+            else:
+                console.print(f"❌ Failed to delete secret {name}")
+
+
+# ======================
+# PVC COMMANDS
+# ======================
+@app.command()
+def create_pvc(
+    name: str = typer.Argument(..., help="PVC name"),
+    size: str = typer.Argument(..., help="Storage size (e.g., 10Gi, 100Mi)"),
+    access_modes: str = typer.Option("ReadWriteOnce", "--access-modes", "-a", help="Access modes (comma-separated)"),
+    storage_class: Optional[str] = typer.Option(None, "--storage-class", "-s", help="Storage class"),
+    namespace: Optional[str] = namespace_option
+):
+    """Create a Persistent Volume Claim"""
+    if not validate_name(name):
+        console.print(f"❌ Invalid PVC name: {name}")
+        return
+
+    # Parse access modes
+    access_modes_list = [mode.strip() for mode in access_modes.split(",")]
+
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    with console.status(f"Creating PVC {name}..."):
+        result = client.create_pvc(
+            name=name,
+            size=size,
+            access_modes=access_modes_list,
+            storage_class=storage_class,
+            namespace=ns
+        )
+
+    if result:
+        console.print(f"✅ PVC {name} created successfully")
+        console.print(f"💾 Size: {size}")
+        console.print(f"🔐 Access modes: {access_modes_list}")
+        if storage_class:
+            console.print(f"📦 Storage class: {storage_class}")
+    else:
+        console.print(f"❌ Failed to create PVC {name}")
+
+
+@app.command()
+def list_pvcs(
+    namespace: Optional[str] = namespace_option,
+    output: str = output_option
+):
+    """List Persistent Volume Claims"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    pvcs = client.list_pvcs(ns)
+
+    if output == "table":
+        table = Table(title=f"PVCs in {ns}")
+        table.add_column("Name", style="cyan")
+        table.add_column("Status", style="magenta")
+        table.add_column("Volume", style="green")
+        table.add_column("Size", style="blue")
+        table.add_column("Access Modes", style="yellow")
+        table.add_column("Storage Class", style="red")
+        table.add_column("Age", style="blue")
+
+        for pvc in pvcs:
+            age = format_age(pvc['created_at'])
+            status_color = "green" if pvc['status'] == 'Bound' else "yellow"
+            table.add_row(
+                pvc['name'],
+                f"[{status_color}]{pvc['status']}[/{status_color}]",
+                pvc['volume_name'] or "N/A",
+                pvc['size'],
+                ", ".join(pvc['access_modes']),
+                pvc['storage_class'] or "N/A",
+                age
+            )
+
+        console.print(table)
+    elif output == "yaml":
+        console.print(format_yaml_output(pvcs))
+    elif output == "json":
+        console.print(format_json_output(pvcs))
+
+
+@app.command()
+def delete_pvc(
+    name: str = typer.Argument(..., help="PVC name"),
+    namespace: Optional[str] = namespace_option
+):
+    """Delete a Persistent Volume Claim"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    if typer.confirm(f"Are you sure you want to delete PVC {name}?"):
+        with console.status(f"Deleting PVC {name}..."):
+            if client.delete_pvc(name, ns):
+                console.print(f"✅ PVC {name} deleted successfully")
+            else:
+                console.print(f"❌ Failed to delete PVC {name}")
+
+
+# ======================
+# SERVICE URL COMMAND
+# ======================
+@app.command()
+def service_url(
+    name: str = typer.Argument(..., help="Service name"),
+    namespace: Optional[str] = namespace_option,
+    watch: bool = typer.Option(False, "--watch", "-w", help="Watch for URL changes")
+):
+    """Get service URL including AWS ELB URLs"""
+    ns = namespace or get_config().get_namespace()
+    client = K8sClient(namespace=ns)
+
+    def show_service_url():
+        url_info = client.get_service_url(name, ns)
+        if not url_info:
+            console.print(f"❌ Service {name} not found")
+            return False
+
+        console.print(f"\n🔗 Service URL Information for [cyan]{name}[/cyan]")
+        console.print(f"📍 Namespace: {url_info['namespace']}")
+        console.print(f"🔧 Type: {url_info['type']}")
+        console.print(f"🖥️ Cluster IP: {url_info['cluster_ip']}")
+
+        # Show ports
+        console.print("\n📋 Ports:")
+        for port in url_info['ports']:
+            console.print(f"  • {port['port']}/{port['protocol']} → {port['target_port']}")
+
+        # Show external access
+        if url_info['type'] == 'LoadBalancer':
+            if url_info.get('aws_elb'):
+                console.print(f"\n🌐 AWS ELB DNS: [green]{url_info['elb_dns_name']}[/green]")
+                console.print(f"🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+            elif url_info.get('external_url'):
+                console.print(f"\n🔗 External URL: [blue]{url_info['external_url']}[/blue]")
+            elif url_info.get('status'):
+                console.print(f"\n⏳ Status: {url_info['status']}")
+
+        elif url_info['type'] == 'NodePort':
+            if url_info.get('external_url'):
+                console.print(f"\n🔗 NodePort URL: [blue]{url_info['external_url']}[/blue]")
+                console.print(f"🖥️ Node IP: {url_info['node_ip']}")
+                console.print(f"🚪 Node Port: {url_info['node_port']}")
+
+        elif url_info['type'] == 'ClusterIP':
+            console.print(f"\n💡 ClusterIP service - only accessible within cluster")
+
+        return True
+
+    if watch:
+        console.print("👁️ Watching for service URL changes (Ctrl+C to stop)")
+        try:
+            while True:
+                show_service_url()
+                time.sleep(5)
+        except KeyboardInterrupt:
+            console.print("\n👋 Stopped watching")
+    else:
+        show_service_url()
k8s_helper/core.py
CHANGED
@@ -2,8 +2,211 @@ from kubernetes import client, config
 from kubernetes.client.rest import ApiException
 from typing import Dict, List, Optional, Any
 import yaml
+import time
+import base64
+import boto3
+import json
+from botocore.exceptions import ClientError, NoCredentialsError
 
 
+class EKSClient:
+    """AWS EKS client for cluster management"""
+
+    def __init__(self, region: str = "us-west-2"):
+        """Initialize EKS client
+
+        Args:
+            region: AWS region for EKS operations
+        """
+        self.region = region
+        try:
+            self.eks_client = boto3.client('eks', region_name=region)
+            self.ec2_client = boto3.client('ec2', region_name=region)
+            self.iam_client = boto3.client('iam', region_name=region)
+        except (NoCredentialsError, ClientError) as e:
+            raise Exception(f"AWS credentials not found or invalid: {e}")
+
+    def create_cluster(self, cluster_name: str, version: str = "1.29",
+                       subnets: List[str] = None, security_groups: List[str] = None,
+                       role_arn: str = None, node_group_name: str = None,
+                       instance_types: List[str] = None, ami_type: str = "AL2_x86_64",
+                       capacity_type: str = "ON_DEMAND", scaling_config: Dict = None) -> Dict:
+        """Create an EKS cluster
+
+        Args:
+            cluster_name: Name of the EKS cluster
+            version: Kubernetes version
+            subnets: List of subnet IDs
+            security_groups: List of security group IDs
+            role_arn: IAM role ARN for the cluster
+            node_group_name: Name for the node group
+            instance_types: List of EC2 instance types
+            ami_type: AMI type for nodes
+            capacity_type: Capacity type (ON_DEMAND or SPOT)
+            scaling_config: Scaling configuration for node group
+
+        Returns:
+            Dict containing cluster information
+        """
+        try:
+            # Use default values if not provided
+            if subnets is None:
+                subnets = self._get_default_subnets()
+
+            if role_arn is None:
+                role_arn = self._create_or_get_cluster_role()
+
+            if instance_types is None:
+                instance_types = ["t3.medium"]
+
+            if scaling_config is None:
+                scaling_config = {
+                    "minSize": 1,
+                    "maxSize": 3,
+                    "desiredSize": 2
+                }
+
+            # Create cluster
+            cluster_response = self.eks_client.create_cluster(
+                name=cluster_name,
+                version=version,
+                roleArn=role_arn,
+                resourcesVpcConfig={
+                    'subnetIds': subnets,
+                    'securityGroupIds': security_groups or [],
+                    'endpointConfigPublic': True,
+                    'endpointConfigPrivate': True
+                },
+                logging={
+                    'enable': True,
+                    'types': ['api', 'audit', 'authenticator', 'controllerManager', 'scheduler']
+                }
+            )
+
+            cluster_info = {
+                'cluster_name': cluster_name,
+                'status': 'CREATING',
+                'cluster_arn': cluster_response['cluster']['arn'],
+                'endpoint': cluster_response['cluster'].get('endpoint', 'Not available yet'),
+                'version': version,
+                'role_arn': role_arn,
+                'subnets': subnets,
+                'created_at': cluster_response['cluster']['createdAt']
+            }
+
+            # If node group name is provided, we'll create it after cluster is active
+            if node_group_name:
+                cluster_info['node_group_name'] = node_group_name
+                cluster_info['instance_types'] = instance_types
+                cluster_info['scaling_config'] = scaling_config
+
+            return cluster_info
+
+        except ClientError as e:
+            raise Exception(f"Failed to create EKS cluster: {e}")
+
+    def _get_default_subnets(self) -> List[str]:
+        """Get default subnets for EKS cluster"""
+        try:
+            response = self.ec2_client.describe_subnets()
+            subnets = []
+            for subnet in response['Subnets']:
+                if subnet['State'] == 'available':
+                    subnets.append(subnet['SubnetId'])
+
+            if len(subnets) < 2:
+                raise Exception("Need at least 2 subnets for EKS cluster")
+
+            return subnets[:2]  # Return first 2 available subnets
+
+        except ClientError as e:
+            raise Exception(f"Failed to get default subnets: {e}")
+
+    def _create_or_get_cluster_role(self) -> str:
+        """Create or get IAM role for EKS cluster"""
+        role_name = "eks-cluster-role"
+
+        try:
+            # Check if role exists
+            response = self.iam_client.get_role(RoleName=role_name)
+            return response['Role']['Arn']
+
+        except ClientError as e:
+            if e.response['Error']['Code'] == 'NoSuchEntity':
+                # Create the role
+                trust_policy = {
+                    "Version": "2012-10-17",
+                    "Statement": [
+                        {
+                            "Effect": "Allow",
+                            "Principal": {
+                                "Service": "eks.amazonaws.com"
+                            },
+                            "Action": "sts:AssumeRole"
+                        }
+                    ]
+                }
+
+                response = self.iam_client.create_role(
+                    RoleName=role_name,
+                    AssumeRolePolicyDocument=json.dumps(trust_policy),
+                    Description="EKS cluster role created by k8s-helper"
+                )
+
+                # Attach required policies
+                policies = [
+                    "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+                ]
+
+                for policy in policies:
+                    self.iam_client.attach_role_policy(
+                        RoleName=role_name,
+                        PolicyArn=policy
+                    )
+
+                return response['Role']['Arn']
+            else:
+                raise Exception(f"Failed to create or get cluster role: {e}")
+
+    def get_cluster_status(self, cluster_name: str) -> Dict:
+        """Get EKS cluster status"""
+        try:
+            response = self.eks_client.describe_cluster(name=cluster_name)
+            cluster = response['cluster']
+
+            return {
+                'name': cluster['name'],
+                'status': cluster['status'],
+                'endpoint': cluster.get('endpoint', 'Not available'),
+                'version': cluster['version'],
+                'platform_version': cluster['platformVersion'],
+                'created_at': cluster['createdAt'],
+                'arn': cluster['arn']
+            }
+
+        except ClientError as e:
+            raise Exception(f"Failed to get cluster status: {e}")
+
+    def wait_for_cluster_active(self, cluster_name: str, timeout: int = 1800) -> bool:
+        """Wait for EKS cluster to become active"""
+        import time
+        start_time = time.time()
+
+        while time.time() - start_time < timeout:
+            try:
+                status = self.get_cluster_status(cluster_name)
+                if status['status'] == 'ACTIVE':
+                    return True
+                elif status['status'] == 'FAILED':
+                    raise Exception(f"Cluster creation failed")
+
+                time.sleep(30)  # Check every 30 seconds
+
+            except Exception as e:
+                raise Exception(f"Error waiting for cluster: {e}")
+
+        return False
+
 class K8sClient:
     def __init__(self, namespace="default"):
         try:
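For orientation, a minimal sketch of driving the new `EKSClient` directly from Python (the cluster name is hypothetical, and AWS credentials must already be configured):

```python
# Sketch: creating a cluster with the EKSClient added above.
from k8s_helper.core import EKSClient

eks = EKSClient(region="us-west-2")
info = eks.create_cluster(cluster_name="demo-cluster", version="1.29")
print(info["status"], info["cluster_arn"])  # CREATING, arn:aws:eks:...

if eks.wait_for_cluster_active("demo-cluster"):  # polls every 30s, 30 min timeout
    print(eks.get_cluster_status("demo-cluster")["endpoint"])
```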
@@ -20,8 +223,26 @@
     # ======================
     def create_deployment(self, name: str, image: str, replicas: int = 1,
                           container_port: int = 80, env_vars: Optional[Dict[str, str]] = None,
-                          labels: Optional[Dict[str, str]] = None) -> Optional[Any]:
-
+                          labels: Optional[Dict[str, str]] = None,
+                          init_containers: Optional[List[Dict]] = None,
+                          volume_mounts: Optional[List[Dict]] = None,
+                          volumes: Optional[List[Dict]] = None) -> Optional[Any]:
+        """Create a Kubernetes deployment
+
+        Args:
+            name: Deployment name
+            image: Container image
+            replicas: Number of replicas
+            container_port: Container port
+            env_vars: Environment variables
+            labels: Labels for the deployment
+            init_containers: List of init container specifications
+            volume_mounts: List of volume mounts for the main container
+            volumes: List of volumes for the pod
+
+        Returns:
+            Deployment object if successful, None otherwise
+        """
         if labels is None:
             labels = {"app": name}
 
@@ -30,16 +251,90 @@
         if env_vars:
             env = [client.V1EnvVar(name=k, value=v) for k, v in env_vars.items()]
 
+        # Volume mounts for main container
+        volume_mounts_obj = []
+        if volume_mounts:
+            for vm in volume_mounts:
+                volume_mounts_obj.append(client.V1VolumeMount(
+                    name=vm.get('name'),
+                    mount_path=vm.get('mount_path'),
+                    read_only=vm.get('read_only', False)
+                ))
+
+        # Main container
         container = client.V1Container(
             name=name,
             image=image,
             ports=[client.V1ContainerPort(container_port=container_port)],
-            env=env if env else None
+            env=env if env else None,
+            volume_mounts=volume_mounts_obj if volume_mounts_obj else None
         )
 
+        # Init containers
+        init_containers_obj = []
+        if init_containers:
+            for init_container in init_containers:
+                init_env = []
+                if init_container.get('env_vars'):
+                    init_env = [client.V1EnvVar(name=k, value=v)
+                                for k, v in init_container['env_vars'].items()]
+
+                init_volume_mounts = []
+                if init_container.get('volume_mounts'):
+                    for vm in init_container['volume_mounts']:
+                        init_volume_mounts.append(client.V1VolumeMount(
+                            name=vm.get('name'),
+                            mount_path=vm.get('mount_path'),
+                            read_only=vm.get('read_only', False)
+                        ))
+
+                init_containers_obj.append(client.V1Container(
+                    name=init_container['name'],
+                    image=init_container['image'],
+                    command=init_container.get('command'),
+                    args=init_container.get('args'),
+                    env=init_env if init_env else None,
+                    volume_mounts=init_volume_mounts if init_volume_mounts else None
+                ))
+
+        # Volumes
+        volumes_obj = []
+        if volumes:
+            for volume in volumes:
+                if volume.get('type') == 'pvc':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
+                            claim_name=volume['claim_name']
+                        )
+                    ))
+                elif volume.get('type') == 'secret':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        secret=client.V1SecretVolumeSource(
+                            secret_name=volume['secret_name']
+                        )
+                    ))
+                elif volume.get('type') == 'configmap':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        config_map=client.V1ConfigMapVolumeSource(
+                            name=volume['config_map_name']
+                        )
+                    ))
+                elif volume.get('type') == 'empty_dir':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        empty_dir=client.V1EmptyDirVolumeSource()
+                    ))
+
         template = client.V1PodTemplateSpec(
             metadata=client.V1ObjectMeta(labels=labels),
-            spec=client.V1PodSpec(containers=[container])
+            spec=client.V1PodSpec(
+                containers=[container],
+                init_containers=init_containers_obj if init_containers_obj else None,
+                volumes=volumes_obj if volumes_obj else None
+            )
         )
 
         spec = client.V1DeploymentSpec(
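The extended `create_deployment` consumes plain dicts rather than Kubernetes client objects. A sketch of a call using the dict shapes defined above (names and namespace are hypothetical):

```python
# Sketch: calling create_deployment with an init container and a PVC-backed volume.
from k8s_helper.core import K8sClient

k8s = K8sClient(namespace="demo")
k8s.create_deployment(
    name="web",
    image="nginx:1.27",
    init_containers=[{"name": "wait", "image": "busybox",
                      "command": ["sh", "-c", "sleep 5"], "env_vars": None}],
    volumes=[{"name": "data-volume", "type": "pvc", "claim_name": "data"}],
    volume_mounts=[{"name": "data-volume", "mount_path": "/data"}],
)
```

The supported `type` keys for volume dicts are `pvc`, `secret`, `configmap`, and `empty_dir`, matching the branches above.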
@@ -468,6 +763,299 @@
             print(f"❌ Error describing service '{name}': {e}")
             return None
 
+    # ======================
+    # SECRET OPERATIONS
+    # ======================
+    def create_secret(self, name: str, data: Dict[str, str],
+                      secret_type: str = "Opaque", namespace: str = None) -> Optional[Any]:
+        """Create a Kubernetes secret
+
+        Args:
+            name: Name of the secret
+            data: Dictionary of key-value pairs for the secret
+            secret_type: Type of secret (Opaque, kubernetes.io/tls, etc.)
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            Secret object if successful, None otherwise
+        """
+        try:
+            ns = namespace or self.namespace
+
+            # Encode data as base64
+            encoded_data = {}
+            for key, value in data.items():
+                encoded_data[key] = base64.b64encode(value.encode()).decode()
+
+            secret = client.V1Secret(
+                metadata=client.V1ObjectMeta(name=name, namespace=ns),
+                type=secret_type,
+                data=encoded_data
+            )
+
+            result = self.core_v1.create_namespaced_secret(
+                namespace=ns,
+                body=secret
+            )
+
+            return result
+
+        except ApiException as e:
+            print(f"❌ Error creating secret: {e}")
+            return None
+
+    def get_secret(self, name: str, namespace: str = None) -> Optional[Dict]:
+        """Get a Kubernetes secret
+
+        Args:
+            name: Name of the secret
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            Dictionary containing secret data
+        """
+        try:
+            ns = namespace or self.namespace
+            result = self.core_v1.read_namespaced_secret(name=name, namespace=ns)
+
+            # Decode base64 data
+            decoded_data = {}
+            if result.data:
+                for key, value in result.data.items():
+                    decoded_data[key] = base64.b64decode(value).decode()
+
+            return {
+                'name': result.metadata.name,
+                'namespace': result.metadata.namespace,
+                'type': result.type,
+                'data': decoded_data,
+                'created_at': result.metadata.creation_timestamp
+            }
+
+        except ApiException as e:
+            print(f"❌ Error getting secret: {e}")
+            return None
+
+    def delete_secret(self, name: str, namespace: str = None) -> bool:
+        """Delete a Kubernetes secret"""
+        try:
+            ns = namespace or self.namespace
+            self.core_v1.delete_namespaced_secret(name=name, namespace=ns)
+            return True
+        except ApiException as e:
+            print(f"❌ Error deleting secret: {e}")
+            return False
+
+    def list_secrets(self, namespace: str = None) -> List[Dict]:
+        """List all secrets in a namespace"""
+        try:
+            ns = namespace or self.namespace
+            result = self.core_v1.list_namespaced_secret(namespace=ns)
+
+            secrets = []
+            for secret in result.items:
+                secrets.append({
+                    'name': secret.metadata.name,
+                    'namespace': secret.metadata.namespace,
+                    'type': secret.type,
+                    'data_keys': list(secret.data.keys()) if secret.data else [],
+                    'created_at': secret.metadata.creation_timestamp
+                })
+
+            return secrets
+
+        except ApiException as e:
+            print(f"❌ Error listing secrets: {e}")
+            return []
+
+    # ======================
+    # PVC OPERATIONS
+    # ======================
+    def create_pvc(self, name: str, size: str, access_modes: List[str] = None,
+                   storage_class: str = None, namespace: str = None) -> Optional[Any]:
+        """Create a Persistent Volume Claim
+
+        Args:
+            name: Name of the PVC
+            size: Size of the volume (e.g., '10Gi', '100Mi')
+            access_modes: List of access modes (default: ['ReadWriteOnce'])
+            storage_class: Storage class name
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            PVC object if successful, None otherwise
+        """
+        try:
+            ns = namespace or self.namespace
+
+            if access_modes is None:
+                access_modes = ['ReadWriteOnce']
+
+            # Create PVC specification
+            pvc_spec = client.V1PersistentVolumeClaimSpec(
+                access_modes=access_modes,
+                resources=client.V1ResourceRequirements(
+                    requests={'storage': size}
+                )
+            )
+
+            if storage_class:
+                pvc_spec.storage_class_name = storage_class
+
+            pvc = client.V1PersistentVolumeClaim(
+                metadata=client.V1ObjectMeta(name=name, namespace=ns),
+                spec=pvc_spec
+            )
+
+            result = self.core_v1.create_namespaced_persistent_volume_claim(
+                namespace=ns,
+                body=pvc
+            )
+
+            return result
+
+        except ApiException as e:
+            print(f"❌ Error creating PVC: {e}")
+            return None
+
+    def get_pvc(self, name: str, namespace: str = None) -> Optional[Dict]:
+        """Get a Persistent Volume Claim"""
+        try:
+            ns = namespace or self.namespace
+            result = self.core_v1.read_namespaced_persistent_volume_claim(name=name, namespace=ns)
+
+            return {
+                'name': result.metadata.name,
+                'namespace': result.metadata.namespace,
+                'status': result.status.phase,
+                'volume_name': result.spec.volume_name,
+                'access_modes': result.spec.access_modes,
+                'storage_class': result.spec.storage_class_name,
+                'size': result.spec.resources.requests.get('storage', 'Unknown'),
+                'created_at': result.metadata.creation_timestamp
+            }
+
+        except ApiException as e:
+            print(f"❌ Error getting PVC: {e}")
+            return None
+
+    def delete_pvc(self, name: str, namespace: str = None) -> bool:
+        """Delete a Persistent Volume Claim"""
+        try:
+            ns = namespace or self.namespace
+            self.core_v1.delete_namespaced_persistent_volume_claim(name=name, namespace=ns)
+            return True
+        except ApiException as e:
+            print(f"❌ Error deleting PVC: {e}")
+            return False
+
+    def list_pvcs(self, namespace: str = None) -> List[Dict]:
+        """List all PVCs in a namespace"""
+        try:
+            ns = namespace or self.namespace
+            result = self.core_v1.list_namespaced_persistent_volume_claim(namespace=ns)
+
+            pvcs = []
+            for pvc in result.items:
+                pvcs.append({
+                    'name': pvc.metadata.name,
+                    'namespace': pvc.metadata.namespace,
+                    'status': pvc.status.phase,
+                    'volume_name': pvc.spec.volume_name,
+                    'access_modes': pvc.spec.access_modes,
+                    'storage_class': pvc.spec.storage_class_name,
+                    'size': pvc.spec.resources.requests.get('storage', 'Unknown'),
+                    'created_at': pvc.metadata.creation_timestamp
+                })
+
+            return pvcs
+
+        except ApiException as e:
+            print(f"❌ Error listing PVCs: {e}")
+            return []
+
+    # ======================
+    # SERVICE URL OPERATIONS
+    # ======================
+    def get_service_url(self, name: str, namespace: str = None) -> Optional[Dict]:
+        """Get service URL, including AWS ELB URLs for LoadBalancer services
+
+        Args:
+            name: Name of the service
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            Dictionary containing service URL information
+        """
+        try:
+            ns = namespace or self.namespace
+            service = self.core_v1.read_namespaced_service(name=name, namespace=ns)
+
+            service_type = service.spec.type
+            ports = []
+            for port in service.spec.ports:
+                ports.append({
+                    'port': port.port,
+                    'target_port': port.target_port,
+                    'protocol': port.protocol,
+                    'name': port.name
+                })
+
+            result = {
+                'name': name,
+                'namespace': ns,
+                'type': service_type,
+                'ports': ports,
+                'cluster_ip': service.spec.cluster_ip
+            }
+
+            if service_type == 'LoadBalancer':
+                # Check for AWS ELB
+                ingress = service.status.load_balancer.ingress
+                if ingress:
+                    for ing in ingress:
+                        if ing.hostname:  # AWS ELB uses hostname
+                            result['external_url'] = f"http://{ing.hostname}"
+                            result['external_hostname'] = ing.hostname
+
+                            # Check if it's an AWS ELB
+                            if 'elb.amazonaws.com' in ing.hostname:
+                                result['aws_elb'] = True
+                                result['elb_dns_name'] = ing.hostname
+                        elif ing.ip:  # Some cloud providers use IP
+                            result['external_url'] = f"http://{ing.ip}"
+                            result['external_ip'] = ing.ip
+
+                # If no ingress yet, service might still be provisioning
+                if not ingress:
+                    result['status'] = 'Provisioning LoadBalancer...'
+
+            elif service_type == 'NodePort':
+                # For NodePort, we need to get node IPs
+                nodes = self.core_v1.list_node()
+                if nodes.items:
+                    node_ip = None
+                    for node in nodes.items:
+                        for address in node.status.addresses:
+                            if address.type == 'ExternalIP':
+                                node_ip = address.address
+                                break
+                        if node_ip:
+                            break
+
+                    if node_ip:
+                        for port in service.spec.ports:
+                            if port.node_port:
+                                result['external_url'] = f"http://{node_ip}:{port.node_port}"
+                                result['node_ip'] = node_ip
+                                result['node_port'] = port.node_port
+
+            return result
+
+        except ApiException as e:
+            print(f"❌ Error getting service URL: {e}")
+            return None
+
     # ======================
     # UTILITY METHODS
     # ======================
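`create_secret` base64-encodes each value before building the `V1Secret`, and `get_secret` reverses the encoding. The round-trip in stdlib terms (values are illustrative):

```python
# Round-trip of the base64 handling used by create_secret/get_secret above.
import base64

plaintext = {"username": "admin", "password": "secret123"}
encoded = {k: base64.b64encode(v.encode()).decode() for k, v in plaintext.items()}
decoded = {k: base64.b64decode(v).decode() for k, v in encoded.items()}

assert decoded == plaintext
print(encoded["username"])  # YWRtaW4=
```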
{k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: k8s-helper-cli
-Version: 0.1.2
+Version: 0.2.1
 Summary: A simplified Python wrapper for common Kubernetes operations
 Author-email: Harshit Chatterjee <harshitchatterjee50@gmail.com>
 License-Expression: MIT
@@ -14,6 +14,8 @@ Requires-Dist: kubernetes>=26.1.0
 Requires-Dist: typer>=0.9.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: pyyaml>=6.0
+Requires-Dist: boto3>=1.26.0
+Requires-Dist: botocore>=1.29.0
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0.0; extra == "dev"
 Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
@@ -30,8 +32,13 @@ A simplified Python wrapper for common Kubernetes operations that makes it easy
 ## Features
 
 - ✅ **Pod Management**: Create, delete, and list pods
-- ✅ **Deployment Management**: Create, delete, scale, and list deployments
-- ✅ **Service Management**: Create, delete, and list services
+- ✅ **Deployment Management**: Create, delete, scale, and list deployments with init containers
+- ✅ **Service Management**: Create, delete, list services with URL retrieval
+- ✅ **AWS EKS Integration**: Create and manage EKS clusters with automatic configuration
+- ✅ **Secrets Management**: Create, list, and delete Kubernetes secrets
+- ✅ **Persistent Volume Claims**: Create, list, and delete PVCs with multiple access modes
+- ✅ **Service URL Discovery**: Get service URLs including AWS ELB DNS names
+- ✅ **Advanced Deployments**: Support for init containers, volume mounts, and complex configurations
 - ✅ **Resource Monitoring**: Get logs, events, and resource descriptions
 - ✅ **Easy Configuration**: Simple configuration management
 - ✅ **Formatted Output**: Beautiful table, YAML, and JSON output formats
@@ -60,6 +67,13 @@ pip install -e .
 
 **Important**: k8s-helper requires an active Kubernetes cluster connection. Without a properly configured kubectl and accessible cluster, the commands will fail with configuration errors.
 
+### AWS EKS Features Prerequisites
+
+For AWS EKS integration features:
+- AWS CLI configured with appropriate credentials (`aws configure`)
+- AWS IAM permissions for EKS, EC2, and IAM operations
+- boto3 package (automatically installed with k8s-helper-cli)
+
 ### Setting up Kubernetes (Choose one):
 
 1. **Local Development**:
@@ -523,6 +537,9 @@ k8s-helper provides a command-line interface for Kubernetes operations. After in
 # Show help
 k8s-helper --help
 
+# Show version
+k8s-helper --version
+
 # Configure settings
 k8s-helper config --namespace my-namespace
 k8s-helper config --output-format yaml
@@ -699,6 +716,112 @@ k8s-helper --install-completion zsh
 k8s-helper --show-completion bash
 ```
 
+### AWS EKS Integration
+
+```bash
+# Create an EKS cluster
+k8s-helper create-eks-cluster my-cluster --region us-west-2 --version 1.29
+
+# Create EKS cluster with custom settings
+k8s-helper create-eks-cluster my-cluster \
+  --region us-east-1 \
+  --instance-types t3.medium,t3.large \
+  --min-size 2 \
+  --max-size 10 \
+  --desired-size 3 \
+  --node-group my-nodes \
+  --wait
+
+# Note: Requires AWS credentials configured (aws configure)
+```
+
+### Secrets Management
+
+```bash
+# Create a secret
+k8s-helper create-secret my-secret --data "username=admin,password=secret123"
+
+# Create a TLS secret
+k8s-helper create-secret tls-secret --data "tls.crt=cert_content,tls.key=key_content" --type kubernetes.io/tls
+
+# List secrets
+k8s-helper list-secrets --namespace my-namespace
+
+# Delete a secret
+k8s-helper delete-secret my-secret --namespace my-namespace
+```
+
+### Persistent Volume Claims (PVC)
+
+```bash
+# Create a PVC
+k8s-helper create-pvc my-storage 10Gi --access-modes ReadWriteOnce
+
+# Create PVC with specific storage class
+k8s-helper create-pvc my-storage 50Gi --storage-class fast-ssd --access-modes ReadWriteMany
+
+# List PVCs
+k8s-helper list-pvcs --namespace my-namespace
+
+# Delete a PVC
+k8s-helper delete-pvc my-storage --namespace my-namespace
+```
+
+### Service URL Retrieval
+
+```bash
+# Get service URL (including AWS ELB URLs)
+k8s-helper service-url my-service --namespace my-namespace
+
+# Watch for URL changes (useful for LoadBalancer provisioning)
+k8s-helper service-url my-service --watch --namespace my-namespace
+
+# Shows:
+# - ClusterIP access information
+# - NodePort URLs
+# - AWS ELB DNS names for LoadBalancer services
+# - External IPs and hostnames
+```
+
+### Enhanced Application Deployment
+
+```bash
+# Deploy with init container
+k8s-helper apply my-app nginx:latest \
+  --init-container "init-db:postgres:13:pg_isready -h db" \
+  --init-env "PGHOST=db,PGPORT=5432"
+
+# Deploy with PVC mount
+k8s-helper apply my-app nginx:latest \
+  --pvc "my-storage:/data" \
+  --replicas 2
+
+# Deploy with secret mount
+k8s-helper apply my-app nginx:latest \
+  --secret "my-secret:/etc/secrets" \
+  --port 8080
+
+# Deploy with LoadBalancer and show URL
+k8s-helper apply my-app nginx:latest \
+  --service-type LoadBalancer \
+  --wait \
+  --show-url
+
+# Complex deployment with multiple features
+k8s-helper apply my-app nginx:latest \
+  --replicas 3 \
+  --port 8080 \
+  --service-type LoadBalancer \
+  --env "ENV=production,DEBUG=false" \
+  --labels "app=my-app,version=v1.0" \
+  --init-container "migrate:migrate-tool:latest:migrate up" \
+  --init-env "DB_HOST=postgres,DB_PORT=5432" \
+  --secret "db-secret:/etc/db" \
+  --pvc "app-storage:/var/data" \
+  --wait \
+  --show-url
+```
+
 ## Real-World Examples
 
 ### 1. Simple Web Application
k8s_helper_cli-0.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
+k8s_helper/__init__.py,sha256=7hcwl8Rhdl6fyIr3J2O8SCUXHfEujbuJ0rGVIiGRgAI,2666
+k8s_helper/cli.py,sha256=qcFNAfKska-Ouqr2ILoFoLxEyXPgoqaWvl4y64dMkCU,36366
+k8s_helper/config.py,sha256=P7YdfyvCHprrNs2J9DRb3RrClylfTTh5hfTtDzLug0A,6867
+k8s_helper/core.py,sha256=UK1fwFyZiWl8ajrNy7n5w6Lgo6b6T6DiZFH4BvRyQUY,44844
+k8s_helper/utils.py,sha256=wYgTd5ktyuI-EiVcfW7FrxA7MzXY5odrEKQgmMVdueY,9496
+k8s_helper_cli-0.2.1.dist-info/licenses/LICENSE,sha256=tXPvVl3gLVc6e0qCEoLH9KjeA7z4JVL78UybpvGtBCw,1096
+k8s_helper_cli-0.2.1.dist-info/METADATA,sha256=a7oedjwmqQ4-dEoDFR0k3R1BCTTDH_MmET2_zbgXG2w,26956
+k8s_helper_cli-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+k8s_helper_cli-0.2.1.dist-info/entry_points.txt,sha256=IoCMWUZ6mn90LwzQzEy5YkWOwvogDdZ6ycqUWAzCFTQ,50
+k8s_helper_cli-0.2.1.dist-info/top_level.txt,sha256=x9A1jflyer-z2cFnkqk5B42juoH2q0fy5hkT9upsTG8,11
+k8s_helper_cli-0.2.1.dist-info/RECORD,,
k8s_helper_cli-0.1.2.dist-info/RECORD
DELETED
@@ -1,11 +0,0 @@
-k8s_helper/__init__.py,sha256=QjPeV7P3FN7lYoU83Y786Mu2zwevuJeW8uL4kRiqhN8,2666
-k8s_helper/cli.py,sha256=HSa_JXe-LB7pUYJIBM3KxjsRLVFe1fSDD6uwG_NJn6A,18282
-k8s_helper/config.py,sha256=P7YdfyvCHprrNs2J9DRb3RrClylfTTh5hfTtDzLug0A,6867
-k8s_helper/core.py,sha256=gCQkJSEQ3_ectExB9h1rFvI56Egysg50eVgYtpfquH0,21066
-k8s_helper/utils.py,sha256=wYgTd5ktyuI-EiVcfW7FrxA7MzXY5odrEKQgmMVdueY,9496
-k8s_helper_cli-0.1.2.dist-info/licenses/LICENSE,sha256=tXPvVl3gLVc6e0qCEoLH9KjeA7z4JVL78UybpvGtBCw,1096
-k8s_helper_cli-0.1.2.dist-info/METADATA,sha256=EHDDnCm3EY0cP8EjxWKwVsPFbjx3ZVilHKm0_05HAN0,23339
-k8s_helper_cli-0.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-k8s_helper_cli-0.1.2.dist-info/entry_points.txt,sha256=IoCMWUZ6mn90LwzQzEy5YkWOwvogDdZ6ycqUWAzCFTQ,50
-k8s_helper_cli-0.1.2.dist-info/top_level.txt,sha256=x9A1jflyer-z2cFnkqk5B42juoH2q0fy5hkT9upsTG8,11
-k8s_helper_cli-0.1.2.dist-info/RECORD,,
{k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/WHEEL
File without changes
{k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/entry_points.txt
File without changes
{k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/licenses/LICENSE
File without changes
{k8s_helper_cli-0.1.2.dist-info → k8s_helper_cli-0.2.1.dist-info}/top_level.txt
File without changes