k8s-helper-cli 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff shows the content changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- k8s_helper/__init__.py +1 -1
- k8s_helper/cli.py +480 -6
- k8s_helper/core.py +680 -4
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/METADATA +123 -3
- k8s_helper_cli-0.2.2.dist-info/RECORD +11 -0
- k8s_helper_cli-0.2.0.dist-info/RECORD +0 -11
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/WHEEL +0 -0
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/entry_points.txt +0 -0
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {k8s_helper_cli-0.2.0.dist-info → k8s_helper_cli-0.2.2.dist-info}/top_level.txt +0 -0
k8s_helper/core.py
CHANGED
@@ -2,8 +2,299 @@ from kubernetes import client, config
 from kubernetes.client.rest import ApiException
 from typing import Dict, List, Optional, Any
 import yaml
+import time
+import base64
+import boto3
+import json
+from botocore.exceptions import ClientError, NoCredentialsError
 
 
+class EKSClient:
+    """AWS EKS client for cluster management"""
+
+    def __init__(self, region: str = "us-west-2"):
+        """Initialize EKS client
+
+        Args:
+            region: AWS region for EKS operations
+        """
+        self.region = region
+        try:
+            self.eks_client = boto3.client('eks', region_name=region)
+            self.ec2_client = boto3.client('ec2', region_name=region)
+            self.iam_client = boto3.client('iam', region_name=region)
+        except (NoCredentialsError, ClientError) as e:
+            raise Exception(f"AWS credentials not found or invalid: {e}")
+
+    def create_cluster(self, cluster_name: str, version: str = "1.29",
+                       subnets: List[str] = None, security_groups: List[str] = None,
+                       role_arn: str = None, node_group_name: str = None,
+                       instance_types: List[str] = None, ami_type: str = "AL2_x86_64",
+                       capacity_type: str = "ON_DEMAND", scaling_config: Dict = None) -> Dict:
+        """Create an EKS cluster
+
+        Args:
+            cluster_name: Name of the EKS cluster
+            version: Kubernetes version
+            subnets: List of subnet IDs
+            security_groups: List of security group IDs
+            role_arn: IAM role ARN for the cluster
+            node_group_name: Name for the node group
+            instance_types: List of EC2 instance types
+            ami_type: AMI type for nodes
+            capacity_type: Capacity type (ON_DEMAND or SPOT)
+            scaling_config: Scaling configuration for node group
+
+        Returns:
+            Dict containing cluster information
+        """
+        try:
+            # Use default values if not provided
+            if subnets is None:
+                subnets = self._get_default_subnets()
+
+            if role_arn is None:
+                role_arn = self._create_or_get_cluster_role()
+
+            if instance_types is None:
+                instance_types = ["t3.medium"]
+
+            if scaling_config is None:
+                scaling_config = {
+                    "minSize": 1,
+                    "maxSize": 3,
+                    "desiredSize": 2
+                }
+
+            # Create cluster
+            cluster_response = self.eks_client.create_cluster(
+                name=cluster_name,
+                version=version,
+                roleArn=role_arn,
+                resourcesVpcConfig={
+                    'subnetIds': subnets,
+                    'securityGroupIds': security_groups or [],
+                    'endpointConfigPublic': True,
+                    'endpointConfigPrivate': True
+                },
+                logging={
+                    'enable': True,
+                    'types': ['api', 'audit', 'authenticator', 'controllerManager', 'scheduler']
+                }
+            )
+
+            cluster_info = {
+                'cluster_name': cluster_name,
+                'status': 'CREATING',
+                'cluster_arn': cluster_response['cluster']['arn'],
+                'endpoint': cluster_response['cluster'].get('endpoint', 'Not available yet'),
+                'version': version,
+                'role_arn': role_arn,
+                'subnets': subnets,
+                'created_at': cluster_response['cluster']['createdAt']
+            }
+
+            # If node group name is provided, we'll create it after cluster is active
+            if node_group_name:
+                cluster_info['node_group_name'] = node_group_name
+                cluster_info['instance_types'] = instance_types
+                cluster_info['scaling_config'] = scaling_config
+
+            return cluster_info
+
+        except ClientError as e:
+            raise Exception(f"Failed to create EKS cluster: {e}")
+
+    def _get_default_subnets(self) -> List[str]:
+        """Get default subnets for EKS cluster from different AZs"""
+        try:
+            response = self.ec2_client.describe_subnets()
+
+            # Group subnets by availability zone
+            subnets_by_az = {}
+            for subnet in response['Subnets']:
+                if subnet['State'] == 'available':
+                    az = subnet['AvailabilityZone']
+                    if az not in subnets_by_az:
+                        subnets_by_az[az] = []
+                    subnets_by_az[az].append(subnet['SubnetId'])
+
+            # Get at least 2 subnets from different AZs
+            selected_subnets = []
+            for az, subnet_ids in subnets_by_az.items():
+                if len(selected_subnets) < 2:
+                    selected_subnets.append(subnet_ids[0])  # Take first subnet from each AZ
+
+            if len(selected_subnets) < 2:
+                # If we don't have subnets in 2 different AZs, let's create them
+                selected_subnets = self._create_default_vpc_subnets()
+
+            return selected_subnets
+
+        except ClientError as e:
+            raise Exception(f"Failed to get default subnets: {e}")
+
+    def _create_default_vpc_subnets(self) -> List[str]:
+        """Create default VPC and subnets for EKS if none exist"""
+        try:
+            # Get default VPC
+            vpcs = self.ec2_client.describe_vpcs(Filters=[{'Name': 'isDefault', 'Values': ['true']}])
+            if not vpcs['Vpcs']:
+                raise Exception("No default VPC found. Please create subnets manually or set up a VPC.")
+
+            vpc_id = vpcs['Vpcs'][0]['VpcId']
+
+            # Get available AZs
+            azs = self.ec2_client.describe_availability_zones()
+            if len(azs['AvailabilityZones']) < 2:
+                raise Exception("Need at least 2 availability zones for EKS cluster")
+
+            # Create subnets in first 2 AZs
+            subnet_ids = []
+            for i, az in enumerate(azs['AvailabilityZones'][:2]):
+                cidr = f"172.31.{i * 16}.0/20"  # Create non-overlapping CIDR blocks
+
+                try:
+                    response = self.ec2_client.create_subnet(
+                        VpcId=vpc_id,
+                        CidrBlock=cidr,
+                        AvailabilityZone=az['ZoneName']
+                    )
+                    subnet_id = response['Subnet']['SubnetId']
+                    subnet_ids.append(subnet_id)
+
+                    # Enable auto-assign public IP
+                    self.ec2_client.modify_subnet_attribute(
+                        SubnetId=subnet_id,
+                        MapPublicIpOnLaunch={'Value': True}
+                    )
+
+                    # Tag the subnet
+                    self.ec2_client.create_tags(
+                        Resources=[subnet_id],
+                        Tags=[
+                            {'Key': 'Name', 'Value': f'eks-subnet-{az["ZoneName"]}'},
+                            {'Key': 'kubernetes.io/role/elb', 'Value': '1'}
+                        ]
+                    )
+
+                except ClientError as e:
+                    if e.response['Error']['Code'] == 'InvalidVpc.Range':
+                        # Try a different CIDR range
+                        cidr = f"10.0.{i}.0/24"
+                        response = self.ec2_client.create_subnet(
+                            VpcId=vpc_id,
+                            CidrBlock=cidr,
+                            AvailabilityZone=az['ZoneName']
+                        )
+                        subnet_id = response['Subnet']['SubnetId']
+                        subnet_ids.append(subnet_id)
+
+                        # Enable auto-assign public IP
+                        self.ec2_client.modify_subnet_attribute(
+                            SubnetId=subnet_id,
+                            MapPublicIpOnLaunch={'Value': True}
+                        )
+
+                        # Tag the subnet
+                        self.ec2_client.create_tags(
+                            Resources=[subnet_id],
+                            Tags=[
+                                {'Key': 'Name', 'Value': f'eks-subnet-{az["ZoneName"]}'},
+                                {'Key': 'kubernetes.io/role/elb', 'Value': '1'}
+                            ]
+                        )
+
+            return subnet_ids
+
+        except ClientError as e:
+            raise Exception(f"Failed to create default subnets: {e}")
+
+    def _create_or_get_cluster_role(self) -> str:
+        """Create or get IAM role for EKS cluster"""
+        role_name = "eks-cluster-role"
+
+        try:
+            # Check if role exists
+            response = self.iam_client.get_role(RoleName=role_name)
+            return response['Role']['Arn']
+
+        except ClientError as e:
+            if e.response['Error']['Code'] == 'NoSuchEntity':
+                # Create the role
+                trust_policy = {
+                    "Version": "2012-10-17",
+                    "Statement": [
+                        {
+                            "Effect": "Allow",
+                            "Principal": {
+                                "Service": "eks.amazonaws.com"
+                            },
+                            "Action": "sts:AssumeRole"
+                        }
+                    ]
+                }
+
+                response = self.iam_client.create_role(
+                    RoleName=role_name,
+                    AssumeRolePolicyDocument=json.dumps(trust_policy),
+                    Description="EKS cluster role created by k8s-helper"
+                )
+
+                # Attach required policies
+                policies = [
+                    "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+                ]
+
+                for policy in policies:
+                    self.iam_client.attach_role_policy(
+                        RoleName=role_name,
+                        PolicyArn=policy
+                    )
+
+                return response['Role']['Arn']
+            else:
+                raise Exception(f"Failed to create or get cluster role: {e}")
+
+    def get_cluster_status(self, cluster_name: str) -> Dict:
+        """Get EKS cluster status"""
+        try:
+            response = self.eks_client.describe_cluster(name=cluster_name)
+            cluster = response['cluster']
+
+            return {
+                'name': cluster['name'],
+                'status': cluster['status'],
+                'endpoint': cluster.get('endpoint', 'Not available'),
+                'version': cluster['version'],
+                'platform_version': cluster.get('platformVersion', 'Not available'),
+                'created_at': cluster['createdAt'],
+                'arn': cluster['arn']
+            }
+
+        except ClientError as e:
+            raise Exception(f"Failed to get cluster status: {e}")
+
+    def wait_for_cluster_active(self, cluster_name: str, timeout: int = 1800) -> bool:
+        """Wait for EKS cluster to become active"""
+        import time
+        start_time = time.time()
+
+        while time.time() - start_time < timeout:
+            try:
+                status = self.get_cluster_status(cluster_name)
+                if status['status'] == 'ACTIVE':
+                    return True
+                elif status['status'] == 'FAILED':
+                    raise Exception(f"Cluster creation failed")
+
+                time.sleep(30)  # Check every 30 seconds
+
+            except Exception as e:
+                raise Exception(f"Error waiting for cluster: {e}")
+
+        return False
+
 class K8sClient:
     def __init__(self, namespace="default"):
         try:
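The new `EKSClient` wraps cluster provisioning end to end, defaulting subnets, the IAM role, instance types, and scaling config when they are not supplied. A minimal usage sketch, assuming AWS credentials are already configured; the cluster and node group names are illustrative, not from the package:

```python
# Hypothetical driver for the EKSClient added in 0.2.2; names are illustrative.
from k8s_helper.core import EKSClient

eks = EKSClient(region="us-west-2")

# Subnets, IAM role, instance types and scaling config fall back to defaults.
info = eks.create_cluster(cluster_name="demo-cluster", node_group_name="demo-nodes")
print(info['status'])  # 'CREATING'

# Polls get_cluster_status() every 30s until ACTIVE, up to a 30-minute timeout.
if eks.wait_for_cluster_active("demo-cluster"):
    print(eks.get_cluster_status("demo-cluster")['endpoint'])
```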
@@ -20,8 +311,26 @@ class K8sClient:
     # ======================
     def create_deployment(self, name: str, image: str, replicas: int = 1,
                           container_port: int = 80, env_vars: Optional[Dict[str, str]] = None,
-                          labels: Optional[Dict[str, str]] = None
-
+                          labels: Optional[Dict[str, str]] = None,
+                          init_containers: Optional[List[Dict]] = None,
+                          volume_mounts: Optional[List[Dict]] = None,
+                          volumes: Optional[List[Dict]] = None) -> Optional[Any]:
+        """Create a Kubernetes deployment
+
+        Args:
+            name: Deployment name
+            image: Container image
+            replicas: Number of replicas
+            container_port: Container port
+            env_vars: Environment variables
+            labels: Labels for the deployment
+            init_containers: List of init container specifications
+            volume_mounts: List of volume mounts for the main container
+            volumes: List of volumes for the pod
+
+        Returns:
+            Deployment object if successful, None otherwise
+        """
         if labels is None:
             labels = {"app": name}
 
@@ -30,16 +339,90 @@ class K8sClient:
         if env_vars:
             env = [client.V1EnvVar(name=k, value=v) for k, v in env_vars.items()]
 
+        # Volume mounts for main container
+        volume_mounts_obj = []
+        if volume_mounts:
+            for vm in volume_mounts:
+                volume_mounts_obj.append(client.V1VolumeMount(
+                    name=vm.get('name'),
+                    mount_path=vm.get('mount_path'),
+                    read_only=vm.get('read_only', False)
+                ))
+
+        # Main container
         container = client.V1Container(
             name=name,
             image=image,
             ports=[client.V1ContainerPort(container_port=container_port)],
-            env=env if env else None
+            env=env if env else None,
+            volume_mounts=volume_mounts_obj if volume_mounts_obj else None
         )
 
+        # Init containers
+        init_containers_obj = []
+        if init_containers:
+            for init_container in init_containers:
+                init_env = []
+                if init_container.get('env_vars'):
+                    init_env = [client.V1EnvVar(name=k, value=v)
+                                for k, v in init_container['env_vars'].items()]
+
+                init_volume_mounts = []
+                if init_container.get('volume_mounts'):
+                    for vm in init_container['volume_mounts']:
+                        init_volume_mounts.append(client.V1VolumeMount(
+                            name=vm.get('name'),
+                            mount_path=vm.get('mount_path'),
+                            read_only=vm.get('read_only', False)
+                        ))
+
+                init_containers_obj.append(client.V1Container(
+                    name=init_container['name'],
+                    image=init_container['image'],
+                    command=init_container.get('command'),
+                    args=init_container.get('args'),
+                    env=init_env if init_env else None,
+                    volume_mounts=init_volume_mounts if init_volume_mounts else None
+                ))
+
+        # Volumes
+        volumes_obj = []
+        if volumes:
+            for volume in volumes:
+                if volume.get('type') == 'pvc':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
+                            claim_name=volume['claim_name']
+                        )
+                    ))
+                elif volume.get('type') == 'secret':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        secret=client.V1SecretVolumeSource(
+                            secret_name=volume['secret_name']
+                        )
+                    ))
+                elif volume.get('type') == 'configmap':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        config_map=client.V1ConfigMapVolumeSource(
+                            name=volume['config_map_name']
+                        )
+                    ))
+                elif volume.get('type') == 'empty_dir':
+                    volumes_obj.append(client.V1Volume(
+                        name=volume['name'],
+                        empty_dir=client.V1EmptyDirVolumeSource()
+                    ))
+
         template = client.V1PodTemplateSpec(
             metadata=client.V1ObjectMeta(labels=labels),
-            spec=client.V1PodSpec(
+            spec=client.V1PodSpec(
+                containers=[container],
+                init_containers=init_containers_obj if init_containers_obj else None,
+                volumes=volumes_obj if volumes_obj else None
+            )
         )
 
         spec = client.V1DeploymentSpec(
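Taken together, the two hunks above let `create_deployment` accept init containers and volumes as plain dicts, translating them into `V1Container` and `V1Volume` objects. A sketch of the extended call, assuming a PVC named `web-data` already exists; all names are illustrative:

```python
# Hypothetical call against the extended 0.2.2 signature; names are illustrative.
from k8s_helper.core import K8sClient

k8s = K8sClient(namespace="demo")
k8s.create_deployment(
    name="web",
    image="nginx:1.27",
    container_port=80,
    # Each init container dict takes name/image plus optional
    # command, args, env_vars and volume_mounts keys.
    init_containers=[{
        'name': 'init-data',
        'image': 'busybox:1.36',
        'command': ['sh', '-c', 'echo hello > /data/index.html'],
        'volume_mounts': [{'name': 'data', 'mount_path': '/data'}],
    }],
    volume_mounts=[{'name': 'data', 'mount_path': '/usr/share/nginx/html'}],
    # Volume dicts are dispatched on 'type': pvc, secret, configmap, empty_dir.
    volumes=[{'type': 'pvc', 'name': 'data', 'claim_name': 'web-data'}],
)
```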
@@ -468,6 +851,299 @@ class K8sClient:
             print(f"❌ Error describing service '{name}': {e}")
             return None
 
+    # ======================
+    # SECRET OPERATIONS
+    # ======================
+    def create_secret(self, name: str, data: Dict[str, str],
+                      secret_type: str = "Opaque", namespace: str = None) -> Optional[Any]:
+        """Create a Kubernetes secret
+
+        Args:
+            name: Name of the secret
+            data: Dictionary of key-value pairs for the secret
+            secret_type: Type of secret (Opaque, kubernetes.io/tls, etc.)
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            Secret object if successful, None otherwise
+        """
+        try:
+            ns = namespace or self.namespace
+
+            # Encode data as base64
+            encoded_data = {}
+            for key, value in data.items():
+                encoded_data[key] = base64.b64encode(value.encode()).decode()
+
+            secret = client.V1Secret(
+                metadata=client.V1ObjectMeta(name=name, namespace=ns),
+                type=secret_type,
+                data=encoded_data
+            )
+
+            result = self.core_v1.create_namespaced_secret(
+                namespace=ns,
+                body=secret
+            )
+
+            return result
+
+        except ApiException as e:
+            print(f"❌ Error creating secret: {e}")
+            return None
+
+    def get_secret(self, name: str, namespace: str = None) -> Optional[Dict]:
+        """Get a Kubernetes secret
+
+        Args:
+            name: Name of the secret
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            Dictionary containing secret data
+        """
+        try:
+            ns = namespace or self.namespace
+            result = self.core_v1.read_namespaced_secret(name=name, namespace=ns)
+
+            # Decode base64 data
+            decoded_data = {}
+            if result.data:
+                for key, value in result.data.items():
+                    decoded_data[key] = base64.b64decode(value).decode()
+
+            return {
+                'name': result.metadata.name,
+                'namespace': result.metadata.namespace,
+                'type': result.type,
+                'data': decoded_data,
+                'created_at': result.metadata.creation_timestamp
+            }
+
+        except ApiException as e:
+            print(f"❌ Error getting secret: {e}")
+            return None
+
+    def delete_secret(self, name: str, namespace: str = None) -> bool:
+        """Delete a Kubernetes secret"""
+        try:
+            ns = namespace or self.namespace
+            self.core_v1.delete_namespaced_secret(name=name, namespace=ns)
+            return True
+        except ApiException as e:
+            print(f"❌ Error deleting secret: {e}")
+            return False
+
+    def list_secrets(self, namespace: str = None) -> List[Dict]:
+        """List all secrets in a namespace"""
+        try:
+            ns = namespace or self.namespace
+            result = self.core_v1.list_namespaced_secret(namespace=ns)
+
+            secrets = []
+            for secret in result.items:
+                secrets.append({
+                    'name': secret.metadata.name,
+                    'namespace': secret.metadata.namespace,
+                    'type': secret.type,
+                    'data_keys': list(secret.data.keys()) if secret.data else [],
+                    'created_at': secret.metadata.creation_timestamp
+                })
+
+            return secrets
+
+        except ApiException as e:
+            print(f"❌ Error listing secrets: {e}")
+            return []
+
|
+
# ======================
|
960
|
+
# PVC OPERATIONS
|
961
|
+
# ======================
|
962
|
+
def create_pvc(self, name: str, size: str, access_modes: List[str] = None,
|
963
|
+
storage_class: str = None, namespace: str = None) -> Optional[Any]:
|
964
|
+
"""Create a Persistent Volume Claim
|
965
|
+
|
966
|
+
Args:
|
967
|
+
name: Name of the PVC
|
968
|
+
size: Size of the volume (e.g., '10Gi', '100Mi')
|
969
|
+
access_modes: List of access modes (default: ['ReadWriteOnce'])
|
970
|
+
storage_class: Storage class name
|
971
|
+
namespace: Namespace (uses default if not provided)
|
972
|
+
|
973
|
+
Returns:
|
974
|
+
PVC object if successful, None otherwise
|
975
|
+
"""
|
976
|
+
try:
|
977
|
+
ns = namespace or self.namespace
|
978
|
+
|
979
|
+
if access_modes is None:
|
980
|
+
access_modes = ['ReadWriteOnce']
|
981
|
+
|
982
|
+
# Create PVC specification
|
983
|
+
pvc_spec = client.V1PersistentVolumeClaimSpec(
|
984
|
+
access_modes=access_modes,
|
985
|
+
resources=client.V1ResourceRequirements(
|
986
|
+
requests={'storage': size}
|
987
|
+
)
|
988
|
+
)
|
989
|
+
|
990
|
+
if storage_class:
|
991
|
+
pvc_spec.storage_class_name = storage_class
|
992
|
+
|
993
|
+
pvc = client.V1PersistentVolumeClaim(
|
994
|
+
metadata=client.V1ObjectMeta(name=name, namespace=ns),
|
995
|
+
spec=pvc_spec
|
996
|
+
)
|
997
|
+
|
998
|
+
result = self.core_v1.create_namespaced_persistent_volume_claim(
|
999
|
+
namespace=ns,
|
1000
|
+
body=pvc
|
1001
|
+
)
|
1002
|
+
|
1003
|
+
return result
|
1004
|
+
|
1005
|
+
except ApiException as e:
|
1006
|
+
print(f"❌ Error creating PVC: {e}")
|
1007
|
+
return None
|
1008
|
+
|
1009
|
+
def get_pvc(self, name: str, namespace: str = None) -> Optional[Dict]:
|
1010
|
+
"""Get a Persistent Volume Claim"""
|
1011
|
+
try:
|
1012
|
+
ns = namespace or self.namespace
|
1013
|
+
result = self.core_v1.read_namespaced_persistent_volume_claim(name=name, namespace=ns)
|
1014
|
+
|
1015
|
+
return {
|
1016
|
+
'name': result.metadata.name,
|
1017
|
+
'namespace': result.metadata.namespace,
|
1018
|
+
'status': result.status.phase,
|
1019
|
+
'volume_name': result.spec.volume_name,
|
1020
|
+
'access_modes': result.spec.access_modes,
|
1021
|
+
'storage_class': result.spec.storage_class_name,
|
1022
|
+
'size': result.spec.resources.requests.get('storage', 'Unknown'),
|
1023
|
+
'created_at': result.metadata.creation_timestamp
|
1024
|
+
}
|
1025
|
+
|
1026
|
+
except ApiException as e:
|
1027
|
+
print(f"❌ Error getting PVC: {e}")
|
1028
|
+
return None
|
1029
|
+
|
1030
|
+
def delete_pvc(self, name: str, namespace: str = None) -> bool:
|
1031
|
+
"""Delete a Persistent Volume Claim"""
|
1032
|
+
try:
|
1033
|
+
ns = namespace or self.namespace
|
1034
|
+
self.core_v1.delete_namespaced_persistent_volume_claim(name=name, namespace=ns)
|
1035
|
+
return True
|
1036
|
+
except ApiException as e:
|
1037
|
+
print(f"❌ Error deleting PVC: {e}")
|
1038
|
+
return False
|
1039
|
+
|
1040
|
+
def list_pvcs(self, namespace: str = None) -> List[Dict]:
|
1041
|
+
"""List all PVCs in a namespace"""
|
1042
|
+
try:
|
1043
|
+
ns = namespace or self.namespace
|
1044
|
+
result = self.core_v1.list_namespaced_persistent_volume_claim(namespace=ns)
|
1045
|
+
|
1046
|
+
pvcs = []
|
1047
|
+
for pvc in result.items:
|
1048
|
+
pvcs.append({
|
1049
|
+
'name': pvc.metadata.name,
|
1050
|
+
'namespace': pvc.metadata.namespace,
|
1051
|
+
'status': pvc.status.phase,
|
1052
|
+
'volume_name': pvc.spec.volume_name,
|
1053
|
+
'access_modes': pvc.spec.access_modes,
|
1054
|
+
'storage_class': pvc.spec.storage_class_name,
|
1055
|
+
'size': pvc.spec.resources.requests.get('storage', 'Unknown'),
|
1056
|
+
'created_at': pvc.metadata.creation_timestamp
|
1057
|
+
})
|
1058
|
+
|
1059
|
+
return pvcs
|
1060
|
+
|
1061
|
+
except ApiException as e:
|
1062
|
+
print(f"❌ Error listing PVCs: {e}")
|
1063
|
+
return []
|
1064
|
+
|
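The PVC helpers follow the same pattern: create with a size string, read back a flat dict. A sketch; the storage class name is illustrative and cluster-specific:

```python
# Hypothetical PVC lifecycle with the new helpers; 'gp2' is illustrative.
from k8s_helper.core import K8sClient

k8s = K8sClient(namespace="demo")
k8s.create_pvc("web-data", size="10Gi", access_modes=["ReadWriteOnce"],
               storage_class="gp2")

pvc = k8s.get_pvc("web-data")
print(pvc['status'], pvc['size'])   # e.g. 'Pending' until a volume is bound
```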
+    # ======================
+    # SERVICE URL OPERATIONS
+    # ======================
+    def get_service_url(self, name: str, namespace: str = None) -> Optional[Dict]:
+        """Get service URL, including AWS ELB URLs for LoadBalancer services
+
+        Args:
+            name: Name of the service
+            namespace: Namespace (uses default if not provided)
+
+        Returns:
+            Dictionary containing service URL information
+        """
+        try:
+            ns = namespace or self.namespace
+            service = self.core_v1.read_namespaced_service(name=name, namespace=ns)
+
+            service_type = service.spec.type
+            ports = []
+            for port in service.spec.ports:
+                ports.append({
+                    'port': port.port,
+                    'target_port': port.target_port,
+                    'protocol': port.protocol,
+                    'name': port.name
+                })
+
+            result = {
+                'name': name,
+                'namespace': ns,
+                'type': service_type,
+                'ports': ports,
+                'cluster_ip': service.spec.cluster_ip
+            }
+
+            if service_type == 'LoadBalancer':
+                # Check for AWS ELB
+                ingress = service.status.load_balancer.ingress
+                if ingress:
+                    for ing in ingress:
+                        if ing.hostname:  # AWS ELB uses hostname
+                            result['external_url'] = f"http://{ing.hostname}"
+                            result['external_hostname'] = ing.hostname
+
+                            # Check if it's an AWS ELB
+                            if 'elb.amazonaws.com' in ing.hostname:
+                                result['aws_elb'] = True
+                                result['elb_dns_name'] = ing.hostname
+                        elif ing.ip:  # Some cloud providers use IP
+                            result['external_url'] = f"http://{ing.ip}"
+                            result['external_ip'] = ing.ip
+
+                # If no ingress yet, service might still be provisioning
+                if not ingress:
+                    result['status'] = 'Provisioning LoadBalancer...'
+
+            elif service_type == 'NodePort':
+                # For NodePort, we need to get node IPs
+                nodes = self.core_v1.list_node()
+                if nodes.items:
+                    node_ip = None
+                    for node in nodes.items:
+                        for address in node.status.addresses:
+                            if address.type == 'ExternalIP':
+                                node_ip = address.address
+                                break
+                        if node_ip:
+                            break
+
+                    if node_ip:
+                        for port in service.spec.ports:
+                            if port.node_port:
+                                result['external_url'] = f"http://{node_ip}:{port.node_port}"
+                                result['node_ip'] = node_ip
+                                result['node_port'] = port.node_port
+
+            return result
+
+        except ApiException as e:
+            print(f"❌ Error getting service URL: {e}")
+            return None
+
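For `LoadBalancer` services, `get_service_url` surfaces the AWS ELB hostname once provisioning completes (flagging `aws_elb` when the hostname is an `elb.amazonaws.com` address); for `NodePort` it falls back to the first node with an `ExternalIP`. A sketch of consuming the result; the service name is illustrative:

```python
# Hypothetical lookup via the new get_service_url helper; names are illustrative.
from k8s_helper.core import K8sClient

k8s = K8sClient(namespace="demo")
info = k8s.get_service_url("web-service")
if info:
    if info.get('aws_elb'):
        print("ELB DNS:", info['elb_dns_name'])
    print(info.get('external_url', f"cluster IP only: {info['cluster_ip']}"))
```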
     # ======================
     # UTILITY METHODS
     # ======================