k8s-helper-cli 0.5.0__tar.gz → 0.5.1__tar.gz

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {k8s_helper_cli-0.5.0/src/k8s_helper_cli.egg-info → k8s_helper_cli-0.5.1}/PKG-INFO +1 -1
  2. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/pyproject.toml +1 -1
  3. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper/__init__.py +1 -1
  4. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper/cli.py +212 -0
  5. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper/core.py +395 -0
  6. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1/src/k8s_helper_cli.egg-info}/PKG-INFO +1 -1
  7. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/LICENSE +0 -0
  8. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/README.md +0 -0
  9. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/setup.cfg +0 -0
  10. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper/config.py +0 -0
  11. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper/utils.py +0 -0
  12. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper_cli.egg-info/SOURCES.txt +0 -0
  13. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper_cli.egg-info/dependency_links.txt +0 -0
  14. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper_cli.egg-info/entry_points.txt +0 -0
  15. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper_cli.egg-info/requires.txt +0 -0
  16. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/src/k8s_helper_cli.egg-info/top_level.txt +0 -0
  17. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/tests/test_core.py +0 -0
  18. {k8s_helper_cli-0.5.0 → k8s_helper_cli-0.5.1}/tests/test_integration.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: k8s-helper-cli
- Version: 0.5.0
+ Version: 0.5.1
  Summary: A simplified Python wrapper for common Kubernetes operations
  Author-email: Harshit Chatterjee <harshitchatterjee50@gmail.com>
  License-Expression: MIT
@@ -1,6 +1,6 @@
  [project]
  name = "k8s-helper-cli"
- version = "0.5.0"
+ version = "0.5.1"
  description = "A simplified Python wrapper for common Kubernetes operations"
  readme = "README.md"
  authors = [
@@ -20,7 +20,7 @@ from .utils import (
      create_service_manifest
  )

- __version__ = "0.5.0"
+ __version__ = "0.5.1"
  __author__ = "Harshit Chatterjee"
  __email__ = "harshitchatterjee50@gmail.com"

@@ -1886,5 +1886,217 @@ def update_prometheus_target(
          console.print(f"❌ Error updating Prometheus target: {e}")


+ # ======================
+ # HELM-BASED MONITORING COMMANDS
+ # ======================
+ @app.command()
+ def setup_monitoring_stack(
+     namespace: str = typer.Option("monitoring", "--namespace", "-n", help="Namespace for monitoring stack"),
+     grafana_service_type: str = typer.Option("NodePort", "--service-type", "-t", help="Grafana service type: NodePort, LoadBalancer, ClusterIP"),
+     prometheus_storage_size: str = typer.Option("10Gi", "--prometheus-storage", help="Prometheus storage size"),
+     grafana_storage_size: str = typer.Option("5Gi", "--grafana-storage", help="Grafana storage size"),
+     wait: bool = typer.Option(True, "--wait/--no-wait", help="Wait for deployments to be ready"),
+     install_ingress: bool = typer.Option(False, "--install-ingress", help="Install ingress for external access")
+ ):
+     """Deploy monitoring stack using official Helm charts (Prometheus + Grafana)"""
+
+     # Validate service type
+     valid_service_types = ["NodePort", "LoadBalancer", "ClusterIP"]
+     if grafana_service_type not in valid_service_types:
+         console.print(f"❌ Invalid service type: {grafana_service_type}")
+         console.print(f"💡 Valid options: {', '.join(valid_service_types)}")
+         return
+
+     console.print(f"🚀 Setting up Helm-based monitoring stack in namespace: {namespace}")
+     console.print(f"🔧 Grafana service type: {grafana_service_type}")
+     console.print(f"💾 Prometheus storage: {prometheus_storage_size}")
+     console.print(f"💾 Grafana storage: {grafana_storage_size}")
+
+     # Show what will be deployed
+     console.print("\n📋 Components to deploy via Helm:")
+     console.print(" • Prometheus Operator (kube-prometheus-stack)")
+     console.print(" • Grafana with persistent storage")
+     console.print(" • AlertManager for alerts")
+     console.print(" • Node Exporter for node metrics")
+     console.print(" • kube-state-metrics for cluster state")
+     console.print(" • Prometheus rules and dashboards")
+     if install_ingress:
+         console.print(" • Ingress for external access")
+
+     try:
+         client = K8sClient()
+
+         with console.status("Deploying Helm monitoring stack..."):
+             result = client.setup_helm_monitoring(
+                 namespace=namespace,
+                 grafana_service_type=grafana_service_type,
+                 prometheus_storage_size=prometheus_storage_size,
+                 grafana_storage_size=grafana_storage_size,
+                 wait_for_ready=wait,
+                 install_ingress=install_ingress
+             )
+
+         if result['success']:
+             console.print("✅ Helm monitoring stack deployed successfully!")
+
+             # Show deployment summary
+             console.print(f"\n📋 Deployment Summary:")
+             console.print(f"📍 Namespace: {result['namespace']}")
+             console.print(f"🎯 Helm Release: {result['release_name']}")
+
+             if result.get('prometheus', {}).get('deployed'):
+                 console.print("✅ Prometheus Operator: Deployed")
+             else:
+                 console.print("❌ Prometheus Operator: Failed to deploy")
+
+             if result.get('grafana', {}).get('deployed'):
+                 console.print("✅ Grafana: Deployed")
+                 if result['grafana'].get('admin_password'):
+                     console.print(f"🔑 Grafana admin password: {result['grafana']['admin_password']}")
+                 else:
+                     console.print("🔑 Grafana admin password: admin")
+             else:
+                 console.print("❌ Grafana: Failed to deploy")
+
+             # Show access information
+             console.print(f"\n🔗 Access Information:")
+
+             if result.get('grafana_url'):
+                 console.print(f"🔗 Grafana URL: [blue]{result['grafana_url']}[/blue]")
+             else:
+                 console.print(f"💡 Grafana: kubectl port-forward -n {namespace} svc/kube-prometheus-stack-grafana 3000:80")
+
+             if result.get('prometheus_url'):
+                 console.print(f"🔗 Prometheus URL: [blue]{result['prometheus_url']}[/blue]")
+             else:
+                 console.print(f"💡 Prometheus: kubectl port-forward -n {namespace} svc/kube-prometheus-stack-prometheus 9090:9090")
+
+             if result.get('alertmanager_url'):
+                 console.print(f"🔗 AlertManager URL: [blue]{result['alertmanager_url']}[/blue]")
+             else:
+                 console.print(f"💡 AlertManager: kubectl port-forward -n {namespace} svc/kube-prometheus-stack-alertmanager 9093:9093")
+
+             # Show next steps
+             console.print(f"\n🚀 Next Steps:")
+             console.print(f" 1. Access Grafana with admin/admin (or password shown above)")
+             console.print(f" 2. Explore pre-configured dashboards")
+             console.print(f" 3. Set up custom alerts in AlertManager")
+             console.print(f" 4. Add custom Prometheus targets if needed")
+             console.print(f"\n💡 Useful commands:")
+             console.print(f" • Check status: k8s-helper monitoring-stack-status -n {namespace}")
+             console.print(f" • List dashboards: kubectl get configmaps -n {namespace} | grep dashboard")
+             console.print(f" • View Helm release: helm list -n {namespace}")
+
+         else:
+             console.print(f"❌ Failed to deploy Helm monitoring stack: {result.get('error', 'Unknown error')}")
+
+             console.print("\n🛠️ Troubleshooting:")
+             console.print(" • Ensure Helm is installed: helm version")
+             console.print(" • Check cluster connectivity: kubectl get nodes")
+             console.print(" • Verify namespace permissions")
+             console.print(f" • View Helm status: helm status -n {namespace} kube-prometheus-stack")
+
+     except Exception as e:
+         console.print(f"❌ Error setting up Helm monitoring: {e}")
+         console.print("\n🛠️ Troubleshooting:")
+         console.print(" • Ensure Helm is installed and configured")
+         console.print(" • Check if kubectl is configured correctly")
+         console.print(" • Verify you have cluster admin permissions")
+
+
+ @app.command()
+ def monitoring_stack_status(
+     namespace: str = typer.Option("monitoring", "--namespace", "-n", help="Monitoring namespace"),
+     output: str = output_option
+ ):
+     """Show status of Helm-based monitoring stack"""
+     try:
+         client = K8sClient()
+
+         with console.status("Checking Helm monitoring stack status..."):
+             info = client.get_helm_monitoring_info(namespace)
+
+         if 'error' in info:
+             console.print(f"❌ Error getting monitoring status: {info['error']}")
+             return
+
+         if output == "table":
+             # Helm release info
+             console.print(f"🎯 Helm Release: {info.get('release_name', 'kube-prometheus-stack')}")
+             console.print(f"📊 Release Status: {info.get('release_status', 'Unknown')}")
+             console.print(f"📅 Last Deployed: {info.get('last_deployed', 'Unknown')}")
+
+             # Overview table
+             table = Table(title=f"Monitoring Stack Status - {namespace}")
+             table.add_column("Component", style="cyan")
+             table.add_column("Status", style="green")
+             table.add_column("URL", style="blue")
+
+             # Components status
+             components = ['prometheus', 'grafana', 'alertmanager']
+             for component in components:
+                 if component in info:
+                     comp_info = info[component]
+                     status = "🟢 Running" if comp_info.get('running') else "🔴 Not Running"
+                     url = comp_info.get('url', 'Port-forward required')
+                     table.add_row(component.capitalize(), status, url)
+
+             console.print(table)
+
+             # Show pod status
+             if info.get('pods'):
+                 pod_table = Table(title="Pod Status")
+                 pod_table.add_column("Pod", style="cyan")
+                 pod_table.add_column("Status", style="green")
+                 pod_table.add_column("Ready", style="blue")
+
+                 for pod in info['pods']:
+                     pod_table.add_row(
+                         pod['name'],
+                         pod['status'],
+                         f"{pod['ready']}/{pod['total']}"
+                     )
+
+                 console.print(pod_table)
+
+         elif output == "json":
+             console.print(format_json_output(info))
+         elif output == "yaml":
+             console.print(format_yaml_output(info))
+
+     except Exception as e:
+         console.print(f"❌ Error checking Helm monitoring status: {e}")
+
+
+ @app.command()
+ def delete_monitoring_stack(
+     namespace: str = typer.Option("monitoring", "--namespace", "-n", help="Monitoring namespace"),
+     release_name: str = typer.Option("kube-prometheus-stack", "--release-name", help="Helm release name"),
+     force: bool = typer.Option(False, "--force", help="Skip confirmation prompt")
+ ):
+     """Delete Helm-based monitoring stack"""
+     if not force:
+         if not typer.confirm(f"Are you sure you want to delete the Helm monitoring stack '{release_name}' in namespace '{namespace}'?"):
+             console.print("❌ Operation cancelled")
+             return
+
+     try:
+         client = K8sClient()
+
+         console.print(f"🗑️ Deleting Helm monitoring stack: {release_name}")
+
+         with console.status("Uninstalling Helm release..."):
+             result = client.delete_helm_monitoring(namespace, release_name)
+
+         if result['success']:
+             console.print(f"✅ Helm monitoring stack '{release_name}' deleted successfully")
+             console.print(f"📋 Cleaned up {result.get('resources_deleted', 0)} resources")
+         else:
+             console.print(f"❌ Failed to delete Helm monitoring stack: {result.get('error', 'Unknown error')}")
+
+     except Exception as e:
+         console.print(f"❌ Error deleting Helm monitoring: {e}")
+
+
  if __name__ == "__main__":
      app()
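
The three commands above register on the existing Typer `app`, so they are exposed through the package's `k8s-helper` entry point under Typer's hyphenated command names (the same spelling the built-in help text uses, e.g. `k8s-helper monitoring-stack-status -n <namespace>`). Illustrative invocations, assuming Helm is on the PATH and kubectl points at a reachable cluster:

    k8s-helper setup-monitoring-stack --namespace monitoring --service-type NodePort
    k8s-helper monitoring-stack-status -n monitoring
    k8s-helper delete-monitoring-stack -n monitoring --force
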
@@ -3083,3 +3083,398 @@ scrape_configs:
          except Exception as e:
              print(f"⚠️ Could not restart Prometheus deployment: {e}")
              return False
+
+     # ======================
+     # HELM-BASED MONITORING METHODS
+     # ======================
+
+     def setup_helm_monitoring(self, namespace: str = "monitoring",
+                               grafana_service_type: str = "NodePort",
+                               prometheus_storage_size: str = "10Gi",
+                               grafana_storage_size: str = "5Gi",
+                               wait_for_ready: bool = True,
+                               install_ingress: bool = False) -> Dict:
+         """Deploy monitoring stack using official Helm charts"""
+         import subprocess
+         import tempfile
+         import os
+
+         try:
+             # Check if Helm is available
+             try:
+                 result = subprocess.run(['helm', 'version'], capture_output=True, text=True, check=True)
+             except (subprocess.CalledProcessError, FileNotFoundError):
+                 return {
+                     'success': False,
+                     'error': 'Helm is not installed or not in PATH. Please install Helm first.'
+                 }
+
+             # Create namespace if it doesn't exist
+             try:
+                 self.core_v1.create_namespace(
+                     body=client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))
+                 )
+                 print(f"✅ Created namespace: {namespace}")
+             except ApiException as e:
+                 if e.status == 409:  # Already exists
+                     print(f"✅ Namespace {namespace} already exists")
+                 else:
+                     print(f"⚠️ Could not create namespace: {e}")
+
+             # Add Prometheus community Helm repository
+             print("📦 Adding Prometheus community Helm repository...")
+             try:
+                 subprocess.run([
+                     'helm', 'repo', 'add', 'prometheus-community',
+                     'https://prometheus-community.github.io/helm-charts'
+                 ], check=True, capture_output=True)
+
+                 subprocess.run(['helm', 'repo', 'update'], check=True, capture_output=True)
+                 print("✅ Helm repository added and updated")
+             except subprocess.CalledProcessError as e:
+                 return {
+                     'success': False,
+                     'error': f'Failed to add Helm repository: {e.stderr.decode() if e.stderr else str(e)}'
+                 }
+
+             # Create Helm values file
+             helm_values = {
+                 'grafana': {
+                     'enabled': True,
+                     'persistence': {
+                         'enabled': True,
+                         'size': grafana_storage_size
+                     },
+                     'service': {
+                         'type': grafana_service_type
+                     },
+                     'adminPassword': 'admin',
+                     'datasources': {
+                         'datasources.yaml': {
+                             'apiVersion': 1,
+                             'datasources': [{
+                                 'name': 'Prometheus',
+                                 'type': 'prometheus',
+                                 'url': 'http://kube-prometheus-stack-prometheus:9090',
+                                 'access': 'proxy',
+                                 'isDefault': True
+                             }]
+                         }
+                     },
+                     'dashboardProviders': {
+                         'dashboardproviders.yaml': {
+                             'apiVersion': 1,
+                             'providers': [{
+                                 'name': 'default',
+                                 'orgId': 1,
+                                 'folder': '',
+                                 'type': 'file',
+                                 'disableDeletion': False,
+                                 'editable': True,
+                                 'options': {
+                                     'path': '/var/lib/grafana/dashboards/default'
+                                 }
+                             }]
+                         }
+                     },
+                     'dashboards': {
+                         'default': {
+                             'kubernetes-cluster-dashboard': {
+                                 'gnetId': 7249,
+                                 'revision': 1,
+                                 'datasource': 'Prometheus'
+                             },
+                             'kubernetes-pod-dashboard': {
+                                 'gnetId': 6417,
+                                 'revision': 1,
+                                 'datasource': 'Prometheus'
+                             },
+                             'node-exporter-dashboard': {
+                                 'gnetId': 1860,
+                                 'revision': 27,
+                                 'datasource': 'Prometheus'
+                             }
+                         }
+                     }
+                 },
+                 'prometheus': {
+                     'enabled': True,
+                     'prometheusSpec': {
+                         'retention': '30d',
+                         'storageSpec': {
+                             'volumeClaimTemplate': {
+                                 'spec': {
+                                     'accessModes': ['ReadWriteOnce'],
+                                     'resources': {
+                                         'requests': {
+                                             'storage': prometheus_storage_size
+                                         }
+                                     }
+                                 }
+                             }
+                         },
+                         'serviceMonitorSelectorNilUsesHelmValues': False
+                     }
+                 },
+                 'alertmanager': {
+                     'enabled': True
+                 },
+                 'nodeExporter': {
+                     'enabled': True
+                 },
+                 'kubeStateMetrics': {
+                     'enabled': True
+                 },
+                 'defaultRules': {
+                     'create': True,
+                     'rules': {
+                         'alertmanager': True,
+                         'etcd': True,
+                         'general': True,
+                         'k8s': True,
+                         'kubeApiserver': True,
+                         'kubePrometheusNodeRecording': True,
+                         'kubernetesApps': True,
+                         'kubernetesResources': True,
+                         'kubernetesStorage': True,
+                         'kubernetesSystem': True,
+                         'network': True,
+                         'node': True,
+                         'prometheus': True,
+                         'prometheusOperator': True
+                     }
+                 }
+             }
+
+             # Add ingress if requested
+             if install_ingress:
+                 helm_values['grafana']['ingress'] = {
+                     'enabled': True,
+                     'hosts': [f'grafana.{namespace}.local'],
+                     'paths': ['/']
+                 }
+
+             # Write values to temporary file
+             with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
+                 yaml.dump(helm_values, f, default_flow_style=False)
+                 values_file = f.name
+
+             try:
+                 # Install the Helm chart
+                 print("🚀 Installing kube-prometheus-stack via Helm...")
+                 helm_cmd = [
+                     'helm', 'install', 'kube-prometheus-stack',
+                     'prometheus-community/kube-prometheus-stack',
+                     '--namespace', namespace,
+                     '--values', values_file
+                 ]
+
+                 if wait_for_ready:
+                     helm_cmd.append('--wait')
+                     helm_cmd.extend(['--timeout', '10m'])
+
+                 result = subprocess.run(helm_cmd, capture_output=True, text=True, check=True)
+                 print("✅ Helm chart installed successfully")
+
+                 # Wait a bit for pods to start
+                 if wait_for_ready:
+                     print("⏳ Waiting for pods to be ready...")
+                     time.sleep(30)
+
+                 # Get service information
+                 services_info = self._get_helm_monitoring_services(namespace)
+
+                 return {
+                     'success': True,
+                     'namespace': namespace,
+                     'release_name': 'kube-prometheus-stack',
+                     'prometheus': {'deployed': True},
+                     'grafana': {
+                         'deployed': True,
+                         'admin_password': 'admin'
+                     },
+                     'grafana_url': services_info.get('grafana_url'),
+                     'prometheus_url': services_info.get('prometheus_url'),
+                     'alertmanager_url': services_info.get('alertmanager_url')
+                 }
+
+             finally:
+                 # Clean up temporary file
+                 os.unlink(values_file)
+
+         except subprocess.CalledProcessError as e:
+             error_msg = e.stderr.decode() if e.stderr else str(e)
+             return {
+                 'success': False,
+                 'error': f'Helm installation failed: {error_msg}'
+             }
+         except Exception as e:
+             return {
+                 'success': False,
+                 'error': f'Failed to setup Helm monitoring: {str(e)}'
+             }
+
+     def get_helm_monitoring_info(self, namespace: str = "monitoring") -> Dict:
+         """Get information about the Helm-based monitoring stack"""
+         import subprocess
+
+         try:
+             # Check if Helm release exists
+             try:
+                 result = subprocess.run([
+                     'helm', 'status', 'kube-prometheus-stack',
+                     '--namespace', namespace
+                 ], capture_output=True, text=True, check=True)
+
+                 # Parse Helm status
+                 lines = result.stdout.split('\n')
+                 release_info = {}
+                 for line in lines:
+                     if 'STATUS:' in line:
+                         release_info['release_status'] = line.split('STATUS:')[1].strip()
+                     elif 'LAST DEPLOYED:' in line:
+                         release_info['last_deployed'] = line.split('LAST DEPLOYED:')[1].strip()
+
+             except subprocess.CalledProcessError:
+                 return {'error': 'Helm release not found. Use setup-helm-monitoring to deploy first.'}
+
+             # Get services information
+             services_info = self._get_helm_monitoring_services(namespace)
+
+             # Get pod status
+             pods_info = self._get_monitoring_pods_status(namespace)
+
+             return {
+                 'release_name': 'kube-prometheus-stack',
+                 'release_status': release_info.get('release_status', 'Unknown'),
+                 'last_deployed': release_info.get('last_deployed', 'Unknown'),
+                 'prometheus': {
+                     'running': any(pod['name'].startswith('prometheus-kube-prometheus-stack-prometheus')
+                                    for pod in pods_info if pod['status'] == 'Running'),
+                     'url': services_info.get('prometheus_url', 'Port-forward required')
+                 },
+                 'grafana': {
+                     'running': any(pod['name'].startswith('kube-prometheus-stack-grafana')
+                                    for pod in pods_info if pod['status'] == 'Running'),
+                     'url': services_info.get('grafana_url', 'Port-forward required')
+                 },
+                 'alertmanager': {
+                     'running': any(pod['name'].startswith('alertmanager-kube-prometheus-stack-alertmanager')
+                                    for pod in pods_info if pod['status'] == 'Running'),
+                     'url': services_info.get('alertmanager_url', 'Port-forward required')
+                 },
+                 'pods': pods_info
+             }
+
+         except Exception as e:
+             return {'error': f'Failed to get monitoring info: {str(e)}'}
+
+     def delete_helm_monitoring(self, namespace: str = "monitoring",
+                                release_name: str = "kube-prometheus-stack") -> Dict:
+         """Delete Helm-based monitoring stack"""
+         import subprocess
+
+         try:
+             # Uninstall Helm release
+             result = subprocess.run([
+                 'helm', 'uninstall', release_name,
+                 '--namespace', namespace
+             ], capture_output=True, text=True, check=True)
+
+             print(f"✅ Helm release '{release_name}' uninstalled")
+
+             # Count remaining resources (optional cleanup)
+             try:
+                 # Delete PVCs that might remain
+                 pvcs = self.core_v1.list_namespaced_persistent_volume_claim(namespace=namespace)
+                 pvc_count = 0
+                 for pvc in pvcs.items:
+                     if 'prometheus' in pvc.metadata.name or 'grafana' in pvc.metadata.name:
+                         self.core_v1.delete_namespaced_persistent_volume_claim(
+                             name=pvc.metadata.name,
+                             namespace=namespace
+                         )
+                         pvc_count += 1
+
+                 if pvc_count > 0:
+                     print(f"✅ Cleaned up {pvc_count} persistent volume claims")
+
+             except Exception as cleanup_error:
+                 print(f"⚠️ Could not clean up some resources: {cleanup_error}")
+
+             return {
+                 'success': True,
+                 'resources_deleted': pvc_count
+             }
+
+         except subprocess.CalledProcessError as e:
+             error_msg = e.stderr.decode() if e.stderr else str(e)
+             return {
+                 'success': False,
+                 'error': f'Failed to uninstall Helm release: {error_msg}'
+             }
+         except Exception as e:
+             return {
+                 'success': False,
+                 'error': f'Failed to delete monitoring stack: {str(e)}'
+             }
+
+     def _get_helm_monitoring_services(self, namespace: str) -> Dict:
+         """Get service URLs for Helm monitoring components"""
+         services_info = {}
+
+         try:
+             # Get services
+             services = self.core_v1.list_namespaced_service(namespace=namespace)
+
+             for service in services.items:
+                 service_name = service.metadata.name
+
+                 if 'grafana' in service_name:
+                     url = self._get_service_url(service, namespace, 80)
+                     if url:
+                         services_info['grafana_url'] = url
+
+                 elif 'prometheus' in service_name and 'operated' not in service_name:
+                     url = self._get_service_url(service, namespace, 9090)
+                     if url:
+                         services_info['prometheus_url'] = url
+
+                 elif 'alertmanager' in service_name and 'operated' not in service_name:
+                     url = self._get_service_url(service, namespace, 9093)
+                     if url:
+                         services_info['alertmanager_url'] = url
+
+         except Exception as e:
+             print(f"⚠️ Could not get service information: {e}")
+
+         return services_info
+
+     def _get_monitoring_pods_status(self, namespace: str) -> List[Dict]:
+         """Get status of monitoring pods"""
+         pods_info = []
+
+         try:
+             pods = self.core_v1.list_namespaced_pod(namespace=namespace)
+
+             for pod in pods.items:
+                 if any(component in pod.metadata.name for component in
+                        ['prometheus', 'grafana', 'alertmanager', 'node-exporter', 'kube-state-metrics']):
+
+                     ready_containers = 0
+                     total_containers = len(pod.status.container_statuses) if pod.status.container_statuses else 0
+
+                     if pod.status.container_statuses:
+                         ready_containers = sum(1 for cs in pod.status.container_statuses if cs.ready)
+
+                     pods_info.append({
+                         'name': pod.metadata.name,
+                         'status': pod.status.phase,
+                         'ready': ready_containers,
+                         'total': total_containers
+                     })
+
+         except Exception as e:
+             print(f"⚠️ Could not get pod status: {e}")
+
+         return pods_info
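
On the core side, the new methods shell out to the Helm CLI with `subprocess` and read cluster state back through the existing CoreV1 API client. A minimal programmatic sketch of the intended flow (illustrative only, not part of the diff; it assumes the `K8sClient` class the CLI instantiates is importable from `k8s_helper.core`, and that Helm and a reachable cluster are available):

from k8s_helper.core import K8sClient  # assumed import path; the CLI constructs K8sClient directly

client = K8sClient()

# Install kube-prometheus-stack into the "monitoring" namespace via Helm
result = client.setup_helm_monitoring(
    namespace="monitoring",
    grafana_service_type="NodePort",
    prometheus_storage_size="10Gi",
    grafana_storage_size="5Gi",
    wait_for_ready=True,
)

if result["success"]:
    # grafana_url is only populated when the Service type exposes one; otherwise port-forward
    print(result.get("grafana_url") or "kubectl port-forward svc/kube-prometheus-stack-grafana 3000:80")

    # Release status, per-component running flags, and pod readiness
    info = client.get_helm_monitoring_info("monitoring")
    if "error" not in info:
        print(info["release_status"], info["grafana"]["running"])
else:
    print(result["error"])

# Tear down the release and clean up leftover Prometheus/Grafana PVCs
client.delete_helm_monitoring(namespace="monitoring", release_name="kube-prometheus-stack")
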
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: k8s-helper-cli
- Version: 0.5.0
+ Version: 0.5.1
  Summary: A simplified Python wrapper for common Kubernetes operations
  Author-email: Harshit Chatterjee <harshitchatterjee50@gmail.com>
  License-Expression: MIT