kubetorch 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. kubetorch/__init__.py +59 -0
  2. kubetorch/cli.py +1939 -0
  3. kubetorch/cli_utils.py +967 -0
  4. kubetorch/config.py +453 -0
  5. kubetorch/constants.py +18 -0
  6. kubetorch/docs/Makefile +18 -0
  7. kubetorch/docs/__init__.py +0 -0
  8. kubetorch/docs/_ext/json_globaltoc.py +42 -0
  9. kubetorch/docs/api/cli.rst +10 -0
  10. kubetorch/docs/api/python/app.rst +21 -0
  11. kubetorch/docs/api/python/cls.rst +19 -0
  12. kubetorch/docs/api/python/compute.rst +25 -0
  13. kubetorch/docs/api/python/config.rst +11 -0
  14. kubetorch/docs/api/python/fn.rst +19 -0
  15. kubetorch/docs/api/python/image.rst +14 -0
  16. kubetorch/docs/api/python/secret.rst +18 -0
  17. kubetorch/docs/api/python/volumes.rst +13 -0
  18. kubetorch/docs/api/python.rst +101 -0
  19. kubetorch/docs/conf.py +69 -0
  20. kubetorch/docs/index.rst +20 -0
  21. kubetorch/docs/requirements.txt +5 -0
  22. kubetorch/globals.py +269 -0
  23. kubetorch/logger.py +59 -0
  24. kubetorch/resources/__init__.py +0 -0
  25. kubetorch/resources/callables/__init__.py +0 -0
  26. kubetorch/resources/callables/cls/__init__.py +0 -0
  27. kubetorch/resources/callables/cls/cls.py +159 -0
  28. kubetorch/resources/callables/fn/__init__.py +0 -0
  29. kubetorch/resources/callables/fn/fn.py +140 -0
  30. kubetorch/resources/callables/module.py +1315 -0
  31. kubetorch/resources/callables/utils.py +203 -0
  32. kubetorch/resources/compute/__init__.py +0 -0
  33. kubetorch/resources/compute/app.py +253 -0
  34. kubetorch/resources/compute/compute.py +2414 -0
  35. kubetorch/resources/compute/decorators.py +137 -0
  36. kubetorch/resources/compute/utils.py +1026 -0
  37. kubetorch/resources/compute/websocket.py +135 -0
  38. kubetorch/resources/images/__init__.py +1 -0
  39. kubetorch/resources/images/image.py +412 -0
  40. kubetorch/resources/images/images.py +64 -0
  41. kubetorch/resources/secrets/__init__.py +2 -0
  42. kubetorch/resources/secrets/kubernetes_secrets_client.py +377 -0
  43. kubetorch/resources/secrets/provider_secrets/__init__.py +0 -0
  44. kubetorch/resources/secrets/provider_secrets/anthropic_secret.py +12 -0
  45. kubetorch/resources/secrets/provider_secrets/aws_secret.py +16 -0
  46. kubetorch/resources/secrets/provider_secrets/azure_secret.py +14 -0
  47. kubetorch/resources/secrets/provider_secrets/cohere_secret.py +12 -0
  48. kubetorch/resources/secrets/provider_secrets/gcp_secret.py +16 -0
  49. kubetorch/resources/secrets/provider_secrets/github_secret.py +13 -0
  50. kubetorch/resources/secrets/provider_secrets/huggingface_secret.py +20 -0
  51. kubetorch/resources/secrets/provider_secrets/kubeconfig_secret.py +12 -0
  52. kubetorch/resources/secrets/provider_secrets/lambda_secret.py +13 -0
  53. kubetorch/resources/secrets/provider_secrets/langchain_secret.py +12 -0
  54. kubetorch/resources/secrets/provider_secrets/openai_secret.py +11 -0
  55. kubetorch/resources/secrets/provider_secrets/pinecone_secret.py +12 -0
  56. kubetorch/resources/secrets/provider_secrets/providers.py +92 -0
  57. kubetorch/resources/secrets/provider_secrets/ssh_secret.py +12 -0
  58. kubetorch/resources/secrets/provider_secrets/wandb_secret.py +11 -0
  59. kubetorch/resources/secrets/secret.py +224 -0
  60. kubetorch/resources/secrets/secret_factory.py +64 -0
  61. kubetorch/resources/secrets/utils.py +222 -0
  62. kubetorch/resources/volumes/__init__.py +0 -0
  63. kubetorch/resources/volumes/volume.py +340 -0
  64. kubetorch/servers/__init__.py +0 -0
  65. kubetorch/servers/http/__init__.py +0 -0
  66. kubetorch/servers/http/distributed_utils.py +2968 -0
  67. kubetorch/servers/http/http_client.py +802 -0
  68. kubetorch/servers/http/http_server.py +1622 -0
  69. kubetorch/servers/http/server_metrics.py +255 -0
  70. kubetorch/servers/http/utils.py +722 -0
  71. kubetorch/serving/__init__.py +0 -0
  72. kubetorch/serving/autoscaling.py +153 -0
  73. kubetorch/serving/base_service_manager.py +344 -0
  74. kubetorch/serving/constants.py +77 -0
  75. kubetorch/serving/deployment_service_manager.py +431 -0
  76. kubetorch/serving/knative_service_manager.py +487 -0
  77. kubetorch/serving/raycluster_service_manager.py +526 -0
  78. kubetorch/serving/service_manager.py +18 -0
  79. kubetorch/serving/templates/deployment_template.yaml +17 -0
  80. kubetorch/serving/templates/knative_service_template.yaml +19 -0
  81. kubetorch/serving/templates/kt_setup_template.sh.j2 +91 -0
  82. kubetorch/serving/templates/pod_template.yaml +198 -0
  83. kubetorch/serving/templates/raycluster_service_template.yaml +42 -0
  84. kubetorch/serving/templates/raycluster_template.yaml +35 -0
  85. kubetorch/serving/templates/service_template.yaml +21 -0
  86. kubetorch/serving/templates/workerset_template.yaml +36 -0
  87. kubetorch/serving/utils.py +344 -0
  88. kubetorch/utils.py +263 -0
  89. kubetorch-0.2.5.dist-info/METADATA +75 -0
  90. kubetorch-0.2.5.dist-info/RECORD +92 -0
  91. kubetorch-0.2.5.dist-info/WHEEL +4 -0
  92. kubetorch-0.2.5.dist-info/entry_points.txt +5 -0
@@ -0,0 +1,487 @@
1
+ import os
2
+ import re
3
+ import time
4
+ from datetime import datetime, timezone
5
+ from typing import List, Optional
6
+
7
+ from kubernetes import client
8
+
9
+ import kubetorch as kt
10
+ import kubetorch.serving.constants as serving_constants
11
+ from kubetorch.logger import get_logger
12
+ from kubetorch.resources.compute.utils import (
13
+ check_pod_events_for_errors,
14
+ check_pod_status_for_errors,
15
+ check_revision_for_errors,
16
+ ServiceTimeoutError,
17
+ )
18
+ from kubetorch.servers.http.utils import load_template
19
+ from kubetorch.serving.autoscaling import AutoscalingConfig
20
+ from kubetorch.serving.base_service_manager import BaseServiceManager
21
+ from kubetorch.serving.utils import nested_override, pod_is_running
22
+
23
+ logger = get_logger(__name__)
24
+
25
+
26
class KnativeServiceManager(BaseServiceManager):
    """Service manager for Knative services with autoscaling capabilities."""

    def _create_or_update_knative_service(
        self,
        name: str,
        module_name: str,
        pod_template: dict,
        autoscaling_config: AutoscalingConfig = None,
        gpu_annotations: dict = None,
        inactivity_ttl: str = None,
        custom_labels: dict = None,
        custom_annotations: dict = None,
        custom_template: dict = None,
        scheduler_name: str = None,
        queue_name: str = None,
        dryrun: bool = False,
    ) -> dict:
        """Creates or updates a Knative service based on the provided configuration.

        Renders the Knative Service manifest from the packaged Jinja template,
        layering kubetorch labels/annotations, autoscaling annotations, optional
        GPU annotations, and any user-supplied overrides, then submits it via the
        custom-objects API.

        NOTE(review): although ``autoscaling_config`` defaults to None, it is
        dereferenced unconditionally below (``convert_to_annotations``,
        ``progress_deadline``, ``concurrency``) — callers must always pass a
        config, per the inline comment further down. Confirm before relying on
        the None default.

        NOTE(review): on a 409 conflict this method returns the EXISTING service
        without applying any update, despite logging "updating" — verify this is
        the intended upsert semantics.

        Returns:
            Dict
        """
        # Clean the module name to remove any invalid characters for labels
        clean_module_name = re.sub(r"[^A-Za-z0-9.-]|^[-.]|[-.]$", "", module_name)

        # Labels for the top-level Service resource (includes the template marker).
        labels = {
            **self.base_labels,
            serving_constants.KT_MODULE_LABEL: clean_module_name,
            serving_constants.KT_SERVICE_LABEL: name,
            serving_constants.KT_TEMPLATE_LABEL: "ksvc",
        }

        if custom_labels:
            labels.update(custom_labels)

        # Template labels (exclude template label - that's only for the top-level resource)
        template_labels = {
            **self.base_labels,
            serving_constants.KT_MODULE_LABEL: clean_module_name,
            serving_constants.KT_SERVICE_LABEL: name,
        }

        if custom_labels:
            template_labels.update(custom_labels)

        template_annotations = {
            "networking.knative.dev/ingress.class": "kourier.ingress.networking.knative.dev",
        }

        # Prometheus scrape config plus Knative probe hints for the revision.
        annotations = {
            "prometheus.io/scrape": "true",
            "prometheus.io/port": "8080",
            "prometheus.io/path": serving_constants.PROMETHEUS_HEALTH_ENDPOINT,
            "serving.knative.dev/container-name": "kubetorch",
            "serving.knative.dev/probe-path": "/health",
        }
        if custom_annotations:
            annotations.update(custom_annotations)

        if scheduler_name and queue_name:
            labels["kai.scheduler/queue"] = queue_name  # Useful for queries, etc
            template_labels["kai.scheduler/queue"] = queue_name  # Required for KAI to schedule pods
            # Note: KAI wraps the Knative revision in a podgroup, expecting at least 1 pod to schedule initially
            # Only set min-scale=1 if user hasn't explicitly provided a min_scale value
            if autoscaling_config.min_scale is None:
                template_annotations["autoscaling.knative.dev/min-scale"] = "1"

        # Add autoscaling annotations (config always provided)
        autoscaling_annotations = autoscaling_config.convert_to_annotations()
        template_annotations.update(autoscaling_annotations)

        # Add progress deadline if specified (not an autoscaling annotation)
        if autoscaling_config.progress_deadline is not None:
            template_annotations["serving.knative.dev/progress-deadline"] = autoscaling_config.progress_deadline

        if inactivity_ttl:
            annotations[serving_constants.INACTIVITY_TTL_ANNOTATION] = inactivity_ttl
            logger.info(f"Configuring auto-down after idle timeout ({inactivity_ttl})")

        # Revision template carries the service-level annotations too.
        template_annotations.update(annotations)

        if gpu_annotations:
            template_annotations.update(gpu_annotations)

        # Timestamp lets other methods detect when this deployment was made.
        deployment_timestamp = datetime.now(timezone.utc).isoformat()
        template_annotations.update({"kubetorch.com/deployment_timestamp": deployment_timestamp})

        # Set containerConcurrency based on autoscaling config
        # When using concurrency-based autoscaling, set containerConcurrency to match
        # the target to ensure the container's limit aligns with autoscaler expectations
        template_vars = {
            "name": name,
            "namespace": self.namespace,
            "annotations": annotations,
            "template_annotations": template_annotations,
            "labels": labels,
            "template_labels": template_labels,
            "pod_template": pod_template,
        }

        if autoscaling_config.concurrency is not None:
            template_vars["container_concurrency"] = autoscaling_config.concurrency

        # Render the Knative Service manifest from the packaged template dir.
        service = load_template(
            template_file=serving_constants.KNATIVE_SERVICE_TEMPLATE_FILE,
            template_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates"),
            **template_vars,
        )

        # User-supplied raw overrides are applied last and win over everything above.
        if custom_template:
            nested_override(service, custom_template)

        try:
            # Server-side dry run: validates without persisting the object.
            kwargs = {"dry_run": "All"} if dryrun else {}
            created_service: dict = self.objects_api.create_namespaced_custom_object(
                group="serving.knative.dev",
                version="v1",
                namespace=self.namespace,
                plural="services",
                body=service,
                **kwargs,
            )

            logger.info(
                f"Created Knative service {name} in namespace {self.namespace}",
            )
            return created_service

        except client.exceptions.ApiException as e:
            if e.status == 409:
                # Conflict: the service already exists; return it as-is (no patch applied).
                logger.info(f"Service {name} already exists, updating")
                existing_service = self.get_knative_service(name)
                return existing_service
            else:
                logger.error(
                    f"Failed to create Knative service: {str(e)}",
                )
                raise e
+ def get_knative_service(self, service_name: str) -> dict:
167
+ """Retrieve a Knative service by name."""
168
+ try:
169
+ service = self.objects_api.get_namespaced_custom_object(
170
+ group="serving.knative.dev",
171
+ version="v1",
172
+ namespace=self.namespace,
173
+ plural="services",
174
+ name=service_name,
175
+ )
176
+ return service
177
+
178
+ except client.exceptions.ApiException as e:
179
+ logger.error(f"Failed to load Knative service '{service_name}': {str(e)}")
180
+ raise
181
+
182
+ def get_deployment_timestamp_annotation(self, service_name: str) -> Optional[str]:
183
+ """Get deployment timestamp annotation for Knative services."""
184
+ try:
185
+ service = self.get_knative_service(service_name)
186
+ if service:
187
+ return (
188
+ service.get("metadata", {}).get("annotations", {}).get("kubetorch.com/deployment_timestamp", None)
189
+ )
190
+ except client.exceptions.ApiException:
191
+ pass
192
+ return None
193
+
194
+ def update_deployment_timestamp_annotation(self, service_name: str, new_timestamp: str) -> str:
195
+ """Update deployment timestamp annotation for Knative services."""
196
+ try:
197
+ patch_body = {"metadata": {"annotations": {"kubetorch.com/deployment_timestamp": new_timestamp}}}
198
+ self.objects_api.patch_namespaced_custom_object(
199
+ group="serving.knative.dev",
200
+ version="v1",
201
+ namespace=self.namespace,
202
+ plural="services",
203
+ name=service_name,
204
+ body=patch_body,
205
+ )
206
+ return new_timestamp
207
+ except client.exceptions.ApiException as e:
208
+ logger.error(f"Failed to update deployment timestamp for Knative service '{service_name}': {str(e)}")
209
+ raise
210
+
211
+ def get_knative_service_endpoint(self, service_name: str) -> str:
212
+ """Get the endpoint URL for a Knative service."""
213
+ try:
214
+ service = self.get_knative_service(service_name)
215
+
216
+ # Get the URL from the service status
217
+ status = service.get("status", {})
218
+ url = status.get("url")
219
+ if url:
220
+ return url
221
+
222
+ # Fallback to constructing URL
223
+ return f"http://{service_name}.{self.namespace}.svc.cluster.local"
224
+
225
+ except Exception as e:
226
+ logger.warning(f"Could not get Knative service URL for {service_name}: {e}")
227
+ return f"http://{service_name}.{self.namespace}.svc.cluster.local"
228
+
229
+ def create_or_update_service(
230
+ self,
231
+ service_name: str,
232
+ module_name: str,
233
+ pod_template: dict,
234
+ autoscaling_config: AutoscalingConfig = None,
235
+ gpu_annotations: dict = None,
236
+ inactivity_ttl: str = None,
237
+ custom_labels: dict = None,
238
+ custom_annotations: dict = None,
239
+ custom_template: dict = None,
240
+ scheduler_name: str = None,
241
+ queue_name: str = None,
242
+ dryrun: bool = False,
243
+ **kwargs, # Ignore deployment-specific args like replicas
244
+ ):
245
+ """
246
+ Creates a Knative service with autoscaling capabilities.
247
+ """
248
+ logger.info(f"Deploying Kubetorch autoscaling (Knative) service with name: {service_name}")
249
+ try:
250
+ created_service = self._create_or_update_knative_service(
251
+ name=service_name,
252
+ pod_template=pod_template,
253
+ module_name=module_name,
254
+ autoscaling_config=autoscaling_config,
255
+ gpu_annotations=gpu_annotations,
256
+ inactivity_ttl=inactivity_ttl,
257
+ custom_labels=custom_labels,
258
+ custom_annotations=custom_annotations,
259
+ custom_template=custom_template,
260
+ scheduler_name=scheduler_name,
261
+ queue_name=queue_name,
262
+ dryrun=dryrun,
263
+ )
264
+ return created_service
265
+ except Exception as e:
266
+ logger.error(f"Failed to launch new Knative service: {str(e)}")
267
+ raise e
268
+
269
+ def get_endpoint(self, service_name: str) -> str:
270
+ """Get the endpoint URL for a Knative service."""
271
+ return self.get_knative_service_endpoint(service_name)
272
+
273
+ def get_pods_for_service(self, service_name: str, **kwargs) -> List[client.V1Pod]:
274
+ """Get all pods associated with this Knative service."""
275
+ return self.get_pods_for_service_static(
276
+ service_name=service_name,
277
+ namespace=self.namespace,
278
+ core_api=self.core_api,
279
+ )
280
+
281
+ def _status_condition_ready(self, status: dict) -> bool:
282
+ """Check if service status conditions indicate ready state."""
283
+ conditions = status.get("conditions", [])
284
+ for condition in conditions:
285
+ if condition.get("type") == "Ready":
286
+ return condition.get("status") == "True"
287
+ return False
288
+
289
    def check_service_ready(
        self,
        service_name: str,
        launch_timeout: int,
        objects_api: client.CustomObjectsApi = None,
        core_api: client.CoreV1Api = None,
        queue_name: str = None,
        scheduler_name: str = None,
        **kwargs,
    ) -> bool:
        """Checks if the Knative service is ready to start serving requests.

        Core checks:
        - Service status and conditions
        - Revision status and conditions
        - Pod status and conditions
        - Autoscaling conditions (min-scale, etc.)

        Common failure scenarios handled:
        - Image pull failures or delays
        - Container initialization and setup (pip installs, etc.)
        - User-defined image setup steps
        - Node provisioning delays or failures
        - Service health check failures
        - Container terminations
        - Autoscaling not meeting minimum requirements

        Note:
            This method checks all pods associated with the service, not just the first one.
            Service check will fail fast only for truly unrecoverable conditions (like missing images or autoscaling
            not being triggered or enabled).

            Unless there is a clear reason to terminate, will wait for the full specified timeout
            to allow autoscaling and node provisioning to work (where relevant).

        Args:
            service_name: Name of the Knative service
            launch_timeout: Timeout in seconds to wait for readiness
            objects_api: Objects API instance (uses self.objects_api if None)
            core_api: Core API instance (uses self.core_api if None)
            queue_name: Queue name for scheduling checks
            scheduler_name: Scheduler name for scheduling checks
            **kwargs: Additional arguments

        Returns:
            True if service is ready

        Raises:
            ServiceTimeoutError: If service doesn't become ready within timeout
            QueueUnschedulableError: If pods can't be scheduled due to queue issues
            ResourceNotAvailableError: If required resources aren't available
        """
        if objects_api is None:
            objects_api = self.objects_api
        if core_api is None:
            core_api = self.core_api

        # Poll every 2 seconds until ready or timeout.
        sleep_interval = 2
        start_time = time.time()

        # Instead of spamming logs with each iteration, only log once
        displayed_msgs = {
            "service_status": False,
            "waiting_for_pods": None,
            "revision_status": False,
            "service_readiness": False,
            "autoscaling": False,
        }

        logger.info(f"Checking service {service_name} pod readiness (timeout: {launch_timeout} seconds)")
        iteration = 0
        while (time.time() - start_time) < launch_timeout:
            iteration += 1
            try:
                # Re-fetch the service each iteration to pick up status changes.
                service = objects_api.get_namespaced_custom_object(
                    group="serving.knative.dev",
                    version="v1",
                    namespace=self.namespace,
                    plural="services",
                    name=service_name,
                )
                status = service.get("status")
                if not status:
                    # Status subresource not populated yet; keep waiting.
                    if not displayed_msgs["service_status"]:
                        logger.info(f"Waiting for service {service_name} status")
                        displayed_msgs["service_status"] = True
                    time.sleep(sleep_interval)
                    continue

                # "NotOwned" means another controller owns a conflicting resource —
                # unrecoverable, so fail fast instead of waiting out the timeout.
                for cond in status.get("conditions", []):
                    if cond.get("type") == "Ready" and cond.get("reason") == "NotOwned":
                        raise kt.KnativeServiceConflictError(
                            f"Knative service '{service_name}' cannot become ready: {cond.get('message')}"
                        )

                # Check autoscaling conditions
                if not displayed_msgs["autoscaling"]:
                    logger.info("Checking autoscaling conditions")
                    displayed_msgs["autoscaling"] = True

                # Get the min-scale from annotations
                min_scale = 0
                if service.get("spec", {}).get("template", {}).get("metadata", {}).get("annotations", {}):
                    min_scale_str = service["spec"]["template"]["metadata"]["annotations"].get(
                        "autoscaling.knative.dev/min-scale", "0"
                    )
                    min_scale = int(min_scale_str)

                if min_scale == 0 and self._status_condition_ready(status):
                    # Service is ready and allowed to scale to zero
                    logger.info(f"Service {service_name} is already marked as ready")
                    return True

                if min_scale == 0:
                    # Always need at least one pod
                    min_scale = 1

                # Get current number of Running pods
                pods = self.get_pods_for_service(service_name)
                running_pods = [p for p in pods if pod_is_running(p)]
                running_pods_count = len(running_pods)

                if running_pods_count < min_scale:
                    # Not enough pods yet: scan every pod for fatal conditions
                    # (these helpers raise on unrecoverable errors).
                    for pod in pods:
                        # Check for image pull errors in container status
                        check_pod_status_for_errors(pod, queue_name, scheduler_name)

                        # Check pod events separately from the core API
                        check_pod_events_for_errors(pod, self.namespace, core_api)

                    # Log only when the running-pod count changes.
                    if (
                        displayed_msgs["waiting_for_pods"] is None
                        or displayed_msgs["waiting_for_pods"] != running_pods_count
                    ):
                        logger.info(
                            f"Waiting for minimum scale ({min_scale} pods), currently have {running_pods_count}"
                        )
                        displayed_msgs["waiting_for_pods"] = running_pods_count
                else:
                    if not displayed_msgs["service_readiness"]:
                        logger.info(
                            f"Min {min_scale} pod{'s are' if min_scale > 1 else ' is'} ready, waiting for service to be marked as ready"
                        )
                        displayed_msgs["service_readiness"] = True

                    if self._status_condition_ready(status):
                        logger.info(f"Service {service_name} is now ready")
                        return True

                    if not displayed_msgs["revision_status"]:
                        logger.info("Checking service revision status")
                        displayed_msgs["revision_status"] = True

                    # Pods are up but the service isn't ready — check the latest
                    # revision for errors (raises on unrecoverable conditions).
                    latest_revision = status.get("latestCreatedRevisionName")
                    if latest_revision:
                        check_revision_for_errors(latest_revision, self.namespace, objects_api)

            # NOTE(review): this handler is a no-op (catch-and-reraise); it only
            # documents that API errors are intentionally NOT retried here.
            except client.exceptions.ApiException:
                raise

            # Periodic progress heartbeat, every 10th iteration (~20s).
            if iteration % 10 == 0:
                elapsed = int(time.time() - start_time)
                remaining = max(0, int(launch_timeout - elapsed))
                logger.info(f"Service is not yet marked as ready " f"(elapsed: {elapsed}s, remaining: {remaining}s)")

            time.sleep(sleep_interval)

        raise ServiceTimeoutError(
            f"Service {service_name} did not become ready within {launch_timeout} seconds. "
            "To update the timeout, set the `launch_timeout` parameter in the Compute class, or set the "
            "environment variable `KT_LAUNCH_TIMEOUT`."
        )
+ def teardown_service(self, service_name: str, console=None) -> bool:
463
+ """Teardown Knative service and associated resources.
464
+
465
+ Args:
466
+ service_name: Name of the Knative service to teardown
467
+ console: Optional Rich console for output
468
+
469
+ Returns:
470
+ True if teardown was successful, False otherwise
471
+ """
472
+ from kubetorch.resources.compute.utils import delete_service
473
+
474
+ try:
475
+ # Delete the Knative service
476
+ delete_service(
477
+ custom_api=self.objects_api,
478
+ name=service_name,
479
+ namespace=self.namespace,
480
+ console=console,
481
+ )
482
+
483
+ return True
484
+
485
+ except Exception as e:
486
+ logger.error(f"Failed to teardown Knative service {service_name}: {e}")
487
+ return False