skypilot-nightly 1.0.0.dev20250704__py3-none-any.whl → 1.0.0.dev20250708__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/backends/backend_utils.py +2 -1
- sky/backends/cloud_vm_ray_backend.py +52 -8
- sky/catalog/cudo_catalog.py +1 -1
- sky/client/common.py +6 -1
- sky/clouds/aws.py +5 -0
- sky/clouds/azure.py +3 -0
- sky/clouds/cloud.py +3 -0
- sky/clouds/cudo.py +3 -0
- sky/clouds/do.py +3 -0
- sky/clouds/fluidstack.py +3 -0
- sky/clouds/gcp.py +3 -2
- sky/clouds/ibm.py +3 -0
- sky/clouds/kubernetes.py +63 -24
- sky/clouds/lambda_cloud.py +3 -0
- sky/clouds/nebius.py +3 -0
- sky/clouds/oci.py +3 -0
- sky/clouds/paperspace.py +3 -0
- sky/clouds/runpod.py +3 -0
- sky/clouds/scp.py +3 -0
- sky/clouds/utils/gcp_utils.py +61 -1
- sky/clouds/vast.py +3 -0
- sky/clouds/vsphere.py +3 -0
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/provision/__init__.py +11 -0
- sky/provision/gcp/__init__.py +1 -0
- sky/provision/gcp/config.py +106 -13
- sky/provision/gcp/constants.py +0 -3
- sky/provision/gcp/instance.py +21 -0
- sky/provision/kubernetes/instance.py +16 -0
- sky/provision/kubernetes/utils.py +6 -2
- sky/resources.py +1 -30
- sky/server/requests/executor.py +4 -1
- sky/server/server.py +1 -1
- sky/server/uvicorn.py +2 -0
- sky/skylet/constants.py +6 -2
- sky/templates/gcp-ray.yml.j2 +3 -0
- sky/templates/kubernetes-ray.yml.j2 +8 -2
- sky/utils/resources_utils.py +30 -0
- sky/utils/schemas.py +19 -0
- sky/utils/tempstore.py +51 -0
- {skypilot_nightly-1.0.0.dev20250704.dist-info → skypilot_nightly-1.0.0.dev20250708.dist-info}/METADATA +3 -2
- {skypilot_nightly-1.0.0.dev20250704.dist-info → skypilot_nightly-1.0.0.dev20250708.dist-info}/RECORD +63 -62
- /sky/dashboard/out/_next/static/{6TieQqyqsJiaJC33q0FfI → O3wBEOmvYEVEqZxAP7Czn}/_buildManifest.js +0 -0
- /sky/dashboard/out/_next/static/{6TieQqyqsJiaJC33q0FfI → O3wBEOmvYEVEqZxAP7Czn}/_ssgManifest.js +0 -0
- {skypilot_nightly-1.0.0.dev20250704.dist-info → skypilot_nightly-1.0.0.dev20250708.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20250704.dist-info → skypilot_nightly-1.0.0.dev20250708.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20250704.dist-info → skypilot_nightly-1.0.0.dev20250708.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20250704.dist-info → skypilot_nightly-1.0.0.dev20250708.dist-info}/top_level.txt +0 -0
sky/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
 import urllib.request
 
 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '
+_SKYPILOT_COMMIT_SHA = '81af2fc744e51537d1509ba2f10749593bdb3a3b'
 
 
 def _get_git_commit():
@@ -35,7 +35,7 @@ def _get_git_commit():
 
 
 __commit__ = _get_git_commit()
-__version__ = '1.0.0.
+__version__ = '1.0.0.dev20250708'
 __root_dir__ = os.path.dirname(os.path.abspath(__file__))
 
 

sky/backends/backend_utils.py
CHANGED
@@ -51,6 +51,7 @@ from sky.utils import rich_utils
 from sky.utils import schemas
 from sky.utils import status_lib
 from sky.utils import subprocess_utils
+from sky.utils import tempstore
 from sky.utils import timeline
 from sky.utils import ux_utils
 from sky.workspaces import core as workspaces_core
@@ -247,7 +248,7 @@ def _optimize_file_mounts(tmp_yaml_path: str) -> None:
     # - use a remote command to move all runtime files to their right places.
 
     # Local tmp dir holding runtime files.
-    local_runtime_files_dir =
+    local_runtime_files_dir = tempstore.mkdtemp()
     new_file_mounts = {_REMOTE_RUNTIME_FILES_DIR: local_runtime_files_dir}
 
     # Generate local_src -> unique_name.

sky/backends/cloud_vm_ray_backend.py
CHANGED
@@ -1600,7 +1600,8 @@ class RetryingVmProvisioner(object):
             CloudVmRayBackend().post_teardown_cleanup(
                 handle,
                 terminate=not prev_cluster_ever_up,
-                remove_from_db=False
+                remove_from_db=False,
+                failover=True)
             # TODO(suquark): other clouds may have different zone
             # blocking strategy. See '_update_blocklist_on_error'
             # for details.
@@ -4402,12 +4403,17 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             handle: CloudVmRayResourceHandle,
             terminate: bool,
             purge: bool = False,
-            remove_from_db: bool = True
+            remove_from_db: bool = True,
+            failover: bool = False) -> None:
         """Cleanup local configs/caches and delete TPUs after teardown.
 
         This method will handle the following cleanup steps:
         * Deleting the TPUs;
         * Removing ssh configs for the cluster;
+        * Deleting the open ports;
+        * Deleting the custom multi network infrastructure based on the
+          failover flag (e.g. delete firewalls, subnets, and VPCs for GPU
+          Direct if failover is False, otherwise, only delete the subnets);
         * Updating the local state of the cluster;
         * Removing the terminated cluster's scripts and ray yaml files.
         """
@@ -4439,12 +4445,14 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
         # The cluster yaml does not exist when skypilot has not found
         # the right resource to provision the cluster.
         if handle.cluster_yaml is not None:
+            launched_resources = (
+                handle.launched_resources.assert_launchable())
+            cloud = launched_resources.cloud
+            config = global_user_state.get_cluster_yaml_dict(
+                handle.cluster_yaml)
+            ports_cleaned_up = False
+            custom_multi_network_cleaned_up = False
             try:
-                launched_resources = (
-                    handle.launched_resources.assert_launchable())
-                cloud = launched_resources.cloud
-                config = global_user_state.get_cluster_yaml_dict(
-                    handle.cluster_yaml)
                 cloud.check_features_are_supported(
                     launched_resources,
                     {clouds.CloudImplementationFeatures.OPEN_PORTS})
@@ -4452,7 +4460,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                     cluster_name_on_cloud,
                     handle.launched_resources.ports,
                     config['provider'])
-
+                ports_cleaned_up = True
             except exceptions.NotSupportedError:
                 pass
             except exceptions.PortDoesNotExistError:
@@ -4466,6 +4474,42 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                 else:
                     raise
 
+            # Clean up custom multi networks, e.g. the subnets, firewalls,
+            # and VPCs created for GCP GPUDirect TCPX
+            try:
+                cloud.check_features_are_supported(
+                    handle.launched_resources, {
+                        clouds.CloudImplementationFeatures.
+                        CUSTOM_MULTI_NETWORK
+                    })
+                provision_lib.cleanup_custom_multi_network(
+                    repr(cloud), cluster_name_on_cloud, config['provider'],
+                    failover)
+                custom_multi_network_cleaned_up = True
+            except exceptions.NotSupportedError:
+                pass
+            except Exception as e:  # pylint: disable=broad-except
+                if purge:
+                    msg = common_utils.format_exception(e, use_bracket=True)
+                    logger.warning(
+                        f'Failed to cleanup custom multi network. Skipping '
+                        f'since purge is set. Details: {msg}')
+                else:
+                    raise
+
+            if ports_cleaned_up and custom_multi_network_cleaned_up:
+                try:
+                    self.remove_cluster_config(handle)
+                except Exception as e:  # pylint: disable=broad-except
+                    if purge:
+                        msg = common_utils.format_exception(
+                            e, use_bracket=True)
+                        logger.warning(
+                            f'Failed to remove cluster config. Skipping '
+                            f'since purge is set. Details: {msg}')
+                    else:
+                        raise
+
         sky.utils.cluster_utils.SSHConfigHelper.remove_cluster(
             handle.cluster_name)

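Note on the hunks above: post_teardown_cleanup gains a failover flag, and the cluster config is only removed once both the port cleanup and the custom multi-network cleanup have succeeded. A minimal sketch of the new call surface, based only on this diff (the surrounding setup is hypothetical):

    # Hypothetical illustration of the new parameter added in this release.
    # During provisioning failover (as in RetryingVmProvisioner above), the
    # backend keeps the shared multi-network infrastructure and only deletes
    # the subnets; a normal teardown (failover=False, the default) also
    # removes the firewalls and VPCs created for GPUDirect.
    CloudVmRayBackend().post_teardown_cleanup(
        handle,                 # CloudVmRayResourceHandle of the cluster
        terminate=True,
        remove_from_db=False,   # keep the cluster record while failing over
        failover=True)
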
sky/catalog/cudo_catalog.py
CHANGED
sky/client/common.py
CHANGED
@@ -206,9 +206,14 @@ def _upload_chunk_with_retry(params: UploadChunkParams) -> None:
                 f'Retrying... ({attempt + 1} / {max_attempts})')
             time.sleep(1)
         else:
+            try:
+                response_details = response.json().get('detail')
+            except Exception:  # pylint: disable=broad-except
+                response_details = response.content
             error_msg = (
                 f'Failed to upload chunk: {params.chunk_index + 1} / '
-                f'{params.total_chunks}: {
+                f'{params.total_chunks}: {response_details} '
+                f'(Status code: {response.status_code})')
             upload_logger.error(error_msg)
             with ux_utils.print_exception_no_traceback():
                 raise RuntimeError(

sky/clouds/aws.py
CHANGED
@@ -174,6 +174,11 @@ class AWS(clouds.Cloud):
                 f'High availability controllers are not supported on {cls._REPR}.'
             )
 
+        unsupported_features[
+            clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK] = (
+                f'Customized multiple network interfaces are not supported on {cls._REPR}.'
+            )
+
         return unsupported_features
 
     @classmethod

sky/clouds/azure.py
CHANGED
@@ -94,6 +94,9 @@ class Azure(clouds.Cloud):
             clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS: (
                 f'High availability controllers are not supported on {cls._REPR}.'
             ),
+            clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK: (
+                f'Customized multiple network interfaces are not supported on {cls._REPR}.'
+            ),
         }
         if resources.use_spot:
             features[clouds.CloudImplementationFeatures.STOP] = (

sky/clouds/cloud.py
CHANGED
@@ -55,6 +55,9 @@ class CloudImplementationFeatures(enum.Enum):
     AUTO_TERMINATE = 'auto_terminate'  # Pod/VM can stop or down itself
     AUTOSTOP = 'autostop'  # Pod/VM can stop itself
     AUTODOWN = 'autodown'  # Pod/VM can down itself
+    # Pod/VM can have customized multiple network interfaces
+    # e.g. GCP GPUDirect TCPX
+    CUSTOM_MULTI_NETWORK = 'custom_multi_network'
 
 
 # Use str, enum.Enum to allow CloudCapability to be used as a string.

sky/clouds/cudo.py
CHANGED
@@ -73,6 +73,9 @@ class Cudo(clouds.Cloud):
         ),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported on Cudo.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on Cudo.'
+            ),
     }
     _MAX_CLUSTER_NAME_LEN_LIMIT = 60
 

sky/clouds/do.py
CHANGED
@@ -40,6 +40,9 @@ class DO(clouds.Cloud):
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported in '
              f'{_REPR}.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported in '
+             f'{_REPR}.'),
     }
     # DO maximum node name length defined as <= 255
     # https://docs.digitalocean.com/reference/api/api-reference/#operation/droplets_create

sky/clouds/fluidstack.py
CHANGED
@@ -63,6 +63,9 @@ class Fluidstack(clouds.Cloud):
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported in '
              f'{_REPR}.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported in '
+             f'{_REPR}.'),
     }
     # Using the latest SkyPilot provisioner API to provision and check status.
     PROVISIONER_VERSION = clouds.ProvisionerVersion.SKYPILOT

sky/clouds/gcp.py
CHANGED
@@ -512,8 +512,9 @@ class GCP(clouds.Cloud):
             default_value=False,
             override_configs=resources.cluster_config_overrides)
         resources_vars['enable_gpu_direct'] = enable_gpu_direct
-        network_tier = r.network_tier
-
+        network_tier = (r.network_tier if r.network_tier is not None else
+                        resources_utils.NetworkTier.STANDARD)
+        resources_vars['network_tier'] = network_tier.value
         accelerators = r.accelerators
         if accelerators is not None:
             assert len(accelerators) == 1, r

sky/clouds/ibm.py
CHANGED
@@ -53,6 +53,9 @@ class IBM(clouds.Cloud):
             (f'Opening ports is currently not supported on {cls._REPR}.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported on IBM.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on '
+             f'{cls._REPR}.'),
     }
     if resources.use_spot:
         features[clouds.CloudImplementationFeatures.STOP] = (

sky/clouds/kubernetes.py
CHANGED
@@ -14,6 +14,7 @@ from sky import exceptions
 from sky import sky_logging
 from sky import skypilot_config
 from sky.adaptors import kubernetes
+from sky.clouds.utils import gcp_utils
 from sky.provision import instance_setup
 from sky.provision.kubernetes import network_utils
 from sky.provision.kubernetes import utils as kubernetes_utils
@@ -22,6 +23,7 @@ from sky.provision.kubernetes.utils import normalize_tpu_accelerator_name
 from sky.skylet import constants
 from sky.utils import annotations
 from sky.utils import common_utils
+from sky.utils import kubernetes_enums
 from sky.utils import registry
 from sky.utils import resources_utils
 from sky.utils import schemas
@@ -76,6 +78,9 @@ class Kubernetes(clouds.Cloud):
                                                          'tiers are not '
                                                          'supported in '
                                                          'Kubernetes.',
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported in '
+             'Kubernetes.'),
         clouds.CloudImplementationFeatures.CUSTOM_NETWORK_TIER:
             ('Custom network tier is currently not supported in '
              f'{_REPR}.'),
@@ -281,7 +286,8 @@ class Kubernetes(clouds.Cloud):
             default_value=None)
         if (autoscaler_type is not None and
                 not kubernetes_utils.get_autoscaler(
-
+                    kubernetes_enums.KubernetesAutoscalerType(
+                        autoscaler_type)).can_query_backend):
             # Unsupported autoscaler type. Rely on the autoscaler to
             # provision the right instance type without running checks.
             # Worst case, if autoscaling fails, the pod will be stuck in
@@ -298,7 +304,8 @@ class Kubernetes(clouds.Cloud):
 
             if autoscaler_type is None:
                 continue
-            autoscaler = kubernetes_utils.get_autoscaler(
+            autoscaler = kubernetes_utils.get_autoscaler(
+                kubernetes_enums.KubernetesAutoscalerType(autoscaler_type))
             logger.debug(f'{context} has autoscaler of type: {autoscaler_type}')
             if autoscaler.can_create_new_instance_of_type(
                     context, instance_type):
@@ -406,8 +413,10 @@ class Kubernetes(clouds.Cloud):
 
     @staticmethod
     def _calculate_provision_timeout(
-
-
+            num_nodes: int,
+            volume_mounts: Optional[List['volume_lib.VolumeMount']],
+            enable_flex_start: bool,
+    ) -> int:
         """Calculate provision timeout based on number of nodes.
 
         The timeout scales linearly with the number of nodes to account for
@@ -415,6 +424,8 @@ class Kubernetes(clouds.Cloud):
 
         Args:
             num_nodes: Number of nodes being provisioned
+            volume_mounts: Volume mounts for the pod
+            enable_flex_start: Whether flex start is enabled
 
         Returns:
             Timeout in seconds
@@ -422,7 +433,12 @@ class Kubernetes(clouds.Cloud):
         base_timeout = 10  # Base timeout for single node
         per_node_timeout = 0.2  # Additional seconds per node
         max_timeout = 60  # Cap at 1 minute
-        if
+        if enable_flex_start:
+            # Flex start takes longer to provision.
+            base_timeout = 600
+            per_node_timeout = 10
+            max_timeout = 900
+        elif volume_mounts is not None:
             for volume_mount in volume_mounts:
                 if (volume_mount.volume_config.type ==
                         volume_lib.VolumeType.PVC.value):
@@ -581,24 +597,6 @@ class Kubernetes(clouds.Cloud):
         if resources.use_spot:
             spot_label_key, spot_label_value = kubernetes_utils.get_spot_label()
 
-        # Timeout for resource provisioning. This timeout determines how long to
-        # wait for pod to be in pending status before giving up.
-        # Larger timeout may be required for autoscaling clusters, since
-        # autoscaler may take some time to provision new nodes.
-        # Note that this timeout includes time taken by the Kubernetes scheduler
-        # itself, which can be upto 2-3 seconds, and up to 10-15 seconds when
-        # scheduling 100s of pods.
-        # We use a linear scaling formula to determine the timeout based on the
-        # number of nodes.
-
-        timeout = self._calculate_provision_timeout(num_nodes, volume_mounts)
-        timeout = skypilot_config.get_effective_region_config(
-            cloud='kubernetes',
-            region=context,
-            keys=('provision_timeout',),
-            default_value=timeout,
-            override_configs=resources.cluster_config_overrides)
-
         # Check if this cluster supports high performance networking and
         # configure IPC_LOCK capability for clusters like Nebius that support it
         k8s_ipc_lock_capability = False
@@ -640,7 +638,46 @@ class Kubernetes(clouds.Cloud):
                 cloud='kubernetes',
                 region=context,
                 keys=('kueue', 'local_queue_name'),
-                default_value=None
+                default_value=None,
+                override_configs=resources.cluster_config_overrides))
+
+        # Check DWS configuration for GKE.
+        (enable_flex_start, enable_flex_start_queued_provisioning,
+         max_run_duration_seconds) = gcp_utils.get_dws_config(
+             context, k8s_kueue_local_queue_name,
+             resources.cluster_config_overrides)
+        if enable_flex_start_queued_provisioning or enable_flex_start:
+            # DWS is only supported in GKE, check the autoscaler type.
+            autoscaler_type = skypilot_config.get_effective_region_config(
+                cloud='kubernetes',
+                region=context,
+                keys=('autoscaler',),
+                default_value=None)
+            if (autoscaler_type !=
+                    kubernetes_enums.KubernetesAutoscalerType.GKE.value):
+                raise ValueError(
+                    f'DWS is only supported in GKE, but the autoscaler type '
+                    f'for context {context} is {autoscaler_type}')
+
+        # Timeout for resource provisioning. This timeout determines how long to
+        # wait for pod to be in pending status before giving up.
+        # Larger timeout may be required for autoscaling clusters, since
+        # autoscaler may take some time to provision new nodes.
+        # Note that this timeout includes time taken by the Kubernetes scheduler
+        # itself, which can be upto 2-3 seconds, and up to 10-15 seconds when
+        # scheduling 100s of pods.
+        # We use a linear scaling formula to determine the timeout based on the
+        # number of nodes.
+
+        timeout = self._calculate_provision_timeout(
+            num_nodes, volume_mounts, enable_flex_start or
+            enable_flex_start_queued_provisioning)
+        timeout = skypilot_config.get_effective_region_config(
+            cloud='kubernetes',
+            region=context,
+            keys=('provision_timeout',),
+            default_value=timeout,
+            override_configs=resources.cluster_config_overrides)
 
         deploy_vars = {
             'instance_type': resources.instance_type,
@@ -696,6 +733,8 @@ class Kubernetes(clouds.Cloud):
                 (k8s_ha_storage_class_name),
             'avoid_label_keys': avoid_label_keys,
             'k8s_ipc_lock_capability': k8s_ipc_lock_capability,
+            'k8s_enable_flex_start': enable_flex_start,
+            'k8s_max_run_duration_seconds': max_run_duration_seconds,
         }
 
         # Add kubecontext if it is set. It may be None if SkyPilot is running

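The flex-start branch above only changes the timeout constants; the scaling expression itself is not shown in this hunk, so the arithmetic below is an assumption based on the surrounding comments (linear in the node count, capped at the maximum):

    # Assumed illustration of the linear-scaling comment; not the actual code.
    def estimated_timeout(num_nodes: int, flex_start: bool) -> float:
        if flex_start:
            base, per_node, cap = 600, 10, 900   # constants from the diff
        else:
            base, per_node, cap = 10, 0.2, 60    # defaults from the diff
        return min(base + per_node * num_nodes, cap)

    # e.g. a 16-node flex-start cluster would wait up to
    # min(600 + 10 * 16, 900) = 760 seconds for pods to leave pending state.
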
sky/clouds/lambda_cloud.py
CHANGED
@@ -49,6 +49,9 @@ class Lambda(clouds.Cloud):
             f'{_REPR}.'),
         clouds.CloudImplementationFeatures.HOST_CONTROLLERS: f'Host controllers are not supported in {_REPR}.',
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS: f'High availability controllers are not supported on {_REPR}.',
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported in '
+             f'{_REPR}.'),
     }
 
     PROVISIONER_VERSION = clouds.ProvisionerVersion.SKYPILOT

sky/clouds/nebius.py
CHANGED
@@ -62,6 +62,9 @@ class Nebius(clouds.Cloud):
             ('Custom network tier is currently not supported on Nebius.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported on Nebius.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on '
+             f'{_REPR}.'),
     }
     # Nebius maximum instance name length defined as <= 63 as a hostname length
     # 63 - 8 - 5 = 50 characters since

sky/clouds/oci.py
CHANGED
@@ -80,6 +80,9 @@ class OCI(clouds.Cloud):
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported on '
              f'{cls._REPR}.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on '
+             f'{cls._REPR}.'),
     }
     if resources.use_spot:
         unsupported_features[clouds.CloudImplementationFeatures.STOP] = (

sky/clouds/paperspace.py
CHANGED
@@ -47,6 +47,9 @@ class Paperspace(clouds.Cloud):
             f'{_REPR}.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             (f'High availability controllers are not supported in {_REPR}.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported in '
+             f'{_REPR}.'),
     }
     _MAX_CLUSTER_NAME_LEN_LIMIT = 120
     _regions: List[clouds.Region] = []

sky/clouds/runpod.py
CHANGED
@@ -39,6 +39,9 @@ class RunPod(clouds.Cloud):
             'to local disk.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported on RunPod.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on '
+             'RunPod.'),
     }
     _MAX_CLUSTER_NAME_LEN_LIMIT = 120
     _regions: List[clouds.Region] = []

sky/clouds/scp.py
CHANGED
@@ -62,6 +62,9 @@ class SCP(clouds.Cloud):
             f'{_REPR}.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             (f'High availability controllers are not supported on {_REPR}.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on '
+             f'{_REPR}.'),
     }
 
     _INDENT_PREFIX = ' '

sky/clouds/utils/gcp_utils.py
CHANGED
@@ -10,7 +10,7 @@ import dataclasses
 import json
 import time
 import typing
-from typing import List, Optional, Set
+from typing import Any, Dict, List, Optional, Set, Tuple
 
 import cachetools
 
@@ -18,6 +18,7 @@ from sky import sky_logging
 from sky import skypilot_config
 from sky.provision.gcp import constants
 from sky.provision.kubernetes import utils as kubernetes_utils
+from sky.utils import resources_utils
 from sky.utils import subprocess_utils
 
 if typing.TYPE_CHECKING:
@@ -206,3 +207,62 @@ def get_minimal_storage_permissions() -> List[str]:
         permissions += constants.GCP_MINIMAL_PERMISSIONS
 
     return permissions
+
+
+# Get the DWS configuration for the given context in GKE.
+def get_dws_config(
+    context: str,
+    k8s_kueue_local_queue_name: Optional[str],
+    cluster_config_overrides: Optional[Dict[str, Any]] = None,
+) -> Tuple[bool, bool, Optional[int]]:
+    """Get the DWS configuration for the given context.
+
+    Args:
+        context: The context to get the DWS configuration for.
+        k8s_kueue_local_queue_name: The name of the Kueue local queue.
+        cluster_config_overrides: The cluster config overrides.
+
+    Returns:
+        A tuple of (enable_flex_start,
+        enable_flex_start_queued_provisioning,
+        max_run_duration_seconds).
+
+    Raises:
+        ValueError: If k8s_kueue_local_queue_name is missing to enable
+        flex start queued provisioning for the given context.
+    """
+    dws_config = skypilot_config.get_effective_region_config(
+        cloud='kubernetes',
+        region=context,
+        keys=('dws',),
+        default_value={},
+        override_configs=cluster_config_overrides)
+    if not dws_config:
+        return False, False, None
+
+    enabled = dws_config.get('enabled', False)
+    if not enabled:
+        return False, False, None
+
+    enable_flex_start = False
+    enable_flex_start_queued_provisioning = False
+    max_run_duration_seconds = None
+    # If users already use Kueue, use the flex start with queued
+    # provisioning mode.
+    if k8s_kueue_local_queue_name:
+        enable_flex_start_queued_provisioning = True
+    else:
+        enable_flex_start = True
+
+    if not enable_flex_start_queued_provisioning:
+        return (enable_flex_start, enable_flex_start_queued_provisioning,
+                max_run_duration_seconds)
+
+    # Max run duration is only used in the flex start with queued
+    # provisioning mode.
+    max_run_duration = dws_config.get('max_run_duration', None)
+    if max_run_duration:
+        max_run_duration_seconds = resources_utils.parse_time_minutes(
+            max_run_duration) * 60
+    return (enable_flex_start, enable_flex_start_queued_provisioning,
+            max_run_duration_seconds)

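A sketch of how the new gcp_utils.get_dws_config helper would be consumed, mirroring the call added to sky/clouds/kubernetes.py above (the context name is a placeholder; the exact string format accepted by max_run_duration is not shown in this diff):

    from sky.clouds.utils import gcp_utils

    # With kubernetes.dws.enabled set and no Kueue local queue configured,
    # plain flex start is chosen; with a local queue name, the queued
    # provisioning mode is used and max_run_duration, if set, is converted
    # to seconds via resources_utils.parse_time_minutes().
    (enable_flex_start, enable_flex_start_queued_provisioning,
     max_run_duration_seconds) = gcp_utils.get_dws_config(
         context='my-gke-context',          # placeholder context name
         k8s_kueue_local_queue_name=None,   # no Kueue -> plain flex start
         cluster_config_overrides=None)
    # -> (True, False, None) when DWS is enabled for that context.
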
sky/clouds/vast.py
CHANGED
@@ -35,6 +35,9 @@ class Vast(clouds.Cloud):
             ('Mounting object stores is not supported on Vast.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             ('High availability controllers are not supported on Vast.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            ('Customized multiple network interfaces are not supported on Vast.'
+            ),
     }
     #
     # Vast doesn't have a max cluster name limit. This number

sky/clouds/vsphere.py
CHANGED
@@ -60,6 +60,9 @@ class Vsphere(clouds.Cloud):
             (f'Opening ports is currently not supported on {_REPR}.'),
         clouds.CloudImplementationFeatures.HIGH_AVAILABILITY_CONTROLLERS:
             (f'High availability controllers are not supported on {_REPR}.'),
+        clouds.CloudImplementationFeatures.CUSTOM_MULTI_NETWORK:
+            (f'Customized multiple network interfaces '
+             f'are not supported on {_REPR}.'),
     }
 
     _MAX_CLUSTER_NAME_LEN_LIMIT = 80  # The name can't exceeds 80 characters

sky/dashboard/out/404.html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/0da6afe66176678a.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/0da6afe66176678a.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-9a81ea998672c303.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-efc06c2733009cd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-c0a4f1ea606d48d2.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-a37b06ddb64521fd.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c72a1f77a3c0be1b.js" defer=""></script><script src="/dashboard/_next/static/
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/0da6afe66176678a.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/0da6afe66176678a.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-9a81ea998672c303.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-efc06c2733009cd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-c0a4f1ea606d48d2.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-a37b06ddb64521fd.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c72a1f77a3c0be1b.js" defer=""></script><script src="/dashboard/_next/static/O3wBEOmvYEVEqZxAP7Czn/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/O3wBEOmvYEVEqZxAP7Czn/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"statusCode":404}},"page":"/_error","query":{},"buildId":"O3wBEOmvYEVEqZxAP7Czn","assetPrefix":"/dashboard","nextExport":true,"isFallback":false,"gip":true,"scriptLoader":[]}</script></body></html>

sky/dashboard/out/clusters/[cluster]/[job].html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/0da6afe66176678a.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/0da6afe66176678a.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-9a81ea998672c303.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-efc06c2733009cd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-c0a4f1ea606d48d2.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-a37b06ddb64521fd.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-162f3033ffcd3d31.js" defer=""></script><script src="/dashboard/_next/static/chunks/5230-df791914b54d91d9.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-5ea3ffa10fc884f2.js" defer=""></script><script src="/dashboard/_next/static/chunks/1664-d65361e92b85e786.js" defer=""></script><script src="/dashboard/_next/static/chunks/804-9f5e98ce84d46bdd.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-6ff4e45dfb49d11d.js" defer=""></script><script src="/dashboard/_next/static/chunks/3698-52ad1ca228faa776.js" defer=""></script><script src="/dashboard/_next/static/chunks/9470-21d059a1dfa03f61.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D/%5Bjob%5D-8135aba0712bda37.js" defer=""></script><script src="/dashboard/_next/static/
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/0da6afe66176678a.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/0da6afe66176678a.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-9a81ea998672c303.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-efc06c2733009cd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-c0a4f1ea606d48d2.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-a37b06ddb64521fd.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-162f3033ffcd3d31.js" defer=""></script><script src="/dashboard/_next/static/chunks/5230-df791914b54d91d9.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-5ea3ffa10fc884f2.js" defer=""></script><script src="/dashboard/_next/static/chunks/1664-d65361e92b85e786.js" defer=""></script><script src="/dashboard/_next/static/chunks/804-9f5e98ce84d46bdd.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-6ff4e45dfb49d11d.js" defer=""></script><script src="/dashboard/_next/static/chunks/3698-52ad1ca228faa776.js" defer=""></script><script src="/dashboard/_next/static/chunks/9470-21d059a1dfa03f61.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D/%5Bjob%5D-8135aba0712bda37.js" defer=""></script><script src="/dashboard/_next/static/O3wBEOmvYEVEqZxAP7Czn/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/O3wBEOmvYEVEqZxAP7Czn/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters/[cluster]/[job]","query":{},"buildId":"O3wBEOmvYEVEqZxAP7Czn","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>

sky/dashboard/out/clusters/[cluster].html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/0da6afe66176678a.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/0da6afe66176678a.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-9a81ea998672c303.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-efc06c2733009cd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-c0a4f1ea606d48d2.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-a37b06ddb64521fd.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-162f3033ffcd3d31.js" defer=""></script><script src="/dashboard/_next/static/chunks/5230-df791914b54d91d9.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-5ea3ffa10fc884f2.js" defer=""></script><script src="/dashboard/_next/static/chunks/1664-d65361e92b85e786.js" defer=""></script><script src="/dashboard/_next/static/chunks/804-9f5e98ce84d46bdd.js" defer=""></script><script src="/dashboard/_next/static/chunks/1272-1ef0bf0237faccdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/3947-b059261d6fa88a1f.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-6ff4e45dfb49d11d.js" defer=""></script><script src="/dashboard/_next/static/chunks/3698-52ad1ca228faa776.js" defer=""></script><script src="/dashboard/_next/static/chunks/9470-21d059a1dfa03f61.js" defer=""></script><script src="/dashboard/_next/static/chunks/6990-d0dc765474fa0eca.js" defer=""></script><script src="/dashboard/_next/static/chunks/8969-909d53833da080cb.js" defer=""></script><script src="/dashboard/_next/static/chunks/1043-1b39779691bb4030.js" defer=""></script><script src="/dashboard/_next/static/chunks/6601-fcfad0ddf92ec7ab.js" defer=""></script><script src="/dashboard/_next/static/chunks/938-044ad21de8b4626b.js" defer=""></script><script src="/dashboard/_next/static/chunks/1871-80dea41717729fa5.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D-1159f362b960e2b8.js" defer=""></script><script src="/dashboard/_next/static/
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/0da6afe66176678a.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/0da6afe66176678a.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-9a81ea998672c303.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-efc06c2733009cd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-c0a4f1ea606d48d2.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-a37b06ddb64521fd.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-162f3033ffcd3d31.js" defer=""></script><script src="/dashboard/_next/static/chunks/5230-df791914b54d91d9.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-5ea3ffa10fc884f2.js" defer=""></script><script src="/dashboard/_next/static/chunks/1664-d65361e92b85e786.js" defer=""></script><script src="/dashboard/_next/static/chunks/804-9f5e98ce84d46bdd.js" defer=""></script><script src="/dashboard/_next/static/chunks/1272-1ef0bf0237faccdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/3947-b059261d6fa88a1f.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-6ff4e45dfb49d11d.js" defer=""></script><script src="/dashboard/_next/static/chunks/3698-52ad1ca228faa776.js" defer=""></script><script src="/dashboard/_next/static/chunks/9470-21d059a1dfa03f61.js" defer=""></script><script src="/dashboard/_next/static/chunks/6990-d0dc765474fa0eca.js" defer=""></script><script src="/dashboard/_next/static/chunks/8969-909d53833da080cb.js" defer=""></script><script src="/dashboard/_next/static/chunks/1043-1b39779691bb4030.js" defer=""></script><script src="/dashboard/_next/static/chunks/6601-fcfad0ddf92ec7ab.js" defer=""></script><script src="/dashboard/_next/static/chunks/938-044ad21de8b4626b.js" defer=""></script><script src="/dashboard/_next/static/chunks/1871-80dea41717729fa5.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D-1159f362b960e2b8.js" defer=""></script><script src="/dashboard/_next/static/O3wBEOmvYEVEqZxAP7Czn/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/O3wBEOmvYEVEqZxAP7Czn/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters/[cluster]","query":{},"buildId":"O3wBEOmvYEVEqZxAP7Czn","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>