skypilot-nightly 1.0.0.dev20251013-py3-none-any.whl → 1.0.0.dev20251015-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of skypilot-nightly might be problematic.
- sky/__init__.py +2 -2
- sky/authentication.py +9 -2
- sky/backends/backend_utils.py +62 -40
- sky/backends/cloud_vm_ray_backend.py +8 -6
- sky/catalog/kubernetes_catalog.py +19 -25
- sky/client/cli/command.py +53 -19
- sky/client/sdk.py +13 -1
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs/pools/[pool].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/jobs/controller.py +122 -145
- sky/jobs/recovery_strategy.py +59 -82
- sky/jobs/scheduler.py +5 -5
- sky/jobs/state.py +65 -21
- sky/jobs/utils.py +58 -22
- sky/metrics/utils.py +27 -6
- sky/provision/common.py +2 -0
- sky/provision/instance_setup.py +10 -2
- sky/provision/kubernetes/instance.py +34 -10
- sky/provision/kubernetes/utils.py +53 -39
- sky/server/common.py +4 -2
- sky/server/requests/executor.py +3 -1
- sky/server/requests/preconditions.py +2 -4
- sky/server/requests/requests.py +13 -23
- sky/server/server.py +5 -0
- sky/sky_logging.py +0 -2
- sky/skylet/constants.py +22 -5
- sky/skylet/log_lib.py +0 -1
- sky/skylet/log_lib.pyi +1 -1
- sky/utils/asyncio_utils.py +18 -0
- sky/utils/common.py +2 -0
- sky/utils/context.py +57 -51
- sky/utils/context_utils.py +2 -2
- sky/utils/controller_utils.py +35 -8
- sky/utils/locks.py +20 -5
- sky/utils/subprocess_utils.py +4 -3
- {skypilot_nightly-1.0.0.dev20251013.dist-info → skypilot_nightly-1.0.0.dev20251015.dist-info}/METADATA +38 -37
- {skypilot_nightly-1.0.0.dev20251013.dist-info → skypilot_nightly-1.0.0.dev20251015.dist-info}/RECORD +57 -56
- /sky/dashboard/out/_next/static/{MtlDUf-nH1hhcy7xwbCj3 → -bih7JVStsXyeasac-dvQ}/_buildManifest.js +0 -0
- /sky/dashboard/out/_next/static/{MtlDUf-nH1hhcy7xwbCj3 → -bih7JVStsXyeasac-dvQ}/_ssgManifest.js +0 -0
- {skypilot_nightly-1.0.0.dev20251013.dist-info → skypilot_nightly-1.0.0.dev20251015.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20251013.dist-info → skypilot_nightly-1.0.0.dev20251015.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20251013.dist-info → skypilot_nightly-1.0.0.dev20251015.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20251013.dist-info → skypilot_nightly-1.0.0.dev20251015.dist-info}/top_level.txt +0 -0
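
For context, a wheel is an ordinary zip archive, so a per-file summary like the listing above can be reproduced locally. A rough sketch under assumptions (the wheel filenames below are assumed to match the two nightly builds being compared; this is not how the registry itself generates the diff):

    import difflib
    import zipfile

    OLD = 'skypilot_nightly-1.0.0.dev20251013-py3-none-any.whl'
    NEW = 'skypilot_nightly-1.0.0.dev20251015-py3-none-any.whl'

    with zipfile.ZipFile(OLD) as old_whl, zipfile.ZipFile(NEW) as new_whl:
        # Compare only files present in both wheels; added/removed files
        # (e.g. the renamed _next/static build directory) need separate handling.
        for name in sorted(set(old_whl.namelist()) & set(new_whl.namelist())):
            if not name.endswith('.py'):
                continue
            old_lines = old_whl.read(name).decode('utf-8', 'replace').splitlines()
            new_lines = new_whl.read(name).decode('utf-8', 'replace').splitlines()
            diff = list(difflib.unified_diff(old_lines, new_lines, lineterm=''))
            added = sum(1 for line in diff[2:] if line.startswith('+'))
            removed = sum(1 for line in diff[2:] if line.startswith('-'))
            if added or removed:
                print(f'{name} +{added} -{removed}')
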
sky/__init__.py
CHANGED
@@ -7,7 +7,7 @@ import urllib.request
 from sky.utils import directory_utils


 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '
+_SKYPILOT_COMMIT_SHA = '4f345c81bd4baeb1ea1e7a98dc6b650f6ec6c7c5'


 def _get_git_commit():
@@ -37,7 +37,7 @@ def _get_git_commit():


 __commit__ = _get_git_commit()
-__version__ = '1.0.0.dev20251013'
+__version__ = '1.0.0.dev20251015'
 __root_dir__ = directory_utils.get_sky_dir()

sky/authentication.py
CHANGED
@@ -153,7 +153,12 @@ def get_or_generate_keys() -> Tuple[str, str]:
     return private_key_path, public_key_path


-def create_ssh_key_files_from_db(private_key_path: str):
+def create_ssh_key_files_from_db(private_key_path: str) -> bool:
+    """Creates the ssh key files from the database.
+
+    Returns:
+        True if the ssh key files are created successfully, False otherwise.
+    """
     # Assume private key path is in the format of
     # ~/.sky/clients/<user_hash>/ssh/sky-key
     separated_path = os.path.normpath(private_key_path).split(os.path.sep)
@@ -181,12 +186,14 @@ def create_ssh_key_files_from_db(private_key_path: str):
     ssh_public_key, ssh_private_key, exists = (
         global_user_state.get_ssh_keys(user_hash))
     if not exists:
-
+        logger.debug(f'SSH keys not found for user {user_hash}')
+        return False
     _save_key_pair(private_key_path, public_key_path, ssh_private_key,
                    ssh_public_key)
     assert os.path.exists(public_key_path), (
         'Private key found, but associated public key '
         f'{public_key_path} does not exist.')
+    return True


 def configure_ssh_info(config: Dict[str, Any]) -> Dict[str, Any]:

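A minimal sketch of how a caller can use the new boolean return; restore_cluster_ssh_key is an illustrative helper, not part of the package, while auth.create_ssh_key_files_from_db is the function changed above:

    import logging
    import os

    from sky import authentication as auth

    logger = logging.getLogger(__name__)


    def restore_cluster_ssh_key(private_key_path: str) -> bool:
        """Recreate missing SSH key files from the database, if possible."""
        if os.path.exists(os.path.expanduser(private_key_path)):
            return True
        if not auth.create_ssh_key_files_from_db(private_key_path):
            # No keys stored for this user hash: callers (e.g. get_clusters)
            # can now skip credential injection instead of failing.
            logger.debug('SSH keys not found for %s', private_key_path)
            return False
        return True
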
sky/backends/backend_utils.py
CHANGED
@@ -1857,6 +1857,13 @@ def check_owner_identity(cluster_name: str) -> None:
         summary_response=True)
     if record is None:
         return
+    _check_owner_identity_with_record(cluster_name, record)
+
+
+def _check_owner_identity_with_record(cluster_name: str,
+                                      record: Dict[str, Any]) -> None:
+    if env_options.Options.SKIP_CLOUD_IDENTITY_CHECK.get():
+        return
     handle = record['handle']
     if not isinstance(handle, backends.CloudVmRayResourceHandle):
         return
@@ -2151,6 +2158,7 @@ def check_can_clone_disk_and_override_task(

 def _update_cluster_status(
         cluster_name: str,
+        record: Dict[str, Any],
         include_user_info: bool = True,
         summary_response: bool = False) -> Optional[Dict[str, Any]]:
     """Update the cluster status.
@@ -2179,12 +2187,6 @@ def _update_cluster_status(
         fetched from the cloud provider or there are leaked nodes causing
         the node number larger than expected.
     """
-    record = global_user_state.get_cluster_from_name(
-        cluster_name,
-        include_user_info=include_user_info,
-        summary_response=summary_response)
-    if record is None:
-        return None
     handle = record['handle']
     if handle.cluster_yaml is None:
         # Remove cluster from db since this cluster does not have a config file
@@ -2677,10 +2679,9 @@ def refresh_cluster_record(
     # using the correct cloud credentials.
     workspace = record.get('workspace', constants.SKYPILOT_DEFAULT_WORKSPACE)
     with skypilot_config.local_active_workspace_ctx(workspace):
-        check_owner_identity
-
-
-        return record
+        # check_owner_identity returns if the record handle is
+        # not a CloudVmRayResourceHandle
+        _check_owner_identity_with_record(cluster_name, record)

     # The loop logic allows us to notice if the status was updated in the
     # global_user_state by another process and stop trying to get the lock.
@@ -2697,7 +2698,8 @@ def refresh_cluster_record(
             return record

         if cluster_lock_already_held:
-            return _update_cluster_status(cluster_name,
+            return _update_cluster_status(cluster_name, record,
+                                          include_user_info,
                                           summary_response)

         # Try to acquire the lock so we can fetch the status.
@@ -2713,7 +2715,7 @@ def refresh_cluster_record(
                     record, force_refresh_statuses):
                 return record
             # Update and return the cluster status.
-            return _update_cluster_status(cluster_name,
+            return _update_cluster_status(cluster_name, record,
                                           include_user_info,
                                           summary_response)

@@ -3117,25 +3119,23 @@ def refresh_cluster_records() -> None:
     exclude_managed_clusters = True
     if env_options.Options.SHOW_DEBUG_INFO.get():
         exclude_managed_clusters = False
-    cluster_names =
-
+    cluster_names = set(
+        global_user_state.get_cluster_names(
+            exclude_managed_clusters=exclude_managed_clusters,))

     # TODO(syang): we should try not to leak
     # request info in backend_utils.py.
     # Refactor this to use some other info to
     # determine if a launch is in progress.
-
+    requests = requests_lib.get_request_tasks(
         req_filter=requests_lib.RequestTaskFilter(
             status=[requests_lib.RequestStatus.RUNNING],
-            cluster_names=cluster_names,
             include_request_names=['sky.launch']))
     cluster_names_with_launch_request = {
-        request.cluster_name for request in
+        request.cluster_name for request in requests
     }
-    cluster_names_without_launch_request =
-
-        if cluster_name not in cluster_names_with_launch_request
-    ]
+    cluster_names_without_launch_request = (cluster_names -
+                                            cluster_names_with_launch_request)

     def _refresh_cluster_record(cluster_name):
         return _refresh_cluster(cluster_name,
@@ -3144,7 +3144,7 @@ def refresh_cluster_records() -> None:
                                 include_user_info=False,
                                 summary_response=True)

-    if len(
+    if len(cluster_names_without_launch_request) > 0:
         # Do not refresh the clusters that have an active launch request.
         subprocess_utils.run_in_parallel(_refresh_cluster_record,
                                          cluster_names_without_launch_request)
@@ -3270,7 +3270,15 @@ def get_clusters(
                 expanded_private_key_path = os.path.expanduser(
                     ssh_private_key_path)
                 if not os.path.exists(expanded_private_key_path):
-                    auth.create_ssh_key_files_from_db(
+                    success = auth.create_ssh_key_files_from_db(
+                        ssh_private_key_path)
+                    if not success:
+                        # If the ssh key files are not found, we do not
+                        # update the record with credentials.
+                        logger.debug(
+                            f'SSH keys not found for cluster {record["name"]} '
+                            f'at key path {ssh_private_key_path}')
+                        continue
             else:
                 private_key_path, _ = auth.get_or_generate_keys()
                 expanded_private_key_path = os.path.expanduser(private_key_path)
@@ -3332,7 +3340,10 @@ def get_clusters(
             force_refresh_statuses=force_refresh_statuses,
             include_user_info=True,
             summary_response=summary_response)
-        if
+        # record may be None if the cluster is deleted during refresh,
+        # e.g. all the Pods of a cluster on Kubernetes have been
+        # deleted before refresh.
+        if record is not None and 'error' not in record:
             _update_records_with_handle_info([record])
             if include_credentials:
                 _update_records_with_credentials([record])
@@ -3344,45 +3355,56 @@ def get_clusters(
     # request info in backend_utils.py.
     # Refactor this to use some other info to
     # determine if a launch is in progress.
-
+    requests = requests_lib.get_request_tasks(
         req_filter=requests_lib.RequestTaskFilter(
             status=[requests_lib.RequestStatus.RUNNING],
             cluster_names=cluster_names,
             include_request_names=['sky.launch']))
     cluster_names_with_launch_request = {
-        request.cluster_name for request in
+        request.cluster_name for request in requests
     }
+    # Preserve the index of the cluster name as it appears on "records"
     cluster_names_without_launch_request = [
-
+        (i, cluster_name)
+        for i, cluster_name in enumerate(cluster_names)
         if cluster_name not in cluster_names_with_launch_request
     ]
     # for clusters that have an active launch request, we do not refresh the status
-    updated_records = [
-        record for record in records
-        if record['name'] in cluster_names_with_launch_request
-    ]
+    updated_records = []
     if len(cluster_names_without_launch_request) > 0:
         with progress:
             updated_records = subprocess_utils.run_in_parallel(
-                _refresh_cluster_record,
-
+                _refresh_cluster_record, [
+                    cluster_name
+                    for _, cluster_name in cluster_names_without_launch_request
+                ])
+    # Preserve the index of the cluster name as it appears on "records"
+    # before filtering for clusters being launched.
+    updated_records_dict: Dict[int, Optional[Dict[str, Any]]] = {
+        cluster_names_without_launch_request[i][0]: updated_records[i]
+        for i in range(len(cluster_names_without_launch_request))
+    }
     # Show information for removed clusters.
     kept_records = []
     autodown_clusters, remaining_clusters, failed_clusters = [], [], []
     for i, record in enumerate(records):
-        if
+        if i not in updated_records_dict:
+            # record was not refreshed, keep the original record
+            kept_records.append(record)
+            continue
+        updated_record = updated_records_dict[i]
+        if updated_record is None:
             if record['to_down']:
-                autodown_clusters.append(
+                autodown_clusters.append(record['name'])
             else:
-                remaining_clusters.append(
-        elif
-            failed_clusters.append(
-                (cluster_names[i], updated_records[i]['error']))
+                remaining_clusters.append(record['name'])
+        elif updated_record['status'] == 'UNKNOWN':
+            failed_clusters.append((record['name'], updated_record['error']))
             # Keep the original record if the status is unknown,
             # so that the user can still see the cluster.
             kept_records.append(record)
         else:
-            kept_records.append(
+            kept_records.append(updated_record)

     if autodown_clusters:
         plural = 's' if len(autodown_clusters) > 1 else ''

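The reworked get_clusters() path refreshes only clusters without an in-flight sky.launch request and keeps every record at its original index, falling back to the stale record when a refresh is skipped. A self-contained sketch of that index-preserving pattern, using generic stand-ins (records, skip_names, refresh_fn) rather than SkyPilot's internal helpers:

    from typing import Any, Callable, Dict, List, Optional, Set

    Record = Dict[str, Any]


    def refresh_keeping_order(records: List[Record], skip_names: Set[str],
                              refresh_fn: Callable[[str], Optional[Record]]
                             ) -> List[Record]:
        """Refresh records not in skip_names; preserve the original ordering."""
        # Remember the original index of every record that will be refreshed.
        to_refresh = [(i, r['name'])
                      for i, r in enumerate(records)
                      if r['name'] not in skip_names]
        refreshed = [refresh_fn(name) for _, name in to_refresh]
        refreshed_by_index: Dict[int, Optional[Record]] = {
            to_refresh[i][0]: refreshed[i] for i in range(len(to_refresh))
        }

        kept: List[Record] = []
        for i, record in enumerate(records):
            if i not in refreshed_by_index:
                # Skipped (e.g. a launch is in progress): keep the stale record.
                kept.append(record)
                continue
            updated = refreshed_by_index[i]
            # None models a cluster that disappeared during refresh; drop it.
            if updated is not None:
                kept.append(updated)
        return kept

Returning None from refresh_fn mirrors the case called out in the diff where a cluster is deleted during refresh (e.g. its Kubernetes Pods are already gone).
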
sky/backends/cloud_vm_ray_backend.py
CHANGED

@@ -605,7 +605,11 @@ class RayCodeGen:
     # skip the scheduling step.
     job_lib.scheduler.schedule_step()

-
+    # If some nodes are down and then new nodes are added after launching again,
+    # the result of `ray.nodes()` will include all the nodes, so we need to get
+    # the alive nodes.
+    alive_nodes = [n for n in ray.nodes() if 'Alive' in n and n['Alive']]
+    total_num_nodes = len(alive_nodes)
     setup_bundles = [{{"CPU": _SETUP_CPUS}} for _ in range(total_num_nodes)]
     setup_pg = ray.util.placement_group(setup_bundles, strategy='STRICT_SPREAD')
     setup_workers = [run_bash_command_with_log_and_return_pid \\
@@ -3720,7 +3724,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):

             self._update_after_cluster_provisioned(
                 handle, to_provision_config.prev_handle, task,
-                prev_cluster_status,
+                prev_cluster_status, config_hash)
             return handle, False

         cluster_config_file = config_dict['ray']
@@ -3792,7 +3796,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):

             self._update_after_cluster_provisioned(
                 handle, to_provision_config.prev_handle, task,
-                prev_cluster_status,
+                prev_cluster_status, config_hash)
             return handle, False

     def _open_ports(self, handle: CloudVmRayResourceHandle) -> None:
@@ -3810,7 +3814,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             prev_handle: Optional[CloudVmRayResourceHandle],
             task: task_lib.Task,
             prev_cluster_status: Optional[status_lib.ClusterStatus],
-
+            config_hash: str) -> None:
         usage_lib.messages.usage.update_cluster_resources(
             handle.launched_nodes, handle.launched_resources)
         usage_lib.messages.usage.update_final_cluster_status(
@@ -3922,8 +3926,6 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             handle.cached_external_ssh_ports, handle.docker_user,
             handle.ssh_user)

-        locks.get_lock(lock_id).force_unlock()
-
     def _sync_workdir(self, handle: CloudVmRayResourceHandle,
                       workdir: Union[Path, Dict[str, Any]],
                       envs_and_secrets: Dict[str, str]) -> None:

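The RayCodeGen change sizes the setup placement group from nodes Ray still reports as alive, so stale entries left behind by replaced nodes no longer inflate the count. A hedged, standalone sketch of the same filter (assumes a reachable Ray runtime; ray.init() here is only for demonstration):

    import ray

    # Connect to (or start) a local Ray instance purely for illustration.
    ray.init(ignore_reinit_error=True)

    # ray.nodes() keeps entries for nodes that have gone away, marked Alive=False,
    # so count only the live ones before sizing placement groups.
    alive_nodes = [n for n in ray.nodes() if n.get('Alive')]
    total_num_nodes = len(alive_nodes)
    print(f'{total_num_nodes} alive node(s) out of {len(ray.nodes())} reported')
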
sky/catalog/kubernetes_catalog.py
CHANGED

@@ -3,6 +3,7 @@
 Kubernetes does not require a catalog of instances, but we need an image catalog
 mapping SkyPilot image tags to corresponding container image tags.
 """
+import collections
 import re
 import typing
 from typing import Dict, List, Optional, Set, Tuple
@@ -167,12 +168,25 @@ def _list_accelerators(
     accelerators_qtys: Set[Tuple[str, int]] = set()
     keys = lf.get_label_keys()
     nodes = kubernetes_utils.get_kubernetes_nodes(context=context)
+
+    # Check if any nodes have accelerators before fetching pods
+    has_accelerator_nodes = False
+    for node in nodes:
+        for key in keys:
+            if key in node.metadata.labels:
+                has_accelerator_nodes = True
+                break
+        if has_accelerator_nodes:
+            break
+
+    # Only fetch pods if we have accelerator nodes and realtime is requested
     pods = None
-
-
+    allocated_qty_by_node: Dict[str, int] = collections.defaultdict(int)
+    if realtime and has_accelerator_nodes:
+        # Get the allocated GPU quantity by each node
         try:
-
-                context=context)
+            allocated_qty_by_node = (
+                kubernetes_utils.get_allocated_gpu_qty_by_node(context=context))
         except kubernetes.api_exception() as e:
             if e.status == 403:
                 logger.warning(
@@ -191,7 +205,6 @@ def _list_accelerators(
     for node in nodes:
         for key in keys:
             if key in node.metadata.labels:
-                allocated_qty = 0
                 accelerator_name = lf.get_accelerator_from_label_value(
                     node.metadata.labels.get(key))

@@ -251,26 +264,7 @@ def _list_accelerators(
                     total_accelerators_available[accelerator_name] = -1
                     continue

-
-                # Get all the pods running on the node
-                if (pod.spec.node_name == node.metadata.name and
-                        pod.status.phase in ['Running', 'Pending']):
-                    # Skip pods that should not count against GPU count
-                    if (kubernetes_utils.
-                            should_exclude_pod_from_gpu_allocation(pod)):
-                        logger.debug(
-                            f'Excluding pod '
-                            f'{pod.metadata.name} from GPU count '
-                            f'calculations on node {node.metadata.name}')
-                        continue
-                    # Iterate over all the containers in the pod and sum
-                    # the GPU requests
-                    for container in pod.spec.containers:
-                        if container.resources.requests:
-                            allocated_qty += (
-                                kubernetes_utils.get_node_accelerator_count(
-                                    context, container.resources.requests))
-
+                allocated_qty = allocated_qty_by_node[node.metadata.name]
                 accelerators_available = accelerator_count - allocated_qty
                 # Initialize the total_accelerators_available to make sure the
                 # key exists in the dictionary.

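The per-pod GPU accounting removed above now comes from a single kubernetes_utils.get_allocated_gpu_qty_by_node(context=...) call, whose body is not shown in this diff. A rough sketch of the kind of per-node tally it replaces, assuming pods from the official Kubernetes Python client and counting only the 'nvidia.com/gpu' resource key (the real code supports other accelerator keys via get_node_accelerator_count):

    import collections
    from typing import Dict, List


    def allocated_gpus_by_node(pods: List) -> Dict[str, int]:
        """Sum requested 'nvidia.com/gpu' per node over Running/Pending pods."""
        allocated: Dict[str, int] = collections.defaultdict(int)
        for pod in pods:
            if pod.status.phase not in ('Running', 'Pending'):
                continue
            for container in pod.spec.containers:
                requests = container.resources.requests or {}
                gpu_request = int(requests.get('nvidia.com/gpu', 0))
                allocated[pod.spec.node_name] += gpu_request
        return allocated
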
sky/client/cli/command.py
CHANGED
@@ -158,12 +158,17 @@ def _get_cluster_records_and_set_ssh_config(
     # Update the SSH config for all clusters
     for record in cluster_records:
         handle = record['handle']
-
+        name = record['name']
         if not (handle is not None and handle.cached_external_ips is not None
                 and 'credentials' in record):
             # If the cluster is not UP or does not have credentials available,
             # we need to remove the cluster from the SSH config.
-            cluster_utils.SSHConfigHelper.remove_cluster(
+            cluster_utils.SSHConfigHelper.remove_cluster(name)
+            continue
+        if not record['credentials']:
+            # The credential is missing for some reason, continue.
+            logger.debug(
+                f'Client did not receive SSH credential for cluster {name}')
             continue

         # During the failover, even though a cluster does not exist, the handle
@@ -1868,7 +1873,8 @@ def status(verbose: bool, refresh: bool, ip: bool, endpoints: bool,
         controllers = []
         for cluster_record in cluster_records:
             cluster_name = cluster_record['name']
-            controller = controller_utils.Controllers.from_name(
+            controller = controller_utils.Controllers.from_name(
+                cluster_name, expect_exact_match=False)
             if controller is not None:
                 controllers.append(cluster_record)
             else:
@@ -2034,7 +2040,8 @@ def cost_report(all: bool, days: int):  # pylint: disable=redefined-builtin
     for cluster_record in cluster_records:
         cluster_name = cluster_record['name']
         try:
-            controller = controller_utils.Controllers.from_name(
+            controller = controller_utils.Controllers.from_name(
+                cluster_name, expect_exact_match=False)
         except AssertionError:
             # There could be some old controller clusters from previous
             # versions that we should not show in the cost report.
@@ -2406,7 +2413,8 @@ def cancel(
                                    job_ids=job_ids_to_cancel)
         _async_call_or_wait(request_id, async_call, 'sky.cancel')
     except exceptions.NotSupportedError as e:
-        controller = controller_utils.Controllers.from_name(
+        controller = controller_utils.Controllers.from_name(
+            cluster, expect_exact_match=False)
         assert controller is not None, cluster
         with ux_utils.print_exception_no_traceback():
             raise click.UsageError(
@@ -2707,7 +2715,8 @@ def start(
         # Get all clusters that are not controllers.
         cluster_records = [
             cluster for cluster in all_clusters
-            if controller_utils.Controllers.from_name(
+            if controller_utils.Controllers.from_name(
+                cluster['name'], expect_exact_match=False) is None
         ]
     if cluster_records is None:
         # Get GLOB cluster names
@@ -2769,7 +2778,8 @@ def start(
     # Checks for controller clusters (jobs controller / sky serve controller).
     controllers, normal_clusters = [], []
     for name in to_start:
-        if controller_utils.Controllers.from_name(
+        if controller_utils.Controllers.from_name(
+                name, expect_exact_match=False) is not None:
             controllers.append(name)
         else:
             normal_clusters.append(name)
@@ -2905,7 +2915,8 @@ def _hint_or_raise_for_down_jobs_controller(controller_name: str,
            to be torn down (e.g., because it has jobs running or
            it is in init state)
    """
-    controller = controller_utils.Controllers.from_name(
+    controller = controller_utils.Controllers.from_name(
+        controller_name, expect_exact_match=False)
    assert controller is not None, controller_name

    with rich_utils.client_status(
@@ -3004,7 +3015,8 @@ def _hint_or_raise_for_down_sky_serve_controller(controller_name: str,
            to be torn down (e.g., because it has services running or
            it is in init state)
    """
-    controller = controller_utils.Controllers.from_name(
+    controller = controller_utils.Controllers.from_name(
+        controller_name, expect_exact_match=False)
    assert controller is not None, controller_name
    with rich_utils.client_status('[bold cyan]Checking for live services[/]'):
        try:
@@ -3115,14 +3127,15 @@ def _down_or_stop_clusters(
     names = list(names)
     if names:
         controllers = [
-            name for name in names
-
+            name for name in names if controller_utils.Controllers.from_name(
+                name, expect_exact_match=False) is not None
         ]
         controllers_str = ', '.join(map(repr, controllers))
         names = [
             cluster['name']
             for cluster in _get_cluster_records_and_set_ssh_config(names)
-            if controller_utils.Controllers.from_name(
+            if controller_utils.Controllers.from_name(
+                cluster['name'], expect_exact_match=False) is None
         ]

         # Make sure the controllers are explicitly specified without other
@@ -3147,7 +3160,7 @@ def _down_or_stop_clusters(
                         f'{controllers_str} is currently not supported.')
                 else:
                     controller = controller_utils.Controllers.from_name(
-                        controller_name)
+                        controller_name, expect_exact_match=False)
                     assert controller is not None
                     hint_or_raise = _controller_to_hint_or_raise(controller)
                     try:
@@ -3195,9 +3208,10 @@ def _down_or_stop_clusters(
             names = [
                 record['name']
                 for record in all_clusters
-                if controller_utils.Controllers.from_name(
-
-
+                if controller_utils.Controllers.from_name(
+                    record['name'], expect_exact_match=False) is None and
+                (down or idle_minutes_to_autostop is not None or
+                 record['status'] != status_lib.ClusterStatus.STOPPED)
             ]

             clusters = names
@@ -3227,6 +3241,9 @@ def _down_or_stop_clusters(

     request_ids = []

+    successes: List[str] = []
+    failures: List[Tuple[str, str]] = []
+
     def _down_or_stop(name: str):
         success_progress = False
         if idle_minutes_to_autostop is not None:
@@ -3237,9 +3254,10 @@ def _down_or_stop_clusters(
                 _async_call_or_wait(
                     request_id, async_call,
                     server_constants.REQUEST_NAME_PREFIX + operation)
-            except (exceptions.NotSupportedError,
-                    exceptions.
+            except (exceptions.NotSupportedError, exceptions.ClusterNotUpError,
+                    exceptions.CloudError) as e:
                 message = str(e)
+                failures.append((name, str(e)))
             else:  # no exception raised
                 success_progress = True
                 message = (f'{colorama.Fore.GREEN}{operation} '
@@ -3275,13 +3293,17 @@ def _down_or_stop_clusters(
                     f'{colorama.Fore.RED}{operation} cluster {name}...failed. '
                     f'{colorama.Style.RESET_ALL}'
                     f'\nReason: {common_utils.format_exception(e)}.')
+                failures.append((name, str(e)))
             except (exceptions.NotSupportedError,
-                    exceptions.ClusterOwnerIdentityMismatchError
+                    exceptions.ClusterOwnerIdentityMismatchError,
+                    exceptions.CloudError) as e:
                 message = str(e)
+                failures.append((name, str(e)))
             else:  # no exception raised
                 message = (
                     f'{colorama.Fore.GREEN}{operation} cluster {name}...done.'
                     f'{colorama.Style.RESET_ALL}')
+                successes.append(name)
                 if not down:
                     message += ('\n  To restart the cluster, run: '
                                 f'{colorama.Style.BRIGHT}sky start {name}'
@@ -3304,6 +3326,18 @@ def _down_or_stop_clusters(
         click.secho(f'{operation} requests are sent. Check the requests\' '
                     'status with `sky request get <request_id>`.')

+    click.echo('\nSummary:')
+    if successes:
+        click.echo('  ✓ Succeeded: ' + ', '.join(successes))
+    if failures:
+        failed_pretty = []
+        for name, reason in failures:
+            first = reason.strip().splitlines()[0]
+            first = first if len(first) <= 120 else first[:120] + '…'
+            failed_pretty.append(f'{name} ({first})')
+        click.echo('  ✗ Failed: ' + ', '.join(failed_pretty))
+        raise click.ClickException('Some clusters failed. See summary above.')
+

 @cli.command(cls=_DocumentedCodeCommand)
 @flags.config_option(expose_value=False)

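The new summary block in _down_or_stop_clusters collects per-cluster outcomes and reports them once at the end, failing the command if anything failed. The same logic, extracted into a standalone sketch for illustration (print_summary is not a function in the package):

    from typing import List, Tuple

    import click


    def print_summary(successes: List[str],
                      failures: List[Tuple[str, str]]) -> None:
        """Print a per-cluster outcome summary, as `sky down/stop` now does."""
        click.echo('\nSummary:')
        if successes:
            click.echo('  ✓ Succeeded: ' + ', '.join(successes))
        if failures:
            pretty = []
            for name, reason in failures:
                # Keep only the first line of the error, truncated for display.
                first = reason.strip().splitlines()[0]
                first = first if len(first) <= 120 else first[:120] + '…'
                pretty.append(f'{name} ({first})')
            click.echo('  ✗ Failed: ' + ', '.join(pretty))
            raise click.ClickException('Some clusters failed. See summary above.')

For example, print_summary(['dev-1'], [('gpu-8', 'ClusterNotUpError: cluster is STOPPED')]) prints one succeeded and one failed entry, then raises click.ClickException.
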
sky/client/sdk.py
CHANGED
@@ -98,6 +98,9 @@ def reload_config() -> None:
     skypilot_config.safe_reload_config()


+# The overloads are not comprehensive - e.g. get_result Literal[False] could be
+# specified to return None. We can add more overloads if needed. To do that see
+# https://github.com/python/mypy/issues/8634#issuecomment-609411104
 @typing.overload
 def stream_response(request_id: None,
                     response: 'requests.Response',
@@ -112,7 +115,16 @@ def stream_response(request_id: server_common.RequestId[T],
                     response: 'requests.Response',
                     output_stream: Optional['io.TextIOBase'] = None,
                     resumable: bool = False,
-                    get_result:
+                    get_result: Literal[True] = True) -> T:
+    ...
+
+
+@typing.overload
+def stream_response(request_id: server_common.RequestId[T],
+                    response: 'requests.Response',
+                    output_stream: Optional['io.TextIOBase'] = None,
+                    resumable: bool = False,
+                    get_result: bool = True) -> Optional[T]:
     ...

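The added overload uses typing.Literal so that call sites passing get_result=True (the default) get a non-Optional return type, while an arbitrary bool falls back to Optional. A toy illustration of the same pattern (fetch is a stand-in, not the SDK API):

    from typing import Literal, Optional, overload


    @overload
    def fetch(block: Literal[True] = True) -> int:
        ...


    @overload
    def fetch(block: bool = True) -> Optional[int]:
        ...


    def fetch(block: bool = True) -> Optional[int]:
        # Toy body: a real client would stream the response and, when
        # block=True, wait for and return the request's result.
        return 42 if block else None


    result: int = fetch()  # first overload matches: non-Optional int
    maybe: Optional[int] = fetch(block=False)  # second overload: Optional[int]
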
sky/dashboard/out/404.html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-ac3a34c8f9fef041.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c66a4e8afc46f17b.js" defer=""></script><script src="/dashboard/_next/static
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-ac3a34c8f9fef041.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c66a4e8afc46f17b.js" defer=""></script><script src="/dashboard/_next/static/-bih7JVStsXyeasac-dvQ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/-bih7JVStsXyeasac-dvQ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"statusCode":404}},"page":"/_error","query":{},"buildId":"-bih7JVStsXyeasac-dvQ","assetPrefix":"/dashboard","nextExport":true,"isFallback":false,"gip":true,"scriptLoader":[]}</script></body></html>

sky/dashboard/out/clusters/[cluster]/[job].html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-ac3a34c8f9fef041.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-3d59f75e2ccf9321.js" defer=""></script><script src="/dashboard/_next/static/chunks/6130-2be46d70a38f1e82.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-d67458fcb1386c92.js" defer=""></script><script src="/dashboard/_next/static/chunks/7411-b15471acd2cba716.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-01359c57e018caa4.js" defer=""></script><script src="/dashboard/_next/static/chunks/3850-ff4a9a69d978632b.js" defer=""></script><script src="/dashboard/_next/static/chunks/6135-4b4d5e824b7f9d3c.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D/%5Bjob%5D-8f058b0346db2aff.js" defer=""></script><script src="/dashboard/_next/static
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-ac3a34c8f9fef041.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-3d59f75e2ccf9321.js" defer=""></script><script src="/dashboard/_next/static/chunks/6130-2be46d70a38f1e82.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-d67458fcb1386c92.js" defer=""></script><script src="/dashboard/_next/static/chunks/7411-b15471acd2cba716.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-01359c57e018caa4.js" defer=""></script><script src="/dashboard/_next/static/chunks/3850-ff4a9a69d978632b.js" defer=""></script><script src="/dashboard/_next/static/chunks/6135-4b4d5e824b7f9d3c.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D/%5Bjob%5D-8f058b0346db2aff.js" defer=""></script><script src="/dashboard/_next/static/-bih7JVStsXyeasac-dvQ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/-bih7JVStsXyeasac-dvQ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters/[cluster]/[job]","query":{},"buildId":"-bih7JVStsXyeasac-dvQ","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>

sky/dashboard/out/clusters/[cluster].html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-ac3a34c8f9fef041.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-3d59f75e2ccf9321.js" defer=""></script><script src="/dashboard/_next/static/chunks/6130-2be46d70a38f1e82.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-d67458fcb1386c92.js" defer=""></script><script src="/dashboard/_next/static/chunks/7411-b15471acd2cba716.js" defer=""></script><script src="/dashboard/_next/static/chunks/1272-1ef0bf0237faccdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/7359-c8d04e06886000b3.js" defer=""></script><script src="/dashboard/_next/static/chunks/6212-7bd06f60ba693125.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-01359c57e018caa4.js" defer=""></script><script src="/dashboard/_next/static/chunks/3850-ff4a9a69d978632b.js" defer=""></script><script src="/dashboard/_next/static/chunks/8969-66237729cdf9749e.js" defer=""></script><script src="/dashboard/_next/static/chunks/6990-f6818c84ed8f1c86.js" defer=""></script><script src="/dashboard/_next/static/chunks/6135-4b4d5e824b7f9d3c.js" defer=""></script><script src="/dashboard/_next/static/chunks/1121-d0782b9251f0fcd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/6601-06114c982db410b6.js" defer=""></script><script src="/dashboard/_next/static/chunks/3015-7e0e8f06bb2f881c.js" defer=""></script><script src="/dashboard/_next/static/chunks/6856-5fdc9b851a18acdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/1871-49141c317f3a9020.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D-477555ab7c0b13d8.js" defer=""></script><script src="/dashboard/_next/static
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-ac3a34c8f9fef041.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-3d59f75e2ccf9321.js" defer=""></script><script src="/dashboard/_next/static/chunks/6130-2be46d70a38f1e82.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-d67458fcb1386c92.js" defer=""></script><script src="/dashboard/_next/static/chunks/7411-b15471acd2cba716.js" defer=""></script><script src="/dashboard/_next/static/chunks/1272-1ef0bf0237faccdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/7359-c8d04e06886000b3.js" defer=""></script><script src="/dashboard/_next/static/chunks/6212-7bd06f60ba693125.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-01359c57e018caa4.js" defer=""></script><script src="/dashboard/_next/static/chunks/3850-ff4a9a69d978632b.js" defer=""></script><script src="/dashboard/_next/static/chunks/8969-66237729cdf9749e.js" defer=""></script><script src="/dashboard/_next/static/chunks/6990-f6818c84ed8f1c86.js" defer=""></script><script src="/dashboard/_next/static/chunks/6135-4b4d5e824b7f9d3c.js" defer=""></script><script src="/dashboard/_next/static/chunks/1121-d0782b9251f0fcd3.js" defer=""></script><script src="/dashboard/_next/static/chunks/6601-06114c982db410b6.js" defer=""></script><script src="/dashboard/_next/static/chunks/3015-7e0e8f06bb2f881c.js" defer=""></script><script src="/dashboard/_next/static/chunks/6856-5fdc9b851a18acdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/1871-49141c317f3a9020.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D-477555ab7c0b13d8.js" defer=""></script><script src="/dashboard/_next/static/-bih7JVStsXyeasac-dvQ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/-bih7JVStsXyeasac-dvQ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters/[cluster]","query":{},"buildId":"-bih7JVStsXyeasac-dvQ","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>