skypilot-nightly 1.0.0.dev20250828__py3-none-any.whl → 1.0.0.dev20250829__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/backends/backend_utils.py +130 -40
- sky/backends/cloud_vm_ray_backend.py +19 -3
- sky/backends/wheel_utils.py +35 -8
- sky/clouds/aws.py +118 -1
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/_next/static/chunks/{webpack-6dae1cd599a34def.js → webpack-6e76f636a048e145.js} +1 -1
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs/pools/[pool].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/global_user_state.py +58 -10
- sky/jobs/server/server.py +2 -1
- sky/provision/aws/config.py +78 -3
- sky/provision/aws/instance.py +45 -6
- sky/provision/kubernetes/utils.py +9 -0
- sky/schemas/db/global_user_state/007_cluster_event_request_id.py +34 -0
- sky/serve/server/server.py +2 -1
- sky/server/common.py +1 -2
- sky/server/daemons.py +6 -0
- sky/server/requests/executor.py +3 -2
- sky/server/requests/payloads.py +3 -1
- sky/server/requests/preconditions.py +3 -2
- sky/server/requests/requests.py +110 -29
- sky/server/server.py +70 -61
- sky/server/stream_utils.py +7 -5
- sky/setup_files/dependencies.py +6 -1
- sky/sky_logging.py +28 -0
- sky/skylet/constants.py +6 -0
- sky/templates/aws-ray.yml.j2 +1 -0
- sky/utils/annotations.py +8 -2
- sky/utils/cluster_utils.py +3 -3
- sky/utils/db/db_utils.py +11 -0
- sky/utils/db/migration_utils.py +1 -1
- sky/utils/kubernetes_enums.py +1 -0
- sky/utils/lock_events.py +94 -0
- sky/utils/timeline.py +24 -93
- {skypilot_nightly-1.0.0.dev20250828.dist-info → skypilot_nightly-1.0.0.dev20250829.dist-info}/METADATA +8 -2
- {skypilot_nightly-1.0.0.dev20250828.dist-info → skypilot_nightly-1.0.0.dev20250829.dist-info}/RECORD +56 -54
- /sky/dashboard/out/_next/static/{9DW6d9jaP2kZt0NcgIfFa → hYJYFIxp_ZFONR4wTIJqZ}/_buildManifest.js +0 -0
- /sky/dashboard/out/_next/static/{9DW6d9jaP2kZt0NcgIfFa → hYJYFIxp_ZFONR4wTIJqZ}/_ssgManifest.js +0 -0
- {skypilot_nightly-1.0.0.dev20250828.dist-info → skypilot_nightly-1.0.0.dev20250829.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20250828.dist-info → skypilot_nightly-1.0.0.dev20250829.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20250828.dist-info → skypilot_nightly-1.0.0.dev20250829.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20250828.dist-info → skypilot_nightly-1.0.0.dev20250829.dist-info}/top_level.txt +0 -0
sky/__init__.py
CHANGED
@@ -7,7 +7,7 @@ import urllib.request
 from sky.utils import directory_utils

 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '…
+_SKYPILOT_COMMIT_SHA = '27f74c78af59ef98180b59a30c43410e46e3ce37'


 def _get_git_commit():
@@ -37,7 +37,7 @@ def _get_git_commit():


 __commit__ = _get_git_commit()
-__version__ = '1.0.0.…
+__version__ = '1.0.0.dev20250829'
 __root_dir__ = directory_utils.get_sky_dir()

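The two changed lines above are build-time stamps: the wheel build fills in the commit SHA placeholder and the dated dev version. As a minimal sketch only (a hypothetical stamp_commit helper, not SkyPilot's actual build step), such stamping can be done with a regex rewrite of the module source before packaging:

import re

def stamp_commit(init_path: str, sha: str, version: str) -> None:
    """Rewrite the placeholder assignments before building the wheel."""
    with open(init_path, 'r', encoding='utf-8') as f:
        src = f.read()
    # Replace whatever is currently assigned with the release values.
    src = re.sub(r"_SKYPILOT_COMMIT_SHA = '[^']*'",
                 f"_SKYPILOT_COMMIT_SHA = '{sha}'", src)
    src = re.sub(r"__version__ = '[^']*'",
                 f"__version__ = '{version}'", src)
    with open(init_path, 'w', encoding='utf-8') as f:
        f.write(src)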
sky/backends/backend_utils.py
CHANGED
@@ -1409,6 +1409,62 @@ def ssh_credential_from_yaml(
     return credentials


+def ssh_credentials_from_handles(
+    handles: List['cloud_vm_ray_backend.CloudVmRayResourceHandle'],
+) -> List[Dict[str, Any]]:
+    """Returns ssh_user, ssh_private_key and ssh_control name.
+    """
+    non_empty_cluster_yaml_paths = [
+        handle.cluster_yaml
+        for handle in handles
+        if handle.cluster_yaml is not None
+    ]
+    cluster_yaml_dicts = global_user_state.get_cluster_yaml_dict_multiple(
+        non_empty_cluster_yaml_paths)
+    cluster_yaml_dicts_to_index = {
+        cluster_yaml_path: cluster_yaml_dict
+        for cluster_yaml_path, cluster_yaml_dict in zip(
+            non_empty_cluster_yaml_paths, cluster_yaml_dicts)
+    }
+
+    credentials_to_return: List[Dict[str, Any]] = []
+    for handle in handles:
+        if handle.cluster_yaml is None:
+            credentials_to_return.append(dict())
+            continue
+        ssh_user = handle.ssh_user
+        docker_user = handle.docker_user
+        config = cluster_yaml_dicts_to_index[handle.cluster_yaml]
+        auth_section = config['auth']
+        if ssh_user is None:
+            ssh_user = auth_section['ssh_user'].strip()
+        ssh_private_key_path = auth_section.get('ssh_private_key')
+        ssh_control_name = config.get('cluster_name', '__default__')
+        ssh_proxy_command = auth_section.get('ssh_proxy_command')
+
+        # Update the ssh_user placeholder in the proxy command, if required.
+        if (ssh_proxy_command is not None and
+                constants.SKY_SSH_USER_PLACEHOLDER in ssh_proxy_command):
+            ssh_proxy_command = ssh_proxy_command.replace(
+                constants.SKY_SSH_USER_PLACEHOLDER, ssh_user)
+
+        credentials = {
+            'ssh_user': ssh_user,
+            'ssh_private_key': ssh_private_key_path,
+            'ssh_control_name': ssh_control_name,
+            'ssh_proxy_command': ssh_proxy_command,
+        }
+        if docker_user is not None:
+            credentials['docker_user'] = docker_user
+        ssh_provider_module = config['provider']['module']
+        # If we are running the ssh command on a Kubernetes node.
+        if 'kubernetes' in ssh_provider_module:
+            credentials['disable_control_master'] = True
+        credentials_to_return.append(credentials)
+
+    return credentials_to_return
+
+
 def parallel_data_transfer_to_nodes(
     runners: List[command_runner.CommandRunner],
     source: Optional[str],
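The point of the new helper is batching: a single get_cluster_yaml_dict_multiple call replaces one YAML lookup per handle. A hedged usage fragment (assuming `records` as returned inside get_clusters below; the results align positionally with the input handles, with an empty dict for handles that have no cluster YAML):

handles = [record['handle'] for record in records if record['handle']]
creds_list = ssh_credentials_from_handles(handles)
for handle, creds in zip(handles, creds_list):
    if not creds:
        continue  # this handle had no cluster YAML
    print(creds['ssh_user'], creds.get('ssh_proxy_command'))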
@@ -2056,7 +2112,10 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
                 f'{output}\n', stderr)
         return (*_count_healthy_nodes_from_ray(output), output, stderr)

+    ray_status_details: Optional[str] = None
+
     def run_ray_status_to_check_ray_cluster_healthy() -> bool:
+        nonlocal ray_status_details
         try:
             # NOTE: fetching the IPs is very slow as it calls into
             # `ray get head-ip/worker-ips`. Using cached IPs is safe because
@@ -2134,19 +2193,25 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
                 # showing up
                 time.sleep(1)

+            ray_status_details = (
+                f'{ready_head + ready_workers}/{total_nodes} ready')
             raise RuntimeError(
                 f'Refreshing status ({cluster_name!r}): ray status not showing '
                 f'all nodes ({ready_head + ready_workers}/'
                 f'{total_nodes});\noutput:\n{output}\nstderr:\n{stderr}')

         except exceptions.FetchClusterInfoError:
+            ray_status_details = 'failed to get IPs'
             logger.debug(
                 f'Refreshing status ({cluster_name!r}) failed to get IPs.')
         except RuntimeError as e:
+            if ray_status_details is None:
+                ray_status_details = str(e)
             logger.debug(common_utils.format_exception(e))
         except Exception as e:  # pylint: disable=broad-except
             # This can be raised by `external_ssh_ports()`, due to the
             # underlying call to kubernetes API.
+            ray_status_details = str(e)
             logger.debug(f'Refreshing status ({cluster_name!r}) failed: ',
                          exc_info=e)
         return False
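The nonlocal pattern above lets the inner health probe report why it failed without changing its boolean return contract. A self-contained sketch of the same pattern (the probe/do_probe names are illustrative only):

from typing import Optional, Tuple

def do_probe() -> None:
    # Stand-in for the real health check; always fails in this sketch.
    raise RuntimeError('2/3 nodes ready')

def check_healthy() -> Tuple[bool, Optional[str]]:
    details: Optional[str] = None

    def probe() -> bool:
        nonlocal details
        try:
            do_probe()
            return True
        except RuntimeError as e:
            details = str(e)  # surface the failure reason to the caller
            return False

    return probe(), details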
@@ -2259,6 +2324,10 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
     # (2) Otherwise, we will reset the autostop setting, unless the cluster is
     # autostopping/autodowning.
     some_nodes_terminated = 0 < len(node_statuses) < handle.launched_nodes
+    # If all nodes are up and the ray cluster is healthy, we would have
+    # returned earlier. So if all_nodes_up is True and we are here, the ray
+    # cluster must have been unhealthy.
+    ray_cluster_unhealthy = all_nodes_up
     some_nodes_not_stopped = any(status[0] != status_lib.ClusterStatus.STOPPED
                                  for status in node_statuses)
     is_abnormal = (some_nodes_terminated or some_nodes_not_stopped)
@@ -2269,8 +2338,10 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:

     if some_nodes_terminated:
         init_reason = 'one or more nodes terminated'
+    elif ray_cluster_unhealthy:
+        init_reason = f'ray cluster is unhealthy ({ray_status_details})'
     elif some_nodes_not_stopped:
-        init_reason = 'some…
+        init_reason = 'some but not all nodes are stopped'
     logger.debug('The cluster is abnormal. Setting to INIT status. '
                  f'node_statuses: {node_statuses}')
     if record['autostop'] >= 0:
@@ -2365,7 +2436,8 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
         # Some status reason clears after a certain time (e.g. k8s events
         # are only stored for an hour by default), so it is possible that
         # the previous event has a status reason, but now it does not.
-        init_reason_regex = f'^Cluster is abnormal because…
+        init_reason_regex = (f'^Cluster is abnormal because '
+                             f'{re.escape(init_reason)}.*')
         log_message = f'Cluster is abnormal because {init_reason}'
         if status_reason:
             log_message += f' ({status_reason})'
@@ -2385,10 +2457,17 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
     return global_user_state.get_cluster_from_name(cluster_name)
     # Now is_abnormal is False: either node_statuses is empty or all nodes are
     # STOPPED.
+    verb = 'terminated' if to_terminate else 'stopped'
     backend = backends.CloudVmRayBackend()
     global_user_state.add_cluster_event(
-        cluster_name,
-        …
+        cluster_name,
+        None,
+        f'All nodes {verb}, cleaning up the cluster.',
+        global_user_state.ClusterEventType.STATUS_CHANGE,
+        # This won't do anything for a terminated cluster, but it's needed
+        # for a stopped cluster.
+        nop_if_duplicate=True,
+    )
     backend.post_teardown_cleanup(handle, terminate=to_terminate, purge=False)
     return global_user_state.get_cluster_from_name(cluster_name)
@@ -2916,44 +2995,57 @@ def get_clusters(
         logger.info(f'Cluster(s) not found: {bright}{clusters_str}{reset}.')
         records = new_records

-    def …
-    …
+    def _update_records_with_credentials_and_resources_str(
+            records: List[Optional[Dict[str, Any]]]) -> None:
         """Add the credentials to the record.

         This is useful for the client side to setup the ssh config of the
         cluster.
         """
-        …
-        handle…
-        …
+        records_with_handle = []
+
+        # only act on records that have a handle
+        for record in records:
+            if record is None:
+                continue
+            handle = record['handle']
+            if handle is None:
+                continue
+            record[
+                'resources_str'] = resources_utils.get_readable_resources_repr(
+                    handle, simplify=True)
+            record[
+                'resources_str_full'] = resources_utils.get_readable_resources_repr(
+                    handle, simplify=False)
+            records_with_handle.append(record)
+        if len(records_with_handle) == 0:
             return
-        …
+
+        handles = [record['handle'] for record in records_with_handle]
+        credentials = ssh_credentials_from_handles(handles)
+        cached_private_keys: Dict[str, str] = {}
+        for record, credential in zip(records_with_handle, credentials):
+            if not credential:
+                continue
+            ssh_private_key_path = credential.get('ssh_private_key', None)
+            if ssh_private_key_path is not None:
+                expanded_private_key_path = os.path.expanduser(
+                    ssh_private_key_path)
+                if not os.path.exists(expanded_private_key_path):
+                    auth.create_ssh_key_files_from_db(ssh_private_key_path)
+            else:
+                private_key_path, _ = auth.get_or_generate_keys()
+                expanded_private_key_path = os.path.expanduser(
+                    private_key_path)
+            if expanded_private_key_path in cached_private_keys:
+                credential['ssh_private_key_content'] = cached_private_keys[
+                    expanded_private_key_path]
+            else:
+                with open(expanded_private_key_path, 'r',
+                          encoding='utf-8') as f:
+                    credential['ssh_private_key_content'] = f.read()
+                cached_private_keys[expanded_private_key_path] = credential[
+                    'ssh_private_key_content']
+            record['credentials'] = credential

     def _update_records_with_resources(
             records: List[Optional[Dict[str, Any]]]) -> None:
@@ -2980,9 +3072,7 @@ def get_clusters(
             if handle.launched_resources.accelerators else None)

     # Add auth_config to the records
-    …
-    _update_record_with_credentials_and_resources_str(record)
-    …
+    _update_records_with_credentials_and_resources_str(records)
     if refresh == common.StatusRefreshMode.NONE:
         # Add resources to the records
         _update_records_with_resources(records)
@@ -3022,7 +3112,7 @@ def get_clusters(
                 cluster_name,
                 force_refresh_statuses=force_refresh_statuses,
                 acquire_per_cluster_status_lock=True)
-            …
+            _update_records_with_credentials_and_resources_str([record])
         except (exceptions.ClusterStatusFetchingError,
                 exceptions.CloudUserIdentityError,
                 exceptions.ClusterOwnerIdentityMismatchError) as e:
sky/backends/cloud_vm_ray_backend.py
CHANGED
@@ -65,6 +65,7 @@ from sky.utils import context_utils
 from sky.utils import controller_utils
 from sky.utils import directory_utils
 from sky.utils import env_options
+from sky.utils import lock_events
 from sky.utils import locks
 from sky.utils import log_utils
 from sky.utils import message_utils
@@ -2498,7 +2499,12 @@ class CloudVmRayResourceHandle(backends.backend.ResourceHandle):
         self.stable_internal_external_ips = stable_internal_external_ips

     @context_utils.cancellation_guard
-    …
+    # We expect different requests to be acting on different clusters
+    # (= different handles), so we have no real expectation of cache hits
+    # across requests. Do not change this cache to global scope without
+    # understanding https://github.com/skypilot-org/skypilot/pull/6908
+    @annotations.lru_cache(scope='request', maxsize=10)
     @timeline.event
     def get_command_runners(self,
                             force_cached: bool = False,
@@ -2854,7 +2860,12 @@ class LocalResourcesHandle(CloudVmRayResourceHandle):
         self.is_grpc_enabled = False

     @context_utils.cancellation_guard
-    …
+    # We expect different requests to be acting on different clusters
+    # (= different handles), so we have no real expectation of cache hits
+    # across requests. Do not change this cache to global scope without
+    # understanding https://github.com/skypilot-org/skypilot/pull/6908
+    @annotations.lru_cache(scope='request', maxsize=10)
     @timeline.event
     def get_command_runners(self,
                             force_cached: bool = False,
@@ -3112,7 +3123,12 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
         retry_until_up: bool = False,
         skip_unnecessary_provisioning: bool = False,
     ) -> Tuple[Optional[CloudVmRayResourceHandle], bool]:
-        with …
+        with lock_events.DistributedLockEvent(lock_id, _CLUSTER_LOCK_TIMEOUT):
+            # Reset the spinner message to remove any mention of being
+            # blocked by other requests.
+            rich_utils.force_update_status(
+                ux_utils.spinner_message('Launching'))
+
             # Try to launch the existing cluster first. If no existing
             # cluster, this function will create a to_provision_config
             # with required resources.
sky/backends/wheel_utils.py
CHANGED
@@ -16,6 +16,7 @@ import pathlib
 import re
 import shutil
 import subprocess
+import sys
 import tempfile
 from typing import Optional, Tuple

@@ -133,19 +134,45 @@ def _build_sky_wheel() -> pathlib.Path:
     # It is important to normalize the path, otherwise 'pip wheel' would
     # treat the directory as a file and generate an empty wheel.
     norm_path = str(tmp_dir) + os.sep
+    # TODO(#5046): Consider adding native UV support for building wheels.
+    # Use `python -m pip` instead of `pip3` for better compatibility across
+    # different environments (conda, venv, UV, system Python, etc.)
     try:
-        # TODO(suquark): For python>=3.7, 'subprocess.run' supports capture
-        # of the output.
         subprocess.run([
-            '…
+            sys.executable, '-m', 'pip', 'wheel', '--no-deps', norm_path,
+            '--wheel-dir',
             str(tmp_dir)
         ],
-                       …
-                       …
-                       …
+                       capture_output=True,
+                       check=True,
+                       text=True)
     except subprocess.CalledProcessError as e:
-        …
-        …
+        error_msg = e.stderr
+        if 'No module named pip' in error_msg:
+            # pip module not found - provide helpful suggestions based on
+            # the available package managers
+            if shutil.which('uv'):
+                msg = ('pip module not found. Since you have UV installed, '
+                       'you can install pip by running:\n'
+                       '  uv pip install pip')
+            elif shutil.which('conda'):
+                msg = (
+                    'pip module not found. Since you have conda installed, '
+                    'you can install pip by running:\n'
+                    '  conda install pip')
+            else:
+                msg = ('pip module not found. Please install pip for your '
+                       f'Python environment ({sys.executable}).')
+        else:
+            # Other pip errors
+            msg = f'pip wheel command failed. Error: {error_msg}'
+        raise RuntimeError('Failed to build pip wheel for SkyPilot.\n' +
+                           msg) from e
+    except FileNotFoundError as e:
+        # Python executable not found (extremely rare)
+        raise RuntimeError(
+            f'Failed to build pip wheel for SkyPilot. '
+            f'Python executable not found: {sys.executable}') from e

     try:
         wheel_path = next(tmp_dir.glob(_WHEEL_PATTERN))
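The switch from a bare pip binary to `sys.executable -m pip` guarantees the wheel is built by the same interpreter that is running SkyPilot, which is what makes the conda/venv/UV cases behave. A standalone sketch of the invocation pattern:

import subprocess
import sys

# Runs pip from the *current* interpreter, whatever environment it lives in,
# instead of whichever `pip3` happens to be first on PATH.
proc = subprocess.run([sys.executable, '-m', 'pip', '--version'],
                      capture_output=True, text=True, check=False)
print(proc.stdout or proc.stderr)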
sky/clouds/aws.py
CHANGED
@@ -77,6 +77,103 @@ DEFAULT_SECURITY_GROUP_NAME = f'sky-sg-{common_utils.user_and_hostname_hash()}'
 # Security group to use when user specified ports in their resources.
 USER_PORTS_SECURITY_GROUP_NAME = 'sky-sg-{}'

+# GPU instance types that support EFA.
+# TODO(hailong): Some CPU instance types also support EFA, may need to support
+# all of them later.
+# TODO(hailong): Add the EFA info in catalog.
+_EFA_INSTANCE_TYPE_PREFIXES = [
+    'g4dn.',
+    'g5.',
+    'g6.',
+    'gr6.',
+    'g6e.',
+    'p4d.',
+    'p4de.',
+    'p5.',
+    'p5e.',
+    'p5en.',
+    'p6-b200.',
+]
+
+# Docker run options for EFA.
+# Refer to https://github.com/ofiwg/libfabric/issues/6437 for updating the
+# memlock ulimit.
+_EFA_DOCKER_RUN_OPTIONS = [
+    '--cap-add=IPC_LOCK',
+    '--device=/dev/infiniband',
+    '--ulimit memlock=-1:-1',
+]
+
+# AWS EFA image name.
+# Refer to https://docs.aws.amazon.com/dlami/latest/devguide/aws-deep-learning-base-gpu-ami-ubuntu-22-04.html for the latest version.  # pylint: disable=line-too-long
+# TODO(hailong): may need to update the version later.
+_EFA_IMAGE_NAME = 'Deep Learning Base OSS Nvidia Driver GPU AMI' \
+                  ' (Ubuntu 22.04) 20250808'
+
+
+def _is_efa_instance_type(instance_type: str) -> bool:
+    """Check if the instance type is in an EFA-supported instance family."""
+    return any(
+        instance_type.startswith(prefix)
+        for prefix in _EFA_INSTANCE_TYPE_PREFIXES)
+
+
+@annotations.lru_cache(scope='global', maxsize=128)
+def _get_efa_image_id(region_name: str) -> Optional[str]:
+    """Get the EFA image id for the given region."""
+    try:
+        client = aws.client('ec2', region_name=region_name)
+        response = client.describe_images(Filters=[{
+            'Name': 'name',
+            'Values': [_EFA_IMAGE_NAME]
+        }])
+        if 'Images' not in response:
+            return None
+        if len(response['Images']) == 0:
+            return None
+        available_images = [
+            img for img in response['Images'] if img['State'] == 'available'
+        ]
+        if len(available_images) == 0:
+            return None
+        sorted_images = sorted(available_images,
+                               key=lambda x: x['CreationDate'],
+                               reverse=True)
+        return sorted_images[0]['ImageId']
+    except (aws.botocore_exceptions().NoCredentialsError,
+            aws.botocore_exceptions().ProfileNotFound,
+            aws.botocore_exceptions().ClientError) as e:
+        with ux_utils.print_exception_no_traceback():
+            raise ValueError(f'Failed to get EFA image id: {e}') from None
+
+
+@annotations.lru_cache(scope='global', maxsize=128)
+def _get_max_efa_interfaces(instance_type: str, region_name: str) -> int:
+    """Get the maximum number of EFA interfaces for the given instance type."""
+    if not _is_efa_instance_type(instance_type):
+        return 0
+    try:
+        client = aws.client('ec2', region_name=region_name)
+        response = client.describe_instance_types(
+            InstanceTypes=[instance_type],
+            Filters=[{
+                'Name': 'network-info.efa-supported',
+                'Values': ['true']
+            }])
+        if 'InstanceTypes' in response and len(response['InstanceTypes']) > 0:
+            network_info = response['InstanceTypes'][0]['NetworkInfo']
+            if ('EfaInfo' in network_info and
+                    'MaximumEfaInterfaces' in network_info['EfaInfo']):
+                return network_info['EfaInfo']['MaximumEfaInterfaces']
+        return 0
+    except (aws.botocore_exceptions().NoCredentialsError,
+            aws.botocore_exceptions().ProfileNotFound,
+            aws.botocore_exceptions().ClientError) as e:
+        with ux_utils.print_exception_no_traceback():
+            raise ValueError(
+                f'Failed to get max EFA interfaces for {instance_type}: {e}'
+            ) from None
+

 class AWSIdentityType(enum.Enum):
     """AWS identity type.
@@ -295,8 +392,13 @@ class AWS(clouds.Cloud):
         image_id: Optional[Dict[Optional[str], str]],
         region_name: str,
         instance_type: str,
+        enable_efa: bool,
     ) -> str:
         if image_id is None:
+            if enable_efa:
+                efa_image_id = _get_efa_image_id(region_name)
+                if efa_image_id:
+                    return efa_image_id
             return cls._get_default_ami(region_name, instance_type)
         if None in image_id:
             image_id_str = image_id[None]
@@ -499,12 +601,25 @@ class AWS(clouds.Cloud):
         custom_resources = resources_utils.make_ray_custom_resources_str(
             acc_dict)

+        network_tier = (resources.network_tier if resources.network_tier
+                        is not None else resources_utils.NetworkTier.STANDARD)
+        if network_tier == resources_utils.NetworkTier.BEST:
+            max_efa_interfaces = _get_max_efa_interfaces(
+                resources.instance_type, region_name)
+            enable_efa = max_efa_interfaces > 0
+        else:
+            max_efa_interfaces = 0
+            enable_efa = False
+
+        docker_run_options = []
         if resources.extract_docker_image() is not None:
             image_id_to_use = None
+            if enable_efa:
+                docker_run_options = _EFA_DOCKER_RUN_OPTIONS
         else:
             image_id_to_use = resources.image_id
         image_id = self._get_image_id(image_id_to_use, region_name,
-                                      resources.instance_type)
+                                      resources.instance_type, enable_efa)

         root_device_name = self.get_image_root_device_name(
             image_id, region_name)
@@ -563,6 +678,8 @@ class AWS(clouds.Cloud):
             'security_group': security_group,
             'security_group_managed_by_skypilot':
                 str(security_group != user_security_group).lower(),
+            'max_efa_interfaces': max_efa_interfaces,
+            'docker_run_options': docker_run_options,
             **AWS._get_disk_specs(resources.disk_tier)
         }
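The two cached helpers above wrap real EC2 APIs: describe_images filtered by AMI name, and describe_instance_types with the network-info.efa-supported filter (NetworkInfo, EfaInfo, and MaximumEfaInterfaces are real response fields). A standalone sketch using plain boto3 (the aws.client wrapper above is SkyPilot-internal; the region and instance type here are just examples):

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')
resp = ec2.describe_instance_types(
    InstanceTypes=['p4d.24xlarge'],
    Filters=[{'Name': 'network-info.efa-supported', 'Values': ['true']}])
for it in resp.get('InstanceTypes', []):
    # EfaInfo is only present for EFA-capable instance types.
    efa_info = it['NetworkInfo'].get('EfaInfo', {})
    print(it['InstanceType'], efa_info.get('MaximumEfaInterfaces', 0))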
sky/dashboard/out/404.html
CHANGED
@@ -1 +1 @@
-<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-…
+<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-6e76f636a048e145.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c66a4e8afc46f17b.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"statusCode":404}},"page":"/_error","query":{},"buildId":"hYJYFIxp_ZFONR4wTIJqZ","assetPrefix":"/dashboard","nextExport":true,"isFallback":false,"gip":true,"scriptLoader":[]}</script></body></html>
sky/dashboard/out/_next/static/chunks/{webpack-6dae1cd599a34def.js → webpack-6e76f636a048e145.js}
RENAMED
@@ -1 +1 @@
-!function(){"use strict";var t,e,n,r,c,o,u,a,i,f={},s={};function d(t){var e=s[t];if(void 0!==e)return e.exports;var n=s[t]={exports:{}},r=!0;try{f[t](n,n.exports,d),r=!1}finally{r&&delete s[t]}return n.exports}d.m=f,t=[],d.O=function(e,n,r,c){if(n){c=c||0;for(var o=t.length;o>0&&t[o-1][2]>c;o--)t[o]=t[o-1];t[o]=[n,r,c];return}for(var u=1/0,o=0;o<t.length;o++){for(var n=t[o][0],r=t[o][1],c=t[o][2],a=!0,i=0;i<n.length;i++)u>=c&&Object.keys(d.O).every(function(t){return d.O[t](n[i])})?n.splice(i--,1):(a=!1,c<u&&(u=c));if(a){t.splice(o--,1);var f=r();void 0!==f&&(e=f)}}return e},d.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return d.d(e,{a:e}),e},n=Object.getPrototypeOf?function(t){return Object.getPrototypeOf(t)}:function(t){return t.__proto__},d.t=function(t,r){if(1&r&&(t=this(t)),8&r||"object"==typeof t&&t&&(4&r&&t.__esModule||16&r&&"function"==typeof t.then))return t;var c=Object.create(null);d.r(c);var o={};e=e||[null,n({}),n([]),n(n)];for(var u=2&r&&t;"object"==typeof u&&!~e.indexOf(u);u=n(u))Object.getOwnPropertyNames(u).forEach(function(e){o[e]=function(){return t[e]}});return o.default=function(){return t},d.d(c,o),c},d.d=function(t,e){for(var n in e)d.o(e,n)&&!d.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:e[n]})},d.f={},d.e=function(t){return Promise.all(Object.keys(d.f).reduce(function(e,n){return d.f[n](t,e),e},[]))},d.u=function(t){return 2350===t?"static/chunks/2350.fab69e61bac57b23.js":…
+!function(){"use strict";var t,e,n,r,c,o,u,a,i,f={},s={};function d(t){var e=s[t];if(void 0!==e)return e.exports;var n=s[t]={exports:{}},r=!0;try{f[t](n,n.exports,d),r=!1}finally{r&&delete s[t]}return n.exports}d.m=f,t=[],d.O=function(e,n,r,c){if(n){c=c||0;for(var o=t.length;o>0&&t[o-1][2]>c;o--)t[o]=t[o-1];t[o]=[n,r,c];return}for(var u=1/0,o=0;o<t.length;o++){for(var n=t[o][0],r=t[o][1],c=t[o][2],a=!0,i=0;i<n.length;i++)u>=c&&Object.keys(d.O).every(function(t){return d.O[t](n[i])})?n.splice(i--,1):(a=!1,c<u&&(u=c));if(a){t.splice(o--,1);var f=r();void 0!==f&&(e=f)}}return e},d.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return d.d(e,{a:e}),e},n=Object.getPrototypeOf?function(t){return Object.getPrototypeOf(t)}:function(t){return t.__proto__},d.t=function(t,r){if(1&r&&(t=this(t)),8&r||"object"==typeof t&&t&&(4&r&&t.__esModule||16&r&&"function"==typeof t.then))return t;var c=Object.create(null);d.r(c);var o={};e=e||[null,n({}),n([]),n(n)];for(var u=2&r&&t;"object"==typeof u&&!~e.indexOf(u);u=n(u))Object.getOwnPropertyNames(u).forEach(function(e){o[e]=function(){return t[e]}});return o.default=function(){return t},d.d(c,o),c},d.d=function(t,e){for(var n in e)d.o(e,n)&&!d.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:e[n]})},d.f={},d.e=function(t){return Promise.all(Object.keys(d.f).reduce(function(e,n){return d.f[n](t,e),e},[]))},d.u=function(t){return 2350===t?"static/chunks/2350.fab69e61bac57b23.js":7325===t?"static/chunks/7325.b4bc99ce0892dcd5.js":3937===t?"static/chunks/3937.210053269f121201.js":9025===t?"static/chunks/9025.a1bef12d672bb66d.js":9984===t?"static/chunks/9984.7eb6cc51fb460cae.js":9946===t?"static/chunks/9946.3b7b43c217ff70ec.js":7669===t?"static/chunks/7669.1f5d9a402bf5cc42.js":4045===t?"static/chunks/4045.b30465273dc5e468.js":4725===t?"static/chunks/4725.10f7a9a5d3ea8208.js":3785===t?"static/chunks/3785.d5b86f6ebc88e6e6.js":4783===t?"static/chunks/4783.c485f48348349f47.js":"static/chunks/"+t+"-"+({616:"3d59f75e2ccf9321",1121:"8afcf719ea87debc",1141:"943efc7aff0f0c06",1272:"1ef0bf0237faccdb",3015:"6c9c09593b1e67b6",3850:"ff4a9a69d978632b",4676:"9da7fdbde90b5549",5739:"d67458fcb1386c92",6130:"2be46d70a38f1e82",6135:"4b4d5e824b7f9d3c",6601:"06114c982db410b6",6856:"049014c6d43d127b",6989:"01359c57e018caa4",6990:"08b2a1cae076a943",7205:"88191679e7988c57",7411:"b15471acd2cba716",8969:"4a6f1a928fb6d370",9037:"89a84fd7fa31362d"})[t]+".js"},d.miniCssF=function(t){},d.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||Function("return this")()}catch(t){if("object"==typeof window)return window}}(),d.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},r={},c="_N_E:",d.l=function(t,e,n,o){if(r[t]){r[t].push(e);return}if(void 0!==n)for(var u,a,i=document.getElementsByTagName("script"),f=0;f<i.length;f++){var s=i[f];if(s.getAttribute("src")==t||s.getAttribute("data-webpack")==c+n){u=s;break}}u||(a=!0,(u=document.createElement("script")).charset="utf-8",u.timeout=120,d.nc&&u.setAttribute("nonce",d.nc),u.setAttribute("data-webpack",c+n),u.src=d.tu(t)),r[t]=[e];var b=function(e,n){u.onerror=u.onload=null,clearTimeout(l);var c=r[t];if(delete r[t],u.parentNode&&u.parentNode.removeChild(u),c&&c.forEach(function(t){return t(n)}),e)return e(n)},l=setTimeout(b.bind(null,void 0,{type:"timeout",target:u}),12e4);u.onerror=b.bind(null,u.onerror),u.onload=b.bind(null,u.onload),a&&document.head.appendChild(u)},d.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},d.tt=function(){return void 0===o&&(o={createScriptURL:function(t){return t}},"undefined"!=typeof trustedTypes&&trustedTypes.createPolicy&&(o=trustedTypes.createPolicy("nextjs#bundler",o))),o},d.tu=function(t){return d.tt().createScriptURL(t)},d.p="/dashboard/_next/",u={2272:0},d.f.j=function(t,e){var n=d.o(u,t)?u[t]:void 0;if(0!==n){if(n)e.push(n[2]);else if(2272!=t){var r=new Promise(function(e,r){n=u[t]=[e,r]});e.push(n[2]=r);var c=d.p+d.u(t),o=Error();d.l(c,function(e){if(d.o(u,t)&&(0!==(n=u[t])&&(u[t]=void 0),n)){var r=e&&("load"===e.type?"missing":e.type),c=e&&e.target&&e.target.src;o.message="Loading chunk "+t+" failed.\n("+r+": "+c+")",o.name="ChunkLoadError",o.type=r,o.request=c,n[1](o)}},"chunk-"+t,t)}else u[t]=0}},d.O.j=function(t){return 0===u[t]},a=function(t,e){var n,r,c=e[0],o=e[1],a=e[2],i=0;if(c.some(function(t){return 0!==u[t]})){for(n in o)d.o(o,n)&&(d.m[n]=o[n]);if(a)var f=a(d)}for(t&&t(e);i<c.length;i++)r=c[i],d.o(u,r)&&u[r]&&u[r][0](),u[r]=0;return d.O(f)},(i=self.webpackChunk_N_E=self.webpackChunk_N_E||[]).forEach(a.bind(null,0)),i.push=a.bind(null,i.push.bind(i)),d.nc=void 0}();
sky/dashboard/out/clusters/[cluster]/[job].html
CHANGED
@@ -1 +1 @@
-<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-…
+<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-6e76f636a048e145.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-3d59f75e2ccf9321.js" defer=""></script><script src="/dashboard/_next/static/chunks/6130-2be46d70a38f1e82.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-d67458fcb1386c92.js" defer=""></script><script src="/dashboard/_next/static/chunks/7411-b15471acd2cba716.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-01359c57e018caa4.js" defer=""></script><script src="/dashboard/_next/static/chunks/3850-ff4a9a69d978632b.js" defer=""></script><script src="/dashboard/_next/static/chunks/6135-4b4d5e824b7f9d3c.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D/%5Bjob%5D-06afb50d25f7c61f.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters/[cluster]/[job]","query":{},"buildId":"hYJYFIxp_ZFONR4wTIJqZ","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>
sky/dashboard/out/clusters/[cluster].html
CHANGED
@@ -1 +1 @@
-<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-…
+<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-6e76f636a048e145.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/616-3d59f75e2ccf9321.js" defer=""></script><script src="/dashboard/_next/static/chunks/6130-2be46d70a38f1e82.js" defer=""></script><script src="/dashboard/_next/static/chunks/5739-d67458fcb1386c92.js" defer=""></script><script src="/dashboard/_next/static/chunks/7411-b15471acd2cba716.js" defer=""></script><script src="/dashboard/_next/static/chunks/1272-1ef0bf0237faccdb.js" defer=""></script><script src="/dashboard/_next/static/chunks/4676-9da7fdbde90b5549.js" defer=""></script><script src="/dashboard/_next/static/chunks/754-d0da8ab45f9509e9.js" defer=""></script><script src="/dashboard/_next/static/chunks/6989-01359c57e018caa4.js" defer=""></script><script src="/dashboard/_next/static/chunks/3850-ff4a9a69d978632b.js" defer=""></script><script src="/dashboard/_next/static/chunks/8969-4a6f1a928fb6d370.js" defer=""></script><script src="/dashboard/_next/static/chunks/6990-08b2a1cae076a943.js" defer=""></script><script src="/dashboard/_next/static/chunks/6135-4b4d5e824b7f9d3c.js" defer=""></script><script src="/dashboard/_next/static/chunks/1121-8afcf719ea87debc.js" defer=""></script><script src="/dashboard/_next/static/chunks/6856-049014c6d43d127b.js" defer=""></script><script src="/dashboard/_next/static/chunks/6601-06114c982db410b6.js" defer=""></script><script src="/dashboard/_next/static/chunks/3015-6c9c09593b1e67b6.js" defer=""></script><script src="/dashboard/_next/static/chunks/9037-89a84fd7fa31362d.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters/%5Bcluster%5D-a0527109c2fab467.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters/[cluster]","query":{},"buildId":"hYJYFIxp_ZFONR4wTIJqZ","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>
sky/dashboard/out/clusters.html
CHANGED
@@ -1 +1 @@
-<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-…
+<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-6e76f636a048e145.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/clusters-469814d711d63b1b.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/hYJYFIxp_ZFONR4wTIJqZ/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{}},"page":"/clusters","query":{},"buildId":"hYJYFIxp_ZFONR4wTIJqZ","assetPrefix":"/dashboard","nextExport":true,"autoExport":true,"isFallback":false,"scriptLoader":[]}</script></body></html>