skypilot-nightly 1.0.0.dev20250827__py3-none-any.whl → 1.0.0.dev20250828__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of skypilot-nightly might be problematic.
- sky/__init__.py +2 -2
- sky/admin_policy.py +11 -10
- sky/authentication.py +1 -1
- sky/backends/backend.py +3 -5
- sky/backends/backend_utils.py +11 -13
- sky/backends/cloud_vm_ray_backend.py +11 -22
- sky/backends/local_docker_backend.py +3 -8
- sky/client/cli/command.py +41 -9
- sky/client/sdk.py +23 -8
- sky/client/sdk_async.py +6 -2
- sky/core.py +1 -4
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/_next/static/chunks/{webpack-6e76f636a048e145.js → webpack-6dae1cd599a34def.js} +1 -1
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs/pools/[pool].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/global_user_state.py +24 -12
- sky/jobs/client/sdk.py +5 -2
- sky/jobs/recovery_strategy.py +9 -4
- sky/logs/agent.py +2 -2
- sky/logs/aws.py +6 -3
- sky/provision/do/utils.py +2 -1
- sky/provision/kubernetes/instance.py +55 -11
- sky/provision/kubernetes/utils.py +2 -2
- sky/provision/nebius/utils.py +36 -2
- sky/serve/client/impl.py +5 -4
- sky/serve/replica_managers.py +4 -3
- sky/serve/serve_utils.py +2 -2
- sky/serve/server/impl.py +3 -2
- sky/server/auth/oauth2_proxy.py +10 -4
- sky/server/common.py +3 -2
- sky/server/daemons.py +10 -5
- sky/server/requests/executor.py +2 -1
- sky/server/requests/requests.py +21 -0
- sky/server/server.py +16 -0
- sky/skylet/events.py +2 -3
- sky/skypilot_config.py +10 -10
- sky/task.py +1 -1
- sky/templates/nebius-ray.yml.j2 +4 -8
- sky/usage/usage_lib.py +3 -2
- sky/utils/common_utils.py +0 -72
- sky/utils/controller_utils.py +4 -3
- sky/utils/dag_utils.py +4 -4
- sky/utils/kubernetes/config_map_utils.py +3 -3
- sky/utils/schemas.py +3 -0
- sky/utils/yaml_utils.py +77 -10
- {skypilot_nightly-1.0.0.dev20250827.dist-info → skypilot_nightly-1.0.0.dev20250828.dist-info}/METADATA +1 -1
- {skypilot_nightly-1.0.0.dev20250827.dist-info → skypilot_nightly-1.0.0.dev20250828.dist-info}/RECORD +66 -66
- /sky/dashboard/out/_next/static/{-eL7Ky3bxVivzeLHNB9U6 → 9DW6d9jaP2kZt0NcgIfFa}/_buildManifest.js +0 -0
- /sky/dashboard/out/_next/static/{-eL7Ky3bxVivzeLHNB9U6 → 9DW6d9jaP2kZt0NcgIfFa}/_ssgManifest.js +0 -0
- {skypilot_nightly-1.0.0.dev20250827.dist-info → skypilot_nightly-1.0.0.dev20250828.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20250827.dist-info → skypilot_nightly-1.0.0.dev20250828.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20250827.dist-info → skypilot_nightly-1.0.0.dev20250828.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20250827.dist-info → skypilot_nightly-1.0.0.dev20250828.dist-info}/top_level.txt +0 -0
sky/__init__.py
CHANGED
@@ -7,7 +7,7 @@ import urllib.request
 from sky.utils import directory_utils

 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '
+_SKYPILOT_COMMIT_SHA = 'ff93214498e29e0aa9a73868b73613535f96b8a3'


 def _get_git_commit():
@@ -37,7 +37,7 @@ def _get_git_commit():


 __commit__ = _get_git_commit()
-__version__ = '1.0.0.
+__version__ = '1.0.0.dev20250828'
 __root_dir__ = directory_utils.get_sky_dir()

sky/admin_policy.py
CHANGED
@@ -13,6 +13,7 @@ from sky.adaptors import common as adaptors_common
 from sky.utils import common_utils
 from sky.utils import config_utils
 from sky.utils import ux_utils
+from sky.utils import yaml_utils

 if typing.TYPE_CHECKING:
     import requests
@@ -80,9 +81,9 @@ class UserRequest:

     def encode(self) -> str:
         return _UserRequestBody(
-            task=
-            skypilot_config=
-
+            task=yaml_utils.dump_yaml_str(self.task.to_yaml_config()),
+            skypilot_config=yaml_utils.dump_yaml_str(dict(
+                self.skypilot_config)),
             request_options=self.request_options,
             at_client_side=self.at_client_side,
         ).model_dump_json()
@@ -92,9 +93,9 @@ class UserRequest:
         user_request_body = _UserRequestBody.model_validate_json(body)
         return cls(
             task=sky.Task.from_yaml_config(
-
+                yaml_utils.read_yaml_all_str(user_request_body.task)[0]),
             skypilot_config=config_utils.Config.from_dict(
-
+                yaml_utils.read_yaml_all_str(
                     user_request_body.skypilot_config)[0]),
             request_options=user_request_body.request_options,
             at_client_side=user_request_body.at_client_side,
@@ -116,9 +117,9 @@ class MutatedUserRequest:

     def encode(self) -> str:
         return _MutatedUserRequestBody(
-            task=
-            skypilot_config=
-
+            task=yaml_utils.dump_yaml_str(self.task.to_yaml_config()),
+            skypilot_config=yaml_utils.dump_yaml_str(dict(
+                self.skypilot_config),)).model_dump_json()

     @classmethod
     def decode(cls, mutated_user_request_body: str,
@@ -126,14 +127,14 @@ class MutatedUserRequest:
         mutated_user_request_body = _MutatedUserRequestBody.model_validate_json(
             mutated_user_request_body)
         task = sky.Task.from_yaml_config(
-
+            yaml_utils.read_yaml_all_str(mutated_user_request_body.task)[0])
         # Some internal Task fields are not serialized. We need to manually
         # restore them from the original request.
         task.managed_job_dag = original_request.task.managed_job_dag
         task.service_name = original_request.task.service_name
         return cls(task=task,
                    skypilot_config=config_utils.Config.from_dict(
-
+                       yaml_utils.read_yaml_all_str(
                            mutated_user_request_body.skypilot_config)[0],))

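Note (not part of the diff): the admin_policy hunks above all converge on the same string round-trip through the new sky/utils/yaml_utils helpers. A minimal sketch of that round-trip, with the call shapes inferred from the usage shown in this diff and a hypothetical payload dict:

    # Hedged sketch: dump a dict to a YAML string and read it back, mirroring
    # how UserRequest.encode()/decode() now serialize the task and config.
    from sky.utils import yaml_utils

    task_config = {'name': 'demo', 'run': 'echo hello'}  # hypothetical payload
    encoded = yaml_utils.dump_yaml_str(task_config)     # dict -> YAML string
    decoded = yaml_utils.read_yaml_all_str(encoded)[0]  # first YAML document
    assert decoded == task_config

The same pattern (read_yaml/dump_yaml for files) recurs in the backends and client hunks below.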
sky/authentication.py
CHANGED
@@ -198,7 +198,7 @@ def configure_ssh_info(config: Dict[str, Any]) -> Dict[str, Any]:
     _, public_key_path = get_or_generate_keys()
     with open(public_key_path, 'r', encoding='utf-8') as f:
         public_key = f.read().strip()
-    config_str =
+    config_str = yaml_utils.dump_yaml_str(config)
     config_str = config_str.replace('skypilot:ssh_user',
                                     config['auth']['ssh_user'])
     config_str = config_str.replace('skypilot:ssh_public_key_content',
sky/backends/backend.py
CHANGED
@@ -147,9 +147,8 @@ class Backend(Generic[_ResourceHandleType]):
     def teardown(self,
                  handle: _ResourceHandleType,
                  terminate: bool,
-                 purge: bool = False
-
-        self._teardown(handle, terminate, purge, explicitly_requested)
+                 purge: bool = False) -> None:
+        self._teardown(handle, terminate, purge)

     def register_info(self, **kwargs) -> None:
         """Register backend-specific information."""
@@ -201,6 +200,5 @@ class Backend(Generic[_ResourceHandleType]):
     def _teardown(self,
                   handle: _ResourceHandleType,
                   terminate: bool,
-                  purge: bool = False
-                  explicitly_requested: bool = False):
+                  purge: bool = False):
         raise NotImplementedError
sky/backends/backend_utils.py
CHANGED
@@ -241,7 +241,7 @@ def _optimize_file_mounts(tmp_yaml_path: str) -> None:
         subprocess.CalledProcessError: If the file mounts are failed to be
             copied.
     """
-    yaml_config =
+    yaml_config = yaml_utils.read_yaml(tmp_yaml_path)

     file_mounts = yaml_config.get('file_mounts', {})
     # Remove the file mounts added by the newline.
@@ -325,7 +325,7 @@ def _optimize_file_mounts(tmp_yaml_path: str) -> None:
         shell=True,
         check=True)

-
+    yaml_utils.dump_yaml(tmp_yaml_path, yaml_config)


 def path_size_megabytes(path: str) -> int:
@@ -510,7 +510,7 @@ def _replace_yaml_dicts(
         for key in exclude_restore_key_name[:-1]:
             curr = curr[key]
         curr[exclude_restore_key_name[-1]] = value
-    return
+    return yaml_utils.dump_yaml_str(new_config)


 def get_expirable_clouds(
@@ -937,7 +937,7 @@ def write_cluster_config(
             tmp_yaml_path,
             cluster_config_overrides=cluster_config_overrides,
             context=region.name)
-        yaml_obj =
+        yaml_obj = yaml_utils.read_yaml(tmp_yaml_path)
         pod_config: Dict[str, Any] = yaml_obj['available_node_types'][
             'ray_head_default']['node_config']

@@ -976,7 +976,7 @@ def write_cluster_config(
     # Read the cluster name from the tmp yaml file, to take the backward
     # compatbility restortion above into account.
     # TODO: remove this after 2 minor releases, 0.10.0.
-    yaml_config =
+    yaml_config = yaml_utils.read_yaml(tmp_yaml_path)
     config_dict['cluster_name_on_cloud'] = yaml_config['cluster_name']

     # Make sure to do this before we optimize file mounts. Optimization is
@@ -1022,7 +1022,7 @@ def _add_auth_to_cluster_config(cloud: clouds.Cloud, tmp_yaml_path: str):

     This function's output removes comments included in the jinja2 template.
     """
-    config =
+    config = yaml_utils.read_yaml(tmp_yaml_path)
     # Check the availability of the cloud type.
     if isinstance(cloud, (
             clouds.AWS,
@@ -1054,7 +1054,7 @@ def _add_auth_to_cluster_config(cloud: clouds.Cloud, tmp_yaml_path: str):
         config = auth.setup_hyperbolic_authentication(config)
     else:
         assert False, cloud
-
+    yaml_utils.dump_yaml(tmp_yaml_path, config)


 def get_timestamp_from_run_timestamp(run_timestamp: str) -> float:
@@ -1156,7 +1156,7 @@ def _deterministic_cluster_yaml_hash(tmp_yaml_path: str) -> str:
     """

     # Load the yaml contents so that we can directly remove keys.
-    yaml_config =
+    yaml_config = yaml_utils.read_yaml(tmp_yaml_path)
     for key_list in _RAY_YAML_KEYS_TO_REMOVE_FOR_HASH:
         dict_to_remove_from = yaml_config
         found_key = True
@@ -1175,7 +1175,7 @@ def _deterministic_cluster_yaml_hash(tmp_yaml_path: str) -> str:
     config_hash = hashlib.sha256()

     yaml_hash = hashlib.sha256(
-
+        yaml_utils.dump_yaml_str(yaml_config).encode('utf-8'))
     config_hash.update(yaml_hash.digest())

     file_mounts = yaml_config.get('file_mounts', {})
@@ -2027,9 +2027,7 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
             'Cluster has no YAML file. Removing the cluster from cache.',
             global_user_state.ClusterEventType.STATUS_CHANGE,
             nop_if_duplicate=True)
-        global_user_state.remove_cluster(cluster_name,
-                                         terminate=True,
-                                         remove_events=True)
+        global_user_state.remove_cluster(cluster_name, terminate=True)
         logger.debug(f'Cluster {cluster_name!r} has no YAML file. '
                      'Removing the cluster from cache.')
         return None
@@ -2367,7 +2365,7 @@ def _update_cluster_status(cluster_name: str) -> Optional[Dict[str, Any]]:
            # Some status reason clears after a certain time (e.g. k8s events
            # are only stored for an hour by default), so it is possible that
            # the previous event has a status reason, but now it does not.
-           init_reason_regex = f'^Cluster is abnormal because {init_reason}
+           init_reason_regex = f'^Cluster is abnormal because {init_reason}.*'
            log_message = f'Cluster is abnormal because {init_reason}'
            if status_reason:
                log_message += f' ({status_reason})'
sky/backends/cloud_vm_ray_backend.py
CHANGED
@@ -1972,7 +1972,7 @@ class RetryingVmProvisioner(object):
         ray_config = global_user_state.get_cluster_yaml_dict(
             cluster_config_file)
         ray_config['upscaling_speed'] = 0
-
+        yaml_utils.dump_yaml(cluster_config_file, ray_config)
         start = time.time()
         returncode, stdout, stderr = ray_up()
         logger.debug(
@@ -3208,8 +3208,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             global_user_state.ClusterEventType.STATUS_CHANGE,
             nop_if_duplicate=True)
         global_user_state.remove_cluster(cluster_name,
-                                         terminate=True
-                                         remove_events=False)
+                                         terminate=True)
         usage_lib.messages.usage.update_final_cluster_status(
             None)
         logger.error(
@@ -4011,8 +4010,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
     def _teardown(self,
                   handle: CloudVmRayResourceHandle,
                   terminate: bool,
-                  purge: bool = False
-                  explicitly_requested: bool = False):
+                  purge: bool = False):
        """Tear down or stop the cluster.

         Args:
@@ -4087,8 +4085,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                 # ClusterOwnerIdentityMismatchError. The argument/flag
                 # `purge` should bypass such ID mismatch errors.
                 refresh_cluster_status=(
-                    not is_identity_mismatch_and_purge)
-                explicitly_requested=explicitly_requested)
+                    not is_identity_mismatch_and_purge))
             if terminate:
                 lock.force_unlock()
             break
@@ -4477,8 +4474,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                           purge: bool = False,
                           post_teardown_cleanup: bool = True,
                           refresh_cluster_status: bool = True,
-                          remove_from_db: bool = True
-                          explicitly_requested: bool = False) -> None:
+                          remove_from_db: bool = True) -> None:
         """Teardown the cluster without acquiring the cluster status lock.

         NOTE: This method should not be called without holding the cluster
@@ -4542,8 +4538,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                            f'provision yaml so it '
                            'has not been provisioned. Skipped.')
             global_user_state.remove_cluster(handle.cluster_name,
-                                             terminate=terminate
-                                             remove_events=False)
+                                             terminate=terminate)
             return
         log_path = os.path.join(os.path.expanduser(self.log_dir),
                                 'teardown.log')
@@ -4600,12 +4595,8 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
             raise

         if post_teardown_cleanup:
-            self.post_teardown_cleanup(
-
-                terminate,
-                purge,
-                remove_from_db,
-                explicitly_requested=explicitly_requested)
+            self.post_teardown_cleanup(handle, terminate, purge,
+                                       remove_from_db)
             return

         if (isinstance(cloud, clouds.IBM) and terminate and
@@ -4649,7 +4640,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                 prefix='sky_',
                 delete=False,
                 suffix='.yml') as f:
-
+            yaml_utils.dump_yaml(f.name, config)
             f.flush()

             teardown_verb = 'Terminating' if terminate else 'Stopping'
@@ -4705,8 +4696,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                               terminate: bool,
                               purge: bool = False,
                               remove_from_db: bool = True,
-                              failover: bool = False
-                              explicitly_requested: bool = False) -> None:
+                              failover: bool = False) -> None:
         """Cleanup local configs/caches and delete TPUs after teardown.

         This method will handle the following cleanup steps:
@@ -4884,8 +4874,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):

         if not terminate or remove_from_db:
             global_user_state.remove_cluster(handle.cluster_name,
-                                             terminate=terminate
-                                             remove_events=explicitly_requested)
+                                             terminate=terminate)

     def remove_cluster_config(self, handle: CloudVmRayResourceHandle) -> None:
         """Remove the YAML config of a cluster."""
sky/backends/local_docker_backend.py
CHANGED
@@ -256,9 +256,7 @@ class LocalDockerBackend(backends.Backend['LocalDockerResourceHandle']):
             logger.error(
                 'Unable to run container - nvidia runtime for docker not '
                 'found. Have you installed nvidia-docker on your machine?')
-            global_user_state.remove_cluster(cluster_name,
-                                             terminate=True,
-                                             remove_events=False)
+            global_user_state.remove_cluster(cluster_name, terminate=True)
             raise e
         self.containers[handle] = container
         logger.info(
@@ -325,8 +323,7 @@ class LocalDockerBackend(backends.Backend['LocalDockerResourceHandle']):
     def _teardown(self,
                   handle: LocalDockerResourceHandle,
                   terminate: bool,
-                  purge: bool = False
-                  explicitly_requested: bool = False):
+                  purge: bool = False):
         """Teardown kills the container."""
         del purge  # Unused.
         if not terminate:
@@ -342,9 +339,7 @@ class LocalDockerBackend(backends.Backend['LocalDockerResourceHandle']):
             container.remove(force=True)
         cluster_name = handle.get_cluster_name()

-        global_user_state.remove_cluster(cluster_name,
-                                         terminate=True,
-                                         remove_events=explicitly_requested)
+        global_user_state.remove_cluster(cluster_name, terminate=True)

         # --- Utilities ---

sky/client/cli/command.py
CHANGED
@@ -287,9 +287,10 @@ def _complete_cluster_name(ctx: click.Context, param: click.Parameter,
     del ctx, param  # Unused.
     # TODO(zhwu): we send requests to API server for completion, which can cause
     # large latency. We should investigate caching mechanism if needed.
-    response =
-
+    response = server_common.make_authenticated_request(
+        'GET',
         f'/api/completion/cluster_name?incomplete={incomplete}',
+        retry=False,
         timeout=2.0,
     )
     response.raise_for_status()
@@ -300,9 +301,10 @@ def _complete_storage_name(ctx: click.Context, param: click.Parameter,
                            incomplete: str) -> List[str]:
     """Handle shell completion for storage names."""
     del ctx, param  # Unused.
-    response =
-
+    response = server_common.make_authenticated_request(
+        'GET',
         f'/api/completion/storage_name?incomplete={incomplete}',
+        retry=False,
         timeout=2.0,
     )
     response.raise_for_status()
@@ -313,15 +315,34 @@ def _complete_volume_name(ctx: click.Context, param: click.Parameter,
                           incomplete: str) -> List[str]:
     """Handle shell completion for volume names."""
     del ctx, param  # Unused.
-    response =
-
+    response = server_common.make_authenticated_request(
+        'GET',
         f'/api/completion/volume_name?incomplete={incomplete}',
+        retry=False,
         timeout=2.0,
     )
     response.raise_for_status()
     return response.json()


+def _complete_api_request(ctx: click.Context, param: click.Parameter,
+                          incomplete: str) -> List[str]:
+    """Handle shell completion for API requests."""
+    del ctx, param  # Unused.
+    response = server_common.make_authenticated_request(
+        'GET',
+        f'/api/completion/api_request?incomplete={incomplete}',
+        retry=False,
+        timeout=2.0,
+    )
+    try:
+        response.raise_for_status()
+    except requests_lib.exceptions.HTTPError:
+        # Server may be outdated/missing this API. Silently skip.
+        return []
+    return response.json()
+
+
 def _complete_file_name(ctx: click.Context, param: click.Parameter,
                         incomplete: str) -> List[str]:
     """Handle shell completion for file names.
@@ -6018,7 +6039,10 @@ def api_stop():

 @api.command('logs', cls=_DocumentedCodeCommand)
 @flags.config_option(expose_value=False)
-@click.argument('request_id',
+@click.argument('request_id',
+                required=False,
+                type=str,
+                **_get_shell_complete_args(_complete_api_request))
 @click.option('--server-logs',
               is_flag=True,
               default=False,
@@ -6062,7 +6086,11 @@ def api_logs(request_id: Optional[str], server_logs: bool,

 @api.command('cancel', cls=_DocumentedCodeCommand)
 @flags.config_option(expose_value=False)
-@click.argument('request_ids',
+@click.argument('request_ids',
+                required=False,
+                type=str,
+                nargs=-1,
+                **_get_shell_complete_args(_complete_api_request))
 @flags.all_option('Cancel all your requests.')
 @flags.all_users_option('Cancel all requests from all users.')
 @usage_lib.entrypoint
@@ -6094,7 +6122,11 @@ def api_cancel(request_ids: Optional[List[str]], all: bool, all_users: bool):

 @api.command('status', cls=_DocumentedCodeCommand)
 @flags.config_option(expose_value=False)
-@click.argument('request_ids',
+@click.argument('request_ids',
+                required=False,
+                type=str,
+                nargs=-1,
+                **_get_shell_complete_args(_complete_api_request))
 @click.option('--all-status',
               '-a',
               is_flag=True,
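For orientation (not part of the diff): _complete_api_request follows click's standard dynamic-completion callback shape, and the decorators above wire it onto the request_id/request_ids arguments via _get_shell_complete_args (assumed here to be a thin compatibility wrapper). A standalone sketch of the underlying click pattern:

    # Hedged sketch of a click completion callback, using click 8's
    # shell_complete parameter directly instead of _get_shell_complete_args.
    from typing import List

    import click

    def _complete_color(ctx: click.Context, param: click.Parameter,
                        incomplete: str) -> List[str]:
        # Hypothetical completer: return candidates matching the typed prefix.
        del ctx, param  # Unused.
        return [c for c in ('red', 'green', 'blue') if c.startswith(incomplete)]

    @click.command()
    @click.argument('color', required=False, shell_complete=_complete_color)
    def paint(color: str) -> None:
        click.echo(f'painting {color or "nothing"}')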
sky/client/sdk.py
CHANGED
@@ -53,6 +53,7 @@ from sky.utils import rich_utils
 from sky.utils import status_lib
 from sky.utils import subprocess_utils
 from sky.utils import ux_utils
+from sky.utils import yaml_utils
 from sky.utils.kubernetes import ssh_utils

 if typing.TYPE_CHECKING:
@@ -100,7 +101,8 @@ def reload_config() -> None:
 def stream_response(request_id: None,
                     response: 'requests.Response',
                     output_stream: Optional['io.TextIOBase'] = None,
-                    resumable: bool = False
+                    resumable: bool = False,
+                    get_result: bool = True) -> None:
     ...


@@ -108,14 +110,16 @@ def stream_response(request_id: None,
 def stream_response(request_id: server_common.RequestId[T],
                     response: 'requests.Response',
                     output_stream: Optional['io.TextIOBase'] = None,
-                    resumable: bool = False
+                    resumable: bool = False,
+                    get_result: bool = True) -> T:
     ...


 def stream_response(request_id: Optional[server_common.RequestId[T]],
                     response: 'requests.Response',
                     output_stream: Optional['io.TextIOBase'] = None,
-                    resumable: bool = False
+                    resumable: bool = False,
+                    get_result: bool = True) -> Optional[T]:
     """Streams the response to the console.

     Args:
@@ -128,6 +132,9 @@ def stream_response(request_id: Optional[server_common.RequestId[T]],
             console.
         resumable: Whether the response is resumable on retry. If True, the
             streaming will start from the previous failure point on retry.
+        get_result: Whether to get the result of the request. This will
+            typically be set to False for `--no-follow` flags as requests may
+            continue to run for long periods of time without further streaming.
     """

     retry_context: Optional[rest.RetryContext] = None
@@ -143,7 +150,7 @@ def stream_response(request_id: Optional[server_common.RequestId[T]],
                 elif line_count > retry_context.line_processed:
                     print(line, flush=True, end='', file=output_stream)
                     retry_context.line_processed = line_count
-        if request_id is not None:
+        if request_id is not None and get_result:
             return get(request_id)
         else:
             return None
@@ -942,10 +949,13 @@ def tail_provision_logs(cluster_name: str,
     # to return cleanly after printing the tailed lines. If we provided a
     # non-None request_id here, the get(request_id) in stream_response(
     # would fail since /provision_logs does not create a request record.
+    # By virtue of this, we set get_result to False to block get() from
+    # running.
     stream_response(request_id=None,
                     response=response,
                     output_stream=output_stream,
-                    resumable=(tail == 0)
+                    resumable=(tail == 0),
+                    get_result=False)
     return 0


@@ -2025,6 +2035,8 @@ def stream_and_get(
     Returns:
         The ``Request Returns`` of the specified request. See the documentation
         of the specific requests above for more details.
+        If follow is False, will always return None. See note on
+        stream_response.

     Raises:
         Exception: It raises the same exceptions as the specific requests,
@@ -2056,7 +2068,10 @@ def stream_and_get(
         if request_id is None:
             return None
         return get(request_id)
-    return stream_response(request_id,
+    return stream_response(request_id,
+                           response,
+                           output_stream,
+                           get_result=follow)


 @usage_lib.entrypoint
@@ -2332,7 +2347,7 @@ def _save_config_updates(endpoint: Optional[str] = None,
         config['api_server'][
             'service_account_token'] = service_account_token

-
+    yaml_utils.dump_yaml(str(config_path), config)
     skypilot_config.reload_config()


@@ -2348,7 +2363,7 @@ def _clear_api_server_config() -> None:
         config = dict(config)
         del config['api_server']

-
+    yaml_utils.dump_yaml(str(config_path), config, blank=True)
     skypilot_config.reload_config()


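For context (not part of the diff): with the new get_result flag, a no-follow stream prints whatever is already available and returns None instead of blocking on the request's final result. A hypothetical usage sketch, assuming sdk.launch returns a request ID and stream_and_get accepts a follow flag as the docstring change above suggests:

    # Hedged usage sketch of the follow/get_result behavior described above.
    import sky
    from sky.client import sdk

    request_id = sdk.launch(sky.Task(run='echo hi'), cluster_name='demo')
    result = sdk.stream_and_get(request_id)              # blocks, returns result
    peek = sdk.stream_and_get(request_id, follow=False)  # no-follow: returns None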
sky/client/sdk_async.py
CHANGED
@@ -144,7 +144,8 @@ async def get(request_id: str) -> Any:
 async def stream_response_async(request_id: Optional[str],
                                 response: 'aiohttp.ClientResponse',
                                 output_stream: Optional['io.TextIOBase'] = None,
-                                resumable: bool = False
+                                resumable: bool = False,
+                                get_result: bool = True) -> Any:
     """Async version of stream_response that streams the response to the
     console.

@@ -155,6 +156,9 @@ async def stream_response_async(request_id: Optional[str],
             console.
         resumable: Whether the response is resumable on retry. If True, the
             streaming will start from the previous failure point on retry.
+
+    Returns:
+        Result of request_id if given. Will only return if get_result is True.
     """

     retry_context: Optional[rest.RetryContext] = None
@@ -170,7 +174,7 @@ async def stream_response_async(request_id: Optional[str],
                 elif line_count > retry_context.line_processed:
                     print(line, flush=True, end='', file=output_stream)
                     retry_context.line_processed = line_count
-        if request_id is not None:
+        if request_id is not None and get_result:
             return await get(request_id)
     except Exception:  # pylint: disable=broad-except
         logger.debug(f'To stream request logs: sky api logs {request_id}')
sky/core.py
CHANGED
@@ -594,10 +594,7 @@ def down(cluster_name: str, purge: bool = False) -> None:

     usage_lib.record_cluster_name_for_current_operation(cluster_name)
     backend = backend_utils.get_backend_from_handle(handle)
-    backend.teardown(handle,
-                     terminate=True,
-                     purge=purge,
-                     explicitly_requested=True)
+    backend.teardown(handle, terminate=True, purge=purge)


 @usage_lib.entrypoint
sky/dashboard/out/404.html
CHANGED
@@ -1 +1 @@
-
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-
+
<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-6dae1cd599a34def.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c66a4e8afc46f17b.js" defer=""></script><script src="/dashboard/_next/static/9DW6d9jaP2kZt0NcgIfFa/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/9DW6d9jaP2kZt0NcgIfFa/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"statusCode":404}},"page":"/_error","query":{},"buildId":"9DW6d9jaP2kZt0NcgIfFa","assetPrefix":"/dashboard","nextExport":true,"isFallback":false,"gip":true,"scriptLoader":[]}</script></body></html>
sky/dashboard/out/_next/static/chunks/{webpack-6e76f636a048e145.js → webpack-6dae1cd599a34def.js}
RENAMED
@@ -1 +1 @@
-
!function(){"use strict";var t,e,n,r,c,o,u,a,i,f={},s={};function d(t){var e=s[t];if(void 0!==e)return e.exports;var n=s[t]={exports:{}},r=!0;try{f[t](n,n.exports,d),r=!1}finally{r&&delete s[t]}return n.exports}d.m=f,t=[],d.O=function(e,n,r,c){if(n){c=c||0;for(var o=t.length;o>0&&t[o-1][2]>c;o--)t[o]=t[o-1];t[o]=[n,r,c];return}for(var u=1/0,o=0;o<t.length;o++){for(var n=t[o][0],r=t[o][1],c=t[o][2],a=!0,i=0;i<n.length;i++)u>=c&&Object.keys(d.O).every(function(t){return d.O[t](n[i])})?n.splice(i--,1):(a=!1,c<u&&(u=c));if(a){t.splice(o--,1);var f=r();void 0!==f&&(e=f)}}return e},d.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return d.d(e,{a:e}),e},n=Object.getPrototypeOf?function(t){return Object.getPrototypeOf(t)}:function(t){return t.__proto__},d.t=function(t,r){if(1&r&&(t=this(t)),8&r||"object"==typeof t&&t&&(4&r&&t.__esModule||16&r&&"function"==typeof t.then))return t;var c=Object.create(null);d.r(c);var o={};e=e||[null,n({}),n([]),n(n)];for(var u=2&r&&t;"object"==typeof u&&!~e.indexOf(u);u=n(u))Object.getOwnPropertyNames(u).forEach(function(e){o[e]=function(){return t[e]}});return o.default=function(){return t},d.d(c,o),c},d.d=function(t,e){for(var n in e)d.o(e,n)&&!d.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:e[n]})},d.f={},d.e=function(t){return Promise.all(Object.keys(d.f).reduce(function(e,n){return d.f[n](t,e),e},[]))},d.u=function(t){return 2350===t?"static/chunks/2350.fab69e61bac57b23.js":
+
!function(){"use strict";var t,e,n,r,c,o,u,a,i,f={},s={};function d(t){var e=s[t];if(void 0!==e)return e.exports;var n=s[t]={exports:{}},r=!0;try{f[t](n,n.exports,d),r=!1}finally{r&&delete s[t]}return n.exports}d.m=f,t=[],d.O=function(e,n,r,c){if(n){c=c||0;for(var o=t.length;o>0&&t[o-1][2]>c;o--)t[o]=t[o-1];t[o]=[n,r,c];return}for(var u=1/0,o=0;o<t.length;o++){for(var n=t[o][0],r=t[o][1],c=t[o][2],a=!0,i=0;i<n.length;i++)u>=c&&Object.keys(d.O).every(function(t){return d.O[t](n[i])})?n.splice(i--,1):(a=!1,c<u&&(u=c));if(a){t.splice(o--,1);var f=r();void 0!==f&&(e=f)}}return e},d.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return d.d(e,{a:e}),e},n=Object.getPrototypeOf?function(t){return Object.getPrototypeOf(t)}:function(t){return t.__proto__},d.t=function(t,r){if(1&r&&(t=this(t)),8&r||"object"==typeof t&&t&&(4&r&&t.__esModule||16&r&&"function"==typeof t.then))return t;var c=Object.create(null);d.r(c);var o={};e=e||[null,n({}),n([]),n(n)];for(var u=2&r&&t;"object"==typeof u&&!~e.indexOf(u);u=n(u))Object.getOwnPropertyNames(u).forEach(function(e){o[e]=function(){return t[e]}});return o.default=function(){return t},d.d(c,o),c},d.d=function(t,e){for(var n in e)d.o(e,n)&&!d.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:e[n]})},d.f={},d.e=function(t){return Promise.all(Object.keys(d.f).reduce(function(e,n){return d.f[n](t,e),e},[]))},d.u=function(t){return 2350===t?"static/chunks/2350.fab69e61bac57b23.js":3937===t?"static/chunks/3937.210053269f121201.js":7325===t?"static/chunks/7325.b4bc99ce0892dcd5.js":9025===t?"static/chunks/9025.a1bef12d672bb66d.js":9984===t?"static/chunks/9984.7eb6cc51fb460cae.js":9946===t?"static/chunks/9946.3b7b43c217ff70ec.js":4045===t?"static/chunks/4045.b30465273dc5e468.js":4725===t?"static/chunks/4725.10f7a9a5d3ea8208.js":7669===t?"static/chunks/7669.1f5d9a402bf5cc42.js":3785===t?"static/chunks/3785.d5b86f6ebc88e6e6.js":4783===t?"static/chunks/4783.c485f48348349f47.js":"static/chunks/"+t+"-"+({616:"3d59f75e2ccf9321",1121:"8afcf719ea87debc",1141:"943efc7aff0f0c06",1272:"1ef0bf0237faccdb",3015:"6c9c09593b1e67b6",3850:"ff4a9a69d978632b",4676:"9da7fdbde90b5549",5739:"d67458fcb1386c92",6130:"2be46d70a38f1e82",6135:"4b4d5e824b7f9d3c",6601:"06114c982db410b6",6856:"049014c6d43d127b",6989:"01359c57e018caa4",6990:"08b2a1cae076a943",7205:"88191679e7988c57",7411:"b15471acd2cba716",8969:"4a6f1a928fb6d370",9037:"89a84fd7fa31362d"})[t]+".js"},d.miniCssF=function(t){},d.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||Function("return this")()}catch(t){if("object"==typeof window)return window}}(),d.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},r={},c="_N_E:",d.l=function(t,e,n,o){if(r[t]){r[t].push(e);return}if(void 0!==n)for(var u,a,i=document.getElementsByTagName("script"),f=0;f<i.length;f++){var s=i[f];if(s.getAttribute("src")==t||s.getAttribute("data-webpack")==c+n){u=s;break}}u||(a=!0,(u=document.createElement("script")).charset="utf-8",u.timeout=120,d.nc&&u.setAttribute("nonce",d.nc),u.setAttribute("data-webpack",c+n),u.src=d.tu(t)),r[t]=[e];var b=function(e,n){u.onerror=u.onload=null,clearTimeout(l);var c=r[t];if(delete r[t],u.parentNode&&u.parentNode.removeChild(u),c&&c.forEach(function(t){return t(n)}),e)return e(n)},l=setTimeout(b.bind(null,void 0,{type:"timeout",target:u}),12e4);u.onerror=b.bind(null,u.onerror),u.onload=b.bind(null,u.onload),a&&document.head.appendChild(u)},d.r=function(t){"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},d.tt=function(){return void 0===o&&(o={createScriptURL:function(t){return t}},"undefined"!=typeof trustedTypes&&trustedTypes.createPolicy&&(o=trustedTypes.createPolicy("nextjs#bundler",o))),o},d.tu=function(t){return d.tt().createScriptURL(t)},d.p="/dashboard/_next/",u={2272:0},d.f.j=function(t,e){var n=d.o(u,t)?u[t]:void 0;if(0!==n){if(n)e.push(n[2]);else if(2272!=t){var r=new Promise(function(e,r){n=u[t]=[e,r]});e.push(n[2]=r);var c=d.p+d.u(t),o=Error();d.l(c,function(e){if(d.o(u,t)&&(0!==(n=u[t])&&(u[t]=void 0),n)){var r=e&&("load"===e.type?"missing":e.type),c=e&&e.target&&e.target.src;o.message="Loading chunk "+t+" failed.\n("+r+": "+c+")",o.name="ChunkLoadError",o.type=r,o.request=c,n[1](o)}},"chunk-"+t,t)}else u[t]=0}},d.O.j=function(t){return 0===u[t]},a=function(t,e){var n,r,c=e[0],o=e[1],a=e[2],i=0;if(c.some(function(t){return 0!==u[t]})){for(n in o)d.o(o,n)&&(d.m[n]=o[n]);if(a)var f=a(d)}for(t&&t(e);i<c.length;i++)r=c[i],d.o(u,r)&&u[r]&&u[r][0](),u[r]=0;return d.O(f)},(i=self.webpackChunk_N_E=self.webpackChunk_N_E||[]).forEach(a.bind(null,0)),i.push=a.bind(null,i.push.bind(i)),d.nc=void 0}();