skypilot-nightly 1.0.0.dev20241124__py3-none-any.whl → 1.0.0.dev20241126__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/adaptors/kubernetes.py +34 -10
- sky/authentication.py +2 -2
- sky/backends/backend_utils.py +6 -1
- sky/cli.py +11 -4
- sky/clouds/kubernetes.py +34 -26
- sky/clouds/service_catalog/__init__.py +7 -8
- sky/clouds/service_catalog/data_fetchers/fetch_azure.py +1 -1
- sky/execution.py +2 -1
- sky/provision/azure/instance.py +2 -1
- sky/provision/kubernetes/utils.py +28 -16
- sky/templates/aws-ray.yml.j2 +7 -1
- sky/templates/azure-ray.yml.j2 +0 -3
- sky/templates/kubernetes-port-forward-proxy-command.sh +5 -0
- sky/templates/kubernetes-ray.yml.j2 +4 -0
- sky/utils/command_runner.py +4 -0
- sky/utils/kubernetes/generate_kubeconfig.sh +44 -2
- sky/utils/kubernetes/rsync_helper.sh +3 -1
- sky/utils/schemas.py +8 -1
- {skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/METADATA +1 -1
- {skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/RECORD +25 -25
- {skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/top_level.txt +0 -0
sky/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
 import urllib.request
 
 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '
+_SKYPILOT_COMMIT_SHA = '74a8075e7deba2712cc9406be734d78c9548b7c7'
 
 
 def _get_git_commit():
@@ -35,7 +35,7 @@ def _get_git_commit():
 
 
 __commit__ = _get_git_commit()
-__version__ = '1.0.0.dev20241124'
+__version__ = '1.0.0.dev20241126'
 __root_dir__ = os.path.dirname(os.path.abspath(__file__))
 
 
sky/adaptors/kubernetes.py
CHANGED
@@ -19,6 +19,13 @@ urllib3 = common.LazyImport('urllib3',
 # Timeout to use for API calls
 API_TIMEOUT = 5
 
+DEFAULT_IN_CLUSTER_REGION = 'in-cluster'
+# The name for the environment variable that stores the in-cluster context name
+# for Kubernetes clusters. This is used to associate a name with the current
+# context when running with in-cluster auth. If not set, the context name is
+# set to DEFAULT_IN_CLUSTER_REGION.
+IN_CLUSTER_CONTEXT_NAME_ENV_VAR = 'SKYPILOT_IN_CLUSTER_CONTEXT_NAME'
+
 
 def _decorate_methods(obj: Any, decorator: Callable, decoration_type: str):
     for attr_name in dir(obj):
@@ -57,16 +64,8 @@ def _api_logging_decorator(logger: str, level: int):
 
 def _load_config(context: Optional[str] = None):
     urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-
-
-    # Kubernetes set environment variables for service discovery do not
-    # show up in SkyPilot tasks. For now, we work around by using
-    # DNS name instead of environment variables.
-    # See issue: https://github.com/skypilot-org/skypilot/issues/2287
-    os.environ['KUBERNETES_SERVICE_HOST'] = 'kubernetes.default.svc'
-    os.environ['KUBERNETES_SERVICE_PORT'] = '443'
-    kubernetes.config.load_incluster_config()
-    except kubernetes.config.config_exception.ConfigException:
+
+    def _load_config_from_kubeconfig(context: Optional[str] = None):
         try:
             kubernetes.config.load_kube_config(context=context)
         except kubernetes.config.config_exception.ConfigException as e:
@@ -90,6 +89,21 @@ def _load_config(context: Optional[str] = None):
             with ux_utils.print_exception_no_traceback():
                 raise ValueError(err_str) from None
 
+    if context == in_cluster_context_name() or context is None:
+        try:
+            # Load in-cluster config if running in a pod and context is None.
+            # Kubernetes set environment variables for service discovery do not
+            # show up in SkyPilot tasks. For now, we work around by using
+            # DNS name instead of environment variables.
+            # See issue: https://github.com/skypilot-org/skypilot/issues/2287
+            os.environ['KUBERNETES_SERVICE_HOST'] = 'kubernetes.default.svc'
+            os.environ['KUBERNETES_SERVICE_PORT'] = '443'
+            kubernetes.config.load_incluster_config()
+        except kubernetes.config.config_exception.ConfigException:
+            _load_config_from_kubeconfig()
+    else:
+        _load_config_from_kubeconfig(context)
+
 
 @_api_logging_decorator('urllib3', logging.ERROR)
 @functools.lru_cache()
@@ -154,3 +168,13 @@ def max_retry_error():
 
 def stream():
     return kubernetes.stream.stream
+
+
+def in_cluster_context_name() -> Optional[str]:
+    """Returns the name of the in-cluster context from the environment.
+
+    If the environment variable is not set, returns the default in-cluster
+    context name.
+    """
+    return (os.environ.get(IN_CLUSTER_CONTEXT_NAME_ENV_VAR) or
+            DEFAULT_IN_CLUSTER_REGION)
sky/authentication.py
CHANGED
@@ -380,8 +380,8 @@ def setup_kubernetes_authentication(config: Dict[str, Any]) -> Dict[str, Any]:
     secret_field_name = clouds.Kubernetes().ssh_key_secret_field_name
     context = config['provider'].get(
         'context', kubernetes_utils.get_current_kube_config_context_name())
-    if context ==
-    # If the context is
+    if context == kubernetes.in_cluster_context_name():
+        # If the context is an in-cluster context name, we are running in a pod
         # with in-cluster configuration. We need to set the context to None
         # to use the mounted service account.
         context = None
sky/backends/backend_utils.py
CHANGED
@@ -730,7 +730,12 @@ def write_cluster_config(
                 f'{skypilot_config.loaded_config_path!r} for {cloud}, but it '
                 'is not supported by this cloud. Remove the config or set: '
                 '`remote_identity: LOCAL_CREDENTIALS`.')
-
+        if isinstance(cloud, clouds.Kubernetes):
+            if skypilot_config.get_nested(
+                    ('kubernetes', 'allowed_contexts'), None) is None:
+                excluded_clouds.add(cloud)
+        else:
+            excluded_clouds.add(cloud)
 
     for cloud_str, cloud_obj in cloud_registry.CLOUD_REGISTRY.items():
         remote_identity_config = skypilot_config.get_nested(
sky/cli.py
CHANGED
@@ -486,7 +486,7 @@ def _parse_override_params(
         image_id: Optional[str] = None,
         disk_size: Optional[int] = None,
         disk_tier: Optional[str] = None,
-        ports: Optional[Tuple[str]] = None) -> Dict[str, Any]:
+        ports: Optional[Tuple[str, ...]] = None) -> Dict[str, Any]:
     """Parses the override parameters into a dictionary."""
     override_params: Dict[str, Any] = {}
     if cloud is not None:
@@ -539,7 +539,14 @@
     else:
         override_params['disk_tier'] = disk_tier
     if ports:
-
+        if any(p.lower() == 'none' for p in ports):
+            if len(ports) > 1:
+                with ux_utils.print_exception_no_traceback():
+                    raise ValueError('Cannot specify both "none" and other '
+                                     'ports.')
+            override_params['ports'] = None
+        else:
+            override_params['ports'] = ports
     return override_params
 
 
@@ -730,7 +737,7 @@ def _make_task_or_dag_from_entrypoint_with_overrides(
         image_id: Optional[str] = None,
         disk_size: Optional[int] = None,
         disk_tier: Optional[str] = None,
-        ports: Optional[Tuple[str]] = None,
+        ports: Optional[Tuple[str, ...]] = None,
         env: Optional[List[Tuple[str, str]]] = None,
         field_to_ignore: Optional[List[str]] = None,
         # job launch specific
@@ -1084,7 +1091,7 @@ def launch(
         env: List[Tuple[str, str]],
         disk_size: Optional[int],
         disk_tier: Optional[str],
-        ports: Tuple[str],
+        ports: Tuple[str, ...],
         idle_minutes_to_autostop: Optional[int],
         down: bool,  # pylint: disable=redefined-outer-name
         retry_until_up: bool,
sky/clouds/kubernetes.py
CHANGED
@@ -130,32 +130,30 @@ class Kubernetes(clouds.Cloud):
                    'Ignoring these contexts.')
 
     @classmethod
-    def _existing_allowed_contexts(cls) -> List[
+    def _existing_allowed_contexts(cls) -> List[str]:
         """Get existing allowed contexts.
 
         If None is returned in the list, it means that we are running in a pod
         with in-cluster auth. In this case, we specify None context, which will
         use the service account mounted in the pod.
         """
-        all_contexts = kubernetes_utils.
+        all_contexts = kubernetes_utils.get_all_kube_context_names()
         if len(all_contexts) == 0:
             return []
-
-        # If only one context is found and it is None, we are running in a
-        # pod with in-cluster auth. In this case, we allow it to be used
-        # without checking against allowed_contexts.
-        # TODO(romilb): We may want check in-cluster auth against
-        # allowed_contexts in the future by adding a special context name
-        # for in-cluster auth.
-            return [None]
+
         all_contexts = set(all_contexts)
 
         allowed_contexts = skypilot_config.get_nested(
             ('kubernetes', 'allowed_contexts'), None)
 
         if allowed_contexts is None:
+            # Try kubeconfig if present
             current_context = (
                 kubernetes_utils.get_current_kube_config_context_name())
+            if (current_context is None and
+                    kubernetes_utils.is_incluster_config_available()):
+                # If no kubeconfig contexts found, use in-cluster if available
+                current_context = kubernetes.in_cluster_context_name()
             allowed_contexts = []
             if current_context is not None:
                 allowed_contexts = [current_context]
@@ -180,13 +178,7 @@ class Kubernetes(clouds.Cloud):
 
         regions = []
         for context in existing_contexts:
-
-            # If running in-cluster, we allow the region to be set to the
-            # singleton region since there is no context name available.
-                regions.append(clouds.Region(
-                    kubernetes_utils.IN_CLUSTER_REGION))
-            else:
-                regions.append(clouds.Region(context))
+            regions.append(clouds.Region(context))
 
         if region is not None:
             regions = [r for r in regions if r.name == region]
@@ -409,12 +401,25 @@ class Kubernetes(clouds.Cloud):
         remote_identity = skypilot_config.get_nested(
             ('kubernetes', 'remote_identity'),
             schemas.get_default_remote_identity('kubernetes'))
-
+
+        if isinstance(remote_identity, dict):
+            # If remote_identity is a dict, use the service account for the
+            # current context
+            k8s_service_account_name = remote_identity.get(context, None)
+            if k8s_service_account_name is None:
+                err_msg = (f'Context {context!r} not found in '
+                           'remote identities from config.yaml')
+                raise ValueError(err_msg)
+        else:
+            # If remote_identity is not a dict, use
+            k8s_service_account_name = remote_identity
+
+        if (k8s_service_account_name ==
                 schemas.RemoteIdentityOptions.LOCAL_CREDENTIALS.value):
             # SA name doesn't matter since automounting credentials is disabled
             k8s_service_account_name = 'default'
             k8s_automount_sa_token = 'false'
-        elif (
+        elif (k8s_service_account_name ==
                 schemas.RemoteIdentityOptions.SERVICE_ACCOUNT.value):
             # Use the default service account
             k8s_service_account_name = (
@@ -422,7 +427,6 @@ class Kubernetes(clouds.Cloud):
             k8s_automount_sa_token = 'true'
         else:
             # User specified a custom service account
-            k8s_service_account_name = remote_identity
             k8s_automount_sa_token = 'true'
 
         fuse_device_required = bool(resources.requires_fuse)
@@ -447,6 +451,12 @@ class Kubernetes(clouds.Cloud):
             ('kubernetes', 'provision_timeout'),
             timeout,
             override_configs=resources.cluster_config_overrides)
+
+        # Set environment variables for the pod. Note that SkyPilot env vars
+        # are set separately when the task is run. These env vars are
+        # independent of the SkyPilot task to be run.
+        k8s_env_vars = {kubernetes.IN_CLUSTER_CONTEXT_NAME_ENV_VAR: context}
+
         # We specify object-store-memory to be 500MB to avoid taking up too
         # much memory on the head node. 'num-cpus' should be set to limit
         # the CPU usage on the head pod, otherwise the ray cluster will use the
@@ -480,6 +490,7 @@ class Kubernetes(clouds.Cloud):
             'k8s_topology_label_key': k8s_topology_label_key,
             'k8s_topology_label_value': k8s_topology_label_value,
             'k8s_resource_key': k8s_resource_key,
+            'k8s_env_vars': k8s_env_vars,
             'image_id': image_id,
             'ray_installation_commands': constants.RAY_INSTALLATION_COMMANDS,
             'ray_head_start_command': instance_setup.ray_head_start_command(
@@ -625,16 +636,13 @@ class Kubernetes(clouds.Cloud):
             # TODO: Remove this after 0.9.0.
             return region, zone
 
-        if region ==
+        if region == kubernetes.in_cluster_context_name():
             # If running incluster, we set region to IN_CLUSTER_REGION
             # since there is no context name available.
             return region, zone
 
-        all_contexts = kubernetes_utils.
-
-        # If [None] context is returned, use the singleton region since we
-        # are running in a pod with in-cluster auth.
-        all_contexts = [kubernetes_utils.IN_CLUSTER_REGION]
+        all_contexts = kubernetes_utils.get_all_kube_context_names()
+
         if region not in all_contexts:
             raise ValueError(
                 f'Context {region} not found in kubeconfig. Kubernetes only '
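Illustrative only, not part of the diff: a sketch of the new per-context `remote_identity` resolution shown above. A dict maps kubeconfig context names to service accounts, while a plain string applies to every context (the context and service-account names below are made up).

    from typing import Dict, Union

    def resolve_service_account(remote_identity: Union[str, Dict[str, str]],
                                context: str) -> str:
        # Dict form: look up the service account for the current context.
        if isinstance(remote_identity, dict):
            if context not in remote_identity:
                raise ValueError(f'Context {context!r} not found in '
                                 'remote identities from config.yaml')
            return remote_identity[context]
        # String form: same identity for every context.
        return remote_identity

    print(resolve_service_account({'gke-prod': 'sky-sa'}, 'gke-prod'))  # sky-sa
    print(resolve_service_account('SERVICE_ACCOUNT', 'eks-dev'))        # SERVICE_ACCOUNT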
sky/clouds/service_catalog/__init__.py
CHANGED
@@ -324,9 +324,8 @@ def get_common_gpus() -> List[str]:
         'A100',
         'A100-80GB',
         'H100',
-        'K80',
         'L4',
-        '
+        'L40S',
         'P100',
         'T4',
         'V100',
@@ -337,13 +336,13 @@ def get_common_gpus() -> List[str]:
 def get_tpus() -> List[str]:
     """Returns a list of TPU names."""
     # TODO(wei-lin): refactor below hard-coded list.
-    # There are many TPU configurations available, we show the
-    #
+    # There are many TPU configurations available, we show the some smallest
+    # ones for each generation, and people should find larger ones with
+    # sky show-gpus tpu.
     return [
-        'tpu-v2-
-        'tpu-
-        'tpu-
-        'tpu-v5p-12288'
+        'tpu-v2-8', 'tpu-v3-8', 'tpu-v4-8', 'tpu-v4-16', 'tpu-v4-32',
+        'tpu-v5litepod-1', 'tpu-v5litepod-4', 'tpu-v5litepod-8', 'tpu-v5p-8',
+        'tpu-v5p-16', 'tpu-v5p-32', 'tpu-v6e-1', 'tpu-v6e-4', 'tpu-v6e-8'
     ]
sky/clouds/service_catalog/data_fetchers/fetch_azure.py
CHANGED
@@ -64,7 +64,7 @@ FAMILY_NAME_TO_SKYPILOT_GPU_NAME = {
     'standardNVSv2Family': 'M60',
     'standardNVSv3Family': 'M60',
     'standardNVPromoFamily': 'M60',
-    'standardNVSv4Family': '
+    'standardNVSv4Family': 'MI25',
     'standardNDSFamily': 'P40',
     'StandardNVADSA10v5Family': 'A10',
     'StandardNCadsH100v5Family': 'H100',
sky/execution.py
CHANGED
@@ -305,7 +305,8 @@ def _execute(
     do_workdir = (Stage.SYNC_WORKDIR in stages and not dryrun and
                   task.workdir is not None)
     do_file_mounts = (Stage.SYNC_FILE_MOUNTS in stages and not dryrun and
-                      task.file_mounts is not None
+                      (task.file_mounts is not None or
+                       task.storage_mounts is not None))
     if do_workdir or do_file_mounts:
         logger.info(ux_utils.starting_message('Mounting files.'))
 
sky/provision/azure/instance.py
CHANGED
@@ -305,7 +305,8 @@ def _create_vm(
         network_profile=network_profile,
         identity=compute.VirtualMachineIdentity(
             type='UserAssigned',
-            user_assigned_identities={provider_config['msi']: {}})
+            user_assigned_identities={provider_config['msi']: {}}),
+        priority=node_config['azure_arm_parameters'].get('priority', None))
     vm_poller = compute_client.virtual_machines.begin_create_or_update(
         resource_group_name=provider_config['resource_group'],
         vm_name=vm_name,
sky/provision/kubernetes/utils.py
CHANGED
@@ -37,7 +37,6 @@ if typing.TYPE_CHECKING:
 
 # TODO(romilb): Move constants to constants.py
 DEFAULT_NAMESPACE = 'default'
-IN_CLUSTER_REGION = 'in-cluster'
 
 DEFAULT_SERVICE_ACCOUNT_NAME = 'skypilot-service-account'
 
@@ -921,6 +920,9 @@ def is_kubeconfig_exec_auth(
         str: Error message if exec-based authentication is used, None otherwise
     """
     k8s = kubernetes.kubernetes
+    if context == kubernetes.in_cluster_context_name():
+        # If in-cluster config is used, exec-based auth is not used.
+        return False, None
     try:
         k8s.config.load_kube_config()
     except kubernetes.config_exception():
@@ -1003,30 +1005,34 @@ def is_incluster_config_available() -> bool:
     return os.path.exists('/var/run/secrets/kubernetes.io/serviceaccount/token')
 
 
-def
-    """Get all kubernetes context names
+def get_all_kube_context_names() -> List[str]:
+    """Get all kubernetes context names available in the environment.
+
+    Fetches context names from the kubeconfig file and in-cluster auth, if any.
 
-    If running in-cluster
+    If running in-cluster and IN_CLUSTER_CONTEXT_NAME_ENV_VAR is not set,
+    returns the default in-cluster kubernetes context name.
 
     We should not cache the result of this function as the admin policy may
     update the contexts.
 
    Returns:
        List[Optional[str]]: The list of kubernetes context names if
-        available, an empty list otherwise.
-        returns [None] to indicate in-cluster config.
+        available, an empty list otherwise.
    """
    k8s = kubernetes.kubernetes
+    context_names = []
    try:
        all_contexts, _ = k8s.config.list_kube_config_contexts()
        # all_contexts will always have at least one context. If kubeconfig
        # does not have any contexts defined, it will raise ConfigException.
-
+        context_names = [context['name'] for context in all_contexts]
    except k8s.config.config_exception.ConfigException:
-        # If
-
-
-
+        # If no config found, continue
+        pass
+    if is_incluster_config_available():
+        context_names.append(kubernetes.in_cluster_context_name())
+    return context_names
 
 
 @functools.lru_cache()
@@ -1130,7 +1136,11 @@ class KubernetesInstanceType:
         name = (f'{common_utils.format_float(self.cpus)}CPU--'
                 f'{common_utils.format_float(self.memory)}GB')
         if self.accelerator_count:
-
+            # Replace spaces with underscores in accelerator type to make it a
+            # valid logical instance type name.
+            assert self.accelerator_type is not None, self.accelerator_count
+            acc_name = self.accelerator_type.replace(' ', '_')
+            name += f'--{self.accelerator_count}{acc_name}'
         return name
 
     @staticmethod
@@ -1161,7 +1171,9 @@ class KubernetesInstanceType:
         accelerator_type = match.group('accelerator_type')
         if accelerator_count:
             accelerator_count = int(accelerator_count)
-
+            # This is to revert the accelerator types with spaces back to
+            # the original format.
+            accelerator_type = str(accelerator_type).replace('_', ' ')
         else:
             accelerator_count = None
             accelerator_type = None
@@ -2185,9 +2197,9 @@ def set_autodown_annotations(handle: 'backends.CloudVmRayResourceHandle',
 def get_context_from_config(provider_config: Dict[str, Any]) -> Optional[str]:
     context = provider_config.get('context',
                                   get_current_kube_config_context_name())
-    if context ==
-    # If the context (also used as the region) is
-    # we need to use in-cluster auth.
+    if context == kubernetes.in_cluster_context_name():
+        # If the context (also used as the region) is in-cluster, we need to
+        # we need to use in-cluster auth by setting the context to None.
         context = None
     return context
 
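Illustrative only, not part of the diff: a sketch of the logical instance-type naming convention the two hunks above maintain, where spaces in an accelerator name become underscores on encode and are restored on decode (the accelerator names and helper name below are examples, and simple '%g' formatting stands in for format_float).

    def encode_instance_type(cpus: float, memory: float,
                             accelerator_count: int = 0,
                             accelerator_type: str = '') -> str:
        # e.g. 4 CPUs, 16 GB, 1x 'A10' -> '4CPU--16GB--1A10'
        name = f'{cpus:g}CPU--{memory:g}GB'
        if accelerator_count:
            name += f'--{accelerator_count}{accelerator_type.replace(" ", "_")}'
        return name

    print(encode_instance_type(4, 16, 1, 'A10'))          # 4CPU--16GB--1A10
    # A name with spaces stays a single token and can be reversed later:
    print(encode_instance_type(8, 32, 2, 'Example GPU'))  # 8CPU--32GB--2Example_GPU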
sky/templates/aws-ray.yml.j2
CHANGED
@@ -173,6 +173,7 @@ setup_commands:
   # Line 'sudo grep ..': set the number of threads per process to unlimited to avoid ray job submit stucking issue when the number of running ray jobs increase.
   # Line 'mkdir -p ..': disable host key check
   # Line 'python3 -c ..': patch the buggy ray files and enable `-o allow_other` option for `goofys`
+  # Line 'rm ~/.aws/credentials': explicitly remove the credentials file to be safe. This is to guard against the case where the credential files was uploaded once as `remote_identity` was not set in a previous launch.
   - mkdir -p ~/.ssh; touch ~/.ssh/config;
     {%- for initial_setup_command in initial_setup_commands %}
     {{ initial_setup_command }}
@@ -185,7 +186,12 @@ setup_commands:
     sudo grep -e '^DefaultTasksMax' /etc/systemd/system.conf || (sudo bash -c 'echo "DefaultTasksMax=infinity" >> /etc/systemd/system.conf'); sudo systemctl set-property user-$(id -u $(whoami)).slice TasksMax=infinity; sudo systemctl daemon-reload;
     {%- endif %}
     mkdir -p ~/.ssh; (grep -Pzo -q "Host \*\n  StrictHostKeyChecking no" ~/.ssh/config) || printf "Host *\n  StrictHostKeyChecking no\n" >> ~/.ssh/config;
-    [ -f /etc/fuse.conf ] && sudo sed -i 's/#user_allow_other/user_allow_other/g' /etc/fuse.conf || (sudo sh -c 'echo "user_allow_other" > /etc/fuse.conf');
+    [ -f /etc/fuse.conf ] && sudo sed -i 's/#user_allow_other/user_allow_other/g' /etc/fuse.conf || (sudo sh -c 'echo "user_allow_other" > /etc/fuse.conf');
+    {%- if remote_identity != 'LOCAL_CREDENTIALS' %}
+    rm ~/.aws/credentials || true;
+    {%- endif %}
+
+
 
 # Command to start ray clusters are now placed in `sky.provision.instance_setup`.
 # We do not need to list it here anymore.
sky/templates/azure-ray.yml.j2
CHANGED
@@ -75,9 +75,6 @@ available_node_types:
         {%- if use_spot %}
         # optionally set priority to use Spot instances
         priority: Spot
-        # set a maximum price for spot instances if desired
-        # billingProfile:
-        #   maxPrice: -1
         {%- endif %}
         cloudInitSetupCommands: |-
           {%- for cmd in cloud_init_setup_commands %}
sky/templates/kubernetes-port-forward-proxy-command.sh
CHANGED
@@ -58,6 +58,11 @@ KUBECTL_ARGS=()
 if [ -n "$KUBE_CONTEXT" ]; then
   KUBECTL_ARGS+=("--context=$KUBE_CONTEXT")
 fi
+# If context is not provided, it means we are using incluster auth. In this case,
+# we need to set KUBECONFIG to /dev/null to avoid using kubeconfig file.
+if [ -z "$KUBE_CONTEXT" ]; then
+  KUBECTL_ARGS+=("--kubeconfig=/dev/null")
+fi
 if [ -n "$KUBE_NAMESPACE" ]; then
   KUBECTL_ARGS+=("--namespace=$KUBE_NAMESPACE")
 fi
sky/templates/kubernetes-ray.yml.j2
CHANGED
@@ -322,6 +322,10 @@ available_node_types:
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.labels['ray-node-type']
+            {% for key, value in k8s_env_vars.items() if k8s_env_vars is not none %}
+            - name: {{ key }}
+              value: {{ value }}
+            {% endfor %}
             # Do not change this command - it keeps the pod alive until it is
             # explicitly killed.
             command: ["/bin/bash", "-c", "--"]
sky/utils/command_runner.py
CHANGED
@@ -767,6 +767,10 @@ class KubernetesCommandRunner(CommandRunner):
         ]
         if self.context:
             kubectl_args += ['--context', self.context]
+        # If context is none, it means we are using incluster auth. In this
+        # case, need to set KUBECONFIG to /dev/null to avoid using kubeconfig.
+        if self.context is None:
+            kubectl_args += ['--kubeconfig', '/dev/null']
         kubectl_args += [self.pod_name]
         if ssh_mode == SshMode.LOGIN:
             assert isinstance(cmd, list), 'cmd must be a list for login mode.'
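Illustrative only, not part of the diff: the command runner above and the two kubernetes templates follow the same pattern when no kube context is set, sketched here as a small helper (the function name is made up).

    from typing import List, Optional

    def kubectl_base_args(context: Optional[str]) -> List[str]:
        args = ['kubectl']
        if context:
            args += ['--context', context]
        else:
            # In-cluster auth: point kubectl at an empty kubeconfig so any
            # stray kubeconfig file is ignored and the mounted service
            # account is used instead.
            args += ['--kubeconfig', '/dev/null']
        return args

    print(kubectl_base_args('gke-prod'))  # ['kubectl', '--context', 'gke-prod']
    print(kubectl_base_args(None))        # ['kubectl', '--kubeconfig', '/dev/null']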
sky/utils/kubernetes/generate_kubeconfig.sh
CHANGED
@@ -12,6 +12,7 @@
 # * Specify SKYPILOT_NAMESPACE env var to override the default namespace where the service account is created.
 # * Specify SKYPILOT_SA_NAME env var to override the default service account name.
 # * Specify SKIP_SA_CREATION=1 to skip creating the service account and use an existing one
+# * Specify SUPER_USER=1 to create a service account with cluster-admin permissions
 #
 # Usage:
 #   # Create "sky-sa" service account with minimal permissions in "default" namespace and generate kubeconfig
@@ -22,6 +23,9 @@
 #
 #   # Use an existing service account "my-sa" in "my-namespace" namespace and generate kubeconfig
 #   $ SKIP_SA_CREATION=1 SKYPILOT_SA_NAME=my-sa SKYPILOT_NAMESPACE=my-namespace ./generate_kubeconfig.sh
+#
+#   # Create "sky-sa" service account with cluster-admin permissions in "default" namespace
+#   $ SUPER_USER=1 ./generate_kubeconfig.sh
 
 set -eu -o pipefail
 
@@ -29,9 +33,11 @@ set -eu -o pipefail
 # use default.
 SKYPILOT_SA=${SKYPILOT_SA_NAME:-sky-sa}
 NAMESPACE=${SKYPILOT_NAMESPACE:-default}
+SUPER_USER=${SUPER_USER:-0}
 
 echo "Service account: ${SKYPILOT_SA}"
 echo "Namespace: ${NAMESPACE}"
+echo "Super user permissions: ${SUPER_USER}"
 
 # Set OS specific values.
 if [[ "$OSTYPE" == "linux-gnu" ]]; then
@@ -47,8 +53,43 @@ fi
 
 # If the user has set SKIP_SA_CREATION=1, skip creating the service account.
 if [ -z ${SKIP_SA_CREATION+x} ]; then
-  echo "Creating the Kubernetes Service Account with minimal RBAC permissions."
-
+  echo "Creating the Kubernetes Service Account with ${SUPER_USER:+super user}${SUPER_USER:-minimal} RBAC permissions."
+  if [ "${SUPER_USER}" = "1" ]; then
+    # Create service account with cluster-admin permissions
+    kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ${NAMESPACE}
+  labels:
+    parent: skypilot
+---
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: ${SKYPILOT_SA}
+  namespace: ${NAMESPACE}
+  labels:
+    parent: skypilot
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: ${SKYPILOT_SA}-cluster-admin
+  labels:
+    parent: skypilot
+subjects:
+- kind: ServiceAccount
+  name: ${SKYPILOT_SA}
+  namespace: ${NAMESPACE}
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+EOF
+  else
+    # Original RBAC rules for minimal permissions
+    kubectl apply -f - <<EOF
 # Create/update namespace specified by the user
 apiVersion: v1
 kind: Namespace
@@ -173,6 +214,7 @@ roleRef:
   name: skypilot-system-service-account-role
   apiGroup: rbac.authorization.k8s.io
 EOF
+  fi
   # Apply optional ingress-related roles, but don't make the script fail if it fails
   kubectl apply -f - <<EOF || echo "Failed to apply optional ingress-related roles. Nginx ingress is likely not installed. This is not critical and the script will continue."
 # Optional: Role for accessing ingress resources
@@ -16,7 +16,9 @@ echo "context: $context" >&2
|
|
16
16
|
context_lower=$(echo "$context" | tr '[:upper:]' '[:lower:]')
|
17
17
|
shift
|
18
18
|
if [ -z "$context" ] || [ "$context_lower" = "none" ]; then
|
19
|
-
|
19
|
+
# If context is none, it means we are using incluster auth. In this case,
|
20
|
+
# use need to set KUBECONFIG to /dev/null to avoid using kubeconfig file.
|
21
|
+
kubectl exec -i $pod -n $namespace --kubeconfig=/dev/null -- "$@"
|
20
22
|
else
|
21
23
|
kubectl exec -i $pod -n $namespace --context=$context -- "$@"
|
22
24
|
fi
|
sky/utils/schemas.py
CHANGED
@@ -684,7 +684,14 @@ _REMOTE_IDENTITY_SCHEMA = {
 
 _REMOTE_IDENTITY_SCHEMA_KUBERNETES = {
     'remote_identity': {
-        '
+        'anyOf': [{
+            'type': 'string'
+        }, {
+            'type': 'object',
+            'additionalProperties': {
+                'type': 'string'
+            }
+        }]
     },
 }
 
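Illustrative only, not part of the diff: what the `anyOf` change above accepts for `remote_identity` under `kubernetes`, checked here with the jsonschema library (assumed to be the validator, since this is a JSON-schema fragment); the context and service-account names are made up.

    import jsonschema

    remote_identity_schema = {
        'anyOf': [
            {'type': 'string'},
            {'type': 'object', 'additionalProperties': {'type': 'string'}},
        ]
    }

    # Both forms validate: a single identity, or a per-context mapping.
    jsonschema.validate('SERVICE_ACCOUNT', remote_identity_schema)
    jsonschema.validate({'gke-prod': 'sky-sa', 'eks-dev': 'default'}, remote_identity_schema)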
{skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/RECORD
RENAMED
@@ -1,13 +1,13 @@
-sky/__init__.py,sha256=
+sky/__init__.py,sha256=osjCmkEoWHBG-YiWGML4YGuznHH6a-e2w0Qft9syKGM,5882
 sky/admin_policy.py,sha256=hPo02f_A32gCqhUueF0QYy1fMSSKqRwYEg_9FxScN_s,3248
-sky/authentication.py,sha256=
+sky/authentication.py,sha256=kACHmiZgWgRpYd1wx1ofbXRMErfMcFmWrkw4a9NxYrY,20988
 sky/check.py,sha256=D3Y3saIFAYVvPxuBHnVgJEO0fUVDxgjwuMBaO-D778k,9472
-sky/cli.py,sha256=
+sky/cli.py,sha256=0sLOr7lBg2eKeFOgkW2ZS4RYb-hDccM78pVNVnXu_Gs,213764
 sky/cloud_stores.py,sha256=RjFgmRhUh1Kk__f6g3KxzLp9s7dA0pFK4W1AukEuUaw,21153
 sky/core.py,sha256=0-4W_DKJZgbwXuzNZKQ2R_qJxqxbqqNfyi0U0PQBKvQ,38230
 sky/dag.py,sha256=f3sJlkH4bE6Uuz3ozNtsMhcBpRx7KmC9Sa4seDKt4hU,3104
 sky/exceptions.py,sha256=E3C2Ejcc8RUDAUQn7ar_Jr97C_AxD2rKKMmJOfLJ9d0,8965
-sky/execution.py,sha256=
+sky/execution.py,sha256=RsVYs_Fkt8OJUJemz49mJLKE5iZaOvNHCTPcnQ_ngNQ,27522
 sky/global_user_state.py,sha256=ob3jvtG_yMPGvLlVScgeJ9pqk3FP4jhfEixw8WzFwho,29682
 sky/optimizer.py,sha256=GjvKQIBtY3NlULzau_9tfa7V2KYVJRrmNrjKVIWCPIQ,59753
 sky/resources.py,sha256=260JnyzYz0uAYVH8lIfbHjSZUvHne8mfoWmjo_YgeEI,69106
@@ -24,13 +24,13 @@ sky/adaptors/cudo.py,sha256=WGvIQrlzJkGDe02Ve7pygA56tHwUc4kwS3XHW8kMFAA,239
 sky/adaptors/docker.py,sha256=_kzpZ0fkWHqqQAVVl0llTsCE31KYz3Sjn8psTBQHVkA,468
 sky/adaptors/gcp.py,sha256=OQ9RaqjR0r0iaWYpjvEtIx5vnEhyB4LhUCwbtdxsmVk,3115
 sky/adaptors/ibm.py,sha256=H87vD6izq_wQI8oQC7cx9iVtRgPi_QkAcrfa1Z3PNqU,4906
-sky/adaptors/kubernetes.py,sha256=
+sky/adaptors/kubernetes.py,sha256=5pRyPmXYpA0CrU5JFjh88TxC9TNemIaSUkSvaXysrCY,6510
 sky/adaptors/oci.py,sha256=n_zcrippTZRbTIhN3euD5sqNYn43G397zMavaJyEYbk,1480
 sky/adaptors/runpod.py,sha256=4Nt_BfZhJAKQNA3wO8cxvvNI8x4NsDGHu_4EhRDlGYQ,225
 sky/adaptors/vsphere.py,sha256=zJP9SeObEoLrpgHW2VHvZE48EhgVf8GfAEIwBeaDMfM,2129
 sky/backends/__init__.py,sha256=UDjwbUgpTRApbPJnNfR786GadUuwgRk3vsWoVu5RB_c,536
 sky/backends/backend.py,sha256=wwfbrxPhjMPs6PSyy3tAHI8WJhl-xhgzWBsAZjmJJ6g,6249
-sky/backends/backend_utils.py,sha256=
+sky/backends/backend_utils.py,sha256=tqlpVx7KuQH1LMewPZ9KkkMIR_0mUbrrzJ72kzXMXBk,126294
 sky/backends/cloud_vm_ray_backend.py,sha256=BDpruXsj-u4wc3WYscLhIbSjjsNZ85iI7fkb-T8f2Bs,233321
 sky/backends/docker_utils.py,sha256=Hyw1YY20EyghhEbYx6O2FIMDcGkNzBzV9TM7LFynei8,8358
 sky/backends/local_docker_backend.py,sha256=0JL5m0YUgOmOL4aWEUe4tmt89dsxjk4_WXkPwgEKEis,16801
@@ -48,14 +48,14 @@ sky/clouds/cudo.py,sha256=mglcsoYhdWwgkVgFcoZLE0M_UCXOJkvW6nITufPd_BQ,13118
 sky/clouds/fluidstack.py,sha256=u2I6jXEtTqgqRWi2EafMsKqc8VkUq1cR6CSDUvk72_U,12407
 sky/clouds/gcp.py,sha256=RH3MMdgR3FWPiwm7rFrJ5oebtTcSUVoh7UfQMc_6U4A,54704
 sky/clouds/ibm.py,sha256=0ArRTQx1_DpTNGByFhukzFedEDzmVjBsGiiques1bQ0,21447
-sky/clouds/kubernetes.py,sha256=
+sky/clouds/kubernetes.py,sha256=zYF7GxkbxOFhQV374R0jFZo3k2qpfvMZo9j70w6wkV8,31545
 sky/clouds/lambda_cloud.py,sha256=42AmcN2X_wdBMuAw606nR_pQCBAy5QFiAo711_WRqDE,12672
 sky/clouds/oci.py,sha256=OzGWoU3DiMbFujMQLXgCr94Oqb9EyP0CsM4gMYOeU9s,26553
 sky/clouds/paperspace.py,sha256=0UxOcv_NaLY5hrFoAA_ZienclZUOqzf0yxXXZu4jXG0,10896
 sky/clouds/runpod.py,sha256=UlHFPQY4wGGi0gLDO-vZoeJcgbQTCYXh4Pk8mKQBNUk,11515
 sky/clouds/scp.py,sha256=JHyMqkAAqr9lJq79IVjj3rU1g-ZCCGLZTJEzIhYsw7c,15845
 sky/clouds/vsphere.py,sha256=LzO-Mc-zDgpaDYZxNKGdEFa0eR5DHpTgKsPX60mPi10,12280
-sky/clouds/service_catalog/__init__.py,sha256=
+sky/clouds/service_catalog/__init__.py,sha256=p4V0GGeumT8yt01emqDM7Au45H5jvPfGNqdI6L2W3uM,14750
 sky/clouds/service_catalog/aws_catalog.py,sha256=j33lNC5GXWK6CiGWZORCnumGlRODmCAT2_lfWp0YtBc,13106
 sky/clouds/service_catalog/azure_catalog.py,sha256=5Q51x_WEKvQ2YSgJvZHRH3URlbwIstYuwpjaWW_wJlw,8149
 sky/clouds/service_catalog/common.py,sha256=qHNLzh59W34CSSCCztu75n69TuGyDQ310SQc_P-t544,27700
@@ -75,7 +75,7 @@ sky/clouds/service_catalog/vsphere_catalog.py,sha256=OV3Czi3vwRSW4lqVPHxU_GND0ox
 sky/clouds/service_catalog/data_fetchers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sky/clouds/service_catalog/data_fetchers/analyze.py,sha256=VdksJQs3asFE8H5T3ZV1FJas2xD9WEX6c-V5p7y-wp4,2084
 sky/clouds/service_catalog/data_fetchers/fetch_aws.py,sha256=ro2zazdkDF6z9bE7QFyjoeb4VFxmbNZ1WK5IQrdoQWk,23003
-sky/clouds/service_catalog/data_fetchers/fetch_azure.py,sha256=
+sky/clouds/service_catalog/data_fetchers/fetch_azure.py,sha256=K5jyfCAR5d-Hg78tDhmqpz0DQl79ndCW1ZRhBDLcBdM,12796
 sky/clouds/service_catalog/data_fetchers/fetch_cudo.py,sha256=52P48lvWN0s1ArjeLPeLemPRpxjSRcHincRle0nqdm4,3440
 sky/clouds/service_catalog/data_fetchers/fetch_fluidstack.py,sha256=yKuAFbjBRNz_e2RNNDT_aHHAuKQ86Ac7GKgIie5O6Pg,7273
 sky/clouds/service_catalog/data_fetchers/fetch_gcp.py,sha256=VrTTkMF5AjiplfDmvPBW-otR3oXGU3-oFouVMfIua4Q,33447
@@ -118,7 +118,7 @@ sky/provision/aws/utils.py,sha256=m49pS-SHGW7Au3bhDeTPsL8N5iRzbwOXzyEWRCc1Vho,32
 sky/provision/azure/__init__.py,sha256=87cgk1_Ws7n9rqaDDPv-HpfrkVeSQMdFQnhnXwyx9g4,548
 sky/provision/azure/azure-config-template.json,sha256=jrjAgOtpe0e6FSg3vsVqHKQqJe0w-HeWOFT1HuwzS2c,4712
 sky/provision/azure/config.py,sha256=V5-0Zelt4Xo0vcqnD6PpsnaCS7vc3xosDelILDAKSW4,8885
-sky/provision/azure/instance.py,sha256=
+sky/provision/azure/instance.py,sha256=T9yzMGeYIqQVKkZ1paUWIkhRbPTrBCKmWjTFVixahcM,49059
 sky/provision/cudo/__init__.py,sha256=KAEl26MVPsk7IoP9Gg-MOJJRIV6-X9B0fbyHdyJWdLo,741
 sky/provision/cudo/config.py,sha256=RYOVkV0MoUqVBJRZiKhBZhjFygeyFs7eUdVMdPg1vds,327
 sky/provision/cudo/cudo_machine_type.py,sha256=_VNXWPELmlFXbtdcnPvkuLuyE9CZ923BUCdiac-ClDY,696
@@ -140,7 +140,7 @@ sky/provision/kubernetes/config.py,sha256=WEKcFXXhe89bLGAvoMiBvTDxdxkpTIA6ezrj2v
 sky/provision/kubernetes/instance.py,sha256=2zd_Z09amOsi0vPZjQYMJCkCWbN2YecMLL9HkmUuPrM,48414
 sky/provision/kubernetes/network.py,sha256=EpNjRQ131CXepqbdkoRKFu4szVrm0oKEpv1l8EgOkjU,12364
 sky/provision/kubernetes/network_utils.py,sha256=t1FS3K400fetH7cBuRgQJZl5_jEeMshsvsYmnMUcq8k,11399
-sky/provision/kubernetes/utils.py,sha256=
+sky/provision/kubernetes/utils.py,sha256=2taenSlFi3r_78ZQaDQt9i4hQkGAxiePLuz2P9HEcNU,101946
 sky/provision/kubernetes/manifests/smarter-device-manager-configmap.yaml,sha256=AMzYzlY0JIlfBWj5eX054Rc1XDW2thUcLSOGMJVhIdA,229
 sky/provision/kubernetes/manifests/smarter-device-manager-daemonset.yaml,sha256=RtTq4F1QUmR2Uunb6zuuRaPhV7hpesz4saHjn3Ncsb4,2010
 sky/provision/lambda_cloud/__init__.py,sha256=6EEvSgtUeEiup9ivIFevHmgv0GqleroO2X0K7TRa2nE,612
@@ -218,8 +218,8 @@ sky/skylet/ray_patches/log_monitor.py.patch,sha256=CPoh3U_ogOHrkMOK7jaIRnwdzxjBT
 sky/skylet/ray_patches/resource_demand_scheduler.py.patch,sha256=AVV-Hw-Rxw16aFm4VsyzayX1QOvwmQuM79iVdSjkSl4,658
 sky/skylet/ray_patches/updater.py.patch,sha256=ZNMGVYICPBB44jLbEx2KvCgIY7BWYdDv3-2b2HJWmAQ,289
 sky/skylet/ray_patches/worker.py.patch,sha256=_OBhibdr3xOy5Qje6Tt8D1eQVm_msi50TJbCJmOTxVU,565
-sky/templates/aws-ray.yml.j2,sha256=
-sky/templates/azure-ray.yml.j2,sha256=
+sky/templates/aws-ray.yml.j2,sha256=FK95sqSCrCkoAz9Cdtwq6_g3TqU-3048KE7pUaZfNNg,8814
+sky/templates/azure-ray.yml.j2,sha256=NQKg_f_S7WjsY90ykx0yNDNOGYnnEL3HS4pA3NMIZkM,6112
 sky/templates/cudo-ray.yml.j2,sha256=SEHVY57iBauCOE2HYJtYVFEKlriAkdwQu_p86a1n_bA,3548
 sky/templates/fluidstack-ray.yml.j2,sha256=t8TCULgiErCZdtFmBZVsA8ZdcqR7ccwsmQhuDFTBEAU,3541
 sky/templates/gcp-ray.yml.j2,sha256=y95B-Nk6hFxm6vEIaxI1wFzAIcy_GcKC3XMYo9m-ThI,9662
@@ -227,8 +227,8 @@ sky/templates/ibm-ray.yml.j2,sha256=RMBUqPId8i4CnVwcyfK3DbRapF1jFMuGQlY0E0PFbMU,
 sky/templates/jobs-controller.yaml.j2,sha256=Gu3ogFxFYr09VEXP-6zEbrCUOFo1aYxWEjAq7whCrxo,1607
 sky/templates/kubernetes-ingress.yml.j2,sha256=73iDklVDWBMbItg0IexCa6_ClXPJOxw7PWz3leku4nE,1340
 sky/templates/kubernetes-loadbalancer.yml.j2,sha256=IxrNYM366N01bbkJEbZ_UPYxUP8wyVEbRNFHRsBuLsw,626
-sky/templates/kubernetes-port-forward-proxy-command.sh,sha256=
-sky/templates/kubernetes-ray.yml.j2,sha256=
+sky/templates/kubernetes-port-forward-proxy-command.sh,sha256=iw7mypHszg6Ggq9MbyiYMFOkSlXaQZulaxqC5IWYGCc,3381
+sky/templates/kubernetes-ray.yml.j2,sha256=cK2XAkodjHThv94ITddzJwkePQYf0uCYHZo_1BwCw5U,28728
 sky/templates/kubernetes-ssh-jump.yml.j2,sha256=k5W5sOIMppU7dDkJMwPlqsUcb92y7L5_TVG3hkgMy8M,2747
 sky/templates/lambda-ray.yml.j2,sha256=HyvO_tX2vxwSsc4IFVSqGuIbjLMk0bevP9bcxb8ZQII,4498
 sky/templates/local-ray.yml.j2,sha256=FNHeyHF6nW9nU9QLIZceUWfvrFTTcO51KqhTnYCEFaA,1185
@@ -245,7 +245,7 @@ sky/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sky/utils/accelerator_registry.py,sha256=BO4iYH5bV80Xyp4EPfO0n1D3LL0FvESCy7xm59Je3_o,3798
 sky/utils/admin_policy_utils.py,sha256=_Vt_jTTYCXmMdryj0vrrumFPewa93qHnzUqBDXjAhRU,5981
 sky/utils/cluster_yaml_utils.py,sha256=1wRRYqI1kI-eFs1pMW4r_FFjHJ0zamq6v2RRI-Gtx5E,849
-sky/utils/command_runner.py,sha256=
+sky/utils/command_runner.py,sha256=ewDjFxcCOv0OeG2aUOIfVWmTls65up9DvSnAXURvGfM,36696
 sky/utils/command_runner.pyi,sha256=mJOzCgcYZAfHwnY_6Wf1YwlTEJGb9ihzc2f0rE0Kw98,7751
 sky/utils/common_utils.py,sha256=Qy25LuIoTT0qg391EWyT9i5D6fwk1S4OdFwRpCTZ9Vk,24657
 sky/utils/control_master_utils.py,sha256=90hnxiAUP20gbJ9e3MERh7rb04ZO_I3LsljNjR26H5I,1416
@@ -257,7 +257,7 @@ sky/utils/kubernetes_enums.py,sha256=imGqHSa8O07zD_6xH1SDMM7dBU5lF5fzFFlQuQy00QM
 sky/utils/log_utils.py,sha256=oZYF45uC7GFjAqO-Je-aiX6zhtq91TP-KKaIbQNF-jY,14024
 sky/utils/resources_utils.py,sha256=Xqi7gxPYw2y5wl5okUI5zx5LEij0hJF_V3Zi8q7TXYg,7890
 sky/utils/rich_utils.py,sha256=hmnI1X5dKvRIQzB7EyNb34FT97qFNve-0QHqM5r0mVk,3066
-sky/utils/schemas.py,sha256=
+sky/utils/schemas.py,sha256=ul_tiSLxJthuJHuiz9NPTLdgtu_ZBbdFd5Pbf6Qb4vQ,29638
 sky/utils/subprocess_utils.py,sha256=iLOda3vfkD-sIUPlfkDGZs9HnJWLlLRvHVgca9DZH8s,10410
 sky/utils/timeline.py,sha256=ebHxKJK2HX0utGArrUgSezTPkcwav3VETa_AQS34t-E,3925
 sky/utils/ux_utils.py,sha256=CqyIFGDuSE8fQasPkna_loZMwtboC9KedR09WEQ7qz0,6502
@@ -269,15 +269,15 @@ sky/utils/kubernetes/create_cluster.sh,sha256=VLXfazav9XCMQmeKVqhuOQzt2vM6G1jgnv
 sky/utils/kubernetes/delete_cluster.sh,sha256=BSccHF43GyepDNf-FZcenzHzpXXATkVD92vgn1lWPgk,927
 sky/utils/kubernetes/deploy_remote_cluster.sh,sha256=vGj0mD0tejHDRy8ulwKOvOF2mfLyT5J8fp7GVqEe_EY,8478
 sky/utils/kubernetes/generate_kind_config.py,sha256=_TNLnifA_r7-CRq083IP1xjelYqiLjzQX9ohuqYpDH8,3187
-sky/utils/kubernetes/generate_kubeconfig.sh,sha256=
+sky/utils/kubernetes/generate_kubeconfig.sh,sha256=MBvXJio0PeujZSCXiRKE_pa6HCTiU9qBzR1WrXccVSY,10477
 sky/utils/kubernetes/gpu_labeler.py,sha256=j9tdIG98nwJ6WJXNhpLUUFcg-6RYe1pNiE_bLvLIB5Q,6999
 sky/utils/kubernetes/k8s_gpu_labeler_job.yaml,sha256=k0TBoQ4zgf79-sVkixKSGYFHQ7ZWF5gdVIZPupCCo9A,1224
 sky/utils/kubernetes/k8s_gpu_labeler_setup.yaml,sha256=VLKT2KKimZu1GDg_4AIlIt488oMQvhRZWwsj9vBbPUg,3812
-sky/utils/kubernetes/rsync_helper.sh,sha256=
+sky/utils/kubernetes/rsync_helper.sh,sha256=h4YwrPFf9727CACnMJvF3EyK_0OeOYKKt4su_daKekw,1256
 sky/utils/kubernetes/ssh_jump_lifecycle_manager.py,sha256=RFLJ3k7MR5UN4SKHykQ0lV9SgXumoULpKYIAt1vh-HU,6560
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
-skypilot_nightly-1.0.0.
+skypilot_nightly-1.0.0.dev20241126.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
+skypilot_nightly-1.0.0.dev20241126.dist-info/METADATA,sha256=HJoBgf0CXoxGWqaKFRngTJ-fvD_RJ30Qi0TwEP_ZlO4,20222
+skypilot_nightly-1.0.0.dev20241126.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+skypilot_nightly-1.0.0.dev20241126.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
+skypilot_nightly-1.0.0.dev20241126.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
+skypilot_nightly-1.0.0.dev20241126.dist-info/RECORD,,
{skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/LICENSE
RENAMED
File without changes
{skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/WHEEL
RENAMED
File without changes
{skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/entry_points.txt
RENAMED
File without changes
{skypilot_nightly-1.0.0.dev20241124.dist-info → skypilot_nightly-1.0.0.dev20241126.dist-info}/top_level.txt
RENAMED
File without changes