skypilot-nightly 1.0.0.dev20241123__py3-none-any.whl → 1.0.0.dev20241125__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sky/__init__.py CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
 import urllib.request
 
 # Replaced with the current commit when building the wheels.
-_SKYPILOT_COMMIT_SHA = '0b20d568ee1af454bfec3e50ff62d239f976e52d'
+_SKYPILOT_COMMIT_SHA = '894330bd5455e8b6739a5c22abdee2529f5fe282'
 
 
 def _get_git_commit():
@@ -35,7 +35,7 @@ def _get_git_commit():
 
 
 __commit__ = _get_git_commit()
-__version__ = '1.0.0.dev20241123'
+__version__ = '1.0.0.dev20241125'
 __root_dir__ = os.path.dirname(os.path.abspath(__file__))
 
 
sky/adaptors/kubernetes.py CHANGED
@@ -19,6 +19,13 @@ urllib3 = common.LazyImport('urllib3',
 # Timeout to use for API calls
 API_TIMEOUT = 5
 
+DEFAULT_IN_CLUSTER_REGION = 'in-cluster'
+# The name for the environment variable that stores the in-cluster context name
+# for Kubernetes clusters. This is used to associate a name with the current
+# context when running with in-cluster auth. If not set, the context name is
+# set to DEFAULT_IN_CLUSTER_REGION.
+IN_CLUSTER_CONTEXT_NAME_ENV_VAR = 'SKYPILOT_IN_CLUSTER_CONTEXT_NAME'
+
 
 def _decorate_methods(obj: Any, decorator: Callable, decoration_type: str):
     for attr_name in dir(obj):
@@ -57,16 +64,8 @@ def _api_logging_decorator(logger: str, level: int):
 
 def _load_config(context: Optional[str] = None):
     urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-    try:
-        # Load in-cluster config if running in a pod
-        # Kubernetes set environment variables for service discovery do not
-        # show up in SkyPilot tasks. For now, we work around by using
-        # DNS name instead of environment variables.
-        # See issue: https://github.com/skypilot-org/skypilot/issues/2287
-        os.environ['KUBERNETES_SERVICE_HOST'] = 'kubernetes.default.svc'
-        os.environ['KUBERNETES_SERVICE_PORT'] = '443'
-        kubernetes.config.load_incluster_config()
-    except kubernetes.config.config_exception.ConfigException:
+
+    def _load_config_from_kubeconfig(context: Optional[str] = None):
         try:
             kubernetes.config.load_kube_config(context=context)
         except kubernetes.config.config_exception.ConfigException as e:
@@ -90,6 +89,21 @@ def _load_config(context: Optional[str] = None):
             with ux_utils.print_exception_no_traceback():
                 raise ValueError(err_str) from None
 
+    if context == in_cluster_context_name() or context is None:
+        try:
+            # Load in-cluster config if running in a pod and context is None.
+            # Kubernetes set environment variables for service discovery do not
+            # show up in SkyPilot tasks. For now, we work around by using
+            # DNS name instead of environment variables.
+            # See issue: https://github.com/skypilot-org/skypilot/issues/2287
+            os.environ['KUBERNETES_SERVICE_HOST'] = 'kubernetes.default.svc'
+            os.environ['KUBERNETES_SERVICE_PORT'] = '443'
+            kubernetes.config.load_incluster_config()
+        except kubernetes.config.config_exception.ConfigException:
+            _load_config_from_kubeconfig()
+    else:
+        _load_config_from_kubeconfig(context)
+
 
 @_api_logging_decorator('urllib3', logging.ERROR)
 @functools.lru_cache()
@@ -154,3 +168,13 @@ def max_retry_error():
 
 def stream():
     return kubernetes.stream.stream
+
+
+def in_cluster_context_name() -> Optional[str]:
+    """Returns the name of the in-cluster context from the environment.
+
+    If the environment variable is not set, returns the default in-cluster
+    context name.
+    """
+    return (os.environ.get(IN_CLUSTER_CONTEXT_NAME_ENV_VAR) or
+            DEFAULT_IN_CLUSTER_REGION)
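
Taken together, these changes replace the old `None` sentinel for in-cluster auth with a real, user-visible context name. A minimal standalone sketch of the resolution order (the constant and function mirror the diff above; nothing here imports SkyPilot):

```python
import os

# Names copied from the diff above.
DEFAULT_IN_CLUSTER_REGION = 'in-cluster'
IN_CLUSTER_CONTEXT_NAME_ENV_VAR = 'SKYPILOT_IN_CLUSTER_CONTEXT_NAME'


def in_cluster_context_name() -> str:
    # The operator-provided name wins; otherwise fall back to the default.
    return (os.environ.get(IN_CLUSTER_CONTEXT_NAME_ENV_VAR) or
            DEFAULT_IN_CLUSTER_REGION)


assert in_cluster_context_name() == 'in-cluster'    # env var unset
os.environ[IN_CLUSTER_CONTEXT_NAME_ENV_VAR] = 'prod-cluster'
assert in_cluster_context_name() == 'prod-cluster'  # env var set
```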
sky/authentication.py CHANGED
@@ -380,8 +380,8 @@ def setup_kubernetes_authentication(config: Dict[str, Any]) -> Dict[str, Any]:
     secret_field_name = clouds.Kubernetes().ssh_key_secret_field_name
     context = config['provider'].get(
         'context', kubernetes_utils.get_current_kube_config_context_name())
-    if context == kubernetes_utils.IN_CLUSTER_REGION:
-        # If the context is set to IN_CLUSTER_REGION, we are running in a pod
+    if context == kubernetes.in_cluster_context_name():
+        # If the context is an in-cluster context name, we are running in a pod
         # with in-cluster configuration. We need to set the context to None
         # to use the mounted service account.
         context = None
@@ -730,7 +730,12 @@ def write_cluster_config(
                     f'{skypilot_config.loaded_config_path!r} for {cloud}, but it '
                     'is not supported by this cloud. Remove the config or set: '
                     '`remote_identity: LOCAL_CREDENTIALS`.')
-            excluded_clouds.add(cloud)
+            if isinstance(cloud, clouds.Kubernetes):
+                if skypilot_config.get_nested(
+                        ('kubernetes', 'allowed_contexts'), None) is None:
+                    excluded_clouds.add(cloud)
+            else:
+                excluded_clouds.add(cloud)
 
     for cloud_str, cloud_obj in cloud_registry.CLOUD_REGISTRY.items():
         remote_identity_config = skypilot_config.get_nested(
sky/clouds/kubernetes.py CHANGED
@@ -130,32 +130,30 @@ class Kubernetes(clouds.Cloud):
                            'Ignoring these contexts.')
 
     @classmethod
-    def _existing_allowed_contexts(cls) -> List[Optional[str]]:
+    def _existing_allowed_contexts(cls) -> List[str]:
         """Get existing allowed contexts.
 
         If None is returned in the list, it means that we are running in a pod
         with in-cluster auth. In this case, we specify None context, which will
         use the service account mounted in the pod.
         """
-        all_contexts = kubernetes_utils.get_all_kube_config_context_names()
+        all_contexts = kubernetes_utils.get_all_kube_context_names()
         if len(all_contexts) == 0:
             return []
-        if all_contexts == [None]:
-            # If only one context is found and it is None, we are running in a
-            # pod with in-cluster auth. In this case, we allow it to be used
-            # without checking against allowed_contexts.
-            # TODO(romilb): We may want check in-cluster auth against
-            # allowed_contexts in the future by adding a special context name
-            # for in-cluster auth.
-            return [None]
+
         all_contexts = set(all_contexts)
 
         allowed_contexts = skypilot_config.get_nested(
             ('kubernetes', 'allowed_contexts'), None)
 
         if allowed_contexts is None:
+            # Try kubeconfig if present
             current_context = (
                 kubernetes_utils.get_current_kube_config_context_name())
+            if (current_context is None and
+                    kubernetes_utils.is_incluster_config_available()):
+                # If no kubeconfig contexts found, use in-cluster if available
+                current_context = kubernetes.in_cluster_context_name()
             allowed_contexts = []
             if current_context is not None:
                 allowed_contexts = [current_context]
@@ -180,13 +178,7 @@ class Kubernetes(clouds.Cloud):
 
         regions = []
         for context in existing_contexts:
-            if context is None:
-                # If running in-cluster, we allow the region to be set to the
-                # singleton region since there is no context name available.
-                regions.append(clouds.Region(
-                    kubernetes_utils.IN_CLUSTER_REGION))
-            else:
-                regions.append(clouds.Region(context))
+            regions.append(clouds.Region(context))
 
         if region is not None:
             regions = [r for r in regions if r.name == region]
@@ -409,12 +401,25 @@ class Kubernetes(clouds.Cloud):
         remote_identity = skypilot_config.get_nested(
             ('kubernetes', 'remote_identity'),
             schemas.get_default_remote_identity('kubernetes'))
-        if (remote_identity ==
+
+        if isinstance(remote_identity, dict):
+            # If remote_identity is a dict, use the service account for the
+            # current context
+            k8s_service_account_name = remote_identity.get(context, None)
+            if k8s_service_account_name is None:
+                err_msg = (f'Context {context!r} not found in '
+                           'remote identities from config.yaml')
+                raise ValueError(err_msg)
+        else:
+            # If remote_identity is not a dict, use
+            k8s_service_account_name = remote_identity
+
+        if (k8s_service_account_name ==
                 schemas.RemoteIdentityOptions.LOCAL_CREDENTIALS.value):
             # SA name doesn't matter since automounting credentials is disabled
             k8s_service_account_name = 'default'
             k8s_automount_sa_token = 'false'
-        elif (remote_identity ==
+        elif (k8s_service_account_name ==
                 schemas.RemoteIdentityOptions.SERVICE_ACCOUNT.value):
             # Use the default service account
             k8s_service_account_name = (
@@ -422,7 +427,6 @@ class Kubernetes(clouds.Cloud):
             k8s_automount_sa_token = 'true'
         else:
             # User specified a custom service account
-            k8s_service_account_name = remote_identity
             k8s_automount_sa_token = 'true'
 
         fuse_device_required = bool(resources.requires_fuse)
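
The dict form of `remote_identity` maps a kubeconfig context name to the service account to use for that context, while the plain string form keeps the old single-value behavior. A standalone sketch of the lookup implemented above (the context and service-account names are hypothetical):

```python
from typing import Dict, Union


def resolve_service_account(remote_identity: Union[str, Dict[str, str]],
                            context: str) -> str:
    if isinstance(remote_identity, dict):
        # Per-context mapping: a missing context is a configuration error.
        sa_name = remote_identity.get(context)
        if sa_name is None:
            raise ValueError(f'Context {context!r} not found in '
                             'remote identities from config.yaml')
        return sa_name
    # A plain string applies to every context.
    return remote_identity


# Hypothetical config.yaml contents after parsing:
remote_identity = {'gke-prod': 'sky-sa', 'eks-dev': 'SERVICE_ACCOUNT'}
assert resolve_service_account(remote_identity, 'gke-prod') == 'sky-sa'
assert resolve_service_account('sky-sa', 'any-context') == 'sky-sa'
```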
@@ -447,6 +451,12 @@ class Kubernetes(clouds.Cloud):
             ('kubernetes', 'provision_timeout'),
             timeout,
             override_configs=resources.cluster_config_overrides)
+
+        # Set environment variables for the pod. Note that SkyPilot env vars
+        # are set separately when the task is run. These env vars are
+        # independent of the SkyPilot task to be run.
+        k8s_env_vars = {kubernetes.IN_CLUSTER_CONTEXT_NAME_ENV_VAR: context}
+
         # We specify object-store-memory to be 500MB to avoid taking up too
         # much memory on the head node. 'num-cpus' should be set to limit
         # the CPU usage on the head pod, otherwise the ray cluster will use the
@@ -480,6 +490,7 @@ class Kubernetes(clouds.Cloud):
             'k8s_topology_label_key': k8s_topology_label_key,
             'k8s_topology_label_value': k8s_topology_label_value,
             'k8s_resource_key': k8s_resource_key,
+            'k8s_env_vars': k8s_env_vars,
             'image_id': image_id,
             'ray_installation_commands': constants.RAY_INSTALLATION_COMMANDS,
             'ray_head_start_command': instance_setup.ray_head_start_command(
@@ -625,16 +636,13 @@ class Kubernetes(clouds.Cloud):
             # TODO: Remove this after 0.9.0.
             return region, zone
 
-        if region == kubernetes_utils.IN_CLUSTER_REGION:
+        if region == kubernetes.in_cluster_context_name():
             # If running incluster, we set region to IN_CLUSTER_REGION
             # since there is no context name available.
             return region, zone
 
-        all_contexts = kubernetes_utils.get_all_kube_config_context_names()
-        if all_contexts == [None]:
-            # If [None] context is returned, use the singleton region since we
-            # are running in a pod with in-cluster auth.
-            all_contexts = [kubernetes_utils.IN_CLUSTER_REGION]
+        all_contexts = kubernetes_utils.get_all_kube_context_names()
+
         if region not in all_contexts:
             raise ValueError(
                 f'Context {region} not found in kubeconfig. Kubernetes only '
sky/clouds/service_catalog/kubernetes_catalog.py CHANGED
@@ -65,9 +65,14 @@ def list_accelerators(
     # TODO(romilb): We should consider putting a lru_cache() with TTL to
     # avoid multiple calls to kubernetes API in a short period of time (e.g.,
     # from the optimizer).
-    return list_accelerators_realtime(gpus_only, name_filter, region_filter,
-                                      quantity_filter, case_sensitive,
-                                      all_regions, require_price)[0]
+    return _list_accelerators(gpus_only,
+                              name_filter,
+                              region_filter,
+                              quantity_filter,
+                              case_sensitive,
+                              all_regions,
+                              require_price,
+                              realtime=False)[0]
 
 
 def list_accelerators_realtime(
@@ -78,10 +83,36 @@ def list_accelerators_realtime(
     case_sensitive: bool = True,
     all_regions: bool = False,
     require_price: bool = True
+) -> Tuple[Dict[str, List[common.InstanceTypeInfo]], Dict[str, int], Dict[str,
+                                                                          int]]:
+    return _list_accelerators(gpus_only,
+                              name_filter,
+                              region_filter,
+                              quantity_filter,
+                              case_sensitive,
+                              all_regions,
+                              require_price,
+                              realtime=True)
+
+
+def _list_accelerators(
+    gpus_only: bool,
+    name_filter: Optional[str],
+    region_filter: Optional[str],
+    quantity_filter: Optional[int],
+    case_sensitive: bool = True,
+    all_regions: bool = False,
+    require_price: bool = True,
+    realtime: bool = False
 ) -> Tuple[Dict[str, List[common.InstanceTypeInfo]], Dict[str, int], Dict[str,
                                                                           int]]:
     """List accelerators in the Kubernetes cluster.
 
+    If realtime is True, the function will query the cluster to fetch real-time
+    GPU usage, which is returned in total_accelerators_available. Note that
+    this may require an expensive list_pod_for_all_namespaces call, which
+    requires cluster-wide pod read permissions.
+
     If the user does not have sufficient permissions to list pods in all
     namespaces, the function will return free GPUs as -1.
     """
@@ -115,18 +146,20 @@ def list_accelerators_realtime(
     accelerators_qtys: Set[Tuple[str, int]] = set()
     keys = lf.get_label_keys()
     nodes = kubernetes_utils.get_kubernetes_nodes(context)
-    # Get the pods to get the real-time GPU usage
-    try:
-        pods = kubernetes_utils.get_all_pods_in_kubernetes_cluster(context)
-    except kubernetes.api_exception() as e:
-        if e.status == 403:
-            logger.warning('Failed to get pods in the Kubernetes cluster '
-                           '(forbidden). Please check if your account has '
-                           'necessary permissions to list pods. Realtime GPU '
-                           'availability information may be incorrect.')
-            pods = None
-        else:
-            raise
+    pods = None
+    if realtime:
+        # Get the pods to get the real-time GPU usage
+        try:
+            pods = kubernetes_utils.get_all_pods_in_kubernetes_cluster(context)
+        except kubernetes.api_exception() as e:
+            if e.status == 403:
+                logger.warning(
+                    'Failed to get pods in the Kubernetes cluster '
+                    '(forbidden). Please check if your account has '
+                    'necessary permissions to list pods. Realtime GPU '
+                    'availability information may be incorrect.')
+            else:
+                raise
     # Total number of GPUs in the cluster
     total_accelerators_capacity: Dict[str, int] = {}
     # Total number of GPUs currently available in the cluster
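
After this refactor, both public entry points delegate to `_list_accelerators`; only the realtime path lists pods cluster-wide. A hedged sketch of the call pattern (the import path is taken from the RECORD listing below; argument values are illustrative):

```python
from sky.clouds.service_catalog import kubernetes_catalog

# Catalog-only view: no pod listing, so no cluster-wide pod read
# permissions are needed and free-GPU counts are not computed.
qtys = kubernetes_catalog.list_accelerators(
    gpus_only=True, name_filter=None, region_filter=None, quantity_filter=None)

# Realtime view: may issue list_pod_for_all_namespaces, returning capacity
# and availability maps alongside the accelerator list.
qtys, capacity, available = kubernetes_catalog.list_accelerators_realtime(
    gpus_only=True, name_filter=None, region_filter=None, quantity_filter=None)
```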
sky/execution.py CHANGED
@@ -305,7 +305,8 @@ def _execute(
     do_workdir = (Stage.SYNC_WORKDIR in stages and not dryrun and
                   task.workdir is not None)
     do_file_mounts = (Stage.SYNC_FILE_MOUNTS in stages and not dryrun and
-                      task.file_mounts is not None)
+                      (task.file_mounts is not None or
+                       task.storage_mounts is not None))
     if do_workdir or do_file_mounts:
         logger.info(ux_utils.starting_message('Mounting files.'))
 
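Before this fix, a task that declared only `storage_mounts` (no `file_mounts`) skipped the SYNC_FILE_MOUNTS stage entirely. A minimal sketch of a task shape that is now handled (the bucket name is hypothetical; `sky.Storage` and `set_storage_mounts` are used as in the public API at the time of this release):

```python
import sky

task = sky.Task(run='ls /data')
# Storage mount only; file_mounts stays None. The mount stage now still runs.
task.set_storage_mounts({
    '/data': sky.Storage(source='s3://my-existing-bucket'),
})
sky.launch(task, cluster_name='demo')
```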
sky/provision/azure/instance.py CHANGED
@@ -305,7 +305,8 @@ def _create_vm(
         network_profile=network_profile,
         identity=compute.VirtualMachineIdentity(
             type='UserAssigned',
-            user_assigned_identities={provider_config['msi']: {}}))
+            user_assigned_identities={provider_config['msi']: {}}),
+        priority=node_config['azure_arm_parameters']['priority'])
     vm_poller = compute_client.virtual_machines.begin_create_or_update(
         resource_group_name=provider_config['resource_group'],
         vm_name=vm_name,
sky/provision/kubernetes/utils.py CHANGED
@@ -37,7 +37,6 @@ if typing.TYPE_CHECKING:
 
 # TODO(romilb): Move constants to constants.py
 DEFAULT_NAMESPACE = 'default'
-IN_CLUSTER_REGION = 'in-cluster'
 
 DEFAULT_SERVICE_ACCOUNT_NAME = 'skypilot-service-account'
 
@@ -921,6 +920,9 @@ def is_kubeconfig_exec_auth(
         str: Error message if exec-based authentication is used, None otherwise
     """
     k8s = kubernetes.kubernetes
+    if context == kubernetes.in_cluster_context_name():
+        # If in-cluster config is used, exec-based auth is not used.
+        return False, None
     try:
         k8s.config.load_kube_config()
     except kubernetes.config_exception():
@@ -1003,30 +1005,34 @@ def is_incluster_config_available() -> bool:
     return os.path.exists('/var/run/secrets/kubernetes.io/serviceaccount/token')
 
 
-def get_all_kube_config_context_names() -> List[Optional[str]]:
-    """Get all kubernetes context names from the kubeconfig file.
+def get_all_kube_context_names() -> List[str]:
+    """Get all kubernetes context names available in the environment.
+
+    Fetches context names from the kubeconfig file and in-cluster auth, if any.
 
-    If running in-cluster, returns [None] to indicate in-cluster config.
+    If running in-cluster and IN_CLUSTER_CONTEXT_NAME_ENV_VAR is not set,
+    returns the default in-cluster kubernetes context name.
 
     We should not cache the result of this function as the admin policy may
     update the contexts.
 
     Returns:
         List[Optional[str]]: The list of kubernetes context names if
-            available, an empty list otherwise. If running in-cluster,
-            returns [None] to indicate in-cluster config.
+            available, an empty list otherwise.
     """
    k8s = kubernetes.kubernetes
+    context_names = []
     try:
         all_contexts, _ = k8s.config.list_kube_config_contexts()
         # all_contexts will always have at least one context. If kubeconfig
         # does not have any contexts defined, it will raise ConfigException.
-        return [context['name'] for context in all_contexts]
+        context_names = [context['name'] for context in all_contexts]
     except k8s.config.config_exception.ConfigException:
-        # If running in cluster, return [None] to indicate in-cluster config
-        if is_incluster_config_available():
-            return [None]
-        return []
+        # If no config found, continue
+        pass
+    if is_incluster_config_available():
+        context_names.append(kubernetes.in_cluster_context_name())
+    return context_names
 
 
 @functools.lru_cache()
@@ -2185,9 +2191,9 @@ def set_autodown_annotations(handle: 'backends.CloudVmRayResourceHandle',
 def get_context_from_config(provider_config: Dict[str, Any]) -> Optional[str]:
     context = provider_config.get('context',
                                   get_current_kube_config_context_name())
-    if context == IN_CLUSTER_REGION:
-        # If the context (also used as the region) is set to IN_CLUSTER_REGION
-        # we need to use in-cluster auth.
+    if context == kubernetes.in_cluster_context_name():
+        # If the context (also used as the region) is in-cluster, we need to
+        # we need to use in-cluster auth by setting the context to None.
         context = None
     return context
 
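The rewritten helper folds in-cluster auth into the same list as kubeconfig contexts instead of signaling it with `[None]`. A standalone sketch of the merge behavior (the helper below mirrors `get_all_kube_context_names` rather than importing it):

```python
from typing import List


def merged_context_names(kubeconfig_contexts: List[str],
                         incluster_available: bool,
                         in_cluster_name: str = 'in-cluster') -> List[str]:
    # Kubeconfig contexts first; [] if no kubeconfig is present.
    context_names = list(kubeconfig_contexts)
    if incluster_available:
        # In-cluster auth is appended as an ordinary, named context.
        context_names.append(in_cluster_name)
    return context_names


# Pod with a mounted service-account token and no kubeconfig:
assert merged_context_names([], True) == ['in-cluster']
# Laptop with two kubeconfig contexts and no in-cluster auth:
assert merged_context_names(['gke', 'eks'], False) == ['gke', 'eks']
```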
sky/templates/azure-ray.yml.j2 CHANGED
@@ -75,9 +75,6 @@ available_node_types:
         {%- if use_spot %}
         # optionally set priority to use Spot instances
         priority: Spot
-        # set a maximum price for spot instances if desired
-        # billingProfile:
-        #     maxPrice: -1
         {%- endif %}
         cloudInitSetupCommands: |-
           {%- for cmd in cloud_init_setup_commands %}
sky/templates/kubernetes-port-forward-proxy-command.sh CHANGED
@@ -58,6 +58,11 @@ KUBECTL_ARGS=()
 if [ -n "$KUBE_CONTEXT" ]; then
   KUBECTL_ARGS+=("--context=$KUBE_CONTEXT")
 fi
+# If context is not provided, it means we are using incluster auth. In this case,
+# we need to set KUBECONFIG to /dev/null to avoid using kubeconfig file.
+if [ -z "$KUBE_CONTEXT" ]; then
+  KUBECTL_ARGS+=("--kubeconfig=/dev/null")
+fi
 if [ -n "$KUBE_NAMESPACE" ]; then
   KUBECTL_ARGS+=("--namespace=$KUBE_NAMESPACE")
 fi
sky/templates/kubernetes-ray.yml.j2 CHANGED
@@ -322,11 +322,22 @@ available_node_types:
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.labels['ray-node-type']
+            {% for key, value in k8s_env_vars.items() if k8s_env_vars is not none %}
+            - name: {{ key }}
+              value: {{ value }}
+            {% endfor %}
             # Do not change this command - it keeps the pod alive until it is
             # explicitly killed.
             command: ["/bin/bash", "-c", "--"]
             args:
               - |
+                # For backwards compatibility, we put a marker file in the pod
+                # to indicate that the pod is running with the changes introduced
+                # in project nimbus: https://github.com/skypilot-org/skypilot/pull/4393
+                # TODO: Remove this marker file and it's usage in setup_commands
+                # after v0.10.0 release.
+                touch /tmp/skypilot_is_nimbus
+
                 # Helper function to conditionally use sudo
                 # TODO(zhwu): consolidate the two prefix_cmd and sudo replacements
                 prefix_cmd() { if [ $(id -u) -ne 0 ]; then echo "sudo"; else echo ""; fi; }
@@ -575,9 +586,12 @@ setup_commands:
       STEPS=("apt-ssh-setup" "runtime-setup" "env-setup")
       start_epoch=$(date +%s);
       echo "=== Logs for asynchronous ray and skypilot installation ===";
-      [ -f /tmp/ray_skypilot_installation_complete ] && cat /tmp/${STEPS[1]}.log ||
-      { tail -f -n +1 /tmp/${STEPS[1]}.log & TAIL_PID=$!; echo "Tail PID: $TAIL_PID"; until [ -f /tmp/ray_skypilot_installation_complete ]; do sleep 0.5; done; kill $TAIL_PID || true; };
-      [ -f /tmp/${STEPS[1]}.failed ] && { echo "Error: ${STEPS[1]} failed. Exiting."; exit 1; } || true;
+      if [ -f /tmp/skypilot_is_nimbus ]; then
+        echo "=== Logs for asynchronous ray and skypilot installation ===";
+        [ -f /tmp/ray_skypilot_installation_complete ] && cat /tmp/${STEPS[1]}.log ||
+        { tail -f -n +1 /tmp/${STEPS[1]}.log & TAIL_PID=$!; echo "Tail PID: $TAIL_PID"; until [ -f /tmp/ray_skypilot_installation_complete ]; do sleep 0.5; done; kill $TAIL_PID || true; };
+        [ -f /tmp/${STEPS[1]}.failed ] && { echo "Error: ${STEPS[1]} failed. Exiting."; exit 1; } || true;
+      fi
       end_epoch=$(date +%s);
       echo "=== Ray and skypilot dependencies installation completed in $(($end_epoch - $start_epoch)) secs ===";
       start_epoch=$(date +%s);
sky/utils/command_runner.py CHANGED
@@ -767,6 +767,10 @@ class KubernetesCommandRunner(CommandRunner):
         ]
         if self.context:
             kubectl_args += ['--context', self.context]
+        # If context is none, it means we are using incluster auth. In this
+        # case, need to set KUBECONFIG to /dev/null to avoid using kubeconfig.
+        if self.context is None:
+            kubectl_args += ['--kubeconfig', '/dev/null']
         kubectl_args += [self.pod_name]
         if ssh_mode == SshMode.LOGIN:
             assert isinstance(cmd, list), 'cmd must be a list for login mode.'
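
The same trick appears in three files in this release (the port-forward proxy script above, the command runner here, and rsync_helper.sh below): pointing kubectl at an empty kubeconfig neutralizes any stale kubeconfig baked into the image, so the client falls back to the pod's mounted service-account credentials. A simplified standalone sketch of the argument construction:

```python
from typing import List, Optional


def kubectl_exec_args(context: Optional[str], namespace: str,
                      pod_name: str) -> List[str]:
    args = ['kubectl', 'exec', '-i', '-n', namespace]
    if context:
        args += ['--context', context]
    else:
        # No context means in-cluster auth: an empty kubeconfig forces
        # kubectl to use the mounted service-account credentials.
        args += ['--kubeconfig', '/dev/null']
    return args + [pod_name]


print(kubectl_exec_args(None, 'default', 'sky-head'))
# ['kubectl', 'exec', '-i', '-n', 'default', '--kubeconfig', '/dev/null', 'sky-head']
```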
sky/utils/kubernetes/generate_kubeconfig.sh CHANGED
@@ -12,6 +12,7 @@
 # * Specify SKYPILOT_NAMESPACE env var to override the default namespace where the service account is created.
 # * Specify SKYPILOT_SA_NAME env var to override the default service account name.
 # * Specify SKIP_SA_CREATION=1 to skip creating the service account and use an existing one
+# * Specify SUPER_USER=1 to create a service account with cluster-admin permissions
 #
 # Usage:
 # # Create "sky-sa" service account with minimal permissions in "default" namespace and generate kubeconfig
@@ -22,6 +23,9 @@
 #
 # # Use an existing service account "my-sa" in "my-namespace" namespace and generate kubeconfig
 # $ SKIP_SA_CREATION=1 SKYPILOT_SA_NAME=my-sa SKYPILOT_NAMESPACE=my-namespace ./generate_kubeconfig.sh
+#
+# # Create "sky-sa" service account with cluster-admin permissions in "default" namespace
+# $ SUPER_USER=1 ./generate_kubeconfig.sh
 
 set -eu -o pipefail
 
@@ -29,9 +33,11 @@ set -eu -o pipefail
 # use default.
 SKYPILOT_SA=${SKYPILOT_SA_NAME:-sky-sa}
 NAMESPACE=${SKYPILOT_NAMESPACE:-default}
+SUPER_USER=${SUPER_USER:-0}
 
 echo "Service account: ${SKYPILOT_SA}"
 echo "Namespace: ${NAMESPACE}"
+echo "Super user permissions: ${SUPER_USER}"
 
 # Set OS specific values.
 if [[ "$OSTYPE" == "linux-gnu" ]]; then
@@ -47,8 +53,43 @@ fi
 
 # If the user has set SKIP_SA_CREATION=1, skip creating the service account.
 if [ -z ${SKIP_SA_CREATION+x} ]; then
-  echo "Creating the Kubernetes Service Account with minimal RBAC permissions."
-  kubectl apply -f - <<EOF
+  echo "Creating the Kubernetes Service Account with ${SUPER_USER:+super user}${SUPER_USER:-minimal} RBAC permissions."
+  if [ "${SUPER_USER}" = "1" ]; then
+    # Create service account with cluster-admin permissions
+    kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ${NAMESPACE}
+  labels:
+    parent: skypilot
+---
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: ${SKYPILOT_SA}
+  namespace: ${NAMESPACE}
+  labels:
+    parent: skypilot
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: ${SKYPILOT_SA}-cluster-admin
+  labels:
+    parent: skypilot
+subjects:
+- kind: ServiceAccount
+  name: ${SKYPILOT_SA}
+  namespace: ${NAMESPACE}
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+EOF
+  else
+    # Original RBAC rules for minimal permissions
+    kubectl apply -f - <<EOF
 # Create/update namespace specified by the user
 apiVersion: v1
 kind: Namespace
@@ -173,6 +214,7 @@ roleRef:
   name: skypilot-system-service-account-role
   apiGroup: rbac.authorization.k8s.io
 EOF
+  fi
   # Apply optional ingress-related roles, but don't make the script fail if it fails
   kubectl apply -f - <<EOF || echo "Failed to apply optional ingress-related roles. Nginx ingress is likely not installed. This is not critical and the script will continue."
   # Optional: Role for accessing ingress resources
sky/utils/kubernetes/rsync_helper.sh CHANGED
@@ -16,7 +16,9 @@ echo "context: $context" >&2
 context_lower=$(echo "$context" | tr '[:upper:]' '[:lower:]')
 shift
 if [ -z "$context" ] || [ "$context_lower" = "none" ]; then
-  kubectl exec -i $pod -n $namespace -- "$@"
+  # If context is none, it means we are using incluster auth. In this case,
+  # use need to set KUBECONFIG to /dev/null to avoid using kubeconfig file.
+  kubectl exec -i $pod -n $namespace --kubeconfig=/dev/null -- "$@"
 else
   kubectl exec -i $pod -n $namespace --context=$context -- "$@"
 fi
sky/utils/schemas.py CHANGED
@@ -684,7 +684,14 @@ _REMOTE_IDENTITY_SCHEMA = {
 
 _REMOTE_IDENTITY_SCHEMA_KUBERNETES = {
     'remote_identity': {
-        'type': 'string'
+        'anyOf': [{
+            'type': 'string'
+        }, {
+            'type': 'object',
+            'additionalProperties': {
+                'type': 'string'
+            }
+        }]
     },
 }
 
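The widened schema accepts either a single service-account string or a context-to-service-account mapping. A quick check with the jsonschema package, inlining just the `remote_identity` subschema from above:

```python
import jsonschema

remote_identity_schema = {
    'anyOf': [{
        'type': 'string'
    }, {
        'type': 'object',
        'additionalProperties': {
            'type': 'string'
        }
    }]
}

jsonschema.validate('SERVICE_ACCOUNT', remote_identity_schema)       # ok
jsonschema.validate({'gke-prod': 'sky-sa'}, remote_identity_schema)  # ok
try:
    jsonschema.validate(42, remote_identity_schema)
except jsonschema.exceptions.ValidationError:
    print('rejected: neither a string nor a str -> str mapping')
```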
{skypilot_nightly-1.0.0.dev20241123.dist-info → skypilot_nightly-1.0.0.dev20241125.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: skypilot-nightly
-Version: 1.0.0.dev20241123
+Version: 1.0.0.dev20241125
 Summary: SkyPilot: An intercloud broker for the clouds
 Author: SkyPilot Team
 License: Apache 2.0
{skypilot_nightly-1.0.0.dev20241123.dist-info → skypilot_nightly-1.0.0.dev20241125.dist-info}/RECORD RENAMED
@@ -1,13 +1,13 @@
-sky/__init__.py,sha256=iM3vq3y59ysi5MEId5G35-3OCeuK0s6QqFeOZFw0TiM,5882
+sky/__init__.py,sha256=MGfQ2l_7Ed92jVfreJmVbhoyb14CRXKGvq7w0z5gKPs,5882
 sky/admin_policy.py,sha256=hPo02f_A32gCqhUueF0QYy1fMSSKqRwYEg_9FxScN_s,3248
-sky/authentication.py,sha256=pAdCT60OxxiXI9KXDyP2lQ9u9vMc6aMtq5Xi2h_hbdw,20984
+sky/authentication.py,sha256=kACHmiZgWgRpYd1wx1ofbXRMErfMcFmWrkw4a9NxYrY,20988
 sky/check.py,sha256=D3Y3saIFAYVvPxuBHnVgJEO0fUVDxgjwuMBaO-D778k,9472
 sky/cli.py,sha256=2QrlLeMwKpVKYOBDwtgs9zkBvNgn9Rg3XKk9aE6_0eY,213418
 sky/cloud_stores.py,sha256=RjFgmRhUh1Kk__f6g3KxzLp9s7dA0pFK4W1AukEuUaw,21153
 sky/core.py,sha256=0-4W_DKJZgbwXuzNZKQ2R_qJxqxbqqNfyi0U0PQBKvQ,38230
 sky/dag.py,sha256=f3sJlkH4bE6Uuz3ozNtsMhcBpRx7KmC9Sa4seDKt4hU,3104
 sky/exceptions.py,sha256=E3C2Ejcc8RUDAUQn7ar_Jr97C_AxD2rKKMmJOfLJ9d0,8965
-sky/execution.py,sha256=k6_n-kY1w93UVJZncQdNjqnil1aFIz5WGVwALNUCMqo,27458
+sky/execution.py,sha256=RsVYs_Fkt8OJUJemz49mJLKE5iZaOvNHCTPcnQ_ngNQ,27522
 sky/global_user_state.py,sha256=ob3jvtG_yMPGvLlVScgeJ9pqk3FP4jhfEixw8WzFwho,29682
 sky/optimizer.py,sha256=GjvKQIBtY3NlULzau_9tfa7V2KYVJRrmNrjKVIWCPIQ,59753
 sky/resources.py,sha256=260JnyzYz0uAYVH8lIfbHjSZUvHne8mfoWmjo_YgeEI,69106
@@ -24,13 +24,13 @@ sky/adaptors/cudo.py,sha256=WGvIQrlzJkGDe02Ve7pygA56tHwUc4kwS3XHW8kMFAA,239
 sky/adaptors/docker.py,sha256=_kzpZ0fkWHqqQAVVl0llTsCE31KYz3Sjn8psTBQHVkA,468
 sky/adaptors/gcp.py,sha256=OQ9RaqjR0r0iaWYpjvEtIx5vnEhyB4LhUCwbtdxsmVk,3115
 sky/adaptors/ibm.py,sha256=H87vD6izq_wQI8oQC7cx9iVtRgPi_QkAcrfa1Z3PNqU,4906
-sky/adaptors/kubernetes.py,sha256=3DOZkz7NvinNLXJYpU1i8tULCK2vNPmsSF7DMWDdPKc,5508
+sky/adaptors/kubernetes.py,sha256=5pRyPmXYpA0CrU5JFjh88TxC9TNemIaSUkSvaXysrCY,6510
 sky/adaptors/oci.py,sha256=n_zcrippTZRbTIhN3euD5sqNYn43G397zMavaJyEYbk,1480
 sky/adaptors/runpod.py,sha256=4Nt_BfZhJAKQNA3wO8cxvvNI8x4NsDGHu_4EhRDlGYQ,225
 sky/adaptors/vsphere.py,sha256=zJP9SeObEoLrpgHW2VHvZE48EhgVf8GfAEIwBeaDMfM,2129
 sky/backends/__init__.py,sha256=UDjwbUgpTRApbPJnNfR786GadUuwgRk3vsWoVu5RB_c,536
 sky/backends/backend.py,sha256=wwfbrxPhjMPs6PSyy3tAHI8WJhl-xhgzWBsAZjmJJ6g,6249
-sky/backends/backend_utils.py,sha256=_Kx8fp_inwxL8xf4I6HFWUGOZRT9edDM-2osaJaMlP0,126074
+sky/backends/backend_utils.py,sha256=tqlpVx7KuQH1LMewPZ9KkkMIR_0mUbrrzJ72kzXMXBk,126294
 sky/backends/cloud_vm_ray_backend.py,sha256=BDpruXsj-u4wc3WYscLhIbSjjsNZ85iI7fkb-T8f2Bs,233321
 sky/backends/docker_utils.py,sha256=Hyw1YY20EyghhEbYx6O2FIMDcGkNzBzV9TM7LFynei8,8358
 sky/backends/local_docker_backend.py,sha256=0JL5m0YUgOmOL4aWEUe4tmt89dsxjk4_WXkPwgEKEis,16801
@@ -48,7 +48,7 @@ sky/clouds/cudo.py,sha256=mglcsoYhdWwgkVgFcoZLE0M_UCXOJkvW6nITufPd_BQ,13118
 sky/clouds/fluidstack.py,sha256=u2I6jXEtTqgqRWi2EafMsKqc8VkUq1cR6CSDUvk72_U,12407
 sky/clouds/gcp.py,sha256=RH3MMdgR3FWPiwm7rFrJ5oebtTcSUVoh7UfQMc_6U4A,54704
 sky/clouds/ibm.py,sha256=0ArRTQx1_DpTNGByFhukzFedEDzmVjBsGiiques1bQ0,21447
-sky/clouds/kubernetes.py,sha256=Kt2ymUvxE5FgoYx5V-shPHX28mCidj9uJtS1p3X0Yu4,31412
+sky/clouds/kubernetes.py,sha256=zYF7GxkbxOFhQV374R0jFZo3k2qpfvMZo9j70w6wkV8,31545
 sky/clouds/lambda_cloud.py,sha256=42AmcN2X_wdBMuAw606nR_pQCBAy5QFiAo711_WRqDE,12672
 sky/clouds/oci.py,sha256=OzGWoU3DiMbFujMQLXgCr94Oqb9EyP0CsM4gMYOeU9s,26553
 sky/clouds/paperspace.py,sha256=0UxOcv_NaLY5hrFoAA_ZienclZUOqzf0yxXXZu4jXG0,10896
@@ -65,7 +65,7 @@ sky/clouds/service_catalog/cudo_catalog.py,sha256=V_takvL6dWTGQaTLCEvjKIotCDPnMu
 sky/clouds/service_catalog/fluidstack_catalog.py,sha256=21-cvrYEYTIi7n3ZNF2e7_0QX-PF4BkhlVJUWQOvKrY,5059
 sky/clouds/service_catalog/gcp_catalog.py,sha256=v_5fsB3dB9oD8U7lBKnCe5ii6AUWEOiQjNarMnU_qLA,24379
 sky/clouds/service_catalog/ibm_catalog.py,sha256=1iK0KvbI82U7sySb7chr-qm_16x3tTnZ6nIo7o76ouc,4493
-sky/clouds/service_catalog/kubernetes_catalog.py,sha256=2M4GyNi2XJq7LOgyr7Da1ncEoMRkezEzHJx4e9veKo0,11086
+sky/clouds/service_catalog/kubernetes_catalog.py,sha256=4MsPXyzpwncwiBmndnbYAMpf2yAP2xINeurM6AaVV2k,12335
 sky/clouds/service_catalog/lambda_catalog.py,sha256=2R-ccu63BbdvO6X80MtxiniA-jLewXb6I0Ye1rYD9fY,5302
 sky/clouds/service_catalog/oci_catalog.py,sha256=cyA6ZqwHGOKuPxUl_dKmFGdeWdQGMrvl_-o2MtyF998,8580
 sky/clouds/service_catalog/paperspace_catalog.py,sha256=MOlfoGRChjEwMzu4nRAho8DrIwwUJ3QlRzrMA1RLqvE,3789
@@ -118,7 +118,7 @@ sky/provision/aws/utils.py,sha256=m49pS-SHGW7Au3bhDeTPsL8N5iRzbwOXzyEWRCc1Vho,32
 sky/provision/azure/__init__.py,sha256=87cgk1_Ws7n9rqaDDPv-HpfrkVeSQMdFQnhnXwyx9g4,548
 sky/provision/azure/azure-config-template.json,sha256=jrjAgOtpe0e6FSg3vsVqHKQqJe0w-HeWOFT1HuwzS2c,4712
 sky/provision/azure/config.py,sha256=V5-0Zelt4Xo0vcqnD6PpsnaCS7vc3xosDelILDAKSW4,8885
-sky/provision/azure/instance.py,sha256=Xd1paLWVc6eVHzphOjZB4_BeXZNX7GYgPV9kH3GWvsc,48983
+sky/provision/azure/instance.py,sha256=7bbL5o8vgEeAA7iopPrd7Zh8pFs-SZpMzTeTmU4-zLU,49049
 sky/provision/cudo/__init__.py,sha256=KAEl26MVPsk7IoP9Gg-MOJJRIV6-X9B0fbyHdyJWdLo,741
 sky/provision/cudo/config.py,sha256=RYOVkV0MoUqVBJRZiKhBZhjFygeyFs7eUdVMdPg1vds,327
 sky/provision/cudo/cudo_machine_type.py,sha256=_VNXWPELmlFXbtdcnPvkuLuyE9CZ923BUCdiac-ClDY,696
@@ -140,7 +140,7 @@ sky/provision/kubernetes/config.py,sha256=WEKcFXXhe89bLGAvoMiBvTDxdxkpTIA6ezrj2v
 sky/provision/kubernetes/instance.py,sha256=2zd_Z09amOsi0vPZjQYMJCkCWbN2YecMLL9HkmUuPrM,48414
 sky/provision/kubernetes/network.py,sha256=EpNjRQ131CXepqbdkoRKFu4szVrm0oKEpv1l8EgOkjU,12364
 sky/provision/kubernetes/network_utils.py,sha256=t1FS3K400fetH7cBuRgQJZl5_jEeMshsvsYmnMUcq8k,11399
-sky/provision/kubernetes/utils.py,sha256=5nzyD2d5sroM1ajB6M40IycuZX5OYiwzpb4hd4U6eS8,101291
+sky/provision/kubernetes/utils.py,sha256=UuicHqgqbpF937LGoc5tHLrweqPVtvrekDcpsaY_v_k,101557
 sky/provision/kubernetes/manifests/smarter-device-manager-configmap.yaml,sha256=AMzYzlY0JIlfBWj5eX054Rc1XDW2thUcLSOGMJVhIdA,229
 sky/provision/kubernetes/manifests/smarter-device-manager-daemonset.yaml,sha256=RtTq4F1QUmR2Uunb6zuuRaPhV7hpesz4saHjn3Ncsb4,2010
 sky/provision/lambda_cloud/__init__.py,sha256=6EEvSgtUeEiup9ivIFevHmgv0GqleroO2X0K7TRa2nE,612
@@ -219,7 +219,7 @@ sky/skylet/ray_patches/resource_demand_scheduler.py.patch,sha256=AVV-Hw-Rxw16aFm
 sky/skylet/ray_patches/updater.py.patch,sha256=ZNMGVYICPBB44jLbEx2KvCgIY7BWYdDv3-2b2HJWmAQ,289
 sky/skylet/ray_patches/worker.py.patch,sha256=_OBhibdr3xOy5Qje6Tt8D1eQVm_msi50TJbCJmOTxVU,565
 sky/templates/aws-ray.yml.j2,sha256=fJUwkgXwkuackZI3UD7Fum4iJpkZttl6Jwy3MtYqL1I,8547
-sky/templates/azure-ray.yml.j2,sha256=uUneIfT5vTLUCvrZXiv2dsd3gFqLH2FK632oBruOO_k,6237
+sky/templates/azure-ray.yml.j2,sha256=NQKg_f_S7WjsY90ykx0yNDNOGYnnEL3HS4pA3NMIZkM,6112
 sky/templates/cudo-ray.yml.j2,sha256=SEHVY57iBauCOE2HYJtYVFEKlriAkdwQu_p86a1n_bA,3548
 sky/templates/fluidstack-ray.yml.j2,sha256=t8TCULgiErCZdtFmBZVsA8ZdcqR7ccwsmQhuDFTBEAU,3541
 sky/templates/gcp-ray.yml.j2,sha256=y95B-Nk6hFxm6vEIaxI1wFzAIcy_GcKC3XMYo9m-ThI,9662
@@ -227,8 +227,8 @@ sky/templates/ibm-ray.yml.j2,sha256=RMBUqPId8i4CnVwcyfK3DbRapF1jFMuGQlY0E0PFbMU,
 sky/templates/jobs-controller.yaml.j2,sha256=Gu3ogFxFYr09VEXP-6zEbrCUOFo1aYxWEjAq7whCrxo,1607
 sky/templates/kubernetes-ingress.yml.j2,sha256=73iDklVDWBMbItg0IexCa6_ClXPJOxw7PWz3leku4nE,1340
 sky/templates/kubernetes-loadbalancer.yml.j2,sha256=IxrNYM366N01bbkJEbZ_UPYxUP8wyVEbRNFHRsBuLsw,626
-sky/templates/kubernetes-port-forward-proxy-command.sh,sha256=HlG7CPBBedCVBlL9qv0erW_eKm6Irj0LFyaAWuJW_lc,3148
-sky/templates/kubernetes-ray.yml.j2,sha256=OBHPdpaO0Q0OxkpaXQgByxrg58MKr-aXbMs2hS1oJEE,28003
+sky/templates/kubernetes-port-forward-proxy-command.sh,sha256=iw7mypHszg6Ggq9MbyiYMFOkSlXaQZulaxqC5IWYGCc,3381
+sky/templates/kubernetes-ray.yml.j2,sha256=cK2XAkodjHThv94ITddzJwkePQYf0uCYHZo_1BwCw5U,28728
 sky/templates/kubernetes-ssh-jump.yml.j2,sha256=k5W5sOIMppU7dDkJMwPlqsUcb92y7L5_TVG3hkgMy8M,2747
 sky/templates/lambda-ray.yml.j2,sha256=HyvO_tX2vxwSsc4IFVSqGuIbjLMk0bevP9bcxb8ZQII,4498
 sky/templates/local-ray.yml.j2,sha256=FNHeyHF6nW9nU9QLIZceUWfvrFTTcO51KqhTnYCEFaA,1185
@@ -245,7 +245,7 @@ sky/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sky/utils/accelerator_registry.py,sha256=BO4iYH5bV80Xyp4EPfO0n1D3LL0FvESCy7xm59Je3_o,3798
 sky/utils/admin_policy_utils.py,sha256=_Vt_jTTYCXmMdryj0vrrumFPewa93qHnzUqBDXjAhRU,5981
 sky/utils/cluster_yaml_utils.py,sha256=1wRRYqI1kI-eFs1pMW4r_FFjHJ0zamq6v2RRI-Gtx5E,849
-sky/utils/command_runner.py,sha256=GHTZxoJQ3V8WVSRAaOA4JpRTxtCtuq36H9U8kOfWUwc,36450
+sky/utils/command_runner.py,sha256=ewDjFxcCOv0OeG2aUOIfVWmTls65up9DvSnAXURvGfM,36696
 sky/utils/command_runner.pyi,sha256=mJOzCgcYZAfHwnY_6Wf1YwlTEJGb9ihzc2f0rE0Kw98,7751
 sky/utils/common_utils.py,sha256=Qy25LuIoTT0qg391EWyT9i5D6fwk1S4OdFwRpCTZ9Vk,24657
 sky/utils/control_master_utils.py,sha256=90hnxiAUP20gbJ9e3MERh7rb04ZO_I3LsljNjR26H5I,1416
@@ -257,7 +257,7 @@ sky/utils/kubernetes_enums.py,sha256=imGqHSa8O07zD_6xH1SDMM7dBU5lF5fzFFlQuQy00QM
 sky/utils/log_utils.py,sha256=oZYF45uC7GFjAqO-Je-aiX6zhtq91TP-KKaIbQNF-jY,14024
 sky/utils/resources_utils.py,sha256=Xqi7gxPYw2y5wl5okUI5zx5LEij0hJF_V3Zi8q7TXYg,7890
 sky/utils/rich_utils.py,sha256=hmnI1X5dKvRIQzB7EyNb34FT97qFNve-0QHqM5r0mVk,3066
-sky/utils/schemas.py,sha256=yz8IKUA2oWJartaranIc9MfDZmZcIybPZUGViw1Ii1Q,29475
+sky/utils/schemas.py,sha256=ul_tiSLxJthuJHuiz9NPTLdgtu_ZBbdFd5Pbf6Qb4vQ,29638
 sky/utils/subprocess_utils.py,sha256=iLOda3vfkD-sIUPlfkDGZs9HnJWLlLRvHVgca9DZH8s,10410
 sky/utils/timeline.py,sha256=ebHxKJK2HX0utGArrUgSezTPkcwav3VETa_AQS34t-E,3925
 sky/utils/ux_utils.py,sha256=CqyIFGDuSE8fQasPkna_loZMwtboC9KedR09WEQ7qz0,6502
@@ -269,15 +269,15 @@ sky/utils/kubernetes/create_cluster.sh,sha256=VLXfazav9XCMQmeKVqhuOQzt2vM6G1jgnv
 sky/utils/kubernetes/delete_cluster.sh,sha256=BSccHF43GyepDNf-FZcenzHzpXXATkVD92vgn1lWPgk,927
 sky/utils/kubernetes/deploy_remote_cluster.sh,sha256=vGj0mD0tejHDRy8ulwKOvOF2mfLyT5J8fp7GVqEe_EY,8478
 sky/utils/kubernetes/generate_kind_config.py,sha256=_TNLnifA_r7-CRq083IP1xjelYqiLjzQX9ohuqYpDH8,3187
-sky/utils/kubernetes/generate_kubeconfig.sh,sha256=livvxDKV-_xx8-dYWNyo4wlg3sOldeHefI37JXKLXu0,9398
+sky/utils/kubernetes/generate_kubeconfig.sh,sha256=MBvXJio0PeujZSCXiRKE_pa6HCTiU9qBzR1WrXccVSY,10477
 sky/utils/kubernetes/gpu_labeler.py,sha256=j9tdIG98nwJ6WJXNhpLUUFcg-6RYe1pNiE_bLvLIB5Q,6999
 sky/utils/kubernetes/k8s_gpu_labeler_job.yaml,sha256=k0TBoQ4zgf79-sVkixKSGYFHQ7ZWF5gdVIZPupCCo9A,1224
 sky/utils/kubernetes/k8s_gpu_labeler_setup.yaml,sha256=VLKT2KKimZu1GDg_4AIlIt488oMQvhRZWwsj9vBbPUg,3812
-sky/utils/kubernetes/rsync_helper.sh,sha256=hyYDaYSNxYaNvzUQBzC8AidB7nDeojizjkzc_CTxycY,1077
+sky/utils/kubernetes/rsync_helper.sh,sha256=h4YwrPFf9727CACnMJvF3EyK_0OeOYKKt4su_daKekw,1256
 sky/utils/kubernetes/ssh_jump_lifecycle_manager.py,sha256=RFLJ3k7MR5UN4SKHykQ0lV9SgXumoULpKYIAt1vh-HU,6560
-skypilot_nightly-1.0.0.dev20241123.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
-skypilot_nightly-1.0.0.dev20241123.dist-info/METADATA,sha256=alQVoYDEyzmyGT9l_iGHfED1X75i8-kpsgtSHmQf45M,20222
-skypilot_nightly-1.0.0.dev20241123.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-skypilot_nightly-1.0.0.dev20241123.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
-skypilot_nightly-1.0.0.dev20241123.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
-skypilot_nightly-1.0.0.dev20241123.dist-info/RECORD,,
+skypilot_nightly-1.0.0.dev20241125.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
+skypilot_nightly-1.0.0.dev20241125.dist-info/METADATA,sha256=V-eTZeyxhKUgaPzhNwlHoF39r1y2PEMx1dmytSCf3cw,20222
+skypilot_nightly-1.0.0.dev20241125.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+skypilot_nightly-1.0.0.dev20241125.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
+skypilot_nightly-1.0.0.dev20241125.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
+skypilot_nightly-1.0.0.dev20241125.dist-info/RECORD,,