skypilot-nightly 1.0.0.dev20241207__py3-none-any.whl → 1.0.0.dev20241209__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sky/__init__.py CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
  import urllib.request
  
  # Replaced with the current commit when building the wheels.
- _SKYPILOT_COMMIT_SHA = '6e5083293f0d9a9d069d51274c57f0e59e47e5ce'
+ _SKYPILOT_COMMIT_SHA = 'ace064c5fdf91d34e738dae8c0cd30e1c011528d'
  
  
  def _get_git_commit():
@@ -35,7 +35,7 @@ def _get_git_commit():
  
  
  __commit__ = _get_git_commit()
- __version__ = '1.0.0.dev20241207'
+ __version__ = '1.0.0.dev20241209'
  __root_dir__ = os.path.dirname(os.path.abspath(__file__))
  
  
sky/backends/backend_utils.py CHANGED
@@ -116,6 +116,16 @@ _REMOTE_RUNTIME_FILES_DIR = '~/.sky/.runtime_files'
  _ENDPOINTS_RETRY_MESSAGE = ('If the cluster was recently started, '
                              'please retry after a while.')
  
+ # If a cluster is less than LAUNCH_DOUBLE_CHECK_WINDOW seconds old, and we don't
+ # see any instances in the cloud, the instances might be in the process of
+ # being created. We will wait LAUNCH_DOUBLE_CHECK_DELAY seconds and then double
+ # check to make sure there are still no instances. LAUNCH_DOUBLE_CHECK_DELAY
+ # should be set longer than the delay between (sending the create instance
+ # request) and (the instances appearing on the cloud).
+ # See https://github.com/skypilot-org/skypilot/issues/4431.
+ _LAUNCH_DOUBLE_CHECK_WINDOW = 60
+ _LAUNCH_DOUBLE_CHECK_DELAY = 1
+ 
  # Include the fields that will be used for generating tags that distinguish
  # the cluster in ray, to avoid the stopped cluster being discarded due to
  # updates in the yaml template.
@@ -1926,13 +1936,12 @@ def _update_cluster_status_no_lock(
              logger.debug(
                  f'Refreshing status ({cluster_name!r}) failed to get IPs.')
          except RuntimeError as e:
-             logger.debug(str(e))
+             logger.debug(common_utils.format_exception(e))
          except Exception as e:  # pylint: disable=broad-except
              # This can be raised by `external_ssh_ports()`, due to the
              # underlying call to kubernetes API.
-             logger.debug(
-                 f'Refreshing status ({cluster_name!r}) failed: '
-                 f'{common_utils.format_exception(e, use_bracket=True)}')
+             logger.debug(f'Refreshing status ({cluster_name!r}) failed: ',
+                          exc_info=e)
          return False
  
      # Determining if the cluster is healthy (UP):
@@ -1959,6 +1968,24 @@ def _update_cluster_status_no_lock(
          return record
  
      # All cases below are transitioning the cluster to non-UP states.
+ 
+     if (not node_statuses and handle.launched_resources.cloud.STATUS_VERSION >=
+             clouds.StatusVersion.SKYPILOT):
+         # Note: launched_at is set during sky launch, even on an existing
+         # cluster. This will catch the case where the cluster was terminated on
+         # the cloud and restarted by sky launch.
+         time_since_launch = time.time() - record['launched_at']
+         if (record['status'] == status_lib.ClusterStatus.INIT and
+                 time_since_launch < _LAUNCH_DOUBLE_CHECK_WINDOW):
+             # It's possible the instances for this cluster were just created,
+             # and haven't appeared yet in the cloud API/console. Wait for a bit
+             # and check again. This is a best-effort leak prevention check.
+             # See https://github.com/skypilot-org/skypilot/issues/4431.
+             time.sleep(_LAUNCH_DOUBLE_CHECK_DELAY)
+             node_statuses = _query_cluster_status_via_cloud_api(handle)
+             # Note: even if all the node_statuses are UP now, we will still
+             # consider this cluster abnormal, and its status will be INIT.
+ 
      if len(node_statuses) > handle.launched_nodes:
          # Unexpected: in the queried region more than 1 cluster with the same
          # constructed name tag returned. This will typically not happen unless
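Editor's note: the block added above is an eventual-consistency guard. Right after `sky launch`, the cloud API may briefly report zero instances for a cluster that is in fact being created, so an empty result seen within the first _LAUNCH_DOUBLE_CHECK_WINDOW seconds triggers one delayed re-query before the cluster is judged. A minimal standalone sketch of the same pattern follows; the two constants mirror the diff, while `query_statuses` and the rest are illustrative stand-ins, not SkyPilot APIs.

import time
from typing import Callable, List

_LAUNCH_DOUBLE_CHECK_WINDOW = 60  # seconds; same value as in the diff
_LAUNCH_DOUBLE_CHECK_DELAY = 1  # seconds; same value as in the diff


def query_with_double_check(query_statuses: Callable[[], List[str]],
                            launched_at: float) -> List[str]:
    """Query node statuses, re-querying once for very young clusters.

    A create-instance request can take a moment to become visible in the
    cloud API, so an empty result for a cluster launched less than
    _LAUNCH_DOUBLE_CHECK_WINDOW seconds ago is re-verified after a short
    delay instead of being trusted immediately.
    """
    statuses = query_statuses()
    if not statuses and time.time() - launched_at < _LAUNCH_DOUBLE_CHECK_WINDOW:
        time.sleep(_LAUNCH_DOUBLE_CHECK_DELAY)
        statuses = query_statuses()  # the second answer is taken as-is
    return statuses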
@@ -1987,13 +2014,15 @@ def _update_cluster_status_no_lock(
                      f'{colorama.Style.RESET_ALL}')
      assert len(node_statuses) <= handle.launched_nodes
  
-     # If the node_statuses is empty, all the nodes are terminated. We can
-     # safely set the cluster status to TERMINATED. This handles the edge case
-     # where the cluster is terminated by the user manually through the UI.
+     # If the node_statuses is empty, it should mean that all the nodes are
+     # terminated and we can set the cluster status to TERMINATED. This handles
+     # the edge case where the cluster is terminated by the user manually
+     # through the UI.
      to_terminate = not node_statuses
  
-     # A cluster is considered "abnormal", if not all nodes are TERMINATED or
-     # not all nodes are STOPPED. We check that with the following logic:
+     # A cluster is considered "abnormal", if some (but not all) nodes are
+     # TERMINATED, or not all nodes are STOPPED. We check that with the
+     # following logic:
      # * Not all nodes are terminated and there's at least one node
      #   terminated; or
      # * Any of the non-TERMINATED nodes is in a non-STOPPED status.
@@ -2005,6 +2034,8 @@ def _update_cluster_status_no_lock(
      # cluster is probably down.
      # * The cluster is partially terminated or stopped should be considered
      #   abnormal.
+     # * The cluster is partially or completely in the INIT state, which means
+     #   that provisioning was interrupted. This is considered abnormal.
      #
      # An abnormal cluster will transition to INIT and have any autostop setting
      # reset (unless it's autostopping/autodowning).
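Editor's note: spelled out, the abnormality rule these comments describe reduces to a small predicate. A hedged restatement, under the assumption (stated in the surrounding comments) that terminated nodes are simply absent from `node_statuses`; the enum and function are illustrative, not SkyPilot's:

from enum import Enum
from typing import List


class NodeStatus(Enum):  # stand-in for the cloud-side status enum
    INIT = 'INIT'
    UP = 'UP'
    STOPPED = 'STOPPED'


def is_abnormal(node_statuses: List[NodeStatus], launched_nodes: int) -> bool:
    """Evaluated only after the all-nodes-UP case has already returned."""
    if not node_statuses:
        return False  # every node terminated: cluster is gone, not abnormal
    partially_terminated = len(node_statuses) < launched_nodes
    any_not_stopped = any(s is not NodeStatus.STOPPED for s in node_statuses)
    # Partial termination, or any surviving node not cleanly STOPPED
    # (including INIT from interrupted provisioning), is abnormal.
    return partially_terminated or any_not_stopped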
sky/backends/cloud_vm_ray_backend.py CHANGED
@@ -98,6 +98,11 @@ _RETRY_UNTIL_UP_INIT_GAP_SECONDS = 30
  # The maximum retry count for fetching IP address.
  _FETCH_IP_MAX_ATTEMPTS = 3
  
+ # How many times to query the cloud provider to make sure instances are
+ # stopping/terminating, and how long to wait between each query.
+ _TEARDOWN_WAIT_MAX_ATTEMPTS = 10
+ _TEARDOWN_WAIT_BETWEEN_ATTEMPS_SECONDS = 1
+ 
  _TEARDOWN_FAILURE_MESSAGE = (
      f'\n{colorama.Fore.RED}Failed to terminate '
      '{cluster_name}. {extra_reason}'
@@ -2403,15 +2408,17 @@ class CloudVmRayResourceHandle(backends.backend.ResourceHandle):
                  zip(ip_list, port_list), **ssh_credentials)
              return runners
          if self.cached_cluster_info is None:
-             # We have `or self.cached_external_ips is None` here, because
+             # We have `and self.cached_external_ips is None` here, because
              # when a cluster's cloud is just upgraded to the new provisioner,
              # although it has the cached_external_ips, the cached_cluster_info
              # can be None. We need to update it here, even when force_cached is
              # set to True.
              # TODO: We can remove `self.cached_external_ips is None` after
              # version 0.8.0.
-             assert not force_cached or self.cached_external_ips is not None, (
-                 force_cached, self.cached_external_ips)
+             if force_cached and self.cached_external_ips is None:
+                 raise RuntimeError(
+                     'Tried to use cached cluster info, but it\'s missing for '
+                     f'cluster "{self.cluster_name}"')
              self._update_cluster_info()
          assert self.cached_cluster_info is not None, self
          runners = provision_lib.get_command_runners(
@@ -2838,9 +2845,6 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                  if e.no_failover:
                      error_message = str(e)
                  else:
-                     # Clean up the cluster's entry in `sky status`.
-                     global_user_state.remove_cluster(cluster_name,
-                                                      terminate=True)
                      usage_lib.messages.usage.update_final_cluster_status(
                          None)
                      error_message = (
@@ -4027,7 +4031,6 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                  limit=1000).get_result()['items']
              vpc_id = None
              try:
-                 # pylint: disable=line-too-long
                  vpc_id = vpcs_filtered_by_tags_and_region[0]['crn'].rsplit(
                      ':', 1)[-1]
                  vpc_found = True
@@ -4036,7 +4039,6 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                  returncode = -1
  
              if vpc_found:
-                 # pylint: disable=line-too-long E1136
                  # Delete VPC and its associated resources
                  vpc_provider = IBMVPCProvider(
                      config_provider['resource_group_id'], region,
@@ -4138,6 +4140,7 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
          * Removing the terminated cluster's scripts and ray yaml files.
          """
          cluster_name_on_cloud = handle.cluster_name_on_cloud
+         cloud = handle.launched_resources.cloud
  
          if (terminate and handle.launched_resources.is_image_managed is True):
              # Delete the image when terminating a "cloned" cluster, i.e.,
@@ -4158,7 +4161,6 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
                  'remove it manually to avoid image leakage. Details: '
                  f'{common_utils.format_exception(e, use_bracket=True)}')
          if terminate:
-             cloud = handle.launched_resources.cloud
              config = common_utils.read_yaml(handle.cluster_yaml)
              try:
                  cloud.check_features_are_supported(
@@ -4185,6 +4187,44 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
          config = common_utils.read_yaml(handle.cluster_yaml)
          backend_utils.SSHConfigHelper.remove_cluster(handle.cluster_name)
  
+         # Confirm that instances have actually transitioned state before
+         # updating the state database. We do this immediately before removing
+         # the state from the database, so that we can guarantee that this is
+         # always called before the state is removed. We considered running this
+         # check as part of provisioner.teardown_cluster or
+         # provision.terminate_instances, but it would open the door to code
+         # paths that successfully call this function but do not first call
+         # teardown_cluster or terminate_instances. See
+         # https://github.com/skypilot-org/skypilot/pull/4443#discussion_r1872798032
+         attempts = 0
+         while True:
+             logger.debug(f'instance statuses attempt {attempts + 1}')
+             node_status_dict = provision_lib.query_instances(
+                 repr(cloud),
+                 cluster_name_on_cloud,
+                 config['provider'],
+                 non_terminated_only=False)
+ 
+             unexpected_node_state: Optional[Tuple[str, str]] = None
+             for node_id, node_status in node_status_dict.items():
+                 logger.debug(f'{node_id} status: {node_status}')
+                 # FIXME(cooperc): Some clouds (e.g. GCP) do not distinguish
+                 # between "stopping/stopped" and "terminating/terminated", so we
+                 # allow for either status instead of casing on `terminate`.
+                 if node_status not in [None, status_lib.ClusterStatus.STOPPED]:
+                     unexpected_node_state = (node_id, node_status)
+ 
+             if unexpected_node_state is None:
+                 break
+ 
+             attempts += 1
+             if attempts < _TEARDOWN_WAIT_MAX_ATTEMPTS:
+                 time.sleep(_TEARDOWN_WAIT_BETWEEN_ATTEMPS_SECONDS)
+             else:
+                 (node_id, node_status) = unexpected_node_state
+                 raise RuntimeError(f'Instance {node_id} in unexpected state '
+                                    f'{node_status}.')
+ 
          global_user_state.remove_cluster(handle.cluster_name,
                                           terminate=terminate)
  
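Editor's note: the new loop is a verify-before-forget step. The cloud is polled until every instance is either gone (a status of None, or no dict entry at all) or cleanly STOPPED; only then is the cluster removed from the local state database, and a stuck instance surfaces as a RuntimeError instead of a silent leak. A generic sketch of the same poll-until-settled shape, with illustrative names and defaults taken from the constants above:

import time
from typing import Callable, Dict, Optional, Tuple


def wait_until_settled(
        query: Callable[[], Dict[str, Optional[str]]],
        is_settled: Callable[[Optional[str]], bool],
        max_attempts: int = 10,
        delay_seconds: float = 1.0) -> None:
    """Re-query instance statuses until all satisfy is_settled, else raise.

    Mirrors the teardown check in the diff: a status of None (instance
    already gone) counts as settled, and the last unexpected status seen
    on the final attempt is surfaced in the error.
    """
    for attempt in range(max_attempts):
        statuses = query()
        unexpected: Optional[Tuple[str, Optional[str]]] = None
        for node_id, status in statuses.items():
            if not is_settled(status):
                unexpected = (node_id, status)
        if unexpected is None:
            return
        if attempt + 1 < max_attempts:
            time.sleep(delay_seconds)
        else:
            node_id, status = unexpected
            raise RuntimeError(
                f'Instance {node_id} in unexpected state {status}.')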
sky/provision/azure/instance.py CHANGED
@@ -101,8 +101,8 @@ class AzureInstanceStatus(enum.Enum):
      ) -> Dict['AzureInstanceStatus', Optional[status_lib.ClusterStatus]]:
          return {
              cls.PENDING: status_lib.ClusterStatus.INIT,
-             cls.STOPPING: status_lib.ClusterStatus.INIT,
              cls.RUNNING: status_lib.ClusterStatus.UP,
+             cls.STOPPING: status_lib.ClusterStatus.STOPPED,
              cls.STOPPED: status_lib.ClusterStatus.STOPPED,
              cls.DELETING: None,
          }
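Editor's note: remapping STOPPING from INIT to STOPPED means a cluster caught mid-stop now counts as cleanly stopped rather than abnormal, and (as in the later hunks) mapping a transitional terminating state to None treats those instances as already gone. A small hedged illustration of how such a map is typically consumed; the enum and helper are stand-ins, not SkyPilot's API:

from enum import Enum
from typing import Dict, List, Optional


class ClusterStatus(Enum):  # stand-in for status_lib.ClusterStatus
    INIT = 'INIT'
    UP = 'UP'
    STOPPED = 'STOPPED'


# Transitional states resolve to their destination state; None drops the
# instance from the result, i.e. treats it as terminated.
_STATUS_MAP: Dict[str, Optional[ClusterStatus]] = {
    'pending': ClusterStatus.INIT,
    'running': ClusterStatus.UP,
    'stopping': ClusterStatus.STOPPED,
    'stopped': ClusterStatus.STOPPED,
    'deleting': None,
}


def cluster_statuses(raw_states: List[str]) -> List[ClusterStatus]:
    """Translate provider states, dropping already-terminating instances."""
    return [s for s in (_STATUS_MAP[r] for r in raw_states) if s is not None]


# A cluster whose nodes are stopping/stopped is uniformly STOPPED:
assert cluster_statuses(['stopping', 'stopped']) == [ClusterStatus.STOPPED] * 2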
sky/provision/gcp/instance.py CHANGED
@@ -52,6 +52,8 @@ def _filter_instances(
  # non_terminated_only=True?
  # Will there be callers who would want this to be False?
  # stop() and terminate() for example already implicitly assume non-terminated.
+ # Currently, even with non_terminated_only=False, we may not have a dict entry
+ # for terminated instances, if they have already been fully deleted.
  @common_utils.retry
  def query_instances(
      cluster_name_on_cloud: str,
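Editor's note: the added comment is a caveat for callers of query_instances: a fully deleted instance may be missing from the returned dict entirely, not just mapped to None, so "absent" and "None" must be treated alike. A hedged sketch of a caller-side check under that assumption (names illustrative):

from typing import Dict, List, Optional


def all_nodes_gone(statuses: Dict[str, Optional[str]],
                   expected_ids: List[str]) -> bool:
    """True if every expected instance is terminated.

    An instance counts as gone if it is mapped to None *or* missing from
    the dict entirely (already fully deleted on the provider side).
    """
    return all(statuses.get(node_id) is None for node_id in expected_ids)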
sky/provision/lambda_cloud/instance.py CHANGED
@@ -233,7 +233,7 @@
          'booting': status_lib.ClusterStatus.INIT,
          'active': status_lib.ClusterStatus.UP,
          'unhealthy': status_lib.ClusterStatus.INIT,
-         'terminating': status_lib.ClusterStatus.INIT,
+         'terminating': None,
      }
      statuses: Dict[str, Optional[status_lib.ClusterStatus]] = {}
      for instance_id, instance in instances.items():
sky/provision/paperspace/instance.py CHANGED
@@ -286,12 +286,13 @@
      assert provider_config is not None, (cluster_name_on_cloud, provider_config)
      instances = _filter_instances(cluster_name_on_cloud, None)
  
+     # https://docs.digitalocean.com/reference/paperspace/core/commands/machines/#show
      status_map = {
          'starting': status_lib.ClusterStatus.INIT,
          'restarting': status_lib.ClusterStatus.INIT,
          'upgrading': status_lib.ClusterStatus.INIT,
          'provisioning': status_lib.ClusterStatus.INIT,
-         'stopping': status_lib.ClusterStatus.INIT,
+         'stopping': status_lib.ClusterStatus.STOPPED,
          'serviceready': status_lib.ClusterStatus.INIT,
          'ready': status_lib.ClusterStatus.UP,
          'off': status_lib.ClusterStatus.STOPPED,
skypilot_nightly-1.0.0.dev20241207.dist-info/METADATA → skypilot_nightly-1.0.0.dev20241209.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: skypilot-nightly
- Version: 1.0.0.dev20241207
+ Version: 1.0.0.dev20241209
  Summary: SkyPilot: An intercloud broker for the clouds
  Author: SkyPilot Team
  License: Apache 2.0
skypilot_nightly-1.0.0.dev20241207.dist-info/RECORD → skypilot_nightly-1.0.0.dev20241209.dist-info/RECORD
@@ -1,4 +1,4 @@
- sky/__init__.py,sha256=0OL5OaRhBa-eTCDlTfVtoW1yILO71zZRuGIG_8bS1f4,5944
+ sky/__init__.py,sha256=IH1Gf4IAfNuSZVS8te5OnHWY9iN7WtOd2h0DoVzsMX4,5944
  sky/admin_policy.py,sha256=hPo02f_A32gCqhUueF0QYy1fMSSKqRwYEg_9FxScN_s,3248
  sky/authentication.py,sha256=kACHmiZgWgRpYd1wx1ofbXRMErfMcFmWrkw4a9NxYrY,20988
  sky/check.py,sha256=D3Y3saIFAYVvPxuBHnVgJEO0fUVDxgjwuMBaO-D778k,9472
@@ -30,8 +30,8 @@ sky/adaptors/runpod.py,sha256=4Nt_BfZhJAKQNA3wO8cxvvNI8x4NsDGHu_4EhRDlGYQ,225
  sky/adaptors/vsphere.py,sha256=zJP9SeObEoLrpgHW2VHvZE48EhgVf8GfAEIwBeaDMfM,2129
  sky/backends/__init__.py,sha256=UDjwbUgpTRApbPJnNfR786GadUuwgRk3vsWoVu5RB_c,536
  sky/backends/backend.py,sha256=iBs5gnMaaUoH2OIQ3xhAjWdrJWqj8T61Za9TGsBFpvQ,7515
- sky/backends/backend_utils.py,sha256=BbtkHwp__jGCN0u3gO_pKVITJidQEreVNE6ciTCxy6g,132186
- sky/backends/cloud_vm_ray_backend.py,sha256=yu61XKC2VMvUY7w53bRqofAPGnVvsaHKyHh_SaN1J-s,237304
+ sky/backends/backend_utils.py,sha256=hpYYXXRG25L4PUxqGbSwPfn_6cjDXBlbPxSCKPvYFwo,134028
+ sky/backends/cloud_vm_ray_backend.py,sha256=JQ5T1b0Pa12IEHJLHHEk0gPZDd0K_wViHEYer84j1Ls,239283
  sky/backends/docker_utils.py,sha256=Hyw1YY20EyghhEbYx6O2FIMDcGkNzBzV9TM7LFynei8,8358
  sky/backends/local_docker_backend.py,sha256=nSYCjms3HOPjPNOrcCqsUKm1WV3AAovRFjEQ7hcEXW4,17021
  sky/backends/wheel_utils.py,sha256=CUVOwlBtQjOMv-RSDGx2jMQ0M1D0w9ZPm0TDafJwBDI,8180
@@ -118,7 +118,7 @@ sky/provision/aws/utils.py,sha256=m49pS-SHGW7Au3bhDeTPsL8N5iRzbwOXzyEWRCc1Vho,32
  sky/provision/azure/__init__.py,sha256=87cgk1_Ws7n9rqaDDPv-HpfrkVeSQMdFQnhnXwyx9g4,548
  sky/provision/azure/azure-config-template.json,sha256=jrjAgOtpe0e6FSg3vsVqHKQqJe0w-HeWOFT1HuwzS2c,4712
  sky/provision/azure/config.py,sha256=V5-0Zelt4Xo0vcqnD6PpsnaCS7vc3xosDelILDAKSW4,8885
- sky/provision/azure/instance.py,sha256=T9yzMGeYIqQVKkZ1paUWIkhRbPTrBCKmWjTFVixahcM,49059
+ sky/provision/azure/instance.py,sha256=FHP-9262pEttjnptktozK1c_nCAbS4vno4Cs8XS1IkU,49062
  sky/provision/cudo/__init__.py,sha256=KAEl26MVPsk7IoP9Gg-MOJJRIV6-X9B0fbyHdyJWdLo,741
  sky/provision/cudo/config.py,sha256=RYOVkV0MoUqVBJRZiKhBZhjFygeyFs7eUdVMdPg1vds,327
  sky/provision/cudo/cudo_machine_type.py,sha256=_VNXWPELmlFXbtdcnPvkuLuyE9CZ923BUCdiac-ClDY,696
@@ -132,7 +132,7 @@ sky/provision/fluidstack/instance.py,sha256=iZK0dNFsRTlazh-2el3N9GDjPvZDtyCn8kWb
  sky/provision/gcp/__init__.py,sha256=zlgjR2JoaGD7sStGStMRu9bJ62f-8NKEIyb-bFHBlzM,528
  sky/provision/gcp/config.py,sha256=i0PhR1ybGErQiPT8cD6E5OFB7LD6sub4Rc-mhgTREVI,33340
  sky/provision/gcp/constants.py,sha256=ojerfnNEeayJn-0aJq2Uq1iTchxOkpruKrPBbHmdiEw,7448
- sky/provision/gcp/instance.py,sha256=8yjisiPqNBbhEiPBRogvhhgEpmmDplwTBQRgi17TR_I,24786
+ sky/provision/gcp/instance.py,sha256=AMnJz6xDwYqIHmDfJfENTG_ID6uhjD_2VhlWw8FJp_s,24934
  sky/provision/gcp/instance_utils.py,sha256=veRBr6Oziv0KaUdC4acuWeaOremNV0gMYCCHaSvY7c8,70943
  sky/provision/gcp/mig_utils.py,sha256=oFpcFZoapHMILSE4iIm8V5bxP1RhbMHRF7cciqq8qAk,7883
  sky/provision/kubernetes/__init__.py,sha256=y6yVfii81WYG3ROxv4hiIj-ydinS5-xGxLvXnARVQoI,719
@@ -145,7 +145,7 @@ sky/provision/kubernetes/manifests/smarter-device-manager-configmap.yaml,sha256=
  sky/provision/kubernetes/manifests/smarter-device-manager-daemonset.yaml,sha256=RtTq4F1QUmR2Uunb6zuuRaPhV7hpesz4saHjn3Ncsb4,2010
  sky/provision/lambda_cloud/__init__.py,sha256=6EEvSgtUeEiup9ivIFevHmgv0GqleroO2X0K7TRa2nE,612
  sky/provision/lambda_cloud/config.py,sha256=jq1iLzp4Up61r4JGxvtpVbJlgXnea3LHYQhCQyyl7ik,272
- sky/provision/lambda_cloud/instance.py,sha256=5-XuX-KwlRq8y62NXNzY_p6aJs4iCPGBf5U4pIR4liI,8975
+ sky/provision/lambda_cloud/instance.py,sha256=PEVwEPYOzZ3T_vcq6MtWbg4Au01FIUTlkpbIHEtySz0,8950
  sky/provision/lambda_cloud/lambda_utils.py,sha256=wIXV1Qe362f8Q9u8DSx2e9IJs4CF03Jr3idHCzhlRz4,9879
  sky/provision/oci/__init__.py,sha256=5E6EUtTK3mqGVREw5TuVl5DxteBYTZigIii7c8gHExU,612
  sky/provision/oci/config.py,sha256=diSDTyHLokcuXGB2XgZCHFvsXa8bah1PP2XuMouW_UU,1650
@@ -154,7 +154,7 @@ sky/provision/oci/query_utils.py,sha256=jOTKC7MX8n9YK6ADJuYMooPL9R2mHod4Tgl68Lu8
  sky/provision/paperspace/__init__.py,sha256=1nbUPWio7UA5gCQkO_rfEDfgXT17u5OtuByxQx4Ez6g,598
  sky/provision/paperspace/config.py,sha256=oNmffSt-V466pE0DmML8hOCX1CiA24jAqE5JEKuqpyI,1541
  sky/provision/paperspace/constants.py,sha256=NcLJGivJxshJwhR28yVHysWQ2gtMAkTVmHC91d3kyKM,957
- sky/provision/paperspace/instance.py,sha256=q_V01DZSMXLfy63Zwt6AQotq02JuXQZb5CHS_JttlwE,12046
+ sky/provision/paperspace/instance.py,sha256=YAIzHEYdO8MRK8iLdweMpY2U28OMCoUvPEwf0DRRQ3Y,12135
  sky/provision/paperspace/utils.py,sha256=uOmxbDKjV6skFizC4gYXSxRuEqso5ck2kF7MbtNmhEs,9580
  sky/provision/runpod/__init__.py,sha256=6HYvHI27EaLrX1SS0vWVhdLu5HDBeZCdvAeDJuwM5pk,556
  sky/provision/runpod/config.py,sha256=9ulZJVL7nHuxhTdoj8D7lNn7SdicJ5zc6FIcHIG9tcg,321
@@ -276,9 +276,9 @@ sky/utils/kubernetes/k8s_gpu_labeler_job.yaml,sha256=k0TBoQ4zgf79-sVkixKSGYFHQ7Z
  sky/utils/kubernetes/k8s_gpu_labeler_setup.yaml,sha256=VLKT2KKimZu1GDg_4AIlIt488oMQvhRZWwsj9vBbPUg,3812
  sky/utils/kubernetes/rsync_helper.sh,sha256=h4YwrPFf9727CACnMJvF3EyK_0OeOYKKt4su_daKekw,1256
  sky/utils/kubernetes/ssh_jump_lifecycle_manager.py,sha256=RFLJ3k7MR5UN4SKHykQ0lV9SgXumoULpKYIAt1vh-HU,6560
- skypilot_nightly-1.0.0.dev20241207.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
- skypilot_nightly-1.0.0.dev20241207.dist-info/METADATA,sha256=JlIbXyrkVS2t9FRhdehfZDSGy0nzj7FLXutyN3cPGLs,20319
- skypilot_nightly-1.0.0.dev20241207.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
- skypilot_nightly-1.0.0.dev20241207.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
- skypilot_nightly-1.0.0.dev20241207.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
- skypilot_nightly-1.0.0.dev20241207.dist-info/RECORD,,
+ skypilot_nightly-1.0.0.dev20241209.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
+ skypilot_nightly-1.0.0.dev20241209.dist-info/METADATA,sha256=cO8FIzhG0O1XKk2YGkPHV80-r0l_702D5riSnPf6c5U,20319
+ skypilot_nightly-1.0.0.dev20241209.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ skypilot_nightly-1.0.0.dev20241209.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
+ skypilot_nightly-1.0.0.dev20241209.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
+ skypilot_nightly-1.0.0.dev20241209.dist-info/RECORD,,