skypilot-nightly 1.0.0.dev20250205__py3-none-any.whl → 1.0.0.dev20250207__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sky/__init__.py CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
 import urllib.request
 
 # Replaced with the current commit when building the wheels.
- _SKYPILOT_COMMIT_SHA = 'e7d94e956ee77eaabaf9a9178bc748d692eed3de'
+ _SKYPILOT_COMMIT_SHA = '5e6b39ce9abf3a22e24b905811ec0be6d52b4a44'
 
 
 def _get_git_commit():
@@ -35,7 +35,7 @@ def _get_git_commit():
 
 
 __commit__ = _get_git_commit()
- __version__ = '1.0.0.dev20250205'
+ __version__ = '1.0.0.dev20250207'
 __root_dir__ = os.path.dirname(os.path.abspath(__file__))
 
 
sky/authentication.py CHANGED
@@ -289,7 +289,7 @@ def setup_lambda_authentication(config: Dict[str, Any]) -> Dict[str, Any]:
 config['auth']['ssh_public_key'] = PUBLIC_SSH_KEY_PATH
 
 # TODO(zhwu): we need to avoid uploading the public ssh key to the
- # nodes, as that will cause problem when the node is used as spot
+ # nodes, as that will cause problem when the node is used as jobs
 # controller, i.e., the public and private key on the node may
 # not match.
 file_mounts = config['file_mounts']
sky/backends/backend_utils.py CHANGED
@@ -997,7 +997,7 @@ def write_cluster_config(
 
 # Read the cluster name from the tmp yaml file, to take the backward
 # compatbility restortion above into account.
- # TODO: remove this after 2 minor releases, 0.8.0.
+ # TODO: remove this after 2 minor releases, 0.10.0.
 yaml_config = common_utils.read_yaml(tmp_yaml_path)
 config_dict['cluster_name_on_cloud'] = yaml_config['cluster_name']
 
sky/backends/cloud_vm_ray_backend.py CHANGED
@@ -3416,17 +3416,11 @@ class CloudVmRayBackend(backends.Backend['CloudVmRayResourceHandle']):
 managed_job_code = managed_job_codegen.set_pending(
 job_id, managed_job_dag)
 # Set the managed job to PENDING state to make sure that this
- # managed job appears in the `sky jobs queue`, when there are
- # already 2x vCPU controller processes running on the controller VM,
- # e.g., 16 controller processes running on a controller with 8
- # vCPUs.
- # The managed job should be set to PENDING state *after* the
- # controller process job has been queued, as our skylet on spot
- # controller will set the managed job in FAILED state if the
- # controller process job does not exist.
- # We cannot set the managed job to PENDING state in the codegen for
- # the controller process job, as it will stay in the job pending
- # table and not be executed until there is an empty slot.
+ # managed job appears in the `sky jobs queue`, even if it needs to
+ # wait to be submitted.
+ # We cannot set the managed job to PENDING state in the job template
+ # (jobs-controller.yaml.j2), as it may need to wait for the run
+ # commands to be scheduled on the job controller in high-load cases.
 job_submit_cmd = job_submit_cmd + ' && ' + managed_job_code
 
 returncode, stdout, stderr = self.run_on_head(handle,
sky/exceptions.py CHANGED
@@ -81,6 +81,14 @@ class ManagedJobReachedMaxRetriesError(Exception):
 pass
 
 
+ class ManagedJobStatusError(Exception):
+ """Raised when a managed job task status update is invalid.
+
+ For instance, a RUNNING job cannot become SUBMITTED.
+ """
+ pass
+
+
 class ResourcesMismatchError(Exception):
 """Raised when resources are mismatched."""
 pass
sky/jobs/controller.py CHANGED
@@ -68,7 +68,7 @@ class JobsController:
 else:
 assert task.name is not None, task
 task_name = task.name
- # This is guaranteed by the spot_launch API, where we fill in
+ # This is guaranteed by the jobs.launch API, where we fill in
 # the task.name with
 # dag_utils.maybe_infer_and_fill_dag_and_task_names.
 assert task_name is not None, self._dag
@@ -137,8 +137,8 @@ class JobsController:
 1. The optimizer cannot find a feasible solution.
 2. Precheck errors: invalid cluster name, failure in getting
 cloud user identity, or unsupported feature.
- exceptions.SpotJobReachedMaxRetryError: This will be raised when
- all prechecks passed but the maximum number of retries is
+ exceptions.ManagedJobReachedMaxRetriesError: This will be raised
+ when all prechecks passed but the maximum number of retries is
 reached for `sky.launch`. The failure of `sky.launch` can be
 due to:
 1. Any of the underlying failover exceptions is due to resources
@@ -482,8 +482,6 @@ def _cleanup(job_id: int, dag_yaml: str):
 when reaching here, as we currently only support chain DAGs, and only
 task is executed at a time.
 """
- # NOTE: The code to get cluster name is same as what we did in the spot
- # controller, we should keep it in sync with JobsController.__init__()
 dag, _ = _get_dag_and_name(dag_yaml)
 for task in dag.tasks:
 assert task.name is not None, task
sky/jobs/core.py CHANGED
@@ -472,24 +472,3 @@ def sync_down_logs(
 job_name=name,
 controller=controller,
 local_dir=local_dir)
-
-
- spot_launch = common_utils.deprecated_function(
- launch,
- name='sky.jobs.launch',
- deprecated_name='spot_launch',
- removing_version='0.8.0',
- override_argument={'use_spot': True})
- spot_queue = common_utils.deprecated_function(queue,
- name='sky.jobs.queue',
- deprecated_name='spot_queue',
- removing_version='0.8.0')
- spot_cancel = common_utils.deprecated_function(cancel,
- name='sky.jobs.cancel',
- deprecated_name='spot_cancel',
- removing_version='0.8.0')
- spot_tail_logs = common_utils.deprecated_function(
- tail_logs,
- name='sky.jobs.tail_logs',
- deprecated_name='spot_tail_logs',
- removing_version='0.8.0')
sky/jobs/dashboard/dashboard.py CHANGED
@@ -91,10 +91,13 @@ JOB_TABLE_COLUMNS = [
 'Recoveries', 'Details', 'Actions'
 ]
 
+ # This column is given by format_job_table but should be ignored.
+ SCHED_STATE_COLUMN = 12
+
 
 def _extract_launch_history(log_content: str) -> str:
 """Extract launch history from log content.
-
+
 Args:
 log_content: Content of the log file.
 Returns:
@@ -151,8 +154,12 @@ def home():
 status_counts[task['status'].value] += 1
 
 # Add an empty column for the dropdown button and actions column
- rows = [[''] + row + [''] + [''] for row in rows
- ] # Add empty cell for failover and actions column
+ # Exclude SCHED. STATE column
+ rows = [
+ [''] + row[:SCHED_STATE_COLUMN] + row[SCHED_STATE_COLUMN + 1:] +
+ # Add empty cell for failover and actions column
+ [''] + [''] for row in rows
+ ]
 
 # Add log content as failover history for each job
 for row in rows:
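A note on the dashboard change above: `format_job_table` now emits a SCHED. STATE column, which sits at index 12 of the rows the dashboard receives and is dropped before rendering. A small, self-contained sketch of the same slicing, using a made-up placeholder row rather than real job data:

```python
# Hypothetical illustration of the row munging in home(): drop the
# SCHED. STATE column and pad with empty cells for the dropdown and
# actions columns. The 14-column row below is a placeholder, not real data.
SCHED_STATE_COLUMN = 12

rows = [[f'col{i}' for i in range(14)]]
rows = [
    [''] + row[:SCHED_STATE_COLUMN] + row[SCHED_STATE_COLUMN + 1:] + ['', '']
    for row in rows
]
assert len(rows[0]) == 16  # 1 leading blank + 13 kept columns + 2 trailing blanks
```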
sky/jobs/recovery_strategy.py CHANGED
@@ -263,8 +263,8 @@ class StrategyExecutor:
 1. The optimizer cannot find a feasible solution.
 2. Precheck errors: invalid cluster name, failure in getting
 cloud user identity, or unsupported feature.
- exceptions.SpotJobReachedMaxRetryError: This will be raised when
- all prechecks passed but the maximum number of retries is
+ exceptions.ManagedJobReachedMaxRetriesError: This will be raised
+ when all prechecks passed but the maximum number of retries is
 reached for `sky.launch`. The failure of `sky.launch` can be
 due to:
 1. Any of the underlying failover exceptions is due to resources
sky/jobs/state.py CHANGED
@@ -11,6 +11,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
 import colorama
 
+ from sky import exceptions
 from sky import sky_logging
 from sky.utils import common_utils
 from sky.utils import db_utils
@@ -32,7 +33,7 @@ logger = sky_logging.init_logger(__name__)
 # the same content as the `task_name` column.
 # The `job_id` is now not really a job id, but a only a unique
 # identifier/primary key for all the tasks. We will use `spot_job_id`
- # to identify the spot job.
+ # to identify the job.
 # TODO(zhwu): schema migration may be needed.
 def create_table(cursor, conn):
 # Enable WAL mode to avoid locking issues.
@@ -420,9 +421,16 @@ def set_submitted(job_id: int, task_id: int, run_timestamp: str,
 run_timestamp=(?),
 specs=(?)
 WHERE spot_job_id=(?) AND
- task_id=(?)""",
+ task_id=(?) AND
+ status=(?) AND
+ end_at IS null""",
 (resources_str, submit_time, ManagedJobStatus.SUBMITTED.value,
- run_timestamp, json.dumps(specs), job_id, task_id))
+ run_timestamp, json.dumps(specs), job_id, task_id,
+ ManagedJobStatus.PENDING.value))
+ if cursor.rowcount != 1:
+ raise exceptions.ManagedJobStatusError(
+ f'Failed to set the task to submitted. '
+ f'({cursor.rowcount} rows updated)')
 callback_func('SUBMITTED')
 
 
@@ -434,7 +442,14 @@ def set_starting(job_id: int, task_id: int, callback_func: CallbackType):
 """\
 UPDATE spot SET status=(?)
 WHERE spot_job_id=(?) AND
- task_id=(?)""", (ManagedJobStatus.STARTING.value, job_id, task_id))
+ task_id=(?) AND
+ status=(?) AND
+ end_at IS null""", (ManagedJobStatus.STARTING.value, job_id,
+ task_id, ManagedJobStatus.SUBMITTED.value))
+ if cursor.rowcount != 1:
+ raise exceptions.ManagedJobStatusError(
+ f'Failed to set the task to starting. '
+ f'({cursor.rowcount} rows updated)')
 callback_func('STARTING')
 
 
@@ -447,15 +462,25 @@ def set_started(job_id: int, task_id: int, start_time: float,
 """\
 UPDATE spot SET status=(?), start_at=(?), last_recovered_at=(?)
 WHERE spot_job_id=(?) AND
- task_id=(?)""",
+ task_id=(?) AND
+ status IN (?, ?) AND
+ end_at IS null""",
 (
 ManagedJobStatus.RUNNING.value,
 start_time,
 start_time,
 job_id,
 task_id,
+ ManagedJobStatus.STARTING.value,
+ # If the task is empty, we will jump straight from PENDING to
+ # RUNNING
+ ManagedJobStatus.PENDING.value,
 ),
 )
+ if cursor.rowcount != 1:
+ raise exceptions.ManagedJobStatusError(
+ f'Failed to set the task to started. '
+ f'({cursor.rowcount} rows updated)')
 callback_func('STARTED')
 
 
@@ -468,8 +493,15 @@ def set_recovering(job_id: int, task_id: int, callback_func: CallbackType):
 UPDATE spot SET
 status=(?), job_duration=job_duration+(?)-last_recovered_at
 WHERE spot_job_id=(?) AND
- task_id=(?)""",
- (ManagedJobStatus.RECOVERING.value, time.time(), job_id, task_id))
+ task_id=(?) AND
+ status=(?) AND
+ end_at IS null""",
+ (ManagedJobStatus.RECOVERING.value, time.time(), job_id, task_id,
+ ManagedJobStatus.RUNNING.value))
+ if cursor.rowcount != 1:
+ raise exceptions.ManagedJobStatusError(
+ f'Failed to set the task to recovering. '
+ f'({cursor.rowcount} rows updated)')
 callback_func('RECOVERING')
 
 
@@ -482,8 +514,15 @@ def set_recovered(job_id: int, task_id: int, recovered_time: float,
 UPDATE spot SET
 status=(?), last_recovered_at=(?), recovery_count=recovery_count+1
 WHERE spot_job_id=(?) AND
- task_id=(?)""",
- (ManagedJobStatus.RUNNING.value, recovered_time, job_id, task_id))
+ task_id=(?) AND
+ status=(?) AND
+ end_at IS null""",
+ (ManagedJobStatus.RUNNING.value, recovered_time, job_id, task_id,
+ ManagedJobStatus.RECOVERING.value))
+ if cursor.rowcount != 1:
+ raise exceptions.ManagedJobStatusError(
+ f'Failed to set the task to recovered. '
+ f'({cursor.rowcount} rows updated)')
 logger.info('==== Recovered. ====')
 callback_func('RECOVERED')
 
@@ -496,10 +535,16 @@ def set_succeeded(job_id: int, task_id: int, end_time: float,
 """\
 UPDATE spot SET
 status=(?), end_at=(?)
- WHERE spot_job_id=(?) AND task_id=(?)
- AND end_at IS null""",
- (ManagedJobStatus.SUCCEEDED.value, end_time, job_id, task_id))
-
+ WHERE spot_job_id=(?) AND
+ task_id=(?) AND
+ status=(?) AND
+ end_at IS null""",
+ (ManagedJobStatus.SUCCEEDED.value, end_time, job_id, task_id,
+ ManagedJobStatus.RUNNING.value))
+ if cursor.rowcount != 1:
+ raise exceptions.ManagedJobStatusError(
+ f'Failed to set the task to succeeded. '
+ f'({cursor.rowcount} rows updated)')
 callback_func('SUCCEEDED')
 logger.info('Job succeeded.')
 
@@ -571,7 +616,9 @@ def set_failed(
 {set_str}
 WHERE spot_job_id=(?) {task_query_str} AND end_at IS null""",
 (end_time, *list(fields_to_set.values()), job_id, *task_value))
- if callback_func:
+
+ updated = cursor.rowcount > 0
+ if callback_func and updated:
 callback_func('FAILED')
 logger.info(failure_reason)
 
@@ -586,12 +633,15 @@ def set_cancelling(job_id: int, callback_func: CallbackType):
 rows = cursor.execute(
 """\
 UPDATE spot SET
- status=(?), end_at=(?)
+ status=(?)
 WHERE spot_job_id=(?) AND end_at IS null""",
- (ManagedJobStatus.CANCELLING.value, time.time(), job_id))
- if rows.rowcount > 0:
- logger.info('Cancelling the job...')
- callback_func('CANCELLING')
+ (ManagedJobStatus.CANCELLING.value, job_id))
+ updated = rows.rowcount > 0
+ if updated:
+ logger.info('Cancelling the job...')
+ callback_func('CANCELLING')
+ else:
+ logger.info('Cancellation skipped, job is already terminal')
 
 
 def set_cancelled(job_id: int, callback_func: CallbackType):
@@ -607,9 +657,12 @@ def set_cancelled(job_id: int, callback_func: CallbackType):
 WHERE spot_job_id=(?) AND status=(?)""",
 (ManagedJobStatus.CANCELLED.value, time.time(), job_id,
 ManagedJobStatus.CANCELLING.value))
- if rows.rowcount > 0:
- logger.info('Job cancelled.')
- callback_func('CANCELLED')
+ updated = rows.rowcount > 0
+ if updated:
+ logger.info('Job cancelled.')
+ callback_func('CANCELLED')
+ else:
+ logger.info('Cancellation skipped, job is not CANCELLING')
 
 
 def set_local_log_file(job_id: int, task_id: Optional[int],
@@ -705,8 +758,9 @@ def get_jobs_to_check_status(job_id: Optional[int] = None) -> List[int]:
 job_id: Optional job ID to check. If None, checks all jobs.
 
 Returns a list of job_ids, including the following:
- - For jobs with schedule state: jobs that have schedule state not DONE
- - For legacy jobs (no schedule state): jobs that are in non-terminal status
+ - Jobs that have a schedule_state that is not DONE
+ - Jobs have schedule_state DONE but are in a non-terminal status
+ - Legacy jobs (that is, no schedule state) that are in non-terminal status
 """
 job_filter = '' if job_id is None else 'AND spot.spot_job_id=(?)'
 job_value = () if job_id is None else (job_id,)
@@ -719,7 +773,9 @@ def get_jobs_to_check_status(job_id: Optional[int] = None) -> List[int]:
 
 # Get jobs that are either:
 # 1. Have schedule state that is not DONE, or
- # 2. Have no schedule state (legacy) AND are in non-terminal status
+ # 2. Have schedule state DONE AND are in non-terminal status (unexpected
+ # inconsistent state), or
+ # 3. Have no schedule state (legacy) AND are in non-terminal status
 with db_utils.safe_cursor(_DB_PATH) as cursor:
 rows = cursor.execute(
 f"""\
@@ -728,14 +784,23 @@ def get_jobs_to_check_status(job_id: Optional[int] = None) -> List[int]:
 LEFT OUTER JOIN job_info
 ON spot.spot_job_id=job_info.spot_job_id
 WHERE (
+ -- non-legacy jobs that are not DONE
 (job_info.schedule_state IS NOT NULL AND
 job_info.schedule_state IS NOT ?)
 OR
- (job_info.schedule_state IS NULL AND
+ -- legacy or that are in non-terminal status or
+ -- DONE jobs that are in non-terminal status
+ ((-- legacy jobs
+ job_info.schedule_state IS NULL OR
+ -- non-legacy DONE jobs
+ job_info.schedule_state IS ?
+ ) AND
+ -- non-terminal
 status NOT IN ({status_filter_str}))
 )
 {job_filter}
 ORDER BY spot.spot_job_id DESC""", [
+ ManagedJobScheduleState.DONE.value,
 ManagedJobScheduleState.DONE.value, *terminal_status_values,
 *job_value
 ]).fetchall()
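The `sky/jobs/state.py` changes above all follow one pattern: each transition's UPDATE now names the expected predecessor status (plus `end_at IS null`) in its WHERE clause, and raises the new `ManagedJobStatusError` unless exactly one row was updated. A minimal, self-contained sketch of that pattern against a throwaway SQLite table (the table and column names here are illustrative stand-ins, not the real `spot` schema):

```python
import sqlite3

class ManagedJobStatusError(Exception):
    """Stand-in for sky.exceptions.ManagedJobStatusError."""

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE jobs (job_id INTEGER, status TEXT, end_at REAL)')
conn.execute("INSERT INTO jobs VALUES (1, 'PENDING', NULL)")

def set_status(job_id: int, expected: str, new: str) -> None:
    # The WHERE clause acts as a compare-and-swap: the row is only updated if
    # it is still in the expected state and has not already been terminated.
    cursor = conn.execute(
        'UPDATE jobs SET status=(?) '
        'WHERE job_id=(?) AND status=(?) AND end_at IS NULL',
        (new, job_id, expected))
    if cursor.rowcount != 1:
        raise ManagedJobStatusError(
            f'Failed to set job {job_id} to {new} '
            f'({cursor.rowcount} rows updated)')

set_status(1, 'PENDING', 'SUBMITTED')   # ok
set_status(1, 'SUBMITTED', 'STARTING')  # ok
set_status(1, 'PENDING', 'RUNNING')     # raises: the job is no longer PENDING
```

Because every transition is conditioned on its predecessor, a stale or duplicated controller process can no longer move a job backwards or overwrite a terminal state silently; the mismatch surfaces as an exception instead.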
sky/jobs/utils.py CHANGED
@@ -6,11 +6,9 @@ ManagedJobCodeGen.
 """
 import collections
 import enum
- import inspect
 import os
 import pathlib
 import shlex
- import shutil
 import textwrap
 import time
 import traceback
@@ -53,7 +51,6 @@ JOB_CONTROLLER_NAME: str = (
 LEGACY_JOB_CONTROLLER_NAME: str = (
 f'sky-spot-controller-{common_utils.get_user_hash()}')
 SIGNAL_FILE_PREFIX = '/tmp/sky_jobs_controller_signal_{}'
- LEGACY_SIGNAL_FILE_PREFIX = '/tmp/sky_spot_controller_signal_{}'
 # Controller checks its job's status every this many seconds.
 JOB_STATUS_CHECK_GAP_SECONDS = 20
 
@@ -248,16 +245,35 @@ def update_managed_jobs_statuses(job_id: Optional[int] = None):
 schedule_state = tasks[0]['schedule_state']
 
 # Backwards compatibility: this job was submitted when ray was still
- # used for managing the parallelism of job controllers.
+ # used for managing the parallelism of job controllers, before #4485.
 # TODO(cooperc): Remove before 0.11.0.
 if (schedule_state is
 managed_job_state.ManagedJobScheduleState.INVALID):
 _handle_legacy_job(job_id)
 continue
 
- # For jobs with schedule state:
+ # Handle jobs with schedule state (non-legacy jobs):
 pid = tasks[0]['controller_pid']
- if pid is None:
+ if schedule_state == managed_job_state.ManagedJobScheduleState.DONE:
+ # There are two cases where we could get a job that is DONE.
+ # 1. At query time (get_jobs_to_check_status), the job was not yet
+ # DONE, but since then (before get_managed_jobs is called) it has
+ # hit a terminal status, marked itself done, and exited. This is
+ # fine.
+ # 2. The job is DONE, but in a non-terminal status. This is
+ # unexpected. For instance, the task status is RUNNING, but the
+ # job schedule_state is DONE.
+ if all(task['status'].is_terminal() for task in tasks):
+ # Turns out this job is fine, even though it got pulled by
+ # get_jobs_to_check_status. Probably case #1 above.
+ continue
+
+ logger.error(f'Job {job_id} has DONE schedule state, but some '
+ f'tasks are not terminal. Task statuses: '
+ f'{", ".join(task["status"].value for task in tasks)}')
+ failure_reason = ('Inconsistent internal job state. This is a bug.')
+ elif pid is None:
+ # Non-legacy job and controller process has not yet started.
 if schedule_state in (
 managed_job_state.ManagedJobScheduleState.INACTIVE,
 managed_job_state.ManagedJobScheduleState.WAITING):
@@ -458,17 +474,12 @@ def cancel_jobs_by_id(job_ids: Optional[List[int]]) -> str:
 
 # Send the signal to the jobs controller.
 signal_file = pathlib.Path(SIGNAL_FILE_PREFIX.format(job_id))
- legacy_signal_file = pathlib.Path(
- LEGACY_SIGNAL_FILE_PREFIX.format(job_id))
 # Filelock is needed to prevent race condition between signal
 # check/removal and signal writing.
 with filelock.FileLock(str(signal_file) + '.lock'):
 with signal_file.open('w', encoding='utf-8') as f:
 f.write(UserSignal.CANCEL.value)
 f.flush()
- # Backward compatibility for managed jobs launched before #3419. It
- # can be removed in the future 0.8.0 release.
- shutil.copy(str(signal_file), str(legacy_signal_file))
 cancelled_job_ids.append(job_id)
 
 if not cancelled_job_ids:
@@ -965,7 +976,8 @@ def format_job_table(
 'STATUS',
 ]
 if show_all:
- columns += ['STARTED', 'CLUSTER', 'REGION', 'DESCRIPTION']
+ # TODO: move SCHED. STATE to a separate flag (e.g. --debug)
+ columns += ['STARTED', 'CLUSTER', 'REGION', 'SCHED. STATE', 'DETAILS']
 if tasks_have_user:
 columns.insert(0, 'USER')
 job_table = log_utils.create_table(columns)
@@ -984,20 +996,10 @@ def format_job_table(
 # by the task_id.
 jobs[get_hash(task)].append(task)
 
- def generate_description(failure_reason: Optional[str],
- schedule_state: Optional[str]) -> str:
- description = ''
- if schedule_state is not None:
- description += f'Scheduler: {schedule_state}'
- if failure_reason is not None:
- description += ', '
+ def generate_details(failure_reason: Optional[str]) -> str:
 if failure_reason is not None:
- description += f'Failure: {failure_reason}'
-
- if description == '':
- return '-'
-
- return description
+ return f'Failure: {failure_reason}'
+ return '-'
 
 for job_hash, job_tasks in jobs.items():
 if show_all:
@@ -1050,13 +1052,13 @@ def format_job_table(
 status_str,
 ]
 if show_all:
- schedule_state = job_tasks[0]['schedule_state']
 failure_reason = job_tasks[current_task_id]['failure_reason']
 job_values.extend([
 '-',
 '-',
 '-',
- generate_description(failure_reason, schedule_state),
+ job_tasks[0]['schedule_state'],
+ generate_details(failure_reason),
 ])
 if tasks_have_user:
 job_values.insert(0, job_tasks[0].get('user', '-'))
@@ -1087,14 +1089,14 @@ def format_job_table(
 # schedule_state is only set at the job level, so if we have
 # more than one task, only display on the aggregated row.
 schedule_state = (task['schedule_state']
- if len(job_tasks) == 1 else None)
+ if len(job_tasks) == 1 else '-')
 values.extend([
 # STARTED
 log_utils.readable_time_duration(task['start_at']),
 task['cluster_resources'],
 task['region'],
- generate_description(task['failure_reason'],
- schedule_state),
+ schedule_state,
+ generate_details(task['failure_reason']),
 ])
 if tasks_have_user:
 values.insert(0, task.get('user', '-'))
@@ -1125,28 +1127,15 @@ class ManagedJobCodeGen:
 
 >> codegen = ManagedJobCodeGen.show_jobs(...)
 """
- # TODO: the try..except.. block is for backward compatibility. Remove it in
- # v0.8.0.
 _PREFIX = textwrap.dedent("""\
- managed_job_version = 0
- try:
- from sky.jobs import utils
- from sky.jobs import constants as managed_job_constants
- from sky.jobs import state as managed_job_state
-
- managed_job_version = managed_job_constants.MANAGED_JOBS_VERSION
- except ImportError:
- from sky.spot import spot_state as managed_job_state
- from sky.spot import spot_utils as utils
+ from sky.jobs import utils
+ from sky.jobs import state as managed_job_state
 """)
 
 @classmethod
 def get_job_table(cls) -> str:
 code = textwrap.dedent("""\
- if managed_job_version < 1:
- job_table = utils.dump_spot_job_queue()
- else:
- job_table = utils.dump_managed_job_queue()
+ job_table = utils.dump_managed_job_queue()
 print(job_table, flush=True)
 """)
 return cls._build(code)
@@ -1182,29 +1171,9 @@ class ManagedJobCodeGen:
 job_id: Optional[int],
 follow: bool = True,
 controller: bool = False) -> str:
- # We inspect the source code of the function here for backward
- # compatibility.
- # TODO: change to utils.stream_logs(job_id, job_name, follow) in v0.8.0.
- # Import libraries required by `stream_logs`. The try...except... block
- # should be removed in v0.8.0.
- code = textwrap.dedent("""\
- import os
- import time
-
- from sky.skylet import job_lib, log_lib
- from sky.skylet import constants
- from sky.utils import ux_utils
- try:
- from sky.jobs.utils import stream_logs_by_id
- except ImportError:
- from sky.spot.spot_utils import stream_logs_by_id
- from typing import Optional
- """)
- code += inspect.getsource(stream_logs)
- code += textwrap.dedent(f"""\
-
- msg = stream_logs({job_id!r}, {job_name!r},
- follow={follow}, controller={controller})
+ code = textwrap.dedent(f"""\
+ msg = utils.stream_logs({job_id!r}, {job_name!r},
+ follow={follow}, controller={controller})
 print(msg, flush=True)
 """)
 return cls._build(code)
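With the backward-compatibility shims gone, `ManagedJobCodeGen` reduces to composing a short Python snippet (a fixed import prefix plus one call) that is shipped to the jobs controller and executed there. A rough standalone sketch of that composition, with a placeholder in place of the real `_build`/execution machinery:

```python
import textwrap

# Fixed prefix mirrored from the simplified ManagedJobCodeGen._PREFIX.
_PREFIX = textwrap.dedent("""\
    from sky.jobs import utils
    from sky.jobs import state as managed_job_state
    """)

def stream_logs_code(job_id, job_name, follow=True, controller=False) -> str:
    # Bake the arguments into the snippet with repr() so strings and None
    # survive the round trip, then prepend the import prefix.
    body = textwrap.dedent(f"""\
        msg = utils.stream_logs({job_id!r}, {job_name!r},
                                follow={follow}, controller={controller})
        print(msg, flush=True)
        """)
    return _PREFIX + body

# The real codegen hands this string to _build() for remote execution; here
# we only print the generated snippet to show its shape.
print(stream_logs_code(3, None))
```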
sky/skylet/constants.py CHANGED
@@ -86,7 +86,7 @@ TASK_ID_LIST_ENV_VAR = 'SKYPILOT_TASK_IDS'
 # cluster yaml is updated.
 #
 # TODO(zongheng,zhanghao): make the upgrading of skylet automatic?
- SKYLET_VERSION = '11'
+ SKYLET_VERSION = '12'
 # The version of the lib files that skylet/jobs use. Whenever there is an API
 # change for the job_lib or log_lib, we need to bump this version, so that the
 # user can be notified to update their SkyPilot version on the remote cluster.
@@ -278,7 +278,7 @@ FILE_MOUNTS_WORKDIR_SUBPATH = 'job-{run_id}/workdir'
 FILE_MOUNTS_SUBPATH = 'job-{run_id}/local-file-mounts/{i}'
 FILE_MOUNTS_TMP_SUBPATH = 'job-{run_id}/tmp-files'
 
- # The default idle timeout for SkyPilot controllers. This include spot
+ # The default idle timeout for SkyPilot controllers. This include jobs
 # controller and sky serve controller.
 # TODO(tian): Refactor to controller_utils. Current blocker: circular import.
 CONTROLLER_IDLE_MINUTES_TO_AUTOSTOP = 10
sky/utils/common_utils.py CHANGED
@@ -28,7 +28,6 @@ from sky.utils import validator
 
 _USER_HASH_FILE = os.path.expanduser('~/.sky/user_hash')
 USER_HASH_LENGTH = 8
- USER_HASH_LENGTH_IN_CLUSTER_NAME = 4
 
 # We are using base36 to reduce the length of the hash. 2 chars -> 36^2 = 1296
 # possibilities. considering the final cluster name contains the prefix as well,
@@ -182,7 +181,7 @@ def make_cluster_name_on_cloud(display_name: str,
 f'on the cloud, we convert it to {cluster_name_on_cloud}.')
 user_hash = ''
 if add_user_hash:
- user_hash = get_user_hash()[:USER_HASH_LENGTH_IN_CLUSTER_NAME]
+ user_hash = get_user_hash()
 user_hash = f'-{user_hash}'
 user_hash_length = len(user_hash)
 
sky/utils/controller_utils.py CHANGED
@@ -458,10 +458,6 @@ def get_controller_resources(
 if custom_controller_resources_config is not None:
 controller_resources_config_copied.update(
 custom_controller_resources_config)
- elif controller == Controllers.JOBS_CONTROLLER:
- controller_resources_config_copied.update(
- skypilot_config.get_nested(('spot', 'controller', 'resources'),
- {}))
 
 try:
 controller_resources = resources.Resources.from_yaml_config(
@@ -938,7 +934,7 @@ def maybe_translate_local_file_mounts_and_sync_up(task: 'task_lib.Task',
 if (storage_obj.source is not None and
 not data_utils.is_cloud_store_url(storage_obj.source)):
 # Need to replace the local path with bucket URI, and remove the
- # name field, so that the storage mount can work on the spot
+ # name field, so that the storage mount can work on the jobs
 # controller.
 store_types = list(storage_obj.stores.keys())
 assert len(store_types) == 1, (
sky/utils/dag_utils.py CHANGED
@@ -12,7 +12,7 @@ from sky.utils import ux_utils
 
 logger = sky_logging.init_logger(__name__)
 
- # Message thrown when APIs sky.{exec,launch,spot.launch}() received a string
+ # Message thrown when APIs sky.{exec,launch,jobs.launch}() received a string
 # instead of a Dag. CLI (cli.py) is implemented by us so should not trigger
 # this.
 _ENTRYPOINT_STRING_AS_DAG_MESSAGE = """\
@@ -31,7 +31,7 @@ The command can then be run as:
 
 sky.launch(task, ...)
 
- sky.spot.launch(task, ...)
+ sky.jobs.launch(task, ...)
 """.strip()
 
 
sky/utils/log_utils.py CHANGED
@@ -253,7 +253,8 @@ def readable_time_duration(start: Optional[float],
 e.g. "1h 2m 23s"
 """
 # start < 0 means that the starting time is not specified yet.
- # It is only used in spot_utils.show_jobs() for job duration calculation.
+ # It is only used in jobs_utils.format_job_table() for job duration
+ # calculation.
 if start is None or start < 0:
 return '-'
 if end == start == 0:
sky/utils/schemas.py CHANGED
@@ -86,11 +86,6 @@ def _get_single_resources_schema():
 'use_spot': {
 'type': 'boolean',
 },
- # Deprecated: use 'job_recovery' instead. This is for backward
- # compatibility, and can be removed in 0.8.0.
- 'spot_recovery': {
- 'type': 'string',
- },
 'job_recovery': {
 # Either a string or a dict.
 'anyOf': [{
@@ -256,8 +251,6 @@ def get_resources_schema():
 'items': multi_resources_schema,
 }
 },
- # Avoid job_recovery and spot_recovery being present at the same time.
- **_check_not_both_fields_present('job_recovery', 'spot_recovery')
 }
 
 
@@ -974,7 +967,6 @@ def get_config_schema():
 'additionalProperties': False,
 'properties': {
 'jobs': controller_resources_schema,
- 'spot': controller_resources_schema,
 'serve': controller_resources_schema,
 'allowed_clouds': allowed_clouds,
 'admin_policy': admin_policy_schema,
@@ -982,6 +974,4 @@ def get_config_schema():
 'nvidia_gpus': gpu_configs,
 **cloud_configs,
 },
- # Avoid spot and jobs being present at the same time.
- **_check_not_both_fields_present('spot', 'jobs')
 }
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: skypilot-nightly
- Version: 1.0.0.dev20250205
+ Version: 1.0.0.dev20250207
 Summary: SkyPilot: An intercloud broker for the clouds
 Author: SkyPilot Team
 License: Apache 2.0
@@ -244,7 +244,7 @@ To get the latest features and fixes, use the nightly build or [install from sou
 pip install "skypilot-nightly[kubernetes,aws,gcp,azure,oci,lambda,runpod,fluidstack,paperspace,cudo,ibm,scp]"
 ```
 
- [Current supported infra](https://docs.skypilot.co/en/latest/getting-started/installation.html) (Kubernetes; AWS, GCP, Azure, OCI, Lambda Cloud, Fluidstack, RunPod, Cudo, Paperspace, Cloudflare, Samsung, IBM, VMware vSphere):
+ [Current supported infra](https://docs.skypilot.co/en/latest/getting-started/installation.html) (Kubernetes; AWS, GCP, Azure, OCI, Lambda Cloud, Fluidstack, RunPod, Cudo, Digital Ocean, Paperspace, Cloudflare, Samsung, IBM, Vast.ai, VMware vSphere):
 <p align="center">
 <img alt="SkyPilot" src="https://raw.githubusercontent.com/skypilot-org/skypilot/master/docs/source/images/cloud-logos-light.png" width=85%>
 </p>
@@ -1,12 +1,12 @@
- sky/__init__.py,sha256=B43PI1LpfvJK1Jb5BrCfD6_mru2aGvYMleNoxyptbdc,5560
+ sky/__init__.py,sha256=45buNwgfNj1m2WOKO-BG42NwzGhW5XIp_2ugv36_1dI,5560
 sky/admin_policy.py,sha256=hPo02f_A32gCqhUueF0QYy1fMSSKqRwYEg_9FxScN_s,3248
- sky/authentication.py,sha256=ObVT8UOtkQJX4C-H9NhwzQouJzslFfVTfhYJVCisWMs,22338
+ sky/authentication.py,sha256=MNc9uHnvQ1EsEl8SsrYcYCGbxcnDbR6gaRCXVNd5RZE,22338
 sky/check.py,sha256=xzLlxUkBCrzpOho8lw65EvKLPl_b9lA2nteF5MSYbDQ,10885
 sky/cli.py,sha256=B-YWYiKnfSGdSOXtAY8SRGOGhneUeNPBjXFZ0FuLZ8w,214131
 sky/cloud_stores.py,sha256=PcLT57_8SZy7o6paAluElfBynaLkbaOq3l-8dNg1AVM,23672
 sky/core.py,sha256=fE1rn4Ku94S0XmWTO5-6t6eT6aaJImNczRqEnTe8v7Q,38742
 sky/dag.py,sha256=f3sJlkH4bE6Uuz3ozNtsMhcBpRx7KmC9Sa4seDKt4hU,3104
- sky/exceptions.py,sha256=rUi_au7QBNn3_wvwa8Y_MSHN3QDRpVLry8Mfa56LyGk,9197
+ sky/exceptions.py,sha256=SEhRubPlk-crkflPC5P_Z085iLrSd3UScYwc790QwYw,9378
 sky/execution.py,sha256=dpbk1kGRkGHT0FCJKGvjqeV3qIGEN2K20NDZbVrcAvI,28483
 sky/global_user_state.py,sha256=cTwltMCDIIBaapuGgARxFwpDJDCiKKyVW-PP_qtWuCA,30241
 sky/optimizer.py,sha256=d5BPAEZVrS3a2oBclSwo8MWkHQKQ3u4tcyawOANN0_0,59836
@@ -32,8 +32,8 @@ sky/adaptors/vast.py,sha256=tpvmHi7IkQNzbbHVkeo04kUSajoEpSzXr2XgeO_I1LU,695
 sky/adaptors/vsphere.py,sha256=zJP9SeObEoLrpgHW2VHvZE48EhgVf8GfAEIwBeaDMfM,2129
 sky/backends/__init__.py,sha256=UDjwbUgpTRApbPJnNfR786GadUuwgRk3vsWoVu5RB_c,536
 sky/backends/backend.py,sha256=iBs5gnMaaUoH2OIQ3xhAjWdrJWqj8T61Za9TGsBFpvQ,7515
- sky/backends/backend_utils.py,sha256=Ima6a_KkmpwaU0EpD4JWnjI03eXVODp9ZvBX7Oqx5NM,137528
- sky/backends/cloud_vm_ray_backend.py,sha256=f6KkBpavRyQ0MSarAuUISnYVSfkvJR7LFpA4U6dauUM,247146
+ sky/backends/backend_utils.py,sha256=A08BFVlmVfjgOHv7tz2jj7V6ku81E4QFy-nA5GjaNLg,137529
+ sky/backends/cloud_vm_ray_backend.py,sha256=Z2L-TWiASRLCcAK273c0gy7lRpW1NSYxiqaNo8G16wU,246746
 sky/backends/docker_utils.py,sha256=Hyw1YY20EyghhEbYx6O2FIMDcGkNzBzV9TM7LFynei8,8358
 sky/backends/local_docker_backend.py,sha256=nSYCjms3HOPjPNOrcCqsUKm1WV3AAovRFjEQ7hcEXW4,17021
 sky/backends/wheel_utils.py,sha256=5BUzBqfYz7p1ME6_0PXGmcsAkLVb8NrFt317p7a4X8s,8278
@@ -102,13 +102,13 @@ sky/data/storage.py,sha256=CWVKnHhdzXw1biPbRqYizkyVexL_OCELuJCqtd4hit4,204094
 sky/data/storage_utils.py,sha256=cM3kxlffYE7PnJySDu8huyUsMX_JYsf9uer8r5OYsjo,9556
 sky/jobs/__init__.py,sha256=ObZcz3lL1ip8JcmR6gbfZ4RMMfXJJdsnuU2zLQUb8jY,1546
 sky/jobs/constants.py,sha256=6RphkJ6pmafQ7XYW5qwId1Zvqb99HJelA9kgrgfNR7o,1421
- sky/jobs/controller.py,sha256=h4F60FMja-GHlyNpbujqb8lx82P4qf0ghKkXORfYMWY,28694
- sky/jobs/core.py,sha256=16oNEXz6HuoPYjnIa9UZBciwZKPGOwhkBd_mkWw4iOw,20063
- sky/jobs/recovery_strategy.py,sha256=m-EA-MWXPFrgx2CYFPr6MmgeUoDTEBmY2xruD2PRSGY,26365
+ sky/jobs/controller.py,sha256=cX8kGplwa-0Te_ihUfzzOr-TRs_Fw6UdFPm6mrtSE0c,28548
+ sky/jobs/core.py,sha256=b9aJB90AxUdhoasSxsWBoD-mQY1MmC05FbPbtyFMzHI,19154
+ sky/jobs/recovery_strategy.py,sha256=49H1ca5N4bIJ3W4iqurxzSvJE0dIihPt2XnstboxUm4,26370
 sky/jobs/scheduler.py,sha256=IUW0a_69Pkvs4jqsWCXkeMDIZn-TTuPNyZvPLGRUYUM,12306
- sky/jobs/state.py,sha256=bvBNZMg3DzPfS4eHNzMqYaMui2cqnWoWGDIaiOpaXSk,40770
- sky/jobs/utils.py,sha256=9tCKeY2x1lOgFQdaxqx6tZd2zd2e3pdUOQGvgvbf1Rk,52682
- sky/jobs/dashboard/dashboard.py,sha256=lLXAt755bkOh0XNjl5eQbu5vys7zYpvEoJgM97DkpeM,7706
+ sky/jobs/state.py,sha256=y9X1JGWpCokWBIwHZGB55fx39ZsilB1NZTbN_U8mPjA,43528
+ sky/jobs/utils.py,sha256=-xojxXXfv_Sh31YXY0pMFSQLMt65G-QEZe9ITGad56k,51943
+ sky/jobs/dashboard/dashboard.py,sha256=ZMysaI6m5vtGvT4OPUdStLY7Gkieefyzh1l9o_WILqY,7896
 sky/jobs/dashboard/static/favicon.ico,sha256=uYlvgxSM7gjBmXpZ8wydvZUPAbJiiix-rc2Xe5mma9s,15086
 sky/jobs/dashboard/templates/index.html,sha256=tz95q8O2pF7IvfY6yv0rnPyhj4DX8WX4RIVVxqFKV1Y,28519
 sky/provision/__init__.py,sha256=hb_z69_7-FH1I8aDpFKNj2x_a8spzceWcovklutNgP8,6370
@@ -215,7 +215,7 @@ sky/skylet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sky/skylet/attempt_skylet.py,sha256=GZ6ITjjA0m-da3IxXXfoHR6n4pjp3X3TOXUqVvSrV0k,2136
 sky/skylet/autostop_lib.py,sha256=JPDHmByuhoNYXSUHl-OnyeJUkOFWn7gDM1FrS7Kr3E8,4478
 sky/skylet/configs.py,sha256=UtnpmEL0F9hH6PSjhsps7xgjGZ6qzPOfW1p2yj9tSng,1887
- sky/skylet/constants.py,sha256=cMUJmj9iEY7dFW5pllijwrUlcKQmsJxgQSSrvTq9Ua8,16057
+ sky/skylet/constants.py,sha256=EUSW4yH59eqBDLMIdmQWIYd3nAJBFoUeo5v9MGiginI,16057
 sky/skylet/events.py,sha256=0bOjUYpphuAficD9wDB5NOan2vwJDaRqdnm4sl0RK0U,12535
 sky/skylet/job_lib.py,sha256=Rk-C069cusJIRXsks8xqCb016JSt7GlpU7LrpX0qFJk,42785
 sky/skylet/log_lib.py,sha256=oFEBd85vDYFrIyyZKekH30yc4rRYILC0F0o-COQ64oE,20445
@@ -270,17 +270,17 @@ sky/utils/admin_policy_utils.py,sha256=_Vt_jTTYCXmMdryj0vrrumFPewa93qHnzUqBDXjAh
 sky/utils/cluster_yaml_utils.py,sha256=1wRRYqI1kI-eFs1pMW4r_FFjHJ0zamq6v2RRI-Gtx5E,849
 sky/utils/command_runner.py,sha256=ewDjFxcCOv0OeG2aUOIfVWmTls65up9DvSnAXURvGfM,36696
 sky/utils/command_runner.pyi,sha256=mJOzCgcYZAfHwnY_6Wf1YwlTEJGb9ihzc2f0rE0Kw98,7751
- sky/utils/common_utils.py,sha256=Kh0iymQl9I4HXxYSc3TTcv-xeso27pU_1hGNOc9Xw2o,25370
+ sky/utils/common_utils.py,sha256=sAN4d105BxDjJts4nveGOUq61gQA6NVmTrXtp6H97hg,25298
 sky/utils/control_master_utils.py,sha256=90hnxiAUP20gbJ9e3MERh7rb04ZO_I3LsljNjR26H5I,1416
- sky/utils/controller_utils.py,sha256=pY27trKKrb23l4tRXwvvN4xDFHi6yb8iBONNFrpcN_I,46187
- sky/utils/dag_utils.py,sha256=R1yhJssvzDg13p6PJIC8OkYFBiR64eIx5xQeRpAG9n4,6099
+ sky/utils/controller_utils.py,sha256=SUrhK46ouBH2rm7azfFLIWr-T9-voYAdiXl2z5fG4Qw,45948
+ sky/utils/dag_utils.py,sha256=l_0O3RUfe9OdQ9mtbhdlHpJVD4VAF_HQ3A75dgsYIjM,6099
 sky/utils/db_utils.py,sha256=K2-OHPg0FeHCarevMdWe0IWzm6wWumViEeYeJuGoFUE,3747
 sky/utils/env_options.py,sha256=E5iwRFBUY2Iq6e0y0c1Mv5OSQ4MRNdk0-p38xUyVerc,1366
 sky/utils/kubernetes_enums.py,sha256=imGqHSa8O07zD_6xH1SDMM7dBU5lF5fzFFlQuQy00QM,1384
- sky/utils/log_utils.py,sha256=xEbUZfDiIiZkyWoLHXwIcqVMCBDEENsLCiogEXMDLt0,14139
+ sky/utils/log_utils.py,sha256=AjkgSrk0GVOUbnnCEC2f4lsf2HOIXkZETCxR0BJw2-U,14152
 sky/utils/resources_utils.py,sha256=06Kx6AfbBdwBYGmIYFEY_qm6OBc2a5esZMPvIX7gCvc,7787
 sky/utils/rich_utils.py,sha256=hmnI1X5dKvRIQzB7EyNb34FT97qFNve-0QHqM5r0mVk,3066
- sky/utils/schemas.py,sha256=32w70vJN0-C-00--QPGw_8zQTRnpgDMH-eSuxPII4bM,30555
+ sky/utils/schemas.py,sha256=eEv9F2haLGPKc-kbsoaxdXjmRY3kQNgOlwmOHL_19ME,30021
 sky/utils/subprocess_utils.py,sha256=YhtxqHoaZLw2M9TikTH56dTboZN3Qu2RsGeWo4uwJVA,12054
 sky/utils/timeline.py,sha256=ebHxKJK2HX0utGArrUgSezTPkcwav3VETa_AQS34t-E,3925
 sky/utils/ux_utils.py,sha256=CqyIFGDuSE8fQasPkna_loZMwtboC9KedR09WEQ7qz0,6502
@@ -298,9 +298,9 @@ sky/utils/kubernetes/k8s_gpu_labeler_job.yaml,sha256=k0TBoQ4zgf79-sVkixKSGYFHQ7Z
 sky/utils/kubernetes/k8s_gpu_labeler_setup.yaml,sha256=VLKT2KKimZu1GDg_4AIlIt488oMQvhRZWwsj9vBbPUg,3812
 sky/utils/kubernetes/rsync_helper.sh,sha256=h4YwrPFf9727CACnMJvF3EyK_0OeOYKKt4su_daKekw,1256
 sky/utils/kubernetes/ssh_jump_lifecycle_manager.py,sha256=Kq1MDygF2IxFmu9FXpCxqucXLmeUrvs6OtRij6XTQbo,6554
- skypilot_nightly-1.0.0.dev20250205.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
- skypilot_nightly-1.0.0.dev20250205.dist-info/METADATA,sha256=YqZalGuxw7-oCFt-WKVGSY76zSvwWeRUY-0z3vNW2-8,21373
- skypilot_nightly-1.0.0.dev20250205.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- skypilot_nightly-1.0.0.dev20250205.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
- skypilot_nightly-1.0.0.dev20250205.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
- skypilot_nightly-1.0.0.dev20250205.dist-info/RECORD,,
+ skypilot_nightly-1.0.0.dev20250207.dist-info/LICENSE,sha256=emRJAvE7ngL6x0RhQvlns5wJzGI3NEQ_WMjNmd9TZc4,12170
+ skypilot_nightly-1.0.0.dev20250207.dist-info/METADATA,sha256=fZ08jVIo5aS5m1BfmUDd9XtdumGH6f5MtQky3I49av8,21397
+ skypilot_nightly-1.0.0.dev20250207.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ skypilot_nightly-1.0.0.dev20250207.dist-info/entry_points.txt,sha256=StA6HYpuHj-Y61L2Ze-hK2IcLWgLZcML5gJu8cs6nU4,36
+ skypilot_nightly-1.0.0.dev20250207.dist-info/top_level.txt,sha256=qA8QuiNNb6Y1OF-pCUtPEr6sLEwy2xJX06Bd_CrtrHY,4
+ skypilot_nightly-1.0.0.dev20250207.dist-info/RECORD,,