skypilot-nightly 1.0.0.dev20241114__py3-none-any.whl → 1.0.0.dev20241116__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/backends/backend_utils.py +142 -74
- sky/backends/cloud_vm_ray_backend.py +15 -11
- sky/cli.py +15 -4
- sky/clouds/aws.py +1 -0
- sky/clouds/oci.py +0 -2
- sky/clouds/service_catalog/aws_catalog.py +2 -0
- sky/clouds/utils/oci_utils.py +5 -0
- sky/execution.py +43 -22
- sky/global_user_state.py +36 -16
- sky/jobs/core.py +0 -1
- sky/jobs/utils.py +4 -3
- sky/provision/kubernetes/utils.py +2 -0
- sky/provision/oci/instance.py +12 -11
- sky/provision/oci/query_utils.py +212 -6
- sky/serve/core.py +1 -0
- sky/serve/serve_utils.py +35 -30
- sky/skylet/constants.py +1 -1
- sky/skylet/job_lib.py +249 -138
- sky/skylet/log_lib.py +1 -34
- sky/skylet/subprocess_daemon.py +33 -13
- sky/utils/controller_utils.py +10 -9
- sky/utils/schemas.py +1 -0
- sky/utils/subprocess_utils.py +50 -0
- sky/utils/timeline.py +2 -4
- {skypilot_nightly-1.0.0.dev20241114.dist-info → skypilot_nightly-1.0.0.dev20241116.dist-info}/METADATA +1 -1
- {skypilot_nightly-1.0.0.dev20241114.dist-info → skypilot_nightly-1.0.0.dev20241116.dist-info}/RECORD +31 -31
- {skypilot_nightly-1.0.0.dev20241114.dist-info → skypilot_nightly-1.0.0.dev20241116.dist-info}/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20241114.dist-info → skypilot_nightly-1.0.0.dev20241116.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20241114.dist-info → skypilot_nightly-1.0.0.dev20241116.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20241114.dist-info → skypilot_nightly-1.0.0.dev20241116.dist-info}/top_level.txt +0 -0
sky/skylet/job_lib.py
CHANGED
@@ -8,6 +8,7 @@ import json
 import os
 import pathlib
 import shlex
+import signal
 import sqlite3
 import subprocess
 import time
@@ -27,6 +28,10 @@ logger = sky_logging.init_logger(__name__)
 
 _LINUX_NEW_LINE = '\n'
 _JOB_STATUS_LOCK = '~/.sky/locks/.job_{}.lock'
+# JOB_CMD_IDENTIFIER is used for checking that the process retrieved
+# by pid is the same driver process, to guard against the case where
+# the same pid is reused by a different process.
+JOB_CMD_IDENTIFIER = 'echo "SKYPILOT_JOB_ID <{}>"'
 
 
 def _get_lock_path(job_id: int) -> str:
@@ -46,6 +51,7 @@ class JobInfoLoc(enum.IntEnum):
     START_AT = 6
     END_AT = 7
     RESOURCES = 8
+    PID = 9
 
 
 _DB_PATH = os.path.expanduser('~/.sky/jobs.db')
@@ -67,6 +73,16 @@ def create_table(cursor, conn):
     # If the database is locked, it is OK to continue, as the WAL mode
     # is not critical and is likely to be enabled by other processes.
 
+    # Pid column is used for keeping track of the driver process of a job. It
+    # can be in three states:
+    # -1: The job was submitted with SkyPilot older than #4318, where we use
+    #   ray job submit to submit the job, i.e. no pid is recorded. This is for
+    #   backward compatibility and should be removed after 0.10.0.
+    # 0: The job driver process has never been started. When adding a job with
+    #   INIT state, the pid will be set to 0 (the default -1 value is just for
+    #   backward compatibility).
+    # > 0: The job has been started. The pid is the driver process's pid.
+    #   The driver can be actually running or finished.
     cursor.execute("""\
         CREATE TABLE IF NOT EXISTS jobs (
         job_id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -75,7 +91,10 @@ def create_table(cursor, conn):
         submitted_at FLOAT,
         status TEXT,
         run_timestamp TEXT CANDIDATE KEY,
-        start_at FLOAT DEFAULT -1
+        start_at FLOAT DEFAULT -1,
+        end_at FLOAT DEFAULT NULL,
+        resources TEXT DEFAULT NULL,
+        pid INTEGER DEFAULT -1)""")
 
     cursor.execute("""CREATE TABLE IF NOT EXISTS pending_jobs(
         job_id INTEGER,
@@ -86,7 +105,8 @@ def create_table(cursor, conn):
 
     db_utils.add_column_to_table(cursor, conn, 'jobs', 'end_at', 'FLOAT')
     db_utils.add_column_to_table(cursor, conn, 'jobs', 'resources', 'TEXT')
-
+    db_utils.add_column_to_table(cursor, conn, 'jobs', 'pid',
+                                 'INTEGER DEFAULT -1')
     conn.commit()
 
 
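A note on the migration above: `db_utils.add_column_to_table` presumably issues sqlite's `ALTER TABLE jobs ADD COLUMN ...` (its body is not part of this diff). With a constant default, sqlite reports that default for rows that predate the column, which is exactly what gives `-1` its "submitted by an older SkyPilot" meaning. A minimal standalone sketch:

# Minimal sketch (not SkyPilot code) of why the -1 default is backward
# compatible: rows that predate the column read back as -1.
import sqlite3

conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
cursor.execute('CREATE TABLE jobs (job_id INTEGER PRIMARY KEY AUTOINCREMENT, '
               'status TEXT)')
cursor.execute("INSERT INTO jobs VALUES (null, 'RUNNING')")  # pre-upgrade row

# What add_column_to_table presumably runs under the hood (assumption):
cursor.execute('ALTER TABLE jobs ADD COLUMN pid INTEGER DEFAULT -1')
cursor.execute("INSERT INTO jobs VALUES (null, 'INIT', 0)")  # post-upgrade row

print(cursor.execute('SELECT job_id, pid FROM jobs').fetchall())
# [(1, -1), (2, 0)]: the old job reads as pid=-1 (pre-#4318, ray-submitted),
# the new job starts at pid=0 (driver not started yet).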
@@ -118,6 +138,11 @@ class JobStatus(enum.Enum):
     # In the 'jobs' table, the `start_at` column will be set to the current
     # time, when the job is firstly transitioned to RUNNING.
     RUNNING = 'RUNNING'
+    # The job driver process failed. This happens when the job driver process
+    # finishes while the status in the job table is not yet set to a terminal
+    # state. We should keep this state before SUCCEEDED, as our job status
+    # update relies on the order of the statuses to keep the latest status.
+    FAILED_DRIVER = 'FAILED_DRIVER'
     # 3 terminal states below: once reached, they do not transition.
     # The job finished successfully.
     SUCCEEDED = 'SUCCEEDED'
@@ -148,11 +173,16 @@ class JobStatus(enum.Enum):
         return f'{color}{self.value}{colorama.Style.RESET_ALL}'
 
 
-#
-#
-#
+# We have two steps for job submissions:
+# 1. The client reserves a job id from the job table by adding an INIT job.
+# 2. The client updates the job status to PENDING by actually submitting the
+#    job's command to the scheduler.
+# In normal cases, the two steps happen very close to each other through two
+# consecutive SSH connections.
+# We should update the status of an INIT job that has stayed in INIT state
+# for a while (60 seconds), as it likely failed to reach step 2.
 # TODO(zhwu): This number should be tuned based on heuristics.
-
+_INIT_SUBMIT_GRACE_PERIOD = 60
 
 _PRE_RESOURCE_STATUSES = [JobStatus.PENDING]
 
@@ -175,7 +205,39 @@ class JobScheduler:
         _CURSOR.execute((f'UPDATE pending_jobs SET submit={int(time.time())} '
                          f'WHERE job_id={job_id!r}'))
         _CONN.commit()
-
+        # Use nohup to ensure the job driver process is a separate process tree,
+        # instead of being a child of the current process. This is important to
+        # avoid a chain of driver processes (job driver can call schedule_step()
+        # to submit new jobs, and the new job can also call schedule_step()
+        # recursively).
+        #
+        # echo $! will output the PID of the last background process started
+        # in the current shell, so we can retrieve it and record it in the DB.
+        #
+        # TODO(zhwu): A more elegant solution is to use another daemon process
+        # to be in charge of starting these driver processes, instead of
+        # starting them in the current process.
+        wrapped_cmd = (f'nohup bash -c {shlex.quote(run_cmd)} '
+                       '</dev/null >/dev/null 2>&1 & echo $!')
+        proc = subprocess.run(wrapped_cmd,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE,
+                              stdin=subprocess.DEVNULL,
+                              start_new_session=True,
+                              check=True,
+                              shell=True,
+                              text=True)
+        # Get the PID of the detached process
+        pid = int(proc.stdout.strip())
+
+        # TODO(zhwu): Backward compatibility, remove this check after 0.10.0.
+        # This is for the case where the job is submitted with SkyPilot older
+        # than #4318, using ray job submit.
+        if 'job submit' in run_cmd:
+            pid = -1
+        _CURSOR.execute((f'UPDATE jobs SET pid={pid} '
+                         f'WHERE job_id={job_id!r}'))
+        _CONN.commit()
 
     def schedule_step(self, force_update_jobs: bool = False) -> None:
         if force_update_jobs:
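The `wrapped_cmd` construction is doing two jobs at once: `nohup ... &` detaches the driver into its own process tree, and `echo $!` makes the shell print the driver's PID as the only stdout of the wrapper, so `proc.stdout` parses cleanly as an int. A standalone sketch of the pattern (not SkyPilot code; `sleep 30` stands in for the real driver command):

import shlex
import subprocess

run_cmd = 'sleep 30'  # stand-in for the real job driver command
wrapped_cmd = (f'nohup bash -c {shlex.quote(run_cmd)} '
               '</dev/null >/dev/null 2>&1 & echo $!')
proc = subprocess.run(wrapped_cmd,
                      stdout=subprocess.PIPE,
                      start_new_session=True,  # driver leads its own session
                      check=True,
                      shell=True,
                      text=True)
# All driver output goes to /dev/null, so stdout holds only the PID that
# `echo $!` printed for the backgrounded process.
pid = int(proc.stdout.strip())
print(f'detached driver pid: {pid}')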
@@ -237,59 +299,13 @@ _JOB_STATUS_TO_COLOR = {
     JobStatus.SETTING_UP: colorama.Fore.BLUE,
     JobStatus.PENDING: colorama.Fore.BLUE,
     JobStatus.RUNNING: colorama.Fore.GREEN,
+    JobStatus.FAILED_DRIVER: colorama.Fore.RED,
     JobStatus.SUCCEEDED: colorama.Fore.GREEN,
     JobStatus.FAILED: colorama.Fore.RED,
     JobStatus.FAILED_SETUP: colorama.Fore.RED,
     JobStatus.CANCELLED: colorama.Fore.YELLOW,
 }
 
-_RAY_TO_JOB_STATUS_MAP = {
-    # These are intentionally set this way, because:
-    # 1. when the ray status indicates the job is PENDING the generated
-    #    python program has been `ray job submit` from the job queue
-    #    and is now PENDING
-    # 2. when the ray status indicates the job is RUNNING the job can be in
-    #    setup or resources may not be allocated yet, i.e. the job should be
-    #    PENDING.
-    # For case 2, update_job_status() would compare this mapped PENDING to
-    # the status in our jobs DB and take the max. This is because the job's
-    # generated ray program is the only place that can determine a job has
-    # reserved resources and actually started running: it will set the
-    # status in the DB to SETTING_UP or RUNNING.
-    # If there is no setup specified in the task, as soon as it is started
-    # (ray's status becomes RUNNING), i.e. it will be very rare that the job
-    # will be set to SETTING_UP by the update_job_status, as our generated
-    # ray program will set the status to PENDING immediately.
-    'PENDING': JobStatus.PENDING,
-    'RUNNING': JobStatus.PENDING,
-    'SUCCEEDED': JobStatus.SUCCEEDED,
-    'FAILED': JobStatus.FAILED,
-    'STOPPED': JobStatus.CANCELLED,
-}
-
-
-def _create_ray_job_submission_client():
-    """Import the ray job submission client."""
-    try:
-        import ray  # pylint: disable=import-outside-toplevel
-    except ImportError:
-        logger.error('Failed to import ray')
-        raise
-    try:
-        # pylint: disable=import-outside-toplevel
-        from ray import job_submission
-    except ImportError:
-        logger.error(
-            f'Failed to import job_submission with ray=={ray.__version__}')
-        raise
-    port = get_job_submission_port()
-    return job_submission.JobSubmissionClient(
-        address=f'http://127.0.0.1:{port}')
-
-
-def make_ray_job_id(sky_job_id: int) -> str:
-    return f'{sky_job_id}-{getpass.getuser()}'
-
 
 def make_job_command_with_user_switching(username: str,
                                          command: str) -> List[str]:
@@ -301,9 +317,10 @@ def add_job(job_name: str, username: str, run_timestamp: str,
     """Atomically reserve the next available job id for the user."""
     job_submitted_at = time.time()
     # job_id will autoincrement with the null value
-    _CURSOR.execute(
-
-
+    _CURSOR.execute(
+        'INSERT INTO jobs VALUES (null, ?, ?, ?, ?, ?, ?, null, ?, 0)',
+        (job_name, username, job_submitted_at, JobStatus.INIT.value,
+         run_timestamp, None, resources_str))
     _CONN.commit()
     rows = _CURSOR.execute('SELECT job_id FROM jobs WHERE run_timestamp=(?)',
                            (run_timestamp,))
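The trailing literal in the new INSERT is easy to misread. Lining the positional values up against the CREATE TABLE columns above makes it clear that the hard-coded `0` lands in `pid`, i.e. a fresh INIT job starts in the "driver never started" state rather than the backward-compat `-1`:

# Positional mapping of the new INSERT, per the CREATE TABLE columns above:
#   null                 -> job_id       (autoincrement)
#   job_name             -> job_name
#   username             -> username
#   job_submitted_at     -> submitted_at
#   JobStatus.INIT.value -> status
#   run_timestamp        -> run_timestamp
#   None                 -> start_at
#   null                 -> end_at
#   resources_str        -> resources
#   0                    -> pid          (INIT job: driver never started;
#                                         -1 is reserved for pre-#4318,
#                                         ray-submitted jobs)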
@@ -478,6 +495,7 @@ def _get_records_from_rows(rows) -> List[Dict[str, Any]]:
             'start_at': row[JobInfoLoc.START_AT.value],
             'end_at': row[JobInfoLoc.END_AT.value],
             'resources': row[JobInfoLoc.RESOURCES.value],
+            'pid': row[JobInfoLoc.PID.value],
         })
     return records
 
@@ -537,6 +555,23 @@ def _get_pending_job(job_id: int) -> Optional[Dict[str, Any]]:
     return None
 
 
+def _is_job_driver_process_running(job_pid: int, job_id: int) -> bool:
+    """Check if the job driver process is running.
+
+    We check the cmdline to avoid the case where the same pid is reused by a
+    different process.
+    """
+    if job_pid <= 0:
+        return False
+    try:
+        job_process = psutil.Process(job_pid)
+        return job_process.is_running() and any(
+            JOB_CMD_IDENTIFIER.format(job_id) in line
+            for line in job_process.cmdline())
+    except psutil.NoSuchProcess:
+        return False
+
+
 def update_job_status(job_ids: List[int],
                       silent: bool = False) -> List[JobStatus]:
     """Updates and returns the job statuses matching our `JobStatus` semantics.
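The cmdline check works because the driver is started as `bash -c '<run_cmd>'`, so the whole command, including the `SKYPILOT_JOB_ID <N>` marker that JOB_CMD_IDENTIFIER injects, shows up as a single argv element of the live process. A small sketch of the mechanism (the exact wrapping of run_cmd with the identifier is assumed here; it is not shown in this diff):

import subprocess

import psutil

JOB_CMD_IDENTIFIER = 'echo "SKYPILOT_JOB_ID <{}>"'
job_id = 42
# Assumed: the run_cmd handed to JobScheduler.queue() embeds the identifier;
# the wrapping itself is not shown in this diff.
run_cmd = f'{JOB_CMD_IDENTIFIER.format(job_id)} && sleep 5'
driver = subprocess.Popen(['bash', '-c', run_cmd])

argv = psutil.Process(driver.pid).cmdline()
# argv == ['bash', '-c', 'echo "SKYPILOT_JOB_ID <42>" && sleep 5'], so the
# marker appears inside one argv element; a recycled PID running some other
# command would fail this check even though the PID itself is alive.
assert any(JOB_CMD_IDENTIFIER.format(job_id) in arg for arg in argv)
driver.terminate()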
@@ -554,11 +589,8 @@ def update_job_status(job_ids: List[int],
     if len(job_ids) == 0:
         return []
 
-    ray_job_ids = [make_ray_job_id(job_id) for job_id in job_ids]
-    job_client = _create_ray_job_submission_client()
-
     statuses = []
-    for job_id
+    for job_id in job_ids:
         # Per-job status lock is required because between the job status
         # query and the job status update, the job status in the database
         # can be modified by the generated ray program.
@@ -567,11 +599,13 @@
             job_record = _get_jobs_by_ids([job_id])[0]
             original_status = job_record['status']
             job_submitted_at = job_record['submitted_at']
+            job_pid = job_record['pid']
 
-
+            pid_query_time = time.time()
+            failed_driver_transition_message = None
             if original_status == JobStatus.INIT:
                 if (job_submitted_at >= psutil.boot_time() and job_submitted_at
-                        >=
+                        >= pid_query_time - _INIT_SUBMIT_GRACE_PERIOD):
                     # The job id is reserved, but the job is not submitted yet.
                     # We should keep it in INIT.
                     status = JobStatus.INIT
@@ -582,75 +616,98 @@
                     # was killed before the job is submitted. We should set it
                     # to FAILED then. Note, if ray job indicates the job is
                     # running, we will change status to PENDING below.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    failed_driver_transition_message = (
+                        f'INIT job {job_id} is stale, setting to FAILED_DRIVER')
+                    status = JobStatus.FAILED_DRIVER
+
+            # job_pid is 0 if the job is not submitted yet.
+            # job_pid is -1 if the job is submitted with SkyPilot older than
+            # #4318, using ray job submit. We skip the checking for those
+            # jobs.
+            if job_pid > 0:
+                if _is_job_driver_process_running(job_pid, job_id):
+                    status = JobStatus.PENDING
+                else:
+                    # By default, if the job driver process does not exist,
+                    # the actual SkyPilot job is one of the following:
+                    # 1. Still pending to be submitted.
+                    # 2. Submitted and finished.
+                    # 3. Driver failed without correctly setting the job
+                    #    status in the job table.
+                    # Although we set the status to FAILED_DRIVER, it can be
+                    # overridden to PENDING if the job is not submitted, or
+                    # any other terminal status if the job driver process
+                    # finished correctly.
+                    failed_driver_transition_message = (
+                        f'Job {job_id} driver process is not running, but '
+                        'the job state is not in terminal states, setting '
+                        'it to FAILED_DRIVER')
+                    status = JobStatus.FAILED_DRIVER
+            elif job_pid < 0:
+                # TODO(zhwu): Backward compatibility, remove after 0.9.0.
+                # We set the job status to PENDING instead of actually
+                # checking the ray job status, and let the status in the job
+                # table take effect in the later max.
+                status = JobStatus.PENDING
 
             pending_job = _get_pending_job(job_id)
             if pending_job is not None:
                 if pending_job['created_time'] < psutil.boot_time():
-
-
-
+                    failed_driver_transition_message = (
+                        f'Job {job_id} is stale, setting to FAILED_DRIVER: '
+                        f'created_time={pending_job["created_time"]}, '
+                        f'boot_time={psutil.boot_time()}')
                     # The job is stale as it is created before the instance
                     # is booted, e.g. the instance is rebooted.
-                    status = JobStatus.
-
-
-
-
-
-
-
-                    #
-                    #
-                    #
+                    status = JobStatus.FAILED_DRIVER
+                elif pending_job['submit'] <= 0:
+                    # The job is not submitted (submit <= 0), so we set it to
+                    # PENDING.
+                    # For submitted jobs, the driver should have been started,
+                    # because job_lib.JobScheduler.schedule_step() sets the
+                    # submit field and the driver process pid under the same
+                    # job lock.
+                    # The job process check in the above section should have
+                    # correctly figured out the status, and we don't overwrite
+                    # it here. (Note: the FAILED_DRIVER status will be
+                    # overridden by the actual job terminal status in the table
+                    # if the job driver process finished correctly.)
                     status = JobStatus.PENDING
 
             assert original_status is not None, (job_id, status)
             if status is None:
+                # The job is submitted but the job driver process pid is not
+                # set in the database. This is guarding against the case where
+                # the schedule_step() function is interrupted (e.g., VM stop)
+                # in the middle of starting a new process and setting the pid.
                 status = original_status
                 if (original_status is not None and
                         not original_status.is_terminal()):
-                    echo(f'
-                    '
-                    # The job may be stale, when the instance is restarted
-                    #
-                    # status
-
-
+                    echo(f'Job {job_id} status is None, setting it to '
+                         'FAILED_DRIVER.')
+                    # The job may be stale, when the instance is restarted. We
+                    # need to reset the job status to FAILED_DRIVER if its
+                    # original status is in nonterminal_statuses.
+                    echo(f'Job {job_id} is in an unknown state, setting it to '
+                         'FAILED_DRIVER')
+                    status = JobStatus.FAILED_DRIVER
                     _set_status_no_lock(job_id, status)
-                    echo(f'Updated job {job_id} status to {status}')
             else:
                 # Taking max of the status is necessary because:
-                # 1.
-                #
-                #
-                #
-                #
-                #
-                # generated ray program, `original_status` (job status from our
-                # DB) would already have that value. So we take the max here to
-                # keep it at later status.
+                # 1. The original status may have already been set to a later
+                #    terminal state by a finished job driver.
+                # 2. The job driver process check maps any running job process
+                #    to `PENDING`, so we need to take the max to keep it at a
+                #    later status for jobs actually started in SETTING_UP or
+                #    RUNNING.
                 status = max(status, original_status)
             assert status is not None, (job_id, status, original_status)
             if status != original_status:  # Prevents redundant update.
                 _set_status_no_lock(job_id, status)
                 echo(f'Updated job {job_id} status to {status}')
+            if (status == JobStatus.FAILED_DRIVER and
+                    failed_driver_transition_message is not None):
+                echo(failed_driver_transition_message)
             statuses.append(status)
     return statuses
 
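The `status = max(status, original_status)` line only works if JobStatus values are ordered by definition order, which is also why the new FAILED_DRIVER member had to be declared before SUCCEEDED. The comparison methods themselves are not shown in this diff; a minimal sketch of the assumed ordering behavior:

import enum
import functools


@functools.total_ordering
class JobStatus(enum.Enum):
    """Sketch (not SkyPilot code): ordering follows definition order, so
    max() keeps the later, more advanced status."""
    INIT = 'INIT'
    PENDING = 'PENDING'
    SETTING_UP = 'SETTING_UP'
    RUNNING = 'RUNNING'
    FAILED_DRIVER = 'FAILED_DRIVER'
    SUCCEEDED = 'SUCCEEDED'

    def __lt__(self, other: 'JobStatus') -> bool:
        order = list(JobStatus)
        return order.index(self) < order.index(other)


# A live driver maps the job to PENDING, but a RUNNING row in the DB wins:
assert max(JobStatus.PENDING, JobStatus.RUNNING) is JobStatus.RUNNING
# A dead driver proposes FAILED_DRIVER, but a SUCCEEDED row the driver wrote
# before exiting wins, because FAILED_DRIVER is declared before SUCCEEDED:
assert max(JobStatus.FAILED_DRIVER, JobStatus.SUCCEEDED) is JobStatus.SUCCEEDED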
@@ -663,17 +720,13 @@ def fail_all_jobs_in_progress() -> None:
         f"""\
         UPDATE jobs SET status=(?)
         WHERE status IN ({','.join(['?'] * len(in_progress_status))})
-        """, (JobStatus.
+        """, (JobStatus.FAILED_DRIVER.value, *in_progress_status))
     _CONN.commit()
 
 
 def update_status() -> None:
     # This will be called periodically by the skylet to update the status
     # of the jobs in the database, to avoid stale job status.
-    # NOTE: there might be a INIT job in the database set to FAILED by this
-    # function, as the ray job status does not exist due to the app
-    # not submitted yet. It will be then reset to PENDING / RUNNING when the
-    # app starts.
     nonterminal_jobs = _get_jobs(username=None,
                                  status_list=JobStatus.nonterminal_statuses())
     nonterminal_job_ids = [job['job_id'] for job in nonterminal_jobs]
@@ -756,6 +809,31 @@ def load_job_queue(payload: str) -> List[Dict[str, Any]]:
     return jobs
 
 
+# TODO(zhwu): Backward compatibility for jobs submitted before #4318, remove
+# after 0.10.0.
+def _create_ray_job_submission_client():
+    """Import the ray job submission client."""
+    try:
+        import ray  # pylint: disable=import-outside-toplevel
+    except ImportError:
+        logger.error('Failed to import ray')
+        raise
+    try:
+        # pylint: disable=import-outside-toplevel
+        from ray import job_submission
+    except ImportError:
+        logger.error(
+            f'Failed to import job_submission with ray=={ray.__version__}')
+        raise
+    port = get_job_submission_port()
+    return job_submission.JobSubmissionClient(
+        address=f'http://127.0.0.1:{port}')
+
+
+def _make_ray_job_id(sky_job_id: int) -> str:
+    return f'{sky_job_id}-{getpass.getuser()}'
+
+
 def cancel_jobs_encoded_results(jobs: Optional[List[int]],
                                 cancel_all: bool = False) -> str:
     """Cancel jobs.
@@ -783,27 +861,51 @@ def cancel_jobs_encoded_results(jobs: Optional[List[int]],
     # Cancel jobs with specified IDs.
     job_records = _get_jobs_by_ids(jobs)
 
-    # TODO(zhwu): `job_client.stop_job` will wait for the jobs to be killed, but
-    # when the memory is not enough, this will keep waiting.
-    job_client = _create_ray_job_submission_client()
     cancelled_ids = []
 
     # Sequentially cancel the jobs to avoid the resource number bug caused by
     # ray cluster (tracked in #1262).
-    for
-    job_id =
+    for job_record in job_records:
+        job_id = job_record['job_id']
         # Job is locked to ensure that pending queue does not start it while
         # it is being cancelled
-        with filelock.FileLock(_get_lock_path(
-
-
-
-        #
-        #
-
-
-
-
+        with filelock.FileLock(_get_lock_path(job_id)):
+            job = _get_jobs_by_ids([job_id])[0]
+            if _is_job_driver_process_running(job['pid'], job_id):
+                # Do not use process.terminate(), as that will only terminate
+                # the shell wrapper process, not the ray driver process
+                # under the shell.
+                #
+                # We don't kill all the children of the process, like
+                # subprocess_utils.kill_process_daemon() does, but just the
+                # process group here, because the underlying job driver can
+                # start other jobs with `schedule_step`, causing the other job
+                # driver processes to be children of the current job driver
+                # process.
+                #
+                # Killing the process group is enough, as the underlying job
+                # should be cleaned up correctly by the ray driver.
+                #
+                # The process group pid should be the same as the job pid as we
+                # use start_new_session=True, but we use os.getpgid() to be
+                # extra cautious.
+                job_pgid = os.getpgid(job['pid'])
+                os.killpg(job_pgid, signal.SIGTERM)
+                # We don't have to start a daemon to forcefully kill the
+                # process, as our job driver process will clean up the
+                # underlying child processes.
+            elif job['pid'] < 0:
+                try:
+                    # TODO(zhwu): Backward compatibility, remove after 0.9.0.
+                    # The job was submitted with ray job submit before #4318.
+                    job_client = _create_ray_job_submission_client()
+                    job_client.stop_job(_make_ray_job_id(job['job_id']))
+                except RuntimeError as e:
+                    # If the request to the job server fails, we should not
+                    # set the job to CANCELLED.
+                    if 'does not exist' not in str(e):
+                        logger.warning(str(e))
+                        continue
         # Get the job status again to avoid race condition.
         job_status = get_status_no_lock(job['job_id'])
         if job_status in [
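The killpg path leans on the `start_new_session=True` used in `queue()` above: the driver was made a session (and process-group) leader, so one signal to its group reaches both the shell and whatever it spawned in that group, which `proc.terminate()` alone would miss. A standalone sketch (not SkyPilot code):

import os
import signal
import subprocess
import time

# start_new_session=True makes the shell the leader of a fresh session and
# process group, so its pgid equals its pid.
proc = subprocess.Popen(['bash', '-c', 'sleep 60 & wait'],
                        start_new_session=True)
time.sleep(0.5)  # give bash a moment to fork the background sleep
assert os.getpgid(proc.pid) == proc.pid

# Signal the whole group: this reaches both bash and the backgrounded sleep,
# which a plain proc.terminate() (SIGTERM to bash only) would leave running.
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
proc.wait()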
@@ -865,10 +967,17 @@ class JobLibCodeGen:
         if job_name is None:
            job_name = '-'
         code = [
-
-
-
-
+            # We disallow job submission when SKYLET_VERSION is older than 9, as
+            # it was using ray job submit before #4318, and switched to a raw
+            # process. Using the old skylet version will cause the job status
+            # to be stuck in PENDING state or transition to FAILED_DRIVER state.
+            '\nif int(constants.SKYLET_VERSION) < 9: '
+            'raise RuntimeError("SkyPilot runtime is too old, which does not '
+            'support submitting jobs.")',
+            '\njob_id = job_lib.add_job('
+            f'{job_name!r},'
+            f'{username!r},'
+            f'{run_timestamp!r},'
             f'{resources_str!r})',
             'print("Job ID: " + str(job_id), flush=True)',
         ]
@@ -876,9 +985,11 @@
 
     @classmethod
     def queue_job(cls, job_id: int, cmd: str) -> str:
-        code = [
-
-
+        code = [
+            'job_lib.scheduler.queue('
+            f'{job_id!r},'
+            f'{cmd!r})',
+        ]
         return cls._build(code)
 
     @classmethod
sky/skylet/log_lib.py
CHANGED
@@ -183,40 +183,7 @@ def run_with_log(
             shell=shell,
             **kwargs) as proc:
         try:
-
-            # open a new subprocess to gracefully kill the proc, SIGTERM
-            # and then SIGKILL the process group.
-            # Adapted from ray/dashboard/modules/job/job_manager.py#L154
-            parent_pid = os.getpid()
-            daemon_script = os.path.join(
-                os.path.dirname(os.path.abspath(job_lib.__file__)),
-                'subprocess_daemon.py')
-            python_path = subprocess.check_output(
-                constants.SKY_GET_PYTHON_PATH_CMD,
-                shell=True,
-                stderr=subprocess.DEVNULL,
-                encoding='utf-8').strip()
-            daemon_cmd = [
-                python_path,
-                daemon_script,
-                '--parent-pid',
-                str(parent_pid),
-                '--proc-pid',
-                str(proc.pid),
-            ]
-
-            # We do not need to set `start_new_session=True` here, as the
-            # daemon script will detach itself from the parent process with
-            # fork to avoid being killed by ray job. See the reason we
-            # daemonize the process in `sky/skylet/subprocess_daemon.py`.
-            subprocess.Popen(
-                daemon_cmd,
-                # Suppress output
-                stdout=subprocess.DEVNULL,
-                stderr=subprocess.DEVNULL,
-                # Disable input
-                stdin=subprocess.DEVNULL,
-            )
+            subprocess_utils.kill_process_daemon(proc.pid)
             stdout = ''
             stderr = ''
 
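The 34 removed lines collapse into a single call to the new `subprocess_utils.kill_process_daemon()` helper (part of the +50 lines in sky/utils/subprocess_utils.py listed in the summary, alongside the sky/skylet/subprocess_daemon.py changes). Its body is not shown in this diff; a rough reconstruction based on the code it replaces, with sys.executable standing in for the SKY_GET_PYTHON_PATH_CMD lookup:

import os
import subprocess
import sys

from sky.skylet import job_lib  # used only to locate subprocess_daemon.py


def kill_process_daemon(proc_pid: int) -> None:
    """Hypothetical reconstruction of the new helper.

    Spawns a detached watchdog that SIGTERMs (then SIGKILLs) proc_pid's
    process tree once the calling process dies. The real implementation is
    in sky/utils/subprocess_utils.py and is not shown in this diff.
    """
    daemon_script = os.path.join(
        os.path.dirname(os.path.abspath(job_lib.__file__)),
        'subprocess_daemon.py')
    daemon_cmd = [
        sys.executable,  # simplification; the old code resolved SkyPilot's python
        daemon_script,
        '--parent-pid',
        str(os.getpid()),
        '--proc-pid',
        str(proc_pid),
    ]
    # As in the removed code, no start_new_session: the daemon script
    # detaches itself (see sky/skylet/subprocess_daemon.py).
    subprocess.Popen(daemon_cmd,
                     stdout=subprocess.DEVNULL,
                     stderr=subprocess.DEVNULL,
                     stdin=subprocess.DEVNULL)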