skypilot-nightly 1.0.0.dev20251027__py3-none-any.whl → 1.0.0.dev20251101__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of skypilot-nightly might be problematic.
- sky/__init__.py +2 -2
- sky/adaptors/aws.py +25 -7
- sky/adaptors/coreweave.py +278 -0
- sky/backends/backend_utils.py +9 -6
- sky/backends/cloud_vm_ray_backend.py +2 -3
- sky/check.py +25 -13
- sky/client/cli/command.py +52 -24
- sky/cloud_stores.py +73 -0
- sky/clouds/aws.py +59 -11
- sky/core.py +7 -5
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/_next/static/{YP5Vc3ROcDnTGta0XAhcs → 8ixeA0NVQJN8HUdijid8b}/_buildManifest.js +1 -1
- sky/dashboard/out/_next/static/chunks/{1141-d5204f35a3388bf4.js → 1141-c3c10e2c6ed71a8f.js} +1 -1
- sky/dashboard/out/_next/static/chunks/2755.d6dc6d530fed0b61.js +26 -0
- sky/dashboard/out/_next/static/chunks/3294.87a13fba0058865b.js +1 -0
- sky/dashboard/out/_next/static/chunks/{3785.538eb23a098fc304.js → 3785.170be320e0060eaf.js} +1 -1
- sky/dashboard/out/_next/static/chunks/4282-49b2065b7336e496.js +1 -0
- sky/dashboard/out/_next/static/chunks/7615-80aa7b09f45a86d2.js +1 -0
- sky/dashboard/out/_next/static/chunks/8969-4ed9236db997b42b.js +1 -0
- sky/dashboard/out/_next/static/chunks/9360.10a3aac7aad5e3aa.js +31 -0
- sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-ac4a217f17b087cb.js +16 -0
- sky/dashboard/out/_next/static/chunks/pages/clusters/{[cluster]-fbf2907ce2bb67e2.js → [cluster]-1704039ccaf997cf.js} +1 -1
- sky/dashboard/out/_next/static/chunks/pages/{jobs-0dc34cf9a8710a9f.js → jobs-7eee823559e5cf9f.js} +1 -1
- sky/dashboard/out/_next/static/chunks/pages/{users-96d6b8bb2dec055f.js → users-2b172f13f8538a7a.js} +1 -1
- sky/dashboard/out/_next/static/chunks/pages/workspaces/{[name]-fb1b4d3bfb047cad.js → [name]-bbfe5860c93470fd.js} +1 -1
- sky/dashboard/out/_next/static/chunks/pages/{workspaces-6fc994fa1ee6c6bf.js → workspaces-1891376c08050940.js} +1 -1
- sky/dashboard/out/_next/static/chunks/{webpack-585d805f693dbceb.js → webpack-e38d5319cd10a3a0.js} +1 -1
- sky/dashboard/out/_next/static/css/0748ce22df867032.css +3 -0
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs/pools/[pool].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/data/data_utils.py +92 -1
- sky/data/mounting_utils.py +71 -2
- sky/data/storage.py +166 -9
- sky/global_user_state.py +14 -18
- sky/jobs/constants.py +2 -0
- sky/jobs/controller.py +62 -67
- sky/jobs/file_content_utils.py +80 -0
- sky/jobs/log_gc.py +201 -0
- sky/jobs/scheduler.py +15 -2
- sky/jobs/server/core.py +85 -13
- sky/jobs/server/server.py +14 -13
- sky/jobs/server/utils.py +28 -10
- sky/jobs/state.py +216 -40
- sky/jobs/utils.py +65 -28
- sky/metrics/utils.py +18 -0
- sky/optimizer.py +1 -1
- sky/provision/kubernetes/instance.py +88 -19
- sky/provision/kubernetes/volume.py +2 -2
- sky/schemas/api/responses.py +3 -5
- sky/schemas/db/spot_jobs/004_job_file_contents.py +42 -0
- sky/schemas/db/spot_jobs/005_logs_gc.py +38 -0
- sky/schemas/generated/managed_jobsv1_pb2.py +39 -35
- sky/schemas/generated/managed_jobsv1_pb2.pyi +21 -5
- sky/serve/replica_managers.py +2 -2
- sky/serve/serve_utils.py +9 -2
- sky/serve/server/server.py +8 -7
- sky/server/common.py +21 -15
- sky/server/constants.py +1 -1
- sky/server/daemons.py +23 -17
- sky/server/requests/executor.py +7 -3
- sky/server/requests/payloads.py +2 -0
- sky/server/requests/request_names.py +80 -0
- sky/server/requests/requests.py +137 -102
- sky/server/requests/serializers/decoders.py +0 -6
- sky/server/requests/serializers/encoders.py +33 -6
- sky/server/server.py +105 -36
- sky/server/stream_utils.py +56 -13
- sky/setup_files/dependencies.py +2 -0
- sky/skylet/constants.py +6 -1
- sky/skylet/events.py +7 -0
- sky/skylet/services.py +18 -7
- sky/ssh_node_pools/server.py +5 -4
- sky/task.py +14 -42
- sky/templates/kubernetes-ray.yml.j2 +1 -1
- sky/templates/nebius-ray.yml.j2 +1 -0
- sky/templates/websocket_proxy.py +140 -12
- sky/users/permission.py +4 -1
- sky/utils/cli_utils/status_utils.py +8 -2
- sky/utils/context_utils.py +13 -1
- sky/utils/db/migration_utils.py +1 -1
- sky/utils/resource_checker.py +4 -1
- sky/utils/resources_utils.py +53 -29
- sky/utils/schemas.py +23 -4
- sky/volumes/server/server.py +4 -3
- sky/workspaces/server.py +7 -6
- {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/METADATA +53 -37
- {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/RECORD +106 -100
- sky/dashboard/out/_next/static/chunks/2755.227c84f5adf75c6b.js +0 -26
- sky/dashboard/out/_next/static/chunks/3015-2dcace420c8939f4.js +0 -1
- sky/dashboard/out/_next/static/chunks/3294.6d5054a953a818cb.js +0 -1
- sky/dashboard/out/_next/static/chunks/4282-d2f3ef2fbf78e347.js +0 -1
- sky/dashboard/out/_next/static/chunks/8969-0389e2cb52412db3.js +0 -1
- sky/dashboard/out/_next/static/chunks/9360.07d78b8552bc9d17.js +0 -31
- sky/dashboard/out/_next/static/chunks/pages/clusters/[cluster]/[job]-c815b90e296b8075.js +0 -16
- sky/dashboard/out/_next/static/css/4c052b4444e52a58.css +0 -3
- /sky/dashboard/out/_next/static/{YP5Vc3ROcDnTGta0XAhcs → 8ixeA0NVQJN8HUdijid8b}/_ssgManifest.js +0 -0
- /sky/dashboard/out/_next/static/chunks/pages/{_app-513d332313670f2a.js → _app-bde01e4a2beec258.js} +0 -0
- {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20251027.dist-info → skypilot_nightly-1.0.0.dev20251101.dist-info}/top_level.txt +0 -0
sky/jobs/controller.py
CHANGED
@@ -1,15 +1,17 @@
 """Controller: handles scheduling and the life cycle of a managed job.
 """
 import asyncio
+import io
 import os
 import pathlib
 import resource
 import shutil
 import sys
+import threading
 import time
 import traceback
 import typing
-from typing import Dict, Optional, Set
+from typing import Dict, Optional, Set
 
 import dotenv
 
@@ -22,6 +24,8 @@ from sky.backends import backend_utils
 from sky.backends import cloud_vm_ray_backend
 from sky.data import data_utils
 from sky.jobs import constants as jobs_constants
+from sky.jobs import file_content_utils
+from sky.jobs import log_gc
 from sky.jobs import recovery_strategy
 from sky.jobs import scheduler
 from sky.jobs import state as managed_job_state
@@ -29,6 +33,7 @@ from sky.jobs import utils as managed_job_utils
 from sky.skylet import constants
 from sky.skylet import job_lib
 from sky.usage import usage_lib
+from sky.utils import annotations
 from sky.utils import common
 from sky.utils import common_utils
 from sky.utils import context
@@ -61,17 +66,26 @@ async def create_background_task(coro: typing.Coroutine) -> None:
     task.add_done_callback(_background_tasks.discard)
 
 
-
-
-
-
-
+# Make sure to limit the size as we don't want to cache too many DAGs in memory.
+@annotations.lru_cache(scope='global', maxsize=50)
+def _get_dag(job_id: int) -> 'sky.Dag':
+    dag_content = file_content_utils.get_job_dag_content(job_id)
+    if dag_content is None:
+        raise RuntimeError('Managed job DAG YAML content is unavailable for '
+                           f'job {job_id}. This can happen if the job was '
+                           'submitted before file migration completed or if '
+                           'the submission failed to persist the DAG. Please '
+                           're-submit the job.')
 
+    dag = dag_utils.load_chain_dag_from_yaml_str(dag_content)
+    assert dag.name is not None, dag
+    return dag
 
-
+
+class JobController:
     """Controls the lifecycle of a single managed job.
 
-    This controller executes
+    This controller executes the chain DAG recorded for the job by:
     - Loading the DAG and preparing per-task environment variables so each task
       has a stable global job identifier across recoveries.
     - Launching the task on the configured backend (``CloudVmRayBackend``),
@@ -91,7 +105,8 @@ class JobsController:
 
     Key attributes:
     - ``_job_id``: Integer identifier of this managed job.
-    - ``
+    - ``_dag`` / ``_dag_name``: The job definition and metadata loaded from the
+      database-backed job YAML.
     - ``_backend``: Backend used to launch and manage clusters.
     - ``_pool``: Optional pool name if using a cluster pool.
     - ``starting`` / ``starting_lock`` / ``starting_signal``: Shared scheduler
@@ -104,7 +119,6 @@ class JobsController:
     def __init__(
         self,
         job_id: int,
-        dag_yaml: str,
         starting: Set[int],
         starting_lock: asyncio.Lock,
         starting_signal: asyncio.Condition,
@@ -114,7 +128,6 @@ class JobsController:
 
         Args:
             job_id: Integer ID of the managed job.
-            dag_yaml: Path to the YAML file containing the chain DAG to run.
             starting: Shared set of job IDs currently in the STARTING phase,
                 used to limit concurrent launches.
             starting_lock: ``asyncio.Lock`` guarding access to the shared
@@ -130,12 +143,11 @@ class JobsController:
         self.starting_lock = starting_lock
         self.starting_signal = starting_signal
 
-        logger.info(
-            f'dag_yaml={dag_yaml}')
+        logger.info('Initializing JobsController for job_id=%s', job_id)
 
         self._job_id = job_id
-        self.
-        self.
+        self._dag = _get_dag(job_id)
+        self._dag_name = self._dag.name
         logger.info(f'Loaded DAG: {self._dag}')
 
         self._backend = cloud_vm_ray_backend.CloudVmRayBackend()
@@ -779,8 +791,11 @@ class JobsController:
                     task=self._dag.tasks[task_id]))
 
 
-class
-    """
+class ControllerManager:
+    """Main loop for a job controller process.
+
+    Many jobs will be handled by this, each by a single JobController.
+    """
 
     def __init__(self, controller_uuid: str) -> None:
         self._controller_uuid = controller_uuid
@@ -799,10 +814,7 @@ class Controller:
 
         self._pid = os.getpid()
 
-    async def _cleanup(self,
-                       job_id: int,
-                       dag_yaml: str,
-                       pool: Optional[str] = None):
+    async def _cleanup(self, job_id: int, pool: Optional[str] = None):
         """Clean up the cluster(s) and storages.
 
         (1) Clean up the succeeded task(s)' ephemeral storage. The storage has
@@ -892,7 +904,7 @@ class Controller:
         if error is not None:
             raise error
 
-        dag
+        dag = _get_dag(job_id)
         error = None
         for task in dag.tasks:
             # most things in this function are blocking
@@ -911,57 +923,45 @@ class Controller:
    @context.contextual_async
    async def run_job_loop(self,
                           job_id: int,
-                           dag_yaml: str,
                           log_file: str,
-                           env_file_path: Optional[str] = None,
                           pool: Optional[str] = None):
        """Background task that runs the job loop."""
        ctx = context.get()
        assert ctx is not None, 'Context is not initialized'
        ctx.redirect_log(pathlib.Path(log_file))
 
-        logger.info(
-        logger.info(
-        logger.info(
-        logger.info(f' env_file_path={env_file_path}')
-        logger.info(f' pool={pool}')
+        logger.info('Starting job loop for %s', job_id)
+        logger.info(' log_file=%s', log_file)
+        logger.info(' pool=%s', pool)
        logger.info(f'From controller {self._controller_uuid}')
        logger.info(f' pid={self._pid}')
 
-
-        if
+        env_content = file_content_utils.get_job_env_content(job_id)
+        if env_content:
            try:
-
-
-
-                    f'{list(env_vars.keys())}')
-
-                # Apply environment variables to the job's context
+                env_vars = dotenv.dotenv_values(stream=io.StringIO(env_content))
+                logger.info('Loading %d environment variables for job %s',
+                            len(env_vars), job_id)
                if ctx is not None:
                    for key, value in env_vars.items():
                        if value is not None:
                            ctx.override_envs({key: value})
-                            logger.debug(
-
-                    # Reload the skypilot config for this context to make sure
-                    # the latest config is used.
+                            logger.debug('Set environment variable: %s=%s', key,
+                                         value)
                    skypilot_config.reload_config()
-                else:
-                    logger.error(
-
+                else:  # pragma: no cover - defensive
+                    logger.error('Context is None, cannot set environment '
+                                 'variables')
            except Exception as e:  # pylint: disable=broad-except
                logger.error(
-
-
-            logger.error(f'Environment file not found: {env_file_path}')
+                    'Failed to load environment variables for job %s: '
+                    '%s', job_id, e)
 
        cancelling = False
        try:
-
-
-
-                                        self._job_tasks_lock,
-                                        self._starting_signal, pool)
+            controller = JobController(job_id, self.starting,
+                                       self._job_tasks_lock,
+                                       self._starting_signal, pool)
 
            async with self._job_tasks_lock:
                if job_id in self.job_tasks:
@@ -976,7 +976,7 @@ class Controller:
            await task
        except asyncio.CancelledError:
            logger.info(f'Job {job_id} was cancelled')
-            dag
+            dag = _get_dag(job_id)
            task_id, _ = await (
                managed_job_state.get_latest_task_id_status_async(job_id))
            assert task_id is not None, job_id
@@ -994,7 +994,7 @@ class Controller:
                raise
        finally:
            try:
-                await self._cleanup(job_id,
+                await self._cleanup(job_id, pool=pool)
                logger.info(
                    f'Cluster of managed job {job_id} has been cleaned up.')
            except Exception as e:  # pylint: disable=broad-except
@@ -1056,29 +1056,23 @@ class Controller:
    async def start_job(
        self,
        job_id: int,
-        dag_yaml: str,
-        env_file_path: Optional[str] = None,
        pool: Optional[str] = None,
    ):
        """Start a new job.
 
        Args:
            job_id: The ID of the job to start.
-            dag_yaml: Path to the YAML file containing the DAG definition.
-            env_file_path: Optional path to environment file for the job.
        """
        # Create log file path for job output redirection
        log_dir = os.path.expanduser(jobs_constants.JOBS_CONTROLLER_LOGS_DIR)
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, f'{job_id}.log')
 
-        logger.info(f'Starting job {job_id} with
-                    f'env_file_path={env_file_path}, and log_file={log_file}')
+        logger.info(f'Starting job {job_id} with log_file={log_file}')
 
        async with self._job_tasks_lock:
            self.starting.add(job_id)
-        await create_background_task(
-            self.run_job_loop(job_id, dag_yaml, log_file, env_file_path, pool))
+        await create_background_task(self.run_job_loop(job_id, log_file, pool))
 
        logger.info(f'Job {job_id} started successfully')
 
@@ -1151,8 +1145,6 @@ class Controller:
 
                logger.info(f'Claiming job {waiting_job["job_id"]}')
                job_id = waiting_job['job_id']
-                dag_yaml_path = waiting_job['dag_yaml_path']
-                env_file_path = waiting_job.get('env_file_path')
                pool = waiting_job.get('pool', None)
 
                cancels = os.listdir(jobs_constants.CONSOLIDATED_SIGNAL_PATH)
@@ -1172,7 +1164,7 @@ class Controller:
                            job_id=job_id, task_id=None, task=None))
                    continue
 
-                await self.start_job(job_id,
+                await self.start_job(job_id, pool)
 
 
 async def main(controller_uuid: str):
@@ -1180,7 +1172,7 @@ async def main(controller_uuid: str):
 
    context_utils.hijack_sys_attrs()
 
-    controller =
+    controller = ControllerManager(controller_uuid)
 
    # Will happen multiple times, who cares though
    os.makedirs(jobs_constants.CONSOLIDATED_SIGNAL_PATH, exist_ok=True)
@@ -1199,7 +1191,10 @@ async def main(controller_uuid: str):
    # Will loop forever, do it in the background
    cancel_job_task = asyncio.create_task(controller.cancel_job())
    monitor_loop_task = asyncio.create_task(controller.monitor_loop())
-
+    # Run the garbage collector in a dedicated daemon thread to avoid affecting
+    # the main event loop.
+    gc_thread = threading.Thread(target=log_gc.elect_for_log_gc, daemon=True)
+    gc_thread.start()
    try:
        await asyncio.gather(cancel_job_task, monitor_loop_task)
    except Exception as e:  # pylint: disable=broad-except
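With this change the controller no longer receives dag_yaml/env_file_path paths: it loads both from the stored job record, caches the parsed DAG per job_id, and parses environment variables from an in-memory string. Below is a minimal sketch of that pattern, using functools.lru_cache and python-dotenv as stand-ins; the _fetch_job_record helper is hypothetical, and SkyPilot's actual code goes through sky.jobs.file_content_utils and sky.utils.annotations.lru_cache instead.

```python
# Sketch only: functools.lru_cache and python-dotenv stand in for SkyPilot's
# annotations.lru_cache and its database-backed file_content_utils helpers.
import functools
import io
from typing import Dict, Optional

import dotenv  # pip install python-dotenv


def _fetch_job_record(job_id: int) -> Dict[str, Optional[str]]:
    # Hypothetical stand-in for the database lookup of stored job files.
    return {
        'dag_yaml_content': f'name: job-{job_id}\nrun: echo hello\n',
        'env_file_content': 'FOO=bar\nTOKEN=abc123\n',
    }


@functools.lru_cache(maxsize=50)  # bound memory: keep at most 50 jobs' YAML
def get_dag_yaml(job_id: int) -> str:
    content = _fetch_job_record(job_id)['dag_yaml_content']
    if content is None:
        raise RuntimeError(f'No DAG YAML stored for job {job_id}')
    return content


def load_env_vars(job_id: int) -> Dict[str, Optional[str]]:
    # Parse dotenv-style content directly from a string, no temp file needed.
    env_content = _fetch_job_record(job_id)['env_file_content'] or ''
    return dotenv.dotenv_values(stream=io.StringIO(env_content))


if __name__ == '__main__':
    print(get_dag_yaml(7))
    print(load_env_vars(7))  # {'FOO': 'bar', 'TOKEN': 'abc123'}
```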
sky/jobs/file_content_utils.py
ADDED
@@ -0,0 +1,80 @@
+"""Utilities for managing managed job file content.
+
+The helpers in this module fetch job file content (DAG YAML/env files) from the
+database-first storage added for managed jobs, transparently falling back to
+legacy on-disk paths when needed. Consumers should prefer the string-based
+helpers so controllers never have to rely on local disk state.
+"""
+
+import os
+from typing import Optional
+
+from sky import sky_logging
+from sky.jobs import state as managed_job_state
+
+logger = sky_logging.init_logger(__name__)
+
+
+def get_job_dag_content(job_id: int) -> Optional[str]:
+    """Get DAG YAML content for a job from database or disk.
+
+    Args:
+        job_id: The job ID
+
+    Returns:
+        DAG YAML content as string, or None if not found
+    """
+    file_info = managed_job_state.get_job_file_contents(job_id)
+
+    # Prefer content stored in the database
+    if file_info['dag_yaml_content'] is not None:
+        return file_info['dag_yaml_content']
+
+    # Fallback to disk path for backward compatibility
+    dag_yaml_path = file_info.get('dag_yaml_path')
+    if dag_yaml_path and os.path.exists(dag_yaml_path):
+        try:
+            with open(dag_yaml_path, 'r', encoding='utf-8') as f:
+                content = f.read()
+                logger.debug('Loaded DAG YAML from disk for job %s: %s', job_id,
+                             dag_yaml_path)
+                return content
+        except (FileNotFoundError, IOError, OSError) as e:
+            logger.warning(
+                f'Failed to read DAG YAML from disk {dag_yaml_path}: {e}')
+
+    logger.warning(f'DAG YAML content not found for job {job_id}')
+    return None
+
+
+def get_job_env_content(job_id: int) -> Optional[str]:
+    """Get environment file content for a job from database or disk.
+
+    Args:
+        job_id: The job ID
+
+    Returns:
+        Environment file content as string, or None if not found
+    """
+    file_info = managed_job_state.get_job_file_contents(job_id)
+
+    # Prefer content stored in the database
+    if file_info['env_file_content'] is not None:
+        return file_info['env_file_content']
+
+    # Fallback to disk path for backward compatibility
+    env_file_path = file_info.get('env_file_path')
+    if env_file_path and os.path.exists(env_file_path):
+        try:
+            with open(env_file_path, 'r', encoding='utf-8') as f:
+                content = f.read()
+                logger.debug('Loaded environment file from disk for job %s: %s',
+                             job_id, env_file_path)
+                return content
+        except (FileNotFoundError, IOError, OSError) as e:
+            logger.warning(
+                f'Failed to read environment file from disk {env_file_path}: '
+                f'{e}')
+
+    # Environment file is optional, so don't warn if not found
+    return None
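For reference, a hedged usage sketch of the two helpers above. The call to load_chain_dag_from_yaml_str mirrors what the controller diff does; the sky.utils.dag_utils module path is assumed from SkyPilot's existing layout and the job ID is made up.

```python
# Sketch: resolve a managed job's stored files without touching local disk.
from sky.jobs import file_content_utils
from sky.utils import dag_utils  # assumed module path for the DAG loader

job_id = 42  # hypothetical managed job ID

dag_yaml = file_content_utils.get_job_dag_content(job_id)
if dag_yaml is None:
    raise RuntimeError(f'No stored DAG for job {job_id}; re-submit the job.')
dag = dag_utils.load_chain_dag_from_yaml_str(dag_yaml)

env_content = file_content_utils.get_job_env_content(job_id)  # may be None
```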
sky/jobs/log_gc.py
ADDED
@@ -0,0 +1,201 @@
+"""Log garbage collection for managed jobs."""
+
+import asyncio
+from datetime import datetime
+import os
+import pathlib
+import shutil
+import time
+
+import anyio
+import filelock
+
+from sky import sky_logging
+from sky import skypilot_config
+from sky.jobs import constants as managed_job_constants
+from sky.jobs import state as managed_job_state
+from sky.jobs import utils as managed_job_utils
+from sky.utils import context
+from sky.utils import context_utils
+
+logger = sky_logging.init_logger(__name__)
+
+# Filelock for garbage collector leader election.
+_JOB_CONTROLLER_GC_LOCK_PATH = os.path.expanduser(
+    '~/.sky/locks/job_controller_gc.lock')
+
+_DEFAULT_TASK_LOGS_GC_RETENTION_HOURS = 24 * 7
+_DEFAULT_CONTROLLER_LOGS_GC_RETENTION_HOURS = 24 * 7
+
+_LEAST_FREQUENT_GC_INTERVAL_SECONDS = 3600
+_MOST_FREQUENT_GC_INTERVAL_SECONDS = 30
+
+
+def _next_gc_interval(retention_seconds: int) -> int:
+    """Get the next GC interval."""
+    # Run the GC at least per hour to ensure hourly accuracy and
+    # at most per 30 seconds (when retention_seconds is small) to
+    # avoid too frequent cleanup.
+    return max(min(retention_seconds, _LEAST_FREQUENT_GC_INTERVAL_SECONDS),
+               _MOST_FREQUENT_GC_INTERVAL_SECONDS)
+
+
+async def gc_controller_logs_for_job():
+    """Garbage collect job and controller logs."""
+    while True:
+        skypilot_config.reload_config()
+        controller_logs_retention = skypilot_config.get_nested(
+            ('jobs', 'controller', 'controller_logs_gc_retention_hours'),
+            _DEFAULT_CONTROLLER_LOGS_GC_RETENTION_HOURS) * 3600
+        # Negative value disables the GC
+        if controller_logs_retention >= 0:
+            logger.info(f'GC controller logs for job: retention '
+                        f'{controller_logs_retention} seconds')
+            try:
+                finished = False
+                while not finished:
+                    finished = await _clean_controller_logs_with_retention(
+                        controller_logs_retention)
+            except asyncio.CancelledError:
+                logger.info('Managed jobs logs GC task cancelled')
+                break
+            except Exception as e:  # pylint: disable=broad-except
+                logger.error(f'Error GC controller logs for job: {e}',
+                             exc_info=True)
+        else:
+            logger.info('Controller logs GC is disabled')
+
+        interval = _next_gc_interval(controller_logs_retention)
+        logger.info('Next controller logs GC is scheduled after '
+                    f'{interval} seconds')
+        await asyncio.sleep(interval)
+
+
+async def gc_task_logs_for_job():
+    """Garbage collect task logs for job."""
+    while True:
+        skypilot_config.reload_config()
+        task_logs_retention = skypilot_config.get_nested(
+            ('jobs', 'controller', 'task_logs_gc_retention_hours'),
+            _DEFAULT_TASK_LOGS_GC_RETENTION_HOURS) * 3600
+        # Negative value disables the GC
+        if task_logs_retention >= 0:
+            logger.info('GC task logs for job: '
+                        f'retention {task_logs_retention} seconds')
+            try:
+                finished = False
+                while not finished:
+                    finished = await _clean_task_logs_with_retention(
+                        task_logs_retention)
+            except asyncio.CancelledError:
+                logger.info('Task logs GC task cancelled')
+                break
+            except Exception as e:  # pylint: disable=broad-except
+                logger.error(f'Error GC task logs for job: {e}', exc_info=True)
+        else:
+            logger.info('Task logs GC is disabled')
+
+        interval = _next_gc_interval(task_logs_retention)
+        logger.info(f'Next task logs GC is scheduled after {interval} seconds')
+        await asyncio.sleep(interval)
+
+
+async def _clean_controller_logs_with_retention(retention_seconds: int,
+                                                batch_size: int = 100):
+    """Clean controller logs with retention.
+
+    Returns:
+        Whether the GC of this round has finished, False means there might
+        still be more controller logs to clean.
+    """
+    assert batch_size > 0, 'Batch size must be positive'
+    jobs = await managed_job_state.get_controller_logs_to_clean_async(
+        retention_seconds, batch_size=batch_size)
+    job_ids_to_update = []
+    for job in jobs:
+        job_ids_to_update.append(job['job_id'])
+        log_file = managed_job_utils.controller_log_file_for_job(job['job_id'])
+        cleaned_at = time.time()
+        if await anyio.Path(log_file).exists():
+            ts_str = datetime.fromtimestamp(cleaned_at).strftime(
+                '%Y-%m-%d %H:%M:%S')
+            msg = f'Controller log has been cleaned at {ts_str}.'
+            # Sync down logs will reference to this file directly, so we
+            # keep the file and delete the content.
+            # TODO(aylei): refactor sync down logs if the inode usage
+            # becomes an issue.
+            async with await anyio.open_file(log_file, 'w',
+                                             encoding='utf-8') as f:
+                await f.write(msg + '\n')
+    # Batch the update, the timestamp will be not accurate but it's okay.
+    await managed_job_state.set_controller_logs_cleaned_async(
+        job_ids=job_ids_to_update, logs_cleaned_at=time.time())
+    complete = len(jobs) < batch_size
+    logger.info(f'Cleaned {len(jobs)} controller logs with retention '
+                f'{retention_seconds} seconds, complete: {complete}')
+    return complete
+
+
+async def _clean_task_logs_with_retention(retention_seconds: int,
+                                          batch_size: int = 100):
+    """Clean task logs with retention.
+
+    Returns:
+        Whether the GC of this round has finished, False means there might
+        still be more task logs to clean.
+    """
+    assert batch_size > 0, 'Batch size must be positive'
+    tasks = await managed_job_state.get_task_logs_to_clean_async(
+        retention_seconds, batch_size=batch_size)
+    tasks_to_update = []
+    for task in tasks:
+        local_log_file = anyio.Path(task['local_log_file'])
+        # We assume the log directory has the following layout:
+        # task-id/
+        # - run.log
+        # - tasks/
+        #   - run.log
+        # and also remove the tasks directory on cleanup.
+        task_log_dir = local_log_file.parent.joinpath('tasks')
+        await local_log_file.unlink(missing_ok=True)
+        await context_utils.to_thread(shutil.rmtree,
+                                      str(task_log_dir),
+                                      ignore_errors=True)
+        # We have at-least-once semantics for the cleanup here.
+        tasks_to_update.append((task['job_id'], task['task_id']))
+    await managed_job_state.set_task_logs_cleaned_async(
+        tasks=list(tasks_to_update), logs_cleaned_at=time.time())
+    complete = len(tasks) < batch_size
+    logger.info(f'Cleaned {len(tasks)} task logs with retention '
+                f'{retention_seconds} seconds, complete: {complete}')
+    return complete
+
+
+@context.contextual_async
+async def run_log_gc():
+    """Run the log garbage collector."""
+    log_dir = os.path.expanduser(managed_job_constants.JOBS_CONTROLLER_LOGS_DIR)
+    os.makedirs(log_dir, exist_ok=True)
+    log_path = os.path.join(log_dir, 'garbage_collector.log')
+    # Remove previous log file
+    await anyio.Path(log_path).unlink(missing_ok=True)
+    ctx = context.get()
+    assert ctx is not None, 'Context is not initialized'
+    ctx.redirect_log(pathlib.Path(log_path))
+    gc_controller_logs_for_job_task = asyncio.create_task(
+        gc_controller_logs_for_job())
+    gc_task_logs_for_job_task = asyncio.create_task(gc_task_logs_for_job())
+    await asyncio.gather(gc_controller_logs_for_job_task,
+                         gc_task_logs_for_job_task)
+
+
+def elect_for_log_gc():
+    """Use a filelock to elect the log garbage collector.
+
+    The log garbage collector runs in the controller process to avoid the
+    overhead of launching a new process and managing its lifecycle; the
+    threads that are not elected as the log garbage collector just wait
+    on the filelock and add trivial overhead.
+    """
+    with filelock.FileLock(_JOB_CONTROLLER_GC_LOCK_PATH):
+        asyncio.run(run_log_gc())
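elect_for_log_gc uses a simple leader-election pattern: every controller process spawns a daemon thread that blocks on a shared file lock, and only the thread that acquires the lock runs the asyncio-based GC loop. Below is a standalone sketch of that pattern with the filelock package; the lock path and worker body are illustrative, not SkyPilot's.

```python
# Sketch of filelock-based leader election for a background GC worker.
import asyncio
import os
import threading

import filelock  # pip install filelock

LOCK_PATH = os.path.expanduser('~/.demo/gc_leader.lock')


async def gc_loop() -> None:
    while True:
        print(f'[pid={os.getpid()}] running GC sweep')
        await asyncio.sleep(5)


def elect_and_run() -> None:
    os.makedirs(os.path.dirname(LOCK_PATH), exist_ok=True)
    # Blocks until this thread becomes the leader; non-leaders just wait here.
    with filelock.FileLock(LOCK_PATH):
        asyncio.run(gc_loop())


if __name__ == '__main__':
    threading.Thread(target=elect_and_run, daemon=True).start()
    # The main thread would continue with the controller's event loop here.
    threading.Event().wait(timeout=20)  # keep the demo process alive briefly
```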
sky/jobs/scheduler.py
CHANGED
@@ -263,6 +263,7 @@ def maybe_start_controllers(from_scheduler: bool = False) -> None:
 
         if started > 0:
             logger.info(f'Started {started} controllers')
+
     except filelock.Timeout:
         # If we can't get the lock, just exit. The process holding the lock
         # should launch any pending jobs.
@@ -289,8 +290,20 @@ def submit_job(job_id: int, dag_yaml_path: str, original_user_yaml_path: str,
         maybe_start_controllers(from_scheduler=True)
         return
 
-
-
+    with open(dag_yaml_path, 'r', encoding='utf-8') as dag_file:
+        dag_yaml_content = dag_file.read()
+    with open(original_user_yaml_path, 'r',
+              encoding='utf-8') as original_user_yaml_file:
+        original_user_yaml_content = original_user_yaml_file.read()
+    with open(env_file_path, 'r', encoding='utf-8') as env_file:
+        env_file_content = env_file.read()
+    logger.debug(f'Storing job {job_id} file contents in database '
+                 f'(DAG bytes={len(dag_yaml_content)}, '
+                 f'original user yaml bytes={len(original_user_yaml_content)}, '
+                 f'env bytes={len(env_file_content)}).')
+    state.scheduler_set_waiting(job_id, dag_yaml_content,
+                                original_user_yaml_content, env_file_content,
+                                priority)
     if state.get_ha_recovery_script(job_id) is None:
         # the run command is just the command that called scheduler
         run = (f'source {env_file_path} && '