skypilot-nightly 1.0.0.dev20250926__py3-none-any.whl → 1.0.0.dev20251001__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sky/__init__.py +2 -2
- sky/backends/backend_utils.py +43 -14
- sky/backends/cloud_vm_ray_backend.py +153 -38
- sky/check.py +0 -29
- sky/client/cli/command.py +48 -26
- sky/client/cli/table_utils.py +91 -0
- sky/client/sdk.py +14 -23
- sky/client/sdk_async.py +5 -5
- sky/core.py +18 -20
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/_next/static/chunks/{3294.03e02ae73455f48e.js → 3294.93d9336bdc032b3a.js} +1 -1
- sky/dashboard/out/_next/static/chunks/6856-5fdc9b851a18acdb.js +1 -0
- sky/dashboard/out/_next/static/chunks/pages/jobs/pools/{[pool]-07349868f7905d37.js → [pool]-509b2977a6373bf6.js} +1 -1
- sky/dashboard/out/_next/static/chunks/{webpack-8e64d11e58eab5cb.js → webpack-4f0c389a4ce5fd9c.js} +1 -1
- sky/dashboard/out/_next/static/{VXU6_xE28M55BOdwmUUJS → m3YT2i5s6v4SsIdYc8WZa}/_buildManifest.js +1 -1
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs/pools/[pool].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/data/storage.py +11 -0
- sky/data/storage_utils.py +1 -45
- sky/execution.py +0 -1
- sky/global_user_state.py +3 -3
- sky/jobs/client/sdk.py +3 -2
- sky/jobs/controller.py +15 -0
- sky/jobs/server/core.py +120 -28
- sky/jobs/server/server.py +1 -1
- sky/jobs/server/utils.py +65 -32
- sky/jobs/state.py +145 -3
- sky/jobs/utils.py +87 -8
- sky/provision/kubernetes/instance.py +1 -1
- sky/schemas/api/responses.py +73 -0
- sky/schemas/generated/managed_jobsv1_pb2.py +70 -0
- sky/schemas/generated/managed_jobsv1_pb2.pyi +262 -0
- sky/schemas/generated/managed_jobsv1_pb2_grpc.py +278 -0
- sky/serve/serve_utils.py +16 -0
- sky/serve/server/core.py +1 -1
- sky/serve/server/impl.py +6 -6
- sky/server/common.py +2 -1
- sky/server/requests/serializers/decoders.py +10 -6
- sky/server/requests/serializers/encoders.py +13 -8
- sky/skylet/constants.py +1 -1
- sky/skylet/job_lib.py +2 -32
- sky/skylet/log_lib.py +211 -0
- sky/skylet/log_lib.pyi +30 -1
- sky/skylet/services.py +208 -2
- sky/skylet/skylet.py +3 -0
- sky/task.py +4 -0
- sky/utils/cluster_utils.py +23 -5
- sky/utils/command_runner.py +21 -5
- sky/utils/command_runner.pyi +11 -0
- sky/utils/volume.py +5 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20251001.dist-info}/METADATA +35 -35
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20251001.dist-info}/RECORD +70 -66
- sky/dashboard/out/_next/static/chunks/6856-2b3600ff2854d066.js +0 -1
- /sky/dashboard/out/_next/static/{VXU6_xE28M55BOdwmUUJS → m3YT2i5s6v4SsIdYc8WZa}/_ssgManifest.js +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20251001.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20251001.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20251001.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20251001.dist-info}/top_level.txt +0 -0
sky/client/cli/command.py
CHANGED
@@ -59,7 +59,7 @@ from sky import task as task_lib
 from sky.adaptors import common as adaptors_common
 from sky.client import sdk
 from sky.client.cli import flags
-from sky.
+from sky.client.cli import table_utils
 from sky.provision.kubernetes import constants as kubernetes_constants
 from sky.provision.kubernetes import utils as kubernetes_utils
 from sky.schemas.api import responses
@@ -87,6 +87,7 @@ from sky.utils import status_lib
 from sky.utils import subprocess_utils
 from sky.utils import timeline
 from sky.utils import ux_utils
+from sky.utils import volume as volume_utils
 from sky.utils import yaml_utils
 from sky.utils.cli_utils import status_utils
 from sky.volumes import utils as volumes_utils
@@ -1321,7 +1322,7 @@ def exec(


 def _handle_jobs_queue_request(
-        request_id: server_common.RequestId[List[
+        request_id: server_common.RequestId[List[responses.ManagedJobRecord]],
         show_all: bool,
         show_user: bool,
         max_num_jobs_to_show: Optional[int],
@@ -1394,10 +1395,10 @@ def _handle_jobs_queue_request(
         msg += ('Failed to query managed jobs: '
                 f'{common_utils.format_exception(e, use_bracket=True)}')
     else:
-        msg =
-
-
-
+        msg = table_utils.format_job_table(managed_jobs_,
+                                           show_all=show_all,
+                                           show_user=show_user,
+                                           max_jobs=max_num_jobs_to_show)
     return num_in_progress_jobs, msg
@@ -1512,9 +1513,9 @@ def _status_kubernetes(show_all: bool):
     click.echo(f'\n{colorama.Fore.CYAN}{colorama.Style.BRIGHT}'
               f'Managed jobs'
               f'{colorama.Style.RESET_ALL}')
-    msg =
-
-
+    msg = table_utils.format_job_table(all_jobs,
+                                       show_all=show_all,
+                                       show_user=False)
     click.echo(msg)
     if any(['sky-serve-controller' in c.cluster_name for c in all_clusters]):
         # TODO: Parse serve controllers and show services separately.
@@ -2125,7 +2126,7 @@ def queue(clusters: List[str], skip_finished: bool, all_users: bool):
                        f'cluster {cluster!r}.{colorama.Style.RESET_ALL}\n'
                        f' {common_utils.format_exception(e)}')
            return
-        job_tables[cluster] =
+        job_tables[cluster] = table_utils.format_job_queue(job_table)

    subprocess_utils.run_in_parallel(_get_job_queue, clusters)
    user_str = 'all users' if all_users else 'current user'
@@ -2962,9 +2963,9 @@ def _hint_or_raise_for_down_jobs_controller(controller_name: str,
            'jobs (output of `sky jobs queue`) will be lost.')
    click.echo(msg)
    if managed_jobs_:
-        job_table =
-
-
+        job_table = table_utils.format_job_table(managed_jobs_,
+                                                 show_all=False,
+                                                 show_user=True)
        msg = controller.value.decline_down_for_dirty_controller_hint
        # Add prefix to each line to align with the bullet point.
        msg += '\n'.join(
@@ -4026,8 +4027,7 @@ def storage_ls(verbose: bool):
    """List storage objects managed by SkyPilot."""
    request_id = sdk.storage_ls()
    storages = sdk.stream_and_get(request_id)
-    storage_table =
-                    show_all=verbose)
+    storage_table = table_utils.format_storage_table(storages, show_all=verbose)
    click.echo(storage_table)
@@ -4122,13 +4122,15 @@ def volumes():
 @click.option('--infra',
              required=False,
              type=str,
-              help='
+              help='Infrastructure to use. '
+              'Format: cloud, cloud/region, cloud/region/zone, or '
+              'k8s/context-name.'
+              'Examples: k8s, k8s/my-context, runpod/US/US-CA-2. '
              'Override the infra defined in the YAML.')
-@click.option(
-
-
-
-              help='Volume type. Format: pvc. Override the type defined in the YAML.')
+@click.option('--type',
+              required=False,
+              type=click.Choice(volume_utils.VolumeType.supported_types()),
+              help='Volume type. Override the type defined in the YAML.')
 @click.option('--size',
              required=False,
              type=str,
@@ -4159,7 +4161,7 @@ def volumes_apply(
      sky volumes apply volume.yaml
    \b
      # Apply a volume from a command.
-      sky volumes apply --name pvc1 --infra k8s --type pvc --size 100Gi
+      sky volumes apply --name pvc1 --infra k8s --type k8s-pvc --size 100Gi
    """
    # pylint: disable=import-outside-toplevel
    from sky.volumes import volume as volume_lib
@@ -4496,10 +4498,30 @@ def jobs_launch(
    job_id_handle = _async_call_or_wait(request_id, async_call,
                                        'sky.jobs.launch')

-    if
-
-
-
+    if async_call:
+        return
+
+    job_ids = [job_id_handle[0]] if isinstance(job_id_handle[0],
+                                               int) else job_id_handle[0]
+    if pool:
+        # Display the worker assignment for the jobs.
+        logger.debug(f'Getting service records for pool: {pool}')
+        records_request_id = managed_jobs.pool_status(pool_names=pool)
+        service_records = _async_call_or_wait(records_request_id, async_call,
+                                              'sky.jobs.pool_status')
+        logger.debug(f'Pool status: {service_records}')
+        replica_infos = service_records[0]['replica_info']
+        for replica_info in replica_infos:
+            job_id = replica_info.get('used_by', None)
+            if job_id in job_ids:
+                worker_id = replica_info['replica_id']
+                version = replica_info['version']
+                logger.info(f'Job ID: {job_id} assigned to pool {pool} '
+                            f'(worker: {worker_id}, version: {version})')
+
+    if not detach_run:
+        if len(job_ids) == 1:
+            job_id = job_ids[0]
            returncode = managed_jobs.tail_logs(name=None,
                                                job_id=job_id,
                                                follow=True,
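The jobs_launch hunk above matches the launched job IDs against the pool's replica records (each carrying used_by, replica_id, and version) to report which worker picked up each job. A minimal standalone sketch of that matching step, using only the field names visible in the diff; the helper name and types below are hypothetical, not part of SkyPilot:

from typing import Any, Dict, List, Tuple


def match_jobs_to_workers(
        job_ids: List[int],
        replica_infos: List[Dict[str, Any]]) -> List[Tuple[int, int, int]]:
    # Each replica record from pool_status() carries 'used_by' (the job it
    # runs), 'replica_id' (the worker), and 'version', per the diff above.
    assignments = []
    for replica in replica_infos:
        job_id = replica.get('used_by')
        if job_id in job_ids:
            assignments.append(
                (job_id, replica['replica_id'], replica['version']))
    return assignments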
sky/client/cli/table_utils.py
ADDED
@@ -0,0 +1,91 @@
+"""Utilities for formatting tables for CLI output."""
+from typing import List, Optional
+
+from sky.jobs import utils as managed_jobs
+from sky.schemas.api import responses
+from sky.skylet import constants
+from sky.utils import common_utils
+from sky.utils import log_utils
+
+
+def format_job_queue(jobs: List[responses.ClusterJobRecord]):
+    """Format the job queue for display.
+
+    Usage:
+        jobs = get_job_queue()
+        print(format_job_queue(jobs))
+    """
+    job_table = log_utils.create_table([
+        'ID', 'NAME', 'USER', 'SUBMITTED', 'STARTED', 'DURATION', 'RESOURCES',
+        'STATUS', 'LOG', 'GIT COMMIT'
+    ])
+    for job in jobs:
+        job_table.add_row([
+            job.job_id,
+            job.job_name,
+            job.username,
+            log_utils.readable_time_duration(job.submitted_at),
+            log_utils.readable_time_duration(job.start_at),
+            log_utils.readable_time_duration(job.start_at,
+                                             job.end_at,
+                                             absolute=True),
+            job.resources,
+            job.status.colored_str(),
+            job.log_path,
+            job.metadata.get('git_commit', '-'),
+        ])
+    return job_table
+
+
+def format_storage_table(storages: List[responses.StorageRecord],
+                         show_all: bool = False) -> str:
+    """Format the storage table for display.
+
+    Args:
+        storage_table (dict): The storage table.
+
+    Returns:
+        str: The formatted storage table.
+    """
+    storage_table = log_utils.create_table([
+        'NAME',
+        'UPDATED',
+        'STORE',
+        'COMMAND',
+        'STATUS',
+    ])
+
+    for row in storages:
+        launched_at = row.launched_at
+        if show_all:
+            command = row.last_use
+        else:
+            command = common_utils.truncate_long_string(
+                row.last_use, constants.LAST_USE_TRUNC_LENGTH)
+        storage_table.add_row([
+            # NAME
+            row.name,
+            # LAUNCHED
+            log_utils.readable_time_duration(launched_at),
+            # CLOUDS
+            ', '.join([s.value for s in row.store]),
+            # COMMAND,
+            command,
+            # STATUS
+            row.status.value,
+        ])
+    if storages:
+        return str(storage_table)
+    else:
+        return 'No existing storage.'
+
+
+def format_job_table(jobs: List[responses.ManagedJobRecord],
+                     show_all: bool,
+                     show_user: bool,
+                     max_jobs: Optional[int] = None):
+    jobs = [job.model_dump() for job in jobs]
+    return managed_jobs.format_job_table(jobs,
+                                         show_all=show_all,
+                                         show_user=show_user,
+                                         max_jobs=max_jobs)
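Taken together with the command.py changes above, the CLI now builds tables from typed response records instead of raw dicts. A minimal sketch of the call pattern, assuming an API server is reachable; it mirrors the storage_ls hunk in command.py:

from sky.client import sdk
from sky.client.cli import table_utils

# storage_ls() now advertises RequestId[List[responses.StorageRecord]];
# stream_and_get() resolves it to the typed records that
# format_storage_table() expects.
request_id = sdk.storage_ls()
storages = sdk.stream_and_get(request_id)
print(table_utils.format_storage_table(storages, show_all=False))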
sky/client/sdk.py
CHANGED
@@ -1267,9 +1267,11 @@ def autostop(
 @usage_lib.entrypoint
 @server_common.check_server_healthy_or_start
 @annotations.client_api
-def queue(
-
-
+def queue(
+    cluster_name: str,
+    skip_finished: bool = False,
+    all_users: bool = False
+) -> server_common.RequestId[List[responses.ClusterJobRecord]]:
     """Gets the job queue of a cluster.

     Args:
@@ -1282,8 +1284,8 @@ def queue(cluster_name: str,
         The request ID of the queue request.

     Request Returns:
-        job_records (List[
-            queue.
+        job_records (List[responses.ClusterJobRecord]): A list of job records
+            for each job in the queue.

     .. code-block:: python

@@ -1616,26 +1618,15 @@ def cost_report(
 @usage_lib.entrypoint
 @server_common.check_server_healthy_or_start
 @annotations.client_api
-def storage_ls() -> server_common.RequestId[List[
+def storage_ls() -> server_common.RequestId[List[responses.StorageRecord]]:
     """Gets the storages.

     Returns:
         The request ID of the storage list request.

     Request Returns:
-        storage_records (List[
-
-
-        .. code-block:: python
-
-            {
-                'name': (str) storage name,
-                'launched_at': (int) timestamp of creation,
-                'store': (List[sky.StoreType]) storage type,
-                'last_use': (int) timestamp of last use,
-                'status': (sky.StorageStatus) storage status,
-            }
-        ]
+        storage_records (List[responses.StorageRecord]):
+            A list of storage records.
     """
     response = server_common.make_authenticated_request('GET', '/storage/ls')
     return server_common.get_request_id(response)
@@ -1910,10 +1901,10 @@ def kubernetes_node_info(
 @usage_lib.entrypoint
 @server_common.check_server_healthy_or_start
 @annotations.client_api
-def status_kubernetes() -> server_common.RequestId[
-        List['kubernetes_utils.KubernetesSkyPilotClusterInfoPayload'],
-
-
+def status_kubernetes() -> server_common.RequestId[
+        Tuple[List['kubernetes_utils.KubernetesSkyPilotClusterInfoPayload'],
+              List['kubernetes_utils.KubernetesSkyPilotClusterInfoPayload'],
+              List[responses.ManagedJobRecord], Optional[str]]]:
     """Gets all SkyPilot clusters and jobs in the Kubernetes cluster.

     Managed jobs and services are also included in the clusters returned.
sky/client/sdk_async.py
CHANGED
@@ -523,11 +523,11 @@ async def autostop(
 @usage_lib.entrypoint
 @annotations.client_api
 async def queue(
-
-
-
-
-) -> List[
+    cluster_name: str,
+    skip_finished: bool = False,
+    all_users: bool = False,
+    stream_logs: Optional[StreamConfig] = DEFAULT_STREAM_CONFIG
+) -> List[responses.ClusterJobRecord]:
     """Async version of queue() that gets the job queue of a cluster."""
     request_id = await context_utils.to_thread(sdk.queue, cluster_name,
                                                skip_finished, all_users)
sky/core.py
CHANGED
@@ -195,7 +195,7 @@ def status(
 def status_kubernetes(
 ) -> Tuple[List['kubernetes_utils.KubernetesSkyPilotClusterInfoPayload'],
            List['kubernetes_utils.KubernetesSkyPilotClusterInfoPayload'],
-           List[
+           List[responses.ManagedJobRecord], Optional[str]]:
     """Gets all SkyPilot clusters and jobs in the Kubernetes cluster.

     Managed jobs and services are also included in the clusters returned.
@@ -270,6 +270,7 @@ all_clusters, unmanaged_clusters, all_jobs, context
         kubernetes_utils.KubernetesSkyPilotClusterInfoPayload.from_cluster(c)
         for c in unmanaged_clusters
     ]
+    all_jobs = [responses.ManagedJobRecord(**job) for job in all_jobs]
     return all_clusters, unmanaged_clusters, all_jobs, context


@@ -803,7 +804,7 @@ def autostop(
 @usage_lib.entrypoint
 def queue(cluster_name: str,
           skip_finished: bool = False,
-          all_users: bool = False) -> List[
+          all_users: bool = False) -> List[responses.ClusterJobRecord]:
     # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
     """Gets the job queue of a cluster.

@@ -850,7 +851,7 @@ def queue(cluster_name: str,

     use_legacy = not handle.is_grpc_enabled_with_flag

-    if
+    if not use_legacy:
         try:
             request = jobsv1_pb2.GetJobQueueRequest(user_hash=user_hash,
                                                     all_jobs=all_jobs)
@@ -879,7 +880,6 @@ def queue(cluster_name: str,
                 jobs.append(job_dict)
         except exceptions.SkyletMethodNotImplementedError:
             use_legacy = True
-
     if use_legacy:
         code = job_lib.JobLibCodeGen.get_job_queue(user_hash, all_jobs)
         returncode, jobs_payload, stderr = backend.run_on_head(
@@ -891,7 +891,7 @@ def queue(cluster_name: str,
             stderr=f'{jobs_payload + stderr}',
             stream_logs=True)
         jobs = job_lib.load_job_queue(jobs_payload)
-    return jobs
+    return [responses.ClusterJobRecord.model_validate(job) for job in jobs]


 @usage_lib.entrypoint
@@ -1131,25 +1131,25 @@ def job_status(cluster_name: str,
 # = Storage Management =
 # ======================
 @usage_lib.entrypoint
-def storage_ls() -> List[
+def storage_ls() -> List[responses.StorageRecord]:
     # NOTE(dev): Keep the docstring consistent between the Python API and CLI.
     """Gets the storages.

     Returns:
-        [
-            {
-                'name': str,
-                'launched_at': int timestamp of creation,
-                'store': List[sky.StoreType],
-                'last_use': int timestamp of last use,
-                'status': sky.StorageStatus,
-            }
-        ]
+        List[responses.StorageRecord]: A list of storage records.
     """
     storages = global_user_state.get_storage()
+    storage_records = []
     for storage in storages:
-
-
+        storage_records.append(
+            responses.StorageRecord(
+                name=storage['name'],
+                launched_at=storage['launched_at'],
+                store=list(storage.pop('handle').sky_stores.keys()),
+                last_use=storage['last_use'],
+                status=storage['status'],
+            ))
+    return storage_records


 @usage_lib.entrypoint
@@ -1165,9 +1165,7 @@ def storage_delete(name: str) -> None:
     if handle is None:
         raise ValueError(f'Storage name {name!r} not found.')
     else:
-        storage_object = data.Storage(
-            source=handle.source,
-            sync_on_reconstruction=False)
+        storage_object = data.Storage.from_handle(handle)
         storage_object.delete()

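On the server side, core.storage_ls() now returns typed records built from the storage handles, and core.queue() validates the job dicts into ClusterJobRecord before returning. A minimal sketch of consuming the storage records, assuming server-side state is available; the field names follow the StorageRecord constructor call in the diff above:

from sky import core

for record in core.storage_ls():
    # StorageRecord fields per the diff: name, launched_at, store,
    # last_use, status. 'store' holds StoreType enum values.
    stores = ', '.join(s.value for s in record.store)
    print(record.name, record.status, stores)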
sky/dashboard/out/404.html
CHANGED
@@ -1 +1 @@
-<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-
+<!DOCTYPE html><html><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><meta name="next-head-count" content="2"/><link rel="preload" href="/dashboard/_next/static/css/4614e06482d7309e.css" as="style"/><link rel="stylesheet" href="/dashboard/_next/static/css/4614e06482d7309e.css" data-n-g=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/dashboard/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/dashboard/_next/static/chunks/webpack-4f0c389a4ce5fd9c.js" defer=""></script><script src="/dashboard/_next/static/chunks/framework-cf60a09ccd051a10.js" defer=""></script><script src="/dashboard/_next/static/chunks/main-f15ccb73239a3bf1.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_app-ce361c6959bc2001.js" defer=""></script><script src="/dashboard/_next/static/chunks/pages/_error-c66a4e8afc46f17b.js" defer=""></script><script src="/dashboard/_next/static/m3YT2i5s6v4SsIdYc8WZa/_buildManifest.js" defer=""></script><script src="/dashboard/_next/static/m3YT2i5s6v4SsIdYc8WZa/_ssgManifest.js" defer=""></script></head><body><div id="__next"></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"statusCode":404}},"page":"/_error","query":{},"buildId":"m3YT2i5s6v4SsIdYc8WZa","assetPrefix":"/dashboard","nextExport":true,"isFallback":false,"gip":true,"scriptLoader":[]}</script></body></html>