skypilot-nightly 1.0.0.dev20250926__py3-none-any.whl → 1.0.0.dev20250927__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of skypilot-nightly might be problematic.
- sky/__init__.py +2 -2
- sky/backends/backend_utils.py +25 -4
- sky/backends/cloud_vm_ray_backend.py +151 -36
- sky/client/cli/command.py +2 -1
- sky/client/cli/table_utils.py +34 -0
- sky/client/sdk.py +7 -5
- sky/client/sdk_async.py +5 -5
- sky/core.py +3 -4
- sky/dashboard/out/404.html +1 -1
- sky/dashboard/out/_next/static/{VXU6_xE28M55BOdwmUUJS → UDSEoDB67vwFMZyCJ4HWU}/_buildManifest.js +1 -1
- sky/dashboard/out/_next/static/chunks/{3294.03e02ae73455f48e.js → 3294.93d9336bdc032b3a.js} +1 -1
- sky/dashboard/out/_next/static/chunks/6856-5fdc9b851a18acdb.js +1 -0
- sky/dashboard/out/_next/static/chunks/{webpack-8e64d11e58eab5cb.js → webpack-7340bc0f0dd8ae74.js} +1 -1
- sky/dashboard/out/clusters/[cluster]/[job].html +1 -1
- sky/dashboard/out/clusters/[cluster].html +1 -1
- sky/dashboard/out/clusters.html +1 -1
- sky/dashboard/out/config.html +1 -1
- sky/dashboard/out/index.html +1 -1
- sky/dashboard/out/infra/[context].html +1 -1
- sky/dashboard/out/infra.html +1 -1
- sky/dashboard/out/jobs/[job].html +1 -1
- sky/dashboard/out/jobs/pools/[pool].html +1 -1
- sky/dashboard/out/jobs.html +1 -1
- sky/dashboard/out/users.html +1 -1
- sky/dashboard/out/volumes.html +1 -1
- sky/dashboard/out/workspace/new.html +1 -1
- sky/dashboard/out/workspaces/[name].html +1 -1
- sky/dashboard/out/workspaces.html +1 -1
- sky/execution.py +0 -1
- sky/global_user_state.py +3 -3
- sky/jobs/server/core.py +96 -26
- sky/jobs/server/utils.py +65 -32
- sky/jobs/state.py +145 -3
- sky/jobs/utils.py +85 -7
- sky/schemas/api/responses.py +18 -0
- sky/schemas/generated/managed_jobsv1_pb2.py +70 -0
- sky/schemas/generated/managed_jobsv1_pb2.pyi +262 -0
- sky/schemas/generated/managed_jobsv1_pb2_grpc.py +278 -0
- sky/serve/serve_utils.py +16 -0
- sky/serve/server/core.py +1 -1
- sky/serve/server/impl.py +6 -6
- sky/server/requests/serializers/decoders.py +2 -2
- sky/server/requests/serializers/encoders.py +7 -3
- sky/skylet/constants.py +1 -1
- sky/skylet/job_lib.py +2 -32
- sky/skylet/log_lib.py +211 -0
- sky/skylet/log_lib.pyi +30 -1
- sky/skylet/services.py +208 -2
- sky/skylet/skylet.py +3 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20250927.dist-info}/METADATA +32 -32
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20250927.dist-info}/RECORD +56 -52
- sky/dashboard/out/_next/static/chunks/6856-2b3600ff2854d066.js +0 -1
- /sky/dashboard/out/_next/static/{VXU6_xE28M55BOdwmUUJS → UDSEoDB67vwFMZyCJ4HWU}/_ssgManifest.js +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20250927.dist-info}/WHEEL +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20250927.dist-info}/entry_points.txt +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20250927.dist-info}/licenses/LICENSE +0 -0
- {skypilot_nightly-1.0.0.dev20250926.dist-info → skypilot_nightly-1.0.0.dev20250927.dist-info}/top_level.txt +0 -0
sky/jobs/utils.py
CHANGED
@@ -16,8 +16,8 @@ import textwrap
 import time
 import traceback
 import typing
-from typing import (Any, Deque, Dict, List, Literal, Optional, Set,
-                    Tuple, Union)
+from typing import (Any, Deque, Dict, Iterable, List, Literal, Optional, Set,
+                    TextIO, Tuple, Union)
 
 import colorama
 import filelock
@@ -51,16 +51,23 @@ from sky.utils import subprocess_utils
 from sky.utils import ux_utils
 
 if typing.TYPE_CHECKING:
+    from google.protobuf import descriptor
+    from google.protobuf import json_format
     import grpc
     import psutil
 
     import sky
     from sky import dag as dag_lib
     from sky.schemas.generated import jobsv1_pb2
+    from sky.schemas.generated import managed_jobsv1_pb2
 else:
+    json_format = adaptors_common.LazyImport('google.protobuf.json_format')
+    descriptor = adaptors_common.LazyImport('google.protobuf.descriptor')
     psutil = adaptors_common.LazyImport('psutil')
     grpc = adaptors_common.LazyImport('grpc')
     jobsv1_pb2 = adaptors_common.LazyImport('sky.schemas.generated.jobsv1_pb2')
+    managed_jobsv1_pb2 = adaptors_common.LazyImport(
+        'sky.schemas.generated.managed_jobsv1_pb2')
 
 logger = sky_logging.init_logger(__name__)
 
@@ -169,7 +176,7 @@ def _validate_consolidation_mode_config(
         if all_jobs:
             nonterminal_jobs = (
                 managed_job_state.get_nonterminal_job_ids_by_name(
-                    None, all_users=True))
+                    None, None, all_users=True))
             if nonterminal_jobs:
                 with ux_utils.print_exception_no_traceback():
                     raise exceptions.InconsistentConsolidationModeError(
@@ -698,14 +705,15 @@ def generate_managed_job_cluster_name(task_name: str, job_id: int) -> str:
 
 def cancel_jobs_by_id(job_ids: Optional[List[int]],
                       all_users: bool = False,
-                      current_workspace: Optional[str] = None) -> str:
+                      current_workspace: Optional[str] = None,
+                      user_hash: Optional[str] = None) -> str:
     """Cancel jobs by id.
 
     If job_ids is None, cancel all jobs.
     """
     if job_ids is None:
         job_ids = managed_job_state.get_nonterminal_job_ids_by_name(
-            None, all_users)
+            None, user_hash, all_users)
     job_ids = list(set(job_ids))
     if not job_ids:
         return 'No job to cancel.'
@@ -1241,6 +1249,24 @@ def dump_managed_job_queue(
     user_hashes: Optional[List[Optional[str]]] = None,
     statuses: Optional[List[str]] = None,
 ) -> str:
+    return message_utils.encode_payload(
+        get_managed_job_queue(skip_finished, accessible_workspaces, job_ids,
+                              workspace_match, name_match, pool_match, page,
+                              limit, user_hashes, statuses))
+
+
+def get_managed_job_queue(
+    skip_finished: bool = False,
+    accessible_workspaces: Optional[List[str]] = None,
+    job_ids: Optional[List[int]] = None,
+    workspace_match: Optional[str] = None,
+    name_match: Optional[str] = None,
+    pool_match: Optional[str] = None,
+    page: Optional[int] = None,
+    limit: Optional[int] = None,
+    user_hashes: Optional[List[Optional[str]]] = None,
+    statuses: Optional[List[str]] = None,
+) -> Dict[str, Any]:
     # Make sure to get all jobs - some logic below (e.g. high priority job
     # detection) requires a full view of the jobs table.
     jobs = managed_job_state.get_managed_jobs()
@@ -1371,12 +1397,12 @@ def dump_managed_job_queue(
         else:
             job['details'] = None
 
-    return message_utils.encode_payload({
+    return {
         'jobs': jobs,
         'total': total,
         'total_no_filter': total_no_filter,
         'status_counts': status_counts
-    })
+    }
 
 
 def filter_jobs(
@@ -1824,6 +1850,58 @@ def format_job_table(
     return output
 
 
+def decode_managed_job_protos(
+    job_protos: Iterable['managed_jobsv1_pb2.ManagedJobInfo']
+) -> List[Dict[str, Any]]:
+    """Decode job protos to dicts. Similar to load_managed_job_queue."""
+    user_hash_to_user = global_user_state.get_users(
+        set(job.user_hash for job in job_protos if job.user_hash))
+
+    jobs = []
+    for job_proto in job_protos:
+        job_dict = _job_proto_to_dict(job_proto)
+        user_hash = job_dict.get('user_hash', None)
+        if user_hash is not None:
+            # Skip jobs that do not have user_hash info.
+            # TODO(cooperc): Remove check before 0.12.0.
+            user = user_hash_to_user.get(user_hash, None)
+            job_dict['user_name'] = user.name if user is not None else None
+        jobs.append(job_dict)
+    return jobs
+
+
+def _job_proto_to_dict(
+        job_proto: 'managed_jobsv1_pb2.ManagedJobInfo') -> Dict[str, Any]:
+    job_dict = json_format.MessageToDict(
+        job_proto,
+        always_print_fields_with_no_presence=True,
+        # Our API returns fields in snake_case.
+        preserving_proto_field_name=True,
+        use_integers_for_enums=True)
+    for field in job_proto.DESCRIPTOR.fields:
+        # Ensure optional fields are present with None values for
+        # backwards compatibility with older clients.
+        if field.has_presence and field.name not in job_dict:
+            job_dict[field.name] = None
+        # json_format.MessageToDict is meant for encoding to JSON,
+        # and Protobuf encodes int64 as decimal strings in JSON,
+        # so we need to convert them back to ints.
+        # https://protobuf.dev/programming-guides/json/#field-representation
+        if field.type == descriptor.FieldDescriptor.TYPE_INT64:
+            job_dict[field.name] = int(job_dict[field.name])
+    job_dict['status'] = managed_job_state.ManagedJobStatus.from_protobuf(
+        job_dict['status'])
+    # For backwards compatibility, convert schedule_state to a string,
+    # as we don't have the logic to handle it in our request
+    # encoder/decoder, unlike status.
+    schedule_state_enum = (
+        managed_job_state.ManagedJobScheduleState.from_protobuf(
+            job_dict['schedule_state']))
+    job_dict['schedule_state'] = (schedule_state_enum.value
+                                  if schedule_state_enum is not None else None)
+    return job_dict
+
+
 class ManagedJobCodeGen:
     """Code generator for managed job utility functions.
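The int64 handling in _job_proto_to_dict above is the subtle part: json_format.MessageToDict follows the proto3 JSON mapping, so int64 fields such as job_id come back as decimal strings and have to be cast back to int. A minimal standalone sketch of that round trip (illustrative only, not part of this diff; the field values are made up):

from google.protobuf import descriptor
from google.protobuf import json_format

from sky.schemas.generated import managed_jobsv1_pb2

# Populate a proto roughly the way the controller would.
info = managed_jobsv1_pb2.ManagedJobInfo(
    job_id=42,
    job_name='train',
    status=managed_jobsv1_pb2.MANAGED_JOB_STATUS_RUNNING)

raw = json_format.MessageToDict(
    info,
    always_print_fields_with_no_presence=True,
    preserving_proto_field_name=True,
    use_integers_for_enums=True)
assert raw['job_id'] == '42'  # int64 is rendered as a string in the JSON mapping.

# Mirror the fix-up done in _job_proto_to_dict: cast int64 fields back to int.
for field in info.DESCRIPTOR.fields:
    if (field.type == descriptor.FieldDescriptor.TYPE_INT64 and
            field.name in raw):
        raw[field.name] = int(raw[field.name])
assert raw['job_id'] == 42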
sky/schemas/api/responses.py
CHANGED
@@ -7,6 +7,7 @@ import pydantic
 
 from sky import models
 from sky.server import common
+from sky.skylet import job_lib
 from sky.utils import status_lib
 
 
@@ -121,6 +122,23 @@ class StatusResponse(ResponseBaseModel):
     cluster_name_on_cloud: Optional[str] = None
 
 
+class ClusterJobRecord(ResponseBaseModel):
+    """Response for the cluster job queue endpoint."""
+    job_id: int
+    job_name: str
+    username: str
+    user_hash: str
+    submitted_at: float
+    # None if the job has not started yet.
+    start_at: Optional[float] = None
+    # None if the job has not ended yet.
+    end_at: Optional[float] = None
+    resources: str
+    status: job_lib.JobStatus
+    log_path: str
+    metadata: Dict[str, Any] = {}
+
+
 class UploadStatus(enum.Enum):
     """Status of the upload."""
     UPLOADING = 'uploading'
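A rough sketch of how the new ClusterJobRecord model might be populated from one row of a cluster's job queue (the values and the log path below are made up for illustration; the endpoint that builds these records is not shown in this diff):

import time

from sky.schemas.api import responses
from sky.skylet import job_lib

record = responses.ClusterJobRecord(
    job_id=7,
    job_name='eval',
    username='alice',
    user_hash='abcd1234',
    submitted_at=time.time(),
    resources='1x[CPU:4]',
    status=job_lib.JobStatus.RUNNING,
    log_path='~/sky_logs/sky-job-7',  # hypothetical path for illustration
)
# start_at/end_at stay None until the job starts/finishes; metadata defaults to {}.
print(record)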
sky/schemas/generated/managed_jobsv1_pb2.py
ADDED
@@ -0,0 +1,70 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sky/schemas/generated/managed_jobsv1.proto
# Protobuf Python Version: 5.26.1
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*sky/schemas/generated/managed_jobsv1.proto\x12\x0fmanaged_jobs.v1\"\x15\n\x06JobIds\x12\x0b\n\x03ids\x18\x01 \x03(\x03\"\x1c\n\nUserHashes\x12\x0e\n\x06hashes\x18\x01 \x03(\t\"\x1c\n\x08Statuses\x12\x10\n\x08statuses\x18\x01 \x03(\t\"\x13\n\x11GetVersionRequest\"0\n\x12GetVersionResponse\x12\x1a\n\x12\x63ontroller_version\x18\x01 \x01(\t\"\xec\x03\n\x12GetJobTableRequest\x12\x15\n\rskip_finished\x18\x01 \x01(\x08\x12\x1d\n\x15\x61\x63\x63\x65ssible_workspaces\x18\x02 \x03(\t\x12-\n\x07job_ids\x18\x03 \x01(\x0b\x32\x17.managed_jobs.v1.JobIdsH\x00\x88\x01\x01\x12\x1c\n\x0fworkspace_match\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nname_match\x18\x05 \x01(\tH\x02\x88\x01\x01\x12\x17\n\npool_match\x18\x06 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04page\x18\x07 \x01(\x05H\x04\x88\x01\x01\x12\x12\n\x05limit\x18\x08 \x01(\x05H\x05\x88\x01\x01\x12\x35\n\x0buser_hashes\x18\t \x01(\x0b\x32\x1b.managed_jobs.v1.UserHashesH\x06\x88\x01\x01\x12\x30\n\x08statuses\x18\n \x01(\x0b\x32\x19.managed_jobs.v1.StatusesH\x07\x88\x01\x01\x12#\n\x1bshow_jobs_without_user_hash\x18\x0b \x01(\x08\x42\n\n\x08_job_idsB\x12\n\x10_workspace_matchB\r\n\x0b_name_matchB\r\n\x0b_pool_matchB\x07\n\x05_pageB\x08\n\x06_limitB\x0e\n\x0c_user_hashesB\x0b\n\t_statuses\"\xa9\x08\n\x0eManagedJobInfo\x12\x0e\n\x06job_id\x18\x01 \x01(\x03\x12\x0f\n\x07task_id\x18\x02 \x01(\x03\x12\x10\n\x08job_name\x18\x03 \x01(\t\x12\x11\n\ttask_name\x18\x04 \x01(\t\x12\x14\n\x0cjob_duration\x18\x05 \x01(\x01\x12\x16\n\tworkspace\x18\x06 \x01(\tH\x00\x88\x01\x01\x12\x31\n\x06status\x18\x07 \x01(\x0e\x32!.managed_jobs.v1.ManagedJobStatus\x12@\n\x0eschedule_state\x18\x08 \x01(\x0e\x32(.managed_jobs.v1.ManagedJobScheduleState\x12\x11\n\tresources\x18\t \x01(\t\x12\x19\n\x11\x63luster_resources\x18\n \x01(\t\x12\x1e\n\x16\x63luster_resources_full\x18\x0b \x01(\t\x12\r\n\x05\x63loud\x18\x0c \x01(\t\x12\x0e\n\x06region\x18\r \x01(\t\x12\r\n\x05infra\x18\x0e \x01(\t\x12G\n\x0c\x61\x63\x63\x65lerators\x18\x0f \x03(\x0b\x32\x31.managed_jobs.v1.ManagedJobInfo.AcceleratorsEntry\x12\x16\n\x0erecovery_count\x18\x10 \x01(\x05\x12\x14\n\x07\x64\x65tails\x18\x11 \x01(\tH\x01\x88\x01\x01\x12\x1b\n\x0e\x66\x61ilure_reason\x18\x12 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tuser_name\x18\x13 \x01(\tH\x03\x88\x01\x01\x12\x16\n\tuser_hash\x18\x14 \x01(\tH\x04\x88\x01\x01\x12\x19\n\x0csubmitted_at\x18\x15 \x01(\x01H\x05\x88\x01\x01\x12\x15\n\x08start_at\x18\x16 \x01(\x01H\x06\x88\x01\x01\x12\x13\n\x06\x65nd_at\x18\x17 \x01(\x01H\x07\x88\x01\x01\x12\x16\n\tuser_yaml\x18\x18 \x01(\tH\x08\x88\x01\x01\x12\x17\n\nentrypoint\x18\x19 \x01(\tH\t\x88\x01\x01\x12?\n\x08metadata\x18\x1a \x03(\x0b\x32-.managed_jobs.v1.ManagedJobInfo.MetadataEntry\x12\x11\n\x04pool\x18\x1b \x01(\tH\n\x88\x01\x01\x12\x16\n\tpool_hash\x18\x1c \x01(\tH\x0b\x88\x01\x01\x1a\x33\n\x11\x41\x63\x63\x65leratorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0c\n\n_workspaceB\n\n\x08_detailsB\x11\n\x0f_failure_reasonB\x0c\n\n_user_nameB\x0c\n\n_user_hashB\x0f\n\r_submitted_atB\x0b\n\t_start_atB\t\n\x07_end_atB\x0c\n\n_user_yamlB\r\n\x0b_entrypointB\x07\n\x05_poolB\x0c\n\n_pool_hash\"\xf0\x01\n\x13GetJobTableResponse\x12-\n\x04jobs\x18\x01 \x03(\x0b\x32\x1f.managed_jobs.v1.ManagedJobInfo\x12\r\n\x05total\x18\x02 \x01(\x05\x12\x17\n\x0ftotal_no_filter\x18\x03 \x01(\x05\x12M\n\rstatus_counts\x18\x04 
\x03(\x0b\x32\x36.managed_jobs.v1.GetJobTableResponse.StatusCountsEntry\x1a\x33\n\x11StatusCountsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"?\n\x19GetAllJobIdsByNameRequest\x12\x15\n\x08job_name\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\x0b\n\t_job_name\"-\n\x1aGetAllJobIdsByNameResponse\x12\x0f\n\x07job_ids\x18\x01 \x03(\x03\"\xd7\x01\n\x11\x43\x61ncelJobsRequest\x12\x19\n\x11\x63urrent_workspace\x18\x01 \x01(\t\x12\x16\n\tuser_hash\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\tall_users\x18\x03 \x01(\x08H\x00\x12*\n\x07job_ids\x18\x04 \x01(\x0b\x32\x17.managed_jobs.v1.JobIdsH\x00\x12\x12\n\x08job_name\x18\x05 \x01(\tH\x00\x12\x13\n\tpool_name\x18\x06 \x01(\tH\x00\x42\x17\n\x15\x63\x61ncellation_criteriaB\x0c\n\n_user_hash\"%\n\x12\x43\x61ncelJobsResponse\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x97\x01\n\x11StreamLogsRequest\x12\x15\n\x08job_name\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06job_id\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x0e\n\x06\x66ollow\x18\x03 \x01(\x08\x12\x12\n\ncontroller\x18\x04 \x01(\x08\x12\x11\n\x04tail\x18\x05 \x01(\x05H\x02\x88\x01\x01\x42\x0b\n\t_job_nameB\t\n\x07_job_idB\x07\n\x05_tail\"L\n\x12StreamLogsResponse\x12\x10\n\x08log_line\x18\x01 \x01(\t\x12\x16\n\texit_code\x18\x02 \x01(\x05H\x00\x88\x01\x01\x42\x0c\n\n_exit_code*\x85\x04\n\x10ManagedJobStatus\x12\"\n\x1eMANAGED_JOB_STATUS_UNSPECIFIED\x10\x00\x12\x1e\n\x1aMANAGED_JOB_STATUS_PENDING\x10\x01\x12 \n\x1cMANAGED_JOB_STATUS_SUBMITTED\x10\x02\x12\x1f\n\x1bMANAGED_JOB_STATUS_STARTING\x10\x03\x12\x1e\n\x1aMANAGED_JOB_STATUS_RUNNING\x10\x04\x12!\n\x1dMANAGED_JOB_STATUS_RECOVERING\x10\x05\x12!\n\x1dMANAGED_JOB_STATUS_CANCELLING\x10\x06\x12 \n\x1cMANAGED_JOB_STATUS_SUCCEEDED\x10\x07\x12 \n\x1cMANAGED_JOB_STATUS_CANCELLED\x10\x08\x12\x1d\n\x19MANAGED_JOB_STATUS_FAILED\x10\t\x12#\n\x1fMANAGED_JOB_STATUS_FAILED_SETUP\x10\n\x12\'\n#MANAGED_JOB_STATUS_FAILED_PRECHECKS\x10\x0b\x12)\n%MANAGED_JOB_STATUS_FAILED_NO_RESOURCE\x10\x0c\x12(\n$MANAGED_JOB_STATUS_FAILED_CONTROLLER\x10\r*\x8f\x03\n\x17ManagedJobScheduleState\x12*\n&MANAGED_JOB_SCHEDULE_STATE_UNSPECIFIED\x10\x00\x12&\n\"MANAGED_JOB_SCHEDULE_STATE_INVALID\x10\x01\x12\'\n#MANAGED_JOB_SCHEDULE_STATE_INACTIVE\x10\x02\x12&\n\"MANAGED_JOB_SCHEDULE_STATE_WAITING\x10\x03\x12,\n(MANAGED_JOB_SCHEDULE_STATE_ALIVE_WAITING\x10\x04\x12(\n$MANAGED_JOB_SCHEDULE_STATE_LAUNCHING\x10\x05\x12,\n(MANAGED_JOB_SCHEDULE_STATE_ALIVE_BACKOFF\x10\x06\x12$\n MANAGED_JOB_SCHEDULE_STATE_ALIVE\x10\x07\x12#\n\x1fMANAGED_JOB_SCHEDULE_STATE_DONE\x10\x08\x32\xe4\x03\n\x12ManagedJobsService\x12U\n\nGetVersion\x12\".managed_jobs.v1.GetVersionRequest\x1a#.managed_jobs.v1.GetVersionResponse\x12X\n\x0bGetJobTable\x12#.managed_jobs.v1.GetJobTableRequest\x1a$.managed_jobs.v1.GetJobTableResponse\x12m\n\x12GetAllJobIdsByName\x12*.managed_jobs.v1.GetAllJobIdsByNameRequest\x1a+.managed_jobs.v1.GetAllJobIdsByNameResponse\x12U\n\nCancelJobs\x12\".managed_jobs.v1.CancelJobsRequest\x1a#.managed_jobs.v1.CancelJobsResponse\x12W\n\nStreamLogs\x12\".managed_jobs.v1.StreamLogsRequest\x1a#.managed_jobs.v1.StreamLogsResponse0\x01\x62\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sky.schemas.generated.managed_jobsv1_pb2', _globals)
if not _descriptor._USE_C_DESCRIPTORS:
  DESCRIPTOR._loaded_options = None
  _globals['_MANAGEDJOBINFO_ACCELERATORSENTRY']._loaded_options = None
  _globals['_MANAGEDJOBINFO_ACCELERATORSENTRY']._serialized_options = b'8\001'
  _globals['_MANAGEDJOBINFO_METADATAENTRY']._loaded_options = None
  _globals['_MANAGEDJOBINFO_METADATAENTRY']._serialized_options = b'8\001'
  _globals['_GETJOBTABLERESPONSE_STATUSCOUNTSENTRY']._loaded_options = None
  _globals['_GETJOBTABLERESPONSE_STATUSCOUNTSENTRY']._serialized_options = b'8\001'
  _globals['_MANAGEDJOBSTATUS']._serialized_start=2625
  _globals['_MANAGEDJOBSTATUS']._serialized_end=3142
  _globals['_MANAGEDJOBSCHEDULESTATE']._serialized_start=3145
  _globals['_MANAGEDJOBSCHEDULESTATE']._serialized_end=3544
  _globals['_JOBIDS']._serialized_start=63
  _globals['_JOBIDS']._serialized_end=84
  _globals['_USERHASHES']._serialized_start=86
  _globals['_USERHASHES']._serialized_end=114
  _globals['_STATUSES']._serialized_start=116
  _globals['_STATUSES']._serialized_end=144
  _globals['_GETVERSIONREQUEST']._serialized_start=146
  _globals['_GETVERSIONREQUEST']._serialized_end=165
  _globals['_GETVERSIONRESPONSE']._serialized_start=167
  _globals['_GETVERSIONRESPONSE']._serialized_end=215
  _globals['_GETJOBTABLEREQUEST']._serialized_start=218
  _globals['_GETJOBTABLEREQUEST']._serialized_end=710
  _globals['_MANAGEDJOBINFO']._serialized_start=713
  _globals['_MANAGEDJOBINFO']._serialized_end=1778
  _globals['_MANAGEDJOBINFO_ACCELERATORSENTRY']._serialized_start=1512
  _globals['_MANAGEDJOBINFO_ACCELERATORSENTRY']._serialized_end=1563
  _globals['_MANAGEDJOBINFO_METADATAENTRY']._serialized_start=1565
  _globals['_MANAGEDJOBINFO_METADATAENTRY']._serialized_end=1612
  _globals['_GETJOBTABLERESPONSE']._serialized_start=1781
  _globals['_GETJOBTABLERESPONSE']._serialized_end=2021
  _globals['_GETJOBTABLERESPONSE_STATUSCOUNTSENTRY']._serialized_start=1970
  _globals['_GETJOBTABLERESPONSE_STATUSCOUNTSENTRY']._serialized_end=2021
  _globals['_GETALLJOBIDSBYNAMEREQUEST']._serialized_start=2023
  _globals['_GETALLJOBIDSBYNAMEREQUEST']._serialized_end=2086
  _globals['_GETALLJOBIDSBYNAMERESPONSE']._serialized_start=2088
  _globals['_GETALLJOBIDSBYNAMERESPONSE']._serialized_end=2133
  _globals['_CANCELJOBSREQUEST']._serialized_start=2136
  _globals['_CANCELJOBSREQUEST']._serialized_end=2351
  _globals['_CANCELJOBSRESPONSE']._serialized_start=2353
  _globals['_CANCELJOBSRESPONSE']._serialized_end=2390
  _globals['_STREAMLOGSREQUEST']._serialized_start=2393
  _globals['_STREAMLOGSREQUEST']._serialized_end=2544
  _globals['_STREAMLOGSRESPONSE']._serialized_start=2546
  _globals['_STREAMLOGSRESPONSE']._serialized_end=2622
  _globals['_MANAGEDJOBSSERVICE']._serialized_start=3547
  _globals['_MANAGEDJOBSSERVICE']._serialized_end=4031
# @@protoc_insertion_point(module_scope)
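The module above is standard protoc output: it rebuilds the message classes and enum wrappers from the serialized file descriptor. A small serialize/parse sketch using those generated classes (illustrative only):

from sky.schemas.generated import managed_jobsv1_pb2 as pb

resp = pb.GetJobTableResponse(total=1, total_no_filter=3)
resp.jobs.add(job_id=1, job_name='train',
              status=pb.MANAGED_JOB_STATUS_SUCCEEDED)
resp.status_counts['SUCCEEDED'] = 1

data = resp.SerializeToString()  # bytes as they would travel over the RPC channel
parsed = pb.GetJobTableResponse.FromString(data)
assert parsed.jobs[0].job_name == 'train'
assert pb.ManagedJobStatus.Name(parsed.jobs[0].status) == 'MANAGED_JOB_STATUS_SUCCEEDED'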
sky/schemas/generated/managed_jobsv1_pb2.pyi
ADDED
@@ -0,0 +1,262 @@
from google.protobuf.internal import containers as _containers
from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union

DESCRIPTOR: _descriptor.FileDescriptor

class ManagedJobStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
    __slots__ = ()
    MANAGED_JOB_STATUS_UNSPECIFIED: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_PENDING: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_SUBMITTED: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_STARTING: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_RUNNING: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_RECOVERING: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_CANCELLING: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_SUCCEEDED: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_CANCELLED: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_FAILED: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_FAILED_SETUP: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_FAILED_PRECHECKS: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_FAILED_NO_RESOURCE: _ClassVar[ManagedJobStatus]
    MANAGED_JOB_STATUS_FAILED_CONTROLLER: _ClassVar[ManagedJobStatus]

class ManagedJobScheduleState(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
    __slots__ = ()
    MANAGED_JOB_SCHEDULE_STATE_UNSPECIFIED: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_INVALID: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_INACTIVE: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_WAITING: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_ALIVE_WAITING: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_LAUNCHING: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_ALIVE_BACKOFF: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_ALIVE: _ClassVar[ManagedJobScheduleState]
    MANAGED_JOB_SCHEDULE_STATE_DONE: _ClassVar[ManagedJobScheduleState]
MANAGED_JOB_STATUS_UNSPECIFIED: ManagedJobStatus
MANAGED_JOB_STATUS_PENDING: ManagedJobStatus
MANAGED_JOB_STATUS_SUBMITTED: ManagedJobStatus
MANAGED_JOB_STATUS_STARTING: ManagedJobStatus
MANAGED_JOB_STATUS_RUNNING: ManagedJobStatus
MANAGED_JOB_STATUS_RECOVERING: ManagedJobStatus
MANAGED_JOB_STATUS_CANCELLING: ManagedJobStatus
MANAGED_JOB_STATUS_SUCCEEDED: ManagedJobStatus
MANAGED_JOB_STATUS_CANCELLED: ManagedJobStatus
MANAGED_JOB_STATUS_FAILED: ManagedJobStatus
MANAGED_JOB_STATUS_FAILED_SETUP: ManagedJobStatus
MANAGED_JOB_STATUS_FAILED_PRECHECKS: ManagedJobStatus
MANAGED_JOB_STATUS_FAILED_NO_RESOURCE: ManagedJobStatus
MANAGED_JOB_STATUS_FAILED_CONTROLLER: ManagedJobStatus
MANAGED_JOB_SCHEDULE_STATE_UNSPECIFIED: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_INVALID: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_INACTIVE: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_WAITING: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_ALIVE_WAITING: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_LAUNCHING: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_ALIVE_BACKOFF: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_ALIVE: ManagedJobScheduleState
MANAGED_JOB_SCHEDULE_STATE_DONE: ManagedJobScheduleState

class JobIds(_message.Message):
    __slots__ = ("ids",)
    IDS_FIELD_NUMBER: _ClassVar[int]
    ids: _containers.RepeatedScalarFieldContainer[int]
    def __init__(self, ids: _Optional[_Iterable[int]] = ...) -> None: ...

class UserHashes(_message.Message):
    __slots__ = ("hashes",)
    HASHES_FIELD_NUMBER: _ClassVar[int]
    hashes: _containers.RepeatedScalarFieldContainer[str]
    def __init__(self, hashes: _Optional[_Iterable[str]] = ...) -> None: ...

class Statuses(_message.Message):
    __slots__ = ("statuses",)
    STATUSES_FIELD_NUMBER: _ClassVar[int]
    statuses: _containers.RepeatedScalarFieldContainer[str]
    def __init__(self, statuses: _Optional[_Iterable[str]] = ...) -> None: ...

class GetVersionRequest(_message.Message):
    __slots__ = ()
    def __init__(self) -> None: ...

class GetVersionResponse(_message.Message):
    __slots__ = ("controller_version",)
    CONTROLLER_VERSION_FIELD_NUMBER: _ClassVar[int]
    controller_version: str
    def __init__(self, controller_version: _Optional[str] = ...) -> None: ...

class GetJobTableRequest(_message.Message):
    __slots__ = ("skip_finished", "accessible_workspaces", "job_ids", "workspace_match", "name_match", "pool_match", "page", "limit", "user_hashes", "statuses", "show_jobs_without_user_hash")
    SKIP_FINISHED_FIELD_NUMBER: _ClassVar[int]
    ACCESSIBLE_WORKSPACES_FIELD_NUMBER: _ClassVar[int]
    JOB_IDS_FIELD_NUMBER: _ClassVar[int]
    WORKSPACE_MATCH_FIELD_NUMBER: _ClassVar[int]
    NAME_MATCH_FIELD_NUMBER: _ClassVar[int]
    POOL_MATCH_FIELD_NUMBER: _ClassVar[int]
    PAGE_FIELD_NUMBER: _ClassVar[int]
    LIMIT_FIELD_NUMBER: _ClassVar[int]
    USER_HASHES_FIELD_NUMBER: _ClassVar[int]
    STATUSES_FIELD_NUMBER: _ClassVar[int]
    SHOW_JOBS_WITHOUT_USER_HASH_FIELD_NUMBER: _ClassVar[int]
    skip_finished: bool
    accessible_workspaces: _containers.RepeatedScalarFieldContainer[str]
    job_ids: JobIds
    workspace_match: str
    name_match: str
    pool_match: str
    page: int
    limit: int
    user_hashes: UserHashes
    statuses: Statuses
    show_jobs_without_user_hash: bool
    def __init__(self, skip_finished: bool = ..., accessible_workspaces: _Optional[_Iterable[str]] = ..., job_ids: _Optional[_Union[JobIds, _Mapping]] = ..., workspace_match: _Optional[str] = ..., name_match: _Optional[str] = ..., pool_match: _Optional[str] = ..., page: _Optional[int] = ..., limit: _Optional[int] = ..., user_hashes: _Optional[_Union[UserHashes, _Mapping]] = ..., statuses: _Optional[_Union[Statuses, _Mapping]] = ..., show_jobs_without_user_hash: bool = ...) -> None: ...

class ManagedJobInfo(_message.Message):
    __slots__ = ("job_id", "task_id", "job_name", "task_name", "job_duration", "workspace", "status", "schedule_state", "resources", "cluster_resources", "cluster_resources_full", "cloud", "region", "infra", "accelerators", "recovery_count", "details", "failure_reason", "user_name", "user_hash", "submitted_at", "start_at", "end_at", "user_yaml", "entrypoint", "metadata", "pool", "pool_hash")
    class AcceleratorsEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: float
        def __init__(self, key: _Optional[str] = ..., value: _Optional[float] = ...) -> None: ...
    class MetadataEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: str
        def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ...
    JOB_ID_FIELD_NUMBER: _ClassVar[int]
    TASK_ID_FIELD_NUMBER: _ClassVar[int]
    JOB_NAME_FIELD_NUMBER: _ClassVar[int]
    TASK_NAME_FIELD_NUMBER: _ClassVar[int]
    JOB_DURATION_FIELD_NUMBER: _ClassVar[int]
    WORKSPACE_FIELD_NUMBER: _ClassVar[int]
    STATUS_FIELD_NUMBER: _ClassVar[int]
    SCHEDULE_STATE_FIELD_NUMBER: _ClassVar[int]
    RESOURCES_FIELD_NUMBER: _ClassVar[int]
    CLUSTER_RESOURCES_FIELD_NUMBER: _ClassVar[int]
    CLUSTER_RESOURCES_FULL_FIELD_NUMBER: _ClassVar[int]
    CLOUD_FIELD_NUMBER: _ClassVar[int]
    REGION_FIELD_NUMBER: _ClassVar[int]
    INFRA_FIELD_NUMBER: _ClassVar[int]
    ACCELERATORS_FIELD_NUMBER: _ClassVar[int]
    RECOVERY_COUNT_FIELD_NUMBER: _ClassVar[int]
    DETAILS_FIELD_NUMBER: _ClassVar[int]
    FAILURE_REASON_FIELD_NUMBER: _ClassVar[int]
    USER_NAME_FIELD_NUMBER: _ClassVar[int]
    USER_HASH_FIELD_NUMBER: _ClassVar[int]
    SUBMITTED_AT_FIELD_NUMBER: _ClassVar[int]
    START_AT_FIELD_NUMBER: _ClassVar[int]
    END_AT_FIELD_NUMBER: _ClassVar[int]
    USER_YAML_FIELD_NUMBER: _ClassVar[int]
    ENTRYPOINT_FIELD_NUMBER: _ClassVar[int]
    METADATA_FIELD_NUMBER: _ClassVar[int]
    POOL_FIELD_NUMBER: _ClassVar[int]
    POOL_HASH_FIELD_NUMBER: _ClassVar[int]
    job_id: int
    task_id: int
    job_name: str
    task_name: str
    job_duration: float
    workspace: str
    status: ManagedJobStatus
    schedule_state: ManagedJobScheduleState
    resources: str
    cluster_resources: str
    cluster_resources_full: str
    cloud: str
    region: str
    infra: str
    accelerators: _containers.ScalarMap[str, float]
    recovery_count: int
    details: str
    failure_reason: str
    user_name: str
    user_hash: str
    submitted_at: float
    start_at: float
    end_at: float
    user_yaml: str
    entrypoint: str
    metadata: _containers.ScalarMap[str, str]
    pool: str
    pool_hash: str
    def __init__(self, job_id: _Optional[int] = ..., task_id: _Optional[int] = ..., job_name: _Optional[str] = ..., task_name: _Optional[str] = ..., job_duration: _Optional[float] = ..., workspace: _Optional[str] = ..., status: _Optional[_Union[ManagedJobStatus, str]] = ..., schedule_state: _Optional[_Union[ManagedJobScheduleState, str]] = ..., resources: _Optional[str] = ..., cluster_resources: _Optional[str] = ..., cluster_resources_full: _Optional[str] = ..., cloud: _Optional[str] = ..., region: _Optional[str] = ..., infra: _Optional[str] = ..., accelerators: _Optional[_Mapping[str, float]] = ..., recovery_count: _Optional[int] = ..., details: _Optional[str] = ..., failure_reason: _Optional[str] = ..., user_name: _Optional[str] = ..., user_hash: _Optional[str] = ..., submitted_at: _Optional[float] = ..., start_at: _Optional[float] = ..., end_at: _Optional[float] = ..., user_yaml: _Optional[str] = ..., entrypoint: _Optional[str] = ..., metadata: _Optional[_Mapping[str, str]] = ..., pool: _Optional[str] = ..., pool_hash: _Optional[str] = ...) -> None: ...

class GetJobTableResponse(_message.Message):
    __slots__ = ("jobs", "total", "total_no_filter", "status_counts")
    class StatusCountsEntry(_message.Message):
        __slots__ = ("key", "value")
        KEY_FIELD_NUMBER: _ClassVar[int]
        VALUE_FIELD_NUMBER: _ClassVar[int]
        key: str
        value: int
        def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ...
    JOBS_FIELD_NUMBER: _ClassVar[int]
    TOTAL_FIELD_NUMBER: _ClassVar[int]
    TOTAL_NO_FILTER_FIELD_NUMBER: _ClassVar[int]
    STATUS_COUNTS_FIELD_NUMBER: _ClassVar[int]
    jobs: _containers.RepeatedCompositeFieldContainer[ManagedJobInfo]
    total: int
    total_no_filter: int
    status_counts: _containers.ScalarMap[str, int]
    def __init__(self, jobs: _Optional[_Iterable[_Union[ManagedJobInfo, _Mapping]]] = ..., total: _Optional[int] = ..., total_no_filter: _Optional[int] = ..., status_counts: _Optional[_Mapping[str, int]] = ...) -> None: ...

class GetAllJobIdsByNameRequest(_message.Message):
    __slots__ = ("job_name",)
    JOB_NAME_FIELD_NUMBER: _ClassVar[int]
    job_name: str
    def __init__(self, job_name: _Optional[str] = ...) -> None: ...

class GetAllJobIdsByNameResponse(_message.Message):
    __slots__ = ("job_ids",)
    JOB_IDS_FIELD_NUMBER: _ClassVar[int]
    job_ids: _containers.RepeatedScalarFieldContainer[int]
    def __init__(self, job_ids: _Optional[_Iterable[int]] = ...) -> None: ...

class CancelJobsRequest(_message.Message):
    __slots__ = ("current_workspace", "user_hash", "all_users", "job_ids", "job_name", "pool_name")
    CURRENT_WORKSPACE_FIELD_NUMBER: _ClassVar[int]
    USER_HASH_FIELD_NUMBER: _ClassVar[int]
    ALL_USERS_FIELD_NUMBER: _ClassVar[int]
    JOB_IDS_FIELD_NUMBER: _ClassVar[int]
    JOB_NAME_FIELD_NUMBER: _ClassVar[int]
    POOL_NAME_FIELD_NUMBER: _ClassVar[int]
    current_workspace: str
    user_hash: str
    all_users: bool
    job_ids: JobIds
    job_name: str
    pool_name: str
    def __init__(self, current_workspace: _Optional[str] = ..., user_hash: _Optional[str] = ..., all_users: bool = ..., job_ids: _Optional[_Union[JobIds, _Mapping]] = ..., job_name: _Optional[str] = ..., pool_name: _Optional[str] = ...) -> None: ...

class CancelJobsResponse(_message.Message):
    __slots__ = ("message",)
    MESSAGE_FIELD_NUMBER: _ClassVar[int]
    message: str
    def __init__(self, message: _Optional[str] = ...) -> None: ...

class StreamLogsRequest(_message.Message):
    __slots__ = ("job_name", "job_id", "follow", "controller", "tail")
    JOB_NAME_FIELD_NUMBER: _ClassVar[int]
    JOB_ID_FIELD_NUMBER: _ClassVar[int]
    FOLLOW_FIELD_NUMBER: _ClassVar[int]
    CONTROLLER_FIELD_NUMBER: _ClassVar[int]
    TAIL_FIELD_NUMBER: _ClassVar[int]
    job_name: str
    job_id: int
    follow: bool
    controller: bool
    tail: int
    def __init__(self, job_name: _Optional[str] = ..., job_id: _Optional[int] = ..., follow: bool = ..., controller: bool = ..., tail: _Optional[int] = ...) -> None: ...

class StreamLogsResponse(_message.Message):
    __slots__ = ("log_line", "exit_code")
    LOG_LINE_FIELD_NUMBER: _ClassVar[int]
    EXIT_CODE_FIELD_NUMBER: _ClassVar[int]
    log_line: str
    exit_code: int
    def __init__(self, log_line: _Optional[str] = ..., exit_code: _Optional[int] = ...) -> None: ...
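The optional JobIds/UserHashes/Statuses wrapper messages let a caller distinguish "no filter" (field unset) from "filter to an empty list". A sketch of building a request and calling the service through the companion managed_jobsv1_pb2_grpc module (listed in the file list above but not shown in this section; the stub name follows standard protoc naming, and the address is a placeholder):

import grpc

from sky.schemas.generated import managed_jobsv1_pb2 as pb
from sky.schemas.generated import managed_jobsv1_pb2_grpc as pb_grpc

request = pb.GetJobTableRequest(
    skip_finished=True,
    accessible_workspaces=['default'],
    # Wrap the list so that an unset field still means "no status filter".
    statuses=pb.Statuses(statuses=['RUNNING', 'PENDING']),
)

# 'localhost:50051' is a placeholder; the real channel setup is not part of
# this section.
with grpc.insecure_channel('localhost:50051') as channel:
    stub = pb_grpc.ManagedJobsServiceStub(channel)
    response = stub.GetJobTable(request)
    print(response.total, len(response.jobs))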