tilebox-workflows 0.48.0__py3-none-any.whl → 0.49.0b1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- tilebox/workflows/cache.py +6 -2
- tilebox/workflows/data.py +10 -1
- tilebox/workflows/jobs/client.py +17 -5
- tilebox/workflows/runner/task_runner.py +7 -1
- tilebox/workflows/task.py +7 -0
- tilebox/workflows/workflows/v1/core_pb2.py +34 -34
- tilebox/workflows/workflows/v1/core_pb2.pyi +8 -2
- tilebox/workflows/workflows/v1/job_pb2.py +36 -34
- tilebox/workflows/workflows/v1/job_pb2.pyi +8 -6
- tilebox/workflows/workflows/v1/task_pb2.py +15 -15
- tilebox/workflows/workflows/v1/task_pb2.pyi +4 -4
- {tilebox_workflows-0.48.0.dist-info → tilebox_workflows-0.49.0b1.dist-info}/METADATA +1 -1
- {tilebox_workflows-0.48.0.dist-info → tilebox_workflows-0.49.0b1.dist-info}/RECORD +14 -14
- {tilebox_workflows-0.48.0.dist-info → tilebox_workflows-0.49.0b1.dist-info}/WHEEL +0 -0
tilebox/workflows/cache.py
CHANGED
@@ -10,6 +10,7 @@ import boto3
from botocore.exceptions import ClientError
from google.cloud.exceptions import NotFound
from google.cloud.storage import Blob, Bucket
+from obstore.exceptions import GenericError
from obstore.store import ObjectStore


@@ -100,11 +101,14 @@ class ObstoreCache(JobCache):
        try:
            entry = self.store.get(str(self.prefix / key))
            return bytes(entry.bytes())
-        except OSError:
+        except (OSError, GenericError):
+            # GenericError is raised if the key contains separator characters, but one of the parents is a file
+            # instead of a directory
            raise KeyError(f"{key} is not cached!") from None

    def __iter__(self) -> Iterator[str]:
-
+        prefix = "" if self.prefix == ObjectPath(".") else str(self.prefix)
+        for obj in self.store.list_with_delimiter(prefix)["objects"]:
            path: str = obj["path"]
            yield path.removeprefix(str(self.prefix) + "/")

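The broadened except clause above matters when a lookup key shares a path prefix with an existing entry, and the new prefix handling lets iteration work when the cache has no prefix at all. A minimal sketch of the intended behaviour, assuming `cache` is an ObstoreCache-backed JobCache with the mapping-style interface shown in the hunks (its construction and item assignment are not part of this diff):

# Hypothetical cache instance; how it is constructed is not shown in this diff.
cache["config"] = b"payload"  # stored as an object at <prefix>/config

try:
    # "config" is an object, so "config/nested" has a file as one of its parent
    # path segments; obstore raises GenericError, which now surfaces as KeyError.
    cache["config/nested"]
except KeyError:
    pass  # callers see a uniform cache-miss, regardless of which error was raised

# With no prefix set, ObjectPath(".") is translated to "" before listing,
# so iterating the cache root works against object stores as well.
for key in cache:
    print(key)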
tilebox/workflows/data.py
CHANGED
@@ -45,7 +45,8 @@ class TaskState(Enum):
    RUNNING = 2
    COMPUTED = 3
    FAILED = 4
-
+    SKIPPED = 5
+    FAILED_OPTIONAL = 6


_TASK_STATES = {state.value: state for state in TaskState}
@@ -349,6 +350,7 @@ class TaskSubmissionGroup:
    cluster_slug_pointers: list[int] = field(default_factory=list)
    display_pointers: list[int] = field(default_factory=list)
    max_retries_values: list[int] = field(default_factory=list)
+    optional_values: list[bool] = field(default_factory=list)

    @classmethod
    def from_message(cls, group: core_pb2.TaskSubmissionGroup) -> "TaskSubmissionGroup":
@@ -360,6 +362,7 @@ class TaskSubmissionGroup:
            cluster_slug_pointers=list(group.cluster_slug_pointers),
            display_pointers=list(group.display_pointers),
            max_retries_values=list(group.max_retries_values),
+            optional_values=list(group.optional_values),
        )

    def to_message(self) -> core_pb2.TaskSubmissionGroup:
@@ -371,6 +374,7 @@ class TaskSubmissionGroup:
            cluster_slug_pointers=self.cluster_slug_pointers,
            display_pointers=self.display_pointers,
            max_retries_values=self.max_retries_values,
+            optional_values=self.optional_values,
        )


@@ -735,6 +739,7 @@ class QueryFilters:
    automation_ids: list[UUID]
    job_states: list[JobState]
    name: str | None
+    task_states: list[TaskState]

    @classmethod
    def from_message(cls, filters: job_pb2.QueryFilters) -> "QueryFilters":
@@ -746,6 +751,7 @@ class QueryFilters:
            automation_ids=[uuid_message_to_uuid(uuid) for uuid in filters.automation_ids],
            job_states=[_JOB_STATES[state] for state in filters.states],
            name=filters.name or None,
+            task_states=[_TASK_STATES[state] for state in filters.task_states],
        )

    def to_message(self) -> job_pb2.QueryFilters:
@@ -757,4 +763,7 @@ class QueryFilters:
            else None,
            states=[cast(core_pb2.JobState, state.value) for state in self.job_states] if self.job_states else None,
            name=self.name or None,  # empty string becomes None
+            task_states=[cast(core_pb2.TaskState, state.value) for state in self.task_states]
+            if self.task_states
+            else None,
        )
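The two new enum members line up with the TASK_STATE_SKIPPED and TASK_STATE_FAILED_OPTIONAL values added to the generated protobuf code further below, so the existing value-based lookup keeps resolving wire values. A small illustrative check (not part of the package itself):

from tilebox.workflows.data import TaskState, _TASK_STATES

# The module-level lookup shown above maps raw protobuf enum values to TaskState members,
# so the new states round-trip exactly like the existing ones.
assert _TASK_STATES[5] is TaskState.SKIPPED
assert _TASK_STATES[6] is TaskState.FAILED_OPTIONAL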
tilebox/workflows/jobs/client.py
CHANGED
@@ -12,6 +12,7 @@ from tilebox.workflows.data import (
    JobState,
    QueryFilters,
    QueryJobsResponse,
+    TaskState,
)
from tilebox.workflows.jobs.service import JobService
from tilebox.workflows.observability.tracing import WorkflowTracer, get_trace_parent_of_current_span
@@ -35,10 +36,10 @@ JobIDLike: TypeAlias = Job | UUID | str
class JobClient:
    def __init__(self, service: JobService, tracer: WorkflowTracer | None = None) -> None:
        """Create a new job client.
-
-
-
-
+        z
+        Args:
+            service: The service to use for job operations.
+            tracer: The tracer to use for tracing.
        """
        self._service = service
        self._tracer = tracer or WorkflowTracer()
@@ -77,7 +78,9 @@
                f"or exactly one cluster per task. But got {len(tasks)} tasks and {len(slugs)} clusters."
            )

-        task_submissions = [
+        task_submissions = [
+            FutureTask(i, task, [], slugs[i], max_retries, optional=False) for i, task in enumerate(tasks)
+        ]
        submissions_merged = merge_future_tasks_to_submissions(task_submissions, default_cluster)
        if submissions_merged is None:
            raise ValueError("At least one task must be submitted.")
@@ -163,6 +166,7 @@
        automation_ids: UUID | list[UUID] | None = None,
        job_states: JobState | list[JobState] | None = None,
        name: str | None = None,
+        task_states: TaskState | list[TaskState] | None = None,
    ) -> list[Job]:
        """List jobs in the given temporal extent.

@@ -185,6 +189,9 @@
                selected states are returned.
            name: A name to filter jobs by. If specified, only jobs with a matching name are returned. The match is
                case-insensitive and uses a fuzzy matching scheme.
+            task_states: A task state or list of task states to filter jobs by. If specified, only jobs that have at
+                least one task in any of the selected states are returned.
+
        Returns:
            A list of jobs matching the given filters.
        """
@@ -227,12 +234,17 @@
        if not isinstance(job_states, list):
            job_states = [job_states]

+        task_states = task_states or []
+        if not isinstance(task_states, list):
+            task_states = [task_states]
+
        filters = QueryFilters(
            time_interval=time_interval,
            id_interval=id_interval,
            automation_ids=automation_ids,
            job_states=job_states,
            name=name,
+            task_states=task_states,
        )

        def request(page: PaginationProtocol) -> QueryJobsResponse:
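With the new parameter, job queries can be narrowed by task state as well. The sketch below is illustrative only: it assumes `job_client` is a JobClient obtained from the workflows client, that the method whose signature is extended above is its query method, and that the temporal extent is passed positionally as suggested by the docstring, none of which is spelled out by this diff:

from tilebox.workflows.data import TaskState

# A single state or a list both work: the client normalizes a scalar to a list
# before building the QueryFilters message, as shown in the last hunk above.
jobs = job_client.query(
    ("2025-01-01", "2025-02-01"),                        # temporal extent to search (assumed calling convention)
    task_states=[TaskState.FAILED, TaskState.SKIPPED],   # jobs with at least one task in these states
)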
tilebox/workflows/runner/task_runner.py
CHANGED
@@ -534,6 +534,7 @@ class ExecutionContext(ExecutionContextBase):
        depends_on: FutureTask | list[FutureTask] | None = None,
        cluster: str | None = None,
        max_retries: int = 0,
+        optional: bool = False,
    ) -> FutureTask:
        dependencies: list[int] = []

@@ -557,6 +558,7 @@ class ExecutionContext(ExecutionContextBase):
            depends_on=dependencies,
            cluster=cluster,
            max_retries=max_retries,
+            optional=optional,
        )
        self._sub_tasks.append(subtask)
        return subtask
@@ -567,9 +569,13 @@ class ExecutionContext(ExecutionContextBase):
        depends_on: FutureTask | list[FutureTask] | None = None,
        cluster: str | None = None,
        max_retries: int = 0,
+        optional: bool = False,
    ) -> list[FutureTask]:
        return [
-            self.submit_subtask(
+            self.submit_subtask(
+                task, cluster=cluster, max_retries=max_retries, depends_on=depends_on, optional=optional
+            )
+            for task in tasks
        ]

    def submit_batch(
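The batch helper now simply forwards the new flag, together with cluster, max_retries and depends_on, to submit_subtask for each task. A short equivalence sketch, assuming a `context` and a list of `tasks` as in the surrounding runner code:

# Equivalent ways of submitting a group of optional subtasks after this change.
futures = context.submit_subtasks(tasks, optional=True, max_retries=1)
futures = [context.submit_subtask(task, optional=True, max_retries=1) for task in tasks]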
tilebox/workflows/task.py
CHANGED
@@ -227,6 +227,7 @@ class FutureTask:
    depends_on: list[int]
    cluster: str | None
    max_retries: int
+    optional: bool

    def identifier(self) -> TaskIdentifier:
        return _task_meta(self.task).identifier
@@ -313,6 +314,7 @@ def merge_future_tasks_to_submissions(future_tasks: list[FutureTask], fallback_c
        group.cluster_slug_pointers.append(cluster_slugs.append_if_unique(task.cluster or fallback_cluster))
        group.display_pointers.append(displays.append_if_unique(task.display()))
        group.max_retries_values.append(task.max_retries)
+        group.optional_values.append(task.optional)

    return TaskSubmissions(
        task_groups=groups,
@@ -361,6 +363,7 @@ class ExecutionContext(ABC):
        depends_on: FutureTask | list[FutureTask] | None = None,
        cluster: str | None = None,
        max_retries: int = 0,
+        optional: bool = False,
    ) -> FutureTask:
        """Submit a subtask of the current task.

@@ -371,6 +374,9 @@ class ExecutionContext(ABC):
            cluster: Slug of the cluster to submit the subtask to. Defaults to None, which means the same cluster as the
                task runner will be used.
            max_retries: The maximum number of retries for the subtask in case of failure. Defaults to 0.
+            optional: Whether the subtask is optional. If True, the subtask will not fail the job if it fails. Also
+                tasks that depend on this task will still execute after this task even if this task failed. Defaults
+                to False.

        Returns:
            Submitted subtask.
@@ -383,6 +389,7 @@ class ExecutionContext(ABC):
        depends_on: FutureTask | list[FutureTask] | None = None,
        cluster: str | None = None,
        max_retries: int = 0,
+        optional: bool = False,
    ) -> list[FutureTask]:
        """Submit a batch of subtasks of the current task. Similar to `submit_subtask`, but for multiple tasks."""

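Putting the new flag together: an optional subtask that fails does not fail the job, and tasks depending on it still run afterwards. A hedged end-to-end sketch, assuming Task and ExecutionContext are importable from the package root as in the Tilebox examples:

from tilebox.workflows import ExecutionContext, Task


class GenerateThumbnail(Task):
    def execute(self, context: ExecutionContext) -> None:
        ...  # may fail; the failure is tolerated because it is submitted with optional=True


class PublishResult(Task):
    def execute(self, context: ExecutionContext) -> None:
        ...  # still scheduled even if GenerateThumbnail ends up in FAILED_OPTIONAL


class ProcessScene(Task):
    def execute(self, context: ExecutionContext) -> None:
        thumbnail = context.submit_subtask(GenerateThumbnail(), optional=True, max_retries=2)
        # depends_on accepts a single FutureTask or a list, per the signature shown above
        context.submit_subtask(PublishResult(), depends_on=thumbnail)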
tilebox/workflows/workflows/v1/core_pb2.py
CHANGED
@@ -28,7 +28,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
from tilebox.datasets.tilebox.v1 import id_pb2 as tilebox_dot_v1_dot_id__pb2


-
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17workflows/v1/core.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13tilebox/v1/id.proto\"
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17workflows/v1/core.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13tilebox/v1/id.proto\"d\n\x07\x43luster\x12\x12\n\x04slug\x18\x02 \x01(\tR\x04slug\x12!\n\x0c\x64isplay_name\x18\x03 \x01(\tR\x0b\x64isplayName\x12\x1c\n\tdeletable\x18\x04 \x01(\x08R\tdeletableJ\x04\x08\x01\x10\x02\"\xe5\x04\n\x03Job\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12!\n\x0ctrace_parent\x18\x03 \x01(\tR\x0btraceParent\x12\x1e\n\x08\x63\x61nceled\x18\x05 \x01(\x08\x42\x02\x18\x01R\x08\x63\x61nceled\x12\x43\n\x0clegacy_state\x18\x06 \x01(\x0e\x32\x1c.workflows.v1.LegacyJobStateB\x02\x18\x01R\x0blegacyState\x12=\n\x0csubmitted_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0bsubmittedAt\x12=\n\nstarted_at\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x02\x18\x01R\tstartedAt\x12@\n\x0etask_summaries\x18\t \x03(\x0b\x32\x19.workflows.v1.TaskSummaryR\rtaskSummaries\x12\x33\n\rautomation_id\x18\n \x01(\x0b\x32\x0e.tilebox.v1.IDR\x0c\x61utomationId\x12\x32\n\x08progress\x18\x0b \x03(\x0b\x32\x16.workflows.v1.ProgressR\x08progress\x12,\n\x05state\x18\x0c \x01(\x0e\x32\x16.workflows.v1.JobStateR\x05state\x12\x45\n\x0f\x65xecution_stats\x18\r \x01(\x0b\x32\x1c.workflows.v1.ExecutionStatsR\x0e\x65xecutionStatsJ\x04\x08\x04\x10\x05\"\xaf\x03\n\x0e\x45xecutionStats\x12M\n\x15\x66irst_task_started_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x12\x66irstTaskStartedAt\x12K\n\x14last_task_stopped_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x11lastTaskStoppedAt\x12<\n\x0c\x63ompute_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0b\x63omputeTime\x12<\n\x0c\x65lapsed_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0b\x65lapsedTime\x12 \n\x0bparallelism\x18\x05 \x01(\x01R\x0bparallelism\x12\x1f\n\x0btotal_tasks\x18\x06 \x01(\x04R\ntotalTasks\x12\x42\n\x0etasks_by_state\x18\x07 \x03(\x0b\x32\x1c.workflows.v1.TaskStateCountR\x0ctasksByState\"U\n\x0eTaskStateCount\x12-\n\x05state\x18\x01 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\x12\x14\n\x05\x63ount\x18\x02 \x01(\x04R\x05\x63ount\"\x9f\x02\n\x0bTaskSummary\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12\x18\n\x07\x64isplay\x18\x02 \x01(\tR\x07\x64isplay\x12-\n\x05state\x18\x03 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\x12+\n\tparent_id\x18\x04 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x08parentId\x12\x39\n\nstarted_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x39\n\nstopped_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstoppedAtJ\x04\x08\x05\x10\x06\"S\n\x08Progress\x12\x1d\n\x05label\x18\x01 \x01(\tB\x07\xbaH\x04r\x02\x18\x64R\x05label\x12\x14\n\x05total\x18\x02 \x01(\x04R\x05total\x12\x12\n\x04\x64one\x18\x03 \x01(\x04R\x04\x64one\"\xa2\x03\n\x04Task\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12<\n\nidentifier\x18\x02 \x01(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\nidentifier\x12-\n\x05state\x18\x03 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\x12\x1b\n\x05input\x18\x04 \x01(\x0c\x42\x05\xaa\x01\x02\x08\x01R\x05input\x12\x1f\n\x07\x64isplay\x18\x05 \x01(\tB\x05\xaa\x01\x02\x08\x01R\x07\x64isplay\x12#\n\x03job\x18\x06 \x01(\x0b\x32\x11.workflows.v1.JobR\x03job\x12+\n\tparent_id\x18\x07 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x08parentId\x12-\n\ndepends_on\x18\x08 
\x03(\x0b\x32\x0e.tilebox.v1.IDR\tdependsOn\x12-\n\x05lease\x18\t \x01(\x0b\x32\x17.workflows.v1.TaskLeaseR\x05lease\x12\x1f\n\x0bretry_count\x18\n \x01(\x03R\nretryCount\"d\n\x0eTaskIdentifier\x12\x1e\n\x04name\x18\x01 \x01(\tB\n\xbaH\x07r\x05 \x01(\x80\x02R\x04name\x12\x32\n\x07version\x18\x02 \x01(\tB\x18\xbaH\x15r\x13 \x01\x32\x0f^v(\\d+)\\.(\\d+)$R\x07version\"1\n\x05Tasks\x12(\n\x05tasks\x18\x01 \x03(\x0b\x32\x12.workflows.v1.TaskR\x05tasks\"\x98\x02\n\x14SingleTaskSubmission\x12!\n\x0c\x63luster_slug\x18\x01 \x01(\tR\x0b\x63lusterSlug\x12<\n\nidentifier\x18\x02 \x01(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\nidentifier\x12!\n\x07\x64isplay\x18\x04 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x07\x64isplay\x12\x32\n\x0c\x64\x65pendencies\x18\x05 \x03(\x03\x42\x0e\xbaH\x0b\x92\x01\x08\"\x06\"\x04\x18?(\x00R\x0c\x64\x65pendencies\x12(\n\x0bmax_retries\x18\x06 \x01(\x03\x42\x07\xbaH\x04\"\x02(\x00R\nmaxRetries\x12\x1e\n\x05input\x18\x03 \x01(\x0c\x42\x08\xbaH\x05z\x03\x18\x80\x10R\x05input\"\x91\x02\n\x0fTaskSubmissions\x12N\n\x0btask_groups\x18\x01 \x03(\x0b\x32!.workflows.v1.TaskSubmissionGroupB\n\xbaH\x07\x92\x01\x04\x08\x01\x10@R\ntaskGroups\x12.\n\x13\x63luster_slug_lookup\x18\x02 \x03(\tR\x11\x63lusterSlugLookup\x12I\n\x11identifier_lookup\x18\x03 \x03(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\x10identifierLookup\x12\x33\n\x0e\x64isplay_lookup\x18\x04 \x03(\tB\x0c\xbaH\t\x92\x01\x06\"\x04r\x02\x10\x01R\rdisplayLookup\"\xd2\t\n\x13TaskSubmissionGroup\x12?\n\x1c\x64\x65pendencies_on_other_groups\x18\x01 \x03(\rR\x19\x64\x65pendenciesOnOtherGroups\x12\'\n\x06inputs\x18\x02 \x03(\x0c\x42\x0f\xbaH\x0c\x92\x01\t\x08\x01\"\x05z\x03\x18\x80\x10R\x06inputs\x12/\n\x13identifier_pointers\x18\x03 \x03(\x04R\x12identifierPointers\x12\x32\n\x15\x63luster_slug_pointers\x18\x04 \x03(\x04R\x13\x63lusterSlugPointers\x12)\n\x10\x64isplay_pointers\x18\x05 \x03(\x04R\x0f\x64isplayPointers\x12,\n\x12max_retries_values\x18\x06 \x03(\x03R\x10maxRetriesValues\x12\'\n\x0foptional_values\x18\x07 \x03(\x08R\x0eoptionalValues:\xe9\x06\xbaH\xe5\x06\x1a\xa6\x01\n,task_submission_group.identifiers_size_match\x12?The number of inputs must match the number of task identifiers.\x1a\x35this.inputs.size() == this.identifier_pointers.size()\x1a\xa7\x01\n.task_submission_group.cluster_slugs_size_match\x12<The number of cluster slugs must match the number of inputs.\x1a\x37this.inputs.size() == this.cluster_slug_pointers.size()\x1a\xa0\x01\n)task_submission_group.displays_size_match\x12?The number of display pointers must match the number of inputs.\x1a\x32this.inputs.size() == this.display_pointers.size()\x1a\xa7\x01\n,task_submission_group.max_retries_size_match\x12\x41The number of max_retries_values must match the number of inputs.\x1a\x34this.inputs.size() == this.max_retries_values.size()\x1a\xc2\x01\n)task_submission_group.optional_size_match\x12>The number of optional values must match the number of inputs.\x1aUthis.optional_values.size() == 0 || this.inputs.size() == this.optional_values.size()\"\xa9\x01\n\tTaskLease\x12/\n\x05lease\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x05lease\x12k\n%recommended_wait_until_next_extension\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationR!recommendedWaitUntilNextExtension*\x8d\x01\n\x0eLegacyJobState\x12 
\n\x1cLEGACY_JOB_STATE_UNSPECIFIED\x10\x00\x12\x1b\n\x17LEGACY_JOB_STATE_QUEUED\x10\x01\x12\x1c\n\x18LEGACY_JOB_STATE_STARTED\x10\x02\x12\x1e\n\x1aLEGACY_JOB_STATE_COMPLETED\x10\x03*\xb3\x01\n\x08JobState\x12\x19\n\x15JOB_STATE_UNSPECIFIED\x10\x00\x12\x17\n\x13JOB_STATE_SUBMITTED\x10\x01\x12\x15\n\x11JOB_STATE_RUNNING\x10\x02\x12\x15\n\x11JOB_STATE_STARTED\x10\x03\x12\x17\n\x13JOB_STATE_COMPLETED\x10\x04\x12\x14\n\x10JOB_STATE_FAILED\x10\x05\x12\x16\n\x12JOB_STATE_CANCELED\x10\x06*\xbe\x01\n\tTaskState\x12\x1a\n\x16TASK_STATE_UNSPECIFIED\x10\x00\x12\x15\n\x11TASK_STATE_QUEUED\x10\x01\x12\x16\n\x12TASK_STATE_RUNNING\x10\x02\x12\x17\n\x13TASK_STATE_COMPUTED\x10\x03\x12\x15\n\x11TASK_STATE_FAILED\x10\x04\x12\x16\n\x12TASK_STATE_SKIPPED\x10\x05\x12\x1e\n\x1aTASK_STATE_FAILED_OPTIONAL\x10\x06\x42s\n\x10\x63om.workflows.v1B\tCoreProtoP\x01\xa2\x02\x03WXX\xaa\x02\x0cWorkflows.V1\xca\x02\x0cWorkflows\\V1\xe2\x02\x18Workflows\\V1\\GPBMetadata\xea\x02\rWorkflows::V1\x92\x03\x02\x08\x02\x62\x08\x65\x64itionsp\xe8\x07')


_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -65,39 +65,39 @@ if not _descriptor._USE_C_DESCRIPTORS:
  _globals['_TASKSUBMISSIONS'].fields_by_name['display_lookup']._loaded_options = None
  _globals['_TASKSUBMISSIONS'].fields_by_name['display_lookup']._serialized_options = b'\272H\t\222\001\006\"\004r\002\020\001'
  _globals['_TASKSUBMISSIONGROUP'].fields_by_name['inputs']._loaded_options = None
-  _globals['_TASKSUBMISSIONGROUP'].fields_by_name['inputs']._serialized_options = b'\272H\
+  _globals['_TASKSUBMISSIONGROUP'].fields_by_name['inputs']._serialized_options = b'\272H\014\222\001\t\010\001\"\005z\003\030\200\020'
  _globals['_TASKSUBMISSIONGROUP']._loaded_options = None
-  _globals['_TASKSUBMISSIONGROUP']._serialized_options = b'\272H\
-  _globals['_LEGACYJOBSTATE']._serialized_start=
-  _globals['_LEGACYJOBSTATE']._serialized_end=
-  _globals['_JOBSTATE']._serialized_start=
-  _globals['_JOBSTATE']._serialized_end=
-  _globals['_TASKSTATE']._serialized_start=
-  _globals['_TASKSTATE']._serialized_end=
+
_globals['_TASKSUBMISSIONGROUP']._serialized_options = b'\272H\345\006\032\246\001\n,task_submission_group.identifiers_size_match\022?The number of inputs must match the number of task identifiers.\0325this.inputs.size() == this.identifier_pointers.size()\032\247\001\n.task_submission_group.cluster_slugs_size_match\022<The number of cluster slugs must match the number of inputs.\0327this.inputs.size() == this.cluster_slug_pointers.size()\032\240\001\n)task_submission_group.displays_size_match\022?The number of display pointers must match the number of inputs.\0322this.inputs.size() == this.display_pointers.size()\032\247\001\n,task_submission_group.max_retries_size_match\022AThe number of max_retries_values must match the number of inputs.\0324this.inputs.size() == this.max_retries_values.size()\032\302\001\n)task_submission_group.optional_size_match\022>The number of optional values must match the number of inputs.\032Uthis.optional_values.size() == 0 || this.inputs.size() == this.optional_values.size()'
+  _globals['_LEGACYJOBSTATE']._serialized_start=4313
+  _globals['_LEGACYJOBSTATE']._serialized_end=4454
+  _globals['_JOBSTATE']._serialized_start=4457
+  _globals['_JOBSTATE']._serialized_end=4636
+  _globals['_TASKSTATE']._serialized_start=4639
+  _globals['_TASKSTATE']._serialized_end=4829
  _globals['_CLUSTER']._serialized_start=156
-  _globals['_CLUSTER']._serialized_end=
-  _globals['_JOB']._serialized_start=
-  _globals['_JOB']._serialized_end=
-  _globals['_EXECUTIONSTATS']._serialized_start=
-  _globals['_EXECUTIONSTATS']._serialized_end=
-  _globals['_TASKSTATECOUNT']._serialized_start=
-  _globals['_TASKSTATECOUNT']._serialized_end=
-  _globals['_TASKSUMMARY']._serialized_start=
-  _globals['_TASKSUMMARY']._serialized_end=
-  _globals['_PROGRESS']._serialized_start=
-  _globals['_PROGRESS']._serialized_end=
-  _globals['_TASK']._serialized_start=
-  _globals['_TASK']._serialized_end=
-  _globals['_TASKIDENTIFIER']._serialized_start=
-  _globals['_TASKIDENTIFIER']._serialized_end=
-  _globals['_TASKS']._serialized_start=
-  _globals['_TASKS']._serialized_end=
-  _globals['_SINGLETASKSUBMISSION']._serialized_start=
-  _globals['_SINGLETASKSUBMISSION']._serialized_end=
-  _globals['_TASKSUBMISSIONS']._serialized_start=
-  _globals['_TASKSUBMISSIONS']._serialized_end=
-  _globals['_TASKSUBMISSIONGROUP']._serialized_start=
-  _globals['_TASKSUBMISSIONGROUP']._serialized_end=
-  _globals['_TASKLEASE']._serialized_start=
-  _globals['_TASKLEASE']._serialized_end=
+  _globals['_CLUSTER']._serialized_end=256
+  _globals['_JOB']._serialized_start=259
+  _globals['_JOB']._serialized_end=872
+  _globals['_EXECUTIONSTATS']._serialized_start=875
+  _globals['_EXECUTIONSTATS']._serialized_end=1306
+  _globals['_TASKSTATECOUNT']._serialized_start=1308
+  _globals['_TASKSTATECOUNT']._serialized_end=1393
+  _globals['_TASKSUMMARY']._serialized_start=1396
+  _globals['_TASKSUMMARY']._serialized_end=1683
+  _globals['_PROGRESS']._serialized_start=1685
+  _globals['_PROGRESS']._serialized_end=1768
+  _globals['_TASK']._serialized_start=1771
+  _globals['_TASK']._serialized_end=2189
+  _globals['_TASKIDENTIFIER']._serialized_start=2191
+  _globals['_TASKIDENTIFIER']._serialized_end=2291
+  _globals['_TASKS']._serialized_start=2293
+  _globals['_TASKS']._serialized_end=2342
+  _globals['_SINGLETASKSUBMISSION']._serialized_start=2345
+  _globals['_SINGLETASKSUBMISSION']._serialized_end=2625
+  _globals['_TASKSUBMISSIONS']._serialized_start=2628
+  _globals['_TASKSUBMISSIONS']._serialized_end=2901
+  _globals['_TASKSUBMISSIONGROUP']._serialized_start=2904
+  _globals['_TASKSUBMISSIONGROUP']._serialized_end=4138
+  _globals['_TASKLEASE']._serialized_start=4141
+  _globals['_TASKLEASE']._serialized_end=4310
# @@protoc_insertion_point(module_scope)
tilebox/workflows/workflows/v1/core_pb2.pyi
CHANGED
@@ -35,6 +35,8 @@ class TaskState(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
    TASK_STATE_RUNNING: _ClassVar[TaskState]
    TASK_STATE_COMPUTED: _ClassVar[TaskState]
    TASK_STATE_FAILED: _ClassVar[TaskState]
+    TASK_STATE_SKIPPED: _ClassVar[TaskState]
+    TASK_STATE_FAILED_OPTIONAL: _ClassVar[TaskState]
LEGACY_JOB_STATE_UNSPECIFIED: LegacyJobState
LEGACY_JOB_STATE_QUEUED: LegacyJobState
LEGACY_JOB_STATE_STARTED: LegacyJobState
@@ -51,6 +53,8 @@ TASK_STATE_QUEUED: TaskState
TASK_STATE_RUNNING: TaskState
TASK_STATE_COMPUTED: TaskState
TASK_STATE_FAILED: TaskState
+TASK_STATE_SKIPPED: TaskState
+TASK_STATE_FAILED_OPTIONAL: TaskState

class Cluster(_message.Message):
    __slots__ = ("slug", "display_name", "deletable")
@@ -209,20 +213,22 @@ class TaskSubmissions(_message.Message):
    def __init__(self, task_groups: _Optional[_Iterable[_Union[TaskSubmissionGroup, _Mapping]]] = ..., cluster_slug_lookup: _Optional[_Iterable[str]] = ..., identifier_lookup: _Optional[_Iterable[_Union[TaskIdentifier, _Mapping]]] = ..., display_lookup: _Optional[_Iterable[str]] = ...) -> None: ...

class TaskSubmissionGroup(_message.Message):
-    __slots__ = ("dependencies_on_other_groups", "inputs", "identifier_pointers", "cluster_slug_pointers", "display_pointers", "max_retries_values")
+    __slots__ = ("dependencies_on_other_groups", "inputs", "identifier_pointers", "cluster_slug_pointers", "display_pointers", "max_retries_values", "optional_values")
    DEPENDENCIES_ON_OTHER_GROUPS_FIELD_NUMBER: _ClassVar[int]
    INPUTS_FIELD_NUMBER: _ClassVar[int]
    IDENTIFIER_POINTERS_FIELD_NUMBER: _ClassVar[int]
    CLUSTER_SLUG_POINTERS_FIELD_NUMBER: _ClassVar[int]
    DISPLAY_POINTERS_FIELD_NUMBER: _ClassVar[int]
    MAX_RETRIES_VALUES_FIELD_NUMBER: _ClassVar[int]
+    OPTIONAL_VALUES_FIELD_NUMBER: _ClassVar[int]
    dependencies_on_other_groups: _containers.RepeatedScalarFieldContainer[int]
    inputs: _containers.RepeatedScalarFieldContainer[bytes]
    identifier_pointers: _containers.RepeatedScalarFieldContainer[int]
    cluster_slug_pointers: _containers.RepeatedScalarFieldContainer[int]
    display_pointers: _containers.RepeatedScalarFieldContainer[int]
    max_retries_values: _containers.RepeatedScalarFieldContainer[int]
-
+    optional_values: _containers.RepeatedScalarFieldContainer[bool]
+    def __init__(self, dependencies_on_other_groups: _Optional[_Iterable[int]] = ..., inputs: _Optional[_Iterable[bytes]] = ..., identifier_pointers: _Optional[_Iterable[int]] = ..., cluster_slug_pointers: _Optional[_Iterable[int]] = ..., display_pointers: _Optional[_Iterable[int]] = ..., max_retries_values: _Optional[_Iterable[int]] = ..., optional_values: _Optional[_Iterable[bool]] = ...) -> None: ...

class TaskLease(_message.Message):
    __slots__ = ("lease", "recommended_wait_until_next_extension")
tilebox/workflows/workflows/v1/job_pb2.py
CHANGED
@@ -29,7 +29,7 @@ from tilebox.workflows.workflows.v1 import core_pb2 as workflows_dot_v1_dot_core
from tilebox.workflows.workflows.v1 import diagram_pb2 as workflows_dot_v1_dot_diagram__pb2


-
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16workflows/v1/job.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x13tilebox/v1/id.proto\x1a\x16tilebox/v1/query.proto\x1a\x17workflows/v1/core.proto\x1a\x1aworkflows/v1/diagram.proto\"\
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16workflows/v1/job.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x13tilebox/v1/id.proto\x1a\x16tilebox/v1/query.proto\x1a\x17workflows/v1/core.proto\x1a\x1aworkflows/v1/diagram.proto\"\xce\x03\n\x10SubmitJobRequest\x12\x33\n\x05tasks\x18\x05 \x01(\x0b\x32\x1d.workflows.v1.TaskSubmissionsR\x05tasks\x12\"\n\x08job_name\x18\x02 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x07jobName\x12*\n\x0ctrace_parent\x18\x03 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x0btraceParent\x12\x33\n\rautomation_id\x18\x04 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x0c\x61utomationId\x12R\n\x0clegacy_tasks\x18\x01 \x03(\x0b\x32\".workflows.v1.SingleTaskSubmissionB\x0b\x18\x01\xbaH\x06\x92\x01\x03\x10\xe8\x07R\x0blegacyTasks:\xab\x01\xbaH\xa7\x01\x1a\xa4\x01\n!submit_job_request.tasks_required\x12$At least one task must be submitted.\x1aY(this.tasks != null && this.tasks.task_groups.size() > 0) || this.legacy_tasks.size() > 0\">\n\rGetJobRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\"F\n\x15GetJobProgressRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\"@\n\x0fRetryJobRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\"F\n\x10RetryJobResponse\x12\x32\n\x15num_tasks_rescheduled\x18\x01 \x01(\x03R\x13numTasksRescheduled\"A\n\x10\x43\x61ncelJobRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\"\x13\n\x11\x43\x61ncelJobResponse\"\xec\x01\n\x13VisualizeJobRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\x12\x42\n\x0erender_options\x18\x02 \x01(\x0b\x32\x1b.workflows.v1.RenderOptionsR\rrenderOptions\x12\x38\n\x05theme\x18\x03 \x01(\x0e\x32\".workflows.v1.WorkflowDiagramThemeR\x05theme\x12(\n\x10include_job_name\x18\x04 \x01(\x08R\x0eincludeJobName\"\xe9\x02\n\x0cQueryFilters\x12=\n\rtime_interval\x18\x01 \x01(\x0b\x32\x18.tilebox.v1.TimeIntervalR\x0ctimeInterval\x12\x37\n\x0bid_interval\x18\x02 \x01(\x0b\x32\x16.tilebox.v1.IDIntervalR\nidInterval\x12\x35\n\x0e\x61utomation_ids\x18\x03 \x03(\x0b\x32\x0e.tilebox.v1.IDR\rautomationIds\x12.\n\x06states\x18\x04 \x03(\x0e\x32\x16.workflows.v1.JobStateR\x06states\x12\x1b\n\x04name\x18\x05 \x01(\tB\x07\xbaH\x04r\x02\x18\x64R\x04name\x12\x38\n\x0btask_states\x18\x06 \x03(\x0e\x32\x17.workflows.v1.TaskStateR\ntaskStates:#\xbaH \"\x1e\n\rtime_interval\n\x0bid_interval\x10\x01\"{\n\x10QueryJobsRequest\x12\x34\n\x07\x66ilters\x18\x01 \x01(\x0b\x32\x1a.workflows.v1.QueryFiltersR\x07\x66ilters\x12\x31\n\x04page\x18\x02 \x01(\x0b\x32\x16.tilebox.v1.PaginationB\x05\xaa\x01\x02\x08\x01R\x04page\"v\n\x11QueryJobsResponse\x12%\n\x04jobs\x18\x01 \x03(\x0b\x32\x11.workflows.v1.JobR\x04jobs\x12:\n\tnext_page\x18\x03 \x01(\x0b\x32\x16.tilebox.v1.PaginationB\x05\xaa\x01\x02\x08\x01R\x08nextPage\"G\n\x16GetJobPrototypeRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\"w\n\x17GetJobPrototypeResponse\x12\x41\n\nroot_tasks\x18\x01 \x03(\x0b\x32\".workflows.v1.SingleTaskSubmissionR\trootTasks\x12\x19\n\x08job_name\x18\x02 \x01(\tR\x07jobName\"\xc6\x01\n\x0f\x43loneJobRequest\x12-\n\x06job_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x05jobId\x12`\n\x14root_tasks_overrides\x18\x02 
\x03(\x0b\x32\".workflows.v1.SingleTaskSubmissionB\n\xbaH\x07\x92\x01\x04\x08\x01\x10@R\x12rootTasksOverrides\x12\"\n\x08job_name\x18\x03 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x07jobName*\xd4\x01\n\x14WorkflowDiagramTheme\x12&\n\"WORKFLOW_DIAGRAM_THEME_UNSPECIFIED\x10\x00\x12 \n\x1cWORKFLOW_DIAGRAM_THEME_LIGHT\x10\x01\x12\x1f\n\x1bWORKFLOW_DIAGRAM_THEME_DARK\x10\x02\x12(\n$WORKFLOW_DIAGRAM_THEME_CONSOLE_LIGHT\x10\x03\x12\'\n#WORKFLOW_DIAGRAM_THEME_CONSOLE_DARK\x10\x04\x32\x9f\x05\n\nJobService\x12>\n\tSubmitJob\x12\x1e.workflows.v1.SubmitJobRequest\x1a\x11.workflows.v1.Job\x12\x38\n\x06GetJob\x12\x1b.workflows.v1.GetJobRequest\x1a\x11.workflows.v1.Job\x12H\n\x0eGetJobProgress\x12#.workflows.v1.GetJobProgressRequest\x1a\x11.workflows.v1.Job\x12I\n\x08RetryJob\x12\x1d.workflows.v1.RetryJobRequest\x1a\x1e.workflows.v1.RetryJobResponse\x12L\n\tCancelJob\x12\x1e.workflows.v1.CancelJobRequest\x1a\x1f.workflows.v1.CancelJobResponse\x12H\n\x0cVisualizeJob\x12!.workflows.v1.VisualizeJobRequest\x1a\x15.workflows.v1.Diagram\x12L\n\tQueryJobs\x12\x1e.workflows.v1.QueryJobsRequest\x1a\x1f.workflows.v1.QueryJobsResponse\x12^\n\x0fGetJobPrototype\x12$.workflows.v1.GetJobPrototypeRequest\x1a%.workflows.v1.GetJobPrototypeResponse\x12<\n\x08\x43loneJob\x12\x1d.workflows.v1.CloneJobRequest\x1a\x11.workflows.v1.JobBr\n\x10\x63om.workflows.v1B\x08JobProtoP\x01\xa2\x02\x03WXX\xaa\x02\x0cWorkflows.V1\xca\x02\x0cWorkflows\\V1\xe2\x02\x18Workflows\\V1\\GPBMetadata\xea\x02\rWorkflows::V1\x92\x03\x02\x08\x02\x62\x08\x65\x64itionsp\xe8\x07')


_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -37,12 +37,14 @@ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'workflows.v1.job_pb2', _glo
if not _descriptor._USE_C_DESCRIPTORS:
  _globals['DESCRIPTOR']._loaded_options = None
  _globals['DESCRIPTOR']._serialized_options = b'\n\020com.workflows.v1B\010JobProtoP\001\242\002\003WXX\252\002\014Workflows.V1\312\002\014Workflows\\V1\342\002\030Workflows\\V1\\GPBMetadata\352\002\rWorkflows::V1\222\003\002\010\002'
-  _globals['_SUBMITJOBREQUEST'].fields_by_name['legacy_tasks']._loaded_options = None
-  _globals['_SUBMITJOBREQUEST'].fields_by_name['legacy_tasks']._serialized_options = b'\272H\007\222\001\004\010\001\020@'
  _globals['_SUBMITJOBREQUEST'].fields_by_name['job_name']._loaded_options = None
  _globals['_SUBMITJOBREQUEST'].fields_by_name['job_name']._serialized_options = b'\272H\004r\002\020\001'
  _globals['_SUBMITJOBREQUEST'].fields_by_name['trace_parent']._loaded_options = None
  _globals['_SUBMITJOBREQUEST'].fields_by_name['trace_parent']._serialized_options = b'\272H\004r\002\020\001'
+  _globals['_SUBMITJOBREQUEST'].fields_by_name['legacy_tasks']._loaded_options = None
+  _globals['_SUBMITJOBREQUEST'].fields_by_name['legacy_tasks']._serialized_options = b'\030\001\272H\006\222\001\003\020\350\007'
+  _globals['_SUBMITJOBREQUEST']._loaded_options = None
+  _globals['_SUBMITJOBREQUEST']._serialized_options = b'\272H\247\001\032\244\001\n!submit_job_request.tasks_required\022$At least one task must be submitted.\032Y(this.tasks != null && this.tasks.task_groups.size() > 0) || this.legacy_tasks.size() > 0'
  _globals['_GETJOBREQUEST'].fields_by_name['job_id']._loaded_options = None
  _globals['_GETJOBREQUEST'].fields_by_name['job_id']._serialized_options = b'\272H\003\310\001\001'
  _globals['_GETJOBPROGRESSREQUEST'].fields_by_name['job_id']._loaded_options = None
@@ -69,36 +71,36 @@ if not _descriptor._USE_C_DESCRIPTORS:
  _globals['_CLONEJOBREQUEST'].fields_by_name['root_tasks_overrides']._serialized_options = b'\272H\007\222\001\004\010\001\020@'
  _globals['_CLONEJOBREQUEST'].fields_by_name['job_name']._loaded_options = None
  _globals['_CLONEJOBREQUEST'].fields_by_name['job_name']._serialized_options = b'\272H\004r\002\020\001'
-  _globals['_WORKFLOWDIAGRAMTHEME']._serialized_start=
-  _globals['_WORKFLOWDIAGRAMTHEME']._serialized_end=
+  _globals['_WORKFLOWDIAGRAMTHEME']._serialized_start=2238
+  _globals['_WORKFLOWDIAGRAMTHEME']._serialized_end=2450
  _globals['_SUBMITJOBREQUEST']._serialized_start=168
-  _globals['_SUBMITJOBREQUEST']._serialized_end=
-  _globals['_GETJOBREQUEST']._serialized_start=
-  _globals['_GETJOBREQUEST']._serialized_end=
-  _globals['_GETJOBPROGRESSREQUEST']._serialized_start=
-  _globals['_GETJOBPROGRESSREQUEST']._serialized_end=
-  _globals['_RETRYJOBREQUEST']._serialized_start=
-  _globals['_RETRYJOBREQUEST']._serialized_end=
-  _globals['_RETRYJOBRESPONSE']._serialized_start=
-  _globals['_RETRYJOBRESPONSE']._serialized_end=
-  _globals['_CANCELJOBREQUEST']._serialized_start=
-  _globals['_CANCELJOBREQUEST']._serialized_end=
-  _globals['_CANCELJOBRESPONSE']._serialized_start=
-  _globals['_CANCELJOBRESPONSE']._serialized_end=
-  _globals['_VISUALIZEJOBREQUEST']._serialized_start=
-  _globals['_VISUALIZEJOBREQUEST']._serialized_end=
-  _globals['_QUERYFILTERS']._serialized_start=
-  _globals['_QUERYFILTERS']._serialized_end=
-  _globals['_QUERYJOBSREQUEST']._serialized_start=
-  _globals['_QUERYJOBSREQUEST']._serialized_end=
-  _globals['_QUERYJOBSRESPONSE']._serialized_start=
-  _globals['_QUERYJOBSRESPONSE']._serialized_end=
-  _globals['_GETJOBPROTOTYPEREQUEST']._serialized_start=
-  _globals['_GETJOBPROTOTYPEREQUEST']._serialized_end=
-  _globals['_GETJOBPROTOTYPERESPONSE']._serialized_start=
-  _globals['_GETJOBPROTOTYPERESPONSE']._serialized_end=
-  _globals['_CLONEJOBREQUEST']._serialized_start=
-  _globals['_CLONEJOBREQUEST']._serialized_end=
-  _globals['_JOBSERVICE']._serialized_start=
-  _globals['_JOBSERVICE']._serialized_end=
+  _globals['_SUBMITJOBREQUEST']._serialized_end=630
+  _globals['_GETJOBREQUEST']._serialized_start=632
+  _globals['_GETJOBREQUEST']._serialized_end=694
+  _globals['_GETJOBPROGRESSREQUEST']._serialized_start=696
+  _globals['_GETJOBPROGRESSREQUEST']._serialized_end=766
+  _globals['_RETRYJOBREQUEST']._serialized_start=768
+  _globals['_RETRYJOBREQUEST']._serialized_end=832
+  _globals['_RETRYJOBRESPONSE']._serialized_start=834
+  _globals['_RETRYJOBRESPONSE']._serialized_end=904
+  _globals['_CANCELJOBREQUEST']._serialized_start=906
+  _globals['_CANCELJOBREQUEST']._serialized_end=971
+  _globals['_CANCELJOBRESPONSE']._serialized_start=973
+  _globals['_CANCELJOBRESPONSE']._serialized_end=992
+  _globals['_VISUALIZEJOBREQUEST']._serialized_start=995
+  _globals['_VISUALIZEJOBREQUEST']._serialized_end=1231
+  _globals['_QUERYFILTERS']._serialized_start=1234
+  _globals['_QUERYFILTERS']._serialized_end=1595
+  _globals['_QUERYJOBSREQUEST']._serialized_start=1597
+  _globals['_QUERYJOBSREQUEST']._serialized_end=1720
+  _globals['_QUERYJOBSRESPONSE']._serialized_start=1722
+  _globals['_QUERYJOBSRESPONSE']._serialized_end=1840
+  _globals['_GETJOBPROTOTYPEREQUEST']._serialized_start=1842
+  _globals['_GETJOBPROTOTYPEREQUEST']._serialized_end=1913
+  _globals['_GETJOBPROTOTYPERESPONSE']._serialized_start=1915
+  _globals['_GETJOBPROTOTYPERESPONSE']._serialized_end=2034
+  _globals['_CLONEJOBREQUEST']._serialized_start=2037
+  _globals['_CLONEJOBREQUEST']._serialized_end=2235
+  _globals['_JOBSERVICE']._serialized_start=2453
+  _globals['_JOBSERVICE']._serialized_end=3124
# @@protoc_insertion_point(module_scope)
tilebox/workflows/workflows/v1/job_pb2.pyi
CHANGED
@@ -26,18 +26,18 @@ WORKFLOW_DIAGRAM_THEME_CONSOLE_LIGHT: WorkflowDiagramTheme
WORKFLOW_DIAGRAM_THEME_CONSOLE_DARK: WorkflowDiagramTheme

class SubmitJobRequest(_message.Message):
-    __slots__ = ("
-    LEGACY_TASKS_FIELD_NUMBER: _ClassVar[int]
+    __slots__ = ("tasks", "job_name", "trace_parent", "automation_id", "legacy_tasks")
    TASKS_FIELD_NUMBER: _ClassVar[int]
    JOB_NAME_FIELD_NUMBER: _ClassVar[int]
    TRACE_PARENT_FIELD_NUMBER: _ClassVar[int]
    AUTOMATION_ID_FIELD_NUMBER: _ClassVar[int]
-
+    LEGACY_TASKS_FIELD_NUMBER: _ClassVar[int]
    tasks: _core_pb2.TaskSubmissions
    job_name: str
    trace_parent: str
    automation_id: _id_pb2.ID
-
+    legacy_tasks: _containers.RepeatedCompositeFieldContainer[_core_pb2.SingleTaskSubmission]
+    def __init__(self, tasks: _Optional[_Union[_core_pb2.TaskSubmissions, _Mapping]] = ..., job_name: _Optional[str] = ..., trace_parent: _Optional[str] = ..., automation_id: _Optional[_Union[_id_pb2.ID, _Mapping]] = ..., legacy_tasks: _Optional[_Iterable[_Union[_core_pb2.SingleTaskSubmission, _Mapping]]] = ...) -> None: ...

class GetJobRequest(_message.Message):
    __slots__ = ("job_id",)
@@ -86,18 +86,20 @@ class VisualizeJobRequest(_message.Message):
    def __init__(self, job_id: _Optional[_Union[_id_pb2.ID, _Mapping]] = ..., render_options: _Optional[_Union[_diagram_pb2.RenderOptions, _Mapping]] = ..., theme: _Optional[_Union[WorkflowDiagramTheme, str]] = ..., include_job_name: bool = ...) -> None: ...

class QueryFilters(_message.Message):
-    __slots__ = ("time_interval", "id_interval", "automation_ids", "states", "name")
+    __slots__ = ("time_interval", "id_interval", "automation_ids", "states", "name", "task_states")
    TIME_INTERVAL_FIELD_NUMBER: _ClassVar[int]
    ID_INTERVAL_FIELD_NUMBER: _ClassVar[int]
    AUTOMATION_IDS_FIELD_NUMBER: _ClassVar[int]
    STATES_FIELD_NUMBER: _ClassVar[int]
    NAME_FIELD_NUMBER: _ClassVar[int]
+    TASK_STATES_FIELD_NUMBER: _ClassVar[int]
    time_interval: _query_pb2.TimeInterval
    id_interval: _query_pb2.IDInterval
    automation_ids: _containers.RepeatedCompositeFieldContainer[_id_pb2.ID]
    states: _containers.RepeatedScalarFieldContainer[_core_pb2.JobState]
    name: str
-
+    task_states: _containers.RepeatedScalarFieldContainer[_core_pb2.TaskState]
+    def __init__(self, time_interval: _Optional[_Union[_query_pb2.TimeInterval, _Mapping]] = ..., id_interval: _Optional[_Union[_query_pb2.IDInterval, _Mapping]] = ..., automation_ids: _Optional[_Iterable[_Union[_id_pb2.ID, _Mapping]]] = ..., states: _Optional[_Iterable[_Union[_core_pb2.JobState, str]]] = ..., name: _Optional[str] = ..., task_states: _Optional[_Iterable[_Union[_core_pb2.TaskState, str]]] = ...) -> None: ...

class QueryJobsRequest(_message.Message):
    __slots__ = ("filters", "page")
tilebox/workflows/workflows/v1/task_pb2.py
CHANGED
@@ -28,7 +28,7 @@ from tilebox.datasets.tilebox.v1 import id_pb2 as tilebox_dot_v1_dot_id__pb2
from tilebox.workflows.workflows.v1 import core_pb2 as workflows_dot_v1_dot_core__pb2


-
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17workflows/v1/task.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x13tilebox/v1/id.proto\x1a\x17workflows/v1/core.proto\"\xa6\x01\n\x0fNextTaskRequest\x12\x46\n\rcomputed_task\x18\x01 \x01(\x0b\x32\x1a.workflows.v1.ComputedTaskB\x05\xaa\x01\x02\x08\x01R\x0c\x63omputedTask\x12K\n\x10next_task_to_run\x18\x02 \x01(\x0b\x32\x1b.workflows.v1.NextTaskToRunB\x05\xaa\x01\x02\x08\x01R\rnextTaskToRun\"{\n\rNextTaskToRun\x12*\n\x0c\x63luster_slug\x18\x01 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x0b\x63lusterSlug\x12>\n\x0bidentifiers\x18\x02 \x03(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\x0bidentifiers\"\
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17workflows/v1/task.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x13tilebox/v1/id.proto\x1a\x17workflows/v1/core.proto\"\xa6\x01\n\x0fNextTaskRequest\x12\x46\n\rcomputed_task\x18\x01 \x01(\x0b\x32\x1a.workflows.v1.ComputedTaskB\x05\xaa\x01\x02\x08\x01R\x0c\x63omputedTask\x12K\n\x10next_task_to_run\x18\x02 \x01(\x0b\x32\x1b.workflows.v1.NextTaskToRunB\x05\xaa\x01\x02\x08\x01R\rnextTaskToRun\"{\n\rNextTaskToRun\x12*\n\x0c\x63luster_slug\x18\x01 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x0b\x63lusterSlug\x12>\n\x0bidentifiers\x18\x02 \x03(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\x0bidentifiers\"\xaa\x02\n\x0c\x43omputedTask\x12&\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x02id\x12\x18\n\x07\x64isplay\x18\x02 \x01(\tR\x07\x64isplay\x12:\n\tsub_tasks\x18\x05 \x01(\x0b\x32\x1d.workflows.v1.TaskSubmissionsR\x08subTasks\x12\x41\n\x10progress_updates\x18\x04 \x03(\x0b\x32\x16.workflows.v1.ProgressR\x0fprogressUpdates\x12Y\n\x10legacy_sub_tasks\x18\x03 \x03(\x0b\x32\".workflows.v1.SingleTaskSubmissionB\x0b\x18\x01\xbaH\x06\x92\x01\x03\x10\xe8\x07R\x0elegacySubTasks\"g\n\x0eIdlingResponse\x12U\n\x19suggested_idling_duration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x17suggestedIdlingDuration\"\x93\x01\n\x10NextTaskResponse\x12/\n\tnext_task\x18\x01 \x01(\x0b\x32\x12.workflows.v1.TaskR\x08nextTask\x12\x34\n\x06idling\x18\x02 \x01(\x0b\x32\x1c.workflows.v1.IdlingResponseR\x06idling:\x18\xbaH\x15\"\x13\n\tnext_task\n\x06idling\"\xc0\x01\n\x11TaskFailedRequest\x12/\n\x07task_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x06taskId\x12\x18\n\x07\x64isplay\x18\x02 \x01(\tR\x07\x64isplay\x12\x1d\n\ncancel_job\x18\x03 \x01(\x08R\tcancelJob\x12\x41\n\x10progress_updates\x18\x04 \x03(\x0b\x32\x16.workflows.v1.ProgressR\x0fprogressUpdates\"B\n\x11TaskStateResponse\x12-\n\x05state\x18\x01 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\"\x87\x01\n\x10TaskLeaseRequest\x12/\n\x07task_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x06taskId\x12\x42\n\x0frequested_lease\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0erequestedLease2\xf4\x01\n\x0bTaskService\x12I\n\x08NextTask\x12\x1d.workflows.v1.NextTaskRequest\x1a\x1e.workflows.v1.NextTaskResponse\x12N\n\nTaskFailed\x12\x1f.workflows.v1.TaskFailedRequest\x1a\x1f.workflows.v1.TaskStateResponse\x12J\n\x0f\x45xtendTaskLease\x12\x1e.workflows.v1.TaskLeaseRequest\x1a\x17.workflows.v1.TaskLeaseBs\n\x10\x63om.workflows.v1B\tTaskProtoP\x01\xa2\x02\x03WXX\xaa\x02\x0cWorkflows.V1\xca\x02\x0cWorkflows\\V1\xe2\x02\x18Workflows\\V1\\GPBMetadata\xea\x02\rWorkflows::V1\x92\x03\x02\x08\x02\x62\x08\x65\x64itionsp\xe8\x07')


_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -45,7 +45,7 @@ if not _descriptor._USE_C_DESCRIPTORS:
  _globals['_COMPUTEDTASK'].fields_by_name['id']._loaded_options = None
  _globals['_COMPUTEDTASK'].fields_by_name['id']._serialized_options = b'\272H\003\310\001\001'
  _globals['_COMPUTEDTASK'].fields_by_name['legacy_sub_tasks']._loaded_options = None
-  _globals['_COMPUTEDTASK'].fields_by_name['legacy_sub_tasks']._serialized_options = b'\030\001\272H\
+  _globals['_COMPUTEDTASK'].fields_by_name['legacy_sub_tasks']._serialized_options = b'\030\001\272H\006\222\001\003\020\350\007'
  _globals['_NEXTTASKRESPONSE']._loaded_options = None
  _globals['_NEXTTASKRESPONSE']._serialized_options = b'\272H\025\"\023\n\tnext_task\n\006idling'
  _globals['_TASKFAILEDREQUEST'].fields_by_name['task_id']._loaded_options = None
@@ -57,17 +57,17 @@ if not _descriptor._USE_C_DESCRIPTORS:
  _globals['_NEXTTASKTORUN']._serialized_start=317
  _globals['_NEXTTASKTORUN']._serialized_end=440
  _globals['_COMPUTEDTASK']._serialized_start=443
-  _globals['_COMPUTEDTASK']._serialized_end=
-  _globals['_IDLINGRESPONSE']._serialized_start=
-  _globals['_IDLINGRESPONSE']._serialized_end=
-  _globals['_NEXTTASKRESPONSE']._serialized_start=
-  _globals['_NEXTTASKRESPONSE']._serialized_end=
-  _globals['_TASKFAILEDREQUEST']._serialized_start=
-  _globals['_TASKFAILEDREQUEST']._serialized_end=
-  _globals['_TASKSTATERESPONSE']._serialized_start=
-  _globals['_TASKSTATERESPONSE']._serialized_end=
-  _globals['_TASKLEASEREQUEST']._serialized_start=
-  _globals['_TASKLEASEREQUEST']._serialized_end=
-  _globals['_TASKSERVICE']._serialized_start=
-  _globals['_TASKSERVICE']._serialized_end=
+  _globals['_COMPUTEDTASK']._serialized_end=741
+  _globals['_IDLINGRESPONSE']._serialized_start=743
+  _globals['_IDLINGRESPONSE']._serialized_end=846
+  _globals['_NEXTTASKRESPONSE']._serialized_start=849
+  _globals['_NEXTTASKRESPONSE']._serialized_end=996
+  _globals['_TASKFAILEDREQUEST']._serialized_start=999
+  _globals['_TASKFAILEDREQUEST']._serialized_end=1191
+  _globals['_TASKSTATERESPONSE']._serialized_start=1193
+  _globals['_TASKSTATERESPONSE']._serialized_end=1259
+  _globals['_TASKLEASEREQUEST']._serialized_start=1262
+  _globals['_TASKLEASEREQUEST']._serialized_end=1397
+  _globals['_TASKSERVICE']._serialized_start=1400
+  _globals['_TASKSERVICE']._serialized_end=1644
# @@protoc_insertion_point(module_scope)
tilebox/workflows/workflows/v1/task_pb2.pyi
CHANGED
@@ -27,18 +27,18 @@ class NextTaskToRun(_message.Message):
    def __init__(self, cluster_slug: _Optional[str] = ..., identifiers: _Optional[_Iterable[_Union[_core_pb2.TaskIdentifier, _Mapping]]] = ...) -> None: ...

class ComputedTask(_message.Message):
-    __slots__ = ("id", "display", "
+    __slots__ = ("id", "display", "sub_tasks", "progress_updates", "legacy_sub_tasks")
    ID_FIELD_NUMBER: _ClassVar[int]
    DISPLAY_FIELD_NUMBER: _ClassVar[int]
-    LEGACY_SUB_TASKS_FIELD_NUMBER: _ClassVar[int]
    SUB_TASKS_FIELD_NUMBER: _ClassVar[int]
    PROGRESS_UPDATES_FIELD_NUMBER: _ClassVar[int]
+    LEGACY_SUB_TASKS_FIELD_NUMBER: _ClassVar[int]
    id: _id_pb2.ID
    display: str
-    legacy_sub_tasks: _containers.RepeatedCompositeFieldContainer[_core_pb2.SingleTaskSubmission]
    sub_tasks: _core_pb2.TaskSubmissions
    progress_updates: _containers.RepeatedCompositeFieldContainer[_core_pb2.Progress]
-
+    legacy_sub_tasks: _containers.RepeatedCompositeFieldContainer[_core_pb2.SingleTaskSubmission]
+    def __init__(self, id: _Optional[_Union[_id_pb2.ID, _Mapping]] = ..., display: _Optional[str] = ..., sub_tasks: _Optional[_Union[_core_pb2.TaskSubmissions, _Mapping]] = ..., progress_updates: _Optional[_Iterable[_Union[_core_pb2.Progress, _Mapping]]] = ..., legacy_sub_tasks: _Optional[_Iterable[_Union[_core_pb2.SingleTaskSubmission, _Mapping]]] = ...) -> None: ...

class IdlingResponse(_message.Message):
    __slots__ = ("suggested_idling_duration",)
{tilebox_workflows-0.48.0.dist-info → tilebox_workflows-0.49.0b1.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
tilebox/workflows/__init__.py,sha256=D6NXvTUjWv0YWN5tYD09p1cFkGs8nGrZ9V7agtjvp8c,636
-tilebox/workflows/cache.py,sha256=
+tilebox/workflows/cache.py,sha256=atQfB7VMwSJmnf3ip8Wodr_tZRqC97E3ph4LaMTNapc,13508
tilebox/workflows/client.py,sha256=L8MZXZ-yDutu91sD1am24yfANLCaasRXO8ITJHg5UgE,5680
-tilebox/workflows/data.py,sha256=
+tilebox/workflows/data.py,sha256=6KZ5zTtMi7JFJtx-ZDmCTVu5jewjUd7-FhqjBFIIE0g,30012
tilebox/workflows/interceptors.py,sha256=yfo6pCxUdhb0EC1J506k1ge4S_BAl83TAFYxCcxu8sU,1799
-tilebox/workflows/task.py,sha256=
+tilebox/workflows/task.py,sha256=_hv1l3EZbS0HyQq7n8UxLyfyXHvxR4qzo-9anyPplP4,22236
tilebox/workflows/timeseries.py,sha256=wjzYaym8Z3ymdnLhfa_7UTQJHm41XLPMyFxFoe3dKaw,8954
tilebox/workflows/automations/__init__.py,sha256=HDyTj_H0Z-w9m0noWiXAcrvEylXmSpy8pgrVUWWqjbg,226
tilebox/workflows/automations/client.py,sha256=EJZHcRXfZyU0NAH1PZuA9fGuYQwQKnz03nyz6lJqtrc,5622
@@ -16,32 +16,32 @@ tilebox/workflows/clusters/service.py,sha256=4ikKwl_69OW8M1yOlsx0QTlJURXfAAbAh-J
tilebox/workflows/formatting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
tilebox/workflows/formatting/job.py,sha256=fKhxHrx6mngfXSwtvqo6vzlpYoS5Kd10kJ45b4jmAEI,14097
tilebox/workflows/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tilebox/workflows/jobs/client.py,sha256=
+tilebox/workflows/jobs/client.py,sha256=Ws2bX2pUL2V-OsDtnBwo1jSQZPZTGtkMz9BhBHAOegg,12250
tilebox/workflows/jobs/service.py,sha256=HpCg9v2kUM4rYgDSUtezpJ_yU9VhCHqX8EBboLSwKKE,3493
tilebox/workflows/observability/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
tilebox/workflows/observability/logging.py,sha256=JLXvuC2Xky2HjFfyxEYAlFKYI9kF2jepBRrdHnmKCLs,16430
tilebox/workflows/observability/tracing.py,sha256=96SqqPrXI0fB_vEIlHLKjErIsnHEeBQNBGCpROdYl0Q,11646
tilebox/workflows/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tilebox/workflows/runner/task_runner.py,sha256=
+tilebox/workflows/runner/task_runner.py,sha256=PP5QRgbDrti8xM6cOcuMZ-oOoBjYmPa5H04Glj-jdns,28622
tilebox/workflows/runner/task_service.py,sha256=C1bgoSjXBW4KUKR-_GQSplVa0XojscngOMQfTI5IQ6E,2837
tilebox/workflows/workflows/v1/automation_pb2.py,sha256=Mf9D84ujlL-vQ0gkHwQh9_yvZKsJ0I7dsIAXL_fczqo,9179
tilebox/workflows/workflows/v1/automation_pb2.pyi,sha256=LrucnhtVCSDfN88ZfJ2ruHvQq1Kc6CxMRAxKb_Q9kG4,5838
tilebox/workflows/workflows/v1/automation_pb2_grpc.py,sha256=9-Jy1rzk8nTr1Vj5co46lsY1uLjf2-PTzSnTxcvR60M,18859
-tilebox/workflows/workflows/v1/core_pb2.py,sha256
-tilebox/workflows/workflows/v1/core_pb2.pyi,sha256
+tilebox/workflows/workflows/v1/core_pb2.py,sha256=UN5cDPrpFd6MtPMTEuy3vsqKNiA1Sk3hVaYtBQFLHQ4,14849
+tilebox/workflows/workflows/v1/core_pb2.pyi,sha256=RIylQG4-t65LbZeMaN3NNiZAIatG9HTpzG9jUlI2z-0,13211
tilebox/workflows/workflows/v1/core_pb2_grpc.py,sha256=xYOs94SXiNYAlFodACnsXW5QovLsHY5tCk3p76RH5Zc,158
tilebox/workflows/workflows/v1/diagram_pb2.py,sha256=Wh2UGR9Fm97z9o0dm5zPSYjkiOmzJGS_Dds_SIX_DeQ,3536
tilebox/workflows/workflows/v1/diagram_pb2.pyi,sha256=7IJEm-5M_nt5sziFNjK2DgW7Nwl9Y_fRpR_TM5EE6DQ,1477
tilebox/workflows/workflows/v1/diagram_pb2_grpc.py,sha256=jg4mBPDwHMbpfRI0magrHvmdJ20dQE5xFHTt-5MdW1Y,2780
-tilebox/workflows/workflows/v1/job_pb2.py,sha256=
-tilebox/workflows/workflows/v1/job_pb2.pyi,sha256=
+tilebox/workflows/workflows/v1/job_pb2.py,sha256=7AW508SjGBE9-8t8KTFNMUwp3iZ1NWvc8pXezMHJwEU,11735
+tilebox/workflows/workflows/v1/job_pb2.pyi,sha256=UpPaqZuvyZOaGlxRoyp3t5ZWuibjeMR4P7bpT0XZWwo,7558
tilebox/workflows/workflows/v1/job_pb2_grpc.py,sha256=pAqqMZpXQ82NvC3qIrWgjVmg6_J9AIdDlaxjF1BNUtc,17195
-tilebox/workflows/workflows/v1/task_pb2.py,sha256=
-tilebox/workflows/workflows/v1/task_pb2.pyi,sha256=
+tilebox/workflows/workflows/v1/task_pb2.py,sha256=L_wlbqWzvPFkr1NYzPu9AbjORDIDZaT9BFHv5xLhRO8,6851
+tilebox/workflows/workflows/v1/task_pb2.pyi,sha256=OCDshWzwZn9x9HYx-uPQIsLPcP1p27ePk88PuN--G4Q,4488
tilebox/workflows/workflows/v1/task_pb2_grpc.py,sha256=nkQjtsDiql1ofbSxhDXbPkySd1sYt44uEaSl2Z15jSg,7661
tilebox/workflows/workflows/v1/workflows_pb2.py,sha256=rGwIydUP4osLD_fG3QmHaqs42mKmRoCqtwihJTLJ314,3990
tilebox/workflows/workflows/v1/workflows_pb2.pyi,sha256=qiDQUM2Vlu_izQvaSDlK5GqKYGsNJgtm0bo9zW-qNjU,1633
tilebox/workflows/workflows/v1/workflows_pb2_grpc.py,sha256=36Vp_TIxtS-MRBZHECf84fHFbnrm3-UizCsMOlz7qfo,8529
-tilebox_workflows-0.
-tilebox_workflows-0.
-tilebox_workflows-0.
+tilebox_workflows-0.49.0b1.dist-info/METADATA,sha256=_jMRxCVcVmiI5dP_VHsDsTepUD7fNTKiE2NxrP2_va4,3950
+tilebox_workflows-0.49.0b1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+tilebox_workflows-0.49.0b1.dist-info/RECORD,,
{tilebox_workflows-0.48.0.dist-info → tilebox_workflows-0.49.0b1.dist-info}/WHEEL
File without changes
|