tilebox-workflows 0.43.0__py3-none-any.whl → 0.45.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tilebox/workflows/__init__.py +2 -1
- tilebox/workflows/automations/client.py +3 -3
- tilebox/workflows/data.py +200 -46
- tilebox/workflows/formatting/__init__.py +0 -0
- tilebox/workflows/formatting/job.py +402 -0
- tilebox/workflows/jobs/client.py +41 -15
- tilebox/workflows/jobs/service.py +15 -6
- tilebox/workflows/runner/task_runner.py +54 -23
- tilebox/workflows/runner/task_service.py +4 -2
- tilebox/workflows/task.py +95 -14
- tilebox/workflows/workflows/v1/automation_pb2.py +22 -22
- tilebox/workflows/workflows/v1/automation_pb2.pyi +2 -2
- tilebox/workflows/workflows/v1/core_pb2.py +54 -30
- tilebox/workflows/workflows/v1/core_pb2.pyi +89 -16
- tilebox/workflows/workflows/v1/job_pb2.py +36 -38
- tilebox/workflows/workflows/v1/job_pb2.pyi +17 -17
- tilebox/workflows/workflows/v1/job_pb2_grpc.py +43 -0
- tilebox/workflows/workflows/v1/task_pb2.py +16 -16
- tilebox/workflows/workflows/v1/task_pb2.pyi +8 -6
- {tilebox_workflows-0.43.0.dist-info → tilebox_workflows-0.45.0.dist-info}/METADATA +3 -1
- {tilebox_workflows-0.43.0.dist-info → tilebox_workflows-0.45.0.dist-info}/RECORD +22 -20
- {tilebox_workflows-0.43.0.dist-info → tilebox_workflows-0.45.0.dist-info}/WHEEL +0 -0
tilebox/workflows/runner/task_runner.py CHANGED

@@ -28,13 +28,19 @@ from _tilebox.grpc.channel import open_channel
 from _tilebox.grpc.error import InternalServerError
 from tilebox.datasets.sync.dataset import DatasetClient
 from tilebox.workflows.cache import JobCache
-from tilebox.workflows.data import ComputedTask, Idling, NextTaskToRun, …
+from tilebox.workflows.data import ComputedTask, Idling, NextTaskToRun, ProgressIndicator, Task, TaskLease
 from tilebox.workflows.interceptors import Interceptor, InterceptorType
 from tilebox.workflows.observability.logging import get_logger
 from tilebox.workflows.observability.tracing import WorkflowTracer
 from tilebox.workflows.runner.task_service import TaskService
 from tilebox.workflows.task import ExecutionContext as ExecutionContextBase
-from tilebox.workflows.task import …
+from tilebox.workflows.task import (
+    FutureTask,
+    ProgressUpdate,
+    RunnerContext,
+    TaskMeta,
+    merge_future_tasks_to_submissions,
+)
 from tilebox.workflows.task import Task as TaskInstance

 # The time we give a task to finish it's execution when a runner shutdown is requested before we forcefully stop it

@@ -56,7 +62,7 @@ _FALLBACK_POLL_INTERVAL = timedelta(seconds=5)
 _FALLBACK_JITTER_INTERVAL = timedelta(seconds=5)

 # Maximum number of progress bars per task, mirroring the limit on the server side
-
+_MAX_TASK_PROGRESS_INDICATORS = 1000

 WrappedFnReturnT = TypeVar("WrappedFnReturnT")
@@ -217,7 +223,7 @@ class _GracefulShutdown:
         if self._task is not None:
             progress = []
             if self._context is not None:
-                progress = …
+                progress = _finalize_mutable_progress_trackers(self._context._progress_indicators)  # noqa: SLF001
             self._service.task_failed(
                 self._task,
                 RunnerShutdown("Task was interrupted"),

@@ -431,7 +437,7 @@ class TaskRunner:

             task_failed_retry = _retry_backoff(self._service.task_failed, stop=shutdown_context.stop_if_shutting_down())
             cancel_job = True
-            progress_updates = …
+            progress_updates = _finalize_mutable_progress_trackers(context._progress_indicators)  # noqa: SLF001
             task_failed_retry(task, e, cancel_job, progress_updates)

         return None

@@ -489,11 +495,13 @@ class TaskRunner:
         computed_task = ComputedTask(
             id=task.id,
             display=task.display,
-            sub_tasks=…
-
-            for …
-
-
+            sub_tasks=merge_future_tasks_to_submissions(
+                context._sub_tasks,  # noqa: SLF001
+                # if not otherwise specified, we use the cluster of the runner for all subtasks, which is also
+                # the cluster of the parent task
+                self.tasks_to_run.cluster_slug,
+            ),
+            progress_updates=_finalize_mutable_progress_trackers(context._progress_indicators),  # noqa: SLF001
         )

         next_task_retry = _retry_backoff(self._service.next_task, stop=shutdown_context.stop_if_shutting_down())
@@ -513,20 +521,35 @@ class ExecutionContext(ExecutionContextBase):
         self.current_task = task
         self.job_cache = job_cache
         self._sub_tasks: list[FutureTask] = []
-        self.…
+        self._progress_indicators: dict[str | None, ProgressUpdate] = {}

     def submit_subtask(
         self,
         task: TaskInstance,
-        depends_on: list[FutureTask] | None = None,
+        depends_on: FutureTask | list[FutureTask] | None = None,
         cluster: str | None = None,
         max_retries: int = 0,
     ) -> FutureTask:
+        dependencies: list[int] = []
+
+        if depends_on is None:
+            depends_on = []
+        elif isinstance(depends_on, FutureTask):
+            depends_on = [depends_on]
+        elif not isinstance(depends_on, list):
+            raise TypeError(f"Invalid dependency. Expected FutureTask or list[FutureTask], got {type(depends_on)}")
+
+        for dep in depends_on:
+            if not isinstance(dep, FutureTask):
+                raise TypeError(f"Invalid dependency. Expected FutureTask, got {type(dep)}")
+            if dep.index >= len(self._sub_tasks):
+                raise ValueError(f"Dependent task {dep.index} does not exist")
+            dependencies.append(dep.index)
         subtask = FutureTask(
             index=len(self._sub_tasks),
             task=task,
             # cyclic dependencies are not allowed, they are detected by the server and will result in an error
-            depends_on=…
+            depends_on=dependencies,
             cluster=cluster,
             max_retries=max_retries,
         )

@@ -534,9 +557,15 @@ class ExecutionContext(ExecutionContextBase):
         return subtask

     def submit_subtasks(
-        self, …
+        self,
+        tasks: Sequence[TaskInstance],
+        cluster: str | None = None,
+        max_retries: int = 0,
+        depends_on: FutureTask | list[FutureTask] | None = None,
     ) -> list[FutureTask]:
-        return [ …
+        return [
+            self.submit_subtask(task, cluster=cluster, max_retries=max_retries, depends_on=depends_on) for task in tasks
+        ]

     def submit_batch(
         self, tasks: Sequence[TaskInstance], cluster: str | None = None, max_retries: int = 0
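With 0.45.0, `depends_on` accepts either a single `FutureTask` or a list of them, and `submit_subtasks` forwards it per task. A minimal usage sketch (assuming the usual tilebox-workflows pattern of `Task` subclasses with an `execute` method; the task classes here are hypothetical):

```python
from tilebox.workflows import ExecutionContext, Task

class ProcessScene(Task):
    scene_id: str

    def execute(self, context: ExecutionContext) -> None:
        ...

class MergeScenes(Task):
    def execute(self, context: ExecutionContext) -> None:
        ...

class MosaicJob(Task):
    def execute(self, context: ExecutionContext) -> None:
        # fan out: one subtask per scene
        scenes = [context.submit_subtask(ProcessScene(scene_id=s)) for s in ("S2A_001", "S2A_002")]
        # fan in: depends_on now takes the list directly, or a single
        # FutureTask without wrapping it in a list
        context.submit_subtask(MergeScenes(), depends_on=scenes)
```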
@@ -548,20 +577,20 @@ class ExecutionContext(ExecutionContextBase):
         )
         return self.submit_subtasks(tasks, cluster, max_retries)

-    def progress(self, label: str | None) -> ProgressUpdate:
+    def progress(self, label: str | None = None) -> ProgressUpdate:
         if label == "":
             label = None

-        if label in self.…
-            return self.…
+        if label in self._progress_indicators:
+            return self._progress_indicators[label]

         # this is our server side limit to prevent mistakes / abuse, so let's not allow to go beyond that already
         # client side
-        if len(self.…
-            raise ValueError(f"Cannot create more than {…
+        if len(self._progress_indicators) > _MAX_TASK_PROGRESS_INDICATORS:
+            raise ValueError(f"Cannot create more than {_MAX_TASK_PROGRESS_INDICATORS} progress indicators per task.")

         progress_bar = ProgressUpdate(label)
-        self.…
+        self._progress_indicators[label] = progress_bar
         return progress_bar

     @property
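The reworked `progress` method above makes the label optional and caches one tracker per label. A short sketch of the behavior this hunk guarantees (the task class is hypothetical):

```python
from tilebox.workflows import ExecutionContext, Task

class IngestGranules(Task):
    def execute(self, context: ExecutionContext) -> None:
        tracker = context.progress("granules")  # label is optional as of 0.45.0
        tracker.add(120)  # register 120 units of total work to be done

        # the same label returns the same ProgressUpdate instance, so totals accumulate
        assert context.progress("granules") is tracker

        # an empty label is normalized to None, i.e. the unnamed default indicator
        assert context.progress("") is context.progress()
```

Creating more than 1000 distinct labels in one task raises a ValueError client-side, mirroring the server limit.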
@@ -577,8 +606,10 @@ class ExecutionContext(ExecutionContextBase):
         return client.dataset(dataset_id)


-def …
-
+def _finalize_mutable_progress_trackers(
+    progress_bars: dict[str | None, ProgressUpdate],
+) -> list[ProgressIndicator]:
+    return [ProgressIndicator(label, bar._total, bar._done) for label, bar in progress_bars.items()]  # noqa: SLF001


 def _execute(task: TaskInstance, context: ExecutionContext, additional_interceptors: list[Interceptor]) -> None:
tilebox/workflows/runner/task_service.py CHANGED

@@ -8,7 +8,7 @@ from tilebox.workflows.data import (
     ComputedTask,
     Idling,
     NextTaskToRun,
-
+    ProgressIndicator,
     Task,
     TaskLease,
     uuid_to_uuid_message,

@@ -48,7 +48,9 @@ class TaskService:
             return Idling.from_message(response.idling)
         return None

-    def task_failed(…
+    def task_failed(
+        self, task: Task, error: Exception, cancel_job: bool, progress_updates: list[ProgressIndicator]
+    ) -> None:
         # job ouptut is limited to 1KB, so truncate the error message if necessary
         error_message = repr(error)[: (1024 - len(task.display or "None") - 1)]
         display = f"{task.display}" if error_message == "" else f"{task.display}\n{error_message}"
tilebox/workflows/task.py CHANGED

@@ -4,15 +4,16 @@ import json
 import typing
 from abc import ABC, ABCMeta, abstractmethod
 from base64 import b64decode, b64encode
+from collections import defaultdict
 from collections.abc import Sequence
 from dataclasses import dataclass, field, fields, is_dataclass
 from types import NoneType, UnionType
-from typing import Any, cast, get_args, get_origin
+from typing import Any, Generic, TypeVar, cast, get_args, get_origin

 # from python 3.11 onwards this is available as typing.dataclass_transform:
 from typing_extensions import dataclass_transform

-from tilebox.workflows.data import RunnerContext, TaskIdentifier, …
+from tilebox.workflows.data import RunnerContext, TaskIdentifier, TaskSubmissionGroup, TaskSubmissions

 META_ATTR = "__tilebox_task_meta__"  # the name of the attribute we use to store task metadata on the class

@@ -236,16 +237,96 @@ class FutureTask:
     def display(self) -> str:
         return self.task.__class__.__name__

-
-
-
-
-
-
-
-
+
+_T = TypeVar("_T")
+
+
+class _FastIndexLookupList(Generic[_T]):
+    """A list that provides fast lookup by index."""
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.values = []
+        self._index_lookup: dict[_T, int] = {}
+
+    def __contains__(self, key: _T) -> bool:
+        return key in self._index_lookup
+
+    def __getitem__(self, key: _T) -> _T:
+        index = self._index_lookup[key]
+        return self.values[index]
+
+    def append_if_unique(self, value: _T) -> int:
+        if value in self._index_lookup:
+            return self._index_lookup[value]
+        index = len(self.values)
+        self.values.append(value)
+        self._index_lookup[value] = index
+        return index
+
+
+def merge_future_tasks_to_submissions(future_tasks: list[FutureTask], fallback_cluster: str) -> TaskSubmissions | None:
+    if len(future_tasks) == 0:
+        return None
+
+    dependants = defaultdict(set)
+    for task in future_tasks:
+        for dep in task.depends_on:
+            dependants[dep].add(task.index)
+
+    dependants = {k: frozenset(v) for k, v in dependants.items()}
+
+    # we keep track of which task ends up in which group, so we can convert task dependencies to group dependencies
+    task_index_to_group = {}
+
+    group_keys = _FastIndexLookupList[_TaskGroupUniqueKey]()
+    groups: list[TaskSubmissionGroup] = []
+    # even though in python dicts preserve insertion order, we explicitly keep a list and an explicit lookup dict, to
+    # make the intent clear. This also allows us to more easily port this code to Tilebox clients in other languages.
+    cluster_slugs = _FastIndexLookupList[str]()
+    identifiers = _FastIndexLookupList[TaskIdentifier]()
+    displays = _FastIndexLookupList[str]()
+
+    for task in future_tasks:
+        group_key = _TaskGroupUniqueKey(
+            dependencies=frozenset(task.depends_on),
+            dependants=dependants.get(task.index, frozenset()),
+        )
+        group_index = group_keys.append_if_unique(group_key)
+        if group_index == len(groups):  # it was a new unique group
+            groups.append(TaskSubmissionGroup(dependencies_on_other_groups=task.depends_on))
+        task_index_to_group[task.index] = group_index
+
+    for i in range(len(groups)):
+        group = groups[i]
+        group.dependencies_on_other_groups = list(
+            # convert the task dependencies to group dependencies, deduplicate and sort them
+            {task_index_to_group[dep] for dep in group.dependencies_on_other_groups}
         )

+    for task in future_tasks:
+        group_index = task_index_to_group[task.index]
+        group = groups[group_index]
+
+        group.inputs.append(task.input())
+        group.identifier_pointers.append(identifiers.append_if_unique(task.identifier()))
+        group.cluster_slug_pointers.append(cluster_slugs.append_if_unique(task.cluster or fallback_cluster))
+        group.display_pointers.append(displays.append_if_unique(task.display()))
+        group.max_retries_values.append(task.max_retries)
+
+    return TaskSubmissions(
+        task_groups=groups,
+        cluster_slug_lookup=cluster_slugs.values,
+        identifier_lookup=identifiers.values,
+        display_lookup=displays.values,
+    )
+
+
+@dataclass(frozen=True, unsafe_hash=True)
+class _TaskGroupUniqueKey:
+    dependencies: frozenset[int]
+    dependants: frozenset[int]
+

 class ProgressUpdate:
     def __init__(self, label: str | None) -> None:
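The `merge_future_tasks_to_submissions` function added above batches a parent's subtasks into `TaskSubmissionGroup`s: two subtasks fall into the same group exactly when they share both their set of dependencies and their set of dependants. A self-contained sketch of just that grouping rule (simplified, not the library code):

```python
from collections import defaultdict

# hypothetical DAG: task 0 fans out to tasks 1..3, task 4 merges them
depends_on = {0: [], 1: [0], 2: [0], 3: [0], 4: [1, 2, 3]}

# invert the edges to know each task's dependants
dependants: defaultdict[int, set[int]] = defaultdict(set)
for task, deps in depends_on.items():
    for dep in deps:
        dependants[dep].add(task)

# tasks sharing (dependencies, dependants) collapse into one group
groups: defaultdict[tuple, list[int]] = defaultdict(list)
for task, deps in depends_on.items():
    key = (frozenset(deps), frozenset(dependants[task]))
    groups[key].append(task)

print(list(groups.values()))  # [[0], [1, 2, 3], [4]] -> the fan-out collapses to one group
```

Within each group, inputs stay inline while identifiers, display names and cluster slugs are deduplicated into shared lookup tables via `append_if_unique`, so a wide fan-out of one task class serializes its identifier and display string only once.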
@@ -254,10 +335,10 @@ class ProgressUpdate:
         self._done = 0

     def add(self, count: int) -> None:
-        """Add a given amount of total work to be done to the progress …
+        """Add a given amount of total work to be done to the progress indicator.

         Args:
-            count: The amount of work to add to the progress …
+            count: The amount of work to add to the progress indicator.
         """
         self._total += count
@@ -307,8 +388,8 @@ class ExecutionContext(ABC):
         """Get the runner context for the task runner executing the task."""

     @abstractmethod
-    def progress(self, label: str | None) -> ProgressUpdate:
-        """Get a progress …
+    def progress(self, label: str | None = None) -> ProgressUpdate:
+        """Get a progress indicator instance for tracking job progress."""


 def serialize_task(task: Task) -> bytes:
tilebox/workflows/workflows/v1/automation_pb2.py CHANGED

@@ -29,7 +29,7 @@ from tilebox.datasets.tilebox.v1 import id_pb2 as tilebox_dot_v1_dot_id__pb2
 from tilebox.workflows.workflows.v1 import core_pb2 as workflows_dot_v1_dot_core__pb2


-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dworkflows/v1/automation.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13tilebox/v1/id.proto\x1a\x17workflows/v1/core.proto\"\x9c\x01\n\x0fStorageLocation\x12&\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x02id\x12&\n\x08location\x18\x02 \x01(\tB\n\xbaH\x07r\x05 \x01(\x80\x04R\x08location\x12\x39\n\x04type\x18\x03 \x01(\x0e\x32\x19.workflows.v1.StorageTypeB\n\xbaH\x07\x82\x01\x04\x10\x01 \x00R\x04type\"O\n\x10StorageLocations\x12;\n\tlocations\x18\x01 \x03(\x0b\x32\x1d.workflows.v1.StorageLocationR\tlocations\"\…
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dworkflows/v1/automation.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13tilebox/v1/id.proto\x1a\x17workflows/v1/core.proto\"\x9c\x01\n\x0fStorageLocation\x12&\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x02id\x12&\n\x08location\x18\x02 \x01(\tB\n\xbaH\x07r\x05 \x01(\x80\x04R\x08location\x12\x39\n\x04type\x18\x03 \x01(\x0e\x32\x19.workflows.v1.StorageTypeB\n\xbaH\x07\x82\x01\x04\x10\x01 \x00R\x04type\"O\n\x10StorageLocations\x12;\n\tlocations\x18\x01 \x03(\x0b\x32\x1d.workflows.v1.StorageLocationR\tlocations\"\x90\x03\n\x13\x41utomationPrototype\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12\x1e\n\x04name\x18\x02 \x01(\tB\n\xbaH\x07r\x05 \x01(\x80\x08R\x04name\x12@\n\tprototype\x18\x03 \x01(\x0b\x32\".workflows.v1.SingleTaskSubmissionR\tprototype\x12\x61\n\x16storage_event_triggers\x18\x04 \x03(\x0b\x32!.workflows.v1.StorageEventTriggerB\x08\xbaH\x05\x92\x01\x02\x10 R\x14storageEventTriggers\x12H\n\rcron_triggers\x18\x05 \x03(\x0b\x32\x19.workflows.v1.CronTriggerB\x08\xbaH\x05\x92\x01\x02\x10 R\x0c\x63ronTriggers\x12\x1a\n\x08\x64isabled\x18\x06 \x01(\x08R\x08\x64isabled:.\xbaH+\")\n\x16storage_event_triggers\n\rcron_triggers\x10\x01\"R\n\x0b\x41utomations\x12\x43\n\x0b\x61utomations\x18\x01 \x03(\x0b\x32!.workflows.v1.AutomationPrototypeR\x0b\x61utomations\"\xab\x01\n\x13StorageEventTrigger\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12H\n\x10storage_location\x18\x02 \x01(\x0b\x32\x1d.workflows.v1.StorageLocationR\x0fstorageLocation\x12*\n\x0cglob_pattern\x18\x03 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x0bglobPattern\"R\n\x0b\x43ronTrigger\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12#\n\x08schedule\x18\x02 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x08schedule\"E\n\nAutomation\x12#\n\rtrigger_event\x18\x01 \x01(\x0cR\x0ctriggerEvent\x12\x12\n\x04\x61rgs\x18\x02 \x01(\x0cR\x04\x61rgs\"\xa7\x01\n\x15TriggeredStorageEvent\x12>\n\x13storage_location_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x11storageLocationId\x12\x32\n\x04type\x18\x02 \x01(\x0e\x32\x1e.workflows.v1.StorageEventTypeR\x04type\x12\x1a\n\x08location\x18\x03 \x01(\tR\x08location\"S\n\x12TriggeredCronEvent\x12=\n\x0ctrigger_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0btriggerTime\"w\n\x17\x44\x65leteAutomationRequest\x12;\n\rautomation_id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDB\x06\xbaH\x03\xc8\x01\x01R\x0c\x61utomationId\x12\x1f\n\x0b\x63\x61ncel_jobs\x18\x02 \x01(\x08R\ncancelJobs*k\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x14\n\x10STORAGE_TYPE_GCS\x10\x01\x12\x13\n\x0fSTORAGE_TYPE_S3\x10\x02\x12\x13\n\x0fSTORAGE_TYPE_FS\x10\x03*V\n\x10StorageEventType\x12\"\n\x1eSTORAGE_EVENT_TYPE_UNSPECIFIED\x10\x00\x12\x1e\n\x1aSTORAGE_EVENT_TYPE_CREATED\x10\x01\x32\xd1\x05\n\x11\x41utomationService\x12N\n\x14ListStorageLocations\x12\x16.google.protobuf.Empty\x1a\x1e.workflows.v1.StorageLocations\x12\x43\n\x12GetStorageLocation\x12\x0e.tilebox.v1.ID\x1a\x1d.workflows.v1.StorageLocation\x12U\n\x15\x43reateStorageLocation\x12\x1d.workflows.v1.StorageLocation\x1a\x1d.workflows.v1.StorageLocation\x12?\n\x15\x44\x65leteStorageLocation\x12\x0e.tilebox.v1.ID\x1a\x16.google.protobuf.Empty\x12\x44\n\x0fListAutomations\x12\x16.google.protobuf.Empty\x1a\x19.workflows.v1.Automations\x12\x42\n\rGetAutomation\x12\x0e.tilebox.v1.ID\x1a!.workflows.v1.AutomationPrototype\x12X\n\x10\x43reateAutomation\x12!.workflows.v1.AutomationPrototype\x1a!.workflows.v1.AutomationPrototype\x12X\n\x10UpdateAutomation\x12!.workflows.v1.AutomationPrototype\x1a!.workflows.v1.AutomationPrototype\x12Q\n\x10\x44\x65leteAutomation\x12%.workflows.v1.DeleteAutomationRequest\x1a\x16.google.protobuf.EmptyBy\n\x10\x63om.workflows.v1B\x0f\x41utomationProtoP\x01\xa2\x02\x03WXX\xaa\x02\x0cWorkflows.V1\xca\x02\x0cWorkflows\\V1\xe2\x02\x18Workflows\\V1\\GPBMetadata\xea\x02\rWorkflows::V1\x92\x03\x02\x08\x02\x62\x08\x65\x64itionsp\xe8\x07')

 _globals = globals()
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)

@@ -57,30 +57,30 @@ if not _descriptor._USE_C_DESCRIPTORS:
   _globals['_CRONTRIGGER'].fields_by_name['schedule']._serialized_options = b'\272H\004r\002\020\001'
   _globals['_DELETEAUTOMATIONREQUEST'].fields_by_name['automation_id']._loaded_options = None
   _globals['_DELETEAUTOMATIONREQUEST'].fields_by_name['automation_id']._serialized_options = b'\272H\003\310\001\001'
-  _globals['_STORAGETYPE']._serialized_start=…
-  _globals['_STORAGETYPE']._serialized_end=…
-  _globals['_STORAGEEVENTTYPE']._serialized_start=…
-  _globals['_STORAGEEVENTTYPE']._serialized_end=…
+  _globals['_STORAGETYPE']._serialized_start=1616
+  _globals['_STORAGETYPE']._serialized_end=1723
+  _globals['_STORAGEEVENTTYPE']._serialized_start=1725
+  _globals['_STORAGEEVENTTYPE']._serialized_end=1811
   _globals['_STORAGELOCATION']._serialized_start=185
   _globals['_STORAGELOCATION']._serialized_end=341
   _globals['_STORAGELOCATIONS']._serialized_start=343
   _globals['_STORAGELOCATIONS']._serialized_end=422
   _globals['_AUTOMATIONPROTOTYPE']._serialized_start=425
-  _globals['_AUTOMATIONPROTOTYPE']._serialized_end=…
-  _globals['_AUTOMATIONS']._serialized_start=…
-  _globals['_AUTOMATIONS']._serialized_end=…
-  _globals['_STORAGEEVENTTRIGGER']._serialized_start=…
-  _globals['_STORAGEEVENTTRIGGER']._serialized_end=…
-  _globals['_CRONTRIGGER']._serialized_start=…
-  _globals['_CRONTRIGGER']._serialized_end=…
-  _globals['_AUTOMATION']._serialized_start=…
-  _globals['_AUTOMATION']._serialized_end=…
-  _globals['_TRIGGEREDSTORAGEEVENT']._serialized_start=…
-  _globals['_TRIGGEREDSTORAGEEVENT']._serialized_end=…
-  _globals['_TRIGGEREDCRONEVENT']._serialized_start=…
-  _globals['_TRIGGEREDCRONEVENT']._serialized_end=…
-  _globals['_DELETEAUTOMATIONREQUEST']._serialized_start=…
-  _globals['_DELETEAUTOMATIONREQUEST']._serialized_end=…
-  _globals['_AUTOMATIONSERVICE']._serialized_start=…
-  _globals['_AUTOMATIONSERVICE']._serialized_end=…
+  _globals['_AUTOMATIONPROTOTYPE']._serialized_end=825
+  _globals['_AUTOMATIONS']._serialized_start=827
+  _globals['_AUTOMATIONS']._serialized_end=909
+  _globals['_STORAGEEVENTTRIGGER']._serialized_start=912
+  _globals['_STORAGEEVENTTRIGGER']._serialized_end=1083
+  _globals['_CRONTRIGGER']._serialized_start=1085
+  _globals['_CRONTRIGGER']._serialized_end=1167
+  _globals['_AUTOMATION']._serialized_start=1169
+  _globals['_AUTOMATION']._serialized_end=1238
+  _globals['_TRIGGEREDSTORAGEEVENT']._serialized_start=1241
+  _globals['_TRIGGEREDSTORAGEEVENT']._serialized_end=1408
+  _globals['_TRIGGEREDCRONEVENT']._serialized_start=1410
+  _globals['_TRIGGEREDCRONEVENT']._serialized_end=1493
+  _globals['_DELETEAUTOMATIONREQUEST']._serialized_start=1495
+  _globals['_DELETEAUTOMATIONREQUEST']._serialized_end=1614
+  _globals['_AUTOMATIONSERVICE']._serialized_start=1814
+  _globals['_AUTOMATIONSERVICE']._serialized_end=2535
 # @@protoc_insertion_point(module_scope)
tilebox/workflows/workflows/v1/automation_pb2.pyi CHANGED

@@ -56,11 +56,11 @@ class AutomationPrototype(_message.Message):
     DISABLED_FIELD_NUMBER: _ClassVar[int]
     id: _id_pb2.ID
     name: str
-    prototype: _core_pb2.…
+    prototype: _core_pb2.SingleTaskSubmission
     storage_event_triggers: _containers.RepeatedCompositeFieldContainer[StorageEventTrigger]
    cron_triggers: _containers.RepeatedCompositeFieldContainer[CronTrigger]
     disabled: bool
-    def __init__(self, id: _Optional[_Union[_id_pb2.ID, _Mapping]] = ..., name: _Optional[str] = ..., prototype: _Optional[_Union[_core_pb2.…
+    def __init__(self, id: _Optional[_Union[_id_pb2.ID, _Mapping]] = ..., name: _Optional[str] = ..., prototype: _Optional[_Union[_core_pb2.SingleTaskSubmission, _Mapping]] = ..., storage_event_triggers: _Optional[_Iterable[_Union[StorageEventTrigger, _Mapping]]] = ..., cron_triggers: _Optional[_Iterable[_Union[CronTrigger, _Mapping]]] = ..., disabled: bool = ...) -> None: ...

 class Automations(_message.Message):
     __slots__ = ("automations",)
tilebox/workflows/workflows/v1/core_pb2.py CHANGED

@@ -28,7 +28,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
 from tilebox.datasets.tilebox.v1 import id_pb2 as tilebox_dot_v1_dot_id__pb2


-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17workflows/v1/core.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13tilebox/v1/id.proto\"^\n\x07\x43luster\x12\x12\n\x04slug\x18\x02 \x01(\tR\x04slug\x12!\n\x0c\x64isplay_name\x18\x03 \x01(\tR\x0b\x64isplayName\x12\x1c\n\tdeletable\x18\x04 \x01(\x08R\tdeletable\"\…
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17workflows/v1/core.proto\x12\x0cworkflows.v1\x1a\x1b\x62uf/validate/validate.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13tilebox/v1/id.proto\"^\n\x07\x43luster\x12\x12\n\x04slug\x18\x02 \x01(\tR\x04slug\x12!\n\x0c\x64isplay_name\x18\x03 \x01(\tR\x0b\x64isplayName\x12\x1c\n\tdeletable\x18\x04 \x01(\x08R\tdeletable\"\xe5\x04\n\x03Job\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12!\n\x0ctrace_parent\x18\x03 \x01(\tR\x0btraceParent\x12\x1e\n\x08\x63\x61nceled\x18\x05 \x01(\x08\x42\x02\x18\x01R\x08\x63\x61nceled\x12\x43\n\x0clegacy_state\x18\x06 \x01(\x0e\x32\x1c.workflows.v1.LegacyJobStateB\x02\x18\x01R\x0blegacyState\x12=\n\x0csubmitted_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x0bsubmittedAt\x12=\n\nstarted_at\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x02\x18\x01R\tstartedAt\x12@\n\x0etask_summaries\x18\t \x03(\x0b\x32\x19.workflows.v1.TaskSummaryR\rtaskSummaries\x12\x33\n\rautomation_id\x18\n \x01(\x0b\x32\x0e.tilebox.v1.IDR\x0c\x61utomationId\x12\x32\n\x08progress\x18\x0b \x03(\x0b\x32\x16.workflows.v1.ProgressR\x08progress\x12,\n\x05state\x18\x0c \x01(\x0e\x32\x16.workflows.v1.JobStateR\x05state\x12\x45\n\x0f\x65xecution_stats\x18\r \x01(\x0b\x32\x1c.workflows.v1.ExecutionStatsR\x0e\x65xecutionStatsJ\x04\x08\x04\x10\x05\"\xaf\x03\n\x0e\x45xecutionStats\x12M\n\x15\x66irst_task_started_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x12\x66irstTaskStartedAt\x12K\n\x14last_task_stopped_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x11lastTaskStoppedAt\x12<\n\x0c\x63ompute_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0b\x63omputeTime\x12<\n\x0c\x65lapsed_time\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0b\x65lapsedTime\x12 \n\x0bparallelism\x18\x05 \x01(\x01R\x0bparallelism\x12\x1f\n\x0btotal_tasks\x18\x06 \x01(\x04R\ntotalTasks\x12\x42\n\x0etasks_by_state\x18\x07 \x03(\x0b\x32\x1c.workflows.v1.TaskStateCountR\x0ctasksByState\"U\n\x0eTaskStateCount\x12-\n\x05state\x18\x01 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\x12\x14\n\x05\x63ount\x18\x02 \x01(\x04R\x05\x63ount\"\x9f\x02\n\x0bTaskSummary\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12\x18\n\x07\x64isplay\x18\x02 \x01(\tR\x07\x64isplay\x12-\n\x05state\x18\x03 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\x12+\n\tparent_id\x18\x04 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x08parentId\x12\x39\n\nstarted_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartedAt\x12\x39\n\nstopped_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstoppedAtJ\x04\x08\x05\x10\x06\"S\n\x08Progress\x12\x1d\n\x05label\x18\x01 \x01(\tB\x07\xbaH\x04r\x02\x18\x64R\x05label\x12\x14\n\x05total\x18\x02 \x01(\x04R\x05total\x12\x12\n\x04\x64one\x18\x03 \x01(\x04R\x04\x64one\"\xa2\x03\n\x04Task\x12\x1e\n\x02id\x18\x01 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x02id\x12<\n\nidentifier\x18\x02 \x01(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\nidentifier\x12-\n\x05state\x18\x03 \x01(\x0e\x32\x17.workflows.v1.TaskStateR\x05state\x12\x1b\n\x05input\x18\x04 \x01(\x0c\x42\x05\xaa\x01\x02\x08\x01R\x05input\x12\x1f\n\x07\x64isplay\x18\x05 \x01(\tB\x05\xaa\x01\x02\x08\x01R\x07\x64isplay\x12#\n\x03job\x18\x06 \x01(\x0b\x32\x11.workflows.v1.JobR\x03job\x12+\n\tparent_id\x18\x07 \x01(\x0b\x32\x0e.tilebox.v1.IDR\x08parentId\x12-\n\ndepends_on\x18\x08 \x03(\x0b\x32\x0e.tilebox.v1.IDR\tdependsOn\x12-\n\x05lease\x18\t \x01(\x0b\x32\x17.workflows.v1.TaskLeaseR\x05lease\x12\x1f\n\x0bretry_count\x18\n \x01(\x03R\nretryCount\"d\n\x0eTaskIdentifier\x12\x1e\n\x04name\x18\x01 \x01(\tB\n\xbaH\x07r\x05 \x01(\x80\x02R\x04name\x12\x32\n\x07version\x18\x02 \x01(\tB\x18\xbaH\x15r\x13 \x01\x32\x0f^v(\\d+)\\.(\\d+)$R\x07version\"1\n\x05Tasks\x12(\n\x05tasks\x18\x01 \x03(\x0b\x32\x12.workflows.v1.TaskR\x05tasks\"\x98\x02\n\x14SingleTaskSubmission\x12!\n\x0c\x63luster_slug\x18\x01 \x01(\tR\x0b\x63lusterSlug\x12<\n\nidentifier\x18\x02 \x01(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\nidentifier\x12!\n\x07\x64isplay\x18\x04 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x07\x64isplay\x12\x32\n\x0c\x64\x65pendencies\x18\x05 \x03(\x03\x42\x0e\xbaH\x0b\x92\x01\x08\"\x06\"\x04\x18?(\x00R\x0c\x64\x65pendencies\x12(\n\x0bmax_retries\x18\x06 \x01(\x03\x42\x07\xbaH\x04\"\x02(\x00R\nmaxRetries\x12\x1e\n\x05input\x18\x03 \x01(\x0c\x42\x08\xbaH\x05z\x03\x18\x80\x10R\x05input\"\x91\x02\n\x0fTaskSubmissions\x12N\n\x0btask_groups\x18\x01 \x03(\x0b\x32!.workflows.v1.TaskSubmissionGroupB\n\xbaH\x07\x92\x01\x04\x08\x01\x10@R\ntaskGroups\x12.\n\x13\x63luster_slug_lookup\x18\x02 \x03(\tR\x11\x63lusterSlugLookup\x12I\n\x11identifier_lookup\x18\x03 \x03(\x0b\x32\x1c.workflows.v1.TaskIdentifierR\x10identifierLookup\x12\x33\n\x0e\x64isplay_lookup\x18\x04 \x03(\tB\x0c\xbaH\t\x92\x01\x06\"\x04r\x02\x10\x01R\rdisplayLookup\"\xe2\x07\n\x13TaskSubmissionGroup\x12?\n\x1c\x64\x65pendencies_on_other_groups\x18\x01 \x03(\rR\x19\x64\x65pendenciesOnOtherGroups\x12%\n\x06inputs\x18\x02 \x03(\x0c\x42\r\xbaH\n\x92\x01\x07\"\x05z\x03\x18\x80\x10R\x06inputs\x12/\n\x13identifier_pointers\x18\x03 \x03(\x04R\x12identifierPointers\x12\x32\n\x15\x63luster_slug_pointers\x18\x04 \x03(\x04R\x13\x63lusterSlugPointers\x12)\n\x10\x64isplay_pointers\x18\x05 \x03(\x04R\x0f\x64isplayPointers\x12,\n\x12max_retries_values\x18\x06 \x03(\x03R\x10maxRetriesValues:\xa4\x05\xbaH\xa0\x05\x1a\xa6\x01\n,task_submission_group.identifiers_size_match\x12?The number of inputs must match the number of task identifiers.\x1a\x35this.inputs.size() == this.identifier_pointers.size()\x1a\xa7\x01\n.task_submission_group.cluster_slugs_size_match\x12<The number of cluster slugs must match the number of inputs.\x1a\x37this.inputs.size() == this.cluster_slug_pointers.size()\x1a\xa0\x01\n)task_submission_group.displays_size_match\x12?The number of display pointers must match the number of inputs.\x1a\x32this.inputs.size() == this.display_pointers.size()\x1a\xa7\x01\n,task_submission_group.max_retries_size_match\x12\x41The number of max_retries_values must match the number of inputs.\x1a\x34this.inputs.size() == this.max_retries_values.size()\"\xa9\x01\n\tTaskLease\x12/\n\x05lease\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x05lease\x12k\n%recommended_wait_until_next_extension\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationR!recommendedWaitUntilNextExtension*\x8d\x01\n\x0eLegacyJobState\x12 \n\x1cLEGACY_JOB_STATE_UNSPECIFIED\x10\x00\x12\x1b\n\x17LEGACY_JOB_STATE_QUEUED\x10\x01\x12\x1c\n\x18LEGACY_JOB_STATE_STARTED\x10\x02\x12\x1e\n\x1aLEGACY_JOB_STATE_COMPLETED\x10\x03*\xb3\x01\n\x08JobState\x12\x19\n\x15JOB_STATE_UNSPECIFIED\x10\x00\x12\x17\n\x13JOB_STATE_SUBMITTED\x10\x01\x12\x15\n\x11JOB_STATE_RUNNING\x10\x02\x12\x15\n\x11JOB_STATE_STARTED\x10\x03\x12\x17\n\x13JOB_STATE_COMPLETED\x10\x04\x12\x14\n\x10JOB_STATE_FAILED\x10\x05\x12\x16\n\x12JOB_STATE_CANCELED\x10\x06*\x86\x01\n\tTaskState\x12\x1a\n\x16TASK_STATE_UNSPECIFIED\x10\x00\x12\x15\n\x11TASK_STATE_QUEUED\x10\x01\x12\x16\n\x12TASK_STATE_RUNNING\x10\x02\x12\x17\n\x13TASK_STATE_COMPUTED\x10\x03\x12\x15\n\x11TASK_STATE_FAILED\x10\x04\x42s\n\x10\x63om.workflows.v1B\tCoreProtoP\x01\xa2\x02\x03WXX\xaa\x02\x0cWorkflows.V1\xca\x02\x0cWorkflows\\V1\xe2\x02\x18Workflows\\V1\\GPBMetadata\xea\x02\rWorkflows::V1\x92\x03\x02\x08\x02\x62\x08\x65\x64itionsp\xe8\x07')

 _globals = globals()
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)

@@ -36,8 +36,14 @@ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'workflows.v1.core_pb2', _gl
 if not _descriptor._USE_C_DESCRIPTORS:
   _globals['DESCRIPTOR']._loaded_options = None
   _globals['DESCRIPTOR']._serialized_options = b'\n\020com.workflows.v1B\tCoreProtoP\001\242\002\003WXX\252\002\014Workflows.V1\312\002\014Workflows\\V1\342\002\030Workflows\\V1\\GPBMetadata\352\002\rWorkflows::V1\222\003\002\010\002'
-  _globals['…
-  _globals['…
+  _globals['_JOB'].fields_by_name['canceled']._loaded_options = None
+  _globals['_JOB'].fields_by_name['canceled']._serialized_options = b'\030\001'
+  _globals['_JOB'].fields_by_name['legacy_state']._loaded_options = None
+  _globals['_JOB'].fields_by_name['legacy_state']._serialized_options = b'\030\001'
+  _globals['_JOB'].fields_by_name['started_at']._loaded_options = None
+  _globals['_JOB'].fields_by_name['started_at']._serialized_options = b'\030\001'
+  _globals['_PROGRESS'].fields_by_name['label']._loaded_options = None
+  _globals['_PROGRESS'].fields_by_name['label']._serialized_options = b'\272H\004r\002\030d'
   _globals['_TASK'].fields_by_name['input']._loaded_options = None
   _globals['_TASK'].fields_by_name['input']._serialized_options = b'\252\001\002\010\001'
   _globals['_TASK'].fields_by_name['display']._loaded_options = None

@@ -46,34 +52,52 @@ if not _descriptor._USE_C_DESCRIPTORS:
   _globals['_TASKIDENTIFIER'].fields_by_name['name']._serialized_options = b'\272H\007r\005 \001(\200\002'
   _globals['_TASKIDENTIFIER'].fields_by_name['version']._loaded_options = None
   _globals['_TASKIDENTIFIER'].fields_by_name['version']._serialized_options = b'\272H\025r\023 \0012\017^v(\\d+)\\.(\\d+)$'
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['display']._loaded_options = None
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['display']._serialized_options = b'\272H\004r\002\020\001'
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['dependencies']._loaded_options = None
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['dependencies']._serialized_options = b'\272H\013\222\001\010\"\006\"\004\030?(\000'
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['max_retries']._loaded_options = None
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['max_retries']._serialized_options = b'\272H\004\"\002(\000'
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['input']._loaded_options = None
+  _globals['_SINGLETASKSUBMISSION'].fields_by_name['input']._serialized_options = b'\272H\005z\003\030\200\020'
+  _globals['_TASKSUBMISSIONS'].fields_by_name['task_groups']._loaded_options = None
+  _globals['_TASKSUBMISSIONS'].fields_by_name['task_groups']._serialized_options = b'\272H\007\222\001\004\010\001\020@'
+  _globals['_TASKSUBMISSIONS'].fields_by_name['display_lookup']._loaded_options = None
+  _globals['_TASKSUBMISSIONS'].fields_by_name['display_lookup']._serialized_options = b'\272H\t\222\001\006\"\004r\002\020\001'
+  _globals['_TASKSUBMISSIONGROUP'].fields_by_name['inputs']._loaded_options = None
+  _globals['_TASKSUBMISSIONGROUP'].fields_by_name['inputs']._serialized_options = b'\272H\n\222\001\007\"\005z\003\030\200\020'
+  _globals['_TASKSUBMISSIONGROUP']._loaded_options = None
+  _globals['_TASKSUBMISSIONGROUP']._serialized_options = b'\272H\240\005\032\246\001\n,task_submission_group.identifiers_size_match\022?The number of inputs must match the number of task identifiers.\0325this.inputs.size() == this.identifier_pointers.size()\032\247\001\n.task_submission_group.cluster_slugs_size_match\022<The number of cluster slugs must match the number of inputs.\0327this.inputs.size() == this.cluster_slug_pointers.size()\032\240\001\n)task_submission_group.displays_size_match\022?The number of display pointers must match the number of inputs.\0322this.inputs.size() == this.display_pointers.size()\032\247\001\n,task_submission_group.max_retries_size_match\022AThe number of max_retries_values must match the number of inputs.\0324this.inputs.size() == this.max_retries_values.size()'
+  _globals['_LEGACYJOBSTATE']._serialized_start=4067
+  _globals['_LEGACYJOBSTATE']._serialized_end=4208
+  _globals['_JOBSTATE']._serialized_start=4211
+  _globals['_JOBSTATE']._serialized_end=4390
+  _globals['_TASKSTATE']._serialized_start=4393
+  _globals['_TASKSTATE']._serialized_end=4527
   _globals['_CLUSTER']._serialized_start=156
   _globals['_CLUSTER']._serialized_end=250
   _globals['_JOB']._serialized_start=253
-  _globals['_JOB']._serialized_end=…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
-  _globals['…
+  _globals['_JOB']._serialized_end=866
+  _globals['_EXECUTIONSTATS']._serialized_start=869
+  _globals['_EXECUTIONSTATS']._serialized_end=1300
+  _globals['_TASKSTATECOUNT']._serialized_start=1302
+  _globals['_TASKSTATECOUNT']._serialized_end=1387
+  _globals['_TASKSUMMARY']._serialized_start=1390
+  _globals['_TASKSUMMARY']._serialized_end=1677
+  _globals['_PROGRESS']._serialized_start=1679
+  _globals['_PROGRESS']._serialized_end=1762
+  _globals['_TASK']._serialized_start=1765
+  _globals['_TASK']._serialized_end=2183
+  _globals['_TASKIDENTIFIER']._serialized_start=2185
+  _globals['_TASKIDENTIFIER']._serialized_end=2285
+  _globals['_TASKS']._serialized_start=2287
+  _globals['_TASKS']._serialized_end=2336
+  _globals['_SINGLETASKSUBMISSION']._serialized_start=2339
+  _globals['_SINGLETASKSUBMISSION']._serialized_end=2619
+  _globals['_TASKSUBMISSIONS']._serialized_start=2622
+  _globals['_TASKSUBMISSIONS']._serialized_end=2895
+  _globals['_TASKSUBMISSIONGROUP']._serialized_start=2898
+  _globals['_TASKSUBMISSIONGROUP']._serialized_end=3892
+  _globals['_TASKLEASE']._serialized_start=3895
+  _globals['_TASKLEASE']._serialized_end=4064
 # @@protoc_insertion_point(module_scope)
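The regenerated `core.proto` encodes `TaskSubmissions` as groups of parallel pointer arrays that index into shared lookup tables, with CEL rules enforcing that the array lengths match. A sketch of how such a payload might be resolved back into per-task values (hypothetical data, plain dicts standing in for the generated messages):

```python
# hypothetical TaskSubmissions-shaped payload, mirroring the new proto fields
display_lookup = ["ProcessScene", "MergeScenes"]
cluster_slug_lookup = ["my-cluster"]

group = {
    "inputs": [b"scene-a", b"scene-b", b"scene-c"],
    "identifier_pointers": [0, 0, 0],   # all three tasks share one TaskIdentifier
    "display_pointers": [0, 0, 0],      # ...and one display string
    "cluster_slug_pointers": [0, 0, 0],
    "max_retries_values": [0, 0, 0],
}

for i, payload in enumerate(group["inputs"]):
    display = display_lookup[group["display_pointers"][i]]
    cluster = cluster_slug_lookup[group["cluster_slug_pointers"][i]]
    print(f"{display} on {cluster}: {payload!r}")
```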