ddeutil-workflow 0.0.63__py3-none-any.whl → 0.0.65__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddeutil/workflow/__about__.py +1 -1
- ddeutil/workflow/__init__.py +1 -8
- ddeutil/workflow/api/__init__.py +5 -84
- ddeutil/workflow/api/routes/__init__.py +0 -1
- ddeutil/workflow/api/routes/job.py +2 -3
- ddeutil/workflow/api/routes/logs.py +0 -2
- ddeutil/workflow/api/routes/workflows.py +0 -3
- ddeutil/workflow/conf.py +6 -38
- ddeutil/workflow/{exceptions.py → errors.py} +47 -12
- ddeutil/workflow/job.py +249 -118
- ddeutil/workflow/params.py +11 -11
- ddeutil/workflow/result.py +86 -10
- ddeutil/workflow/reusables.py +54 -23
- ddeutil/workflow/stages.py +692 -464
- ddeutil/workflow/utils.py +37 -2
- ddeutil/workflow/workflow.py +163 -664
- {ddeutil_workflow-0.0.63.dist-info → ddeutil_workflow-0.0.65.dist-info}/METADATA +17 -67
- ddeutil_workflow-0.0.65.dist-info/RECORD +28 -0
- {ddeutil_workflow-0.0.63.dist-info → ddeutil_workflow-0.0.65.dist-info}/WHEEL +1 -1
- ddeutil/workflow/api/routes/schedules.py +0 -141
- ddeutil/workflow/api/utils.py +0 -174
- ddeutil/workflow/scheduler.py +0 -813
- ddeutil_workflow-0.0.63.dist-info/RECORD +0 -31
- {ddeutil_workflow-0.0.63.dist-info → ddeutil_workflow-0.0.65.dist-info}/entry_points.txt +0 -0
- {ddeutil_workflow-0.0.63.dist-info → ddeutil_workflow-0.0.65.dist-info}/licenses/LICENSE +0 -0
- {ddeutil_workflow-0.0.63.dist-info → ddeutil_workflow-0.0.65.dist-info}/top_level.txt +0 -0
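The headline change for callers is in ddeutil/workflow/workflow.py below: the Release and ReleaseQueue models are removed, Workflow.release() now takes a plain datetime plus a release_type flag, and exceptions move from ddeutil.workflow.exceptions.WorkflowException to ddeutil.workflow.errors.WorkflowError. A minimal, hypothetical migration sketch based only on the signatures shown in this diff (the workflow instance, its parameters, and the chosen datetime are placeholders, not part of the diff):

from datetime import datetime

from ddeutil.workflow.errors import WorkflowError
from ddeutil.workflow.workflow import ReleaseType, Workflow


def trigger_release(wf: Workflow, when: datetime) -> None:
    """Fire one release with the 0.0.65 API; `wf` is an already-loaded Workflow."""
    try:
        rs = wf.release(
            release=when,                    # plain datetime; Release objects were removed
            params={},                       # workflow-specific params go here
            release_type=ReleaseType.FORCE,  # NORMAL, RERUN, EVENT, or FORCE
        )
        print(f"release finished with status: {rs.status}")
    except WorkflowError as exc:
        # Raised, for example, when `when` does not match any `on` schedule
        # (see the new `validate_release()` method in the diff below).
        print(f"release rejected: {exc}")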
ddeutil/workflow/workflow.py  CHANGED
@@ -8,9 +8,6 @@ ReleaseQueue, and Workflow models.
 
 This package implement timeout strategy on the workflow execution layer only
 because the main propose of this package is using Workflow to be orchestrator.
-
-ReleaseQueue is the memory storage of Release for tracking this release
-already run or pending in the current session.
 """
 from __future__ import annotations
 
@@ -21,288 +18,57 @@ from concurrent.futures import (
     ThreadPoolExecutor,
     as_completed,
 )
-from
-from datetime import datetime, timedelta
+from datetime import datetime
 from enum import Enum
-from functools import partial, total_ordering
-from heapq import heappop, heappush
 from pathlib import Path
 from queue import Queue
 from textwrap import dedent
-from threading import Event
-from typing import Any, Optional
+from threading import Event
+from typing import Any, Optional
 from zoneinfo import ZoneInfo
 
-from pydantic import BaseModel,
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, Field, ValidationInfo
 from pydantic.functional_validators import field_validator, model_validator
 from typing_extensions import Self
 
-from .
+from . import get_status_from_error
 from .__types import DictData
 from .conf import FileLoad, Loader, dynamic
+from .errors import WorkflowCancelError, WorkflowError, WorkflowTimeoutError
 from .event import Crontab
-from .exceptions import WorkflowException
 from .job import Job
 from .logs import Audit, get_audit
 from .params import Param
-from .result import
+from .result import (
+    CANCEL,
+    FAILED,
+    SKIP,
+    SUCCESS,
+    WAIT,
+    Result,
+    Status,
+    validate_statuses,
+)
 from .reusables import has_template, param2template
 from .utils import (
-    clear_tz,
     gen_id,
-    get_dt_now,
-    reach_next_minute,
     replace_sec,
-    wait_until_next_minute,
 )
 
 
 class ReleaseType(str, Enum):
-    """Release Type Enum
+    """Release Type Enum."""
 
-
-
-
+    NORMAL = "normal"
+    RERUN = "rerun"
+    EVENT = "event"
     FORCE = "force"
 
 
-
-
-
-
-
-    date: datetime = Field(
-        description=(
-            "A release date that should has second and millisecond equal 0."
-        )
-    )
-    type: ReleaseType = Field(
-        default=ReleaseType.DEFAULT,
-        description="A type of release that create before start execution.",
-    )
-
-    def __repr__(self) -> str:
-        """Override __repr__ method for represent value of `date` field.
-
-        :rtype: str
-        """
-        return repr(f"{self.date:%Y-%m-%d %H:%M:%S}")
-
-    def __str__(self) -> str:
-        """Override string value of this release object with the `date` field.
-
-        :rtype: str
-        """
-        return f"{self.date:%Y-%m-%d %H:%M:%S}"
-
-    @classmethod
-    def from_dt(cls, dt: Union[datetime, str]) -> Self:
-        """Construct Release object from `datetime` or `str` objects.
-
-        This method will replace second and millisecond value to 0 and
-        replace timezone to the `tz` config setting or extras overriding before
-        create Release object.
-
-        :param dt: (Union[datetime, str]) A datetime object or string that want to
-            construct to the Release object.
-
-        :raise TypeError: If the type of the dt argument does not valid with
-            datetime or str object.
-
-        :rtype: Release
-        """
-        if isinstance(dt, str):
-            dt: datetime = datetime.fromisoformat(dt)
-        elif not isinstance(dt, datetime):
-            raise TypeError(
-                f"The `from_dt` need the `dt` parameter type be `str` or "
-                f"`datetime` only, not {type(dt)}."
-            )
-        return cls(date=replace_sec(dt.replace(tzinfo=None)))
-
-    def __eq__(self, other: Union[Release, datetime]) -> bool:
-        """Override equal property that will compare only the same type or
-        datetime.
-
-        :rtype: bool
-        """
-        if isinstance(other, self.__class__):
-            return self.date == other.date
-        elif isinstance(other, datetime):
-            return self.date == other
-        return NotImplemented
-
-    def __lt__(self, other: Union[Release, datetime]) -> bool:
-        """Override less-than property that will compare only the same type or
-        datetime.
-
-        :rtype: bool
-        """
-        if isinstance(other, self.__class__):
-            return self.date < other.date
-        elif isinstance(other, datetime):
-            return self.date < other
-        return NotImplemented
-
-
-class ReleaseQueue:
-    """ReleaseQueue object that is storage management of Release objects on
-    the memory with list object.
-    """
-
-    def __init__(
-        self,
-        queue: Optional[list[Release]] = None,
-        running: Optional[list[Release]] = None,
-        complete: Optional[list[Release]] = None,
-        extras: Optional[DictData] = None,
-    ):
-        self.queue: list[Release] = queue or []
-        self.running: list[Release] = running or []
-        self.complete: list[Release] = complete or []
-        self.extras: DictData = extras or {}
-        self.lock: Lock = Lock()
-
-    @classmethod
-    def from_list(
-        cls,
-        queue: Optional[Union[list[datetime], list[Release]]] = None,
-    ) -> Self:
-        """Construct ReleaseQueue object from an input queue value that passing
-        with list of datetime or list of Release.
-
-        :param queue: A queue object for create ReleaseQueue instance.
-
-        :raise TypeError: If the type of input queue does not valid.
-
-        :rtype: ReleaseQueue
-        """
-        if queue is None:
-            return cls()
-
-        if isinstance(queue, list):
-            if all(isinstance(q, datetime) for q in queue):
-                return cls(queue=[Release.from_dt(q) for q in queue])
-
-            if all(isinstance(q, Release) for q in queue):
-                return cls(queue=queue)
-
-        raise TypeError(
-            "Type of the queue does not valid with ReleaseQueue "
-            "or list of datetime or list of Release."
-        )
-
-    @property
-    def is_queued(self) -> bool:
-        """Return True if it has workflow release object in the queue.
-
-        :rtype: bool
-        """
-        return len(self.queue) > 0
-
-    def check_queue(self, value: Union[Release, datetime]) -> bool:
-        """Check a Release value already exists in list of tracking
-        queues.
-
-        :param value: A Release object that want to check it already in
-            queues.
-
-        :rtype: bool
-        """
-        if isinstance(value, datetime):
-            value = Release.from_dt(value)
-
-        with self.lock:
-            return (
-                (value in self.queue)
-                or (value in self.running)
-                or (value in self.complete)
-            )
-
-    def mark_complete(self, value: Release) -> Self:
-        """Push Release to the complete queue. After push the release, it will
-        delete old release base on the `CORE_MAX_QUEUE_COMPLETE_HIST` value.
-
-        :param value: (Release) A Release value that want to push to the
-            complete field.
-
-        :rtype: Self
-        """
-        with self.lock:
-            if value in self.running:
-                self.running.remove(value)
-
-            heappush(self.complete, value)
-
-            # NOTE: Remove complete queue on workflow that keep more than the
-            #   maximum config value.
-            num_complete_delete: int = len(self.complete) - dynamic(
-                "max_queue_complete_hist", extras=self.extras
-            )
-
-            if num_complete_delete > 0:
-                for _ in range(num_complete_delete):
-                    heappop(self.complete)
-
-        return self
-
-    def gen(
-        self,
-        end_date: datetime,
-        audit: type[Audit],
-        runner: CronRunner,
-        name: str,
-        *,
-        force_run: bool = False,
-        extras: Optional[DictData] = None,
-    ) -> Self:
-        """Generate a Release model to the queue field with an input CronRunner.
-
-        Steps:
-            - Create Release object from the current date that not reach the end
-              date.
-            - Check this release do not store on the release queue object.
-              Generate the next date if it exists.
-            - Push this release to the release queue
-
-        :param end_date: (datetime) An end datetime object.
-        :param audit: (type[Audit]) An audit class that want to make audit
-            instance.
-        :param runner: (CronRunner) A `CronRunner` object.
-        :param name: (str) A target name that want to check at pointer of audit.
-        :param force_run: (bool) A flag that allow to release workflow if the
-            audit with that release was pointed. (Default is False).
-        :param extras: (DictDatA) An extra parameter that want to override core
-            config values.
-
-        :rtype: ReleaseQueue
-
-        """
-        if clear_tz(runner.date) > clear_tz(end_date):
-            return self
-
-        release = Release(
-            date=clear_tz(runner.date),
-            type=(ReleaseType.FORCE if force_run else ReleaseType.POKING),
-        )
-
-        while self.check_queue(release) or (
-            audit.is_pointed(name=name, release=release.date, extras=extras)
-            and not force_run
-        ):
-            release = Release(
-                date=clear_tz(runner.next),
-                type=(ReleaseType.FORCE if force_run else ReleaseType.POKING),
-            )
-
-            if clear_tz(runner.date) > clear_tz(end_date):
-                return self
-
-        with self.lock:
-            heappush(self.queue, release)
-
-        return self
+NORMAL = ReleaseType.NORMAL
+RERUN = ReleaseType.RERUN
+EVENT = ReleaseType.EVENT
+FORCE = ReleaseType.FORCE
 
 
 class Workflow(BaseModel):
@@ -484,8 +250,9 @@ class Workflow(BaseModel):
     def __validate_jobs_need__(self) -> Self:
         """Validate each need job in any jobs should exist.
 
-        :raise
+        :raise WorkflowError: If it has not exists need value in this
            workflow job.
+        :raise ValueError: If the workflow name has template value.
 
        :rtype: Self
        """
@@ -493,11 +260,12 @@ class Workflow(BaseModel):
             if not_exist := [
                 need for need in self.jobs[job].needs if need not in self.jobs
             ]:
-                raise
+                raise WorkflowError(
                     f"The needed jobs: {not_exist} do not found in "
                     f"{self.name!r}."
                 )
 
+            # NOTE: Set job ID to the job model.
             self.jobs[job].id = job
 
         # VALIDATE: Validate workflow name should not dynamic with params
@@ -547,7 +315,7 @@ class Workflow(BaseModel):
         :param params: (DictData) A parameter data that receive from workflow
             execute method.
 
-        :raise
+        :raise WorkflowError: If parameter value that want to validate does
            not include the necessary parameter that had required flag.
 
        :rtype: DictData
@@ -561,7 +329,7 @@ class Workflow(BaseModel):
             if (k not in params and self.params[k].required)
         ]
         if check_key:
-            raise
+            raise WorkflowError(
                 f"Required Param on this workflow setting does not set: "
                 f"{', '.join(check_key)}."
             )
@@ -579,15 +347,35 @@ class Workflow(BaseModel):
             "jobs": {},
         }
 
+    def validate_release(self, dt: datetime) -> datetime:
+        """Validate the release datetime that should was replaced second and
+        millisecond to 0 and replaced timezone to None before checking it match
+        with the set `on` field.
+
+        :param dt: (datetime) A datetime object that want to validate.
+
+        :rtype: datetime
+        """
+        release: datetime = replace_sec(dt.replace(tzinfo=None))
+        if not self.on:
+            return release
+
+        for on in self.on:
+            if release == on.cronjob.schedule(release).next:
+                return release
+        raise WorkflowError(
+            "Release datetime does not support for this workflow"
+        )
+
     def release(
         self,
-        release:
+        release: datetime,
         params: DictData,
         *,
+        release_type: ReleaseType = NORMAL,
         run_id: Optional[str] = None,
         parent_run_id: Optional[str] = None,
         audit: type[Audit] = None,
-        queue: Optional[ReleaseQueue] = None,
         override_log_name: Optional[str] = None,
         result: Optional[Result] = None,
         timeout: int = 600,
@@ -605,24 +393,19 @@ class Workflow(BaseModel):
             - Create release data for pass to parameter templating function.
             - Execute this workflow with mapping release data to its parameters.
             - Writing result audit
-            - Remove this release on the running queue
-            - Push this release to complete queue
 
-        :param release: A release datetime
+        :param release: (datetime) A release datetime.
         :param params: A workflow parameter that pass to execute method.
+        :param release_type:
         :param run_id: (str) A workflow running ID.
         :param parent_run_id: (str) A parent workflow running ID.
         :param audit: An audit class that want to save the execution result.
-        :param queue: (ReleaseQueue) A ReleaseQueue object.
         :param override_log_name: (str) An override logging name that use
             instead the workflow name.
         :param result: (Result) A result object for keeping context and status
             data.
         :param timeout: (int) A workflow execution time out in second unit.
 
-        :raise TypeError: If a queue parameter does not match with ReleaseQueue
-            type.
-
         :rtype: Result
         """
         audit: type[Audit] = audit or get_audit(extras=self.extras)
@@ -634,26 +417,16 @@ class Workflow(BaseModel):
             id_logic=name,
             extras=self.extras,
         )
-
-        # VALIDATE: check type of queue that valid with ReleaseQueue.
-        if queue is not None and not isinstance(queue, ReleaseQueue):
-            raise TypeError(
-                "The queue argument should be ReleaseQueue object only."
-            )
-
-        # VALIDATE: Change release value to Release object.
-        if isinstance(release, datetime):
-            release: Release = Release.from_dt(release)
-
+        release: datetime = self.validate_release(dt=release)
         result.trace.info(
-            f"[RELEASE]: Start {name!r} : {release
+            f"[RELEASE]: Start {name!r} : {release:%Y-%m-%d %H:%M:%S}"
         )
         tz: ZoneInfo = dynamic("tz", extras=self.extras)
         values: DictData = param2template(
             params,
             params={
                 "release": {
-                    "logical_date": release
+                    "logical_date": release,
                     "execute_date": datetime.now(tz=tz),
                     "run_id": result.run_id,
                 }
@@ -662,19 +435,19 @@ class Workflow(BaseModel):
         )
         rs: Result = self.execute(
             params=values,
-            result=result,
             parent_run_id=result.run_id,
             timeout=timeout,
         )
+        result.catch(status=rs.status, context=rs.context)
         result.trace.info(
-            f"[RELEASE]: End {name!r} : {release
+            f"[RELEASE]: End {name!r} : {release:%Y-%m-%d %H:%M:%S}"
         )
         result.trace.debug(f"[RELEASE]: Writing audit: {name!r}.")
         (
             audit(
                 name=name,
-                release=release
-                type=
+                release=release,
+                type=release_type,
                 context=result.context,
                 parent_run_id=result.parent_run_id,
                 run_id=result.run_id,
@@ -682,17 +455,13 @@ class Workflow(BaseModel):
                 extras=self.extras,
             ).save(excluded=None)
         )
-
-        if queue:
-            queue.mark_complete(release)
-
         return result.catch(
             status=rs.status,
             context={
                 "params": params,
                 "release": {
-                    "type":
-                    "logical_date": release
+                    "type": release_type,
+                    "logical_date": release,
                 },
                 **{"jobs": result.context.pop("jobs", {})},
                 **(
@@ -710,7 +479,7 @@ class Workflow(BaseModel):
         *,
         result: Optional[Result] = None,
         event: Optional[Event] = None,
-    ) -> Result:
+    ) -> tuple[Status, Result]:
         """Job execution with passing dynamic parameters from the main workflow
         execution to the target job object via job's ID.
 
@@ -718,7 +487,8 @@ class Workflow(BaseModel):
         model. It different with `self.execute` because this method run only
         one job and return with context of this job data.
 
-
+        This method do not raise any error, and it will handle all exception
+        from the job execution.
 
         :param job: (Job) A job model that want to execute.
         :param params: (DictData) A parameter data.
@@ -726,22 +496,19 @@ class Workflow(BaseModel):
         :param event: (Event) An Event manager instance that use to cancel this
             execution if it forces stopped by parent execution.
 
-        :rtype: Result
+        :rtype: tuple[Status, Result]
         """
         result: Result = result or Result(run_id=gen_id(self.name, unique=True))
 
-        if job.is_skipped(params=params):
-            result.trace.info(f"[WORKFLOW]: Skip Job: {job.id!r}")
-            job.set_outputs(output={"skipped": True}, to=params)
-            return result.catch(status=SKIP, context=params)
-
         if event and event.is_set():
-
+            error_msg: str = (
+                "Job execution was canceled because the event was set "
+                "before start job execution."
+            )
+            return CANCEL, result.catch(
                 status=CANCEL,
                 context={
-                    "errors":
-                    "Workflow job was canceled because event was set."
-                ).to_dict(),
+                    "errors": WorkflowCancelError(error_msg).to_dict(),
                 },
             )
 
@@ -753,18 +520,31 @@ class Workflow(BaseModel):
             event=event,
         )
         job.set_outputs(rs.context, to=params)
-
+
+        if rs.status == FAILED:
+            error_msg: str = f"Job execution, {job.id!r}, was failed."
+            return FAILED, result.catch(
+                status=FAILED,
+                context={
+                    "errors": WorkflowError(error_msg).to_dict(),
+                    **params,
+                },
+            )
+
+        elif rs.status == CANCEL:
             error_msg: str = (
-                f"Job, {job.id!r},
+                f"Job execution, {job.id!r}, was canceled from the event after "
+                f"end job execution."
             )
-            return result.catch(
-                status=
+            return CANCEL, result.catch(
+                status=CANCEL,
                 context={
-                    "errors":
+                    "errors": WorkflowCancelError(error_msg).to_dict(),
                     **params,
                 },
             )
-
+
+        return rs.status, result.catch(status=rs.status, context=params)
 
     def execute(
         self,
@@ -772,7 +552,6 @@ class Workflow(BaseModel):
         *,
         run_id: Optional[str] = None,
         parent_run_id: Optional[str] = None,
-        result: Optional[Result] = None,
         event: Optional[Event] = None,
         timeout: float = 3600,
         max_job_parallel: int = 2,
@@ -799,10 +578,17 @@ class Workflow(BaseModel):
         the job execution. It will warp that error and keep it in the key `errors`
         at the result context.
 
+
+            Execution --> Ok --> Result
+                                  |-status: CANCEL
+                                  ╰-context:
+                                      ╰-errors:
+                                          |-name: ...
+                                          ╰-message: ...
+
         :param params: A parameter data that will parameterize before execution.
         :param run_id: (Optional[str]) A workflow running ID.
         :param parent_run_id: (Optional[str]) A parent workflow running ID.
-        :param result: (Result) A Result instance for return context and status.
         :param event: (Event) An Event manager instance that use to cancel this
             execution if it forces stopped by parent execution.
         :param timeout: (float) A workflow execution time out in second unit
@@ -810,13 +596,12 @@ class Workflow(BaseModel):
             This value does not force stop the task that still running more than
             this limit time. (Default: 60 * 60 seconds)
         :param max_job_parallel: (int) The maximum workers that use for job
-            execution in `
+            execution in `ThreadPoolExecutor` object. (Default: 2 workers)
 
         :rtype: Result
         """
         ts: float = time.monotonic()
         result: Result = Result.construct_with_rs_or_id(
-            result,
             run_id=run_id,
             parent_run_id=parent_run_id,
             id_logic=self.name,
@@ -840,13 +625,26 @@ class Workflow(BaseModel):
             job_queue.put(job_id)
 
         not_timeout_flag: bool = True
+        total_job: int = len(self.jobs)
+        statuses: list[Status] = [WAIT] * total_job
+        skip_count: int = 0
+        sequence_statuses: list[Status] = []
         timeout: float = dynamic(
             "max_job_exec_timeout", f=timeout, extras=self.extras
         )
+        result.catch(status=WAIT, context=context)
+        if event and event.is_set():
+            return result.catch(
+                status=CANCEL,
+                context={
+                    "errors": WorkflowCancelError(
+                        "Execution was canceled from the event was set before "
+                        "workflow execution."
+                    ).to_dict(),
+                },
+            )
 
-        with ThreadPoolExecutor(
-            max_workers=max_job_parallel, thread_name_prefix="wf_exec_"
-        ) as executor:
+        with ThreadPoolExecutor(max_job_parallel, "wf") as executor:
             futures: list[Future] = []
 
             while not job_queue.empty() and (
@@ -863,16 +661,20 @@ class Workflow(BaseModel):
                     return result.catch(
                         status=FAILED,
                         context={
-                            "
+                            "status": FAILED,
+                            "errors": WorkflowError(
                                 f"Validate job trigger rule was failed with "
                                 f"{job.trigger_rule.value!r}."
-                            ).to_dict()
+                            ).to_dict(),
                         },
                     )
                 elif check == SKIP: # pragma: no cov
-                    result.trace.info(
-
+                    result.trace.info(
+                        f"[JOB]: Skip job: {job_id!r} from trigger rule."
+                    )
+                    job.set_outputs(output={"status": SKIP}, to=context)
                     job_queue.task_done()
+                    skip_count += 1
                     continue
 
                 if max_job_parallel > 1:
@@ -898,17 +700,22 @@ class Workflow(BaseModel):
                             event=event,
                         )
                     )
-
-
+                elif (future := futures.pop(0)).done():
+                    if e := future.exception():
+                        sequence_statuses.append(get_status_from_error(e))
+                    else:
+                        st, _ = future.result()
+                        sequence_statuses.append(st)
+                    job_queue.put(job_id)
+                elif future.cancelled():
+                    sequence_statuses.append(CANCEL)
                     job_queue.put(job_id)
                 elif future.running() or "state=pending" in str(future):
-                    time.sleep(0.075)
                     futures.insert(0, future)
                     job_queue.put(job_id)
                 else: # pragma: no cov
                     job_queue.put(job_id)
                     futures.insert(0, future)
-                    time.sleep(0.025)
                     result.trace.warning(
                         f"[WORKFLOW]: ... Execution non-threading not "
                         f"handle: {future}."
@@ -918,351 +725,43 @@ class Workflow(BaseModel):
 
             if not_timeout_flag:
                 job_queue.join()
-
-
-
-
-
-
-
-            result.trace.error(f"[WORKFLOW]: {self.name!r} was timeout.")
-            event.set()
-            for future in futures:
-                future.cancel()
+                total_future: int = 0
+                for i, future in enumerate(as_completed(futures), start=0):
+                    try:
+                        statuses[i], _ = future.result()
+                    except WorkflowError as e:
+                        statuses[i] = get_status_from_error(e)
+                    total_future += 1
 
-
-
-
-                    "errors": WorkflowException(
-                        f"{self.name!r} was timeout."
-                    ).to_dict()
-                },
-            )
+                # NOTE: Update skipped status from the job trigger.
+                for i in range(skip_count):
+                    statuses[total_future + i] = SKIP
 
+                # NOTE: Update status from none-parallel job execution.
+                for i, s in enumerate(sequence_statuses, start=0):
+                    statuses[total_future + skip_count + i] = s
 
-
-    """Workflow Poke model that was implemented the poke method."""
+                status: Status = validate_statuses(statuses)
 
-
-        self,
-        offset: float,
-        end_date: datetime,
-        queue: ReleaseQueue,
-        audit: type[Audit],
-        *,
-        force_run: bool = False,
-    ) -> ReleaseQueue:
-        """Generate Release from all on values from the on field and store them
-        to the ReleaseQueue object.
-
-        :param offset: An offset in second unit for time travel.
-        :param end_date: An end datetime object.
-        :param queue: A workflow queue object.
-        :param audit: An audit class that want to make audit object.
-        :param force_run: A flag that allow to release workflow if the audit
-            with that release was pointed.
-
-        :rtype: ReleaseQueue
-        """
-        for on in self.on:
-
-            queue.gen(
-                end_date,
-                audit,
-                on.next(get_dt_now(offset=offset).replace(microsecond=0)),
-                self.name,
-                force_run=force_run,
-            )
-
-        return queue
+                return result.catch(status=status, context=context)
 
-
-
-
-        start_date: Optional[datetime] = None,
-        *,
-        run_id: Optional[str] = None,
-        periods: int = 1,
-        audit: Optional[Audit] = None,
-        force_run: bool = False,
-        timeout: int = 1800,
-        max_poking_pool_worker: int = 2,
-    ) -> Result:
-        """Poke workflow with a start datetime value that will pass to its
-        `on` field on the threading executor pool for execute the `release`
-        method (It run all schedules that was set on the `on` values).
-
-        This method will observe its `on` field that nearing to run with the
-        `self.release()` method.
-
-        The limitation of this method is not allow run a date that gather
-        than the current date.
-
-        :param params: (DictData) A parameter data.
-        :param start_date: (datetime) A start datetime object.
-        :param run_id: (str) A workflow running ID for this poke.
-        :param periods: (int) A periods in minutes value that use to run this
-            poking. (Default is 1)
-        :param audit: (Audit) An audit object that want to use on this poking
-            process.
-        :param force_run: (bool) A flag that allow to release workflow if the
-            audit with that release was pointed. (Default is False)
-        :param timeout: (int) A second value for timeout while waiting all
-            futures run completely.
-        :param max_poking_pool_worker: (int) The maximum poking pool worker.
-            (Default is 2 workers)
-
-        :raise WorkflowException: If the periods parameter less or equal than 0.
-
-        :rtype: Result
-        :return: A list of all results that return from `self.release` method.
-        """
-        audit: type[Audit] = audit or get_audit(extras=self.extras)
-        result: Result = Result(
-            run_id=(run_id or gen_id(self.name, unique=True))
-        )
-
-        # VALIDATE: Check the periods value should gather than 0.
-        if periods <= 0:
-            raise WorkflowException(
-                "The period of poking should be `int` and grater or equal "
-                "than 1."
-            )
-
-        if len(self.on) == 0:
-            result.trace.warning(
-                f"[POKING]: {self.name!r} not have any schedule!!!"
-            )
-            return result.catch(status=SUCCESS, context={"outputs": []})
-
-        # NOTE: Create the current date that change microsecond to 0
-        current_date: datetime = datetime.now().replace(microsecond=0)
-
-        if start_date is None:
-            # NOTE: Force change start date if it gathers than the current date,
-            # or it does not pass to this method.
-            start_date: datetime = current_date
-            offset: float = 0
-        elif start_date <= current_date:
-            start_date = start_date.replace(microsecond=0)
-            offset: float = (current_date - start_date).total_seconds()
-        else:
-            raise WorkflowException(
-                f"The start datetime should less than or equal the current "
-                f"datetime, {current_date:%Y-%m-%d %H:%M:%S}."
-            )
-
-        # NOTE: The end date is using to stop generate queue with an input
-        # periods value. It will change to MM:59.
-        # For example:
-        # (input) start_date = 12:04:12, offset = 2
-        # (output) end_date = 12:06:59
-        end_date: datetime = start_date.replace(second=0) + timedelta(
-            minutes=periods + 1, seconds=-1
-        )
-
-        result.trace.info(
-            f"[POKING]: Execute Poking: {self.name!r} "
-            f"({start_date:%Y-%m-%d %H:%M:%S} ==> {end_date:%Y-%m-%d %H:%M:%S})"
-        )
-
-        params: DictData = {} if params is None else params
-        context: list[Result] = []
-        q: ReleaseQueue = ReleaseQueue()
+            event.set()
+            for future in futures:
+                future.cancel()
 
-
-
-
-            self.queue, offset, end_date, audit=audit, force_run=force_run
-        )
-        partial_queue(q)
-        if not q.is_queued:
-            result.trace.warning(
-                f"[POKING]: Skip {self.name!r}, not have any queue!!!"
+            result.trace.error(
+                f"[WORKFLOW]: {self.name!r} was timeout because it use exec "
+                f"time more than {timeout} seconds."
             )
-            return result.catch(status=SUCCESS, context={"outputs": []})
-
-        with ThreadPoolExecutor(
-            max_workers=dynamic(
-                "max_poking_pool_worker",
-                f=max_poking_pool_worker,
-                extras=self.extras,
-            ),
-            thread_name_prefix="wf_poking_",
-        ) as executor:
-
-            futures: list[Future] = []
-
-            while q.is_queued:
-
-                # NOTE: Pop the latest Release object from the release queue.
-                release: Release = heappop(q.queue)
-
-                if reach_next_minute(release.date, offset=offset):
-                    result.trace.debug(
-                        f"[POKING]: Skip Release: "
-                        f"{release.date:%Y-%m-%d %H:%M:%S}"
-                    )
-                    heappush(q.queue, release)
-                    wait_until_next_minute(get_dt_now(offset=offset))
 
-
-                    # about the every minute crontab.
-                    partial_queue(q)
-                    continue
-
-                heappush(q.running, release)
-                futures.append(
-                    executor.submit(
-                        self.release,
-                        release=release,
-                        params=params,
-                        audit=audit,
-                        queue=q,
-                        parent_run_id=result.run_id,
-                    )
-                )
-
-            partial_queue(q)
-
-            # WARNING: This poking method does not allow to use fail-fast
-            # logic to catching parallel execution result.
-            for future in as_completed(futures, timeout=timeout):
-                context.append(future.result())
+            time.sleep(0.0025)
 
             return result.catch(
-            status=
-            context={
-
-
-
-
-
-    """Workflow task Pydantic dataclass object that use to keep mapping data and
-    workflow model for passing to the multithreading task.
-
-    This dataclass object is mapping 1-to-1 with workflow and cron runner
-    objects.
-
-    This dataclass has the release method for itself that prepare necessary
-    arguments before passing to the parent release method.
-
-    :param alias: (str) An alias name of Workflow model.
-    :param workflow: (Workflow) A Workflow model instance.
-    :param runner: (CronRunner)
-    :param values: A value data that want to parameterize.
-    :param extras: An extra parameter that use to override core config values.
-    """
-
-    alias: str
-    workflow: Workflow
-    runner: CronRunner
-    values: DictData = field(default_factory=dict)
-    extras: DictData = field(default_factory=dict)
-
-    def release(
-        self,
-        release: Optional[Union[Release, datetime]] = None,
-        run_id: Optional[str] = None,
-        audit: type[Audit] = None,
-        queue: Optional[ReleaseQueue] = None,
-    ) -> Result:
-        """Release the workflow task that passing an override parameter to
-        the parent release method with the `values` field.
-
-        This method can handler not passing release value by default
-        generate step. It uses the `runner` field for generate release object.
-
-        :param release: A release datetime or Release object.
-        :param run_id: A workflow running ID for this release.
-        :param audit: An audit class that want to save the execution result.
-        :param queue: A ReleaseQueue object that use to mark complete.
-
-        :raise ValueError: If a queue parameter does not pass while release
-            is None.
-        :raise TypeError: If a queue parameter does not match with ReleaseQueue
-            type.
-
-        :rtype: Result
-        """
-        audit: type[Audit] = audit or get_audit(extras=self.extras)
-
-        if release is None:
-
-            if queue is None:
-                raise ValueError(
-                    "If pass None release value, you should to pass the queue"
-                    "for generate this release."
-                )
-            elif not isinstance(queue, ReleaseQueue):
-                raise TypeError(
-                    "The queue argument should be ReleaseQueue object only."
-                )
-
-            if queue.check_queue(self.runner.date):
-                release = self.runner.next
-
-                while queue.check_queue(release):
-                    release = self.runner.next
-            else:
-                release = self.runner.date
-
-        return self.workflow.release(
-            release=release,
-            params=self.values,
-            run_id=run_id,
-            audit=audit,
-            queue=queue,
-            override_log_name=self.alias,
-        )
-
-    def queue(
-        self,
-        end_date: datetime,
-        queue: ReleaseQueue,
-        audit: type[Audit],
-        *,
-        force_run: bool = False,
-    ) -> ReleaseQueue:
-        """Generate Release from the runner field and store it to the
-        ReleaseQueue object.
-
-        :param end_date: An end datetime object.
-        :param queue: A workflow queue object.
-        :param audit: An audit class that want to make audit object.
-        :param force_run: (bool) A flag that allow to release workflow if the
-            audit with that release was pointed.
-
-        :rtype: ReleaseQueue
-        """
-        return queue.gen(
-            end_date,
-            audit,
-            self.runner,
-            self.alias,
-            force_run=force_run,
-            extras=self.extras,
-        )
-
-    def __repr__(self) -> str:
-        """Override the `__repr__` method.
-
-        :rtype: str
-        """
-        return (
-            f"{self.__class__.__name__}(alias={self.alias!r}, "
-            f"workflow={self.workflow.name!r}, runner={self.runner!r}, "
-            f"values={self.values})"
+                status=FAILED,
+                context={
+                    "errors": WorkflowTimeoutError(
+                        f"{self.name!r} was timeout because it use exec time more "
+                        f"than {timeout} seconds."
+                    ).to_dict(),
+                },
             )
-
-    def __eq__(self, other: WorkflowTask) -> bool:
-        """Override the equal property that will compare only the same type.
-
-        :rtype: bool
-        """
-        if isinstance(other, WorkflowTask):
-            return (
-                self.workflow.name == other.workflow.name
-                and self.runner.cron == other.runner.cron
-            )
-        return NotImplemented
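A note on reading failures under the new error model: the renamed errors module gives every workflow error a to_dict() form, and the failure and timeout paths in this diff store it under the "errors" key of the result context as a name/message pair (see the docstring tree added in the execute() hunk above). A small, hypothetical helper for inspecting a finished result, assuming only the context layout this diff shows:

from typing import Optional

from ddeutil.workflow.result import FAILED, Result


def explain_failure(rs: Result) -> Optional[str]:
    """Return a short reason string when a workflow Result ended as FAILED.

    Assumes the context layout shown in this diff: on failure or timeout the
    workflow stores {"errors": {"name": ..., "message": ...}} in the context.
    """
    if rs.status != FAILED:
        return None
    errors = rs.context.get("errors", {})
    return f"{errors.get('name', 'WorkflowError')}: {errors.get('message', '')}"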