ddeutil-workflow 0.0.32__py3-none-any.whl → 0.0.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ddeutil/workflow/job.py CHANGED
@@ -38,12 +38,11 @@ from .exceptions import (
  StageException,
  UtilException,
  )
- from .result import Result
- from .stage import Stage
+ from .result import Result, Status
+ from .stages import Stage
  from .templates import has_template
  from .utils import (
  cross_product,
- cut_id,
  dash2underscore,
  filter_func,
  gen_id,
@@ -222,6 +221,8 @@ class RunsOn(str, Enum):

  local: str = "local"
  docker: str = "docker"
+ self_hosted: str = "self_hosted"
+ k8s: str = "k8s"


  class Job(BaseModel):
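The new self_hosted and k8s members behave like the existing local and docker values: RunsOn subclasses both str and Enum, so each member compares equal to its plain string. A minimal sketch of that behaviour (the import path follows this file, ddeutil/workflow/job.py):

    from ddeutil.workflow.job import RunsOn

    # Members added in 0.0.34 next to RunsOn.local and RunsOn.docker.
    assert RunsOn.self_hosted == "self_hosted"
    assert RunsOn.k8s == "k8s"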
@@ -399,10 +400,15 @@ class Job(BaseModel):
  # NOTE: If the job ID did not set, it will use index of jobs key
  # instead.
  _id: str = self.id or str(len(to["jobs"]) + 1)
+
+ errors: DictData = (
+ {"errors": output.pop("errors", {})} if "errors" in output else {}
+ )
+
  to["jobs"][_id] = (
- {"strategies": output}
+ {"strategies": output, **errors}
  if self.strategy.is_set()
- else output.get(next(iter(output), "DUMMY"), {})
+ else {**output.get(next(iter(output), "DUMMY"), {}), **errors}
  )
  return to

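In other words, set_outputs now forwards any "errors" entry from the execution output into the per-job context instead of dropping it. A rough sketch of the resulting mapping for a matrix job; the job id "first-job", the strategy hash, the matrix values, and the messages are placeholders, and JobException is assumed to live in ddeutil.workflow.exceptions as job.py's relative imports suggest:

    from ddeutil.workflow.exceptions import JobException

    to = {
        "jobs": {
            "first-job": {
                "strategies": {
                    "2150810470": {"matrix": {"table": "customer"}, "stages": {}},
                },
                # Present only when the execution output carried an "errors" key.
                "errors": {
                    "class": JobException("Stage execution error: ..."),
                    "name": "JobException",
                    "message": "Stage execution error: ...",
                },
            },
        },
    }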
@@ -411,7 +417,7 @@ class Job(BaseModel):
  strategy: DictData,
  params: DictData,
  *,
- run_id: str | None = None,
+ result: Result | None = None,
  event: Event | None = None,
  ) -> Result:
  """Job Strategy execution with passing dynamic parameters from the
@@ -430,14 +436,16 @@ class Job(BaseModel):
  :param strategy: A strategy metrix value that use on this execution.
  This value will pass to the `matrix` key for templating.
  :param params: A dynamic parameters that will deepcopy to the context.
- :param run_id: A job running ID for this strategy execution.
+ :param result: (Result) A result object for keeping context and status
+ data.
  :param event: An event manager that pass to the PoolThreadExecutor.

  :rtype: Result
  """
- run_id: str = run_id or gen_id(self.id or "", unique=True)
+ if result is None: # pragma: no cov
+ result: Result = Result(run_id=gen_id(self.id or "", unique=True))
+
  strategy_id: str = gen_id(strategy)
- rs: Result = Result(run_id=run_id)

  # PARAGRAPH:
  #
@@ -458,18 +466,14 @@ class Job(BaseModel):
  for stage in self.stages:

  if stage.is_skipped(params=context):
- logger.info(
- f"({cut_id(run_id)}) [JOB]: Skip stage: {stage.iden!r}"
- )
+ result.trace.info(f"[JOB]: Skip stage: {stage.iden!r}")
  continue

- logger.info(
- f"({cut_id(run_id)}) [JOB]: Execute stage: {stage.iden!r}"
- )
+ result.trace.info(f"[JOB]: Execute stage: {stage.iden!r}")

  # NOTE: Logging a matrix that pass on this stage execution.
  if strategy:
- logger.info(f"({cut_id(run_id)}) [JOB]: ... Matrix: {strategy}")
+ result.trace.info(f"[JOB]: ... Matrix: {strategy}")

  # NOTE: Force stop this execution if event was set from main
  # execution.
@@ -478,7 +482,7 @@ class Job(BaseModel):
  "Job strategy was canceled from event that had set before "
  "strategy execution."
  )
- return rs.catch(
+ return result.catch(
  status=1,
  context={
  strategy_id: {
@@ -489,8 +493,11 @@ class Job(BaseModel):
  # "stages": filter_func(context.pop("stages", {})),
  #
  "stages": context.pop("stages", {}),
- "error": JobException(error_msg),
- "error_message": error_msg,
+ "errors": {
+ "class": JobException(error_msg),
+ "name": "JobException",
+ "message": error_msg,
+ },
  },
  },
  )
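This flat-to-nested change to error data repeats across the release: the separate "error" and "error_message" keys inside each strategy context are folded into one "errors" mapping. A sketch of the two shapes with placeholder values (JobException assumed from ddeutil.workflow.exceptions, as above):

    from ddeutil.workflow.exceptions import JobException

    # 0.0.32: flat keys per strategy context.
    old_context = {
        "matrix": {},
        "stages": {},
        "error": JobException("..."),
        "error_message": "JobException: ...",
    }

    # 0.0.34: one nested "errors" mapping per strategy context.
    new_context = {
        "matrix": {},
        "stages": {},
        "errors": {
            "class": JobException("..."),
            "name": "JobException",
            "message": "JobException: ...",
        },
    }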
@@ -513,30 +520,40 @@ class Job(BaseModel):
  # "stages": { { "stage-id-1": ... }, ... }
  # }
  #
+ # IMPORTANT:
+ # This execution change all stage running IDs to the current job
+ # running ID, but it still trac log to the same parent running ID
+ # (with passing `run_id` and `parent_run_id` to the stage
+ # execution arguments).
+ #
  try:
  stage.set_outputs(
  stage.handler_execute(
- params=context, run_id=run_id
+ params=context,
+ run_id=result.run_id,
+ parent_run_id=result.parent_run_id,
  ).context,
  to=context,
  )
  except (StageException, UtilException) as err:
- logger.error(
- f"({cut_id(run_id)}) [JOB]: {err.__class__.__name__}: {err}"
- )
+ result.trace.error(f"[JOB]: {err.__class__.__name__}: {err}")
  if config.job_raise_error:
  raise JobException(
- f"Get stage execution error: {err.__class__.__name__}: "
+ f"Stage execution error: {err.__class__.__name__}: "
  f"{err}"
  ) from None
- return rs.catch(
+
+ return result.catch(
  status=1,
  context={
  strategy_id: {
  "matrix": strategy,
  "stages": context.pop("stages", {}),
- "error": err,
- "error_message": f"{err.__class__.__name__}: {err}",
+ "errors": {
+ "class": err,
+ "name": err.__class__.__name__,
+ "message": f"{err.__class__.__name__}: {err}",
+ },
  },
  },
  )
@@ -544,8 +561,8 @@ class Job(BaseModel):
  # NOTE: Remove the current stage object for saving memory.
  del stage

- return rs.catch(
- status=0,
+ return result.catch(
+ status=Status.SUCCESS,
  context={
  strategy_id: {
  "matrix": strategy,
@@ -554,36 +571,49 @@ class Job(BaseModel):
  },
  )

- def execute(self, params: DictData, run_id: str | None = None) -> Result:
+ def execute(
+ self,
+ params: DictData,
+ *,
+ run_id: str | None = None,
+ parent_run_id: str | None = None,
+ result: Result | None = None,
+ ) -> Result:
  """Job execution with passing dynamic parameters from the workflow
  execution. It will generate matrix values at the first step and run
  multithread on this metrics to the ``stages`` field of this job.

  :param params: An input parameters that use on job execution.
  :param run_id: A job running ID for this execution.
+ :param parent_run_id: A parent workflow running ID for this release.
+ :param result: (Result) A result object for keeping context and status
+ data.

  :rtype: Result
  """

  # NOTE: I use this condition because this method allow passing empty
  # params and I do not want to create new dict object.
- run_id: str = run_id or gen_id(self.id or "", unique=True)
- context: DictData = {}
+ if result is None: # pragma: no cov
+ result: Result = Result(
+ run_id=(run_id or gen_id(self.id or "", unique=True)),
+ parent_run_id=parent_run_id,
+ )
+ elif parent_run_id:
+ result.set_parent_run_id(parent_run_id)

  # NOTE: Normal Job execution without parallel strategy matrix. It uses
  # for-loop to control strategy execution sequentially.
  if (not self.strategy.is_set()) or self.strategy.max_parallel == 1:
+
  for strategy in self.strategy.make():
- rs: Result = self.execute_strategy(
+ result: Result = self.execute_strategy(
  strategy=strategy,
  params=params,
- run_id=run_id,
+ result=result,
  )
- context.update(rs.context)
- return Result(
- status=0,
- context=context,
- )
+
+ return result.catch(status=Status.SUCCESS)

  # NOTE: Create event for cancel executor by trigger stop running event.
  event: Event = Event()
@@ -600,117 +630,56 @@ class Job(BaseModel):
  self.execute_strategy,
  strategy=strategy,
  params=params,
- run_id=run_id,
+ result=result,
  event=event,
  )
  for strategy in self.strategy.make()
  ]

- return (
- self.__catch_fail_fast(event, futures=futures, run_id=run_id)
- if self.strategy.fail_fast
- else self.__catch_all_completed(futures=futures, run_id=run_id)
- )
-
- @staticmethod
- def __catch_fail_fast(
- event: Event,
- futures: list[Future],
- run_id: str,
- *,
- timeout: int = 1800,
- ) -> Result:
- """Job parallel pool futures catching with fail-fast mode. That will
- stop and set event on all not done futures if it receives the first
- exception from all running futures.
-
- :param event: An event manager instance that able to set stopper on the
- observing multithreading.
- :param futures: A list of futures.
- :param run_id: A job running ID from execution.
- :param timeout: A timeout to waiting all futures complete.
+ context: DictData = {}
+ status: Status = Status.SUCCESS
+ fail_fast_flag: bool = self.strategy.fail_fast

- :rtype: Result
- """
- rs_final: Result = Result(run_id=run_id)
- context: DictData = {}
- status: int = 0
-
- # NOTE: Get results from a collection of tasks with a timeout that has
- # the first exception.
- done, not_done = wait(
- futures, timeout=timeout, return_when=FIRST_EXCEPTION
- )
- nd: str = (
- f", the strategies do not run is {not_done}" if not_done else ""
- )
- logger.debug(f"({cut_id(run_id)}) [JOB]: Strategy is set Fail Fast{nd}")
-
- # NOTE:
- # Stop all running tasks with setting the event manager and cancel
- # any scheduled tasks.
- #
- if len(done) != len(futures):
- event.set()
- for future in not_done:
- future.cancel()
-
- future: Future
- for future in done:
-
- # NOTE: Handle the first exception from feature
- if err := future.exception():
- status: int = 1
- logger.error(
- f"({cut_id(run_id)}) [JOB]: Fail-fast catching:\n\t"
- f"{future.exception()}"
+ if fail_fast_flag:
+ # NOTE: Get results from a collection of tasks with a timeout
+ # that has the first exception.
+ done, not_done = wait(
+ futures, timeout=1800, return_when=FIRST_EXCEPTION
  )
- context.update(
- {
- "error": err,
- "error_message": f"{err.__class__.__name__}: {err}",
- },
- )
- continue
-
- # NOTE: Update the result context to main job context.
- context.update(future.result().context)
-
- return rs_final.catch(status=status, context=context)
-
- @staticmethod
- def __catch_all_completed(
- futures: list[Future],
- run_id: str,
- *,
- timeout: int = 1800,
- ) -> Result:
- """Job parallel pool futures catching with all-completed mode.
-
- :param futures: A list of futures.
- :param run_id: A job running ID from execution.
- :param timeout: A timeout to waiting all futures complete.
-
- :rtype: Result
- """
- rs_final: Result = Result(run_id=run_id)
- context: DictData = {}
- status: int = 0
-
- for future in as_completed(futures, timeout=timeout):
- try:
- context.update(future.result().context)
- except JobException as err:
- status = 1
- logger.error(
- f"({cut_id(run_id)}) [JOB]: All-completed catching:\n\t"
- f"{err.__class__.__name__}:\n\t{err}"
- )
- context.update(
- {
- "error": err,
- "error_message": f"{err.__class__.__name__}: {err}",
- },
+ nd: str = (
+ f", the strategies do not run is {not_done}"
+ if not_done
+ else ""
  )
+ result.trace.debug(f"[JOB]: Strategy is set Fail Fast{nd}")
+
+ # NOTE: Stop all running tasks with setting the event manager
+ # and cancel any scheduled tasks.
+ if len(done) != len(futures):
+ event.set()
+ for future in not_done:
+ future.cancel()
+ else:
+ done = as_completed(futures, timeout=1800)
+
+ for future in done:
+ try:
+ future.result()
+ except JobException as err:
+ status = Status.FAILED
+ ls: str = "Fail-Fast" if fail_fast_flag else "All-Completed"
+ result.trace.error(
+ f"[JOB]: {ls} Catch:\n\t{err.__class__.__name__}:"
+ f"\n\t{err}"
+ )
+ context.update(
+ {
+ "errors": {
+ "class": err,
+ "name": err.__class__.__name__,
+ "message": f"{err.__class__.__name__}: {err}",
+ },
+ },
+ )

- return rs_final.catch(status=status, context=context)
+ return result.catch(status=status, context=context)
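Taken together, the job.py changes route everything through a shared Result object: run_id, parent_run_id, and trace logging travel with it instead of being passed around as bare strings. A minimal calling sketch; it assumes a Job instance has already been loaded elsewhere (how jobs are built is outside this diff), and the run-id seeds and params are placeholders:

    from ddeutil.workflow.job import Job
    from ddeutil.workflow.result import Result
    from ddeutil.workflow.utils import gen_id

    def run_job(job: Job, params: dict) -> Result:
        # One Result carries both run IDs, so the job and its stages trace
        # under the same parent run instead of receiving a bare run_id string.
        rs = Result(
            run_id=gen_id("manual", unique=True),
            parent_run_id=gen_id("my-workflow", unique=True),
        )
        return job.execute(params=params, result=rs)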
ddeutil/workflow/result.py CHANGED
@@ -3,22 +3,173 @@
  # Licensed under the MIT License. See LICENSE in the project root for
  # license information.
  # ------------------------------------------------------------------------------
+ """This is the Result module. It is the data context transfer objects that use
+ by all object in this package.
+ """
  from __future__ import annotations

+ import os
+ from abc import ABC, abstractmethod
  from dataclasses import field
+ from datetime import datetime
+ from enum import IntEnum
+ from inspect import Traceback, currentframe, getframeinfo
+ from pathlib import Path
+ from threading import Event, get_ident
  from typing import Optional

+ from pydantic import ConfigDict
  from pydantic.dataclasses import dataclass
- from pydantic.functional_validators import model_validator
  from typing_extensions import Self

  from .__types import DictData, TupleStr
- from .utils import gen_id
+ from .conf import config, get_logger
+ from .utils import cut_id, gen_id, get_dt_now

- __all__: TupleStr = ("Result",)
+ logger = get_logger("ddeutil.workflow")

+ __all__: TupleStr = (
+ "Result",
+ "Status",
+ "TraceLog",
+ "default_gen_id",
+ "get_dt_tznow",
+ )

- @dataclass
+
+ def default_gen_id() -> str:
+ """Return running ID which use for making default ID for the Result model if
+ a run_id field initializes at the first time.
+
+ :rtype: str
+ """
+ return gen_id("manual", unique=True)
+
+
+ def get_dt_tznow() -> datetime:
+ """Return the current datetime object that passing the config timezone.
+
+ :rtype: datetime
+ """
+ return get_dt_now(tz=config.tz)
+
+
+ class Status(IntEnum):
+ """Status Int Enum object."""
+
+ SUCCESS: int = 0
+ FAILED: int = 1
+ WAIT: int = 2
+
+
+ @dataclass(frozen=True)
+ class BaseTraceLog(ABC): # pragma: no cov
+ """Base Trace Log dataclass object."""
+
+ run_id: str
+ parent_run_id: Optional[str] = None
+
+ @abstractmethod
+ def writer(self, message: str, is_err: bool = False) -> None: ...
+
+ @abstractmethod
+ def make_message(self, message: str) -> str: ...
+
+ def debug(self, message: str):
+ msg: str = self.make_message(message)
+
+ # NOTE: Write file if debug mode.
+ if config.debug:
+ self.writer(msg)
+
+ logger.debug(msg, stacklevel=2)
+
+ def info(self, message: str):
+ msg: str = self.make_message(message)
+ self.writer(msg)
+ logger.info(msg, stacklevel=2)
+
+ def warning(self, message: str):
+ msg: str = self.make_message(message)
+ self.writer(msg)
+ logger.warning(msg, stacklevel=2)
+
+ def error(self, message: str):
+ msg: str = self.make_message(message)
+ self.writer(msg, is_err=True)
+ logger.error(msg, stacklevel=2)
+
+
+ class TraceLog(BaseTraceLog): # pragma: no cov
+ """Trace Log object that write file to the local storage."""
+
+ @property
+ def log_file(self) -> Path:
+ log_file: Path = (
+ config.log_path / f"run_id={self.parent_run_id or self.run_id}"
+ )
+ if not log_file.exists():
+ log_file.mkdir(parents=True)
+ return log_file
+
+ @property
+ def cut_id(self) -> str:
+ """Combine cutting ID of parent running ID if it set."""
+ cut_run_id: str = cut_id(self.run_id)
+ if not self.parent_run_id:
+ return f"{cut_run_id} -> {' ' * 6}"
+
+ cut_parent_run_id: str = cut_id(self.parent_run_id)
+ return f"{cut_parent_run_id} -> {cut_run_id}"
+
+ def make_message(self, message: str) -> str:
+ return f"({self.cut_id}) {message}"
+
+ def writer(self, message: str, is_err: bool = False) -> None:
+ """The path of logging data will store by format:
+
+ ... ./logs/run_id=<run-id>/stdout.txt
+ ... ./logs/run_id=<run-id>/stderr.txt
+
+ :param message:
+ :param is_err:
+ """
+ if not config.enable_write_log:
+ return
+
+ frame_info: Traceback = getframeinfo(currentframe().f_back.f_back)
+ filename: str = frame_info.filename.split(os.path.sep)[-1]
+ lineno: int = frame_info.lineno
+
+ # NOTE: set process and thread IDs.
+ process: int = os.getpid()
+ thread: int = get_ident()
+
+ write_file: str = "stderr.txt" if is_err else "stdout.txt"
+ with (self.log_file / write_file).open(
+ mode="at", encoding="utf-8"
+ ) as f:
+ msg_fmt: str = f"{config.log_format_file}\n"
+ print(msg_fmt)
+ f.write(
+ msg_fmt.format(
+ **{
+ "datetime": get_dt_tznow().strftime(
+ config.log_datetime_format
+ ),
+ "process": process,
+ "thread": thread,
+ "message": message,
+ "filename": filename,
+ "lineno": lineno,
+ }
+ )
+ )
+
+
+ @dataclass(
+ config=ConfigDict(arbitrary_types_allowed=True, use_enum_values=True)
+ )
  class Result:
  """Result Pydantic Model for passing and receiving data context from any
  module execution process like stage execution, job execution, or workflow
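The new TraceLog is what replaces the old logger.info(f"({cut_id(run_id)}) ...") calls in job.py: it prefixes every message with the cut run IDs and, when config.enable_write_log is on, also appends it to per-run files under config.log_path. A small sketch using only names defined above; the run-id value is made up:

    from ddeutil.workflow.result import TraceLog

    trace = TraceLog(run_id="20240101081330000000T97028960402")
    # Logged through the package logger; also appended to
    # <config.log_path>/run_id=<run-id>/stdout.txt when enable_write_log is on.
    trace.info("[JOB]: Execute stage: 'first-stage'")
    # Error messages are appended to stderr.txt instead of stdout.txt.
    trace.error("[JOB]: Stage execution error: ValueError: ...")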
@@ -28,22 +179,34 @@ class Result:
  and ``_run_id`` fields to comparing with other result instance.
  """

- status: int = field(default=2)
+ status: Status = field(default=Status.WAIT)
  context: DictData = field(default_factory=dict)
- run_id: Optional[str] = field(default=None)
+ run_id: Optional[str] = field(default_factory=default_gen_id)

  # NOTE: Ignore this field to compare another result model with __eq__.
  parent_run_id: Optional[str] = field(default=None, compare=False)
+ event: Event = field(default_factory=Event, compare=False)
+ ts: datetime = field(default_factory=get_dt_tznow, compare=False)

- @model_validator(mode="after")
- def __prepare_run_id(self) -> Self:
- """Prepare running ID which use default ID if it initializes at the
- first time.
-
- :rtype: Self
+ @classmethod
+ def construct_with_rs_or_id(
+ cls,
+ result: Result | None = None,
+ run_id: str | None = None,
+ parent_run_id: str | None = None,
+ id_logic: str | None = None,
+ ) -> Self: # pragma: no cov
+ """Create the Result object or set parent running id if passing Result
+ object.
  """
- self._run_id = gen_id("manual", unique=True)
- return self
+ if result is None:
+ result: Result = cls(
+ run_id=(run_id or gen_id(id_logic or "", unique=True)),
+ parent_run_id=parent_run_id,
+ )
+ elif parent_run_id:
+ result.set_parent_run_id(parent_run_id)
+ return result

  def set_run_id(self, running_id: str) -> Self:
  """Set a running ID.
@@ -51,7 +214,7 @@ class Result:
  :param running_id: A running ID that want to update on this model.
  :rtype: Self
  """
- self.run_id = running_id
+ self.run_id: str = running_id
  return self

  def set_parent_run_id(self, running_id: str) -> Self:
@@ -63,41 +226,31 @@ class Result:
  self.parent_run_id: str = running_id
  return self

- def catch(self, status: int, context: DictData) -> Self:
- """Catch the status and context to current data."""
- self.__dict__["status"] = status
- self.__dict__["context"].update(context)
- return self
-
- def receive(self, result: Result) -> Self:
- """Receive context from another result object.
+ def catch(
+ self,
+ status: int | Status,
+ context: DictData | None = None,
+ ) -> Self:
+ """Catch the status and context to this Result object. This method will
+ use between a child execution return a result, and it wants to pass
+ status and context to this object.

- :rtype: Self
+ :param status:
+ :param context:
  """
- self.__dict__["status"] = result.status
- self.__dict__["context"].update(result.context)
-
- # NOTE: Update running ID from an incoming result.
- self.parent_run_id = result.parent_run_id
- self.run_id = result.run_id
+ self.__dict__["status"] = (
+ Status(status) if isinstance(status, int) else status
+ )
+ self.__dict__["context"].update(context or {})
  return self

- def receive_jobs(self, result: Result) -> Self:
- """Receive context from another result object that use on the workflow
- execution which create a ``jobs`` keys on the context if it does not
- exist.
+ @property
+ def trace(self) -> TraceLog:
+ """Return TraceLog object that passing its running ID.

- :rtype: Self
+ :rtype: TraceLog
  """
- self.__dict__["status"] = result.status
-
- # NOTE: Check the context has jobs key.
- if "jobs" not in self.__dict__["context"]:
- self.__dict__["context"]["jobs"] = {}
+ return TraceLog(self.run_id, self.parent_run_id)

- self.__dict__["context"]["jobs"].update(result.context)
-
- # NOTE: Update running ID from an incoming result.
- self.parent_run_id: str = result.parent_run_id
- self.run_id: str = result.run_id
- return self
+ def alive_time(self) -> float: # pragma: no cov
+ return (get_dt_tznow() - self.ts).total_seconds()
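Finally, the reworked Result drops receive() and receive_jobs() in favour of catch(), and exposes Status plus the trace writer directly. A short usage sketch built only from the fields and methods shown in this diff; the context values and the parent run ID are placeholders:

    from ddeutil.workflow.result import Result, Status

    rs = Result()  # run_id is generated by default_gen_id()
    rs.set_parent_run_id("20240101081330000000T97028960402")

    rs.trace.info("[WORKFLOW]: Start release")
    rs.catch(status=Status.WAIT, context={"params": {"name": "demo"}})

    # Plain ints are coerced, so the old status=0 call sites still work.
    rs.catch(status=0, context={"jobs": {"first-job": {"stages": {}}}})
    assert rs.status == Status.SUCCESS
    print(f"{rs.alive_time():.3f}s since this result was created")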