ddeutil-workflow 0.0.34__py3-none-any.whl → 0.0.35__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in a supported public registry. It is provided for informational purposes only.
@@ -1 +1 @@
-__version__: str = "0.0.34"
+__version__: str = "0.0.35"
@@ -9,7 +9,7 @@ from .audit import (
     Audit,
     get_audit,
 )
-from .call import (
+from .caller import (
     ReturnTagFunc,
     TagFunc,
     extract_call,
@@ -39,6 +39,11 @@ from .job import (
     Job,
     Strategy,
 )
+from .logs import (
+    TraceLog,
+    get_dt_tznow,
+    get_trace,
+)
 from .params import (
     ChoiceParam,
     DatetimeParam,
@@ -49,9 +54,7 @@ from .params import (
 from .result import (
     Result,
     Status,
-    TraceLog,
     default_gen_id,
-    get_dt_tznow,
 )
 from .scheduler import (
     Schedule,
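
In this first hunk group (apparently the package root __init__), the trace helpers TraceLog, get_dt_tznow, and get_trace move out of the result module into the new logs module, while the root keeps re-exporting them. A minimal import sketch, assuming 0.0.35 is installed (both paths should resolve):

    from ddeutil.workflow import TraceLog, get_dt_tznow, get_trace  # root re-exports
    from ddeutil.workflow.logs import get_trace                     # new canonical home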
@@ -20,6 +20,7 @@ from ..conf import config, get_logger
 from ..scheduler import ReleaseThread, ReleaseThreads
 from ..workflow import ReleaseQueue, WorkflowTask
 from .repeat import repeat_at
+from .routes import log
 
 load_dotenv()
 logger = get_logger("ddeutil.workflow")
@@ -77,22 +78,26 @@ async def health():
     return {"message": "Workflow API already start up"}
 
 
-# NOTE: Enable the workflow route.
+# NOTE Add the logs route by default.
+app.include_router(log, prefix=config.prefix_path)
+
+
+# NOTE: Enable the workflows route.
 if config.enable_route_workflow:
-    from .route import workflow_route
+    from .routes import workflow
 
-    app.include_router(workflow_route, prefix=config.prefix_path)
+    app.include_router(workflow, prefix=config.prefix_path)
 
 
-# NOTE: Enable the schedule route.
+# NOTE: Enable the schedules route.
 if config.enable_route_schedule:
     from ..audit import get_audit
     from ..scheduler import schedule_task
-    from .route import schedule_route
+    from .routes import schedule
 
-    app.include_router(schedule_route, prefix=config.prefix_path)
+    app.include_router(schedule, prefix=config.prefix_path)
 
-    @schedule_route.on_event("startup")
+    @schedule.on_event("startup")
     @repeat_at(cron="* * * * *", delay=2)
     def scheduler_listener():
         """Schedule broker every minute at 02 second."""
@@ -109,7 +114,7 @@ if config.enable_route_schedule:
             log=get_audit(),
         )
 
-    @schedule_route.on_event("startup")
+    @schedule.on_event("startup")
     @repeat_at(cron="*/5 * * * *", delay=10)
     def monitoring():
         logger.debug("[MONITOR]: Start monitoring threading.")
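
The net effect: the logs router is now mounted unconditionally, while the workflows and schedules routers stay behind their config flags. A standalone sketch of the same wiring pattern (a hypothetical PREFIX constant stands in for config.prefix_path; this is not the package's actual module):

    from fastapi import APIRouter, FastAPI

    PREFIX = "/api"  # assumption: whatever config.prefix_path resolves to
    app = FastAPI()

    log = APIRouter(prefix="/logs", tags=["logs"])

    @log.get("/")
    async def get_logs():
        return {"message": "Getting logs"}

    # Mounted unconditionally, like the logs route above; the workflows and
    # schedules routers would sit inside `if config.enable_route_*:` guards.
    app.include_router(log, prefix=PREFIX)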
@@ -0,0 +1,8 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) 2022 Korawich Anuttra. All rights reserved.
+# Licensed under the MIT License. See LICENSE in the project root for
+# license information.
+# ------------------------------------------------------------------------------
+from .logs import log_route as log
+from .schedules import schedule_route as schedule
+from .workflows import workflow_route as workflow
@@ -0,0 +1,36 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) 2022 Korawich Anuttra. All rights reserved.
+# Licensed under the MIT License. See LICENSE in the project root for
+# license information.
+# ------------------------------------------------------------------------------
+from __future__ import annotations
+
+from fastapi import APIRouter
+from fastapi.responses import UJSONResponse
+
+from ...conf import get_logger
+from ...logs import get_trace_obj
+
+logger = get_logger("ddeutil.workflow")
+
+
+# NOTE: Start create the schedule routes.
+#
+log_route = APIRouter(
+    prefix="/logs",
+    tags=["logs"],
+    default_response_class=UJSONResponse,
+)
+
+
+@log_route.get(path="/")
+async def get_logs():
+    return {
+        "message": "Getting logs",
+        "audits": list(get_trace_obj().find_logs()),
+    }
+
+
+@log_route.get(path="/{run_id}")
+async def get_log_with_run_id(run_id: str):
+    return get_trace_obj().find_log_with_id(run_id)
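
A hypothetical client session against the two new endpoints (host, port, and an empty prefix path are assumptions; the run ID is a placeholder):

    import httpx

    base = "http://localhost:8000"
    print(httpx.get(f"{base}/logs/").json())             # all trace logs
    print(httpx.get(f"{base}/logs/demo-run-id").json())  # one run's trace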
@@ -6,30 +6,17 @@
 from __future__ import annotations
 
 import copy
-from dataclasses import asdict
 from datetime import datetime, timedelta
-from typing import Any
 
 from fastapi import APIRouter, HTTPException, Request
 from fastapi import status as st
 from fastapi.responses import UJSONResponse
-from pydantic import BaseModel
 
-from ..__types import DictData
-from ..audit import Audit, get_audit
-from ..conf import Loader, config, get_logger
-from ..result import Result
-from ..scheduler import Schedule
-from ..workflow import Workflow
+from ...conf import config, get_logger
+from ...scheduler import Schedule
 
 logger = get_logger("ddeutil.workflow")
 
-workflow_route = APIRouter(
-    prefix="/workflows",
-    tags=["workflows"],
-    default_response_class=UJSONResponse,
-)
-
 schedule_route = APIRouter(
     prefix="/schedules",
     tags=["schedules"],
@@ -37,122 +24,6 @@ schedule_route = APIRouter(
 )
 
 
-@workflow_route.get(path="/")
-async def get_workflows() -> DictData:
-    """Return all workflow workflows that exists in config path."""
-    workflows: DictData = dict(Loader.finds(Workflow))
-    return {
-        "message": f"Getting all workflows: {len(workflows)}",
-        "count": len(workflows),
-        "workflows": workflows,
-    }
-
-
-@workflow_route.get(path="/{name}")
-async def get_workflow_by_name(name: str) -> DictData:
-    """Return model of workflow that passing an input workflow name."""
-    try:
-        workflow: Workflow = Workflow.from_loader(name=name, externals={})
-    except ValueError as err:
-        logger.exception(err)
-        raise HTTPException(
-            status_code=st.HTTP_404_NOT_FOUND,
-            detail=(
-                f"Workflow workflow name: {name!r} does not found in /conf path"
-            ),
-        ) from None
-    return workflow.model_dump(
-        by_alias=True,
-        exclude_none=True,
-        exclude_unset=True,
-        exclude_defaults=True,
-    )
-
-
-class ExecutePayload(BaseModel):
-    params: dict[str, Any]
-
-
-@workflow_route.post(path="/{name}/execute", status_code=st.HTTP_202_ACCEPTED)
-async def execute_workflow(name: str, payload: ExecutePayload) -> DictData:
-    """Return model of workflow that passing an input workflow name."""
-    try:
-        workflow: Workflow = Workflow.from_loader(name=name, externals={})
-    except ValueError:
-        raise HTTPException(
-            status_code=st.HTTP_404_NOT_FOUND,
-            detail=(
-                f"Workflow workflow name: {name!r} does not found in /conf path"
-            ),
-        ) from None
-
-    # NOTE: Start execute manually
-    try:
-        result: Result = workflow.execute(params=payload.params)
-    except Exception as err:
-        raise HTTPException(
-            status_code=st.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=f"{type(err)}: {err}",
-        ) from None
-
-    return asdict(result)
-
-
-@workflow_route.get(path="/{name}/logs")
-async def get_workflow_logs(name: str):
-    try:
-        return {
-            "message": f"Getting workflow {name!r} logs",
-            "logs": [
-                log.model_dump(
-                    by_alias=True,
-                    exclude_none=True,
-                    exclude_unset=True,
-                    exclude_defaults=True,
-                )
-                for log in get_audit().find_audits(name=name)
-            ],
-        }
-    except FileNotFoundError:
-        raise HTTPException(
-            status_code=st.HTTP_404_NOT_FOUND,
-            detail=f"Does not found log for workflow {name!r}",
-        ) from None
-
-
-@workflow_route.get(path="/{name}/logs/{release}")
-async def get_workflow_release_log(name: str, release: str):
-    try:
-        log: Audit = get_audit().find_audit_with_release(
-            name=name, release=datetime.strptime(release, "%Y%m%d%H%M%S")
-        )
-    except FileNotFoundError:
-        raise HTTPException(
-            status_code=st.HTTP_404_NOT_FOUND,
-            detail=(
-                f"Does not found log for workflow {name!r} "
-                f"with release {release!r}"
-            ),
-        ) from None
-    return {
-        "message": f"Getting workflow {name!r} log in release {release}",
-        "log": log.model_dump(
-            by_alias=True,
-            exclude_none=True,
-            exclude_unset=True,
-            exclude_defaults=True,
-        ),
-    }
-
-
-@workflow_route.delete(
-    path="/{name}/logs/{release}",
-    status_code=st.HTTP_204_NO_CONTENT,
-)
-async def del_workflow_release_log(name: str, release: str):
-    return {"message": f"Deleted workflow {name!r} log in release {release}"}
-
-
 @schedule_route.get(path="/{name}")
 async def get_schedules(name: str):
     try:
@@ -0,0 +1,137 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) 2022 Korawich Anuttra. All rights reserved.
+# Licensed under the MIT License. See LICENSE in the project root for
+# license information.
+# ------------------------------------------------------------------------------
+from __future__ import annotations
+
+from dataclasses import asdict
+from datetime import datetime
+from typing import Any
+
+from fastapi import APIRouter, HTTPException
+from fastapi import status as st
+from fastapi.responses import UJSONResponse
+from pydantic import BaseModel
+
+from ...__types import DictData
+from ...audit import Audit, get_audit
+from ...conf import Loader, get_logger
+from ...result import Result
+from ...workflow import Workflow
+
+logger = get_logger("ddeutil.workflow")
+
+workflow_route = APIRouter(
+    prefix="/workflows",
+    tags=["workflows"],
+    default_response_class=UJSONResponse,
+)
+
+
+@workflow_route.get(path="/")
+async def get_workflows() -> DictData:
+    """Return all workflow workflows that exists in config path."""
+    workflows: DictData = dict(Loader.finds(Workflow))
+    return {
+        "message": f"Getting all workflows: {len(workflows)}",
+        "count": len(workflows),
+        "workflows": workflows,
+    }
+
+
+@workflow_route.get(path="/{name}")
+async def get_workflow_by_name(name: str) -> DictData:
+    """Return model of workflow that passing an input workflow name."""
+    try:
+        workflow: Workflow = Workflow.from_loader(name=name, externals={})
+    except ValueError as err:
+        logger.exception(err)
+        raise HTTPException(
+            status_code=st.HTTP_404_NOT_FOUND,
+            detail=(
+                f"Workflow workflow name: {name!r} does not found in /conf path"
+            ),
+        ) from None
+    return workflow.model_dump(
+        by_alias=True,
+        exclude_none=True,
+        exclude_unset=True,
+        exclude_defaults=True,
+    )
+
+
+class ExecutePayload(BaseModel):
+    params: dict[str, Any]
+
+
+@workflow_route.post(path="/{name}/execute", status_code=st.HTTP_202_ACCEPTED)
+async def execute_workflow(name: str, payload: ExecutePayload) -> DictData:
+    """Return model of workflow that passing an input workflow name."""
+    try:
+        workflow: Workflow = Workflow.from_loader(name=name, externals={})
+    except ValueError:
+        raise HTTPException(
+            status_code=st.HTTP_404_NOT_FOUND,
+            detail=(
+                f"Workflow workflow name: {name!r} does not found in /conf path"
+            ),
+        ) from None
+
+    # NOTE: Start execute manually
+    try:
+        result: Result = workflow.execute(params=payload.params)
+    except Exception as err:
+        raise HTTPException(
+            status_code=st.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail=f"{type(err)}: {err}",
+        ) from None
+
+    return asdict(result)
+
+
+@workflow_route.get(path="/{name}/audits")
+async def get_workflow_audits(name: str):
+    try:
+        return {
+            "message": f"Getting workflow {name!r} audits",
+            "audits": [
+                audit.model_dump(
+                    by_alias=True,
+                    exclude_none=True,
+                    exclude_unset=True,
+                    exclude_defaults=True,
+                )
+                for audit in get_audit().find_audits(name=name)
+            ],
+        }
+    except FileNotFoundError:
+        raise HTTPException(
+            status_code=st.HTTP_404_NOT_FOUND,
+            detail=f"Does not found audit for workflow {name!r}",
+        ) from None
+
+
+@workflow_route.get(path="/{name}/audits/{release}")
+async def get_workflow_release_audit(name: str, release: str):
+    try:
+        audit: Audit = get_audit().find_audit_with_release(
+            name=name, release=datetime.strptime(release, "%Y%m%d%H%M%S")
+        )
+    except FileNotFoundError:
+        raise HTTPException(
+            status_code=st.HTTP_404_NOT_FOUND,
+            detail=(
+                f"Does not found audit for workflow {name!r} "
+                f"with release {release!r}"
+            ),
+        ) from None
+    return {
+        "message": f"Getting workflow {name!r} audit in release {release}",
+        "audit": audit.model_dump(
+            by_alias=True,
+            exclude_none=True,
+            exclude_unset=True,
+            exclude_defaults=True,
+        ),
+    }
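
The workflow endpoints keep their behavior but move under the new routes package; the per-release "logs" endpoints are renamed to "audits", and the old delete endpoint is dropped. A hypothetical execution call against the relocated route (workflow name, params, host, and prefix are placeholders):

    import httpx

    resp = httpx.post(
        "http://localhost:8000/workflows/wf-example/execute",
        json={"params": {"run-date": "2024-01-01"}},
    )
    resp.raise_for_status()  # expects 202; 404 if the name is not in /conf
    print(resp.json())       # the asdict(Result) payload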
ddeutil/workflow/audit.py CHANGED
@@ -20,7 +20,7 @@ from typing_extensions import Self
 
 from .__types import DictData, TupleStr
 from .conf import config
-from .result import TraceLog
+from .logs import TraceLog, get_trace
 
 __all__: TupleStr = (
     "get_audit",
@@ -174,7 +174,7 @@ class FileAudit(BaseAudit):
 
         :rtype: Self
         """
-        trace: TraceLog = TraceLog(self.run_id, self.parent_run_id)
+        trace: TraceLog = get_trace(self.run_id, self.parent_run_id)
 
         # NOTE: Check environ variable was set for real writing.
         if not config.enable_write_audit:
@@ -214,7 +214,7 @@ class SQLiteAudit(BaseAudit):  # pragma: no cov
        """Save logging data that receive a context data from a workflow
        execution result.
        """
-        trace: TraceLog = TraceLog(self.run_id, self.parent_run_id)
+        trace: TraceLog = get_trace(self.run_id, self.parent_run_id)
 
         # NOTE: Check environ variable was set for real writing.
         if not config.enable_write_audit:
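
Audit call sites now go through the get_trace factory instead of instantiating TraceLog directly, so the trace backend can be chosen in one place. A minimal sketch mirroring the call sites above (the run IDs are placeholders):

    from ddeutil.workflow.logs import TraceLog, get_trace

    trace: TraceLog = get_trace("demo-run-id", "demo-parent-run-id")
    # The audit models then write through this object, as in
    # FileAudit.save and SQLiteAudit.save above.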
@@ -60,7 +60,7 @@ def tag(
 
     @wraps(func)
     def wrapped(*args: P.args, **kwargs: P.kwargs) -> TagFunc:
-        # NOTE: Able to do anything before calling call function.
+        # NOTE: Able to do anything before calling the call function.
         return func(*args, **kwargs)
 
     return wrapped
@@ -150,7 +150,7 @@ def extract_call(call: str) -> Callable[[], TagFunc]:
     """
     if not (found := Re.RE_TASK_FMT.search(call)):
         raise ValueError(
-            f"Call {call!r} does not match with call format regex."
+            f"Call {call!r} does not match with the call regex format."
         )
 
     # NOTE: Pass the searching call string to `path`, `func`, and `tag`.
@@ -160,13 +160,13 @@ def extract_call(call: str) -> Callable[[], TagFunc]:
     rgt: dict[str, Registry] = make_registry(f"{call.path}")
     if call.func not in rgt:
         raise NotImplementedError(
-            f"``REGISTER-MODULES.{call.path}.registries`` does not "
+            f"`REGISTER-MODULES.{call.path}.registries` does not "
             f"implement registry: {call.func!r}."
         )
 
     if call.tag not in rgt[call.func]:
         raise NotImplementedError(
             f"tag: {call.tag!r} does not found on registry func: "
-            f"``REGISTER-MODULES.{call.path}.registries.{call.func}``"
+            f"`REGISTER-MODULES.{call.path}.registries.{call.func}`"
        )
     return rgt[call.func][call.tag]
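
Judging from the error messages above, a call string has the shape "<module-path>/<func-name>@<tag>" and is resolved against the registry built by make_registry. A hedged sketch (the call string is a placeholder, and the lookup only succeeds when the path is discoverable via the registry configuration named in the REGISTER-MODULES messages):

    from ddeutil.workflow.caller import extract_call

    make_func = extract_call("tasks/say-hello@demo")  # registry lookup happens here
    say_hello = make_func()                           # yields the tagged function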
ddeutil/workflow/job.py CHANGED
@@ -5,7 +5,7 @@
 # ------------------------------------------------------------------------------
 """Job Model that use for keeping stages and node that running its stages.
 The job handle the lineage of stages and location of execution of stages that
-mean the job model able to define ``runs-on`` key that allow you to run this
+mean the job model able to define `runs-on` key that allow you to run this
 job.
 
 This module include Strategy Model that use on the job strategy field.
@@ -24,10 +24,10 @@ from enum import Enum
 from functools import lru_cache
 from textwrap import dedent
 from threading import Event
-from typing import Any, Optional, Union
+from typing import Annotated, Any, Literal, Optional, Union
 
 from ddeutil.core import freeze_args
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field
 from pydantic.functional_validators import field_validator, model_validator
 from typing_extensions import Self
 
@@ -56,6 +56,11 @@ __all__: TupleStr = (
     "Strategy",
     "Job",
     "TriggerRules",
+    "RunsOn",
+    "RunsOnLocal",
+    "RunsOnSelfHosted",
+    "RunsOnDocker",
+    "RunsOnK8s",
     "make",
 )
 
@@ -216,13 +221,60 @@ class TriggerRules(str, Enum):
     none_skipped: str = "none_skipped"
 
 
-class RunsOn(str, Enum):
+class RunsOnType(str, Enum):
     """Runs-On enum object."""
 
-    local: str = "local"
-    docker: str = "docker"
-    self_hosted: str = "self_hosted"
-    k8s: str = "k8s"
+    LOCAL: str = "local"
+    DOCKER: str = "docker"
+    SELF_HOSTED: str = "self_hosted"
+    K8S: str = "k8s"
+
+
+class BaseRunsOn(BaseModel):
+    model_config = ConfigDict(use_enum_values=True)
+
+    type: Literal[RunsOnType.LOCAL]
+    args: DictData = Field(
+        default_factory=dict,
+        alias="with",
+    )
+
+
+class RunsOnLocal(BaseRunsOn):
+    """Runs-on local."""
+
+    type: Literal[RunsOnType.LOCAL] = Field(default=RunsOnType.LOCAL)
+
+
+class RunsOnSelfHosted(BaseRunsOn):
+    """Runs-on self-hosted."""
+
+    type: Literal[RunsOnType.SELF_HOSTED] = Field(
+        default=RunsOnType.SELF_HOSTED
+    )
+
+
+class RunsOnDocker(BaseRunsOn):
+    """Runs-on local Docker."""
+
+    type: Literal[RunsOnType.DOCKER] = Field(default=RunsOnType.DOCKER)
+
+
+class RunsOnK8s(BaseRunsOn):
+    """Runs-on Kubernetes."""
+
+    type: Literal[RunsOnType.K8S] = Field(default=RunsOnType.K8S)
+
+
+RunsOn = Annotated[
+    Union[
+        RunsOnLocal,
+        RunsOnSelfHosted,
+        RunsOnDocker,
+        RunsOnK8s,
+    ],
+    Field(discriminator="type"),
+]
 
 
 class Job(BaseModel):
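
With this change a job's runs-on is a Pydantic discriminated union keyed on the type field rather than a plain string. A minimal validation sketch, assuming pydantic v2 semantics and the aliases defined above (the host value is a placeholder):

    from pydantic import TypeAdapter

    from ddeutil.workflow.job import RunsOn, RunsOnSelfHosted

    runs_on = TypeAdapter(RunsOn).validate_python(
        {"type": "self_hosted", "with": {"host": "http://worker:8080"}}
    )
    assert isinstance(runs_on, RunsOnSelfHosted)
    assert runs_on.args == {"host": "http://worker:8080"}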
@@ -263,9 +315,9 @@ class Job(BaseModel):
         default=None,
         description="A job description that can be string of markdown content.",
     )
-    runs_on: Optional[str] = Field(
-        default=None,
-        description="A target executor node for this job use to execution.",
+    runs_on: RunsOn = Field(
+        default_factory=RunsOnLocal,
+        description="A target node for this job to use for execution.",
         serialization_alias="runs-on",
     )
     stages: list[Stage] = Field(
@@ -359,7 +411,7 @@ class Job(BaseModel):
 
     def set_outputs(self, output: DictData, to: DictData) -> DictData:
         """Set an outputs from execution process to the received context. The
-        result from execution will pass to value of ``strategies`` key.
+        result from execution will pass to value of `strategies` key.
 
         For example of setting output method, If you receive execute output
         and want to set on the `to` like;
@@ -424,14 +476,14 @@ class Job(BaseModel):
         workflow execution to strategy matrix.
 
         This execution is the minimum level of execution of this job model.
-        It different with ``self.execute`` because this method run only one
+        It different with `self.execute` because this method run only one
         strategy and return with context of this strategy data.
 
         The result of this execution will return result with strategy ID
         that generated from the `gen_id` function with an input strategy value.
 
-        :raise JobException: If it has any error from ``StageException`` or
-            ``UtilException``.
+        :raise JobException: If it has any error from `StageException` or
+            `UtilException`.
 
         :param strategy: A strategy metrix value that use on this execution.
             This value will pass to the `matrix` key for templating.
@@ -510,7 +562,7 @@ class Job(BaseModel):
             #
             #   ... params |= stage.execute(params=params)
             #
-            # This step will add the stage result to ``stages`` key in
+            # This step will add the stage result to `stages` key in
             #   that stage id. It will have structure like;
             #
             #   {
@@ -581,7 +633,7 @@ class Job(BaseModel):
     ) -> Result:
         """Job execution with passing dynamic parameters from the workflow
         execution. It will generate matrix values at the first step and run
-        multithread on this metrics to the ``stages`` field of this job.
+        multithread on this metrics to the `stages` field of this job.
 
         :param params: An input parameters that use on job execution.
         :param run_id: A job running ID for this execution.
@@ -591,15 +643,12 @@ class Job(BaseModel):
 
         :rtype: Result
         """
-
-        # NOTE: I use this condition because this method allow passing empty
-        #   params and I do not want to create new dict object.
         if result is None:  # pragma: no cov
             result: Result = Result(
                 run_id=(run_id or gen_id(self.id or "", unique=True)),
                 parent_run_id=parent_run_id,
             )
-        elif parent_run_id:
+        elif parent_run_id:  # pragma: no cov
             result.set_parent_run_id(parent_run_id)
 
         # NOTE: Normal Job execution without parallel strategy matrix. It uses