ddeutil-workflow 0.0.75__tar.gz → 0.0.77__tar.gz
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/PKG-INFO +1 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/pyproject.toml +1 -0
- ddeutil_workflow-0.0.77/src/ddeutil/workflow/__about__.py +1 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/__cron.py +12 -3
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/__init__.py +2 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/audits.py +15 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/cli.py +87 -21
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/conf.py +26 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/errors.py +8 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/job.py +13 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/result.py +12 -11
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/reusables.py +5 -4
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/stages.py +13 -11
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/traces.py +13 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/utils.py +8 -2
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/workflow.py +57 -46
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil_workflow.egg-info/PKG-INFO +1 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_conf.py +14 -4
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_reusables_template.py +6 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_utils.py +8 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_workflow_exec_job.py +2 -2
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_workflow_release.py +1 -0
- ddeutil_workflow-0.0.75/src/ddeutil/workflow/__about__.py +0 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/LICENSE +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/README.md +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/setup.cfg +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/__main__.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/__types.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/api/__init__.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/api/log_conf.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/api/routes/__init__.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/api/routes/job.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/api/routes/logs.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/api/routes/workflows.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/event.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil/workflow/params.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil_workflow.egg-info/SOURCES.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil_workflow.egg-info/dependency_links.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil_workflow.egg-info/entry_points.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil_workflow.egg-info/requires.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/src/ddeutil_workflow.egg-info/top_level.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test__cron.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test__regex.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_audits.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_cli.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_errors.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_event.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_job.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_job_exec.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_job_exec_strategy.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_params.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_result.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_reusables_call_tag.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_reusables_func_model.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_reusables_template_filter.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_strategy.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_traces.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_workflow.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_workflow_exec.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.77}/tests/test_workflow_rerun.py +0 -0
ddeutil_workflow-0.0.77/src/ddeutil/workflow/__about__.py (added)
@@ -0,0 +1 @@
+__version__: str = "0.0.77"

src/ddeutil/workflow/__cron.py
@@ -843,7 +843,11 @@ class CronRunner:

    @property
    def next(self) -> datetime:
-        """Returns the next time of the schedule.
+        """Returns the next time of the schedule.
+
+        Returns:
+            datetime: A next datetime from the current with shifting step.
+        """
        self.date = (
            self.date
            if self.reset_flag
@@ -860,7 +864,11 @@ class CronRunner:
    def find_date(self, reverse: bool = False) -> datetime:
        """Returns the time the schedule would run by `next` or `prev` methods.

-        :
+        Args:
+            reverse: A reverse flag.
+
+        Returns:
+            datetime: A next datetime from shifting step.
        """
        # NOTE: Set reset flag to false if start any action.
        self.reset_flag: bool = False
@@ -870,7 +878,8 @@ class CronRunner:
            max(self.shift_limit, 100) if self.is_year else self.shift_limit
        ):

-            # NOTE: Shift the date
+            # NOTE: Shift the date from year to minute.
+            mode: DatetimeMode  # noqa: F842
            if all(
                not self.__shift_date(mode, reverse)
                for mode in ("year", "month", "day", "hour", "minute")

src/ddeutil/workflow/__init__.py
@@ -113,6 +113,7 @@ from .result import (
     WAIT,
     Result,
     Status,
+    get_status_from_error,
 )
 from .reusables import *
 from .stages import (
@@ -131,7 +132,7 @@ from .stages import (
     VirtualPyStage,
 )
 from .traces import (
-
+    BaseTrace,
     FileTrace,
     Trace,
     TraceData,

src/ddeutil/workflow/audits.py
@@ -79,7 +79,10 @@ class BaseAudit(BaseModel, ABC):
        default=None, description="A parent running ID."
    )
    run_id: str = Field(description="A running ID")
-
+    runs_metadata: DictData = Field(
+        default_factory=dict,
+        description="A runs metadata that will use to tracking this audit log.",
+    )

    @model_validator(mode="after")
    def __model_action(self) -> Self:
@@ -296,20 +299,22 @@ class FileAudit(BaseAudit):


 class SQLiteAudit(BaseAudit):  # pragma: no cov
-    """SQLite Audit
+    """SQLite Audit model."""

    table_name: ClassVar[str] = "audits"
    schemas: ClassVar[
        str
    ] = """
-        workflow
-        release int
-        type str
-        context
-        parent_run_id int
-        run_id int
-
-
+        workflow str
+        , release int
+        , type str
+        , context JSON
+        , parent_run_id int
+        , run_id int
+        , metadata JSON
+        , created_at datetime
+        , updated_at datetime
+        primary key ( workflow, release )
        """

    @classmethod

src/ddeutil/workflow/cli.py
@@ -8,6 +8,7 @@ from __future__ import annotations
 import json
 from pathlib import Path
 from platform import python_version
+from textwrap import dedent
 from typing import Annotated, Any, Literal, Optional, Union

 import typer
@@ -15,15 +16,13 @@ from pydantic import Field, TypeAdapter

 from .__about__ import __version__
 from .__types import DictData
+from .conf import config
 from .errors import JobError
 from .job import Job
 from .params import Param
-from .result import Result
 from .workflow import Workflow

-app = typer.Typer(
-    pretty_exceptions_enable=True,
-)
+app = typer.Typer(pretty_exceptions_enable=True)


 @app.callback()
@@ -41,12 +40,70 @@ def version() -> None:
    typer.echo(f"python-version=={python_version()}")


+@app.command()
+def init() -> None:
+    """Initialize a Workflow structure on the current context."""
+    config.conf_path.mkdir(exist_ok=True)
+    (config.conf_path / ".confignore").touch()
+
+    conf_example_path: Path = config.conf_path / "examples"
+    conf_example_path.mkdir(exist_ok=True)
+
+    example_template: Path = conf_example_path / "wf_examples.yml"
+    example_template.write_text(
+        dedent(
+            """
+            # Example workflow template.
+            wf-example:
+              type: Workflow
+              desc: |
+                An example workflow template.
+              params:
+                name:
+                  type: str
+                  default: "World"
+              jobs:
+                first-job:
+                  stages:
+                    - name: "Call tasks"
+                      uses: tasks/say-hello-func@example
+                      with:
+                        name: ${{ params.name }}
+            """
+        ).lstrip("\n")
+    )
+
+    if "." in config.registry_caller:
+        task_path = Path("./tasks")
+        task_path.mkdir(exist_ok=True)
+
+        dummy_tasks_path = task_path / "example.py"
+        dummy_tasks_path.write_text(
+            dedent(
+                """
+                from ddeutil.workflow import Result, tag
+
+                @tag(name="example", alias="say-hello-func")
+                def hello_world_task(name: str, rs: Result) -> dict[str, str]:
+                    \"\"\"Logging hello task function\"\"\"
+                    rs.trace.info(f"Hello, {name}")
+                    return {"name": name}
+                """
+            ).lstrip("\n")
+        )
+
+        init_path = task_path / "__init__.py"
+        init_path.write_text("from .example import hello_world_task\n")
+    typer.echo(
+        "Starter command: `workflow-cli workflows execute --name=wf-example`"
+    )
+
+
 @app.command(name="job")
 def execute_job(
    params: Annotated[str, typer.Option(help="A job execute parameters")],
    job: Annotated[str, typer.Option(help="A job model")],
-
-    run_id: Annotated[Optional[str], typer.Option(help="A running ID")] = None,
+    run_id: Annotated[str, typer.Option(help="A running ID")],
 ) -> None:
    """Job execution on the local.

@@ -62,26 +119,19 @@ def execute_job(
        job_dict: dict[str, Any] = json.loads(job)
        _job: Job = Job.model_validate(obj=job_dict)
    except json.JSONDecodeError as e:
-        raise ValueError(f"
+        raise ValueError(f"Jobs does not support format: {job!r}.") from e

    typer.echo(f"Job params: {params_dict}")
-    rs: Result = Result(
-        run_id=run_id,
-        parent_run_id=parent_run_id,
-    )
-
    context: DictData = {}
    try:
        _job.set_outputs(
-            _job.execute(
-                params=params_dict,
-                run_id=rs.run_id,
-                parent_run_id=rs.parent_run_id,
-            ).context,
+            _job.execute(params=params_dict, run_id=run_id).context,
            to=context,
        )
+        typer.echo("[JOB]: Context result:")
+        typer.echo(json.dumps(context, default=str, indent=0))
    except JobError as err:
-
+        typer.echo(f"[JOB]: {err.__class__.__name__}: {err}")


 @app.command()
@@ -136,8 +186,24 @@ def workflow_callback():


 @workflow_app.command(name="execute")
-def workflow_execute(
-
+def workflow_execute(
+    name: Annotated[
+        str,
+        typer.Option(help="A name of workflow template."),
+    ],
+    params: Annotated[
+        str,
+        typer.Option(help="A workflow execute parameters"),
+    ] = "{}",
+):
+    """Execute workflow by passing a workflow template name."""
+    try:
+        params_dict: dict[str, Any] = json.loads(params)
+    except json.JSONDecodeError as e:
+        raise ValueError(f"Params does not support format: {params!r}.") from e
+
+    typer.echo(f"Start execute workflow template: {name}")
+    typer.echo(f"... with params: {params_dict}")


 WORKFLOW_TYPE = Literal["Workflow"]
@@ -167,7 +233,7 @@ def workflow_json_schema(
    template_schema: dict[str, str] = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "title": "Workflow Configuration Schema",
-        "version":
+        "version": __version__,
    }
    with open(output, mode="w", encoding="utf-8") as f:
        json.dump(template_schema | json_schema, f, indent=2)

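The new `init` command scaffolds a config directory with a `.confignore` file, an example workflow template, and a dummy task package, then echoes a starter command. A minimal sketch of driving it through Typer's test runner; the import path `ddeutil.workflow.cli` is an assumption from the source layout shown in this diff:

    # Minimal sketch; the import path is assumed from the source layout.
    from typer.testing import CliRunner

    from ddeutil.workflow.cli import app

    runner = CliRunner()
    result = runner.invoke(app, ["init"])
    assert result.exit_code == 0
    # `init` finishes by echoing:
    #   Starter command: `workflow-cli workflows execute --name=wf-example`
    print(result.output)
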
src/ddeutil/workflow/conf.py
@@ -322,25 +322,30 @@ class YamlParser:
        excluded: Optional[list[str]] = None,
        extras: Optional[DictData] = None,
        ignore_filename: Optional[str] = None,
+        tags: Optional[list[str]] = None,
    ) -> Iterator[tuple[str, DictData]]:
        """Find all data that match with object type in config path. This class
        method can use include and exclude list of identity name for filter and
        adds-on.

-        :
-
-
-
-
-        data
-
-
-
+        Args:
+            obj: (object | str) An object that want to validate matching
+                before return.
+            path: (Path) A config path object.
+            paths: (list[Path]) A list of config path object.
+            excluded: An included list of data key that want to filter from
+                data.
+            extras: (DictData) An extra parameter that use to override core
+                config values.
+            ignore_filename: (str) An ignore filename. Default is
                ``.confignore`` filename.
+            tags: (list[str])
+                A list of tag that want to filter.

        :rtype: Iterator[tuple[str, DictData]]
        """
        excluded: list[str] = excluded or []
+        tags: list[str] = tags or []
        path: Path = dynamic("conf_path", f=path, extras=extras)
        paths: Optional[list[Path]] = paths or (extras or {}).get("conf_paths")
        if not paths:
@@ -366,6 +371,14 @@ class YamlParser:
            if key in excluded:
                continue

+            if (
+                tags
+                and (ts := data[key].get("tags"))
+                and isinstance(ts, list)
+                and all(t not in tags for t in ts)
+            ):  # pragma: no cov
+                continue
+
            if (t := data.get("type")) and t == obj_type:
                marking: tuple[float, DictData] = (
                    file.lstat().st_mtime,
@@ -469,7 +482,10 @@ def pass_env(value: T) -> T:  # pragma: no cov
    if isinstance(value, dict):
        return {k: pass_env(value[k]) for k in value}
    elif isinstance(value, (list, tuple, set)):
-
+        try:
+            return type(value)(pass_env(i) for i in value)
+        except TypeError:
+            return value
    if not isinstance(value, str):
        return value

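The try/except TypeError guard added to `pass_env` (the same guard appears in `param2template`, `filter_func`, and `dump_all` later in this diff) exists because rebuilding a sequence with `type(value)(generator)` is not valid for every sequence subclass: namedtuple types such as `urllib.parse.ParseResult`, which the new tests push through the template renderer, take positional fields rather than a single iterable. A self-contained illustration of the failure mode the fallback handles:

    # Why type(value)(gen) can raise TypeError: namedtuple constructors take
    # positional fields, not one iterable, so the rebuild falls back to
    # returning the value unchanged.
    from collections import namedtuple

    Point = namedtuple("Point", ["x", "y"])
    p = Point(1, 2)

    print(tuple(str(i) for i in p))  # plain tuple rebuild works: ('1', '2')

    try:
        type(p)(str(i) for i in p)  # Point(<generator>) -> TypeError
    except TypeError:
        print("kept the original value, as the new guard does")
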
src/ddeutil/workflow/errors.py
@@ -136,16 +136,14 @@ class BaseError(Exception):
            ErrorData or dict: Exception data, optionally mapped by reference ID

        Example:
-
-
-
-            #
-
-            #
-
-            #
-            ref_data = error.to_dict(with_refs=True)
-            # Returns: {"stage-1": {"name": "BaseError", "message": "Something failed"}}
+            >>> error = BaseError("Something failed", refs="stage-1")
+            >>> # Simple format
+            >>> error.to_dict()
+            >>> # Returns: {"name": "BaseError", "message": "Something failed"}
+
+            >>> # With reference mapping
+            >>> error.to_dict(with_refs=True)
+            >>> # Returns: {"stage-1": {"name": "BaseError", "message": "Something failed"}}
            ```
        """
        data: ErrorData = to_dict(self)

src/ddeutil/workflow/job.py
@@ -656,6 +656,7 @@ class Job(BaseModel):
        to: DictData,
        *,
        job_id: StrOrNone = None,
+        **kwargs,
    ) -> DictData:
        """Set an outputs from execution result context to the received context
        with a `to` input parameter. The result context from job strategy
@@ -693,12 +694,15 @@ class Job(BaseModel):
        :raise JobError: If the job's ID does not set and the setting
            default job ID flag does not set.

-        :
-
-
-
+        Args:
+            output: (DictData) A result data context that want to extract
+                and transfer to the `strategies` key in receive context.
+            to: (DictData) A received context data.
+            job_id: (StrOrNone) A job ID if the `id` field does not set.
+            kwargs: Any values that want to add to the target context.

-        :
+        Returns:
+            DictData: Return updated the target context with a result context.
        """
        if "jobs" not in to:
            to["jobs"] = {}
@@ -716,8 +720,9 @@ class Job(BaseModel):
        status: dict[str, Status] = (
            {"status": output.pop("status")} if "status" in output else {}
        )
+        kwargs: DictData = kwargs or {}
        if self.strategy.is_set():
-            to["jobs"][_id] = {"strategies": output} | errors | status
+            to["jobs"][_id] = {"strategies": output} | errors | status | kwargs
        elif len(k := output.keys()) > 1:  # pragma: no cov
            raise JobError(
                "Strategy output from execution return more than one ID while "
@@ -726,7 +731,7 @@ class Job(BaseModel):
        else:
            _output: DictData = {} if len(k) == 0 else output[list(k)[0]]
            _output.pop("matrix", {})
-            to["jobs"][_id] = _output | errors | status
+            to["jobs"][_id] = _output | errors | status | kwargs
        return to

    def get_outputs(
@@ -800,8 +805,7 @@ class Job(BaseModel):
        return docker_execution(
            self,
            params,
-            run_id=
-            parent_run_id=parent_run_id,
+            run_id=parent_run_id,
            event=event,
        ).make_info({"execution_time": time.monotonic() - ts})

@@ -1294,7 +1298,6 @@ def docker_execution(
    params: DictData,
    *,
    run_id: StrOrNone = None,
-    parent_run_id: StrOrNone = None,
    event: Optional[Event] = None,
 ):  # pragma: no cov
    """Docker job execution.

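The `**kwargs` pass-through added to `Job.set_outputs` merges any caller-supplied keys into the stored per-job entry via dict union, after the outputs, errors, and status mappings. Because later operands of `|` win on key collisions, extra keys can both extend and override the entry. A standalone illustration of the merge order (key names here are illustrative only):

    # Illustrative only: mimics the union order in the updated set_outputs.
    output = {"outputs": {"x": 1}}
    errors: dict = {}
    status = {"status": "SUCCESS"}
    kwargs = {"note": "added by caller"}  # hypothetical extra key

    entry = output | errors | status | kwargs
    print(entry)
    # {'outputs': {'x': 1}, 'status': 'SUCCESS', 'note': 'added by caller'}
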
src/ddeutil/workflow/result.py
@@ -16,7 +16,6 @@ Classes:
 Functions:
    validate_statuses: Determine final status from multiple status values
    get_status_from_error: Convert exception types to appropriate status
-    get_dt_tznow: Get current datetime with timezone configuration
 """
 from __future__ import annotations

@@ -89,6 +88,7 @@ class Status(str, Enum):
        return self.name

    def is_result(self) -> bool:
+        """Return True if this status is the status for result object."""
        return self in ResultStatuses


@@ -115,15 +115,13 @@ def validate_statuses(statuses: list[Status]) -> Status:
        Status: Final consolidated status based on workflow logic

    Example:
-
-
-
-
-
-
-
-        # Returns: SUCCESS
-        ```
+        >>> # Mixed statuses - FAILED takes priority
+        >>> validate_statuses([SUCCESS, FAILED, SUCCESS])
+        >>> # Returns: FAILED
+
+        >>> # All same status
+        >>> validate_statuses([SUCCESS, SUCCESS, SUCCESS])
+        >>> # Returns: SUCCESS
    """
    if any(s == CANCEL for s in statuses):
        return CANCEL
@@ -153,6 +151,9 @@ def get_status_from_error(
 ) -> Status:
    """Get the Status from the error object.

+    Args:
+        error: An error object.
+
    Returns:
        Status: The status from the specific exception class.
    """
@@ -189,7 +190,7 @@ class Result:
    context: DictData = field(default_factory=default_context)
    info: DictData = field(default_factory=dict)
    run_id: Optional[str] = field(default_factory=default_gen_id)
-    parent_run_id: Optional[str] = field(default=None
+    parent_run_id: Optional[str] = field(default=None)
    ts: datetime = field(default_factory=get_dt_now, compare=False)
    trace: Optional[Trace] = field(default=None, compare=False, repr=False)
    extras: DictData = field(default_factory=dict, compare=False, repr=False)

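`get_status_from_error` is now re-exported from the package root (see the `__init__.py` hunk earlier in this diff) and its docstring gains an `Args:` section. A hedged usage sketch based only on that docstring, with the concrete exception-to-status mapping left to the package internals:

    # Hedged sketch: map a caught exception to a workflow Status.
    from ddeutil.workflow import get_status_from_error

    try:
        raise RuntimeError("boom")
    except Exception as err:
        status = get_status_from_error(err)
        print(status)  # a Status member derived from the exception class
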
src/ddeutil/workflow/reusables.py
@@ -421,12 +421,13 @@ def param2template(
            for k in value
        }
    elif isinstance(value, (list, tuple, set)):
-
-
+        try:
+            return type(value)(
                param2template(i, params, context, filters, extras=extras)
                for i in value
-
-
+            )
+        except TypeError:
+            return value
    elif not isinstance(value, str):
        return value
    return str2template(

src/ddeutil/workflow/stages.py
@@ -295,7 +295,7 @@ class BaseStage(BaseModel, ABC):
        ts: float = time.monotonic()
        parent_run_id: str = run_id
        run_id: str = run_id or gen_id(self.iden, unique=True)
-        context: DictData = {}
+        context: DictData = {"status": WAIT}
        trace: Trace = get_trace(
            run_id, parent_run_id=parent_run_id, extras=self.extras
        )
@@ -329,7 +329,7 @@ class BaseStage(BaseModel, ABC):
                parent_run_id=parent_run_id,
                event=event,
            )
-            if result_caught.status == WAIT:
+            if result_caught.status == WAIT:  # pragma: no cov
                raise StageError(
                    "Status from execution should not return waiting status."
                )
@@ -413,7 +413,7 @@ class BaseStage(BaseModel, ABC):
        self,
        output: DictData,
        to: DictData,
-
+        **kwargs,
    ) -> DictData:
        """Set an outputs from execution result context to the received context
        with a `to` input parameter. The result context from stage execution
@@ -447,12 +447,14 @@ class BaseStage(BaseModel, ABC):
        to the `to` argument. The result context was soft copied before set
        output step.

-        :
-
-
-
+        Args:
+            output: (DictData) A result data context that want to extract
+                and transfer to the `outputs` key in receive context.
+            to: (DictData) A received context data.
+            kwargs: Any values that want to add to the target context.

-        :
+        Returns:
+            DictData: Return updated the target context with a result context.
        """
        if "stages" not in to:
            to["stages"] = {}
@@ -470,8 +472,8 @@ class BaseStage(BaseModel, ABC):
        status: dict[str, Status] = (
            {"status": output.pop("status")} if "status" in output else {}
        )
-
-        to["stages"][_id] = {"outputs": output} | errors | status
+        kwargs: DictData = kwargs or {}
+        to["stages"][_id] = {"outputs": output} | errors | status | kwargs
        return to

    def get_outputs(self, output: DictData) -> DictData:
@@ -654,7 +656,7 @@ class BaseAsyncStage(BaseStage, ABC):
            parent_run_id=parent_run_id,
            event=event,
        )
-        if result_caught.status == WAIT:
+        if result_caught.status == WAIT:  # pragma: no cov
            raise StageError(
                "Status from execution should not return waiting status."
            )

src/ddeutil/workflow/traces.py
@@ -287,7 +287,7 @@ class TraceData(BaseModel):  # pragma: no cov
        return cls.model_validate(data)


-class
+class BaseEmitTrace(BaseModel, ABC):  # pragma: no cov
    """Base Trace model with abstraction class property."""

    model_config = ConfigDict(frozen=True)
@@ -474,7 +474,7 @@ class BaseTrace(BaseModel, ABC):  # pragma: no cov
        await self.amit(message, mode="exception", is_err=True)


-class ConsoleTrace(
+class ConsoleTrace(BaseEmitTrace):  # pragma: no cov
    """Console Trace log model."""

    def writer(
@@ -566,7 +566,11 @@ class ConsoleTrace(BaseTrace):  # pragma: no cov
        getattr(logger, mode)(msg, stacklevel=3, extra={"cut_id": self.cut_id})


-class
+class BaseTrace(ConsoleTrace, ABC):
+    """A Base Trace model that will use for override writing or sending trace
+    log to any service type.
+    """
+
    model_config = ConfigDict(arbitrary_types_allowed=True)

    url: ParseResult = Field(description="An URL for create pointer.")
@@ -575,9 +579,8 @@ class OutsideTrace(ConsoleTrace, ABC):
        "url", mode="before", json_schema_input_type=Union[ParseResult, str]
    )
    def __parse_url(cls, value: Union[ParseResult, str]) -> ParseResult:
-
-
-        return value
+        """Parsing an URL value."""
+        return urlparse(value) if isinstance(value, str) else value

    @field_serializer("url")
    def __serialize_url(self, value: ParseResult) -> str:
@@ -621,7 +624,7 @@ class OutsideTrace(ConsoleTrace, ABC):
        )


-class FileTrace(
+class FileTrace(BaseTrace):  # pragma: no cov
    """File Trace dataclass that write file to the local storage."""

    @classmethod
@@ -765,7 +768,7 @@ class FileTrace(OutsideTrace):  # pragma: no cov
            await f.write(trace_meta.model_dump_json() + "\n")


-class SQLiteTrace(
+class SQLiteTrace(BaseTrace):  # pragma: no cov
    """SQLite Trace dataclass that write trace log to the SQLite database file."""

    table_name: ClassVar[str] = "audits"
@@ -779,7 +782,7 @@ class SQLiteTrace(OutsideTrace):  # pragma: no cov
        , metadata JSON
        , created_at datetime
        , updated_at datetime
-        primary key (
+        primary key ( parent_run_id )
        """

    @classmethod
@@ -824,7 +827,7 @@ class SQLiteTrace(OutsideTrace):  # pragma: no cov
 Trace = Union[
    FileTrace,
    SQLiteTrace,
-
+    BaseTrace,
 ]

src/ddeutil/workflow/utils.py
@@ -271,7 +271,10 @@ def filter_func(value: T) -> T:
    if isinstance(value, dict):
        return {k: filter_func(value[k]) for k in value}
    elif isinstance(value, (list, tuple, set)):
-
+        try:
+            return type(value)(filter_func(i) for i in value)
+        except TypeError:
+            return value

    if isfunction(value):
        # NOTE: If it wants to improve to get this function, it is able to save
@@ -338,7 +341,10 @@ def dump_all(
    if isinstance(value, dict):
        return {k: dump_all(value[k], by_alias=by_alias) for k in value}
    elif isinstance(value, (list, tuple, set)):
-
+        try:
+            return type(value)(dump_all(i, by_alias=by_alias) for i in value)
+        except TypeError:
+            return value
    elif isinstance(value, BaseModel):
        return value.model_dump(by_alias=by_alias)
    return value

src/ddeutil/workflow/workflow.py
@@ -152,6 +152,10 @@ class Workflow(BaseModel):
        default_factory=dict,
        description="A mapping of job ID and job model that already loaded.",
    )
+    tags: list[str] = Field(
+        default_factory=list,
+        description="A list of tag that use for simple grouping workflow.",
+    )
    created_at: datetime = Field(
        default_factory=get_dt_now,
        description=(
@@ -389,11 +393,12 @@ class Workflow(BaseModel):
        params: DictData,
        *,
        run_id: Optional[str] = None,
+        runs_metadata: Optional[DictData] = None,
        release_type: ReleaseType = NORMAL,
-        audit: type[Audit] = None,
        override_log_name: Optional[str] = None,
        timeout: int = 600,
-
+        audit_excluded: Optional[list[str]] = None,
+        audit: type[Audit] = None,
    ) -> Result:
        """Release the workflow which is executes workflow with writing audit
        log tracking. The method is overriding parameter with the release
@@ -409,18 +414,22 @@ class Workflow(BaseModel):
        - Execute this workflow with mapping release data to its parameters.
        - Writing result audit

-        :
-
-
-
-
-
-
-
-
-
+        Args:
+            release: (datetime) A release datetime.
+            params: A workflow parameter that pass to execute method.
+            release_type:
+            run_id: (str) A workflow running ID.
+            runs_metadata: (DictData)
+            audit: An audit class that want to save the execution result.
+            override_log_name: (str) An override logging name that use
+                instead the workflow name.
+            timeout: (int) A workflow execution time out in second unit.
+            audit_excluded: (list[str]) A list of key that want to exclude
+                from the audit data.

-        :
+        Returns:
+            Result: return result object that pass context data from the execute
+                method.
        """
        name: str = override_log_name or self.name

@@ -432,7 +441,7 @@ class Workflow(BaseModel):
        run_id: str = gen_id(name, unique=True)
        parent_run_id: str = run_id

-        context: DictData = {}
+        context: DictData = {"status": WAIT}
        trace: Trace = get_trace(
            run_id, parent_run_id=parent_run_id, extras=self.extras
        )
@@ -445,6 +454,7 @@ class Workflow(BaseModel):
                    "logical_date": release,
                    "execute_date": get_dt_now(),
                    "run_id": run_id,
+                    "runs_metadata": runs_metadata or {},
                }
            },
            extras=self.extras,
@@ -465,9 +475,17 @@ class Workflow(BaseModel):
                context=context,
                parent_run_id=parent_run_id,
                run_id=run_id,
-                execution_time=rs.info.get("execution_time", 0),
                extras=self.extras,
-
+                runs_metadata=(
+                    (runs_metadata or {})
+                    | rs.info
+                    | {
+                        "timeout": timeout,
+                        "original_name": self.name,
+                        "audit_excluded": audit_excluded,
+                    }
+                ),
+            ).save(excluded=audit_excluded)
        )
        return Result(
            run_id=run_id,
@@ -492,7 +510,6 @@ class Workflow(BaseModel):
    def execute_job(
        self,
        job: Job,
-        params: DictData,
        run_id: str,
        context: DictData,
        *,
@@ -511,7 +528,6 @@ class Workflow(BaseModel):

        Args:
            job: (Job) A job model that want to execute.
-            params: (DictData) A parameter data.
            run_id: A running stage ID.
            context: A context data.
            parent_run_id: A parent running ID. (Default is None)
@@ -538,25 +554,24 @@ class Workflow(BaseModel):
        )

        trace.info(f"[WORKFLOW]: Execute Job: {job.id!r}")
-
-            params=
+        result: Result = job.execute(
+            params=context,
            run_id=parent_run_id,
            event=event,
        )
-        job.set_outputs(
+        job.set_outputs(result.context, to=context)

-        if
+        if result.status == FAILED:
            error_msg: str = f"Job execution, {job.id!r}, was failed."
            return FAILED, catch(
                context=context,
                status=FAILED,
                updated={
                    "errors": WorkflowError(error_msg).to_dict(),
-                    **params,
                },
            )

-        elif
+        elif result.status == CANCEL:
            error_msg: str = (
                f"Job execution, {job.id!r}, was canceled from the event after "
                f"end job execution."
@@ -566,13 +581,10 @@ class Workflow(BaseModel):
                status=CANCEL,
                updated={
                    "errors": WorkflowCancelError(error_msg).to_dict(),
-                    **params,
                },
            )

-        return
-            context=context, status=rs.status, updated=params
-        )
+        return result.status, catch(context, status=result.status)

    def execute(
        self,
@@ -753,7 +765,6 @@ class Workflow(BaseModel):
                    executor.submit(
                        self.execute_job,
                        job=job,
-                        params=context,
                        run_id=run_id,
                        context=context,
                        parent_run_id=parent_run_id,
@@ -768,7 +779,6 @@ class Workflow(BaseModel):
                executor.submit(
                    self.execute_job,
                    job=job,
-                    params=context,
                    run_id=run_id,
                    context=context,
                    parent_run_id=parent_run_id,
@@ -898,7 +908,7 @@ class Workflow(BaseModel):
            extras=self.extras,
        )

-        err = context
+        err: dict[str, str] = context.get("errors", {})
        trace.info(f"[WORKFLOW]: Previous error: {err}")

        event: ThreadEvent = event or ThreadEvent()
@@ -919,9 +929,9 @@ class Workflow(BaseModel):
            extras=self.extras,
        )

-        # NOTE: Prepare the new context for rerun process.
+        # NOTE: Prepare the new context variable for rerun process.
        jobs: DictData = context.get("jobs")
-
+        context: DictData = {
            "params": context["params"].copy(),
            "jobs": {j: jobs[j] for j in jobs if jobs[j]["status"] == SUCCESS},
        }
@@ -930,19 +940,22 @@ class Workflow(BaseModel):
        job_queue: Queue = Queue()
        for job_id in self.jobs:

-            if job_id in
+            if job_id in context["jobs"]:
                continue

            job_queue.put(job_id)
            total_job += 1

        if total_job == 0:
-            trace.warning(
+            trace.warning(
+                "[WORKFLOW]: It does not have job to rerun. it will change "
+                "status to skip."
+            )
            return Result(
                run_id=run_id,
                parent_run_id=parent_run_id,
-                status=
-                context=catch(context=context, status=
+                status=SKIP,
+                context=catch(context=context, status=SKIP),
                extras=self.extras,
            )

@@ -954,14 +967,14 @@ class Workflow(BaseModel):
            "max_job_exec_timeout", f=timeout, extras=self.extras
        )

-        catch(
+        catch(context, status=WAIT)
        if event and event.is_set():
            return Result(
                run_id=run_id,
                parent_run_id=parent_run_id,
                status=CANCEL,
                context=catch(
-
+                    context,
                    status=CANCEL,
                    updated={
                        "errors": WorkflowCancelError(
@@ -983,7 +996,7 @@ class Workflow(BaseModel):
            ):
                job_id: str = job_queue.get()
                job: Job = self.job(name=job_id)
-                if (check := job.check_needs(
+                if (check := job.check_needs(context["jobs"])) == WAIT:
                    job_queue.task_done()
                    job_queue.put(job_id)
                    consecutive_waits += 1
@@ -1003,7 +1016,7 @@ class Workflow(BaseModel):
                        parent_run_id=parent_run_id,
                        status=FAILED,
                        context=catch(
-
+                            context,
                            status=FAILED,
                            updated={
                                "status": FAILED,
@@ -1019,7 +1032,7 @@ class Workflow(BaseModel):
                    trace.info(
                        f"[JOB]: Skip job: {job_id!r} from trigger rule."
                    )
-                    job.set_outputs(output={"status": SKIP}, to=
+                    job.set_outputs(output={"status": SKIP}, to=context)
                    job_queue.task_done()
                    skip_count += 1
                    continue
@@ -1029,7 +1042,6 @@ class Workflow(BaseModel):
                    executor.submit(
                        self.execute_job,
                        job=job,
-                        params=new_context,
                        run_id=run_id,
                        context=context,
                        parent_run_id=parent_run_id,
@@ -1044,7 +1056,6 @@ class Workflow(BaseModel):
                executor.submit(
                    self.execute_job,
                    job=job,
-                    params=new_context,
                    run_id=run_id,
                    context=context,
                    parent_run_id=parent_run_id,
@@ -1095,7 +1106,7 @@ class Workflow(BaseModel):
                run_id=run_id,
                parent_run_id=parent_run_id,
                status=st,
-                context=catch(
+                context=catch(context, status=st),
                extras=self.extras,
            )

@@ -1115,7 +1126,7 @@ class Workflow(BaseModel):
                parent_run_id=parent_run_id,
                status=FAILED,
                context=catch(
-
+                    context,
                    status=FAILED,
                    updated={
                        "errors": WorkflowTimeoutError(

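The new `Workflow.tags` field pairs with the `tags` filter added to `YamlParser.finds` in conf.py above: a template whose `tags` list shares no member with the requested tags is skipped during discovery. A hedged sketch of listing templates by tag, assuming a configured conf path and an illustrative tag value:

    # Hedged sketch: list Workflow templates whose tags intersect ["daily"].
    from ddeutil.workflow.conf import YamlParser

    for name, data in YamlParser.finds("Workflow", tags=["daily"]):
        print(name, data.get("tags", []))
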
tests/test_conf.py
@@ -61,7 +61,10 @@ def test_load_file(target_path: Path):
                "type": "Workflow",
                "desc": "Test multi config path",
                "env": "${WORKFLOW_LOG_TIMEZONE}",
-            }
+            },
+            "test_load_not_set_type": {
+                "desc": "Test load not set type.",
+            },
        },
        f,
    )
@@ -92,6 +95,12 @@ def test_load_file(target_path: Path):
    with pytest.raises(TypeError):
        YamlParser("test_load_file", extras={"conf_paths": target_path})

+    load = YamlParser(
+        "test_load_not_set_type", extras={"conf_paths": [target_path]}
+    )
+    with pytest.raises(ValueError):
+        _ = load.type
+

 def test_load_file_finds(target_path: Path):
    dummy_file: Path = target_path / "01_test_simple_file.yaml"
@@ -227,12 +236,13 @@ def test_parse_url():
    assert url.path == "./logs"

    url: ParseResult = urlparse("file:///./logs")
-    print(url)
    assert url.scheme == "file"
    assert url.path == "/./logs"

    url: ParseResult = urlparse("sqlite:///home/warehouse/sqlite.db")
-
+    assert url.scheme == "sqlite"
+    assert url.path == "/home/warehouse/sqlite.db"

    url: ParseResult = urlparse("file:./data.db")
-
+    assert url.scheme == "file"
+    assert url.path == "./data.db"

tests/test_reusables_template.py
@@ -1,6 +1,7 @@
 import os
 from datetime import datetime
 from typing import Any
+from urllib.parse import urlparse

 import pytest
 from ddeutil.workflow.errors import UtilError
@@ -35,11 +36,14 @@ def test_param2template():
                "${{ params.src }}-${WORKFLOW_LOG_TIMEZONE:-}"
                "${WORKFLOW_DUMMY:-}"
            ),
+            "url": urlparse("file:./conf"),
+            "set": {"${{ params.src }}", "${{ params.value }}"},
        },
        params={
            "params": {
                "src": "foo",
                "value": -10,
+                "url": urlparse("file:./conf"),
            },
        },
    )
@@ -49,6 +53,8 @@ def test_param2template():
        "int_but_str": "value is 10",
        "list": ["foo", -10],
        "str_env": "foo-Asia/Bangkok-",
+        "url": urlparse("file:./conf"),
+        "set": {"foo", -10},
    } == value

    with pytest.raises(UtilError):

tests/test_utils.py
@@ -14,6 +14,7 @@ from ddeutil.workflow.utils import (
    get_diff_sec,
    get_dt_now,
    make_exec,
+    obj_name,
    prepare_newline,
    reach_next_minute,
 )
@@ -185,3 +186,10 @@ def test_dump_all():
        {"name": "first", "info": {"field": "foo", "age": 10}},
        {"name": "second", "info": {"field": "foo", "age": 10}},
    ]
+
+
+def test_obj_name():
+    assert obj_name() is None
+    assert obj_name("datetime") == "datetime"
+    assert obj_name(datetime) == "datetime"
+    assert obj_name(datetime(2025, 1, 1, 1)) == "datetime"

tests/test_workflow_exec_job.py
@@ -18,7 +18,7 @@ def test_workflow_execute_job():
    )
    workflow: Workflow = Workflow(name="workflow", jobs={"demo-run": job})
    st, ctx = workflow.execute_job(
-        job=workflow.job("demo-run"),
+        job=workflow.job("demo-run"), run_id="1234", context={}
    )
    assert st == SUCCESS
    assert ctx == {
@@ -46,7 +46,7 @@ def test_workflow_execute_job_raise_inside():
    )
    workflow: Workflow = Workflow(name="workflow", jobs={"demo-run": job})
    st, ctx = workflow.execute_job(
-        job=workflow.job("demo-run"),
+        job=workflow.job("demo-run"), run_id="1234", context={}
    )
    assert st == FAILED
    assert ctx == {

ddeutil_workflow-0.0.75/src/ddeutil/workflow/__about__.py (removed)
@@ -1 +0,0 @@
-__version__: str = "0.0.75"
All remaining files listed above with +0 -0 were renamed between the version directories without content changes.