ddeutil-workflow 0.0.75__tar.gz → 0.0.76__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/PKG-INFO +1 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/pyproject.toml +1 -0
- ddeutil_workflow-0.0.76/src/ddeutil/workflow/__about__.py +1 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/__cron.py +12 -3
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/__init__.py +2 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/audits.py +15 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/cli.py +87 -21
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/errors.py +8 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/job.py +13 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/result.py +12 -11
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/stages.py +11 -9
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/traces.py +13 -10
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/workflow.py +53 -46
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil_workflow.egg-info/PKG-INFO +1 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_utils.py +8 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_workflow_exec_job.py +2 -2
- ddeutil_workflow-0.0.75/src/ddeutil/workflow/__about__.py +0 -1
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/LICENSE +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/README.md +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/setup.cfg +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/__main__.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/__types.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/api/__init__.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/api/log_conf.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/api/routes/__init__.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/api/routes/job.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/api/routes/logs.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/api/routes/workflows.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/conf.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/event.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/params.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/reusables.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/utils.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil_workflow.egg-info/SOURCES.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil_workflow.egg-info/dependency_links.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil_workflow.egg-info/entry_points.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil_workflow.egg-info/requires.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil_workflow.egg-info/top_level.txt +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test__cron.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test__regex.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_audits.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_cli.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_conf.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_errors.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_event.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_job.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_job_exec.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_job_exec_strategy.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_params.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_result.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_reusables_call_tag.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_reusables_func_model.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_reusables_template.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_reusables_template_filter.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_strategy.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_traces.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_workflow.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_workflow_exec.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_workflow_release.py +0 -0
- {ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_workflow_rerun.py +0 -0
ddeutil_workflow-0.0.76/src/ddeutil/workflow/__about__.py
@@ -0,0 +1 @@
+__version__: str = "0.0.76"
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/__cron.py
@@ -843,7 +843,11 @@ class CronRunner:
 
     @property
     def next(self) -> datetime:
-        """Returns the next time of the schedule."""
+        """Returns the next time of the schedule.
+
+        Returns:
+            datetime: A next datetime from the current with shifting step.
+        """
         self.date = (
             self.date
             if self.reset_flag
@@ -860,7 +864,11 @@ class CronRunner:
     def find_date(self, reverse: bool = False) -> datetime:
         """Returns the time the schedule would run by `next` or `prev` methods.
 
-        :
+        Args:
+            reverse: A reverse flag.
+
+        Returns:
+            datetime: A next datetime from shifting step.
         """
         # NOTE: Set reset flag to false if start any action.
         self.reset_flag: bool = False
@@ -870,7 +878,8 @@ class CronRunner:
             max(self.shift_limit, 100) if self.is_year else self.shift_limit
         ):
 
-            # NOTE: Shift the date
+            # NOTE: Shift the date from year to minute.
+            mode: DatetimeMode  # noqa: F842
             if all(
                 not self.__shift_date(mode, reverse)
                 for mode in ("year", "month", "day", "hour", "minute")
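Note: the reworked docstrings pin down the runner contract: `next` is a property that shifts the schedule one step and returns the resulting datetime, and `find_date(reverse=...)` walks in either direction. A minimal sketch of that contract; the `CronJob(...).schedule(...)` entrypoint here is an assumption about this package's API, not something this diff shows:

    from datetime import datetime
    from ddeutil.workflow.__cron import CronJob  # assumed entrypoint

    runner = CronJob("*/5 * * * *").schedule(datetime(2025, 1, 1))  # assumed
    first = runner.next   # property access shifts one step and returns it
    second = runner.next  # each access advances the schedule again
    earlier = runner.find_date(reverse=True)  # walk backwards instead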
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/__init__.py
@@ -113,6 +113,7 @@ from .result import (
     WAIT,
     Result,
     Status,
+    get_status_from_error,
 )
 from .reusables import *
 from .stages import (
@@ -131,7 +132,7 @@ from .stages import (
     VirtualPyStage,
 )
 from .traces import (
-
+    BaseTrace,
     FileTrace,
     Trace,
     TraceData,
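Note: with these two hunks, `BaseTrace` and `get_status_from_error` are re-exported from the package root, so callers can avoid the private module paths:

    from ddeutil.workflow import BaseTrace, get_status_from_error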
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/audits.py
@@ -79,7 +79,10 @@ class BaseAudit(BaseModel, ABC):
         default=None, description="A parent running ID."
     )
     run_id: str = Field(description="A running ID")
-
+    runs_metadata: DictData = Field(
+        default_factory=dict,
+        description="A runs metadata that will use to tracking this audit log.",
+    )
 
     @model_validator(mode="after")
     def __model_action(self) -> Self:
@@ -296,20 +299,22 @@ class FileAudit(BaseAudit):
 
 
 class SQLiteAudit(BaseAudit):  # pragma: no cov
-    """SQLite Audit
+    """SQLite Audit model."""
 
     table_name: ClassVar[str] = "audits"
     schemas: ClassVar[
         str
     ] = """
-        workflow
-        release int
-        type str
-        context
-        parent_run_id int
-        run_id int
-
-
+        workflow str
+        , release int
+        , type str
+        , context JSON
+        , parent_run_id int
+        , run_id int
+        , metadata JSON
+        , created_at datetime
+        , updated_at datetime
+        primary key ( workflow, release )
         """
 
     @classmethod
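Note: the corrected schema fragment now reads as real column definitions with a composite primary key on (workflow, release). A minimal sketch of materializing such a fragment; the surrounding CREATE TABLE statement is an assumption, and SQLite tolerates the loose type names through type affinity:

    import sqlite3

    # Hypothetical DDL assembled from the fragment above.
    ddl = """
    CREATE TABLE IF NOT EXISTS audits (
        workflow str
        , release int
        , type str
        , context JSON
        , parent_run_id int
        , run_id int
        , metadata JSON
        , created_at datetime
        , updated_at datetime
        , PRIMARY KEY ( workflow, release )
    )
    """
    with sqlite3.connect(":memory:") as conn:
        conn.execute(ddl)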
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/cli.py
@@ -8,6 +8,7 @@ from __future__ import annotations
 import json
 from pathlib import Path
 from platform import python_version
+from textwrap import dedent
 from typing import Annotated, Any, Literal, Optional, Union
 
 import typer
@@ -15,15 +16,13 @@ from pydantic import Field, TypeAdapter
 
 from .__about__ import __version__
 from .__types import DictData
+from .conf import config
 from .errors import JobError
 from .job import Job
 from .params import Param
-from .result import Result
 from .workflow import Workflow
 
-app = typer.Typer(
-    pretty_exceptions_enable=True,
-)
+app = typer.Typer(pretty_exceptions_enable=True)
 
 
 @app.callback()
@@ -41,12 +40,70 @@ def version() -> None:
     typer.echo(f"python-version=={python_version()}")
 
 
+@app.command()
+def init() -> None:
+    """Initialize a Workflow structure on the current context."""
+    config.conf_path.mkdir(exist_ok=True)
+    (config.conf_path / ".confignore").touch()
+
+    conf_example_path: Path = config.conf_path / "examples"
+    conf_example_path.mkdir(exist_ok=True)
+
+    example_template: Path = conf_example_path / "wf_examples.yml"
+    example_template.write_text(
+        dedent(
+            """
+            # Example workflow template.
+            wf-example:
+              type: Workflow
+              desc: |
+                An example workflow template.
+              params:
+                name:
+                  type: str
+                  default: "World"
+              jobs:
+                first-job:
+                  stages:
+                    - name: "Call tasks"
+                      uses: tasks/say-hello-func@example
+                      with:
+                        name: ${{ params.name }}
+            """
+        ).lstrip("\n")
+    )
+
+    if "." in config.registry_caller:
+        task_path = Path("./tasks")
+        task_path.mkdir(exist_ok=True)
+
+        dummy_tasks_path = task_path / "example.py"
+        dummy_tasks_path.write_text(
+            dedent(
+                """
+                from ddeutil.workflow import Result, tag
+
+                @tag(name="example", alias="say-hello-func")
+                def hello_world_task(name: str, rs: Result) -> dict[str, str]:
+                    \"\"\"Logging hello task function\"\"\"
+                    rs.trace.info(f"Hello, {name}")
+                    return {"name": name}
+                """
+            ).lstrip("\n")
+        )
+
+        init_path = task_path / "__init__.py"
+        init_path.write_text("from .example import hello_world_task\n")
+    typer.echo(
+        "Starter command: `workflow-cli workflows execute --name=wf-example`"
+    )
+
+
 @app.command(name="job")
 def execute_job(
     params: Annotated[str, typer.Option(help="A job execute parameters")],
     job: Annotated[str, typer.Option(help="A job model")],
-
-    run_id: Annotated[Optional[str], typer.Option(help="A running ID")] = None,
+    run_id: Annotated[str, typer.Option(help="A running ID")],
 ) -> None:
     """Job execution on the local.
 
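Note: a quick way to exercise the new `init` command in-process is Typer's test runner (real `typer.testing` API; the module path follows this package's layout). What lands on disk depends on the local `conf_path` and `registry_caller` settings:

    from typer.testing import CliRunner
    from ddeutil.workflow.cli import app

    runner = CliRunner()
    result = runner.invoke(app, ["init"])
    print(result.output)  # ends with the starter command hint on success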
@@ -62,26 +119,19 @@ def execute_job(
         job_dict: dict[str, Any] = json.loads(job)
         _job: Job = Job.model_validate(obj=job_dict)
     except json.JSONDecodeError as e:
-        raise ValueError(f"
+        raise ValueError(f"Jobs does not support format: {job!r}.") from e
 
     typer.echo(f"Job params: {params_dict}")
-    rs: Result = Result(
-        run_id=run_id,
-        parent_run_id=parent_run_id,
-    )
-
     context: DictData = {}
     try:
         _job.set_outputs(
-            _job.execute(
-                params=params_dict,
-                run_id=rs.run_id,
-                parent_run_id=rs.parent_run_id,
-            ).context,
+            _job.execute(params=params_dict, run_id=run_id).context,
             to=context,
         )
+        typer.echo("[JOB]: Context result:")
+        typer.echo(json.dumps(context, default=str, indent=0))
     except JobError as err:
-
+        typer.echo(f"[JOB]: {err.__class__.__name__}: {err}")
 
 
 @app.command()
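Note: with `run_id` now required and the `Result` indirection gone, an invocation of the `job` command looks roughly like this (reusing the runner from the `init` sketch above; both JSON payloads are illustrative, not taken from this diff):

    import json

    result = runner.invoke(app, [
        "job",
        "--params", json.dumps({"name": "World"}),
        "--job", json.dumps({"stages": [{"name": "Echo", "echo": "hello"}]}),
        "--run-id", "01",
    ])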
@@ -136,8 +186,24 @@ def workflow_callback():
 
 
 @workflow_app.command(name="execute")
-def workflow_execute(
-
+def workflow_execute(
+    name: Annotated[
+        str,
+        typer.Option(help="A name of workflow template."),
+    ],
+    params: Annotated[
+        str,
+        typer.Option(help="A workflow execute parameters"),
+    ] = "{}",
+):
+    """Execute workflow by passing a workflow template name."""
+    try:
+        params_dict: dict[str, Any] = json.loads(params)
+    except json.JSONDecodeError as e:
+        raise ValueError(f"Params does not support format: {params!r}.") from e
+
+    typer.echo(f"Start execute workflow template: {name}")
+    typer.echo(f"... with params: {params_dict}")
 
 
 WORKFLOW_TYPE = Literal["Workflow"]
@@ -167,7 +233,7 @@ def workflow_json_schema(
     template_schema: dict[str, str] = {
         "$schema": "http://json-schema.org/draft-07/schema#",
         "title": "Workflow Configuration Schema",
-        "version":
+        "version": __version__,
     }
     with open(output, mode="w", encoding="utf-8") as f:
         json.dump(template_schema | json_schema, f, indent=2)
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/errors.py
@@ -136,16 +136,14 @@ class BaseError(Exception):
             ErrorData or dict: Exception data, optionally mapped by reference ID
 
         Example:
-
-
-
-            #
-
-            #
-
-            #
-            ref_data = error.to_dict(with_refs=True)
-            # Returns: {"stage-1": {"name": "BaseError", "message": "Something failed"}}
+            >>> error = BaseError("Something failed", refs="stage-1")
+            >>> # Simple format
+            >>> error.to_dict()
+            >>> # Returns: {"name": "BaseError", "message": "Something failed"}
+
+            >>> # With reference mapping
+            >>> error.to_dict(with_refs=True)
+            >>> # Returns: {"stage-1": {"name": "BaseError", "message": "Something failed"}}
             ```
         """
         data: ErrorData = to_dict(self)
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/job.py
@@ -656,6 +656,7 @@ class Job(BaseModel):
         to: DictData,
         *,
         job_id: StrOrNone = None,
+        **kwargs,
     ) -> DictData:
         """Set an outputs from execution result context to the received context
         with a `to` input parameter. The result context from job strategy
@@ -693,12 +694,15 @@ class Job(BaseModel):
         :raise JobError: If the job's ID does not set and the setting
             default job ID flag does not set.
 
-        :
-
-
-
+        Args:
+            output: (DictData) A result data context that want to extract
+                and transfer to the `strategies` key in receive context.
+            to: (DictData) A received context data.
+            job_id: (StrOrNone) A job ID if the `id` field does not set.
+            kwargs: Any values that want to add to the target context.
 
-        :
+        Returns:
+            DictData: Return updated the target context with a result context.
         """
         if "jobs" not in to:
             to["jobs"] = {}
@@ -716,8 +720,9 @@ class Job(BaseModel):
         status: dict[str, Status] = (
             {"status": output.pop("status")} if "status" in output else {}
         )
+        kwargs: DictData = kwargs or {}
         if self.strategy.is_set():
-            to["jobs"][_id] = {"strategies": output} | errors | status
+            to["jobs"][_id] = {"strategies": output} | errors | status | kwargs
         elif len(k := output.keys()) > 1:  # pragma: no cov
             raise JobError(
                 "Strategy output from execution return more than one ID while "
@@ -726,7 +731,7 @@ class Job(BaseModel):
         else:
             _output: DictData = {} if len(k) == 0 else output[list(k)[0]]
             _output.pop("matrix", {})
-            to["jobs"][_id] = _output | errors | status
+            to["jobs"][_id] = _output | errors | status | kwargs
         return to
 
     def get_outputs(
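Note: both branches above (and the matching `stages.py` change later in this diff) end in the same dict-union merge, where operands further right win on key collisions, so `**kwargs` can override or extend the stored job output. A self-contained illustration with plain dicts; every value here is made up:

    output = {"2150810470": {"stages": {}}}
    errors: dict = {}
    status = {"status": "SUCCESS"}
    kwargs = {"info": {"note": "attached via **kwargs"}}

    to = {"jobs": {}}
    to["jobs"]["demo-run"] = {"strategies": output} | errors | status | kwargs
    # {'strategies': {...}, 'status': 'SUCCESS', 'info': {'note': ...}}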
@@ -800,8 +805,7 @@ class Job(BaseModel):
         return docker_execution(
             self,
             params,
-            run_id=
-            parent_run_id=parent_run_id,
+            run_id=parent_run_id,
             event=event,
         ).make_info({"execution_time": time.monotonic() - ts})
 
@@ -1294,7 +1298,6 @@ def docker_execution(
     params: DictData,
     *,
     run_id: StrOrNone = None,
-    parent_run_id: StrOrNone = None,
     event: Optional[Event] = None,
 ):  # pragma: no cov
     """Docker job execution.
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/result.py
@@ -16,7 +16,6 @@ Classes:
 Functions:
     validate_statuses: Determine final status from multiple status values
     get_status_from_error: Convert exception types to appropriate status
-    get_dt_tznow: Get current datetime with timezone configuration
 """
 from __future__ import annotations
 
@@ -89,6 +88,7 @@ class Status(str, Enum):
         return self.name
 
     def is_result(self) -> bool:
+        """Return True if this status is the status for result object."""
         return self in ResultStatuses
 
 
@@ -115,15 +115,13 @@ def validate_statuses(statuses: list[Status]) -> Status:
         Status: Final consolidated status based on workflow logic
 
     Example:
-
-
-
-
-
-
-
-        # Returns: SUCCESS
-        ```
+        >>> # Mixed statuses - FAILED takes priority
+        >>> validate_statuses([SUCCESS, FAILED, SUCCESS])
+        >>> # Returns: FAILED
+
+        >>> # All same status
+        >>> validate_statuses([SUCCESS, SUCCESS, SUCCESS])
+        >>> # Returns: SUCCESS
     """
     if any(s == CANCEL for s in statuses):
         return CANCEL
@@ -153,6 +151,9 @@ def get_status_from_error(
 ) -> Status:
     """Get the Status from the error object.
 
+    Args:
+        error: An error object.
+
     Returns:
         Status: The status from the specific exception class.
     """
@@ -189,7 +190,7 @@ class Result:
     context: DictData = field(default_factory=default_context)
     info: DictData = field(default_factory=dict)
     run_id: Optional[str] = field(default_factory=default_gen_id)
-    parent_run_id: Optional[str] = field(default=None
+    parent_run_id: Optional[str] = field(default=None)
     ts: datetime = field(default_factory=get_dt_now, compare=False)
     trace: Optional[Trace] = field(default=None, compare=False, repr=False)
     extras: DictData = field(default_factory=dict, compare=False, repr=False)
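Note: `get_status_from_error` is also re-exported at the package root by this release (see the `__init__.py` hunk above), so the documented contract can be exercised directly. The mapping in the comment is inferred from the error class names elsewhere in this diff, not stated by it:

    from ddeutil.workflow import get_status_from_error
    from ddeutil.workflow.errors import JobError

    status = get_status_from_error(JobError("boom"))
    # presumably FAILED for plain error types, with dedicated cancel/skip
    # error classes mapping to CANCEL/SKIP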
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/stages.py
@@ -295,7 +295,7 @@ class BaseStage(BaseModel, ABC):
         ts: float = time.monotonic()
         parent_run_id: str = run_id
         run_id: str = run_id or gen_id(self.iden, unique=True)
-        context: DictData = {}
+        context: DictData = {"status": WAIT}
         trace: Trace = get_trace(
             run_id, parent_run_id=parent_run_id, extras=self.extras
         )
@@ -413,7 +413,7 @@ class BaseStage(BaseModel, ABC):
         self,
         output: DictData,
         to: DictData,
-
+        **kwargs,
     ) -> DictData:
         """Set an outputs from execution result context to the received context
         with a `to` input parameter. The result context from stage execution
@@ -447,12 +447,14 @@ class BaseStage(BaseModel, ABC):
         to the `to` argument. The result context was soft copied before set
         output step.
 
-        :
-
-
-
+        Args:
+            output: (DictData) A result data context that want to extract
+                and transfer to the `outputs` key in receive context.
+            to: (DictData) A received context data.
+            kwargs: Any values that want to add to the target context.
 
-        :
+        Returns:
+            DictData: Return updated the target context with a result context.
         """
         if "stages" not in to:
             to["stages"] = {}
@@ -470,8 +472,8 @@ class BaseStage(BaseModel, ABC):
         status: dict[str, Status] = (
             {"status": output.pop("status")} if "status" in output else {}
         )
-
-        to["stages"][_id] = {"outputs": output} | errors | status
+        kwargs: DictData = kwargs or {}
+        to["stages"][_id] = {"outputs": output} | errors | status | kwargs
         return to
 
     def get_outputs(self, output: DictData) -> DictData:
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/traces.py
@@ -287,7 +287,7 @@ class TraceData(BaseModel): # pragma: no cov
         return cls.model_validate(data)
 
 
-class
+class BaseEmitTrace(BaseModel, ABC):  # pragma: no cov
     """Base Trace model with abstraction class property."""
 
     model_config = ConfigDict(frozen=True)
@@ -474,7 +474,7 @@ class BaseTrace(BaseModel, ABC): # pragma: no cov
         await self.amit(message, mode="exception", is_err=True)
 
 
-class ConsoleTrace(
+class ConsoleTrace(BaseEmitTrace):  # pragma: no cov
     """Console Trace log model."""
 
     def writer(
@@ -566,7 +566,11 @@ class ConsoleTrace(BaseTrace): # pragma: no cov
         getattr(logger, mode)(msg, stacklevel=3, extra={"cut_id": self.cut_id})
 
 
-class
+class BaseTrace(ConsoleTrace, ABC):
+    """A Base Trace model that will use for override writing or sending trace
+    log to any service type.
+    """
+
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     url: ParseResult = Field(description="An URL for create pointer.")
@@ -575,9 +579,8 @@ class OutsideTrace(ConsoleTrace, ABC):
         "url", mode="before", json_schema_input_type=Union[ParseResult, str]
     )
     def __parse_url(cls, value: Union[ParseResult, str]) -> ParseResult:
-
-
-        return value
+        """Parsing an URL value."""
+        return urlparse(value) if isinstance(value, str) else value
 
     @field_serializer("url")
     def __serialize_url(self, value: ParseResult) -> str:
@@ -621,7 +624,7 @@ class OutsideTrace(ConsoleTrace, ABC):
     )
 
 
-class FileTrace(
+class FileTrace(BaseTrace):  # pragma: no cov
     """File Trace dataclass that write file to the local storage."""
 
     @classmethod
@@ -765,7 +768,7 @@ class FileTrace(OutsideTrace): # pragma: no cov
         await f.write(trace_meta.model_dump_json() + "\n")
 
 
-class SQLiteTrace(
+class SQLiteTrace(BaseTrace):  # pragma: no cov
     """SQLite Trace dataclass that write trace log to the SQLite database file."""
 
     table_name: ClassVar[str] = "audits"
@@ -779,7 +782,7 @@ class SQLiteTrace(OutsideTrace): # pragma: no cov
         , metadata JSON
         , created_at datetime
         , updated_at datetime
-        primary key (
+        primary key ( parent_run_id )
         """
 
     @classmethod
@@ -824,7 +827,7 @@ class SQLiteTrace(OutsideTrace): # pragma: no cov
 Trace = Union[
     FileTrace,
     SQLiteTrace,
-
+    BaseTrace,
 ]
 
 
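Note: after the renames the hierarchy reads `BaseEmitTrace` -> `ConsoleTrace` -> `BaseTrace` -> `FileTrace`/`SQLiteTrace`, with `BaseTrace` as the stated extension point for shipping trace logs to other services. A rough subclass sketch; the `writer` signature is guessed from the `ConsoleTrace.writer(` fragment above and should be checked against the source before use:

    from ddeutil.workflow import BaseTrace

    class HttpTrace(BaseTrace):  # hypothetical subclass, not in this diff
        """Send each trace record to an HTTP collector at `self.url`."""

        def writer(self, message: str, level: str, is_err: bool = False) -> None:
            # e.g. POST the rendered message to self.url.geturl()
            ...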
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/src/ddeutil/workflow/workflow.py
@@ -389,11 +389,12 @@ class Workflow(BaseModel):
         params: DictData,
         *,
         run_id: Optional[str] = None,
+        runs_metadata: Optional[DictData] = None,
         release_type: ReleaseType = NORMAL,
-        audit: type[Audit] = None,
         override_log_name: Optional[str] = None,
         timeout: int = 600,
-
+        audit_excluded: Optional[list[str]] = None,
+        audit: type[Audit] = None,
     ) -> Result:
         """Release the workflow which is executes workflow with writing audit
         log tracking. The method is overriding parameter with the release
@@ -409,18 +410,22 @@ class Workflow(BaseModel):
         - Execute this workflow with mapping release data to its parameters.
         - Writing result audit
 
-        :
-
-
-
-
-
-
-
-
+        Args:
+            release: (datetime) A release datetime.
+            params: A workflow parameter that pass to execute method.
+            release_type:
+            run_id: (str) A workflow running ID.
+            runs_metadata: (DictData)
+            audit: An audit class that want to save the execution result.
+            override_log_name: (str) An override logging name that use
+                instead the workflow name.
+            timeout: (int) A workflow execution time out in second unit.
+            audit_excluded: (list[str]) A list of key that want to exclude
+                from the audit data.
 
-        :
+        Returns:
+            Result: return result object that pass context data from the execute
+                method.
         """
         name: str = override_log_name or self.name
 
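Note: a sketch of the new `release` call shape. `Workflow.from_conf` as the loader is an assumption about the package's usual entrypoint, and the metadata keys are illustrative:

    from datetime import datetime
    from ddeutil.workflow import Workflow

    wf = Workflow.from_conf("wf-example")  # assumed loader
    rs = wf.release(
        release=datetime(2025, 1, 1),
        params={"name": "World"},
        runs_metadata={"triggered_by": "scheduler"},  # lands on the audit record
        audit_excluded=["context"],  # keys dropped when the audit is saved
    )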
@@ -432,7 +437,7 @@ class Workflow(BaseModel):
         run_id: str = gen_id(name, unique=True)
         parent_run_id: str = run_id
 
-        context: DictData = {}
+        context: DictData = {"status": WAIT}
         trace: Trace = get_trace(
             run_id, parent_run_id=parent_run_id, extras=self.extras
         )
@@ -445,6 +450,7 @@ class Workflow(BaseModel):
                     "logical_date": release,
                     "execute_date": get_dt_now(),
                     "run_id": run_id,
+                    "runs_metadata": runs_metadata or {},
                 }
             },
             extras=self.extras,
@@ -465,9 +471,17 @@ class Workflow(BaseModel):
                 context=context,
                 parent_run_id=parent_run_id,
                 run_id=run_id,
-                execution_time=rs.info.get("execution_time", 0),
                 extras=self.extras,
-
+                runs_metadata=(
+                    (runs_metadata or {})
+                    | rs.info
+                    | {
+                        "timeout": timeout,
+                        "original_name": self.name,
+                        "audit_excluded": audit_excluded,
+                    }
+                ),
+            ).save(excluded=audit_excluded)
         )
         return Result(
             run_id=run_id,
@@ -492,7 +506,6 @@ class Workflow(BaseModel):
     def execute_job(
         self,
         job: Job,
-        params: DictData,
         run_id: str,
         context: DictData,
         *,
@@ -511,7 +524,6 @@ class Workflow(BaseModel):
 
         Args:
             job: (Job) A job model that want to execute.
-            params: (DictData) A parameter data.
             run_id: A running stage ID.
             context: A context data.
             parent_run_id: A parent running ID. (Default is None)
@@ -538,25 +550,24 @@ class Workflow(BaseModel):
         )
 
         trace.info(f"[WORKFLOW]: Execute Job: {job.id!r}")
-
-            params=
+        result: Result = job.execute(
+            params=context,
             run_id=parent_run_id,
             event=event,
         )
-        job.set_outputs(
+        job.set_outputs(result.context, to=context)
 
-        if
+        if result.status == FAILED:
             error_msg: str = f"Job execution, {job.id!r}, was failed."
             return FAILED, catch(
                 context=context,
                 status=FAILED,
                 updated={
                     "errors": WorkflowError(error_msg).to_dict(),
-                    **params,
                 },
             )
 
-        elif
+        elif result.status == CANCEL:
             error_msg: str = (
                 f"Job execution, {job.id!r}, was canceled from the event after "
                 f"end job execution."
@@ -566,13 +577,10 @@ class Workflow(BaseModel):
                 status=CANCEL,
                 updated={
                     "errors": WorkflowCancelError(error_msg).to_dict(),
-                    **params,
                 },
             )
 
-        return
-            context=context, status=rs.status, updated=params
-        )
+        return result.status, catch(context, status=result.status)
 
     def execute(
         self,
@@ -753,7 +761,6 @@ class Workflow(BaseModel):
                 executor.submit(
                     self.execute_job,
                     job=job,
-                    params=context,
                     run_id=run_id,
                     context=context,
                     parent_run_id=parent_run_id,
@@ -768,7 +775,6 @@ class Workflow(BaseModel):
                 executor.submit(
                     self.execute_job,
                     job=job,
-                    params=context,
                     run_id=run_id,
                     context=context,
                     parent_run_id=parent_run_id,
@@ -898,7 +904,7 @@ class Workflow(BaseModel):
             extras=self.extras,
         )
 
-        err = context
+        err: dict[str, str] = context.get("errors", {})
         trace.info(f"[WORKFLOW]: Previous error: {err}")
 
         event: ThreadEvent = event or ThreadEvent()
@@ -919,9 +925,9 @@ class Workflow(BaseModel):
             extras=self.extras,
         )
 
-        # NOTE: Prepare the new context for rerun process.
+        # NOTE: Prepare the new context variable for rerun process.
         jobs: DictData = context.get("jobs")
-
+        context: DictData = {
             "params": context["params"].copy(),
             "jobs": {j: jobs[j] for j in jobs if jobs[j]["status"] == SUCCESS},
         }
@@ -930,19 +936,22 @@ class Workflow(BaseModel):
         job_queue: Queue = Queue()
         for job_id in self.jobs:
 
-            if job_id in
+            if job_id in context["jobs"]:
                 continue
 
             job_queue.put(job_id)
             total_job += 1
 
         if total_job == 0:
-            trace.warning(
+            trace.warning(
+                "[WORKFLOW]: It does not have job to rerun. it will change "
+                "status to skip."
+            )
             return Result(
                 run_id=run_id,
                 parent_run_id=parent_run_id,
-                status=
-                context=catch(context=context, status=
+                status=SKIP,
+                context=catch(context=context, status=SKIP),
                 extras=self.extras,
             )
 
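Note: the rerun path rebuilds its working context from only the jobs that already succeeded; everything else is queued again, and an empty queue now short-circuits to SKIP. A plain-dict illustration of the filter (job names and statuses are made up):

    SUCCESS, FAILED = "SUCCESS", "FAILED"  # stand-ins for the Status enum
    context = {
        "params": {"name": "World"},
        "jobs": {
            "extract": {"status": SUCCESS},
            "load": {"status": FAILED},
        },
    }
    jobs = context.get("jobs")
    context = {
        "params": context["params"].copy(),
        "jobs": {j: jobs[j] for j in jobs if jobs[j]["status"] == SUCCESS},
    }
    # only "extract" survives; "load" goes back on the rerun queue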
@@ -954,14 +963,14 @@ class Workflow(BaseModel):
             "max_job_exec_timeout", f=timeout, extras=self.extras
         )
 
-        catch(
+        catch(context, status=WAIT)
         if event and event.is_set():
             return Result(
                 run_id=run_id,
                 parent_run_id=parent_run_id,
                 status=CANCEL,
                 context=catch(
-
+                    context,
                     status=CANCEL,
                     updated={
                         "errors": WorkflowCancelError(
@@ -983,7 +992,7 @@ class Workflow(BaseModel):
         ):
             job_id: str = job_queue.get()
             job: Job = self.job(name=job_id)
-            if (check := job.check_needs(
+            if (check := job.check_needs(context["jobs"])) == WAIT:
                 job_queue.task_done()
                 job_queue.put(job_id)
                 consecutive_waits += 1
@@ -1003,7 +1012,7 @@ class Workflow(BaseModel):
                     parent_run_id=parent_run_id,
                     status=FAILED,
                     context=catch(
-
+                        context,
                         status=FAILED,
                         updated={
                             "status": FAILED,
@@ -1019,7 +1028,7 @@ class Workflow(BaseModel):
                 trace.info(
                     f"[JOB]: Skip job: {job_id!r} from trigger rule."
                 )
-                job.set_outputs(output={"status": SKIP}, to=
+                job.set_outputs(output={"status": SKIP}, to=context)
                 job_queue.task_done()
                 skip_count += 1
                 continue
@@ -1029,7 +1038,6 @@ class Workflow(BaseModel):
                 executor.submit(
                     self.execute_job,
                     job=job,
-                    params=new_context,
                     run_id=run_id,
                     context=context,
                     parent_run_id=parent_run_id,
@@ -1044,7 +1052,6 @@ class Workflow(BaseModel):
                 executor.submit(
                     self.execute_job,
                     job=job,
-                    params=new_context,
                     run_id=run_id,
                     context=context,
                     parent_run_id=parent_run_id,
@@ -1095,7 +1102,7 @@ class Workflow(BaseModel):
             run_id=run_id,
             parent_run_id=parent_run_id,
             status=st,
-            context=catch(
+            context=catch(context, status=st),
             extras=self.extras,
         )
 
@@ -1115,7 +1122,7 @@ class Workflow(BaseModel):
             parent_run_id=parent_run_id,
             status=FAILED,
             context=catch(
-
+                context,
                 status=FAILED,
                 updated={
                     "errors": WorkflowTimeoutError(
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_utils.py
@@ -14,6 +14,7 @@ from ddeutil.workflow.utils import (
     get_diff_sec,
     get_dt_now,
     make_exec,
+    obj_name,
     prepare_newline,
     reach_next_minute,
 )
@@ -185,3 +186,10 @@ def test_dump_all():
         {"name": "first", "info": {"field": "foo", "age": 10}},
         {"name": "second", "info": {"field": "foo", "age": 10}},
     ]
+
+
+def test_obj_name():
+    assert obj_name() is None
+    assert obj_name("datetime") == "datetime"
+    assert obj_name(datetime) == "datetime"
+    assert obj_name(datetime(2025, 1, 1, 1)) == "datetime"
{ddeutil_workflow-0.0.75 → ddeutil_workflow-0.0.76}/tests/test_workflow_exec_job.py
@@ -18,7 +18,7 @@ def test_workflow_execute_job():
     )
     workflow: Workflow = Workflow(name="workflow", jobs={"demo-run": job})
     st, ctx = workflow.execute_job(
-        job=workflow.job("demo-run"),
+        job=workflow.job("demo-run"), run_id="1234", context={}
     )
     assert st == SUCCESS
     assert ctx == {
@@ -46,7 +46,7 @@ def test_workflow_execute_job_raise_inside():
     )
     workflow: Workflow = Workflow(name="workflow", jobs={"demo-run": job})
     st, ctx = workflow.execute_job(
-        job=workflow.job("demo-run"),
+        job=workflow.job("demo-run"), run_id="1234", context={}
     )
     assert st == FAILED
     assert ctx == {
ddeutil_workflow-0.0.75/src/ddeutil/workflow/__about__.py
@@ -1 +0,0 @@
-__version__: str = "0.0.75"