ddeutil-workflow 0.0.69__tar.gz → 0.0.70__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ddeutil_workflow-0.0.69/src/ddeutil_workflow.egg-info → ddeutil_workflow-0.0.70}/PKG-INFO +1 -1
- ddeutil_workflow-0.0.70/src/ddeutil/workflow/__about__.py +1 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/cli.py +51 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/conf.py +2 -2
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/errors.py +7 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/job.py +9 -3
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/reusables.py +1 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/traces.py +120 -82
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/utils.py +19 -11
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/workflow.py +218 -2
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70/src/ddeutil_workflow.egg-info}/PKG-INFO +1 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil_workflow.egg-info/SOURCES.txt +2 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_workflow.py +4 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_workflow_exec.py +12 -7
- ddeutil_workflow-0.0.70/tests/test_workflow_rerun.py +167 -0
- ddeutil_workflow-0.0.69/src/ddeutil/workflow/__about__.py +0 -1
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/LICENSE +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/README.md +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/pyproject.toml +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/setup.cfg +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/__cron.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/__init__.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/__main__.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/__types.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/api/__init__.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/api/log_conf.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/api/routes/__init__.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/api/routes/job.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/api/routes/logs.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/api/routes/workflows.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/audits.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/event.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/params.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/result.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/stages.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil_workflow.egg-info/dependency_links.txt +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil_workflow.egg-info/entry_points.txt +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil_workflow.egg-info/requires.txt +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil_workflow.egg-info/top_level.txt +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test__cron.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test__regex.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_audits.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_conf.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_errors.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_event.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_job.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_job_exec.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_job_exec_strategy.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_params.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_result.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_reusables_call_tag.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_reusables_func_model.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_reusables_template.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_reusables_template_filter.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_strategy.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_traces.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_utils.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_workflow_exec_job.py +0 -0
- {ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_workflow_release.py +0 -0
ddeutil_workflow-0.0.70/src/ddeutil/workflow/__about__.py

@@ -0,0 +1 @@
+__version__: str = "0.0.70"
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/cli.py

@@ -1,17 +1,21 @@
 import json
 from pathlib import Path
 from platform import python_version
-from typing import Annotated, Any, Optional
+from typing import Annotated, Any, Literal, Optional, Union

 import typer
 import uvicorn
+from pydantic import Field, TypeAdapter

 from .__about__ import __version__
 from .__types import DictData
 from .api import app as fastapp
 from .errors import JobError
+from .event import Crontab
 from .job import Job
+from .params import Param
 from .result import Result
+from .workflow import Workflow

 app = typer.Typer(
     pretty_exceptions_enable=True,

@@ -129,5 +133,51 @@ def workflow_execute():
     """"""


+WORKFLOW_TYPE = Literal["Workflow"]
+
+
+class WorkflowSchema(Workflow):
+    """Override workflow model fields for generate JSON schema file."""
+
+    type: WORKFLOW_TYPE = Field(description="A type of workflow template.")
+    name: Optional[str] = Field(default=None, description="A workflow name.")
+    params: dict[str, Union[Param, str]] = Field(
+        default_factory=dict,
+        description="A parameters that need to use on this workflow.",
+    )
+    on: Union[list[Union[Crontab, str]], str] = Field(
+        default_factory=list,
+        description="A list of Crontab instance for this workflow schedule.",
+    )
+
+
+CRONTAB_TYPE = Literal["Crontab"]
+
+
+class CrontabSchema(Crontab):
+    """Override crontab model fields for generate JSON schema file."""
+
+    type: CRONTAB_TYPE = Field(description="A type of crontab template.")
+
+
+@workflow_app.command(name="json-schema")
+def workflow_json_schema(
+    output: Annotated[
+        Path,
+        typer.Option(help="An output file to export the JSON schema."),
+    ] = Path("./json-schema.json"),
+) -> None:
+    """Generate JSON schema file from the Workflow model."""
+    template = dict[str, Union[WorkflowSchema, CrontabSchema]]
+    json_schema = TypeAdapter(template).json_schema(by_alias=True)
+    template_schema: dict[str, str] = {
+        "$schema": "http://json-schema.org/draft-07/schema#",
+        "title": "Workflow Configuration Schema",
+        "version": "1.0.0",
+    }
+    with open(output, mode="w", encoding="utf-8") as f:
+        json.dump(template_schema | json_schema, f, indent=2)
+
+
 if __name__ == "__main__":
     app()
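The new `json-schema` sub-command dumps a draft-07 schema describing a mapping of template keys to either a Workflow or a Crontab definition. A minimal sketch of validating a template document against the generated file, using the third-party `jsonschema` package; the template content below is an assumption for illustration only, not taken from the package:

import json

from jsonschema import validate  # third-party validator, not part of ddeutil-workflow

# Schema file produced by the new command (default output path is the
# `./json-schema.json` shown in the Typer option above).
with open("json-schema.json", encoding="utf-8") as f:
    schema = json.load(f)

# A hypothetical template document shaped like dict[str, WorkflowSchema | CrontabSchema].
template = {
    "my-workflow": {
        "type": "Workflow",
        "params": {"name": "str"},
        "on": ["* * * * *"],
    },
}

# Raises jsonschema.ValidationError if the document does not match the schema.
validate(instance=template, schema=schema)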
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/conf.py

@@ -327,13 +327,13 @@ class YamlParser:
         *,
         ignore_filename: Optional[str] = None,
     ) -> bool:
-        """Check this file was ignored.
+        """Check this file was ignored from the `.confignore` format.

         :param file: (Path) A file path that want to check.
         :param path: (Path) A config path that want to read the config
             ignore file.
         :param ignore_filename: (str) An ignore filename. Default is
-
+            ``.confignore`` filename.

         :rtype: bool
         """
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/errors.py

@@ -38,8 +38,14 @@ def to_dict(exception: Exception, **kwargs) -> ErrorData:  # pragma: no cov


 class BaseError(Exception):
-    """Base Workflow exception class will implement the
+    """Base Workflow exception class will implement the ``refs`` argument for
     making an error context to the result context.
+
+    Attributes:
+        refs: (:obj:str, optional)
+        context: (:obj:DictData)
+        params: (:obj:DictData)
+
     """

     def __init__(
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/job.py

@@ -402,13 +402,19 @@ class Job(BaseModel):
         """
         # VALIDATE: Validate stage id should not duplicate.
         rs: list[str] = []
+        rs_raise: list[str] = []
         for stage in value:
             name: str = stage.iden
             if name in rs:
-
-
-                )
+                rs_raise.append(name)
+                continue
             rs.append(name)
+
+        if rs_raise:
+            raise ValueError(
+                f"Stage name, {', '.join(repr(s) for s in rs_raise)}, should "
+                f"not be duplicate."
+            )
         return value

     @model_validator(mode="after")
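The reworked validator now collects every duplicated stage identifier before raising, so a single ValueError reports all offending names at once. A small sketch of what this looks like from the caller's side, assuming the stage identifier falls back to the stage name when no explicit `id` is set; the stage payloads are illustrative:

import pytest

from ddeutil.workflow import Job


def test_duplicate_stage_names_rejected():
    # Two stages that resolve to the same identifier should be rejected
    # together by the `stages` validator shown above.
    with pytest.raises(ValueError):
        Job(
            stages=[
                {"name": "Sleep", "run": "import time\ntime.sleep(1)"},
                {"name": "Sleep", "echo": "duplicate identifier"},
            ],
        )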
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/reusables.py

@@ -44,7 +44,7 @@ from .errors import UtilError
 T = TypeVar("T")
 P = ParamSpec("P")

-# NOTE: Adjust logging level of the
+# NOTE: Adjust logging level of the ``asyncio`` to INFO level.
 logging.getLogger("asyncio").setLevel(logging.INFO)

{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/traces.py

@@ -333,6 +333,124 @@ class BaseTrace(BaseModel, ABC):  # pragma: no cov
             "Adjust make message method for this trace object before using."
         )

+    @abstractmethod
+    def _logging(
+        self,
+        message: str,
+        mode: str,
+        *,
+        is_err: bool = False,
+    ):
+        """Write trace log with append mode and logging this message with any
+        logging level.
+
+        :param message: (str) A message that want to log.
+        :param mode: (str)
+        :param is_err: (bool)
+        """
+        raise NotImplementedError(
+            "Logging action should be implement for making trace log."
+        )
+
+    def debug(self, message: str):
+        """Write trace log with append mode and logging this message with the
+        DEBUG level.
+
+        :param message: (str) A message that want to log.
+        """
+        self._logging(message, mode="debug")
+
+    def info(self, message: str) -> None:
+        """Write trace log with append mode and logging this message with the
+        INFO level.
+
+        :param message: (str) A message that want to log.
+        """
+        self._logging(message, mode="info")
+
+    def warning(self, message: str) -> None:
+        """Write trace log with append mode and logging this message with the
+        WARNING level.
+
+        :param message: (str) A message that want to log.
+        """
+        self._logging(message, mode="warning")
+
+    def error(self, message: str) -> None:
+        """Write trace log with append mode and logging this message with the
+        ERROR level.
+
+        :param message: (str) A message that want to log.
+        """
+        self._logging(message, mode="error", is_err=True)
+
+    def exception(self, message: str) -> None:
+        """Write trace log with append mode and logging this message with the
+        EXCEPTION level.
+
+        :param message: (str) A message that want to log.
+        """
+        self._logging(message, mode="exception", is_err=True)
+
+    @abstractmethod
+    async def _alogging(
+        self,
+        message: str,
+        mode: str,
+        *,
+        is_err: bool = False,
+    ) -> None:
+        """Async write trace log with append mode and logging this message with
+        any logging level.
+
+        :param message: (str) A message that want to log.
+        :param mode: (str)
+        :param is_err: (bool)
+        """
+        raise NotImplementedError(
+            "Async Logging action should be implement for making trace log."
+        )
+
+    async def adebug(self, message: str) -> None:  # pragma: no cov
+        """Async write trace log with append mode and logging this message with
+        the DEBUG level.
+
+        :param message: (str) A message that want to log.
+        """
+        await self._alogging(message, mode="debug")
+
+    async def ainfo(self, message: str) -> None:  # pragma: no cov
+        """Async write trace log with append mode and logging this message with
+        the INFO level.
+
+        :param message: (str) A message that want to log.
+        """
+        await self._alogging(message, mode="info")
+
+    async def awarning(self, message: str) -> None:  # pragma: no cov
+        """Async write trace log with append mode and logging this message with
+        the WARNING level.
+
+        :param message: (str) A message that want to log.
+        """
+        await self._alogging(message, mode="warning")
+
+    async def aerror(self, message: str) -> None:  # pragma: no cov
+        """Async write trace log with append mode and logging this message with
+        the ERROR level.
+
+        :param message: (str) A message that want to log.
+        """
+        await self._alogging(message, mode="error", is_err=True)
+
+    async def aexception(self, message: str) -> None:  # pragma: no cov
+        """Async write trace log with append mode and logging this message with
+        the EXCEPTION level.
+
+        :param message: (str) A message that want to log.
+        """
+        await self._alogging(message, mode="exception", is_err=True)
+

 class ConsoleTrace(BaseTrace):  # pragma: no cov
     """Console Trace log model."""

@@ -416,7 +534,7 @@ class ConsoleTrace(BaseTrace):  # pragma: no cov
             f"{PrefixMsg.from_str(message).prepare(self.extras)}"
         )

-    def __logging(
+    def _logging(
         self, message: str, mode: str, *, is_err: bool = False
     ) -> None:
         """Write trace log with append mode and logging this message with any

@@ -433,47 +551,7 @@ class ConsoleTrace(BaseTrace):  # pragma: no cov

         getattr(logger, mode)(msg, stacklevel=3, extra={"cut_id": self.cut_id})

-    def debug(self, message: str):
-        """Write trace log with append mode and logging this message with the
-        DEBUG level.
-
-        :param message: (str) A message that want to log.
-        """
-        self.__logging(message, mode="debug")
-
-    def info(self, message: str) -> None:
-        """Write trace log with append mode and logging this message with the
-        INFO level.
-
-        :param message: (str) A message that want to log.
-        """
-        self.__logging(message, mode="info")
-
-    def warning(self, message: str) -> None:
-        """Write trace log with append mode and logging this message with the
-        WARNING level.
-
-        :param message: (str) A message that want to log.
-        """
-        self.__logging(message, mode="warning")
-
-    def error(self, message: str) -> None:
-        """Write trace log with append mode and logging this message with the
-        ERROR level.
-
-        :param message: (str) A message that want to log.
-        """
-        self.__logging(message, mode="error", is_err=True)
-
-    def exception(self, message: str) -> None:
-        """Write trace log with append mode and logging this message with the
-        EXCEPTION level.
-
-        :param message: (str) A message that want to log.
-        """
-        self.__logging(message, mode="exception", is_err=True)
-
-    async def __alogging(
+    async def _alogging(
         self, message: str, mode: str, *, is_err: bool = False
     ) -> None:
         """Write trace log with append mode and logging this message with any

@@ -490,46 +568,6 @@ class ConsoleTrace(BaseTrace):  # pragma: no cov

         getattr(logger, mode)(msg, stacklevel=3, extra={"cut_id": self.cut_id})

-    async def adebug(self, message: str) -> None:  # pragma: no cov
-        """Async write trace log with append mode and logging this message with
-        the DEBUG level.
-
-        :param message: (str) A message that want to log.
-        """
-        await self.__alogging(message, mode="debug")
-
-    async def ainfo(self, message: str) -> None:  # pragma: no cov
-        """Async write trace log with append mode and logging this message with
-        the INFO level.
-
-        :param message: (str) A message that want to log.
-        """
-        await self.__alogging(message, mode="info")
-
-    async def awarning(self, message: str) -> None:  # pragma: no cov
-        """Async write trace log with append mode and logging this message with
-        the WARNING level.
-
-        :param message: (str) A message that want to log.
-        """
-        await self.__alogging(message, mode="warning")
-
-    async def aerror(self, message: str) -> None:  # pragma: no cov
-        """Async write trace log with append mode and logging this message with
-        the ERROR level.
-
-        :param message: (str) A message that want to log.
-        """
-        await self.__alogging(message, mode="error", is_err=True)
-
-    async def aexception(self, message: str) -> None:  # pragma: no cov
-        """Async write trace log with append mode and logging this message with
-        the EXCEPTION level.
-
-        :param message: (str) A message that want to log.
-        """
-        await self.__alogging(message, mode="exception", is_err=True)
-

 class FileTrace(ConsoleTrace):  # pragma: no cov
     """File Trace dataclass that write file to the local storage."""
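With the level-specific wrappers (`debug`, `info`, ..., `aexception`) hoisted into `BaseTrace`, a concrete trace now only needs to supply the `_logging` and `_alogging` hooks. A sketch of a custom trace that extends `ConsoleTrace` and mirrors every message into an in-memory buffer; it assumes `ConsoleTrace` needs no extra required fields beyond what it already defines:

from typing import ClassVar

from ddeutil.workflow.traces import ConsoleTrace


class BufferedTrace(ConsoleTrace):
    """Keep a copy of every trace message next to the normal console logging."""

    # NOTE: A class-level buffer keeps this sketch short; a proper pydantic
    #   field would be the idiomatic place for per-instance state.
    buffer: ClassVar[list[tuple[str, str]]] = []

    def _logging(self, message: str, mode: str, *, is_err: bool = False) -> None:
        self.buffer.append((mode, message))
        super()._logging(message, mode, is_err=is_err)

    async def _alogging(self, message: str, mode: str, *, is_err: bool = False) -> None:
        self.buffer.append((mode, message))
        await super()._alogging(message, mode, is_err=is_err)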
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/utils.py

@@ -163,21 +163,23 @@ def gen_id(
     extras: DictData | None = None,
 ) -> str:
     """Generate running ID for able to tracking. This generates process use
-
+    ``md5`` algorithm function if ``WORKFLOW_CORE_WORKFLOW_ID_SIMPLE_MODE`` set
     to false. But it will cut this hashing value length to 10 it the setting
     value set to true.

     Simple Mode:

-        ... 0000 00 00 00 00 00 000000
-        ... year month day hour minute second microsecond
+        ... 0000 00 00 00 00 00 000000 T 0000000000
+        ... year month day hour minute second microsecond sep simple-id

     :param value: A value that want to add to prefix before hashing with md5.
-    :param sensitive: A flag that convert the value to lower
-
-
-
-    :param
+    :param sensitive: (bool) A flag that enable to convert the value to lower
+        case before hashing that value before generate ID.
+    :param unique: (bool) A flag that add timestamp at microsecond level to
+        value before hashing.
+    :param simple_mode: (bool | None) A flag for generate ID by simple mode.
+    :param extras: (DictData) An extra parameter that use for override config
+        value.

     :rtype: str
     """

@@ -212,7 +214,8 @@ def default_gen_id() -> str:
 def make_exec(path: Union[Path, str]) -> None:
     """Change mode of file to be executable file.

-    :param path: A file path that want to make executable
+    :param path: (Path | str) A file path that want to make executable
+        permission.
     """
     f: Path = Path(path) if isinstance(path, str) else path
     f.chmod(f.stat().st_mode | stat.S_IEXEC)

@@ -285,9 +288,14 @@ def dump_all(value: T, by_alias: bool = False) -> T: ...  # pragma: no cov


 def dump_all(
-    value: Union[T, BaseModel],
+    value: Union[T, BaseModel],
+    by_alias: bool = False,
 ) -> Union[T, DictData]:
-    """Dump all BaseModel object to dict.
+    """Dump all nested BaseModel object to dict object.
+
+    :param value: (T | BaseModel)
+    :param by_alias: (bool)
+    """
     if isinstance(value, dict):
         return {k: dump_all(value[k], by_alias=by_alias) for k in value}
     elif isinstance(value, (list, tuple, set)):
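`dump_all` now takes `by_alias` and recursively converts nested `BaseModel` instances found inside dicts, lists, tuples, and sets into plain dictionaries. A short sketch of the intended behaviour; the `Address`/`User` models are made up for illustration, and it assumes the branches not shown in this hunk dump `BaseModel` values to dicts as the new docstring describes:

from pydantic import BaseModel

from ddeutil.workflow.utils import dump_all


class Address(BaseModel):
    city: str


class User(BaseModel):
    name: str
    address: Address


data = {"users": [User(name="Ann", address=Address(city="Bangkok"))]}

# Every nested BaseModel inside the container is expected to come back as a dict:
# {"users": [{"name": "Ann", "address": {"city": "Bangkok"}}]}
print(dump_all(data, by_alias=False))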
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/src/ddeutil/workflow/workflow.py

@@ -257,8 +257,10 @@ class Workflow(BaseModel):
                 f"{self.name!r}."
             )

-            # NOTE:
-            self.jobs[job].
+            # NOTE: Copy the job model and set job ID to the job model.
+            job_model = self.jobs[job].model_copy()
+            job_model.id = job
+            self.jobs[job] = job_model

         # VALIDATE: Validate workflow name should not dynamic with params
         # template.

@@ -771,3 +773,217 @@ class Workflow(BaseModel):
                 ).to_dict(),
             },
         )
+
+    def rerun(
+        self,
+        context: DictData,
+        *,
+        parent_run_id: Optional[str] = None,
+        event: Optional[Event] = None,
+        timeout: float = 3600,
+        max_job_parallel: int = 2,
+    ) -> Result:
+        """Re-Execute workflow with passing the error context data.
+
+        :param context: A context result that get the failed status.
+        :param parent_run_id: (Optional[str]) A parent workflow running ID.
+        :param event: (Event) An Event manager instance that use to cancel this
+            execution if it forces stopped by parent execution.
+        :param timeout: (float) A workflow execution time out in second unit
+            that use for limit time of execution and waiting job dependency.
+            This value does not force stop the task that still running more than
+            this limit time. (Default: 60 * 60 seconds)
+        :param max_job_parallel: (int) The maximum workers that use for job
+            execution in `ThreadPoolExecutor` object. (Default: 2 workers)
+
+        :rtype: Result
+        """
+        ts: float = time.monotonic()
+
+        result: Result = Result.construct_with_rs_or_id(
+            parent_run_id=parent_run_id,
+            id_logic=self.name,
+            extras=self.extras,
+        )
+        if context["status"] == SUCCESS:
+            result.trace.info(
+                "[WORKFLOW]: Does not rerun because it already executed with "
+                "success status."
+            )
+            return result.catch(status=SUCCESS, context=context)
+
+        err = context["errors"]
+        result.trace.info(f"[WORKFLOW]: Previous error: {err}")
+
+        event: Event = event or Event()
+        max_job_parallel: int = dynamic(
+            "max_job_parallel", f=max_job_parallel, extras=self.extras
+        )
+        result.trace.info(
+            f"[WORKFLOW]: Execute: {self.name!r} ("
+            f"{'parallel' if max_job_parallel > 1 else 'sequential'} jobs)"
+        )
+        if not self.jobs:
+            result.trace.warning(f"[WORKFLOW]: {self.name!r} does not set jobs")
+            return result.catch(status=SUCCESS, context=context)
+
+        # NOTE: Prepare the new context for rerun process.
+        jobs: DictData = context.get("jobs")
+        new_context: DictData = {
+            "params": context["params"].copy(),
+            "jobs": {j: jobs[j] for j in jobs if jobs[j]["status"] == SUCCESS},
+        }
+
+        total_job: int = 0
+        job_queue: Queue = Queue()
+        for job_id in self.jobs:
+
+            if job_id in new_context["jobs"]:
+                continue
+
+            job_queue.put(job_id)
+            total_job += 1
+
+        if total_job == 0:
+            result.trace.warning("[WORKFLOW]: It does not have job to rerun.")
+            return result.catch(status=SUCCESS, context=context)
+
+        not_timeout_flag: bool = True
+        statuses: list[Status] = [WAIT] * total_job
+        skip_count: int = 0
+        sequence_statuses: list[Status] = []
+        timeout: float = dynamic(
+            "max_job_exec_timeout", f=timeout, extras=self.extras
+        )
+
+        result.catch(status=WAIT, context=new_context)
+        if event and event.is_set():
+            return result.catch(
+                status=CANCEL,
+                context={
+                    "errors": WorkflowCancelError(
+                        "Execution was canceled from the event was set before "
+                        "workflow execution."
+                    ).to_dict(),
+                },
+            )
+
+        with ThreadPoolExecutor(max_job_parallel, "wf") as executor:
+            futures: list[Future] = []
+
+            while not job_queue.empty() and (
+                not_timeout_flag := ((time.monotonic() - ts) < timeout)
+            ):
+                job_id: str = job_queue.get()
+                job: Job = self.job(name=job_id)
+                if (check := job.check_needs(new_context["jobs"])) == WAIT:
+                    job_queue.task_done()
+                    job_queue.put(job_id)
+                    time.sleep(0.15)
+                    continue
+                elif check == FAILED:  # pragma: no cov
+                    return result.catch(
+                        status=FAILED,
+                        context={
+                            "status": FAILED,
+                            "errors": WorkflowError(
+                                f"Validate job trigger rule was failed with "
+                                f"{job.trigger_rule.value!r}."
+                            ).to_dict(),
+                        },
+                    )
+                elif check == SKIP:  # pragma: no cov
+                    result.trace.info(
+                        f"[JOB]: Skip job: {job_id!r} from trigger rule."
+                    )
+                    job.set_outputs(output={"status": SKIP}, to=new_context)
+                    job_queue.task_done()
+                    skip_count += 1
+                    continue
+
+                if max_job_parallel > 1:
+                    futures.append(
+                        executor.submit(
+                            self.execute_job,
+                            job=job,
+                            params=new_context,
+                            result=result,
+                            event=event,
+                        ),
+                    )
+                    job_queue.task_done()
+                    continue
+
+                if len(futures) < 1:
+                    futures.append(
+                        executor.submit(
+                            self.execute_job,
+                            job=job,
+                            params=new_context,
+                            result=result,
+                            event=event,
+                        )
+                    )
+                elif (future := futures.pop(0)).done():
+                    if e := future.exception():
+                        sequence_statuses.append(get_status_from_error(e))
+                    else:
+                        st, _ = future.result()
+                        sequence_statuses.append(st)
+                    job_queue.put(job_id)
+                elif future.cancelled():
+                    sequence_statuses.append(CANCEL)
+                    job_queue.put(job_id)
+                elif future.running() or "state=pending" in str(future):
+                    futures.insert(0, future)
+                    job_queue.put(job_id)
+                else:  # pragma: no cov
+                    job_queue.put(job_id)
+                    futures.insert(0, future)
+                    result.trace.warning(
+                        f"[WORKFLOW]: ... Execution non-threading not "
+                        f"handle: {future}."
+                    )
+
+                job_queue.task_done()
+
+            if not_timeout_flag:
+                job_queue.join()
+                for total, future in enumerate(as_completed(futures), start=0):
+                    try:
+                        statuses[total], _ = future.result()
+                    except WorkflowError as e:
+                        statuses[total] = get_status_from_error(e)
+
+                # NOTE: Update skipped status from the job trigger.
+                for i in range(skip_count):
+                    statuses[total + 1 + i] = SKIP
+
+                # NOTE: Update status from none-parallel job execution.
+                for i, s in enumerate(sequence_statuses, start=0):
+                    statuses[total + 1 + skip_count + i] = s
+
+                return result.catch(
+                    status=validate_statuses(statuses), context=new_context
+                )
+
+            event.set()
+            for future in futures:
+                future.cancel()
+
+            result.trace.error(
+                f"[WORKFLOW]: {self.name!r} was timeout because it use exec "
+                f"time more than {timeout} seconds."
+            )
+
+            time.sleep(0.0025)
+
+        return result.catch(
+            status=FAILED,
+            context={
+                "errors": WorkflowTimeoutError(
+                    f"{self.name!r} was timeout because it use exec time more "
+                    f"than {timeout} seconds."
+                ).to_dict(),
+            },
+        )
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_workflow.py

@@ -42,7 +42,10 @@ def test_workflow():
     )

     assert workflow.name == "manual-workflow"
-
+
+    set_job_id = job.model_copy()
+    set_job_id.id = "demo-run"
+    assert workflow.job("demo-run") == set_job_id

     # NOTE: Raise ValueError when get a job with ID that does not exist.
     with pytest.raises(ValueError):
{ddeutil_workflow-0.0.69 → ddeutil_workflow-0.0.70}/tests/test_workflow_exec.py

@@ -25,20 +25,21 @@ def test_workflow_exec():
         name="demo-workflow",
         jobs={"sleep-run": job, "sleep-again-run": job},
     )
+    assert all(j in workflow.jobs for j in ("sleep-run", "sleep-again-run"))
+
     rs: Result = workflow.execute(params={}, max_job_parallel=1)
     assert rs.status == SUCCESS
     assert rs.context == {
         "status": SUCCESS,
         "params": {},
         "jobs": {
+            "sleep-run": {
+                "status": SUCCESS,
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
+            },
             "sleep-again-run": {
                 "status": SUCCESS,
-                "stages": {
-                    "7972360640": {
-                        "outputs": {},
-                        "status": SUCCESS,
-                    }
-                },
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
             },
         },
     }

@@ -64,7 +65,7 @@ def test_workflow_exec_timeout():
         "status": FAILED,
         "params": {},
         "jobs": {
-            "sleep-
+            "sleep-run": {
                 "status": CANCEL,
                 "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
                 "errors": {

@@ -189,6 +190,10 @@ def test_workflow_exec_parallel():
         "status": SUCCESS,
         "params": {},
         "jobs": {
+            "sleep-run": {
+                "status": SUCCESS,
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
+            },
             "sleep-again-run": {
                 "status": SUCCESS,
                 "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
ddeutil_workflow-0.0.70/tests/test_workflow_rerun.py

@@ -0,0 +1,167 @@
+from ddeutil.workflow import (
+    CANCEL,
+    FAILED,
+    SUCCESS,
+    Job,
+    Result,
+    Workflow,
+)
+
+
+def test_workflow_rerun():
+    job: Job = Job(
+        stages=[{"name": "Sleep", "run": "import time\ntime.sleep(2)"}],
+    )
+    workflow: Workflow = Workflow(
+        name="demo-workflow",
+        jobs={"sleep-run": job, "sleep-again-run": job},
+    )
+    rs: Result = workflow.rerun(
+        context={
+            "status": SUCCESS,
+            "params": {},
+            "jobs": {
+                "sleep-run": {
+                    "status": SUCCESS,
+                    "stages": {
+                        "7972360640": {"outputs": {}, "status": SUCCESS}
+                    },
+                },
+                "sleep-again-run": {
+                    "status": SUCCESS,
+                    "stages": {
+                        "7972360640": {"outputs": {}, "status": SUCCESS}
+                    },
+                },
+            },
+        },
+        max_job_parallel=1,
+    )
+    assert rs.status == SUCCESS
+    assert rs.context == {
+        "status": SUCCESS,
+        "params": {},
+        "jobs": {
+            "sleep-run": {
+                "status": SUCCESS,
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
+            },
+            "sleep-again-run": {
+                "status": SUCCESS,
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
+            },
+        },
+    }
+
+    rs: Result = workflow.rerun(
+        context={
+            "status": FAILED,
+            "params": {},
+            "jobs": {
+                "sleep-run": {
+                    "status": SUCCESS,
+                    "stages": {
+                        "7972360640": {"outputs": {}, "status": SUCCESS}
+                    },
+                },
+                "sleep-again-run": {
+                    "status": FAILED,
+                    "stages": {"7972360640": {"outputs": {}, "status": FAILED}},
+                    "errors": {
+                        "name": "DemoError",
+                        "message": "Force error in job context.",
+                    },
+                },
+            },
+            "errors": {
+                "name": "DemoError",
+                "message": "Force error in context data before rerun.",
+            },
+        },
+        max_job_parallel=1,
+    )
+    assert rs.status == SUCCESS
+    assert rs.context == {
+        "status": SUCCESS,
+        "params": {},
+        "jobs": {
+            "sleep-run": {
+                "status": SUCCESS,
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
+            },
+            "sleep-again-run": {
+                "status": SUCCESS,
+                "stages": {"7972360640": {"outputs": {}, "status": SUCCESS}},
+            },
+        },
+    }
+
+
+def test_workflow_rerun_parallel_timeout():
+    job: Job = Job(
+        stages=[
+            {"name": "Sleep", "run": "import time\ntime.sleep(2)"},
+            {"name": "Echo Last Stage", "echo": "the last stage"},
+        ],
+    )
+    workflow: Workflow = Workflow(
+        name="demo-workflow",
+        jobs={
+            "sleep-run": job,
+            "sleep-again-run": job.model_copy(update={"needs": ["sleep-run"]}),
+        },
+        extras={"stage_default_id": False},
+    )
+    rs: Result = workflow.rerun(
+        context={
+            "status": FAILED,
+            "params": {},
+            "jobs": {
+                "sleep-run": {
+                    "status": CANCEL,
+                    "stages": {},
+                    "errors": {
+                        "name": "JobCancelError",
+                        "message": (
+                            "Strategy execution was canceled from the event before "
+                            "start stage execution."
+                        ),
+                    },
+                },
+            },
+            "errors": {
+                "name": "WorkflowTimeoutError",
+                "message": (
+                    "'demo-workflow' was timeout because it use exec time more "
+                    "than 1.25 seconds."
+                ),
+            },
+        },
+        timeout=1.25,
+        max_job_parallel=2,
+    )
+    assert rs.status == FAILED
+    assert rs.context == {
+        "status": FAILED,
+        "params": {},
+        "jobs": {
+            "sleep-run": {
+                "status": CANCEL,
+                "stages": {},
+                "errors": {
+                    "name": "JobCancelError",
+                    "message": (
+                        "Strategy execution was canceled from the event before "
+                        "start stage execution."
+                    ),
+                },
+            },
+        },
+        "errors": {
+            "name": "WorkflowTimeoutError",
+            "message": (
+                "'demo-workflow' was timeout because it use exec time more "
+                "than 1.25 seconds."
+            ),
+        },
+    }
ddeutil_workflow-0.0.69/src/ddeutil/workflow/__about__.py

@@ -1 +0,0 @@
-__version__: str = "0.0.69"