ddeutil-workflow 0.0.35__py3-none-any.whl → 0.0.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
-__version__: str = "0.0.35"
+__version__: str = "0.0.37"
@@ -37,9 +37,11 @@ from .exceptions import (
 )
 from .job import (
     Job,
+    RunsOn,
     Strategy,
 )
 from .logs import (
+    TraceData,
     TraceLog,
     get_dt_tznow,
     get_trace,
@@ -11,7 +11,11 @@ from datetime import datetime, timedelta
 from typing import TypedDict
 
 from dotenv import load_dotenv
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
+from fastapi import status as st
+from fastapi.encoders import jsonable_encoder
+from fastapi.exceptions import RequestValidationError
+from fastapi.middleware.cors import CORSMiddleware
 from fastapi.middleware.gzip import GZipMiddleware
 from fastapi.responses import UJSONResponse
 
@@ -20,10 +24,10 @@ from ..conf import config, get_logger
 from ..scheduler import ReleaseThread, ReleaseThreads
 from ..workflow import ReleaseQueue, WorkflowTask
 from .repeat import repeat_at
-from .routes import log
+from .routes import job, log
 
 load_dotenv()
-logger = get_logger("ddeutil.workflow")
+logger = get_logger("uvicorn.error")
 
 
 class State(TypedDict):
@@ -61,24 +65,38 @@ async def lifespan(a: FastAPI) -> AsyncIterator[State]:
 
 
 app = FastAPI(
-    titile="Workflow API",
+    titile="Workflow",
     description=(
-        "This is workflow FastAPI web application that use to manage manual "
-        "execute or schedule workflow via RestAPI."
+        "This is a workflow FastAPI application that use to manage manual "
+        "execute, logging, and schedule workflow via RestAPI."
     ),
     version=__version__,
    lifespan=lifespan,
     default_response_class=UJSONResponse,
 )
 app.add_middleware(GZipMiddleware, minimum_size=1000)
+origins: list[str] = [
+    "http://localhost",
+    "http://localhost:88",
+    "http://localhost:80",
+]
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
 
 
 @app.get("/")
 async def health():
-    return {"message": "Workflow API already start up"}
+    """Index view that not return any template without json status."""
+    return {"message": "Workflow already start up with healthy status."}
 
 
-# NOTE Add the logs route by default.
+# NOTE Add the jobs and logs routes by default.
+app.include_router(job, prefix=config.prefix_path)
 app.include_router(log, prefix=config.prefix_path)
 
 
@@ -111,12 +129,13 @@ if config.enable_route_schedule:
             stop=datetime.now(config.tz) + timedelta(minutes=1),
             queue=app.state.workflow_queue,
             threads=app.state.workflow_threads,
-            log=get_audit(),
+            audit=get_audit(),
         )
 
     @schedule.on_event("startup")
     @repeat_at(cron="*/5 * * * *", delay=10)
     def monitoring():
+        """Monitoring workflow thread that running in the background."""
         logger.debug("[MONITOR]: Start monitoring threading.")
         snapshot_threads: list[str] = list(app.state.workflow_threads.keys())
         for t_name in snapshot_threads:
@@ -126,3 +145,25 @@ if config.enable_route_schedule:
             # NOTE: remove the thread that running success.
             if not thread_release["thread"].is_alive():
                 app.state.workflow_threads.pop(t_name)
+
+
+@app.exception_handler(RequestValidationError)
+async def validation_exception_handler(
+    request: Request, exc: RequestValidationError
+):
+    _ = request
+    return UJSONResponse(
+        status_code=st.HTTP_422_UNPROCESSABLE_ENTITY,
+        content=jsonable_encoder({"detail": exc.errors(), "body": exc.body}),
+    )
+
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(
+        app,
+        host="0.0.0.0",
+        port=80,
+        log_level="DEBUG",
+    )
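
The new handler turns request-validation failures into a 422 body carrying both the pydantic error detail and the offending payload. A minimal sketch on a stand-in app (the `/echo` route and `Payload` model are hypothetical; `JSONResponse` stands in for `UJSONResponse` to avoid the ujson dependency):

    from fastapi import FastAPI, Request, status
    from fastapi.encoders import jsonable_encoder
    from fastapi.exceptions import RequestValidationError
    from fastapi.responses import JSONResponse
    from fastapi.testclient import TestClient
    from pydantic import BaseModel

    app = FastAPI()


    class Payload(BaseModel):
        name: str


    @app.post("/echo")
    async def echo(payload: Payload):
        return payload


    @app.exception_handler(RequestValidationError)
    async def validation_exception_handler(request: Request, exc: RequestValidationError):
        # Same shape as the handler added above: error detail plus raw body.
        _ = request
        return JSONResponse(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            content=jsonable_encoder({"detail": exc.errors(), "body": exc.body}),
        )


    client = TestClient(app)
    rs = client.post("/echo", json={})  # missing "name" -> validation error
    assert rs.status_code == 422 and rs.json()["body"] == {}
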
@@ -0,0 +1,59 @@
+from ..conf import config
+
+LOGGING_CONFIG = {  # pragma: no cov
+    "version": 1,
+    "disable_existing_loggers": False,
+    "formatters": {
+        "standard": {
+            "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
+        },
+        "custom_formatter": {
+            "format": config.log_format,
+            "datefmt": config.log_datetime_format,
+        },
+    },
+    "root": {
+        "level": "DEBUG" if config.debug else "INFO",
+    },
+    "handlers": {
+        "default": {
+            "formatter": "standard",
+            "class": "logging.StreamHandler",
+            "stream": "ext://sys.stderr",
+        },
+        "stream_handler": {
+            "formatter": "custom_formatter",
+            "class": "logging.StreamHandler",
+            "stream": "ext://sys.stdout",
+        },
+        "file_handler": {
+            "formatter": "custom_formatter",
+            "class": "logging.handlers.RotatingFileHandler",
+            "filename": "logs/app.log",
+            "maxBytes": 1024 * 1024 * 1,
+            "backupCount": 3,
+        },
+    },
+    "loggers": {
+        "uvicorn": {
+            "handlers": ["default", "file_handler"],
+            "level": "DEBUG" if config.debug else "INFO",
+            "propagate": False,
+        },
+        "uvicorn.access": {
+            "handlers": ["stream_handler", "file_handler"],
+            "level": "DEBUG" if config.debug else "INFO",
+            "propagate": False,
+        },
+        "uvicorn.error": {
+            "handlers": ["stream_handler", "file_handler"],
+            "level": "DEBUG" if config.debug else "INFO",
+            "propagate": False,
+        },
+        # "uvicorn.asgi": {
+        #     "handlers": ["stream_handler", "file_handler"],
+        #     "level": "TRACE",
+        #     "propagate": False,
+        # },
+    },
+}
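
The diff does not show where this new LOGGING_CONFIG module is consumed; a typical way to apply such a dictConfig, through the stdlib or by handing it to uvicorn, looks like this (a trimmed stand-in config; the real one pulls its formats from the package `config` object):

    import logging
    import logging.config

    LOGGING_CONFIG = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"},
        },
        "handlers": {
            "default": {
                "formatter": "standard",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stderr",
            },
        },
        "loggers": {
            "uvicorn.error": {
                "handlers": ["default"],
                "level": "INFO",
                "propagate": False,
            },
        },
    }

    logging.config.dictConfig(LOGGING_CONFIG)
    logging.getLogger("uvicorn.error").info("wired via dictConfig")
    # Alternatively: uvicorn.run(app, log_config=LOGGING_CONFIG)

Note that the real config's `RotatingFileHandler` writes to `logs/app.log`, so that directory must exist before the dictConfig is applied, since the handler opens the file at configuration time.
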
@@ -15,23 +15,32 @@ from starlette.concurrency import run_in_threadpool
 from ..__cron import CronJob
 from ..conf import config, get_logger
 
-logger = get_logger("ddeutil.workflow")
+logger = get_logger("uvicorn.error")
 
 
 def get_cronjob_delta(cron: str) -> float:
     """This function returns the time delta between now and the next cron
     execution time.
+
+    :rtype: float
     """
     now: datetime = datetime.now(tz=config.tz)
     cron = CronJob(cron)
     return (cron.schedule(now).next - now).total_seconds()
 
 
-def cron_valid(cron: str):
+def cron_valid(cron: str, raise_error: bool = True) -> bool:
+    """Check this crontab string value is valid with its cron syntax.
+
+    :rtype: bool
+    """
     try:
         CronJob(cron)
+        return True
     except Exception as err:
-        raise ValueError(f"Crontab value does not valid, {cron}") from err
+        if raise_error:
+            raise ValueError(f"Crontab value does not valid, {cron}") from err
+        return False
 
 
 async def run_func(
@@ -41,6 +50,7 @@ async def run_func(
     raise_exceptions: bool = False,
     **kwargs,
 ):
+    """Run function inside the repeat decorator functions."""
     try:
         if is_coroutine:
             await func(*args, **kwargs)
@@ -62,11 +72,11 @@ def repeat_at(
     """This function returns a decorator that makes a function execute
     periodically as per the cron expression provided.
 
-    :param cron: str
-        Cron-style string for periodic execution, eg. '0 0 * * *' every midnight
-    :param delay:
-    :param raise_exceptions: bool (default False)
-        Whether to raise exceptions or log them
+    :param cron: (str) A Cron-style string for periodic execution, e.g.
+        '0 0 * * *' every midnight
+    :param delay: (float) A delay seconds value.
+    :param raise_exceptions: (bool) A raise exception flag. Whether to raise
+        exceptions or log them if raise was set be false.
     :param max_repetitions: int (default None)
         Maximum number of times to repeat the function. If None, repeat
         indefinitely.
@@ -81,12 +91,12 @@ def repeat_at(
 
     @wraps(func)
     def wrapper(*_args, **_kwargs):
-        repititions: int = 0
+        repetitions: int = 0
         cron_valid(cron)
 
         async def loop(*args, **kwargs):
-            nonlocal repititions
-            while max_repetitions is None or repititions < max_repetitions:
+            nonlocal repetitions
+            while max_repetitions is None or repetitions < max_repetitions:
                 sleep_time = get_cronjob_delta(cron) + delay
                 await asyncio.sleep(sleep_time)
                 await run_func(
@@ -96,7 +106,7 @@ def repeat_at(
                     raise_exceptions=raise_exceptions,
                     **kwargs,
                 )
-                repititions += 1
+                repetitions += 1
 
         ensure_future(loop(*_args, **_kwargs))
 
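
A usage sketch for the revised helpers (the import path is an assumption; the diff does not name the module):

    # Hypothetical import path for the helpers shown above.
    from ddeutil.workflow.api.repeat import cron_valid, repeat_at

    # cron_valid now reports validity as a bool and can be told not to raise.
    assert cron_valid("*/5 * * * *") is True
    assert cron_valid("not a cron", raise_error=False) is False


    # Runs ten seconds after every five-minute cron boundary once the event
    # loop is up, mirroring the monitoring() usage earlier in this diff.
    @repeat_at(cron="*/5 * * * *", delay=10)
    def monitoring() -> None:
        print("[MONITOR]: tick")
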
@@ -3,6 +3,7 @@
 # Licensed under the MIT License. See LICENSE in the project root for
 # license information.
 # ------------------------------------------------------------------------------
+from .job import job_route as job
 from .logs import log_route as log
 from .schedules import schedule_route as schedule
 from .workflows import workflow_route as workflow
@@ -0,0 +1,73 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) 2022 Korawich Anuttra. All rights reserved.
+# Licensed under the MIT License. See LICENSE in the project root for
+# license information.
+# ------------------------------------------------------------------------------
+from __future__ import annotations
+
+from typing import Any, Optional
+
+from fastapi import APIRouter
+from fastapi.responses import UJSONResponse
+from pydantic import BaseModel
+
+from ...__types import DictData
+from ...conf import get_logger
+from ...exceptions import JobException
+from ...job import Job
+from ...result import Result
+
+logger = get_logger("uvicorn.error")
+
+
+job_route = APIRouter(
+    prefix="/job",
+    tags=["job"],
+    default_response_class=UJSONResponse,
+)
+
+
+class ResultPost(BaseModel):
+    context: DictData
+    run_id: str
+    parent_run_id: Optional[str] = None
+
+
+@job_route.post(path="/execute/")
+async def job_execute(
+    result: ResultPost,
+    job: Job,
+    params: dict[str, Any],
+):
+    """Execute job via API."""
+    rs: Result = Result(
+        context=result.context,
+        run_id=result.run_id,
+        parent_run_id=result.parent_run_id,
+    )
+    try:
+        job.set_outputs(
+            job.execute(
+                params=params,
+                run_id=rs.run_id,
+                parent_run_id=rs.parent_run_id,
+            ).context,
+            to=params,
+        )
+    except JobException as err:
+        rs.trace.error(f"[WORKFLOW]: {err.__class__.__name__}: {err}")
+
+    return {
+        "message": "Start execute job via API.",
+        "result": {
+            "run_id": rs.run_id,
+            "parent_run_id": rs.parent_run_id,
+        },
+        "job": job.model_dump(
+            by_alias=True,
+            exclude_none=True,
+            exclude_unset=True,
+            exclude_defaults=True,
+        ),
+        "params": params,
+    }
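
A request sketch against the new endpoint (the host, mount prefix, and job payload are assumptions; the real body must validate against the package's `Job` model and the `ResultPost` schema above):

    import httpx

    body = {
        "result": {
            "context": {},
            "run_id": "20240101000000000000",  # hypothetical running ID
        },
        "job": {"stages": [{"name": "Echo Stage", "echo": "hello"}]},  # hypothetical Job payload
        "params": {"name": "demo"},
    }
    rs = httpx.post("http://localhost:80/api/job/execute/", json=body)
    print(rs.status_code, rs.json()["message"])
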
@@ -3,19 +3,16 @@
 # Licensed under the MIT License. See LICENSE in the project root for
 # license information.
 # ------------------------------------------------------------------------------
+"""This route include audit and trace log paths."""
 from __future__ import annotations
 
 from fastapi import APIRouter
+from fastapi import status as st
 from fastapi.responses import UJSONResponse
 
-from ...conf import get_logger
+from ...audit import get_audit
 from ...logs import get_trace_obj
 
-logger = get_logger("ddeutil.workflow")
-
-
-# NOTE: Start create the schedule routes.
-#
 log_route = APIRouter(
     prefix="/logs",
     tags=["logs"],
@@ -23,14 +20,146 @@ log_route = APIRouter(
 )
 
 
-@log_route.get(path="/")
-async def get_logs():
+@log_route.get(
+    path="/traces/",
+    response_class=UJSONResponse,
+    status_code=st.HTTP_200_OK,
+    summary="Read all trace logs.",
+    tags=["trace"],
+)
+async def get_traces():
+    """Return all trace logs from the current trace log path that config with
+    `WORKFLOW_LOG_PATH` environment variable name.
+    """
+    return {
+        "message": "Getting trace logs",
+        "traces": [
+            trace.model_dump(
+                by_alias=True,
+                exclude_none=True,
+                exclude_unset=True,
+                exclude_defaults=True,
+            )
+            for trace in get_trace_obj().find_logs()
+        ],
+    }
+
+
+@log_route.get(
+    path="/traces/{run_id}",
+    response_class=UJSONResponse,
+    status_code=st.HTTP_200_OK,
+    summary="Read trace log with specific running ID.",
+    tags=["trace"],
+)
+async def get_trace_with_id(run_id: str):
+    """Return trace log with specific running ID from the current trace log path
+    that config with `WORKFLOW_LOG_PATH` environment variable name.
+
+    - **run_id**: A running ID that want to search a trace log from the log
+        path.
+    """
+    return {
+        "message": f"Getting trace log with specific running ID: {run_id}",
+        "trace": (
+            get_trace_obj()
+            .find_log_with_id(run_id)
+            .model_dump(
+                by_alias=True,
+                exclude_none=True,
+                exclude_unset=True,
+                exclude_defaults=True,
+            )
+        ),
+    }
+
+
+@log_route.get(
+    path="/audits/",
+    response_class=UJSONResponse,
+    status_code=st.HTTP_200_OK,
+    summary="Read all audit logs.",
+    tags=["audit"],
+)
+async def get_audits():
+    """Return all audit logs from the current audit log path that config with
+    `WORKFLOW_AUDIT_PATH` environment variable name.
+    """
+    return {
+        "message": "Getting audit logs",
+        "audits": list(get_audit().find_audits(name="demo")),
+    }
+
+
+@log_route.get(
+    path="/audits/{workflow}/",
+    response_class=UJSONResponse,
+    status_code=st.HTTP_200_OK,
+    summary="Read all audit logs with specific workflow name.",
+    tags=["audit"],
+)
+async def get_audit_with_workflow(workflow: str):
+    """Return all audit logs with specific workflow name from the current audit
+    log path that config with `WORKFLOW_AUDIT_PATH` environment variable name.
+
+    - **workflow**: A specific workflow name that want to find audit logs.
+    """
     return {
-        "message": "Getting logs",
-        "audits": list(get_trace_obj().find_logs()),
+        "message": f"Getting audit logs with workflow name {workflow}",
+        "audits": list(get_audit().find_audits(name="demo")),
     }
 
 
-@log_route.get(path="/{run_id}")
-async def get_log_with_run_id(run_id: str):
-    return get_trace_obj().find_log_with_id(run_id)
+@log_route.get(
+    path="/audits/{workflow}/{release}",
+    response_class=UJSONResponse,
+    status_code=st.HTTP_200_OK,
+    summary="Read all audit logs with specific workflow name and release date.",
+    tags=["audit"],
+)
+async def get_audit_with_workflow_release(workflow: str, release: str):
+    """Return all audit logs with specific workflow name and release date from
+    the current audit log path that config with `WORKFLOW_AUDIT_PATH`
+    environment variable name.
+
+    - **workflow**: A specific workflow name that want to find audit logs.
+    - **release**: A release date with a string format `%Y%m%d%H%M%S`.
+    """
+    return {
+        "message": (
+            f"Getting audit logs with workflow name {workflow} and release "
+            f"{release}"
+        ),
+        "audits": list(get_audit().find_audits(name="demo")),
+    }
+
+
+@log_route.get(
+    path="/audits/{workflow}/{release}/{run_id}",
+    response_class=UJSONResponse,
+    status_code=st.HTTP_200_OK,
+    summary=(
+        "Read all audit logs with specific workflow name, release date "
+        "and running ID."
+    ),
+    tags=["audit"],
+)
+async def get_audit_with_workflow_release_run_id(
+    workflow: str, release: str, run_id: str
+):
+    """Return all audit logs with specific workflow name and release date from
+    the current audit log path that config with `WORKFLOW_AUDIT_PATH`
+    environment variable name.
+
+    - **workflow**: A specific workflow name that want to find audit logs.
+    - **release**: A release date with a string format `%Y%m%d%H%M%S`.
+    - **run_id**: A running ID that want to search audit log from this release
+        date.
+    """
+    return {
+        "message": (
+            f"Getting audit logs with workflow name {workflow}, release "
+            f"{release}, and running ID {run_id}"
+        ),
+        "audits": list(get_audit().find_audits(name="demo")),
+    }
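
Request sketches for the expanded log routes (host and mount prefix assumed as above; the running ID, workflow name, and release values are placeholders):

    import httpx

    base = "http://localhost:80/api/logs"
    httpx.get(f"{base}/traces/")                        # all trace logs
    httpx.get(f"{base}/traces/20240101000000000000")    # one trace by running ID
    httpx.get(f"{base}/audits/")                        # all audit logs
    httpx.get(f"{base}/audits/wf-demo/20240101000000")  # audits by workflow + release
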
@@ -15,7 +15,7 @@ from fastapi.responses import UJSONResponse
 from ...conf import config, get_logger
 from ...scheduler import Schedule
 
-logger = get_logger("ddeutil.workflow")
+logger = get_logger("uvicorn.error")
 
 schedule_route = APIRouter(
     prefix="/schedules",
@@ -24,8 +24,9 @@ schedule_route = APIRouter(
 )
 
 
-@schedule_route.get(path="/{name}")
+@schedule_route.get(path="/{name}", status_code=st.HTTP_200_OK)
 async def get_schedules(name: str):
+    """Get schedule object."""
     try:
         schedule: Schedule = Schedule.from_loader(name=name, externals={})
     except ValueError:
@@ -41,13 +42,13 @@ async def get_schedules(name: str):
     )
 
 
-@schedule_route.get(path="/deploy/")
+@schedule_route.get(path="/deploy/", status_code=st.HTTP_200_OK)
 async def get_deploy_schedulers(request: Request):
     snapshot = copy.deepcopy(request.state.scheduler)
     return {"schedule": snapshot}
 
 
-@schedule_route.get(path="/deploy/{name}")
+@schedule_route.get(path="/deploy/{name}", status_code=st.HTTP_200_OK)
 async def get_deploy_scheduler(request: Request, name: str):
     if name in request.state.scheduler:
         schedule = Schedule.from_loader(name)
@@ -75,7 +76,7 @@ async def get_deploy_scheduler(request: Request, name: str):
     )
 
 
-@schedule_route.post(path="/deploy/{name}")
+@schedule_route.post(path="/deploy/{name}", status_code=st.HTTP_202_ACCEPTED)
 async def add_deploy_scheduler(request: Request, name: str):
     """Adding schedule name to application state store."""
     if name in request.state.scheduler:
@@ -115,7 +116,7 @@ async def add_deploy_scheduler(request: Request, name: str):
     }
 
 
-@schedule_route.delete(path="/deploy/{name}")
+@schedule_route.delete(path="/deploy/{name}", status_code=st.HTTP_202_ACCEPTED)
 async def del_deploy_scheduler(request: Request, name: str):
     """Delete workflow task on the schedule listener."""
     if name in request.state.scheduler:
@@ -20,7 +20,7 @@ from ...conf import Loader, get_logger
 from ...result import Result
 from ...workflow import Workflow
 
-logger = get_logger("ddeutil.workflow")
+logger = get_logger("uvicorn.error")
 
 workflow_route = APIRouter(
     prefix="/workflows",
@@ -29,7 +29,7 @@ workflow_route = APIRouter(
 )
 
 
-@workflow_route.get(path="/")
+@workflow_route.get(path="/", status_code=st.HTTP_200_OK)
 async def get_workflows() -> DictData:
     """Return all workflow workflows that exists in config path."""
     workflows: DictData = dict(Loader.finds(Workflow))
@@ -40,7 +40,7 @@ async def get_workflows() -> DictData:
     }
 
 
-@workflow_route.get(path="/{name}")
+@workflow_route.get(path="/{name}", status_code=st.HTTP_200_OK)
 async def get_workflow_by_name(name: str) -> DictData:
     """Return model of workflow that passing an input workflow name."""
     try:
@@ -66,7 +66,7 @@ class ExecutePayload(BaseModel):
 
 
 @workflow_route.post(path="/{name}/execute", status_code=st.HTTP_202_ACCEPTED)
-async def execute_workflow(name: str, payload: ExecutePayload) -> DictData:
+async def workflow_execute(name: str, payload: ExecutePayload) -> DictData:
     """Return model of workflow that passing an input workflow name."""
     try:
         workflow: Workflow = Workflow.from_loader(name=name, externals={})
@@ -90,7 +90,7 @@ async def execute_workflow(name: str, payload: ExecutePayload) -> DictData:
     return asdict(result)
 
 
-@workflow_route.get(path="/{name}/audits")
+@workflow_route.get(path="/{name}/audits", status_code=st.HTTP_200_OK)
 async def get_workflow_audits(name: str):
     try:
         return {
@@ -112,11 +112,13 @@ async def get_workflow_audits(name: str):
         ) from None
 
 
-@workflow_route.get(path="/{name}/audits/{release}")
+@workflow_route.get(path="/{name}/audits/{release}", status_code=st.HTTP_200_OK)
 async def get_workflow_release_audit(name: str, release: str):
+    """Get Workflow audit log with an input release value."""
     try:
         audit: Audit = get_audit().find_audit_with_release(
-            name=name, release=datetime.strptime(release, "%Y%m%d%H%M%S")
+            name=name,
+            release=datetime.strptime(release, "%Y%m%d%H%M%S"),
         )
     except FileNotFoundError:
         raise HTTPException(
ddeutil/workflow/audit.py CHANGED
@@ -112,7 +112,8 @@ class FileAudit(BaseAudit):
         :param release: A release datetime that want to search log.
 
         :raise FileNotFoundError:
-        :raise NotImplementedError:
+        :raise NotImplementedError: If an input release does not pass to this
+            method. Because this method does not implement latest log.
 
         :rtype: Self
         """
@@ -181,7 +182,9 @@ class FileAudit(BaseAudit):
             trace.debug("[LOG]: Skip writing log cause config was set")
             return self
 
-        log_file: Path = self.pointer() / f"{self.run_id}.log"
+        log_file: Path = (
+            self.pointer() / f"{self.parent_run_id or self.run_id}.log"
+        )
         log_file.write_text(
             json.dumps(
                 self.model_dump(exclude=excluded),
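
A toy illustration of the new audit file naming, which collapses nested runs into the parent's log file (simplified from the `FileAudit` internals above):

    from pathlib import Path
    from typing import Optional


    def audit_file(pointer: Path, run_id: str, parent_run_id: Optional[str]) -> Path:
        # Same expression as the diff: prefer the parent running ID when set.
        return pointer / f"{parent_run_id or run_id}.log"


    assert audit_file(Path("audits"), "child-01", "parent-99").name == "parent-99.log"
    assert audit_file(Path("audits"), "solo-02", None).name == "solo-02.log"
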
@@ -196,7 +199,7 @@
 class SQLiteAudit(BaseAudit):  # pragma: no cov
     """SQLite Audit Pydantic Model."""
 
-    table_name: ClassVar[str] = "workflow_log"
+    table_name: ClassVar[str] = "audits"
     schemas: ClassVar[
         str
     ] = """
@@ -91,7 +91,9 @@ def make_registry(submodule: str) -> dict[str, Registry]:
     for fstr, func in inspect.getmembers(importer, inspect.isfunction):
         # NOTE: check function attribute that already set tag by
         # ``utils.tag`` decorator.
-        if not (hasattr(func, "tag") and hasattr(func, "name")):
+        if not (
+            hasattr(func, "tag") and hasattr(func, "name")
+        ):  # pragma: no cov
             continue
 
         # NOTE: Define type of the func value.
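
A toy sketch of what this check filters for: only functions carrying both attributes set by the package's ``tag`` decorator make it into the registry (the decorator body here is a simplified stand-in, not the package's implementation):

    def tag(name: str):
        # Simplified stand-in for utils.tag: mark the function the way the
        # real decorator does, so make_registry's hasattr check passes.
        def decorator(func):
            func.tag = name
            func.name = func.__name__
            return func

        return decorator


    @tag("demo")
    def tagged_task():
        ...


    def plain_function():
        ...


    registry = {
        f.__name__: f
        for f in (tagged_task, plain_function)
        if hasattr(f, "tag") and hasattr(f, "name")  # the check from the diff
    }
    assert "tagged_task" in registry and "plain_function" not in registry
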