ddeutil-workflow 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,13 +3,26 @@
 # Licensed under the MIT License. See LICENSE in the project root for
 # license information.
 # ------------------------------------------------------------------------------
+"""
+The main schedule running is ``workflow_runner`` function that trigger the
+multiprocess of ``workflow_control`` function for listing schedules on the
+config by ``Loader.finds(Schedule)``.
+
+The ``workflow_control`` is the scheduler function that release 2 schedule
+functions; ``workflow_task``, and ``workflow_monitor``.
+
+    ``workflow_control`` --- Every minute at :02 --> ``workflow_task``
+                         --- Every 5 minutes     --> ``workflow_monitor``
+
+The ``workflow_task`` will run ``task.release`` method in threading object
+for multithreading strategy. This ``release`` method will run only one crontab
+value with the on field.
+"""
 from __future__ import annotations
 
 import copy
 import inspect
-import json
 import logging
-import os
 import time
 from concurrent.futures import (
     Future,
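The new module docstring describes a two-tier layout on top of the `schedule` package: one job that releases due workflow tasks every minute, and one that monitors the release threads every five minutes. A minimal, self-contained sketch of that wiring (the two job bodies below are stand-ins, not the package's real functions):

    import time

    import schedule


    def task_job() -> None:
        # Stand-in for ``workflow_task``: release workflows whose crontab is due.
        print("release due workflow tasks")


    def monitor_job() -> None:
        # Stand-in for ``workflow_monitor``: check long-running release threads.
        print("check release threads")


    # The cadence from the docstring diagram: every minute at second :02 for
    # releases, every 5 minutes at second :10 for monitoring.
    schedule.every(1).minutes.at(":02").do(task_job).tag("control")
    schedule.every(5).minutes.at(":10").do(monitor_job).tag("monitor")

    while True:
        schedule.run_pending()
        time.sleep(1)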
@@ -39,18 +52,16 @@ except ImportError:
 
 try:
     from schedule import CancelJob
-except ImportError:
+except ImportError:  # pragma: no cov
     CancelJob = None
 
+from .__cron import CronRunner
 from .__types import DictData, TupleStr
-from .conf import config
-from .cron import CronRunner
+from .conf import FileLog, Loader, Log, config, get_logger
 from .exceptions import JobException, WorkflowException
 from .job import Job
-from .log import FileLog, Log, get_logger
 from .on import On
 from .utils import (
-    Loader,
     Param,
     Result,
     batch,
@@ -75,7 +86,7 @@ __all__: TupleStr = (
     "Schedule",
     "ScheduleWorkflow",
    "workflow_task",
-    "workflow_long_running_task",
+    "workflow_monitor",
     "workflow_control",
     "workflow_runner",
 )
@@ -184,7 +195,7 @@ class Workflow(BaseModel):
         return data
 
     @model_validator(mode="before")
-    def __prepare_params(cls, values: DictData) -> DictData:
+    def __prepare_model_before__(cls, values: DictData) -> DictData:
         """Prepare the params key."""
         # NOTE: Prepare params type if it passing with only type value.
         if params := values.pop("params", {}):
@@ -199,9 +210,10 @@
         return values
 
     @field_validator("desc", mode="after")
-    def ___prepare_desc(cls, value: str) -> str:
+    def __dedent_desc__(cls, value: str) -> str:
         """Prepare description string that was created on a template.
 
+        :param value: A description string value that want to dedent.
         :rtype: str
         """
         return dedent(value)
@@ -217,8 +229,8 @@
             need for need in self.jobs[job].needs if need not in self.jobs
         ]:
             raise WorkflowException(
-                f"This needed jobs: {not_exist} do not exist in this "
-                f"workflow, {self.name!r}"
+                f"The needed jobs: {not_exist} do not found in "
+                f"{self.name!r}."
             )
 
         # NOTE: update a job id with its job id from workflow template
@@ -341,11 +353,11 @@
         # NOTE: get next schedule time that generate from now.
         next_time: datetime = gen.next
 
-        # NOTE: get next utils it does not logger.
+        # NOTE: While-loop to getting next until it does not logger.
         while log.is_pointed(self.name, next_time, queue=queue):
             next_time: datetime = gen.next
 
-        # NOTE: push this next running time to log queue
+        # NOTE: Heap-push this next running time to log queue list.
         heappush(queue, next_time)
 
         # VALIDATE: Check the different time between the next schedule time and
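The two rewritten comments describe the release-queue mechanic: keep generating the next crontab time until one is found that has not been logged yet, then heap-push it so the earliest release time stays at the front. A small sketch of that loop, with stand-ins for ``gen.next`` and ``log.is_pointed``:

    from datetime import datetime, timedelta
    from heapq import heappop, heappush

    queue: list[datetime] = []
    logged: set[datetime] = {datetime(2024, 1, 1, 0, 1)}  # already-released times


    def is_pointed(next_time: datetime) -> bool:
        # Stand-in for ``log.is_pointed``: True if this time was already logged.
        return next_time in logged


    def gen_next(current: datetime) -> datetime:
        # Stand-in for ``gen.next``: a fixed every-minute cron generator.
        return current + timedelta(minutes=1)


    next_time = gen_next(datetime(2024, 1, 1, 0, 0))
    while is_pointed(next_time):
        next_time = gen_next(next_time)

    heappush(queue, next_time)  # the heap keeps the earliest release time in front
    assert heappop(queue) == datetime(2024, 1, 1, 0, 2)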
@@ -458,8 +470,10 @@
         queue: list[datetime] = []
         results: list[Result] = []
 
-        worker: int = int(os.getenv("WORKFLOW_CORE_MAX_NUM_POKING") or "4")
-        with ThreadPoolExecutor(max_workers=worker) as executor:
+        with ThreadPoolExecutor(
+            max_workers=config.max_poking_pool_worker,
+            thread_name_prefix="wf_poking_",
+        ) as executor:
             futures: list[Future] = []
             for on in self.on:
                 futures.append(
@@ -694,7 +708,7 @@
                 raise WorkflowException(f"{err}")
             try:
                 future.result(timeout=60)
-            except TimeoutError as err:
+            except TimeoutError as err:  # pragma: no cove
                 raise WorkflowException(
                     "Timeout when getting result from future"
                 ) from err
@@ -795,7 +809,7 @@ class ScheduleWorkflow(BaseModel):
     )
 
     @model_validator(mode="before")
-    def __prepare_values(cls, values: DictData) -> DictData:
+    def __prepare_before__(cls, values: DictData) -> DictData:
         """Prepare incoming values before validating with model fields.
 
         :rtype: DictData
@@ -933,9 +947,11 @@ class Schedule(BaseModel):
         return workflow_tasks
 
 
-def catch_exceptions(
-    cancel_on_failure: bool = False,
-) -> Callable[P, Optional[CancelJob]]:
+ReturnCancelJob = Callable[P, Optional[CancelJob]]
+DecoratorCancelJob = Callable[[ReturnCancelJob], ReturnCancelJob]
+
+
+def catch_exceptions(cancel_on_failure: bool = False) -> DecoratorCancelJob:
     """Catch exception error from scheduler job that running with schedule
     package and return CancelJob if this function raise an error.
 
@@ -944,9 +960,7 @@ def catch_exceptions(
     :rtype: Callable[P, Optional[CancelJob]]
     """
 
-    def decorator(
-        func: Callable[P, Optional[CancelJob]],
-    ) -> Callable[P, Optional[CancelJob]]:
+    def decorator(func: ReturnCancelJob) -> ReturnCancelJob:
         try:
             # NOTE: Check the function that want to handle is method or not.
             if inspect.ismethod(func):
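The new ``ReturnCancelJob``/``DecoratorCancelJob`` aliases only rename the decorator's typing; the documented behavior stays the same: a scheduler job that raises is turned into a ``CancelJob`` return, which the `schedule` package treats as "unschedule me" instead of crashing the loop. A minimal sketch of that pattern (not the package's exact body):

    import functools
    import traceback

    from schedule import CancelJob


    def catch_exceptions_sketch(cancel_on_failure: bool = False):
        """Turn a raising scheduler job into a CancelJob return, so the
        `schedule` package unschedules it instead of crashing the loop."""

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    traceback.print_exc()
                    if cancel_on_failure:
                        return CancelJob

            return wrapper

        return decorator


    @catch_exceptions_sketch(cancel_on_failure=True)
    def flaky_job():
        raise RuntimeError("boom")  # becomes a CancelJob return, not a crash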
@@ -981,8 +995,8 @@ class WorkflowTaskData:
     workflow: Workflow
     on: On
     params: DictData = field(compare=False, hash=False)
-    queue: list[datetime] = field(compare=False, hash=False)
-    running: list[datetime] = field(compare=False, hash=False)
+    queue: dict[str, list[datetime]] = field(compare=False, hash=False)
+    running: dict[str, list[datetime]] = field(compare=False, hash=False)
 
     @catch_exceptions(cancel_on_failure=True)
     def release(
@@ -1062,8 +1076,9 @@
             },
         }
 
-        # WARNING: Re-create workflow object that use new running workflow
-        #   ID.
+        # WARNING:
+        #   Re-create workflow object that use new running workflow ID.
+        #
         runner: Workflow = wf.get_running_id(run_id=wf.new_run_id)
         rs: Result = runner.execute(
             params=param2template(self.params, release_params),
@@ -1116,6 +1131,7 @@
             self.workflow.name == other.workflow.name
             and self.on.cronjob == other.on.cronjob
         )
+        return NotImplemented
 
 
 @catch_exceptions(cancel_on_failure=True)
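The added ``return NotImplemented`` is the standard fallthrough for ``__eq__`` against a foreign type: Python will then try the reflected comparison on the other operand before falling back to identity, instead of the method implicitly returning ``None``. A tiny illustration:

    class TaskKey:
        """Cut-down stand-in for the dataclass above."""

        def __init__(self, name: str) -> None:
            self.name = name

        def __eq__(self, other) -> bool:
            if isinstance(other, TaskKey):
                return self.name == other.name
            # Let Python try other.__eq__(self) before falling back to identity.
            return NotImplemented


    print(TaskKey("wf") == TaskKey("wf"))  # True
    print(TaskKey("wf") == "wf")           # False, via the NotImplemented path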
@@ -1127,10 +1143,10 @@ def workflow_task(
     """Workflow task generator that create release pair of workflow and on to
     the threading in background.
 
-    This workflow task will start every minute at :02 second.
+    This workflow task will start every minute at ':02' second.
 
     :param workflow_tasks:
-    :param stop:
+    :param stop: A stop datetime object that force stop running scheduler.
     :param threads:
     :rtype: CancelJob | None
     """
@@ -1145,7 +1161,7 @@
             "running in background."
         )
         time.sleep(15)
-        workflow_long_running_task(threads)
+        workflow_monitor(threads)
         return CancelJob
 
     # IMPORTANT:
@@ -1217,7 +1233,7 @@
     logger.debug(f"[WORKFLOW]: {'=' * 100}")
 
 
-def workflow_long_running_task(threads: dict[str, Thread]) -> None:
+def workflow_monitor(threads: dict[str, Thread]) -> None:
     """Workflow schedule for monitoring long running thread from the schedule
     control.
 
@@ -1275,30 +1291,29 @@ def workflow_control(
         sch: Schedule = Schedule.from_loader(name, externals=externals)
         workflow_tasks.extend(
             sch.tasks(
-                start_date_waiting, wf_queue, wf_running, externals=externals
+                start_date_waiting,
+                queue=wf_queue,
+                running=wf_running,
+                externals=externals,
             ),
         )
 
     # NOTE: This schedule job will start every minute at :02 seconds.
-    schedule.every(1).minutes.at(":02").do(
-        workflow_task,
-        workflow_tasks=workflow_tasks,
-        stop=stop
-        or (
-            start_date
-            + timedelta(
-                **json.loads(
-                    os.getenv("WORKFLOW_APP_STOP_BOUNDARY_DELTA")
-                    or '{"minutes": 5, "seconds": 20}'
-                )
-            )
-        ),
-        threads=thread_releases,
-    ).tag("control")
+    (
+        schedule.every(1)
+        .minutes.at(":02")
+        .do(
+            workflow_task,
+            workflow_tasks=workflow_tasks,
+            stop=(stop or (start_date + config.stop_boundary_delta)),
+            threads=thread_releases,
+        )
+        .tag("control")
+    )
 
     # NOTE: Checking zombie task with schedule job will start every 5 minute.
     schedule.every(5).minutes.at(":10").do(
-        workflow_long_running_task,
+        workflow_monitor,
         threads=thread_releases,
     ).tag("monitor")
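The control hunk above replaces an inline ``json.loads(os.getenv(...))`` dance with ``config.stop_boundary_delta``. Assuming the new config property simply owns the same parsing (the env var name and default come from the removed lines; the ``ConfigSketch`` class here is hypothetical), the refactor amounts to:

    import json
    import os
    from datetime import timedelta

    # Old call-site style (the removed lines above): parse the delta inline.
    stop_delta_old = timedelta(
        **json.loads(
            os.getenv("WORKFLOW_APP_STOP_BOUNDARY_DELTA")
            or '{"minutes": 5, "seconds": 20}'
        )
    )


    class ConfigSketch:
        """Hypothetical config object; assumed to mirror the old default."""

        @property
        def stop_boundary_delta(self) -> timedelta:
            return timedelta(
                **json.loads(
                    os.getenv("WORKFLOW_APP_STOP_BOUNDARY_DELTA")
                    or '{"minutes": 5, "seconds": 20}'
                )
            )


    assert ConfigSketch().stop_boundary_delta == stop_delta_old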
 
@@ -1332,14 +1347,16 @@ def workflow_runner(
     """Workflow application that running multiprocessing schedule with chunk of
     workflows that exists in config path.
 
-    :param stop:
+    :param stop: A stop datetime object that force stop running scheduler.
     :param excluded:
     :param externals:
+
     :rtype: list[str]
 
     This function will get all workflows that include on value that was
-    created in config path and chuck it with WORKFLOW_APP_SCHEDULE_PER_PROCESS
-    value to multiprocess executor pool.
+    created in config path and chuck it with application config variable
+    ``WORKFLOW_APP_MAX_SCHEDULE_PER_PROCESS`` env var to multiprocess executor
+    pool.
 
     The current workflow logic that split to process will be below diagram:
 
@@ -1356,7 +1373,7 @@
     excluded: list[str] = excluded or []
 
     with ProcessPoolExecutor(
-        max_workers=int(os.getenv("WORKFLOW_APP_PROCESS_WORKER") or "2"),
+        max_workers=config.max_schedule_process,
     ) as executor:
         futures: list[Future] = [
             executor.submit(
@@ -1367,7 +1384,7 @@
             )
             for loader in batch(
                 Loader.finds(Schedule, excluded=excluded),
-                n=int(os.getenv("WORKFLOW_APP_SCHEDULE_PER_PROCESS") or "100"),
+                n=config.max_schedule_per_process,
             )
         ]
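``workflow_runner`` fans schedule loaders out to a process pool in chunks of ``config.max_schedule_per_process``. A self-contained sketch of that fan-out, with a local ``batch`` helper standing in for the package's util and plain strings standing in for loaders:

    from concurrent.futures import Future, ProcessPoolExecutor
    from itertools import islice


    def batch(iterable, n: int):
        # Stand-in for the package's ``batch`` util: yield n-sized chunks.
        it = iter(iterable)
        while chunk := list(islice(it, n)):
            yield chunk


    def control(names: list[str]) -> list[str]:
        # Stand-in for ``workflow_control``: run one chunk of schedules.
        return names


    if __name__ == "__main__":
        schedules = [f"schedule-{i:02d}" for i in range(10)]
        with ProcessPoolExecutor(max_workers=2) as executor:
            futures: list[Future] = [
                executor.submit(control, chunk)
                for chunk in batch(schedules, n=4)  # 4 schedule names per process
            ]
            for future in futures:
                print(future.result())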
 
ddeutil/workflow/stage.py CHANGED
@@ -3,8 +3,8 @@
 # Licensed under the MIT License. See LICENSE in the project root for
 # license information.
 # ------------------------------------------------------------------------------
-"""Stage Model that use for getting stage data template from Job Model.
-The stage that handle the minimize task that run in some thread (same thread at
+"""Stage Model that use for getting stage data template from the Job Model.
+The stage handle the minimize task that run in some thread (same thread at
 its job owner) that mean it is the lowest executor of a workflow workflow that
 can tracking logs.
 
@@ -12,11 +12,13 @@ can tracking logs.
 handle stage error on this stage model. I think stage model should have a lot of
 usecase and it does not worry when I want to create a new one.
 
-    Execution   --> Ok      --> Result with 0
-                --> Error   --> Raise StageException
+    Execution   --> Ok      --> Result with 0
+                --> Error   --> Result with 1 (if env var was set)
+                            --> Raise StageException
 
-On the context I/O that pass to stage object at execute process. The execute
-method receive `{"params": {...}}` for mapping to template.
+On the context I/O that pass to a stage object at execute process. The
+execute method receives a `params={"params": {...}}` value for mapping to
+template searching.
 """
 from __future__ import annotations
 
@@ -46,9 +48,8 @@ from pydantic.functional_validators import model_validator
 from typing_extensions import Self
 
 from .__types import DictData, DictStr, Re, TupleStr
-from .conf import config
+from .conf import config, get_logger
 from .exceptions import StageException
-from .log import get_logger
 from .utils import (
     Registry,
     Result,
@@ -88,20 +89,28 @@ def handler_result(message: str | None = None) -> DecoratorResult:
     you force catching an output result with error message by specific
     environment variable,`WORKFLOW_CORE_STAGE_RAISE_ERROR`.
 
-        Execution   --> Ok      --> Result with 0
+        Execution   --> Ok      --> Result
+                                    status: 0
+                                    context:
+                                        outputs: ...
+                    --> Error   --> Result (if env var was set)
+                                    status: 1
+                                    context:
+                                        error: ...
+                                        error_message: ...
                     --> Error   --> Raise StageException
-                    --> Result with 1 (if env var was set)
 
     On the last step, it will set the running ID on a return result object
     from current stage ID before release the final result.
 
     :param message: A message that want to add at prefix of exception statement.
+    :type message: str | None (Default=None)
     :rtype: Callable[P, Result]
     """
     # NOTE: The prefix message string that want to add on the first exception
     #   message dialog.
     #
-    #   ... ValueError: {message}
+    #   >>> ValueError: {message}
     #   ... raise value error from the stage execution process.
     #
     message: str = message or ""
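The reshaped diagram above spells out the three outcomes of ``handler_result``: status 0 with outputs, status 1 with ``error``/``error_message`` context when the raise-error flag is off, or a raised ``StageException``. A condensed sketch of that contract (``Result`` and the flag are simplified stand-ins, not the package's classes):

    import functools
    from dataclasses import dataclass, field

    STAGE_RAISE_ERROR = False  # stand-in for ``config.stage_raise_error``


    @dataclass
    class Result:
        status: int
        context: dict = field(default_factory=dict)


    class StageException(Exception):
        pass


    def handler_result_sketch(message: str = ""):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    # Ok branch: status 0 with the stage outputs in context.
                    return Result(status=0, context={"outputs": func(*args, **kwargs)})
                except Exception as err:
                    if STAGE_RAISE_ERROR:
                        # Raise branch: wrap and re-raise as StageException.
                        raise StageException(f"{message}{err}") from err
                    # Catch branch: status 1 with error context.
                    return Result(
                        status=1,
                        context={
                            "error": err,
                            "error_message": f"{err.__class__.__name__}: {err}",
                        },
                    )

            return wrapper

        return decorator


    @handler_result_sketch("sketch: ")
    def run_stage() -> dict:
        return {"foo": "bar"}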
@@ -118,6 +127,7 @@
                 logger.error(
                     f"({self.run_id}) [STAGE]: {err.__class__.__name__}: {err}"
                 )
+                print("Stage Raise error:", config.stage_raise_error)
                 if config.stage_raise_error:
                     # NOTE: If error that raise from stage execution course by
                     #   itself, it will return that error with previous
@@ -175,11 +185,14 @@ class BaseStage(BaseModel, ABC):
     )
 
     @model_validator(mode="after")
-    def __prepare_running_id(self) -> Self:
+    def __prepare_running_id__(self) -> Self:
         """Prepare stage running ID that use default value of field and this
         method will validate name and id fields should not contain any template
         parameter (exclude matrix template).
 
+        :raise ValueError: When the ID and name fields include matrix parameter
+            template with the 'matrix.' string value.
+
         :rtype: Self
         """
         if self.run_id is None:
@@ -189,7 +202,7 @@
         #   template. (allow only matrix)
         if not_in_template(self.id) or not_in_template(self.name):
             raise ValueError(
-                "Stage name and ID should only template with matrix."
+                "Stage name and ID should only template with 'matrix.'"
             )
 
         return self
@@ -226,16 +239,16 @@
         The result of the `to` variable will be;
 
             ... (iii) to: {
-                'stages': {
-                    '<stage-id>': {'outputs': {'foo': 'bar'}}
-                }
+                    'stages': {
+                        '<stage-id>': {'outputs': {'foo': 'bar'}}
+                    }
                 }
+            }
 
         :param output: A output data that want to extract to an output key.
         :param to: A context data that want to add output result.
         :rtype: DictData
         """
-        if not (self.id or config.stage_default_id):
+        if self.id is None and not config.stage_default_id:
             logger.debug(
                 f"({self.run_id}) [STAGE]: Output does not set because this "
                 f"stage does not set ID or default stage ID config flag not be "
255
268
  )
256
269
 
257
270
  # NOTE: Set the output to that stage generated ID with ``outputs`` key.
258
- logger.debug(f"({self.run_id}) [STAGE]: Set outputs on: {_id}")
271
+ logger.debug(f"({self.run_id}) [STAGE]: Set outputs to {_id!r}")
259
272
  to["stages"][_id] = {"outputs": output}
260
273
  return to
261
274
 
@@ -263,6 +276,11 @@
         """Return true if condition of this stage do not correct. This process
         use build-in eval function to execute the if-condition.
 
+        :raise StageException: When it has any error raise from the eval
+            condition statement.
+        :raise StageException: When return type of the eval condition statement
+            does not return with boolean type.
+
         :param params: A parameters that want to pass to condition template.
         :rtype: bool
         """
@@ -299,6 +317,7 @@ class EmptyStage(BaseStage):
     sleep: float = Field(
         default=0,
         description="A second value to sleep before finish execution",
+        ge=0,
     )
 
     def execute(self, params: DictData) -> Result:
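The added ``ge=0`` moves a bad ``sleep`` value from a runtime ``time.sleep`` failure to model validation. A hedged sketch with a cut-down model (assuming pydantic v2, which the imports above suggest):

    from pydantic import BaseModel, Field, ValidationError


    class EmptyStageSketch(BaseModel):
        sleep: float = Field(default=0, ge=0)


    EmptyStageSketch(sleep=1.5)  # fine

    try:
        EmptyStageSketch(sleep=-1)  # rejected by the ge=0 constraint
    except ValidationError as err:
        print(err.errors()[0]["type"])  # greater_than_equal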
@@ -351,7 +370,7 @@ class BashStage(BaseStage):
     )
 
     @contextlib.contextmanager
-    def __prepare_bash(self, bash: str, env: DictStr) -> Iterator[TupleStr]:
+    def prepare_bash(self, bash: str, env: DictStr) -> Iterator[TupleStr]:
         """Return context of prepared bash statement that want to execute. This
         step will write the `.sh` file before giving this file name to context.
         After that, it will auto delete this file automatic.
@@ -394,15 +413,12 @@
         :rtype: Result
         """
         bash: str = param2template(dedent(self.bash), params)
-        with self.__prepare_bash(
+        with self.prepare_bash(
             bash=bash, env=param2template(self.env, params)
         ) as sh:
             logger.info(f"({self.run_id}) [STAGE]: Shell-Execute: {sh}")
             rs: CompletedProcess = subprocess.run(
-                sh,
-                shell=False,
-                capture_output=True,
-                text=True,
+                sh, shell=False, capture_output=True, text=True
             )
             if rs.returncode > 0:
                 # NOTE: Prepare stderr message that returning from subprocess.
@@ -419,8 +435,8 @@
             status=0,
             context={
                 "return_code": rs.returncode,
-                "stdout": rs.stdout.rstrip("\n"),
-                "stderr": rs.stderr.rstrip("\n"),
+                "stdout": rs.stdout.rstrip("\n") or None,
+                "stderr": rs.stderr.rstrip("\n") or None,
             },
         )
@@ -452,6 +468,15 @@ class PyStage(BaseStage):
         ),
     )
 
+    @staticmethod
+    def pick_keys_from_locals(values: DictData) -> Iterator[str]:
+        from inspect import ismodule
+
+        for value in values:
+            if value == "__annotations__" or ismodule(values[value]):
+                continue
+            yield value
+
     def set_outputs(self, output: DictData, to: DictData) -> DictData:
         """Override set an outputs method for the Python execution process that
         extract output from all the locals values.
@@ -461,15 +486,19 @@
         :rtype: DictData
         """
         # NOTE: The output will fileter unnecessary keys from locals.
-        _locals: DictData = output["locals"]
+        lc: DictData = output.get("locals", {})
         super().set_outputs(
-            {k: _locals[k] for k in _locals if k != "__annotations__"}, to=to
+            (
+                {k: lc[k] for k in self.pick_keys_from_locals(lc)}
+                | {k: output[k] for k in output if k.startswith("error")}
+            ),
+            to=to,
         )
 
-        # NOTE:
-        #   Override value that changing from the globals that pass via exec.
-        _globals: DictData = output["globals"]
-        to.update({k: _globals[k] for k in to if k in _globals})
+        # NOTE: Override value that changing from the globals that pass via the
+        #   exec function.
+        gb: DictData = output.get("globals", {})
+        to.update({k: gb[k] for k in to if k in gb})
         return to
 
 
 @handler_result()
  @handler_result()
@@ -487,15 +516,15 @@ class PyStage(BaseStage):
487
516
  _globals: DictData = (
488
517
  globals() | params | param2template(self.vars, params)
489
518
  )
490
- _locals: DictData = {}
519
+ lc: DictData = {}
491
520
 
492
521
  # NOTE: Start exec the run statement.
493
522
  logger.info(f"({self.run_id}) [STAGE]: Py-Execute: {self.name}")
494
- exec(run, _globals, _locals)
523
+ exec(run, _globals, lc)
495
524
 
496
525
  return Result(
497
526
  status=0,
498
- context={"locals": _locals, "globals": _globals},
527
+ context={"locals": lc, "globals": _globals},
499
528
  )
500
529
 
501
530
 
@@ -514,6 +543,11 @@ def extract_hook(hook: str) -> Callable[[], TagFunc]:
     """Extract Hook function from string value to hook partial function that
     does run it at runtime.
 
+    :raise NotImplementedError: When the searching hook's function result does
+        not exist in the registry.
+    :raise NotImplementedError: When the searching hook's tag result does not
+        exists in the registry with its function key.
+
     :param hook: A hook value that able to match with Task regex.
     :rtype: Callable[[], TagFunc]
     """
@@ -554,14 +588,14 @@ class HookStage(BaseStage):
     >>> stage = {
     ...     "name": "Task stage execution",
     ...     "uses": "tasks/function-name@tag-name",
-    ...     "args": {
-    ...         "FOO": "BAR",
-    ...     },
+    ...     "args": {"FOO": "BAR"},
     ... }
     """
 
     uses: str = Field(
-        description="A pointer that want to load function from registry.",
+        description=(
+            "A pointer that want to load function from the hook registry."
+        ),
     )
     args: DictData = Field(
         default_factory=dict,
@@ -573,6 +607,11 @@
     def execute(self, params: DictData) -> Result:
         """Execute the Hook function that already in the hook registry.
 
+        :raise ValueError: When the necessary arguments of hook function do not
+            set from the input params argument.
+        :raise TypeError: When the return type of hook function does not be
+            dict type.
+
         :param params: A parameter that want to pass before run any statement.
         :type params: DictData
         :rtype: Result
@@ -622,10 +661,7 @@ class TriggerStage(BaseStage):
     >>> stage = {
     ...     "name": "Trigger workflow stage execution",
    ...     "trigger": 'workflow-name-for-loader',
-    ...     "params": {
-    ...         "run-date": "2024-08-01",
-    ...         "source": "src",
-    ...     },
+    ...     "params": {"run-date": "2024-08-01", "source": "src"},
     ... }
     """
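The new ``:raise:`` docs on ``extract_hook`` describe a two-step lookup failure: first the function key, then the tag under it. A toy registry sketch of that behavior (the registry contents here are hypothetical):

    from typing import Callable

    # Toy registry: function name -> tag name -> callable (contents hypothetical).
    registry: dict[str, dict[str, Callable[[], str]]] = {
        "function-name": {"tag-name": lambda: "hello"},
    }


    def extract_hook_sketch(func: str, tag: str) -> Callable[[], str]:
        if func not in registry:
            raise NotImplementedError(f"hook function {func!r} is not registered")
        if tag not in registry[func]:
            raise NotImplementedError(f"tag {tag!r} not found under {func!r}")
        return registry[func][tag]


    print(extract_hook_sketch("function-name", "tag-name")())  # hello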