ddeutil-workflow 0.0.71__tar.gz → 0.0.73__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. {ddeutil_workflow-0.0.71/src/ddeutil_workflow.egg-info → ddeutil_workflow-0.0.73}/PKG-INFO +4 -4
  2. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/pyproject.toml +4 -4
  3. ddeutil_workflow-0.0.73/src/ddeutil/workflow/__about__.py +1 -0
  4. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/cli.py +19 -3
  5. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/conf.py +31 -14
  6. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/event.py +1 -1
  7. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/stages.py +35 -9
  8. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/traces.py +20 -9
  9. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/utils.py +13 -1
  10. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/workflow.py +1 -1
  11. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73/src/ddeutil_workflow.egg-info}/PKG-INFO +4 -4
  12. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/SOURCES.txt +1 -0
  13. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/requires.txt +3 -3
  14. ddeutil_workflow-0.0.73/tests/test_cli.py +15 -0
  15. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_conf.py +58 -27
  16. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_traces.py +11 -6
  17. ddeutil_workflow-0.0.71/src/ddeutil/workflow/__about__.py +0 -1
  18. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/LICENSE +0 -0
  19. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/README.md +0 -0
  20. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/setup.cfg +0 -0
  21. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/__cron.py +0 -0
  22. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/__init__.py +0 -0
  23. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/__main__.py +0 -0
  24. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/__types.py +0 -0
  25. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/api/__init__.py +0 -0
  26. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/api/log_conf.py +0 -0
  27. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/api/routes/__init__.py +0 -0
  28. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/api/routes/job.py +0 -0
  29. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/api/routes/logs.py +0 -0
  30. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/api/routes/workflows.py +0 -0
  31. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/audits.py +0 -0
  32. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/errors.py +0 -0
  33. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/job.py +0 -0
  34. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/params.py +0 -0
  35. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/result.py +0 -0
  36. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/reusables.py +0 -0
  37. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/dependency_links.txt +0 -0
  38. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/entry_points.txt +0 -0
  39. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/top_level.txt +0 -0
  40. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test__cron.py +0 -0
  41. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test__regex.py +0 -0
  42. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_audits.py +0 -0
  43. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_errors.py +0 -0
  44. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_event.py +0 -0
  45. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_job.py +0 -0
  46. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_job_exec.py +0 -0
  47. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_job_exec_strategy.py +0 -0
  48. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_params.py +0 -0
  49. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_result.py +0 -0
  50. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_reusables_call_tag.py +0 -0
  51. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_reusables_func_model.py +0 -0
  52. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_reusables_template.py +0 -0
  53. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_reusables_template_filter.py +0 -0
  54. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_strategy.py +0 -0
  55. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_utils.py +0 -0
  56. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_workflow.py +0 -0
  57. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_workflow_exec.py +0 -0
  58. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_workflow_exec_job.py +0 -0
  59. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_workflow_release.py +0 -0
  60. {ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_workflow_rerun.py +0 -0
{ddeutil_workflow-0.0.71/src/ddeutil_workflow.egg-info → ddeutil_workflow-0.0.73}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ddeutil-workflow
-Version: 0.0.71
+Version: 0.0.73
 Summary: Lightweight workflow orchestration with YAML template
 Author-email: ddeutils <korawich.anu@gmail.com>
 License: MIT
@@ -24,9 +24,9 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: ddeutil[checksum]>=0.4.8
 Requires-Dist: ddeutil-io[toml,yaml]>=0.2.14
-Requires-Dist: pydantic==2.11.5
-Requires-Dist: pydantic-extra-types==2.10.4
-Requires-Dist: python-dotenv==1.1.0
+Requires-Dist: pydantic<3.0.0,==2.11.5
+Requires-Dist: pydantic-extra-types<3.0.0,>=2.10.4
+Requires-Dist: python-dotenv>=1.1.0
 Requires-Dist: typer>=0.16.0
 Provides-Extra: all
 Requires-Dist: fastapi<1.0.0,>=0.115.0; extra == "all"
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/pyproject.toml

@@ -27,9 +27,9 @@ requires-python = ">=3.9.13"
 dependencies = [
     "ddeutil[checksum]>=0.4.8",
     "ddeutil-io[yaml,toml]>=0.2.14",
-    "pydantic==2.11.5",
-    "pydantic-extra-types==2.10.4",
-    "python-dotenv==1.1.0",
+    "pydantic==2.11.5,<3.0.0",
+    "pydantic-extra-types>=2.10.4,<3.0.0",
+    "python-dotenv>=1.1.0",
     "typer>=0.16.0",
 ]
 dynamic = ["version"]
@@ -116,7 +116,7 @@ filterwarnings = [
 ]
 log_cli = true
 log_cli_level = "DEBUG"
-log_cli_format = "%(asctime)s [%(levelname)-7s] %(message)-120s (%(filename)s:%(lineno)s)"
+log_cli_format = "%(asctime)s [%(levelname)-7s] (%(cut_id)s) %(message)-120s (%(filename)s:%(lineno)s)"
 log_cli_date_format = "%Y%m%d %H:%M:%S"
 
 [tool.black]
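
Note: %(cut_id)s is not a built-in logging record attribute, so any handler using this format needs records that actually carry a cut_id (the package supplies it through its trace layer; that wiring is not part of this diff). A minimal sketch of satisfying the format by hand; the logger name and the ID value "01JX" are illustrative, not from the package:

    import logging

    # The format only resolves when the record has a `cut_id` attribute,
    # e.g. injected via the `extra` mapping on each log call.
    logging.basicConfig(
        format="%(asctime)s [%(levelname)-7s] (%(cut_id)s) %(message)s"
    )
    logging.getLogger("demo").warning("hello", extra={"cut_id": "01JX"})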
ddeutil_workflow-0.0.73/src/ddeutil/workflow/__about__.py (new file)

@@ -0,0 +1 @@
+__version__: str = "0.0.73"
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/cli.py

@@ -1,3 +1,10 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) 2022 Korawich Anuttra. All rights reserved.
+# Licensed under the MIT License. See LICENSE in the project root for
+# license information.
+# ------------------------------------------------------------------------------
+from __future__ import annotations
+
 import json
 from pathlib import Path
 from platform import python_version
@@ -8,7 +15,6 @@ from pydantic import Field, TypeAdapter
 
 from .__about__ import __version__
 from .__types import DictData
-from .api import app as fastapp
 from .errors import JobError
 from .event import Crontab
 from .job import Job
@@ -46,7 +52,7 @@ def execute_job(
     """Job execution on the local.
 
     Example:
-        ... workflow-cli job --params "{\"test\": 1}"
+        ... workflow-cli job --params \"{\\\"test\\\": 1}\"
     """
     try:
         params_dict: dict[str, Any] = json.loads(params)
@@ -92,6 +98,7 @@ def api(
     """
     import uvicorn
 
+    from .api import app as fastapp
     from .api.log_conf import LOGGING_CONFIG
 
     # LOGGING_CONFIG = {}
@@ -121,7 +128,7 @@ def make(
 
 
 workflow_app = typer.Typer()
-app.add_typer(workflow_app, name="workflow", help="An Only Workflow CLI.")
+app.add_typer(workflow_app, name="workflows", help="An Only Workflow CLI.")
 
 
 @workflow_app.callback()
@@ -180,5 +187,14 @@ def workflow_json_schema(
         json.dump(template_schema | json_schema, f, indent=2)
 
 
+log_app = typer.Typer()
+app.add_typer(log_app, name="logs", help="An Only Log CLI.")
+
+
+@log_app.callback()
+def log_callback():
+    """Manage Only Log CLI."""
+
+
 if __name__ == "__main__":
     app()
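
Note: moving `from .api import app as fastapp` into the `api` command body means the base CLI no longer needs FastAPI installed, but renaming the sub-app from `workflow` to `workflows` is a breaking CLI change, and the new `logs` group currently registers only a callback. A quick hedged check with Typer's test runner; only the command names shown in this diff are assumed:

    from typer.testing import CliRunner

    from ddeutil.workflow.cli import app

    runner = CliRunner()
    # Was `workflow --help` in 0.0.71; the group is now `workflows`.
    assert runner.invoke(app, ["workflows", "--help"]).exit_code == 0
    # The new `logs` group exposes no subcommands yet.
    assert runner.invoke(app, ["logs", "--help"]).exit_code == 0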
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/conf.py

@@ -9,7 +9,6 @@ import copy
 import os
 from collections.abc import Iterator
 from functools import cached_property
-from inspect import isclass
 from pathlib import Path
 from typing import Final, Optional, TypeVar, Union
 from zoneinfo import ZoneInfo
@@ -20,6 +19,7 @@ from ddeutil.io.paths import glob_files, is_ignored, read_ignore
 from pydantic import SecretStr
 
 from .__types import DictData
+from .utils import obj_name
 
 T = TypeVar("T")
 PREFIX: Final[str] = "WORKFLOW"
@@ -109,7 +109,7 @@ class Config: # pragma: no cov
             "LOG_FORMAT",
             (
                 "%(asctime)s.%(msecs)03d (%(process)-5d, "
-                "%(thread)-5d) [%(levelname)-7s] %(message)-120s "
+                "%(thread)-5d) [%(levelname)-7s] (%(cut_id)s) %(message)-120s "
                 "(%(filename)s:%(lineno)s) (%(name)-10s)"
             ),
         )
@@ -119,8 +119,8 @@ class Config: # pragma: no cov
         return env(
             "LOG_FORMAT_FILE",
             (
-                "{datetime} ({process:5d}, {thread:5d}) {message:120s} "
-                "({filename}:{lineno})"
+                "{datetime} ({process:5d}, {thread:5d}) ({cut_id}) "
+                "{message:120s} ({filename}:{lineno})"
             ),
         )
 
@@ -192,6 +192,7 @@ class YamlParser:
         path: Optional[Union[str, Path]] = None,
         externals: DictData | None = None,
         extras: DictData | None = None,
+        obj: Optional[Union[object, str]] = None,
     ) -> None:
         self.path: Path = Path(dynamic("conf_path", f=path, extras=extras))
         self.externals: DictData = externals or {}
@@ -201,6 +202,7 @@ class YamlParser:
             path=path,
             paths=self.extras.get("conf_paths"),
             extras=extras,
+            obj=obj,
         )
 
         # VALIDATE: check the data that reading should not empty.
@@ -218,7 +220,9 @@ class YamlParser:
         *,
         path: Optional[Path] = None,
         paths: Optional[list[Path]] = None,
+        obj: Optional[Union[object, str]] = None,
         extras: Optional[DictData] = None,
+        ignore_filename: Optional[str] = None,
     ) -> DictData:
         """Find data with specific key and return the latest modify date data if
         this key exists multiple files.
@@ -226,8 +230,12 @@ class YamlParser:
         :param name: (str) A name of data that want to find.
         :param path: (Path) A config path object.
         :param paths: (list[Path]) A list of config path object.
+        :param obj: (object | str) An object that want to validate matching
+            before return.
         :param extras: (DictData) An extra parameter that use to override core
             config values.
+        :param ignore_filename: (str) An ignore filename. Default is
+            ``.confignore`` filename.
 
         :rtype: DictData
         """
@@ -243,39 +251,49 @@ class YamlParser:
             paths.append(path)
 
         all_data: list[tuple[float, DictData]] = []
+        obj_type: Optional[str] = obj_name(obj)
+
         for path in paths:
             for file in glob_files(path):
 
-                if cls.is_ignore(file, path):
+                if cls.is_ignore(file, path, ignore_filename=ignore_filename):
                     continue
 
                 if data := cls.filter_yaml(file, name=name):
-                    all_data.append((file.lstat().st_mtime, data))
+                    if not obj_type:
+                        all_data.append((file.lstat().st_mtime, data))
+                    elif (t := data.get("type")) and t == obj_type:
+                        all_data.append((file.lstat().st_mtime, data))
+                    else:
+                        continue
 
         return {} if not all_data else max(all_data, key=lambda x: x[0])[1]
 
     @classmethod
     def finds(
         cls,
-        obj: object,
+        obj: Union[object, str],
         *,
         path: Optional[Path] = None,
         paths: Optional[list[Path]] = None,
         excluded: Optional[list[str]] = None,
         extras: Optional[DictData] = None,
+        ignore_filename: Optional[str] = None,
     ) -> Iterator[tuple[str, DictData]]:
         """Find all data that match with object type in config path. This class
         method can use include and exclude list of identity name for filter and
         adds-on.
 
-        :param obj: (object) An object that want to validate matching before
-            return.
+        :param obj: (object | str) An object that want to validate matching
+            before return.
         :param path: (Path) A config path object.
         :param paths: (list[Path]) A list of config path object.
         :param excluded: An included list of data key that want to filter from
             data.
         :param extras: (DictData) An extra parameter that use to override core
             config values.
+        :param ignore_filename: (str) An ignore filename. Default is
+            ``.confignore`` filename.
 
         :rtype: Iterator[tuple[str, DictData]]
         """
@@ -292,10 +310,12 @@ class YamlParser:
             paths.append(path)
 
         all_data: dict[str, list[tuple[float, DictData]]] = {}
+        obj_type: str = obj_name(obj)
+
         for path in paths:
             for file in glob_files(path):
 
-                if cls.is_ignore(file, path):
+                if cls.is_ignore(file, path, ignore_filename=ignore_filename):
                     continue
 
                 for key, data in cls.filter_yaml(file).items():
@@ -303,10 +323,7 @@ class YamlParser:
                     if key in excluded:
                         continue
 
-                    if (
-                        data.get("type", "")
-                        == (obj if isclass(obj) else obj.__class__).__name__
-                    ):
+                    if (t := data.get("type")) and t == obj_type:
                         marking: tuple[float, DictData] = (
                             file.lstat().st_mtime,
                             data,
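
Note: `find` and `finds` now accept `obj` as either a class or a bare type-name string, normalized via the new `utils.obj_name` helper, and entries whose `type` field mismatches are skipped before the latest-mtime winner is picked. A minimal sketch grounded in the test additions below; the config name and path are illustrative:

    from pathlib import Path

    from ddeutil.workflow.conf import YamlParser

    # String and class forms are equivalent; no match returns {}.
    data = YamlParser.find("my-flow", path=Path("./conf"), obj="Workflow")
    if not data:
        print("no Workflow-typed config named 'my-flow'")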
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/event.py

@@ -139,7 +139,7 @@ class Crontab(BaseModel):
         :rtype: Self
         """
         extras: DictData = extras or {}
-        loader: YamlParser = YamlParser(name, extras=extras)
+        loader: YamlParser = YamlParser(name, extras=extras, obj=cls)
 
         # NOTE: Validate the config type match with current connection model
         if loader.type != cls.__name__:
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/stages.py

@@ -1311,6 +1311,17 @@ class CallStage(BaseRetryStage):
         alias="with",
     )
 
+    @field_validator("args", mode="before")
+    def __validate_args_key(cls, value: Any) -> Any:
+        if isinstance(value, dict):
+            if any(k in value for k in ("result", "extras")):
+                raise ValueError(
+                    "The argument on workflow template for the caller stage "
+                    "should not pass `result` and `extras`. They are special "
+                    "arguments."
+                )
+        return value
+
     def execute(
         self,
         params: DictData,
@@ -1348,9 +1359,10 @@ class CallStage(BaseRetryStage):
 
         # VALIDATE: check input task caller parameters that exists before
         # calling.
-        args: DictData = {"result": result} | param2template(
-            self.args, params, extras=self.extras
-        )
+        args: DictData = {
+            "result": result,
+            "extras": self.extras,
+        } | param2template(self.args, params, extras=self.extras)
         sig = inspect.signature(call_func)
         necessary_params: list[str] = []
         has_keyword: bool = False
@@ -1369,14 +1381,21 @@ class CallStage(BaseRetryStage):
             (k.removeprefix("_") not in args and k not in args)
             for k in necessary_params
         ):
+            necessary_params.remove("result")
+            necessary_params.remove("extras")
+            args.pop("result")
+            args.pop("extras")
             raise ValueError(
                 f"Necessary params, ({', '.join(necessary_params)}, ), "
-                f"does not set to args, {list(args.keys())}."
+                f"does not set to args. It already set {list(args.keys())}."
             )
 
         if "result" not in sig.parameters and not has_keyword:
             args.pop("result")
 
+        if "extras" not in sig.parameters and not has_keyword:
+            args.pop("extras")
+
         if event and event.is_set():
             raise StageCancelError(
                 "Execution was canceled from the event before start parallel."
@@ -1441,9 +1460,10 @@ class CallStage(BaseRetryStage):
 
         # VALIDATE: check input task caller parameters that exists before
         # calling.
-        args: DictData = {"result": result} | param2template(
-            self.args, params, extras=self.extras
-        )
+        args: DictData = {
+            "result": result,
+            "extras": self.extras,
+        } | param2template(self.args, params, extras=self.extras)
         sig = inspect.signature(call_func)
         necessary_params: list[str] = []
         has_keyword: bool = False
@@ -1462,14 +1482,20 @@ class CallStage(BaseRetryStage):
             (k.removeprefix("_") not in args and k not in args)
             for k in necessary_params
         ):
+            necessary_params.remove("result")
+            necessary_params.remove("extras")
+            args.pop("result")
+            args.pop("extras")
             raise ValueError(
                 f"Necessary params, ({', '.join(necessary_params)}, ), "
-                f"does not set to args, {list(args.keys())}."
+                f"does not set to args. It already set {list(args.keys())}."
             )
-
         if "result" not in sig.parameters and not has_keyword:
             args.pop("result")
 
+        if "extras" not in sig.parameters and not has_keyword:
+            args.pop("extras")
+
         args: DictData = self.validate_model_args(call_func, args, result)
         if inspect.iscoroutinefunction(call_func):
             rs: DictOrModel = await call_func(
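
Note: both execute paths now inject `extras` next to `result`, pop either one when the target signature does not declare it, and strip both from the missing-params error report. A hedged sketch of a caller that opts in to both; the `tag` decorator and its import path are assumed from the package's reusables module, which this diff does not show:

    from typing import Any

    from ddeutil.workflow import Result
    from ddeutil.workflow.reusables import tag

    @tag("demo", alias="say-hello")
    def say_hello(
        name: str, result: Result, extras: dict[str, Any]
    ) -> dict[str, Any]:
        # `result` and `extras` arrive injected by CallStage; a template
        # that passes them explicitly now fails the new args validator.
        return {"greeting": f"hello {name}", "extras_keys": list(extras)}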
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/traces.py

@@ -76,7 +76,7 @@ PREFIX_LOGS_REGEX: re.Pattern[str] = re.compile(
 ) # pragma: no cov
 
 
-class PrefixMsg(BaseModel):
+class Message(BaseModel):
     """Prefix Message model for receive grouping dict from searching prefix data
     from logging message.
     """
@@ -92,9 +92,9 @@ class PrefixMsg(BaseModel):
             msg (str): A message that want to extract.
 
         Returns:
-            PrefixMsg: the validated model from a string message.
+            Message: the validated model from a string message.
         """
-        return PrefixMsg.model_validate(
+        return Message.model_validate(
             obj=PREFIX_LOGS_REGEX.search(msg).groupdict()
         )
 
@@ -126,6 +126,9 @@ class TraceMeta(BaseModel): # pragma: no cov
     process: int = Field(description="A process ID.")
     thread: int = Field(description="A thread ID.")
     message: str = Field(description="A message log.")
+    cut_id: Optional[str] = Field(
+        default=None, description="A cutting of running ID."
+    )
     filename: str = Field(description="A filename of this log.")
     lineno: int = Field(description="A line number of this log.")
 
@@ -157,6 +160,7 @@ class TraceMeta(BaseModel): # pragma: no cov
         mode: Literal["stdout", "stderr"],
         message: str,
         level: str,
+        cutting_id: str,
         *,
         extras: Optional[DictData] = None,
     ) -> Self:
@@ -166,6 +170,7 @@ class TraceMeta(BaseModel): # pragma: no cov
         :param mode: (Literal["stdout", "stderr"]) A metadata mode.
         :param message: (str) A message.
         :param level: (str) A log level.
+        :param cutting_id: (str)
         :param extras: (DictData) An extra parameter that want to override core
             config values.
 
@@ -185,6 +190,7 @@ class TraceMeta(BaseModel): # pragma: no cov
             process=os.getpid(),
             thread=get_ident(),
             message=message,
+            cut_id=cutting_id,
             filename=frame_info.filename.split(os.path.sep)[-1],
             lineno=frame_info.lineno,
         )
@@ -529,10 +535,7 @@ class ConsoleTrace(BaseTrace): # pragma: no cov
 
         :rtype: str
         """
-        return prepare_newline(
-            f"({self.cut_id}) "
-            f"{PrefixMsg.from_str(message).prepare(self.extras)}"
-        )
+        return prepare_newline(Message.from_str(message).prepare(self.extras))
 
     def _logging(
         self, message: str, mode: str, *, is_err: bool = False
@@ -655,7 +658,11 @@ class FileTrace(ConsoleTrace): # pragma: no cov
 
         mode: Literal["stdout", "stderr"] = "stderr" if is_err else "stdout"
         trace_meta: TraceMeta = TraceMeta.make(
-            mode=mode, level=level, message=message, extras=self.extras
+            mode=mode,
+            level=level,
+            message=message,
+            cutting_id=self.cut_id,
+            extras=self.extras,
         )
 
         with (self.pointer / f"{mode}.txt").open(
@@ -684,7 +691,11 @@ class FileTrace(ConsoleTrace): # pragma: no cov
 
         mode: Literal["stdout", "stderr"] = "stderr" if is_err else "stdout"
         trace_meta: TraceMeta = TraceMeta.make(
-            mode=mode, level=level, message=message, extras=self.extras
+            mode=mode,
+            level=level,
+            message=message,
+            cutting_id=self.cut_id,
+            extras=self.extras,
         )
 
         async with aiofiles.open(
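
Note: `TraceMeta.make` gains a required `cutting_id` parameter (stored as `cut_id`, which feeds the new `(%(cut_id)s)` slot in the log formats now that the console trace no longer prepends it to the message), so external callers must be updated; the test changes below pass `cutting_id=""`. A minimal check, with an illustrative ID value:

    from ddeutil.workflow.traces import TraceMeta

    # "01JX" is a made-up running ID, not a value from the package.
    meta = TraceMeta.make(
        mode="stdout", message="Foo", level="info", cutting_id="01JX"
    )
    assert meta.cut_id == "01JX"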
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/utils.py

@@ -11,7 +11,7 @@ import time
 from collections.abc import Iterator
 from datetime import date, datetime, timedelta
 from hashlib import md5
-from inspect import isfunction
+from inspect import isclass, isfunction
 from itertools import product
 from pathlib import Path
 from random import randrange
@@ -303,3 +303,15 @@ def dump_all(
     elif isinstance(value, BaseModel):
         return value.model_dump(by_alias=by_alias)
     return value
+
+
+def obj_name(obj: Optional[Union[str, object]] = None) -> Optional[str]:
+    if not obj:
+        obj_type: Optional[str] = None
+    elif isinstance(obj, str):
+        obj_type: str = obj
+    elif isclass(obj):
+        obj_type: str = obj.__name__
+    else:
+        obj_type: str = obj.__class__.__name__
+    return obj_type
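
Note: `obj_name` is the normalizer behind the new `obj` filter in `YamlParser`. Its expected behavior, as a quick check derived from the code above:

    from ddeutil.workflow.utils import obj_name

    assert obj_name() is None                  # no object -> no type filter
    assert obj_name("Workflow") == "Workflow"  # strings pass through
    assert obj_name(dict) == "dict"            # classes -> __name__
    assert obj_name(3.14) == "float"           # instances -> class name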
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil/workflow/workflow.py

@@ -126,7 +126,7 @@ class Workflow(BaseModel):
         :rtype: Self
         """
-        load: YamlParser = YamlParser(name, path=path, extras=extras)
+        load: YamlParser = YamlParser(name, path=path, extras=extras, obj=cls)
 
         # NOTE: Validate the config type match with current connection model
         if load.type != cls.__name__:
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73/src/ddeutil_workflow.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ddeutil-workflow
-Version: 0.0.71
+Version: 0.0.73
 Summary: Lightweight workflow orchestration with YAML template
 Author-email: ddeutils <korawich.anu@gmail.com>
 License: MIT
@@ -24,9 +24,9 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: ddeutil[checksum]>=0.4.8
 Requires-Dist: ddeutil-io[toml,yaml]>=0.2.14
-Requires-Dist: pydantic==2.11.5
-Requires-Dist: pydantic-extra-types==2.10.4
-Requires-Dist: python-dotenv==1.1.0
+Requires-Dist: pydantic<3.0.0,==2.11.5
+Requires-Dist: pydantic-extra-types<3.0.0,>=2.10.4
+Requires-Dist: python-dotenv>=1.1.0
 Requires-Dist: typer>=0.16.0
 Provides-Extra: all
 Requires-Dist: fastapi<1.0.0,>=0.115.0; extra == "all"
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/SOURCES.txt

@@ -34,6 +34,7 @@ src/ddeutil_workflow.egg-info/top_level.txt
 tests/test__cron.py
 tests/test__regex.py
 tests/test_audits.py
+tests/test_cli.py
 tests/test_conf.py
 tests/test_errors.py
 tests/test_event.py
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/src/ddeutil_workflow.egg-info/requires.txt

@@ -1,8 +1,8 @@
 ddeutil[checksum]>=0.4.8
 ddeutil-io[toml,yaml]>=0.2.14
-pydantic==2.11.5
-pydantic-extra-types==2.10.4
-python-dotenv==1.1.0
+pydantic<3.0.0,==2.11.5
+pydantic-extra-types<3.0.0,>=2.10.4
+python-dotenv>=1.1.0
 typer>=0.16.0
 
 [all]
ddeutil_workflow-0.0.73/tests/test_cli.py (new file)

@@ -0,0 +1,15 @@
+import pytest
+from ddeutil.workflow.cli import app
+from typer.testing import CliRunner
+
+
+@pytest.fixture(scope="module")
+def runner() -> CliRunner:
+    return CliRunner()
+
+
+def test_app(runner: CliRunner):
+    result = runner.invoke(app, ["version"])
+    assert result.exit_code == 0
+    assert "ddeutil-workflow==" in result.output
+    assert "python-version==" in result.output
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_conf.py

@@ -76,13 +76,22 @@ def test_load_file(target_path: Path):
         "env": "Asia/Bangkok",
     }
 
+    load = YamlParser(
+        "test_load_file", extras={"conf_paths": [target_path]}, obj="Workflow"
+    )
+    assert load.data == {
+        "type": "Workflow",
+        "desc": "Test multi config path",
+        "env": "${WORKFLOW_CORE_TIMEZONE}",
+    }
+
     # NOTE: Raise because passing `conf_paths` invalid type.
     with pytest.raises(TypeError):
         YamlParser("test_load_file", extras={"conf_paths": target_path})
 
 
 def test_load_file_finds(target_path: Path):
-    dummy_file: Path = target_path / "test_simple_file.yaml"
+    dummy_file: Path = target_path / "01_test_simple_file.yaml"
     with dummy_file.open(mode="w") as f:
         yaml.dump(
             {
@@ -90,7 +99,7 @@ def test_load_file_finds(target_path: Path):
                     "type": "Config",
                     "foo": "bar",
                 },
-                "test_load_file": {"type": "Workflow"},
+                "test_load_file": {"type": "Workflow", "data": "foo"},
             },
             f,
         )
@@ -102,6 +111,7 @@ def test_load_file_finds(target_path: Path):
             {"type": "Config", "foo": "bar"},
         )
     ] == list(YamlParser.finds(Config, path=config.conf_path))
+
     assert [] == list(
         YamlParser.finds(
             Config,
@@ -110,6 +120,48 @@ def test_load_file_finds(target_path: Path):
         )
     )
 
+    # NOTE: Create duplicate data with the first order by filename.
+    dummy_file_dup: Path = target_path / "00_test_simple_file_duplicate.yaml"
+    with dummy_file_dup.open(mode="w") as f:
+        yaml.dump(
+            {"test_load_file": {"type": "Workflow", "data": "bar"}},
+            f,
+        )
+
+    assert [
+        (
+            "test_load_file",
+            {"type": "Workflow", "data": "bar"},
+        ),
+    ] == list(YamlParser.finds("Workflow", path=target_path))
+
+    dummy_file_dup.unlink()
+
+    # NOTE: Create duplicate data with the first order by filename.
+    dummy_file_dup: Path = target_path / "00_test_simple_file_duplicate.yaml"
+    with dummy_file_dup.open(mode="w") as f:
+        yaml.dump(
+            {"test_load_file": {"type": "Config", "data": "bar"}},
+            f,
+        )
+
+    assert [
+        (
+            "test_load_file",
+            {"type": "Workflow", "data": "foo"},
+        ),
+    ] == list(YamlParser.finds("Workflow", path=target_path))
+
+    load = YamlParser.find("test_load_file", path=target_path, obj="Workflow")
+    assert load == {"type": "Workflow", "data": "foo"}
+
+    # NOTE: Load with the same name, but it set different type.
+    load = YamlParser.find("test_load_file", path=target_path, obj="Config")
+    assert load == {"type": "Config", "data": "bar"}
+
+    load = YamlParser.find("test_load_file", path=target_path, obj="Crontab")
+    assert load == {}
+
     dummy_file.unlink()
 
 
@@ -117,12 +169,7 @@ def test_load_file_finds_raise(target_path: Path):
     dummy_file: Path = target_path / "test_simple_file_raise.yaml"
     with dummy_file.open(mode="w") as f:
         yaml.dump(
-            {
-                "test_load_file_config": {
-                    "foo": "bar",
-                },
-                "test_load_file": {"type": "Workflow"},
-            },
+            {"test_load_file": {"type": "Workflow"}},
             f,
         )
 
@@ -130,27 +177,11 @@ def test_load_file_finds_raise(target_path: Path):
     with pytest.raises(ValueError):
         _ = YamlParser("test_load_file_config", path=config.conf_path).type
 
-
-@pytest.fixture(scope="module")
-def schedule_path(test_path):
-    target_p = test_path / "test_schedule_conf"
-    target_p.mkdir(exist_ok=True)
-
-    with (target_p / "test_schedule_conf.yaml").open(mode="w") as f:
-        yaml.dump(
-            {
-                "schedule-wf": {
-                    "type": "Schedule",
-                    "desc": "Test multi config path",
-                }
-            },
-            f,
+    assert (
+        YamlParser("test_load_file", path=config.conf_path).type
+        == "Workflow"
     )
 
-    yield target_p
-
-    shutil.rmtree(target_p)
-
 
 def test_dynamic():
     conf = dynamic("audit_path", extras={"audit_path": Path("/extras-audits")})
{ddeutil_workflow-0.0.71 → ddeutil_workflow-0.0.73}/tests/test_traces.py

@@ -4,7 +4,7 @@ import pytest
 from ddeutil.workflow import Result
 from ddeutil.workflow.traces import (
     FileTrace,
-    PrefixMsg,
+    Message,
     TraceMeta,
 )
 
@@ -25,7 +25,7 @@ def test_trace_regex_message():
         "[STAGE]: Execute Empty-Stage: 'End trigger Priority Group': "
         "( End trigger Priority Group: 2 )"
     )
-    prefix: PrefixMsg = PrefixMsg.from_str(msg)
+    prefix: Message = Message.from_str(msg)
     assert prefix.name == "STAGE"
     assert prefix.message == (
         "Execute Empty-Stage: 'End trigger Priority Group': "
@@ -36,7 +36,7 @@ def test_trace_regex_message():
         "[]: Execute Empty-Stage: 'End trigger Priority Group': "
         "( End trigger Priority Group: 2 )"
     )
-    prefix: PrefixMsg = PrefixMsg.from_str(msg)
+    prefix: Message = Message.from_str(msg)
     assert prefix.name is None
     assert prefix.message == (
         "[]: Execute Empty-Stage: 'End trigger Priority Group': "
@@ -44,7 +44,7 @@ def test_trace_regex_message():
     )
 
     msg: str = ""
-    prefix: PrefixMsg = PrefixMsg.from_str(msg)
+    prefix: Message = Message.from_str(msg)
     assert prefix.name is None
     assert prefix.message == ""
 
@@ -52,7 +52,7 @@ def test_trace_regex_message():
         "[WORKFLOW]: Execute Empty-Stage:\n'End trigger Priority Group':\n"
         "( End trigger Priority Group: 2 )"
     )
-    prefix: PrefixMsg = PrefixMsg.from_str(msg)
+    prefix: Message = Message.from_str(msg)
     assert prefix.name == "WORKFLOW"
     assert prefix.message == (
         "Execute Empty-Stage:\n'End trigger Priority Group':\n"
@@ -69,13 +69,16 @@ def test_trace_regex_message():
 
 
 def test_trace_meta():
-    meta = TraceMeta.make(mode="stderr", message="Foo", level="info")
+    meta = TraceMeta.make(
+        mode="stderr", message="Foo", level="info", cutting_id=""
+    )
     assert meta.message == "Foo"
 
     meta = TraceMeta.make(
         mode="stderr",
         message="Foo",
         level="info",
+        cutting_id="",
         extras={"logs_trace_frame_layer": 1},
     )
     assert meta.filename == "test_traces.py"
@@ -84,6 +87,7 @@ def test_trace_meta():
         mode="stderr",
         message="Foo",
         level="info",
+        cutting_id="",
         extras={"logs_trace_frame_layer": 2},
     )
     assert meta.filename == "python.py"
@@ -94,6 +98,7 @@ def test_trace_meta():
         mode="stderr",
         message="Foo",
         level="info",
+        cutting_id="",
         extras={"logs_trace_frame_layer": 100},
     )
 
ddeutil_workflow-0.0.71/src/ddeutil/workflow/__about__.py (deleted)

@@ -1 +0,0 @@
-__version__: str = "0.0.71"