ddeutil-workflow 0.0.81__py3-none-any.whl → 0.0.83__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
ddeutil/workflow/audits.py CHANGED
@@ -49,33 +49,72 @@ import zlib
  from abc import ABC, abstractmethod
  from collections.abc import Iterator
  from datetime import datetime, timedelta
+ from enum import Enum
  from pathlib import Path
  from typing import Annotated, Any, ClassVar, Literal, Optional, Union
  from urllib.parse import ParseResult, urlparse

- from pydantic import BaseModel, Field, TypeAdapter
+ from pydantic import BaseModel, ConfigDict, Field, TypeAdapter
  from pydantic.functional_validators import field_validator, model_validator
  from typing_extensions import Self

  from .__types import DictData
  from .conf import dynamic
- from .traces import TraceManager, get_trace, set_logging
+ from .traces import Trace, get_trace, set_logging

  logger = logging.getLogger("ddeutil.workflow")


+ class ReleaseType(str, Enum):
+     """Release type enumeration for workflow execution modes.
+
+     This enum defines the different types of workflow release that can be
+     triggered, each with specific behavior and use cases.
+
+     Attributes:
+         NORMAL: Standard workflow release execution.
+         RERUN: Re-execution of a previously failed workflow.
+         DRYRUN: Dry-run execution of a workflow.
+         FORCE: Forced execution that bypasses normal conditions.
+     """
+
+     NORMAL = "normal"
+     RERUN = "rerun"
+     FORCE = "force"
+     DRYRUN = "dryrun"
+
+
+ NORMAL = ReleaseType.NORMAL
+ RERUN = ReleaseType.RERUN
+ DRYRUN = ReleaseType.DRYRUN
+ FORCE = ReleaseType.FORCE
+
+
  class AuditData(BaseModel):
+     """Audit data model that serves as the core data for any Audit model that
+     manages logging at the target pointer system or service, such as a
+     file system or a SQLite database.
+     """
+
+     model_config = ConfigDict(use_enum_values=True)
+
      name: str = Field(description="A workflow name.")
      release: datetime = Field(description="A release datetime.")
-     type: str = Field(description="A running type before logging.")
+     type: ReleaseType = Field(
+         default=NORMAL,
+         description=(
+             "An execution type that should be a value in ('normal', 'rerun', "
+             "'force', 'dryrun')."
+         ),
+     )
      context: DictData = Field(
          default_factory=dict,
          description="A context that receive from a workflow execution result.",
      )
+     run_id: str = Field(description="A running ID.")
      parent_run_id: Optional[str] = Field(
          default=None, description="A parent running ID."
      )
-     run_id: str = Field(description="A running ID")
      runs_metadata: DictData = Field(
          default_factory=dict,
          description="A runs metadata that will use to tracking this audit log.",
@@ -89,18 +128,17 @@ class BaseAudit(BaseModel, ABC):
      for logging subclasses like file, sqlite, etc.
      """

-     type: str
+     type: Literal["base"] = "base"
+     logging_name: str = "ddeutil.workflow"
      extras: DictData = Field(
          default_factory=dict,
          description="An extras parameter that want to override core config",
      )

      @field_validator("extras", mode="before")
-     def validate_extras(cls, v: Any) -> DictData:
+     def __prepare_extras(cls, v: Any) -> Any:
          """Validate extras field to ensure it's a dictionary."""
-         if v is None:
-             return {}
-         return v
+         return {} if v is None else v

      @model_validator(mode="after")
      def __model_action(self) -> Self:
@@ -116,13 +154,13 @@ class BaseAudit(BaseModel, ABC):
          self.do_before()

          # NOTE: Start setting log config in this line with cache.
-         set_logging("ddeutil.workflow")
+         set_logging(self.logging_name)
          return self

      @abstractmethod
      def is_pointed(
          self,
-         data: AuditData,
+         data: Any,
          *,
          extras: Optional[DictData] = None,
      ) -> bool:
@@ -216,7 +254,7 @@ class BaseAudit(BaseModel, ABC):
          raise NotImplementedError("Audit should implement `save` method.")


- class FileAudit(BaseAudit):
+ class LocalFileAudit(BaseAudit):
      """File Audit Pydantic Model for saving log data from workflow execution.

      This class inherits from BaseAudit and implements file-based storage
@@ -224,19 +262,25 @@ class FileAudit(BaseAudit):
      in a structured directory hierarchy.

      Attributes:
-         filename_fmt: Class variable defining the filename format for audit files.
+         file_fmt: Class variable defining the filename format for an audit log.
+         file_release_fmt: Class variable defining the filename format for an
+             audit release log.
      """

-     filename_fmt: ClassVar[str] = (
-         "workflow={name}/release={release:%Y%m%d%H%M%S}"
-     )
+     file_fmt: ClassVar[str] = "workflow={name}"
+     file_release_fmt: ClassVar[str] = "release={release:%Y%m%d%H%M%S}"

      type: Literal["file"] = "file"
-     path: str = Field(
-         default="./audits",
+     path: Path = Field(
+         default=Path("./audits"),
          description="A file path that use to manage audit logs.",
      )

+     @field_validator("path", mode="before", json_schema_input_type=str)
+     def __prepare_path(cls, data: Any) -> Any:
+         """Coerce a path that passes as a string to a Path instance."""
+         return Path(data) if isinstance(data, str) else data
+
      def do_before(self) -> None:
          """Create directory of release before saving log file.

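Given the `__prepare_path` validator added above, a string value for `path` should be coerced into a `Path` during validation; a small sketch with a hypothetical path:

    audit = LocalFileAudit(path="./my-audits")
    # The before-mode validator turned the str into a Path instance, and
    # model construction already ran do_before(), creating the directory.
    assert isinstance(audit.path, Path)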
@@ -246,7 +290,10 @@ class FileAudit(BaseAudit):
          Path(self.path).mkdir(parents=True, exist_ok=True)

      def find_audits(
-         self, name: str, *, extras: Optional[DictData] = None
+         self,
+         name: str,
+         *,
+         extras: Optional[DictData] = None,
      ) -> Iterator[AuditData]:
          """Generate audit data found from logs path for a specific workflow name.

@@ -260,7 +307,7 @@ class FileAudit(BaseAudit):
          Raises:
              FileNotFoundError: If the workflow directory does not exist.
          """
-         pointer: Path = Path(self.path) / f"workflow={name}"
+         pointer: Path = self.path / self.file_fmt.format(name=name)
          if not pointer.exists():
              raise FileNotFoundError(f"Pointer: {pointer.absolute()}.")

@@ -293,7 +340,7 @@ class FileAudit(BaseAudit):
              ValueError: If no releases found when release is None.
          """
          if release is None:
-             pointer: Path = Path(self.path) / f"workflow={name}"
+             pointer: Path = self.path / self.file_fmt.format(name=name)
              if not pointer.exists():
                  raise FileNotFoundError(f"Pointer: {pointer.absolute()}.")

@@ -328,21 +375,21 @@ class FileAudit(BaseAudit):
              return AuditData.model_validate(obj=json.load(f))

      def is_pointed(
-         self, data: AuditData, *, extras: Optional[DictData] = None
+         self,
+         data: Any,
+         *,
+         extras: Optional[DictData] = None,
      ) -> bool:
          """Check if the release log already exists at the destination log path.

          Args:
-             data: The workflow name.
+             data (Any): An audit data that can validate with the AuditData model.
              extras: Optional extra parameters to override core config.

          Returns:
              bool: True if the release log exists, False otherwise.
          """
-         # NOTE: Return False if enable writing log flag does not set.
-         if not dynamic("enable_write_audit", extras=extras):
-             return False
-         return self.pointer(data).exists()
+         return self.pointer(AuditData.model_validate(data)).exists()

      def pointer(self, data: AuditData) -> Path:
          """Return release directory path generated from model data.
@@ -350,8 +397,10 @@ class FileAudit(BaseAudit):
          Returns:
              Path: The directory path for the current workflow and release.
          """
-         return Path(self.path) / self.filename_fmt.format(
-             name=data.name, release=data.release
+         return (
+             self.path
+             / self.file_fmt.format(**data.model_dump(by_alias=True))
+             / self.file_release_fmt.format(**data.model_dump(by_alias=True))
          )

      def save(self, data: Any, excluded: Optional[list[str]] = None) -> Self:
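With the split `file_fmt`/`file_release_fmt` class variables, `pointer` should still resolve to the same two-level layout the old `filename_fmt` produced; an illustrative sketch with hypothetical values:

    data = AuditData(
        name="wf-demo",
        release=datetime(2024, 1, 1, 10, 30),
        run_id="demo-run-id",
    )
    audit = LocalFileAudit(path="./audits")
    # Expected layout: audits/workflow=wf-demo/release=20240101103000
    print(audit.pointer(data))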
@@ -365,7 +414,7 @@ class FileAudit(BaseAudit):
              Self: The audit instance after saving.
          """
          audit = AuditData.model_validate(data)
-         trace: TraceManager = get_trace(
+         trace: Trace = get_trace(
              audit.run_id,
              parent_run_id=audit.parent_run_id,
              extras=self.extras,
@@ -427,7 +476,7 @@ class FileAudit(BaseAudit):
          return cleaned_count


- class SQLiteAudit(BaseAudit):  # pragma: no cov
+ class LocalSQLiteAudit(BaseAudit):  # pragma: no cov
      """SQLite Audit model for database-based audit storage.

      This class inherits from BaseAudit and implements SQLite database storage
@@ -435,11 +484,11 @@ class SQLiteAudit(BaseAudit): # pragma: no cov

      Attributes:
          table_name: Class variable defining the database table name.
-         schemas: Class variable defining the database schema.
+         ddl: Class variable defining the database schema.
      """

      table_name: ClassVar[str] = "audits"
-     schemas: ClassVar[
+     ddl: ClassVar[
          str
      ] = """
          CREATE TABLE IF NOT EXISTS audits (
@@ -457,22 +506,21 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
      """

      type: Literal["sqlite"] = "sqlite"
-     path: str
+     path: Path = Field(
+         default=Path("./audits.db"),
+         description="A SQLite filepath.",
+     )

-     def _ensure_table_exists(self) -> None:
+     def do_before(self) -> None:
          """Ensure the audit table exists in the database."""
-         audit_url = dynamic("audit_url", extras=self.extras)
-         if audit_url is None or not audit_url.path:
+         if self.path.is_dir():
              raise ValueError(
-                 "SQLite audit_url must specify a database file path"
+                 "SQLite path must specify a database file path, not a directory."
              )

-         audit_url_parse: ParseResult = urlparse(audit_url)
-         db_path = Path(audit_url_parse.path)
-         db_path.parent.mkdir(parents=True, exist_ok=True)
-
-         with sqlite3.connect(db_path) as conn:
-             conn.execute(self.schemas)
+         self.path.parent.mkdir(parents=True, exist_ok=True)
+         with sqlite3.connect(self.path) as conn:
+             conn.execute(self.ddl)
              conn.commit()

      def is_pointed(
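Since `do_before` replaces `_ensure_table_exists`, simply constructing the model against a file path should now bootstrap the database; a hedged sketch with a hypothetical path:

    audit = LocalSQLiteAudit(path=Path("./data/audits.db"))
    # Validation ran do_before(): the parent directory was created and the
    # `ddl` CREATE TABLE IF NOT EXISTS statement was executed and committed.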
@@ -655,7 +703,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
              ValueError: If SQLite database is not properly configured.
          """
          audit = AuditData.model_validate(data)
-         trace: TraceManager = get_trace(
+         trace: Trace = get_trace(
              audit.run_id,
              parent_run_id=audit.parent_run_id,
              extras=self.extras,
@@ -739,27 +787,31 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
          return cursor.rowcount


+ class PostgresAudit(BaseAudit, ABC): ...  # pragma: no cov
+
+
  Audit = Annotated[
      Union[
-         FileAudit,
-         SQLiteAudit,
+         LocalFileAudit,
+         LocalSQLiteAudit,
      ],
      Field(discriminator="type"),
  ]


  def get_audit(
-     *,
+     audit_conf: Optional[DictData] = None,
      extras: Optional[DictData] = None,
  ) -> Audit:  # pragma: no cov
      """Get an audit model dynamically based on the config audit path value.

      Args:
+         audit_conf (DictData): An audit config data that will override the
+             core `audit_conf` value before validation.
          extras: Optional extra parameters to override the core config.

      Returns:
          Audit: The appropriate audit model class based on configuration.
      """
-     audit_conf = dynamic("audit_conf", extras=extras)
+     audit_conf = dynamic("audit_conf", f=audit_conf, extras=extras)
      model = TypeAdapter(Audit).validate_python(audit_conf | {"extras": extras})
      return model
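An illustrative call of the reworked `get_audit`: the config mapping can now be passed directly, while `None` still falls back to the core `audit_conf` value through `dynamic` (the mapping values are hypothetical):

    audit = get_audit({"type": "file", "path": "./audits"})
    assert isinstance(audit, LocalFileAudit)

    # Without an argument, dynamic("audit_conf", ...) supplies the core config.
    default_audit = get_audit(extras={})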
ddeutil/workflow/conf.py CHANGED
@@ -176,17 +176,6 @@ class YamlParser:
      """Base Load object that use to search config data by given some identity
      value like name of `Workflow` or `Crontab` templates.

-     :param name: (str) A name of key of config data that read with YAML
-         Environment object.
-     :param path: (Path) A config path object.
-     :param externals: (DictData) An external config data that want to add to
-         loaded config data.
-     :param extras: (DictDdata) An extra parameters that use to override core
-         config values.
-
-     :raise ValueError: If the data does not find on the config path with the
-         name parameter.
-
      Noted:
          The config data should have `type` key for modeling validation that
          make this loader know what is config should to do pass to.
@@ -209,6 +198,23 @@ class YamlParser:
          extras: Optional[DictData] = None,
          obj: Optional[Union[object, str]] = None,
      ) -> None:
+         """Main constructor function.
+
+         Args:
+             name (str): A name of a key of config data that is read with the
+                 YAML Environment object.
+             path (Path): A config path object.
+             externals (DictData): An external config data that will be added
+                 to the loaded config data.
+             extras (DictData): Extra parameters that are used to override
+                 core config values.
+             obj (object | str): An object that will be validated against the
+                 `type` key before keeping the config data.
+
+         Raises:
+             ValueError: If the data is not found on the config path with the
+                 name parameter.
+         """
          self.path: Path = Path(dynamic("conf_path", f=path, extras=extras))
          self.externals: DictData = externals or {}
          self.extras: DictData = extras or {}
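A sketch of constructing the parser per the documented signature; the config name and path here are hypothetical:

    # Searches ./conf for a config entry keyed "stream-wf" and raises
    # ValueError if the name cannot be found on that path.
    parser = YamlParser("stream-wf", path=Path("./conf"))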
@@ -242,17 +248,19 @@ class YamlParser:
          """Find data with specific key and return the latest modify date data if
          this key exists multiple files.

-         :param name: (str) A name of data that want to find.
-         :param path: (Path) A config path object.
-         :param paths: (list[Path]) A list of config path object.
-         :param obj: (object | str) An object that want to validate matching
-             before return.
-         :param extras: (DictData) An extra parameter that use to override core
-             config values.
-         :param ignore_filename: (str) An ignore filename. Default is
-             ``.confignore`` filename.
+         Args:
+             name (str): A name of the data to find.
+             path (Path): A config path object.
+             paths (list[Path]): A list of config path objects.
+             obj (object | str): An object that is used to validate the
+                 matching result before returning.
+             extras (DictData): An extra parameter that is used to override
+                 core config values.
+             ignore_filename (str): An ignore filename. Default is
+                 ``.confignore`` filename.

-         :rtype: DictData
+         Returns:
+             DictData: A config data that was found on the searching paths.
          """
          path: Path = dynamic("conf_path", f=path, extras=extras)
          if not paths:
@@ -317,7 +325,9 @@ class YamlParser:
                  ``.confignore`` filename.
              tags (list[str]): A list of tag that want to filter.

-         :rtype: Iterator[tuple[str, DictData]]
+         Returns:
+             Iterator[tuple[str, DictData]]: An iterator of config data that
+                 was found on the searching paths.
          """
          excluded: list[str] = excluded or []
          tags: list[str] = tags or []
@@ -353,8 +363,11 @@ class YamlParser:
              ):
                  continue

-             if (t := data.get("type")) and t == obj_type:
-
+             if (
+                 # isinstance(data, dict) and
+                 (t := data.get("type"))
+                 and t == obj_type
+             ):
                  # NOTE: Start adding file metadata.
                  file_stat: os.stat_result = file.lstat()
                  data["created_at"] = file_stat.st_ctime
@@ -397,6 +410,13 @@ class YamlParser:
      def filter_yaml(cls, file: Path, name: Optional[str] = None) -> DictData:
          """Read a YAML file context from an input file path and specific name.

+         Notes:
+             The data returned from the reading context will be keyed by its
+             config name if a searching name is not passed to this function.
+
+                 input: {"name": "foo", "type": "Some"}
+                 output: {"foo": {"name": "foo", "type": "Some"}}
+
          Args:
              file (Path): A file path that want to extract YAML context.
              name (str): A key name that search on a YAML context.
@@ -413,7 +433,7 @@ class YamlParser:
                  return (
                      values[name] | {"name": name} if name in values else {}
                  )
-             return values
+             return {values["name"]: values} if "name" in values else values
          return {}

      @cached_property
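The new fallback changes what `filter_yaml` returns for a single-document file when no search name is passed; a comment sketch following the Notes added above (the file content is illustrative):

    # File content:     {"name": "foo", "type": "Some"}
    # filter_yaml(file) -> {"foo": {"name": "foo", "type": "Some"}}
    # so callers merging many files can key the results by config name.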
ddeutil/workflow/errors.py CHANGED
@@ -166,6 +166,15 @@ class StageCancelError(StageError): ...
  class StageSkipError(StageError): ...


+ class StageNestedError(StageError): ...
+
+
+ class StageNestedCancelError(StageNestedError): ...
+
+
+ class StageNestedSkipError(StageNestedError): ...
+
+
  class JobError(BaseError): ...


@@ -175,6 +184,9 @@ class JobCancelError(JobError): ...
  class JobSkipError(JobError): ...


+ class EventError(BaseError): ...
+
+
  class WorkflowError(BaseError): ...


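The nested-stage errors share `StageNestedError` as a base, so a handler can catch the whole nested family in one clause; an illustrative sketch (the message text is hypothetical):

    try:
        raise StageNestedSkipError("nested stage was skipped")
    except StageNestedError as err:
        # Also catches StageNestedCancelError; a plain StageSkipError is
        # not part of the nested family and would not be caught here.
        print(f"nested-stage signal: {err}")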
ddeutil/workflow/event.py CHANGED
@@ -16,13 +16,9 @@ Attributes:
      Interval: Type alias for scheduling intervals ('daily', 'weekly', 'monthly')

  Classes:
+     CrontabValue:
      Crontab: Main cron-based event scheduler.
      CrontabYear: Enhanced cron scheduler with year constraints.
-     ReleaseEvent: Release-based event triggers.
-     FileEvent: File system monitoring triggers.
-     WebhookEvent: API/webhook-based triggers.
-     DatabaseEvent: Database change monitoring triggers.
-     SensorEvent: Sensor-based event monitoring.

  Example:
      >>> from ddeutil.workflow.event import Crontab
@@ -50,6 +46,8 @@ from pydantic_extra_types.timezone_name import TimeZoneName

  from .__cron import WEEKDAYS, CronJob, CronJobYear, CronRunner, Options
  from .__types import DictData
+ from .errors import EventError
+ from .utils import UTC, replace_sec

  Interval = Literal["daily", "weekly", "monthly"]

@@ -393,19 +391,16 @@ class Event(BaseModel):
          )

      @field_validator("schedule", mode="after")
-     def __on_no_dup_and_reach_limit__(
-         cls,
-         value: list[Crontab],
-     ) -> list[Crontab]:
+     def __prepare_schedule__(cls, value: list[Crontab]) -> list[Crontab]:
          """Validate the on fields should not contain duplicate values and if it
          contains the every minute value more than one value, it will remove to
          only one value.

          Args:
-             value: A list of on object.
+             value (list[Crontab]): A list of on object.

          Returns:
-             list[CronJobYear | Crontab]: The validated list of Crontab objects.
+             list[Crontab]: The validated list of Crontab objects.

          Raises:
              ValueError: If it has some duplicate value.
@@ -434,3 +429,31 @@ class Event(BaseModel):
                  "The number of the on should not more than 10 crontabs."
              )
          return value
+
+     def validate_dt(self, dt: datetime) -> datetime:
+         """Validate the release datetime by replacing its second and
+         microsecond with 0 and normalizing it to UTC before checking that it
+         matches the set `on` field.
+
+         Args:
+             dt (datetime): A datetime object to validate.
+
+         Returns:
+             datetime: The validated release datetime.
+         """
+         if dt.tzinfo is None:
+             dt = dt.replace(tzinfo=UTC)
+
+         release: datetime = replace_sec(dt.astimezone(UTC))
+
+         # NOTE: Return itself if schedule event does not set.
+         if not self.schedule:
+             return release
+
+         for on in self.schedule:
+             if release == on.cronjob.schedule(release, tz=UTC).next:
+                 return release
+         raise EventError(
+             f"This datetime, {release}, does not match this event schedule."
+         )
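A sketch of the new `Event.validate_dt` contract, assuming an event with a single daily-midnight crontab; the construction shape of `Crontab` here is an assumption, not taken from this diff:

    from datetime import datetime

    # Hypothetical event with one daily-midnight crontab.
    event = Event(schedule=[Crontab.model_validate({"cronjob": "0 0 * * *"})])

    # A naive datetime is assumed UTC; seconds/microseconds are trimmed first.
    release = event.validate_dt(datetime(2024, 1, 1, 0, 0, 30))

    # A datetime that matches no crontab in `schedule` raises EventError:
    # event.validate_dt(datetime(2024, 1, 1, 12, 30))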