ddeutil-workflow 0.0.79__py3-none-any.whl → 0.0.80__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -1 +1 @@
- __version__: str = "0.0.79"
+ __version__: str = "0.0.80"
@@ -52,7 +52,7 @@ from .__types import DictData, DictStr, Matrix, Re, TupleStr
  from .audits import (
      Audit,
      FileAudit,
-     get_audit_model,
+     get_audit,
  )
  from .conf import *
  from .errors import (
@@ -10,7 +10,7 @@ from fastapi import APIRouter, Path, Query
  from fastapi import status as st
  from fastapi.responses import UJSONResponse

- from ...audits import get_audit_model
+ from ...audits import get_audit
  from ...result import Result

  router = APIRouter(
@@ -90,7 +90,7 @@ async def get_audits():
      """
      return {
          "message": "Getting audit logs",
-         "audits": list(get_audit_model().find_audits(name="demo")),
+         "audits": list(get_audit().find_audits(name="demo")),
      }


@@ -109,7 +109,7 @@ async def get_audit_with_workflow(workflow: str):
      """
      return {
          "message": f"Getting audit logs with workflow name {workflow}",
-         "audits": list(get_audit_model().find_audits(name="demo")),
+         "audits": list(get_audit().find_audits(name="demo")),
      }


@@ -136,7 +136,7 @@ async def get_audit_with_workflow_release(
              f"Getting audit logs with workflow name {workflow} and release "
              f"{release}"
          ),
-         "audits": list(get_audit_model().find_audits(name="demo")),
+         "audits": list(get_audit().find_audits(name="demo")),
      }


@@ -167,5 +167,5 @@ async def get_audit_with_workflow_release_run_id(
              f"Getting audit logs with workflow name {workflow}, release "
              f"{release}, and running ID {run_id}"
          ),
-         "audits": list(get_audit_model().find_audits(name="demo")),
+         "audits": list(get_audit().find_audits(name="demo")),
      }
@@ -16,7 +16,7 @@ from fastapi.responses import UJSONResponse
  from pydantic import BaseModel

  from ...__types import DictData
- from ...audits import Audit, get_audit_model
+ from ...audits import Audit, get_audit
  from ...conf import YamlParser
  from ...result import Result
  from ...workflow import Workflow
@@ -100,7 +100,7 @@ async def get_workflow_audits(name: str):
                      exclude_none=False,
                      exclude_unset=True,
                  )
-                 for audit in get_audit_model().find_audits(name=name)
+                 for audit in get_audit().find_audits(name=name)
              ],
          }
      except FileNotFoundError:
@@ -114,7 +114,7 @@ async def get_workflow_audits(name: str):
  async def get_workflow_release_audit(name: str, release: str):
      """Get Workflow audit log with an input release value."""
      try:
-         audit: Audit = get_audit_model().find_audit_with_release(
+         audit: Audit = get_audit().find_audit_with_release(
              name=name,
              release=datetime.strptime(release, "%Y%m%d%H%M%S"),
          )
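A minimal sketch of the renamed factory as the route modules now use it; `get_audit` returns a configured `Audit` instance rather than a model class, and the `wf-demo` name here is illustrative:

```python
from ddeutil.workflow.audits import get_audit

# The backend (FileAudit or SQLiteAudit) is resolved from the audit config.
audit = get_audit()
for record in audit.find_audits(name="wf-demo"):
    print(record.release)
```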
@@ -32,10 +32,8 @@ Functions:

  Example:

-     >>> from ddeutil.workflow.audits import get_audit_model
-     >>> audit = get_audit_model(run_id="run-123")
-     >>> audit.info("Workflow execution started")
-     >>> audit.success("Workflow completed successfully")
+     >>> from ddeutil.workflow.audits import get_audit
+     >>> audit = get_audit(run_id="run-123")

  Note:
      Audit instances are automatically configured based on the workflow
@@ -52,11 +50,10 @@ from abc import ABC, abstractmethod
  from collections.abc import Iterator
  from datetime import datetime, timedelta
  from pathlib import Path
- from typing import Any, ClassVar, Optional, TypeVar, Union
+ from typing import Annotated, Any, ClassVar, Literal, Optional, Union
  from urllib.parse import ParseResult, urlparse

- from pydantic import BaseModel, Field
- from pydantic.functional_serializers import field_serializer
+ from pydantic import BaseModel, Field, TypeAdapter
  from pydantic.functional_validators import field_validator, model_validator
  from typing_extensions import Self

@@ -67,17 +64,7 @@ from .traces import TraceManager, get_trace, set_logging
  logger = logging.getLogger("ddeutil.workflow")


- class BaseAudit(BaseModel, ABC):
-     """Base Audit Pydantic Model with abstraction class property.
-
-     This model implements only model fields and should be used as a base class
-     for logging subclasses like file, sqlite, etc.
-     """
-
-     extras: DictData = Field(
-         default_factory=dict,
-         description="An extras parameter that want to override core config",
-     )
+ class AuditData(BaseModel):
      name: str = Field(description="A workflow name.")
      release: datetime = Field(description="A release datetime.")
      type: str = Field(description="A running type before logging.")
@@ -94,6 +81,20 @@ class BaseAudit(BaseModel, ABC):
          description="A runs metadata that will use to tracking this audit log.",
      )

+
+ class BaseAudit(BaseModel, ABC):
+     """Base Audit Pydantic Model with abstraction class property.
+
+     This model implements only model fields and should be used as a base class
+     for logging subclasses like file, sqlite, etc.
+     """
+
+     type: str
+     extras: DictData = Field(
+         default_factory=dict,
+         description="An extras parameter that want to override core config",
+     )
+
      @field_validator("extras", mode="before")
      def validate_extras(cls, v: Any) -> DictData:
          """Validate extras field to ensure it's a dictionary."""
@@ -118,20 +119,17 @@ class BaseAudit(BaseModel, ABC):
          set_logging("ddeutil.workflow")
          return self

-     @classmethod
      @abstractmethod
      def is_pointed(
-         cls,
-         name: str,
-         release: datetime,
+         self,
+         data: AuditData,
          *,
          extras: Optional[DictData] = None,
      ) -> bool:
          """Check if audit data exists for the given workflow and release.

          Args:
-             name: The workflow name to check.
-             release: The release datetime to check.
+             data:
              extras: Optional extra parameters to override core config.

          Returns:
@@ -144,10 +142,9 @@ class BaseAudit(BaseModel, ABC):
              "Audit should implement `is_pointed` class-method"
          )

-     @classmethod
      @abstractmethod
      def find_audits(
-         cls,
+         self,
          name: str,
          *,
          extras: Optional[DictData] = None,
@@ -168,10 +165,9 @@ class BaseAudit(BaseModel, ABC):
              "Audit should implement `find_audits` class-method"
          )

-     @classmethod
      @abstractmethod
      def find_audit_with_release(
-         cls,
+         self,
          name: str,
          release: Optional[datetime] = None,
          *,
@@ -203,11 +199,12 @@ class BaseAudit(BaseModel, ABC):

      @abstractmethod
      def save(
-         self, excluded: Optional[list[str]] = None
+         self, data: Any, excluded: Optional[list[str]] = None
      ) -> Self:  # pragma: no cov
          """Save this model logging to target logging store.

          Args:
+             data:
              excluded: Optional list of field names to exclude from saving.

          Returns:
@@ -234,20 +231,11 @@ class FileAudit(BaseAudit):
          "workflow={name}/release={release:%Y%m%d%H%M%S}"
      )

-     @field_serializer("extras")
-     def __serialize_extras(self, value: DictData) -> DictData:
-         """Serialize extras field, converting ParseResult objects to URLs.
-
-         Args:
-             value: The extras dictionary to serialize.
-
-         Returns:
-             DictData: Serialized extras with ParseResult objects converted to URLs.
-         """
-         return {
-             k: (v.geturl() if isinstance(v, ParseResult) else v)
-             for k, v in value.items()
-         }
+     type: Literal["file"] = "file"
+     path: str = Field(
+         default="./audits",
+         description="A file path that use to manage audit logs.",
+     )

      def do_before(self) -> None:
          """Create directory of release before saving log file.
@@ -255,12 +243,11 @@ class FileAudit(BaseAudit):
          This method ensures the target directory exists before attempting
          to save audit log files.
          """
-         self.pointer().mkdir(parents=True, exist_ok=True)
+         Path(self.path).mkdir(parents=True, exist_ok=True)

-     @classmethod
      def find_audits(
-         cls, name: str, *, extras: Optional[DictData] = None
-     ) -> Iterator[Self]:
+         self, name: str, *, extras: Optional[DictData] = None
+     ) -> Iterator[AuditData]:
          """Generate audit data found from logs path for a specific workflow name.

          Args:
@@ -273,27 +260,21 @@ class FileAudit(BaseAudit):
          Raises:
              FileNotFoundError: If the workflow directory does not exist.
          """
-         audit_url = dynamic("audit_url", extras=extras)
-         if audit_url is None:
-             raise ValueError("audit_url configuration is not set")
-
-         audit_url_parse: ParseResult = urlparse(audit_url)
-         pointer: Path = Path(audit_url_parse.path) / f"workflow={name}"
+         pointer: Path = Path(self.path) / f"workflow={name}"
          if not pointer.exists():
              raise FileNotFoundError(f"Pointer: {pointer.absolute()}.")

          for file in pointer.glob("./release=*/*.log"):
              with file.open(mode="r", encoding="utf-8") as f:
-                 yield cls.model_validate(obj=json.load(f))
+                 yield AuditData.model_validate(obj=json.load(f))

-     @classmethod
      def find_audit_with_release(
-         cls,
+         self,
          name: str,
          release: Optional[datetime] = None,
          *,
          extras: Optional[DictData] = None,
-     ) -> Self:
+     ) -> AuditData:
          """Return audit data found from logs path for specific workflow and release.

          If a release is not provided, it will return the latest release from
@@ -305,20 +286,14 @@ class FileAudit(BaseAudit):
              extras: Optional extra parameters to override core config.

          Returns:
-             Self: The audit instance for the specified workflow and release.
+             AuditData: The audit instance for the specified workflow and release.

          Raises:
              FileNotFoundError: If the specified workflow/release directory does not exist.
              ValueError: If no releases found when release is None.
          """
          if release is None:
-             # Find the latest release
-             audit_url = dynamic("audit_url", extras=extras)
-             if audit_url is None:
-                 raise ValueError("audit_url configuration is not set")
-
-             audit_url_parse: ParseResult = urlparse(audit_url)
-             pointer: Path = Path(audit_url_parse.path) / f"workflow={name}"
+             pointer: Path = Path(self.path) / f"workflow={name}"
              if not pointer.exists():
                  raise FileNotFoundError(f"Pointer: {pointer.absolute()}.")

@@ -332,13 +307,8 @@ class FileAudit(BaseAudit):
                  pointer.glob("./release=*"), key=os.path.getctime
              )
          else:
-             audit_url = dynamic("audit_url", extras=extras)
-             if audit_url is None:
-                 raise ValueError("audit_url configuration is not set")
-
-             audit_url_parse: ParseResult = urlparse(audit_url)
              release_pointer: Path = (
-                 Path(audit_url_parse.path)
+                 Path(self.path)
                  / f"workflow={name}/release={release:%Y%m%d%H%M%S}"
              )
              if not release_pointer.exists():
@@ -355,21 +325,15 @@ class FileAudit(BaseAudit):
              release_pointer.glob("./*.log"), key=os.path.getctime
          )
          with latest_file.open(mode="r", encoding="utf-8") as f:
-             return cls.model_validate(obj=json.load(f))
+             return AuditData.model_validate(obj=json.load(f))

-     @classmethod
      def is_pointed(
-         cls,
-         name: str,
-         release: datetime,
-         *,
-         extras: Optional[DictData] = None,
+         self, data: AuditData, *, extras: Optional[DictData] = None
      ) -> bool:
          """Check if the release log already exists at the destination log path.

          Args:
-             name: The workflow name.
-             release: The release datetime.
+             data: The workflow name.
              extras: Optional extra parameters to override core config.

          Returns:
@@ -378,46 +342,32 @@ class FileAudit(BaseAudit):
          # NOTE: Return False if enable writing log flag does not set.
          if not dynamic("enable_write_audit", extras=extras):
              return False
+         return self.pointer(data).exists()

-         # NOTE: create pointer path that use the same logic of pointer method.
-         audit_url: Optional[str] = dynamic("audit_url", extras=extras)
-         if audit_url is None:
-             return False
-
-         audit_url_parse: ParseResult = urlparse(audit_url)
-         pointer: Path = Path(audit_url_parse.path) / cls.filename_fmt.format(
-             name=name, release=release
-         )
-
-         return pointer.exists()
-
-     def pointer(self) -> Path:
+     def pointer(self, data: AuditData) -> Path:
          """Return release directory path generated from model data.

          Returns:
              Path: The directory path for the current workflow and release.
          """
-         audit_url = dynamic("audit_url", extras=self.extras)
-         if audit_url is None:
-             raise ValueError("audit_url configuration is not set")
-
-         audit_url_parse: ParseResult = urlparse(audit_url)
-         return Path(audit_url_parse.path) / self.filename_fmt.format(
-             name=self.name, release=self.release
+         return Path(self.path) / self.filename_fmt.format(
+             name=data.name, release=data.release
          )

-     def save(self, excluded: Optional[list[str]] = None) -> Self:
+     def save(self, data: Any, excluded: Optional[list[str]] = None) -> Self:
          """Save logging data received from workflow execution result.

          Args:
+             data:
              excluded: Optional list of field names to exclude from saving.

          Returns:
              Self: The audit instance after saving.
          """
+         audit = AuditData.model_validate(data)
          trace: TraceManager = get_trace(
-             self.run_id,
-             parent_run_id=self.parent_run_id,
+             audit.run_id,
+             parent_run_id=audit.parent_run_id,
              extras=self.extras,
          )

@@ -426,19 +376,21 @@ class FileAudit(BaseAudit):
              trace.debug("[AUDIT]: Skip writing audit log cause config was set.")
              return self

-         log_file: Path = (
-             self.pointer() / f"{self.parent_run_id or self.run_id}.log"
-         )
+         pointer: Path = self.pointer(data=audit)
+         if not pointer.exists():
+             pointer.mkdir(parents=True)
+
+         log_file: Path = pointer / f"{audit.parent_run_id or audit.run_id}.log"

          # NOTE: Convert excluded list to set for pydantic compatibility
          exclude_set = set(excluded) if excluded else None
          trace.info(
              f"[AUDIT]: Start writing audit log with "
-             f"release: {self.release:%Y%m%d%H%M%S}"
+             f"release: {audit.release:%Y%m%d%H%M%S}"
          )
          log_file.write_text(
              json.dumps(
-                 self.model_dump(exclude=exclude_set),
+                 audit.model_dump(exclude=exclude_set),
                  default=str,
                  indent=2,
              ),
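With `path` now a model field, `FileAudit.save` derives the log location purely from the `AuditData` payload. A sketch of the resulting layout under the default `./audits` path, using the `filename_fmt` string from this diff (names are illustrative):

```python
from datetime import datetime
from pathlib import Path

path = "./audits"  # FileAudit.path default
filename_fmt = "workflow={name}/release={release:%Y%m%d%H%M%S}"

pointer = Path(path) / filename_fmt.format(
    name="wf-demo", release=datetime(2024, 1, 1)
)
# save() writes f"{audit.parent_run_id or audit.run_id}.log" under pointer:
print(pointer / "run-123.log")
# -> audits/workflow=wf-demo/release=20240101000000/run-123.log
```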
@@ -504,6 +456,9 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
          )
      """

+     type: Literal["sqlite"] = "sqlite"
+     path: str
+
      def _ensure_table_exists(self) -> None:
          """Ensure the audit table exists in the database."""
          audit_url = dynamic("audit_url", extras=self.extras)
@@ -520,19 +475,16 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
              conn.execute(self.schemas)
              conn.commit()

-     @classmethod
      def is_pointed(
-         cls,
-         name: str,
-         release: datetime,
+         self,
+         data: AuditData,
          *,
          extras: Optional[DictData] = None,
      ) -> bool:
          """Check if audit data exists for the given workflow and release.

          Args:
-             name: The workflow name to check.
-             release: The release datetime to check.
+             data:
              extras: Optional extra parameters to override core config.

          Returns:
@@ -553,7 +505,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
          with sqlite3.connect(db_path) as conn:
              cursor = conn.execute(
                  "SELECT COUNT(*) FROM audits WHERE workflow = ? AND release = ?",
-                 (name, release.isoformat()),
+                 (data.name, data.release.isoformat()),
              )
              return cursor.fetchone()[0] > 0

@@ -592,7 +544,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
                  context = json.loads(cls._decompress_data(row[3]))
                  metadata = json.loads(cls._decompress_data(row[6]))

-                 yield cls(
+                 yield AuditData(
                      name=row[0],
                      release=datetime.fromisoformat(row[1]),
                      type=row[2],
@@ -600,7 +552,6 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
                      parent_run_id=row[4],
                      run_id=row[5],
                      runs_metadata=metadata,
-                     extras=extras or {},
                  )

      @classmethod
@@ -610,7 +561,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
          release: Optional[datetime] = None,
          *,
          extras: Optional[DictData] = None,
-     ) -> Self:
+     ) -> AuditData:
          """Find audit data for a specific workflow and release.

          Args:
@@ -656,7 +607,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
              context = json.loads(cls._decompress_data(row[3]))
              metadata = json.loads(cls._decompress_data(row[6]))

-             return cls(
+             return AuditData(
                  name=row[0],
                  release=datetime.fromisoformat(row[1]),
                  type=row[2],
@@ -664,7 +615,6 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
                  parent_run_id=row[4],
                  run_id=row[5],
                  runs_metadata=metadata,
-                 extras=extras or {},
              )

      @staticmethod
@@ -691,10 +641,11 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
          """
          return zlib.decompress(data).decode("utf-8")

-     def save(self, excluded: Optional[list[str]] = None) -> Self:
+     def save(self, data: Any, excluded: Optional[list[str]] = None) -> Self:
          """Save logging data received from workflow execution result.

          Args:
+             data: Any
              excluded: Optional list of field names to exclude from saving.

          Returns:
@@ -703,9 +654,10 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
          Raises:
              ValueError: If SQLite database is not properly configured.
          """
+         audit = AuditData.model_validate(data)
          trace: TraceManager = get_trace(
-             self.run_id,
-             parent_run_id=self.parent_run_id,
+             audit.run_id,
+             parent_run_id=audit.parent_run_id,
              extras=self.extras,
          )

@@ -726,7 +678,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov

          # Prepare data for storage
          exclude_set = set(excluded) if excluded else None
-         model_data = self.model_dump(exclude=exclude_set)
+         model_data = audit.model_dump(exclude=exclude_set)

          # Compress context and metadata
          context_blob = self._compress_data(
@@ -744,12 +696,12 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
                  VALUES (?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
                  """,
                  (
-                     self.name,
-                     self.release.isoformat(),
-                     self.type,
+                     audit.name,
+                     audit.release.isoformat(),
+                     audit.type,
                      context_blob,
-                     self.parent_run_id,
-                     self.run_id,
+                     audit.parent_run_id,
+                     audit.run_id,
                      metadata_blob,
                  ),
              )
@@ -787,50 +739,27 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
              return cursor.rowcount


- Audit = Union[
-     FileAudit,
-     SQLiteAudit,
+ Audit = Annotated[
+     Union[
+         FileAudit,
+         SQLiteAudit,
+     ],
+     Field(discriminator="type"),
  ]
- AuditType = TypeVar("AuditType", bound=BaseAudit)


- def get_audit_model(
+ def get_audit(
      *,
      extras: Optional[DictData] = None,
- ) -> type[AuditType]:  # pragma: no cov
+ ) -> Audit:  # pragma: no cov
      """Get an audit model dynamically based on the config audit path value.

      Args:
          extras: Optional extra parameters to override the core config.
-             This function allow you to pass `audit_model_mapping` for override
-             the audit model object with your custom model.

      Returns:
-         type[Audit]: The appropriate audit model class based on configuration.
-
-     Raises:
-         NotImplementedError: If the audit URL scheme is not supported.
+         Audit: The appropriate audit model class based on configuration.
      """
-     # NOTE: Allow you to override audit model by the extra parameter.
-     map_audit_models: dict[str, type[AuditType]] = (extras or {}).get(
-         "audit_model_mapping", {}
-     )
-
-     audit_url = dynamic("audit_url", extras=extras)
-     if audit_url is None:
-         return map_audit_models.get("file", FileAudit)
-
-     audit_url_parse: ParseResult = urlparse(audit_url)
-     if not audit_url_parse.scheme:
-         return map_audit_models.get("file", FileAudit)
-
-     if audit_url_parse.scheme == "sqlite" or (
-         audit_url_parse.scheme == "file"
-         and Path(audit_url_parse.path).is_file()
-     ):
-         return map_audit_models.get("sqlite", SQLiteAudit)
-     elif audit_url_parse.scheme != "file":
-         raise NotImplementedError(
-             f"Does not implement the audit model support for URL: {audit_url_parse}"
-         )
-     return map_audit_models.get("file", FileAudit)
+     audit_conf = dynamic("audit_conf", extras=extras)
+     model = TypeAdapter(Audit).validate_python(audit_conf | {"extras": extras})
+     return model
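The scheme-sniffing dispatch is gone: backend selection is now plain pydantic validation over a discriminated union. A minimal sketch of what `TypeAdapter(Audit)` does with the two `type` values this diff defines:

```python
from pydantic import TypeAdapter

from ddeutil.workflow.audits import Audit

adapter = TypeAdapter(Audit)

# The `type` key selects the concrete model via the discriminator.
file_audit = adapter.validate_python({"type": "file", "path": "./audits"})
sqlite_audit = adapter.validate_python({"type": "sqlite", "path": "./audits.db"})
```

An unknown `type` now fails validation rather than raising `NotImplementedError`, and the old `audit_model_mapping` extras hook has no replacement in this diff.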
ddeutil/workflow/conf.py CHANGED
@@ -155,8 +155,10 @@ class Config: # pragma: no cov
      )

      @property
-     def audit_url(self) -> str:
-         return env("LOG_AUDIT_URL", "file:./audits")
+     def audit_conf(self) -> str:
+         return json.loads(
+             env("LOG_AUDIT_URL", '{"type": "file", "path": "./audits"}')
+         )

      @property
      def enable_write_audit(self) -> bool:
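The audit pointer is now configured as a JSON object rather than a URL. Note that while the property is renamed to `audit_conf`, it still reads the `LOG_AUDIT_URL` environment variable, which the project's `WORKFLOW_{component}_{name}` convention presumably resolves to `WORKFLOW_LOG_AUDIT_URL`. A hedged sketch of overriding it:

```python
import os

# Assumed full variable name per the WORKFLOW_{component}_{name} convention;
# the value must be a JSON object matching the Audit discriminated union.
os.environ["WORKFLOW_LOG_AUDIT_URL"] = '{"type": "sqlite", "path": "./audits.db"}'
```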
ddeutil/workflow/job.py CHANGED
@@ -1401,7 +1401,6 @@ def docker_execution(
          ),
          extras=job.extras,
      )
-     print(params)
      return Result(
          run_id=run_id,
          parent_run_id=parent_run_id,
@@ -107,7 +107,9 @@ class Message(BaseModel):
      with emoji support and categorization.
      """

-     name: Optional[str] = Field(default=None, description="A prefix name.")
+     name: Optional[str] = Field(
+         default=None, description="A prefix name of message."
+     )
      message: Optional[str] = Field(default=None, description="A message.")

      @classmethod
@@ -952,9 +954,8 @@ class SQLiteHandler(BaseHandler): # pragma: no cov
          except Exception as e:
              logger.error(f"Failed to read from SQLite database: {e}")

-     @classmethod
      def find_trace_with_id(
-         cls,
+         self,
          run_id: str,
          force_raise: bool = True,
          *,
@@ -962,17 +963,7 @@ class SQLiteHandler(BaseHandler): # pragma: no cov
          extras: Optional[DictData] = None,
      ) -> TraceData:
          """Find trace log with specific run ID from SQLite database."""
-         if path is None:
-             url = dynamic("trace_url", extras=extras)
-             if (
-                 url is not None
-                 and hasattr(url, "path")
-                 and getattr(url, "path", None)
-             ):
-                 path = Path(url.path)
-             else:
-                 path = Path("./logs/workflow_traces.db")
-
+         path = path or Path(self.path)
          if not path.exists():
              if force_raise:
                  raise FileNotFoundError(f"SQLite database not found: {path}")
@@ -1737,7 +1728,9 @@ TraceHandler = Annotated[
      Union[
          ConsoleHandler,
          FileHandler,
-         SQLiteHandler,
+         # SQLiteHandler,
+         # RestAPIHandler,
+         # ElasticHandler
      ],
      Field(discriminator="type"),
  ]
@@ -1874,7 +1867,9 @@ class BaseAsyncEmit(ABC):


  class TraceManager(BaseModel, BaseEmit, BaseAsyncEmit):
-     """Trace Management that keep all trance handler."""
+     """Trace Manager model that keep all trance handler and emit log to its
+     handler.
+     """

      extras: DictData = Field(
          default_factory=dict,
@@ -1892,7 +1887,7 @@ class TraceManager(BaseModel, BaseEmit, BaseAsyncEmit):
          description="A list of Trace handler model."
      )
      buffer_size: int = Field(
-         default=1,
+         default=10,
          description="A buffer size to trigger flush trace log",
      )

@@ -1929,8 +1924,8 @@ class TraceManager(BaseModel, BaseEmit, BaseAsyncEmit):
          """Emit a trace log to all handler. This will use synchronise process.

          Args:
-             msg: A message.
-             level: A tracing level.
+             msg (str): A message.
+             level (Level): A tracing level.
          """
          _msg: str = self.make_message(msg)
          metadata: Metadata = Metadata.make(
@@ -1942,16 +1937,18 @@ class TraceManager(BaseModel, BaseEmit, BaseAsyncEmit):
              parent_run_id=self.parent_run_id,
              extras=self.extras,
          )
-         if self._enable_buffer:  # pragma: no cov
-             self._buffer.append(metadata)
-
-             if len(self._buffer) >= self.buffer_size:
-                 for handler in self.handlers:
-                     handler.flush(self._buffer, extra=self.extras)
-                 self._buffer.clear()
-         else:
+         if not self._enable_buffer:
              for handler in self.handlers:
                  handler.emit(metadata, extra=self.extras)
+             return
+
+         # NOTE: Update metadata to the buffer.
+         self._buffer.append(metadata)
+
+         if len(self._buffer) >= self.buffer_size:  # pragma: no cov
+             for handler in self.handlers:
+                 handler.flush(self._buffer, extra=self.extras)
+             self._buffer.clear()

      async def amit(self, msg: str, level: Level) -> None:
          """Async write trace log with append mode and logging this message with
@@ -1974,22 +1971,43 @@ class TraceManager(BaseModel, BaseEmit, BaseAsyncEmit):
          for handler in self.handlers:
              await handler.amit(metadata, extra=self.extras)

-     def __enter__(self):  # pragma: no cov
+     def __enter__(self):
+         """Enter the trace for catching the logs that run so fast. It will use
+         buffer strategy to flush the logs instead emit.
+         """
          self._enable_buffer = True
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Exit the trace that will clear all log in the buffer."""
+         if exc_type:
+             _msg: str = self.make_message(str(exc_val))
+             metadata: Metadata = Metadata.make(
+                 error_flag=True,
+                 level="error",
+                 message=_msg,
+                 cutting_id=self.cut_id,
+                 run_id=self.run_id,
+                 parent_run_id=self.parent_run_id,
+                 extras=self.extras,
+             )
+             self._buffer.append(metadata)

-     def __exit__(self, exc_type, exc_val, exc_tb):  # pragma: no cov
          if self._buffer:
              for handler in self.handlers:
                  handler.flush(self._buffer, extra=self.extras)
              self._buffer.clear()

+         # NOTE: Re-raise the exception if one occurred
+         return False
+

  def get_trace(
      run_id: str,
      *,
      parent_run_id: Optional[str] = None,
      extras: Optional[DictData] = None,
- ) -> TraceManager:  # pragma: no cov
+ ) -> TraceManager:
      """Get dynamic TraceManager instance from the core config.

      This factory function returns the appropriate trace implementation based on
@@ -1997,8 +2015,8 @@ def get_trace(
      and parent running ID.

      Args:
-         run_id: A running ID.
-         parent_run_id: A parent running ID.
+         run_id (str): A running ID.
+         parent_run_id (str | None, default None): A parent running ID.
          extras: An extra parameter that want to override the core
              config values.

@@ -42,7 +42,7 @@ from pydantic.functional_validators import field_validator, model_validator
  from typing_extensions import Self

  from .__types import DictData
- from .audits import Audit, get_audit_model
+ from .audits import Audit, get_audit
  from .conf import YamlParser, dynamic
  from .errors import WorkflowCancelError, WorkflowError, WorkflowTimeoutError
  from .event import Event
@@ -509,24 +509,27 @@ class Workflow(BaseModel):
          trace.info(f"[RELEASE]: End {name!r} : {release:%Y-%m-%d %H:%M:%S}")
          trace.debug(f"[RELEASE]: Writing audit: {name!r}.")
          (
-             (audit or get_audit_model(extras=self.extras))(
-                 name=name,
-                 release=release,
-                 type=release_type,
-                 context=context,
-                 parent_run_id=parent_run_id,
-                 run_id=run_id,
-                 extras=self.extras,
-                 runs_metadata=(
-                     (runs_metadata or {})
-                     | rs.info
-                     | {
-                         "timeout": timeout,
-                         "original_name": self.name,
-                         "audit_excluded": audit_excluded,
-                     }
-                 ),
-             ).save(excluded=audit_excluded)
+             (audit or get_audit(extras=self.extras)).save(
+                 data={
+                     "name": name,
+                     "release": release,
+                     "type": release_type,
+                     "context": context,
+                     "parent_run_id": parent_run_id,
+                     "run_id": run_id,
+                     "extras": self.extras,
+                     "runs_metadata": (
+                         (runs_metadata or {})
+                         | rs.info
+                         | {
+                             "timeout": timeout,
+                             "original_name": self.name,
+                             "audit_excluded": audit_excluded,
+                         }
+                     ),
+                 },
+                 excluded=audit_excluded,
+             )
          )
          return Result(
              run_id=run_id,
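The release path now resolves the audit backend once and hands `save` a plain dict, which the backend validates into `AuditData` itself. A sketch of the same calling convention outside `Workflow.release` (values are illustrative, and `"normal"` stands in for the real release-type string):

```python
from datetime import datetime

from ddeutil.workflow.audits import get_audit

# Keys mirror the dict built in Workflow.release above.
get_audit().save(
    data={
        "name": "wf-demo",
        "release": datetime(2024, 1, 1),
        "type": "normal",
        "context": {},
        "run_id": "run-123",
        "runs_metadata": {},
    },
    excluded=None,
)
```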
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ddeutil-workflow
- Version: 0.0.79
+ Version: 0.0.80
  Summary: Lightweight workflow orchestration with YAML template
  Author-email: ddeutils <korawich.anu@gmail.com>
  License: MIT
@@ -168,19 +168,6 @@ For comprehensive API documentation, examples, and best practices:
  - **[Full Documentation](https://ddeutils.github.io/ddeutil-workflow/)** - Complete user guide and API reference
  - **[Getting Started](https://ddeutils.github.io/ddeutil-workflow/getting-started/)** - Quick start guide
  - **[API Reference](https://ddeutils.github.io/ddeutil-workflow/api/workflow/)** - Detailed API documentation
- - **[Optimized Tracing](docs/optimized-tracing.md)** - High-performance logging system (2-5x faster)
-
- ## ⚡ Performance Improvements
-
- The workflow system now includes an optimized tracing system that provides significant performance improvements:
-
- - **🚀 2-5x faster logging** with buffered I/O operations
- - **💾 60-80% reduction** in disk I/O operations
- - **🛡️ Built-in thread safety** with minimal overhead
- - **🔄 Backward compatible** - existing code automatically benefits
- - **📊 Lower memory footprint** for high-volume logging
-
- See [Optimized Tracing Documentation](docs/optimized-tracing.md) for details and performance benchmarks.

  ## 🎯 Usage

@@ -316,21 +303,21 @@ it will use default value and do not raise any error to you.
  > The config value that you will set on the environment should combine with
  > prefix, component, and name which is `WORKFLOW_{component}_{name}` (Upper case).

- | Name | Component | Default | Description |
- |:-----------------------------|:---------:|:--------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------|
- | **REGISTRY_CALLER** | CORE | `.` | List of importable string for the call stage. |
- | **REGISTRY_FILTER** | CORE | `ddeutil.workflow.templates` | List of importable string for the filter template. |
- | **CONF_PATH** | CORE | `./conf` | The config path that keep all template `.yaml` files. |
- | **STAGE_DEFAULT_ID** | CORE | `false` | A flag that enable default stage ID that use for catch an execution output. |
- | **GENERATE_ID_SIMPLE_MODE** | CORE | `true` | A flog that enable generating ID with `md5` algorithm. |
- | **DEBUG_MODE** | LOG | `true` | A flag that enable logging with debug level mode. |
- | **TIMEZONE** | LOG | `Asia/Bangkok` | A Timezone string value that will pass to `ZoneInfo` object. |
- | **FORMAT** | LOG | `%(asctime)s.%(msecs)03d (%(name)-10s, %(process)-5d,%(thread)-5d) [%(levelname)-7s] %(message)-120s (%(filename)s:%(lineno)s)` | A trace message console format. |
- | **FORMAT_FILE** | LOG | `{datetime} ({process:5d}, {thread:5d}) {message:120s} ({filename}:{lineno})` | A trace message format that use to write to target pointer. |
- | **DATETIME_FORMAT** | LOG | `%Y-%m-%d %H:%M:%S` | A datetime format of the trace log. |
- | **TRACE_HANDLERS** | LOG | `[{"type": "console"}]` | A pointer URL of trace log that use to emit log message. Now uses optimized handler by default. |
- | **AUDIT_URL** | LOG | `file:./audits` | A pointer URL of audit log that use to write audit metrix. |
- | **AUDIT_ENABLE_WRITE** | LOG | `true` | A flag that enable writing audit log after end execution in the workflow release step. |
+ | Name | Component | Default | Description |
+ |:----------------------------|:---------:|:--------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------|
+ | **REGISTRY_CALLER** | CORE | `.` | List of importable string for the call stage. |
+ | **REGISTRY_FILTER** | CORE | `ddeutil.workflow.templates` | List of importable string for the filter template. |
+ | **CONF_PATH** | CORE | `./conf` | The config path that keep all template `.yaml` files. |
+ | **STAGE_DEFAULT_ID** | CORE | `false` | A flag that enable default stage ID that use for catch an execution output. |
+ | **GENERATE_ID_SIMPLE_MODE** | CORE | `true` | A flog that enable generating ID with `md5` algorithm. |
+ | **DEBUG_MODE** | LOG | `true` | A flag that enable logging with debug level mode. |
+ | **TIMEZONE** | LOG | `Asia/Bangkok` | A Timezone string value that will pass to `ZoneInfo` object. |
+ | **FORMAT** | LOG | `%(asctime)s.%(msecs)03d (%(name)-10s, %(process)-5d,%(thread)-5d) [%(levelname)-7s] %(message)-120s (%(filename)s:%(lineno)s)` | A trace message console format. |
+ | **FORMAT_FILE** | LOG | `{datetime} ({process:5d}, {thread:5d}) {message:120s} ({filename}:{lineno})` | A trace message format that use to write to target pointer. |
+ | **DATETIME_FORMAT** | LOG | `%Y-%m-%d %H:%M:%S` | A datetime format of the trace log. |
+ | **TRACE_HANDLERS** | LOG | `[{"type": "console"}]` | A pointer URL of trace log that use to emit log message. Now uses optimized handler by default. |
+ | **AUDIT_CONF** | LOG | `{"type": "file", "path": "./audits"}` | A pointer URL of audit log that use to write audit metrix. |
+ | **AUDIT_ENABLE_WRITE** | LOG | `true` | A flag that enable writing audit log after end execution in the workflow release step. |

  ## :rocket: Deployment

@@ -1,36 +1,36 @@
- ddeutil/workflow/__about__.py,sha256=yvfj2RwP4ItaAPfKbYC9qJNcrKY9lxnnSRd6wZ4-CgQ,28
+ ddeutil/workflow/__about__.py,sha256=0fYC3KyobGtx1NtfXlGCbR9mrpL_yrH0UY_kp0NWaN8,28
  ddeutil/workflow/__cron.py,sha256=avOagaHl9xXOmizeRWm13cOrty9Tw0vRjFq-xoEgpAY,29167
- ddeutil/workflow/__init__.py,sha256=1xH6m7jXxFly_5FbWCoe8rqhdeSdnnrBMPzoiVo_Exo,3247
+ ddeutil/workflow/__init__.py,sha256=pRnUNCrwnKGrEQNSmdU9Ybf1tEQKg4LfsCpyj1Y3mhg,3241
  ddeutil/workflow/__main__.py,sha256=Qd-f8z2Q2vpiEP2x6PBFsJrpACWDVxFKQk820MhFmHo,59
  ddeutil/workflow/__types.py,sha256=tA2vsr6mzTSzbWB1sb62c5GgxODlfVRz6FvgLNJtQao,4788
- ddeutil/workflow/audits.py,sha256=7wfRXJmrG3T4YUSArABPfiWq6BzSBf8qoo24JAt433A,28280
+ ddeutil/workflow/audits.py,sha256=h7WVHe3z_FqtmLRvAxhnSqae8fFtZPN0tICrVr39wP4,25456
  ddeutil/workflow/cli.py,sha256=aNFOZ3Re_QJBBP6vkT9Lsjrg8wLxrw_LKrl-1SIvSOg,8152
- ddeutil/workflow/conf.py,sha256=vdTvR1OVk2TZBK5ZwwUpfxgg8GU4ldV8ukLzG1-tGDQ,16603
+ ddeutil/workflow/conf.py,sha256=4bFl1ufX7-p6ely7tJnanpTZ0wJoik81yTcmBrbcyxY,16661
  ddeutil/workflow/errors.py,sha256=UpUIqoyqkvzqjuxtUQ9535l1HeAsyh-plEG0PgDVR2w,5541
  ddeutil/workflow/event.py,sha256=qm7QHw-Pozm6oIUzAIxpDkPzzVZVtHgJIUlIle0vEfQ,13943
- ddeutil/workflow/job.py,sha256=UvzU66CebZkwIBg1KZch_aA3bZL0jpVrhRUQ2JIyiN4,46615
+ ddeutil/workflow/job.py,sha256=lSmOgh4l3_gBJXrTEWULhSSol648h6zPe6zKzz8jDHQ,46597
  ddeutil/workflow/params.py,sha256=y9f6DEIyae1j4awbj3Kbeq75-U2UPFlKv9K57Hdo_Go,17188
  ddeutil/workflow/result.py,sha256=BOk3DZMtmdE7xzQYeEYTGFlIkzJQ4Ed3fYzf0zF8Jo8,8963
  ddeutil/workflow/reusables.py,sha256=g_Cac3yHy0H5ffl4Bb8_eGl284ELxOuX4LI8GYPMZgw,24983
  ddeutil/workflow/stages.py,sha256=QufIa2b7A_ngOndVoGzyxKm_o5ZrauNeqxAC4vBkKFM,122678
- ddeutil/workflow/traces.py,sha256=0crly_08a7dfi8-w8QPmYizxh6T7VR8yDnEoxEEiwM0,72838
+ ddeutil/workflow/traces.py,sha256=e12_rDnwVo-XR6Ca1KLwCjo3tAwbJP7yXc1YU62YOt8,73415
  ddeutil/workflow/utils.py,sha256=-E-Z5hN_UTFuWDk-NpfKhNj0QtLfJSvZNDI5NzJsd5E,12122
- ddeutil/workflow/workflow.py,sha256=WTQAoSUNOmGpvZYgl28ziTY3kxtqQQw4jbTXPJOIBY4,42790
+ ddeutil/workflow/workflow.py,sha256=Uojf7k7l91sqOlsPMeSPwQmbrB5pgbWEmx9QgKYngmI,42924
  ddeutil/workflow/api/__init__.py,sha256=5DzYL3ngceoRshh5HYCSVWChqNJSiP01E1bEd8XxPi0,4799
  ddeutil/workflow/api/log_conf.py,sha256=WfS3udDLSyrP-C80lWOvxxmhd_XWKvQPkwDqKblcH3E,1834
  ddeutil/workflow/api/routes/__init__.py,sha256=JRaJZB0D6mgR17MbZo8yLtdYDtD62AA8MdKlFqhG84M,420
  ddeutil/workflow/api/routes/job.py,sha256=8eu2OAOS3fENQ54OO723lFpzgHMyz1D-b_xZj6OnmcA,2550
- ddeutil/workflow/api/routes/logs.py,sha256=RiZ62eQVMWArPHE3lpan955U4DdLLkethlvSMlwF7Mg,5312
- ddeutil/workflow/api/routes/workflows.py,sha256=1Mqx4Hft4uJglgJI-Wcw-JzkhomFYZrtP0DnQDBkAFQ,4410
+ ddeutil/workflow/api/routes/logs.py,sha256=O0I9L059SvtVHZ-TXCShChxbrHKUoT7MYRK0xZWwIMc,5282
+ ddeutil/workflow/api/routes/workflows.py,sha256=0pEZEsIrscRFBXG9gf6nttKw0aNbcdw7NsAZKLoKWtk,4392
  ddeutil/workflow/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ddeutil/workflow/plugins/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ddeutil/workflow/plugins/providers/aws.py,sha256=61uIFBEWt-_D5Sui24qUPier1Hiqlw_RP_eY-rXBCKc,31551
  ddeutil/workflow/plugins/providers/az.py,sha256=o3dh011lEtmr7-d7FPZJPgXdT0ytFzKfc5xnVxSyXGU,34867
  ddeutil/workflow/plugins/providers/container.py,sha256=DSN0RWxMjTJN5ANheeMauDaPa3X6Z2E1eGUcctYkENw,22134
  ddeutil/workflow/plugins/providers/gcs.py,sha256=KgAOdMBvdbMLTH_z_FwVriBFtZfKEYx8_34jzUOVjTY,27460
- ddeutil_workflow-0.0.79.dist-info/licenses/LICENSE,sha256=nGFZ1QEhhhWeMHf9n99_fdt4vQaXS29xWKxt-OcLywk,1085
- ddeutil_workflow-0.0.79.dist-info/METADATA,sha256=V6nV4VeqzwryOpJVDmd4sK5U9rqgn8H4qs8WcTf8ugw,16755
- ddeutil_workflow-0.0.79.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ddeutil_workflow-0.0.79.dist-info/entry_points.txt,sha256=qDTpPSauL0ciO6T4iSVt8bJeYrVEkkoEEw_RlGx6Kgk,63
- ddeutil_workflow-0.0.79.dist-info/top_level.txt,sha256=m9M6XeSWDwt_yMsmH6gcOjHZVK5O0-vgtNBuncHjzW4,8
- ddeutil_workflow-0.0.79.dist-info/RECORD,,
+ ddeutil_workflow-0.0.80.dist-info/licenses/LICENSE,sha256=nGFZ1QEhhhWeMHf9n99_fdt4vQaXS29xWKxt-OcLywk,1085
+ ddeutil_workflow-0.0.80.dist-info/METADATA,sha256=g43RJsjquJoYyQUl_d5Wfuxdoh00tukjjjgW-lL7Hdw,16087
+ ddeutil_workflow-0.0.80.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ddeutil_workflow-0.0.80.dist-info/entry_points.txt,sha256=qDTpPSauL0ciO6T4iSVt8bJeYrVEkkoEEw_RlGx6Kgk,63
+ ddeutil_workflow-0.0.80.dist-info/top_level.txt,sha256=m9M6XeSWDwt_yMsmH6gcOjHZVK5O0-vgtNBuncHjzW4,8
+ ddeutil_workflow-0.0.80.dist-info/RECORD,,