ddeutil-workflow 0.0.79__py3-none-any.whl → 0.0.81__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
1
- __version__: str = "0.0.79"
1
+ __version__: str = "0.0.81"
@@ -52,7 +52,7 @@ from .__types import DictData, DictStr, Matrix, Re, TupleStr
52
52
  from .audits import (
53
53
  Audit,
54
54
  FileAudit,
55
- get_audit_model,
55
+ get_audit,
56
56
  )
57
57
  from .conf import *
58
58
  from .errors import (
@@ -10,7 +10,7 @@ from fastapi import APIRouter, Path, Query
10
10
  from fastapi import status as st
11
11
  from fastapi.responses import UJSONResponse
12
12
 
13
- from ...audits import get_audit_model
13
+ from ...audits import get_audit
14
14
  from ...result import Result
15
15
 
16
16
  router = APIRouter(
@@ -90,7 +90,7 @@ async def get_audits():
90
90
  """
91
91
  return {
92
92
  "message": "Getting audit logs",
93
- "audits": list(get_audit_model().find_audits(name="demo")),
93
+ "audits": list(get_audit().find_audits(name="demo")),
94
94
  }
95
95
 
96
96
 
@@ -109,7 +109,7 @@ async def get_audit_with_workflow(workflow: str):
109
109
  """
110
110
  return {
111
111
  "message": f"Getting audit logs with workflow name {workflow}",
112
- "audits": list(get_audit_model().find_audits(name="demo")),
112
+ "audits": list(get_audit().find_audits(name="demo")),
113
113
  }
114
114
 
115
115
 
@@ -136,7 +136,7 @@ async def get_audit_with_workflow_release(
136
136
  f"Getting audit logs with workflow name {workflow} and release "
137
137
  f"{release}"
138
138
  ),
139
- "audits": list(get_audit_model().find_audits(name="demo")),
139
+ "audits": list(get_audit().find_audits(name="demo")),
140
140
  }
141
141
 
142
142
 
@@ -167,5 +167,5 @@ async def get_audit_with_workflow_release_run_id(
167
167
  f"Getting audit logs with workflow name {workflow}, release "
168
168
  f"{release}, and running ID {run_id}"
169
169
  ),
170
- "audits": list(get_audit_model().find_audits(name="demo")),
170
+ "audits": list(get_audit().find_audits(name="demo")),
171
171
  }
@@ -16,7 +16,7 @@ from fastapi.responses import UJSONResponse
16
16
  from pydantic import BaseModel
17
17
 
18
18
  from ...__types import DictData
19
- from ...audits import Audit, get_audit_model
19
+ from ...audits import Audit, get_audit
20
20
  from ...conf import YamlParser
21
21
  from ...result import Result
22
22
  from ...workflow import Workflow
@@ -100,7 +100,7 @@ async def get_workflow_audits(name: str):
100
100
  exclude_none=False,
101
101
  exclude_unset=True,
102
102
  )
103
- for audit in get_audit_model().find_audits(name=name)
103
+ for audit in get_audit().find_audits(name=name)
104
104
  ],
105
105
  }
106
106
  except FileNotFoundError:
@@ -114,7 +114,7 @@ async def get_workflow_audits(name: str):
114
114
  async def get_workflow_release_audit(name: str, release: str):
115
115
  """Get Workflow audit log with an input release value."""
116
116
  try:
117
- audit: Audit = get_audit_model().find_audit_with_release(
117
+ audit: Audit = get_audit().find_audit_with_release(
118
118
  name=name,
119
119
  release=datetime.strptime(release, "%Y%m%d%H%M%S"),
120
120
  )
@@ -32,10 +32,8 @@ Functions:
32
32
 
33
33
  Example:
34
34
 
35
- >>> from ddeutil.workflow.audits import get_audit_model
36
- >>> audit = get_audit_model(run_id="run-123")
37
- >>> audit.info("Workflow execution started")
38
- >>> audit.success("Workflow completed successfully")
35
+ >>> from ddeutil.workflow.audits import get_audit
36
+ >>> audit = get_audit()
39
37
 
40
38
  Note:
41
39
  Audit instances are automatically configured based on the workflow
@@ -52,11 +50,10 @@ from abc import ABC, abstractmethod
52
50
  from collections.abc import Iterator
53
51
  from datetime import datetime, timedelta
54
52
  from pathlib import Path
55
- from typing import Any, ClassVar, Optional, TypeVar, Union
53
+ from typing import Annotated, Any, ClassVar, Literal, Optional, Union
56
54
  from urllib.parse import ParseResult, urlparse
57
55
 
58
- from pydantic import BaseModel, Field
59
- from pydantic.functional_serializers import field_serializer
56
+ from pydantic import BaseModel, Field, TypeAdapter
60
57
  from pydantic.functional_validators import field_validator, model_validator
61
58
  from typing_extensions import Self
62
59
 
@@ -67,17 +64,7 @@ from .traces import TraceManager, get_trace, set_logging
67
64
  logger = logging.getLogger("ddeutil.workflow")
68
65
 
69
66
 
70
- class BaseAudit(BaseModel, ABC):
71
- """Base Audit Pydantic Model with abstraction class property.
72
-
73
- This model implements only model fields and should be used as a base class
74
- for logging subclasses like file, sqlite, etc.
75
- """
76
-
77
- extras: DictData = Field(
78
- default_factory=dict,
79
- description="An extras parameter that want to override core config",
80
- )
67
+ class AuditData(BaseModel):
81
68
  name: str = Field(description="A workflow name.")
82
69
  release: datetime = Field(description="A release datetime.")
83
70
  type: str = Field(description="A running type before logging.")
@@ -94,6 +81,20 @@ class BaseAudit(BaseModel, ABC):
94
81
  description="A runs metadata that will use to tracking this audit log.",
95
82
  )
96
83
 
84
+
85
+ class BaseAudit(BaseModel, ABC):
86
+ """Base Audit Pydantic Model with abstraction class property.
87
+
88
+ This model implements only model fields and should be used as a base class
89
+ for logging subclasses like file, sqlite, etc.
90
+ """
91
+
92
+ type: str
93
+ extras: DictData = Field(
94
+ default_factory=dict,
95
+ description="An extras parameter that want to override core config",
96
+ )
97
+
97
98
  @field_validator("extras", mode="before")
98
99
  def validate_extras(cls, v: Any) -> DictData:
99
100
  """Validate extras field to ensure it's a dictionary."""
@@ -118,20 +119,17 @@ class BaseAudit(BaseModel, ABC):
118
119
  set_logging("ddeutil.workflow")
119
120
  return self
120
121
 
121
- @classmethod
122
122
  @abstractmethod
123
123
  def is_pointed(
124
- cls,
125
- name: str,
126
- release: datetime,
124
+ self,
125
+ data: AuditData,
127
126
  *,
128
127
  extras: Optional[DictData] = None,
129
128
  ) -> bool:
130
129
  """Check if audit data exists for the given workflow and release.
131
130
 
132
131
  Args:
133
- name: The workflow name to check.
134
- release: The release datetime to check.
132
+ data: The audit data to check for existence.
135
133
  extras: Optional extra parameters to override core config.
136
134
 
137
135
  Returns:
@@ -144,10 +142,9 @@ class BaseAudit(BaseModel, ABC):
144
142
  "Audit should implement `is_pointed` class-method"
145
143
  )
146
144
 
147
- @classmethod
148
145
  @abstractmethod
149
146
  def find_audits(
150
- cls,
147
+ self,
151
148
  name: str,
152
149
  *,
153
150
  extras: Optional[DictData] = None,
@@ -168,10 +165,9 @@ class BaseAudit(BaseModel, ABC):
168
165
  "Audit should implement `find_audits` class-method"
169
166
  )
170
167
 
171
- @classmethod
172
168
  @abstractmethod
173
169
  def find_audit_with_release(
174
- cls,
170
+ self,
175
171
  name: str,
176
172
  release: Optional[datetime] = None,
177
173
  *,
@@ -203,11 +199,12 @@ class BaseAudit(BaseModel, ABC):
203
199
 
204
200
  @abstractmethod
205
201
  def save(
206
- self, excluded: Optional[list[str]] = None
202
+ self, data: Any, excluded: Optional[list[str]] = None
207
203
  ) -> Self: # pragma: no cov
208
204
  """Save this model logging to target logging store.
209
205
 
210
206
  Args:
207
+ data: The audit data to save.
211
208
  excluded: Optional list of field names to exclude from saving.
212
209
 
213
210
  Returns:
@@ -234,20 +231,11 @@ class FileAudit(BaseAudit):
234
231
  "workflow={name}/release={release:%Y%m%d%H%M%S}"
235
232
  )
236
233
 
237
- @field_serializer("extras")
238
- def __serialize_extras(self, value: DictData) -> DictData:
239
- """Serialize extras field, converting ParseResult objects to URLs.
240
-
241
- Args:
242
- value: The extras dictionary to serialize.
243
-
244
- Returns:
245
- DictData: Serialized extras with ParseResult objects converted to URLs.
246
- """
247
- return {
248
- k: (v.geturl() if isinstance(v, ParseResult) else v)
249
- for k, v in value.items()
250
- }
234
+ type: Literal["file"] = "file"
235
+ path: str = Field(
236
+ default="./audits",
237
+ description="A file path that use to manage audit logs.",
238
+ )
251
239
 
252
240
  def do_before(self) -> None:
253
241
  """Create directory of release before saving log file.
@@ -255,12 +243,11 @@ class FileAudit(BaseAudit):
255
243
  This method ensures the target directory exists before attempting
256
244
  to save audit log files.
257
245
  """
258
- self.pointer().mkdir(parents=True, exist_ok=True)
246
+ Path(self.path).mkdir(parents=True, exist_ok=True)
259
247
 
260
- @classmethod
261
248
  def find_audits(
262
- cls, name: str, *, extras: Optional[DictData] = None
263
- ) -> Iterator[Self]:
249
+ self, name: str, *, extras: Optional[DictData] = None
250
+ ) -> Iterator[AuditData]:
264
251
  """Generate audit data found from logs path for a specific workflow name.
265
252
 
266
253
  Args:
@@ -273,27 +260,21 @@ class FileAudit(BaseAudit):
273
260
  Raises:
274
261
  FileNotFoundError: If the workflow directory does not exist.
275
262
  """
276
- audit_url = dynamic("audit_url", extras=extras)
277
- if audit_url is None:
278
- raise ValueError("audit_url configuration is not set")
279
-
280
- audit_url_parse: ParseResult = urlparse(audit_url)
281
- pointer: Path = Path(audit_url_parse.path) / f"workflow={name}"
263
+ pointer: Path = Path(self.path) / f"workflow={name}"
282
264
  if not pointer.exists():
283
265
  raise FileNotFoundError(f"Pointer: {pointer.absolute()}.")
284
266
 
285
267
  for file in pointer.glob("./release=*/*.log"):
286
268
  with file.open(mode="r", encoding="utf-8") as f:
287
- yield cls.model_validate(obj=json.load(f))
269
+ yield AuditData.model_validate(obj=json.load(f))
288
270
 
289
- @classmethod
290
271
  def find_audit_with_release(
291
- cls,
272
+ self,
292
273
  name: str,
293
274
  release: Optional[datetime] = None,
294
275
  *,
295
276
  extras: Optional[DictData] = None,
296
- ) -> Self:
277
+ ) -> AuditData:
297
278
  """Return audit data found from logs path for specific workflow and release.
298
279
 
299
280
  If a release is not provided, it will return the latest release from
@@ -305,20 +286,14 @@ class FileAudit(BaseAudit):
305
286
  extras: Optional extra parameters to override core config.
306
287
 
307
288
  Returns:
308
- Self: The audit instance for the specified workflow and release.
289
+ AuditData: The audit instance for the specified workflow and release.
309
290
 
310
291
  Raises:
311
292
  FileNotFoundError: If the specified workflow/release directory does not exist.
312
293
  ValueError: If no releases found when release is None.
313
294
  """
314
295
  if release is None:
315
- # Find the latest release
316
- audit_url = dynamic("audit_url", extras=extras)
317
- if audit_url is None:
318
- raise ValueError("audit_url configuration is not set")
319
-
320
- audit_url_parse: ParseResult = urlparse(audit_url)
321
- pointer: Path = Path(audit_url_parse.path) / f"workflow={name}"
296
+ pointer: Path = Path(self.path) / f"workflow={name}"
322
297
  if not pointer.exists():
323
298
  raise FileNotFoundError(f"Pointer: {pointer.absolute()}.")
324
299
 
@@ -332,13 +307,8 @@ class FileAudit(BaseAudit):
332
307
  pointer.glob("./release=*"), key=os.path.getctime
333
308
  )
334
309
  else:
335
- audit_url = dynamic("audit_url", extras=extras)
336
- if audit_url is None:
337
- raise ValueError("audit_url configuration is not set")
338
-
339
- audit_url_parse: ParseResult = urlparse(audit_url)
340
310
  release_pointer: Path = (
341
- Path(audit_url_parse.path)
311
+ Path(self.path)
342
312
  / f"workflow={name}/release={release:%Y%m%d%H%M%S}"
343
313
  )
344
314
  if not release_pointer.exists():
@@ -355,21 +325,15 @@ class FileAudit(BaseAudit):
355
325
  release_pointer.glob("./*.log"), key=os.path.getctime
356
326
  )
357
327
  with latest_file.open(mode="r", encoding="utf-8") as f:
358
- return cls.model_validate(obj=json.load(f))
328
+ return AuditData.model_validate(obj=json.load(f))
359
329
 
360
- @classmethod
361
330
  def is_pointed(
362
- cls,
363
- name: str,
364
- release: datetime,
365
- *,
366
- extras: Optional[DictData] = None,
331
+ self, data: AuditData, *, extras: Optional[DictData] = None
367
332
  ) -> bool:
368
333
  """Check if the release log already exists at the destination log path.
369
334
 
370
335
  Args:
371
- name: The workflow name.
372
- release: The release datetime.
336
+ data: The audit data containing the workflow name and release.
373
337
  extras: Optional extra parameters to override core config.
374
338
 
375
339
  Returns:
@@ -378,46 +342,32 @@ class FileAudit(BaseAudit):
378
342
  # NOTE: Return False if enable writing log flag does not set.
379
343
  if not dynamic("enable_write_audit", extras=extras):
380
344
  return False
345
+ return self.pointer(data).exists()
381
346
 
382
- # NOTE: create pointer path that use the same logic of pointer method.
383
- audit_url: Optional[str] = dynamic("audit_url", extras=extras)
384
- if audit_url is None:
385
- return False
386
-
387
- audit_url_parse: ParseResult = urlparse(audit_url)
388
- pointer: Path = Path(audit_url_parse.path) / cls.filename_fmt.format(
389
- name=name, release=release
390
- )
391
-
392
- return pointer.exists()
393
-
394
- def pointer(self) -> Path:
347
+ def pointer(self, data: AuditData) -> Path:
395
348
  """Return release directory path generated from model data.
396
349
 
397
350
  Returns:
398
351
  Path: The directory path for the current workflow and release.
399
352
  """
400
- audit_url = dynamic("audit_url", extras=self.extras)
401
- if audit_url is None:
402
- raise ValueError("audit_url configuration is not set")
403
-
404
- audit_url_parse: ParseResult = urlparse(audit_url)
405
- return Path(audit_url_parse.path) / self.filename_fmt.format(
406
- name=self.name, release=self.release
353
+ return Path(self.path) / self.filename_fmt.format(
354
+ name=data.name, release=data.release
407
355
  )
408
356
 
409
- def save(self, excluded: Optional[list[str]] = None) -> Self:
357
+ def save(self, data: Any, excluded: Optional[list[str]] = None) -> Self:
410
358
  """Save logging data received from workflow execution result.
411
359
 
412
360
  Args:
361
+ data: The audit data to save.
413
362
  excluded: Optional list of field names to exclude from saving.
414
363
 
415
364
  Returns:
416
365
  Self: The audit instance after saving.
417
366
  """
367
+ audit = AuditData.model_validate(data)
418
368
  trace: TraceManager = get_trace(
419
- self.run_id,
420
- parent_run_id=self.parent_run_id,
369
+ audit.run_id,
370
+ parent_run_id=audit.parent_run_id,
421
371
  extras=self.extras,
422
372
  )
423
373
 
@@ -426,19 +376,21 @@ class FileAudit(BaseAudit):
426
376
  trace.debug("[AUDIT]: Skip writing audit log cause config was set.")
427
377
  return self
428
378
 
429
- log_file: Path = (
430
- self.pointer() / f"{self.parent_run_id or self.run_id}.log"
431
- )
379
+ pointer: Path = self.pointer(data=audit)
380
+ if not pointer.exists():
381
+ pointer.mkdir(parents=True)
382
+
383
+ log_file: Path = pointer / f"{audit.parent_run_id or audit.run_id}.log"
432
384
 
433
385
  # NOTE: Convert excluded list to set for pydantic compatibility
434
386
  exclude_set = set(excluded) if excluded else None
435
387
  trace.info(
436
388
  f"[AUDIT]: Start writing audit log with "
437
- f"release: {self.release:%Y%m%d%H%M%S}"
389
+ f"release: {audit.release:%Y%m%d%H%M%S}"
438
390
  )
439
391
  log_file.write_text(
440
392
  json.dumps(
441
- self.model_dump(exclude=exclude_set),
393
+ audit.model_dump(exclude=exclude_set),
442
394
  default=str,
443
395
  indent=2,
444
396
  ),
@@ -504,6 +456,9 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
504
456
  )
505
457
  """
506
458
 
459
+ type: Literal["sqlite"] = "sqlite"
460
+ path: str
461
+
507
462
  def _ensure_table_exists(self) -> None:
508
463
  """Ensure the audit table exists in the database."""
509
464
  audit_url = dynamic("audit_url", extras=self.extras)
@@ -520,19 +475,16 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
520
475
  conn.execute(self.schemas)
521
476
  conn.commit()
522
477
 
523
- @classmethod
524
478
  def is_pointed(
525
- cls,
526
- name: str,
527
- release: datetime,
479
+ self,
480
+ data: AuditData,
528
481
  *,
529
482
  extras: Optional[DictData] = None,
530
483
  ) -> bool:
531
484
  """Check if audit data exists for the given workflow and release.
532
485
 
533
486
  Args:
534
- name: The workflow name to check.
535
- release: The release datetime to check.
487
+ data: The audit data to check for existence.
536
488
  extras: Optional extra parameters to override core config.
537
489
 
538
490
  Returns:
@@ -553,7 +505,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
553
505
  with sqlite3.connect(db_path) as conn:
554
506
  cursor = conn.execute(
555
507
  "SELECT COUNT(*) FROM audits WHERE workflow = ? AND release = ?",
556
- (name, release.isoformat()),
508
+ (data.name, data.release.isoformat()),
557
509
  )
558
510
  return cursor.fetchone()[0] > 0
559
511
 
@@ -592,7 +544,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
592
544
  context = json.loads(cls._decompress_data(row[3]))
593
545
  metadata = json.loads(cls._decompress_data(row[6]))
594
546
 
595
- yield cls(
547
+ yield AuditData(
596
548
  name=row[0],
597
549
  release=datetime.fromisoformat(row[1]),
598
550
  type=row[2],
@@ -600,7 +552,6 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
600
552
  parent_run_id=row[4],
601
553
  run_id=row[5],
602
554
  runs_metadata=metadata,
603
- extras=extras or {},
604
555
  )
605
556
 
606
557
  @classmethod
@@ -610,7 +561,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
610
561
  release: Optional[datetime] = None,
611
562
  *,
612
563
  extras: Optional[DictData] = None,
613
- ) -> Self:
564
+ ) -> AuditData:
614
565
  """Find audit data for a specific workflow and release.
615
566
 
616
567
  Args:
@@ -656,7 +607,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
656
607
  context = json.loads(cls._decompress_data(row[3]))
657
608
  metadata = json.loads(cls._decompress_data(row[6]))
658
609
 
659
- return cls(
610
+ return AuditData(
660
611
  name=row[0],
661
612
  release=datetime.fromisoformat(row[1]),
662
613
  type=row[2],
@@ -664,7 +615,6 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
664
615
  parent_run_id=row[4],
665
616
  run_id=row[5],
666
617
  runs_metadata=metadata,
667
- extras=extras or {},
668
618
  )
669
619
 
670
620
  @staticmethod
@@ -691,10 +641,11 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
691
641
  """
692
642
  return zlib.decompress(data).decode("utf-8")
693
643
 
694
- def save(self, excluded: Optional[list[str]] = None) -> Self:
644
+ def save(self, data: Any, excluded: Optional[list[str]] = None) -> Self:
695
645
  """Save logging data received from workflow execution result.
696
646
 
697
647
  Args:
648
+ data: The audit data to save.
698
649
  excluded: Optional list of field names to exclude from saving.
699
650
 
700
651
  Returns:
@@ -703,9 +654,10 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
703
654
  Raises:
704
655
  ValueError: If SQLite database is not properly configured.
705
656
  """
657
+ audit = AuditData.model_validate(data)
706
658
  trace: TraceManager = get_trace(
707
- self.run_id,
708
- parent_run_id=self.parent_run_id,
659
+ audit.run_id,
660
+ parent_run_id=audit.parent_run_id,
709
661
  extras=self.extras,
710
662
  )
711
663
 
@@ -726,7 +678,7 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
726
678
 
727
679
  # Prepare data for storage
728
680
  exclude_set = set(excluded) if excluded else None
729
- model_data = self.model_dump(exclude=exclude_set)
681
+ model_data = audit.model_dump(exclude=exclude_set)
730
682
 
731
683
  # Compress context and metadata
732
684
  context_blob = self._compress_data(
@@ -744,12 +696,12 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
744
696
  VALUES (?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
745
697
  """,
746
698
  (
747
- self.name,
748
- self.release.isoformat(),
749
- self.type,
699
+ audit.name,
700
+ audit.release.isoformat(),
701
+ audit.type,
750
702
  context_blob,
751
- self.parent_run_id,
752
- self.run_id,
703
+ audit.parent_run_id,
704
+ audit.run_id,
753
705
  metadata_blob,
754
706
  ),
755
707
  )
@@ -787,50 +739,27 @@ class SQLiteAudit(BaseAudit): # pragma: no cov
787
739
  return cursor.rowcount
788
740
 
789
741
 
790
- Audit = Union[
791
- FileAudit,
792
- SQLiteAudit,
742
+ Audit = Annotated[
743
+ Union[
744
+ FileAudit,
745
+ SQLiteAudit,
746
+ ],
747
+ Field(discriminator="type"),
793
748
  ]
794
- AuditType = TypeVar("AuditType", bound=BaseAudit)
795
749
 
796
750
 
797
- def get_audit_model(
751
+ def get_audit(
798
752
  *,
799
753
  extras: Optional[DictData] = None,
800
- ) -> type[AuditType]: # pragma: no cov
754
+ ) -> Audit: # pragma: no cov
801
755
  """Get an audit model dynamically based on the config audit path value.
802
756
 
803
757
  Args:
804
758
  extras: Optional extra parameters to override the core config.
805
- This function allow you to pass `audit_model_mapping` for override
806
- the audit model object with your custom model.
807
759
 
808
760
  Returns:
809
- type[Audit]: The appropriate audit model class based on configuration.
810
-
811
- Raises:
812
- NotImplementedError: If the audit URL scheme is not supported.
761
+ Audit: The appropriate audit model class based on configuration.
813
762
  """
814
- # NOTE: Allow you to override audit model by the extra parameter.
815
- map_audit_models: dict[str, type[AuditType]] = (extras or {}).get(
816
- "audit_model_mapping", {}
817
- )
818
-
819
- audit_url = dynamic("audit_url", extras=extras)
820
- if audit_url is None:
821
- return map_audit_models.get("file", FileAudit)
822
-
823
- audit_url_parse: ParseResult = urlparse(audit_url)
824
- if not audit_url_parse.scheme:
825
- return map_audit_models.get("file", FileAudit)
826
-
827
- if audit_url_parse.scheme == "sqlite" or (
828
- audit_url_parse.scheme == "file"
829
- and Path(audit_url_parse.path).is_file()
830
- ):
831
- return map_audit_models.get("sqlite", SQLiteAudit)
832
- elif audit_url_parse.scheme != "file":
833
- raise NotImplementedError(
834
- f"Does not implement the audit model support for URL: {audit_url_parse}"
835
- )
836
- return map_audit_models.get("file", FileAudit)
763
+ audit_conf = dynamic("audit_conf", extras=extras)
764
+ model = TypeAdapter(Audit).validate_python(audit_conf | {"extras": extras})
765
+ return model