dataenginex 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. dataenginex/README.md +35 -0
  2. dataenginex/RELEASE_NOTES.md +38 -0
  3. dataenginex/__init__.py +16 -0
  4. dataenginex/api/__init__.py +11 -0
  5. dataenginex/api/auth.py +173 -0
  6. dataenginex/api/errors.py +70 -0
  7. dataenginex/api/health.py +133 -0
  8. dataenginex/api/pagination.py +94 -0
  9. dataenginex/api/rate_limit.py +122 -0
  10. dataenginex/api/routers/__init__.py +1 -0
  11. dataenginex/api/routers/v1.py +113 -0
  12. dataenginex/core/__init__.py +36 -0
  13. dataenginex/core/medallion_architecture.py +414 -0
  14. dataenginex/core/pipeline_config.py +111 -0
  15. dataenginex/core/schemas.py +304 -0
  16. dataenginex/core/validators.py +394 -0
  17. dataenginex/data/__init__.py +22 -0
  18. dataenginex/data/connectors.py +332 -0
  19. dataenginex/data/profiler.py +217 -0
  20. dataenginex/data/registry.py +148 -0
  21. dataenginex/lakehouse/__init__.py +22 -0
  22. dataenginex/lakehouse/catalog.py +145 -0
  23. dataenginex/lakehouse/partitioning.py +99 -0
  24. dataenginex/lakehouse/storage.py +177 -0
  25. dataenginex/middleware/__init__.py +19 -0
  26. dataenginex/middleware/logging_config.py +137 -0
  27. dataenginex/middleware/metrics.py +45 -0
  28. dataenginex/middleware/metrics_middleware.py +61 -0
  29. dataenginex/middleware/request_logging.py +77 -0
  30. dataenginex/middleware/tracing.py +87 -0
  31. dataenginex/ml/__init__.py +28 -0
  32. dataenginex/ml/drift.py +165 -0
  33. dataenginex/ml/registry.py +156 -0
  34. dataenginex/ml/serving.py +141 -0
  35. dataenginex/ml/training.py +205 -0
  36. dataenginex/warehouse/__init__.py +19 -0
  37. dataenginex/warehouse/lineage.py +164 -0
  38. dataenginex/warehouse/transforms.py +206 -0
  39. dataenginex-0.3.4.dist-info/METADATA +66 -0
  40. dataenginex-0.3.4.dist-info/RECORD +41 -0
  41. dataenginex-0.3.4.dist-info/WHEEL +4 -0
dataenginex/data/registry.py
@@ -0,0 +1,148 @@
+"""
+Schema registry — versioned schema management for DEX datasets.
+
+Stores schema definitions (as JSON-serialisable dicts) with
+semantic versioning, allowing pipelines to validate data against
+a specific schema revision and to track schema evolution.
+"""
+
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass, field
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
+from loguru import logger
+
+
+@dataclass
+class SchemaVersion:
+    """An immutable snapshot of a schema at a particular version."""
+
+    name: str
+    version: str  # semver string, e.g. "1.2.0"
+    fields: dict[str, str]  # field_name → type_description
+    required_fields: list[str] = field(default_factory=list)
+    description: str = ""
+    created_at: datetime = field(default_factory=lambda: datetime.now(tz=UTC))
+    metadata: dict[str, Any] = field(default_factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        return {
+            "name": self.name,
+            "version": self.version,
+            "fields": self.fields,
+            "required_fields": self.required_fields,
+            "description": self.description,
+            "created_at": self.created_at.isoformat(),
+            "metadata": self.metadata,
+        }
+
+    def validate_record(self, record: dict[str, Any]) -> tuple[bool, list[str]]:
+        """Check that *record* has all required fields.
+
+        Returns ``(is_valid, errors)`` where *errors* lists the missing
+        required fields.
+        """
+        missing = [f for f in self.required_fields if f not in record]
+        return len(missing) == 0, [f"Missing required field: {f}" for f in missing]
+
+
+class SchemaRegistry:
+    """In-process schema registry backed by an optional JSON file.
+
+    Parameters
+    ----------
+    persist_path:
+        If given, schemas are saved/loaded from this JSON file so they
+        survive across process restarts.
+    """
+
+    def __init__(self, persist_path: str | Path | None = None) -> None:
+        # schema_name → [SchemaVersion …] (ordered oldest → newest)
+        self._schemas: dict[str, list[SchemaVersion]] = {}
+        self._persist_path = Path(persist_path) if persist_path else None
+        if self._persist_path and self._persist_path.exists():
+            self._load()
+
+    # -- public API ----------------------------------------------------------
+
+    def register(self, schema: SchemaVersion) -> SchemaVersion:
+        """Register a new schema version. Duplicate versions are rejected."""
+        versions = self._schemas.setdefault(schema.name, [])
+        existing = {v.version for v in versions}
+        if schema.version in existing:
+            raise ValueError(
+                f"Schema {schema.name!r} version {schema.version} already registered"
+            )
+        versions.append(schema)
+        logger.info("Registered schema {} v{}", schema.name, schema.version)
+        self._save()
+        return schema
+
+    def get_latest(self, name: str) -> SchemaVersion | None:
+        """Return the most recently registered version for *name*."""
+        versions = self._schemas.get(name)
+        if not versions:
+            return None
+        return versions[-1]
+
+    def get_version(self, name: str, version: str) -> SchemaVersion | None:
+        """Return a specific version, or *None* if not found."""
+        for v in self._schemas.get(name, []):
+            if v.version == version:
+                return v
+        return None
+
+    def list_schemas(self) -> list[str]:
+        """Return all registered schema names."""
+        return list(self._schemas.keys())
+
+    def list_versions(self, name: str) -> list[str]:
+        """Return all registered versions for *name* (oldest first)."""
+        return [v.version for v in self._schemas.get(name, [])]
+
+    def validate(
+        self, name: str, record: dict[str, Any], version: str | None = None
+    ) -> tuple[bool, list[str]]:
+        """Validate *record* against a schema.
+
+        If *version* is ``None`` the latest version is used.
+        """
+        schema = (
+            self.get_version(name, version) if version else self.get_latest(name)
+        )
+        if schema is None:
+            return False, [f"Schema {name!r} (version={version}) not found"]
+        return schema.validate_record(record)
+
+    # -- persistence ---------------------------------------------------------
+
+    def _save(self) -> None:
+        if not self._persist_path:
+            return
+        data: dict[str, list[dict[str, Any]]] = {}
+        for name, versions in self._schemas.items():
+            data[name] = [v.to_dict() for v in versions]
+        self._persist_path.parent.mkdir(parents=True, exist_ok=True)
+        self._persist_path.write_text(json.dumps(data, indent=2, default=str))
+
+    def _load(self) -> None:
+        if not self._persist_path or not self._persist_path.exists():
+            return
+        raw = json.loads(self._persist_path.read_text())
+        for name, versions in raw.items():
+            self._schemas[name] = [
+                SchemaVersion(
+                    name=v["name"],
+                    version=v["version"],
+                    fields=v["fields"],
+                    required_fields=v.get("required_fields", []),
+                    description=v.get("description", ""),
+                    metadata=v.get("metadata", {}),
+                )
+                for v in versions
+            ]
+        logger.info("Loaded {} schemas from {}", len(self._schemas), self._persist_path)
dataenginex/lakehouse/__init__.py
@@ -0,0 +1,22 @@
+"""
+dex-lakehouse — Storage backends, data catalog, and partitioning (Epic #39).
+
+Provides:
+- ``ParquetStorage`` / ``JsonStorage`` — concrete ``StorageBackend`` impls
+- ``DataCatalog`` — registry of datasets with metadata
+- ``PartitionStrategy`` — time/hash/range-based partitioning helpers
+"""
+
+from .catalog import CatalogEntry, DataCatalog
+from .partitioning import DatePartitioner, HashPartitioner, PartitionStrategy
+from .storage import JsonStorage, ParquetStorage
+
+__all__ = [
+    "CatalogEntry",
+    "DataCatalog",
+    "DatePartitioner",
+    "HashPartitioner",
+    "JsonStorage",
+    "ParquetStorage",
+    "PartitionStrategy",
+]
dataenginex/lakehouse/catalog.py
@@ -0,0 +1,145 @@
+"""
+Data catalog — registry of lakehouse datasets with metadata.
+
+``DataCatalog`` keeps track of every dataset written to the lakehouse,
+recording its layer, format, location, schema snapshot, and record counts
+so that downstream consumers can discover available data.
+"""
+
+from __future__ import annotations
+
+import json
+from dataclasses import asdict, dataclass, field
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
+from loguru import logger
+
+
+@dataclass
+class CatalogEntry:
+    """Metadata about a single dataset in the lakehouse."""
+
+    name: str
+    layer: str  # "bronze", "silver", "gold"
+    format: str  # "parquet", "json", "delta"
+    location: str  # file path or table ref
+    record_count: int = 0
+    schema_fields: list[str] = field(default_factory=list)
+    description: str = ""
+    owner: str = ""
+    tags: list[str] = field(default_factory=list)
+    created_at: datetime = field(default_factory=lambda: datetime.now(tz=UTC))
+    updated_at: datetime = field(default_factory=lambda: datetime.now(tz=UTC))
+    metadata: dict[str, Any] = field(default_factory=dict)
+    version: int = 1
+
+    def to_dict(self) -> dict[str, Any]:
+        d = asdict(self)
+        d["created_at"] = self.created_at.isoformat()
+        d["updated_at"] = self.updated_at.isoformat()
+        return d
+
+
+class DataCatalog:
+    """In-process data catalog backed by an optional JSON file.
+
+    Parameters
+    ----------
+    persist_path:
+        When set, catalog entries are persisted to this JSON file.
+    """
+
+    def __init__(self, persist_path: str | Path | None = None) -> None:
+        self._entries: dict[str, CatalogEntry] = {}
+        self._persist_path = Path(persist_path) if persist_path else None
+        if self._persist_path and self._persist_path.exists():
+            self._load()
+
+    # -- public API ----------------------------------------------------------
+
+    def register(self, entry: CatalogEntry) -> CatalogEntry:
+        """Register or update a dataset entry."""
+        existing = self._entries.get(entry.name)
+        if existing:
+            entry.version = existing.version + 1
+            entry.created_at = existing.created_at
+        entry.updated_at = datetime.now(tz=UTC)
+        self._entries[entry.name] = entry
+        logger.info(
+            "Catalog registered: {} (layer={}, v{})",
+            entry.name, entry.layer, entry.version,
+        )
+        self._save()
+        return entry
+
+    def get(self, name: str) -> CatalogEntry | None:
+        """Retrieve an entry by name."""
+        return self._entries.get(name)
+
+    def search(
+        self,
+        *,
+        layer: str | None = None,
+        tags: list[str] | None = None,
+        owner: str | None = None,
+        name_contains: str | None = None,
+    ) -> list[CatalogEntry]:
+        """Search entries by criteria."""
+        results = list(self._entries.values())
+        if layer:
+            results = [e for e in results if e.layer == layer]
+        if tags:
+            tag_set = set(tags)
+            results = [e for e in results if tag_set.issubset(set(e.tags))]
+        if owner:
+            results = [e for e in results if e.owner == owner]
+        if name_contains:
+            results = [e for e in results if name_contains.lower() in e.name.lower()]
+        return results
+
+    def list_all(self) -> list[CatalogEntry]:
+        """Return all catalog entries."""
+        return list(self._entries.values())
+
+    def delete(self, name: str) -> bool:
+        """Remove an entry by name."""
+        if name in self._entries:
+            del self._entries[name]
+            self._save()
+            return True
+        return False
+
+    def summary(self) -> dict[str, Any]:
+        """High-level catalog statistics."""
+        layers: dict[str, int] = {}
+        formats: dict[str, int] = {}
+        for e in self._entries.values():
+            layers[e.layer] = layers.get(e.layer, 0) + 1
+            formats[e.format] = formats.get(e.format, 0) + 1
+        return {
+            "total_datasets": len(self._entries),
+            "by_layer": layers,
+            "by_format": formats,
+        }
+
+    # -- persistence ---------------------------------------------------------
+
+    def _save(self) -> None:
+        if not self._persist_path:
+            return
+        self._persist_path.parent.mkdir(parents=True, exist_ok=True)
+        data = [e.to_dict() for e in self._entries.values()]
+        self._persist_path.write_text(json.dumps(data, indent=2, default=str))
+
+    def _load(self) -> None:
+        if not self._persist_path or not self._persist_path.exists():
+            return
+        raw = json.loads(self._persist_path.read_text())
+        for item in raw:
+            item.pop("created_at", None)
+            item.pop("updated_at", None)
+            entry = CatalogEntry(**item)
+            self._entries[entry.name] = entry
+        logger.info("Loaded {} catalog entries from {}", len(self._entries), self._persist_path)
dataenginex/lakehouse/partitioning.py
@@ -0,0 +1,99 @@
+"""
+Partitioning strategies for the DEX lakehouse.
+
+``PartitionStrategy`` is an ABC whose subclasses generate path segments
+used by storage backends to organise data into predictable directory trees.
+"""
+
+from __future__ import annotations
+
+import hashlib
+from abc import ABC, abstractmethod
+from datetime import UTC, datetime
+from typing import Any
+
+
+class PartitionStrategy(ABC):
+    """Base class for partitioning strategies."""
+
+    @abstractmethod
+    def partition_key(self, record: dict[str, Any]) -> str:
+        """Return the partition path segment for *record*."""
+        ...
+
+    @abstractmethod
+    def partition_path(self, record: dict[str, Any], base: str = "") -> str:
+        """Return the full relative path (base + partition) for *record*."""
+        ...
+
+
+class DatePartitioner(PartitionStrategy):
+    """Partition by a date field using ``year=…/month=…/day=…`` layout.
+
+    Parameters
+    ----------
+    date_field:
+        Name of the record field containing a date/datetime value.
+    granularity:
+        ``"day"`` (default), ``"month"``, or ``"year"``.
+    """
+
+    def __init__(self, date_field: str = "created_at", granularity: str = "day") -> None:
+        self.date_field = date_field
+        if granularity not in ("day", "month", "year"):
+            raise ValueError(f"granularity must be day/month/year, got {granularity!r}")
+        self.granularity = granularity
+
+    def partition_key(self, record: dict[str, Any]) -> str:
+        dt = self._extract_date(record)
+        parts = [f"year={dt.year}"]
+        if self.granularity in ("month", "day"):
+            parts.append(f"month={dt.month:02d}")
+        if self.granularity == "day":
+            parts.append(f"day={dt.day:02d}")
+        return "/".join(parts)
+
+    def partition_path(self, record: dict[str, Any], base: str = "") -> str:
+        key = self.partition_key(record)
+        return f"{base}/{key}" if base else key
+
+    def _extract_date(self, record: dict[str, Any]) -> datetime:
+        value = record.get(self.date_field)
+        if isinstance(value, datetime):
+            return value
+        if isinstance(value, str):
+            # Try ISO format
+            try:
+                return datetime.fromisoformat(value)
+            except ValueError:
+                pass
+        # Fallback to now
+        return datetime.now(tz=UTC)
+
+
+class HashPartitioner(PartitionStrategy):
+    """Partition by a hash of one or more fields, distributing across *n_buckets*.
+
+    Parameters
+    ----------
+    fields:
+        Record fields whose values are hashed.
+    n_buckets:
+        Number of hash buckets (directories).
+    """
+
+    def __init__(self, fields: list[str], n_buckets: int = 16) -> None:
+        if not fields:
+            raise ValueError("At least one field is required for hash partitioning")
+        self.fields = fields
+        self.n_buckets = max(1, n_buckets)
+
+    def partition_key(self, record: dict[str, Any]) -> str:
+        content = "|".join(str(record.get(f, "")) for f in self.fields)
+        digest = hashlib.md5(content.encode()).hexdigest()  # noqa: S324
+        bucket = int(digest, 16) % self.n_buckets
+        return f"bucket={bucket:04d}"
+
+    def partition_path(self, record: dict[str, Any], base: str = "") -> str:
+        key = self.partition_key(record)
+        return f"{base}/{key}" if base else key
dataenginex/lakehouse/storage.py
@@ -0,0 +1,177 @@
+"""
+Concrete storage backends for the DEX lakehouse.
+
+Both ``ParquetStorage`` and ``JsonStorage`` implement the
+``StorageBackend`` ABC from ``dataenginex.core.medallion_architecture`` so
+they can be used interchangeably by the ``DualStorage`` layer.
+
+``ParquetStorage`` delegates to *pyarrow* when available; otherwise it
+falls back to ``JsonStorage`` with a logged warning.
+"""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Any
+
+from loguru import logger
+
+from dataenginex.core.medallion_architecture import StorageBackend, StorageFormat
+
+# Try importing pyarrow — optional heavyweight dependency
+try:
+    import pyarrow as pa  # type: ignore[import-not-found]
+    import pyarrow.parquet as pq  # type: ignore[import-not-found]
+
+    _HAS_PYARROW = True
+except ImportError:
+    _HAS_PYARROW = False
+
+
+# ---------------------------------------------------------------------------
+# JSON storage (always available)
+# ---------------------------------------------------------------------------
+
+class JsonStorage(StorageBackend):
+    """Simple JSON-file storage for development and testing.
+
+    Each ``write`` call serialises *data* (list of dicts) as a JSON array.
+    """
+
+    def __init__(self, base_path: str = "data") -> None:
+        self.base_path = Path(base_path)
+        self.base_path.mkdir(parents=True, exist_ok=True)
+        logger.info("JsonStorage initialised at {}", self.base_path)
+
+    def write(
+        self,
+        data: Any,
+        path: str,
+        format: StorageFormat = StorageFormat.PARQUET,
+    ) -> bool:
+        try:
+            full = self.base_path / f"{path}.json"
+            full.parent.mkdir(parents=True, exist_ok=True)
+            records = self._normalise(data)
+            full.write_text(json.dumps(records, indent=2, default=str))
+            logger.info("Wrote {} records to {}", len(records), full)
+            return True
+        except Exception as exc:
+            logger.error("JsonStorage write failed: {}", exc)
+            return False
+
+    def read(self, path: str, format: StorageFormat = StorageFormat.PARQUET) -> Any:
+        try:
+            full = self.base_path / f"{path}.json"
+            if not full.exists():
+                logger.warning("File not found: {}", full)
+                return None
+            return json.loads(full.read_text())
+        except Exception as exc:
+            logger.error("JsonStorage read failed: {}", exc)
+            return None
+
+    def delete(self, path: str) -> bool:
+        try:
+            full = self.base_path / f"{path}.json"
+            if full.exists():
+                full.unlink()
+                logger.info("Deleted {}", full)
+            return True
+        except Exception as exc:
+            logger.error("JsonStorage delete failed: {}", exc)
+            return False
+
+    @staticmethod
+    def _normalise(data: Any) -> list[dict[str, Any]]:
+        if isinstance(data, list):
+            return data
+        if isinstance(data, dict):
+            return [data]
+        return [{"value": data}]
+
+
+# ---------------------------------------------------------------------------
+# Parquet storage (requires pyarrow)
+# ---------------------------------------------------------------------------
+
+class ParquetStorage(StorageBackend):
+    """Parquet file storage backed by *pyarrow*.
+
+    Falls back to ``JsonStorage`` when *pyarrow* is not installed.
+    """
+
+    def __init__(self, base_path: str = "data", compression: str = "snappy") -> None:
+        self.base_path = Path(base_path)
+        self.base_path.mkdir(parents=True, exist_ok=True)
+        self.compression = compression
+
+        if _HAS_PYARROW:
+            logger.info("ParquetStorage initialised at {} (pyarrow available)", self.base_path)
+        else:
+            logger.warning(
+                "pyarrow not installed — ParquetStorage will use JSON fallback"
+            )
+            self._fallback = JsonStorage(str(self.base_path))
+
+    def write(
+        self,
+        data: Any,
+        path: str,
+        format: StorageFormat = StorageFormat.PARQUET,
+    ) -> bool:
+        if not _HAS_PYARROW:
+            return self._fallback.write(data, path, format)
+
+        try:
+            full = self.base_path / f"{path}.parquet"
+            full.parent.mkdir(parents=True, exist_ok=True)
+            records = self._to_records(data)
+            if not records:
+                logger.warning("No records to write to {}", full)
+                return False
+            table = pa.Table.from_pylist(records)
+            pq.write_table(table, str(full), compression=self.compression)
+            logger.info("Wrote {} records to {}", len(records), full)
+            return True
+        except Exception as exc:
+            logger.error("ParquetStorage write failed: {}", exc)
+            return False
+
+    def read(self, path: str, format: StorageFormat = StorageFormat.PARQUET) -> Any:
+        if not _HAS_PYARROW:
+            return self._fallback.read(path, format)
+
+        try:
+            full = self.base_path / f"{path}.parquet"
+            if not full.exists():
+                logger.warning("Parquet file not found: {}", full)
+                return None
+            table = pq.read_table(str(full))
+            return table.to_pylist()
+        except Exception as exc:
+            logger.error("ParquetStorage read failed: {}", exc)
+            return None
+
+    def delete(self, path: str) -> bool:
+        if not _HAS_PYARROW:
+            return self._fallback.delete(path)
+
+        try:
+            full = self.base_path / f"{path}.parquet"
+            if full.exists():
+                full.unlink()
+                logger.info("Deleted {}", full)
+            return True
+        except Exception as exc:
+            logger.error("ParquetStorage delete failed: {}", exc)
+            return False
+
+    @staticmethod
+    def _to_records(data: Any) -> list[dict[str, Any]]:
+        if isinstance(data, list):
+            return data
+        if isinstance(data, dict):
+            return [data]
+        return []
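A minimal usage sketch of the storage backends added above; the base path and records are illustrative only:

from dataenginex.lakehouse import JsonStorage, ParquetStorage

# Paths and records are illustrative.
records = [{"order_id": "o-123", "amount": 12.5}]

json_store = JsonStorage(base_path="data")  # always available
json_store.write(records, "bronze/orders")  # writes data/bronze/orders.json
print(json_store.read("bronze/orders"))

pq_store = ParquetStorage(base_path="data", compression="snappy")
pq_store.write(records, "silver/orders")    # data/silver/orders.parquet, or JSON if pyarrow is absent
print(pq_store.read("silver/orders"))
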
dataenginex/middleware/__init__.py
@@ -0,0 +1,19 @@
+"""
+Middleware - logging, metrics, tracing, and request handling.
+"""
+
+from .logging_config import APP_VERSION, configure_logging  # noqa: F401
+from .metrics import get_metrics  # noqa: F401
+from .metrics_middleware import PrometheusMetricsMiddleware  # noqa: F401
+from .request_logging import RequestLoggingMiddleware  # noqa: F401
+from .tracing import configure_tracing, instrument_fastapi  # noqa: F401
+
+__all__ = [
+    "configure_logging",
+    "APP_VERSION",
+    "get_metrics",
+    "PrometheusMetricsMiddleware",
+    "RequestLoggingMiddleware",
+    "configure_tracing",
+    "instrument_fastapi",
+]
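A speculative wiring sketch for these exports in a FastAPI application. Only the names come from this diff; the call signatures (zero-argument configure_logging and configure_tracing, Starlette-style middleware classes, instrument_fastapi taking the app instance) are assumptions:

from fastapi import FastAPI

from dataenginex.middleware import (
    PrometheusMetricsMiddleware,
    RequestLoggingMiddleware,
    configure_logging,
    configure_tracing,
    instrument_fastapi,
)

configure_logging()    # assumed: no required arguments
configure_tracing()    # assumed: no required arguments

app = FastAPI()
app.add_middleware(RequestLoggingMiddleware)     # assumed: Starlette-style middleware class
app.add_middleware(PrometheusMetricsMiddleware)  # assumed: Starlette-style middleware class
instrument_fastapi(app)                          # assumed: takes the FastAPI app instance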