etlplus 0.12.4__py3-none-any.whl → 0.12.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
etlplus/file/_imports.py ADDED
@@ -0,0 +1,141 @@
+ """
+ :mod:`etlplus.file._imports` module.
+
+ Shared helpers for optional dependency imports.
+ """
+
+ from __future__ import annotations
+
+ from importlib import import_module
+ from typing import Any
+
+ # SECTION: INTERNAL CONSTANTS =============================================== #
+
+
+ _MODULE_CACHE: dict[str, Any] = {}
+
+
+ # SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+ def _error_message(
+     module_name: str,
+     format_name: str,
+ ) -> str:
+     """
+     Build an import error message for an optional dependency.
+
+     Parameters
+     ----------
+     module_name : str
+         Module name to look up.
+     format_name : str
+         Human-readable format name for templated messages.
+
+     Returns
+     -------
+     str
+         Formatted error message.
+     """
+     return (
+         f'{format_name} support requires '
+         f'optional dependency "{module_name}".\n'
+         f'Install with: pip install {module_name}'
+     )
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def get_optional_module(
+     module_name: str,
+     *,
+     error_message: str,
+ ) -> Any:
+     """
+     Return an optional dependency module, caching on first import.
+
+     Parameters
+     ----------
+     module_name : str
+         Name of the module to import.
+     error_message : str
+         Error message to surface when the module is missing.
+
+     Returns
+     -------
+     Any
+         The imported module.
+
+     Raises
+     ------
+     ImportError
+         If the optional dependency is missing.
+     """
+     cached = _MODULE_CACHE.get(module_name)
+     if cached is not None:  # pragma: no cover - tiny branch
+         return cached
+     try:
+         module = import_module(module_name)
+     except ImportError as e:  # pragma: no cover
+         raise ImportError(error_message) from e
+     _MODULE_CACHE[module_name] = module
+     return module
+
+
+ def get_fastavro() -> Any:
+     """
+     Return the fastavro module, importing it on first use.
+
+     Raises an informative ImportError if the optional dependency is missing.
+
+     Notes
+     -----
+     Prefer :func:`get_optional_module` for new call sites.
+     """
+     return get_optional_module(
+         'fastavro',
+         error_message=_error_message('fastavro', format_name='AVRO'),
+     )
+
+
+ def get_pandas(
+     format_name: str,
+ ) -> Any:
+     """
+     Return the pandas module, importing it on first use.
+
+     Parameters
+     ----------
+     format_name : str
+         Human-readable format name for error messages.
+
+     Returns
+     -------
+     Any
+         The pandas module.
+
+     Notes
+     -----
+     Prefer :func:`get_optional_module` for new call sites.
+     """
+     return get_optional_module(
+         'pandas',
+         error_message=_error_message('pandas', format_name=format_name),
+     )
+
+
+ def get_yaml() -> Any:
+     """
+     Return the PyYAML module, importing it on first use.
+
+     Raises an informative ImportError if the optional dependency is missing.
+
+     Notes
+     -----
+     Prefer :func:`get_optional_module` for new call sites.
+     """
+     return get_optional_module(
+         'yaml',
+         error_message=_error_message('PyYAML', format_name='YAML'),
+     )
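
In practice the shared helper is wrapped once per dependency, so every call site pays the import cost only on first use. A minimal sketch of a new accessor in the same style (openpyxl and get_openpyxl are illustrative here, not part of this release):

    from etlplus.file._imports import get_optional_module

    def get_openpyxl() -> object:
        # Hypothetical accessor mirroring get_fastavro()/get_yaml():
        # the first call imports and caches in _MODULE_CACHE,
        # later calls return the cached module directly.
        return get_optional_module(
            'openpyxl',
            error_message=(
                'XLSX support requires optional dependency "openpyxl".\n'
                'Install with: pip install openpyxl'
            ),
        )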
etlplus/file/_io.py CHANGED
@@ -107,6 +107,7 @@ def write_delimited(path: Path, data: JSONData, *, delimiter: str) -> int:
          return 0
 
      fieldnames = sorted({key for row in rows for key in row})
+     path.parent.mkdir(parents=True, exist_ok=True)
      with path.open('w', encoding='utf-8', newline='') as handle:
          writer = csv.DictWriter(
              handle,
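
The single added line means write_delimited() now creates missing parent directories instead of failing with FileNotFoundError. The idiom in isolation (the path is illustrative):

    from pathlib import Path

    target = Path('out/reports/2024/users.csv')
    # parents=True creates the whole directory chain; exist_ok=True
    # makes the call idempotent when the directory already exists.
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text('id,name\n1,Ada\n', encoding='utf-8')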
etlplus/file/avro.py CHANGED
@@ -10,6 +10,8 @@ from pathlib import Path
  from typing import Any
  from typing import cast
 
+ from etlplus.file._imports import get_fastavro
+
  from ..types import JSONData
  from ..types import JSONDict
  from ..types import JSONList
@@ -27,9 +29,6 @@ __all__ = [
  # SECTION: INTERNAL CONSTANTS =============================================== #
 
 
- _FASTAVRO_CACHE: dict[str, Any] = {}
-
-
  _PRIMITIVE_TYPES: tuple[type, ...] = (
      bool,
      int,
@@ -43,27 +42,6 @@ _PRIMITIVE_TYPES: tuple[type, ...] = (
  # SECTION: INTERNAL FUNCTIONS =============================================== #
 
 
- def _get_fastavro() -> Any:
-     """
-     Return the fastavro module, importing it on first use.
-
-     Raises an informative ImportError if the optional dependency is missing.
-     """
-     mod = _FASTAVRO_CACHE.get('mod')
-     if mod is not None:  # pragma: no cover - tiny branch
-         return mod
-     try:
-         _fastavro = __import__('fastavro')  # type: ignore[assignment]
-     except ImportError as e:  # pragma: no cover
-         raise ImportError(
-             'AVRO support requires optional dependency "fastavro".\n'
-             'Install with: pip install fastavro',
-         ) from e
-     _FASTAVRO_CACHE['mod'] = _fastavro
-
-     return _fastavro
-
-
  def _infer_schema(records: JSONList) -> dict[str, Any]:
      """
      Infer a basic Avro schema from record payloads.
@@ -146,7 +124,7 @@ def read(
      JSONList
          The list of dictionaries read from the AVRO file.
      """
-     fastavro = _get_fastavro()
+     fastavro = get_fastavro()
      with path.open('rb') as handle:
          reader = fastavro.reader(handle)
          return [cast(JSONDict, record) for record in reader]
@@ -175,7 +153,7 @@ def write(
      if not records:
          return 0
 
-     fastavro = _get_fastavro()
+     fastavro = get_fastavro()
      schema = _infer_schema(records)
      parsed_schema = fastavro.parse_schema(schema)
 
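Call sites keep the same shape after the refactor; only the accessor moved into the shared _imports module. A round-trip sketch, assuming fastavro is installed (the schema and records are illustrative):

    from etlplus.file._imports import get_fastavro

    fastavro = get_fastavro()  # raises the templated ImportError if missing
    schema = fastavro.parse_schema({
        'type': 'record',
        'name': 'Row',
        'fields': [{'name': 'id', 'type': 'long'}],
    })
    with open('rows.avro', 'wb') as fo:
        fastavro.writer(fo, schema, [{'id': 1}, {'id': 2}])
    with open('rows.avro', 'rb') as fo:
        print(list(fastavro.reader(fo)))  # [{'id': 1}, {'id': 2}]
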
etlplus/file/core.py CHANGED
@@ -7,25 +7,15 @@ files.
 
  from __future__ import annotations
 
+ import importlib
+ import inspect
  from dataclasses import dataclass
+ from functools import cache
  from pathlib import Path
+ from types import ModuleType
 
  from ..types import JSONData
- from . import avro
- from . import csv
- from . import feather
- from . import gz
- from . import json
- from . import ndjson
- from . import orc
- from . import parquet
- from . import tsv
- from . import txt
- from . import xls
- from . import xlsx
  from . import xml
- from . import yaml
- from . import zip as zip_
  from .enums import FileFormat
  from .enums import infer_file_format_and_compression
@@ -35,6 +25,53 @@ from .enums import infer_file_format_and_compression
  __all__ = ['File']
 
 
+ # SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+ def _accepts_root_tag(handler: object) -> bool:
+     """
+     Return True when ``handler`` supports a ``root_tag`` argument.
+
+     Parameters
+     ----------
+     handler : object
+         Callable to inspect.
+
+     Returns
+     -------
+     bool
+         True if ``root_tag`` is accepted by the handler.
+     """
+     if not callable(handler):
+         return False
+     try:
+         signature = inspect.signature(handler)
+     except (TypeError, ValueError):
+         return False
+     for param in signature.parameters.values():
+         if param.kind is param.VAR_KEYWORD:
+             return True
+     return 'root_tag' in signature.parameters
+
+
+ @cache
+ def _module_for_format(file_format: FileFormat) -> ModuleType:
+     """
+     Import and return the module for ``file_format``.
+
+     Parameters
+     ----------
+     file_format : FileFormat
+         File format enum value.
+
+     Returns
+     -------
+     ModuleType
+         The module implementing IO for the format.
+     """
+     return importlib.import_module(f'{__package__}.{file_format.value}')
+
+
  # SECTION: CLASSES ========================================================== #
 
 
@@ -174,6 +211,53 @@ class File:
          # Leave as None; _ensure_format() will raise on use if needed.
          return None
 
+     def _resolve_handler(self, name: str) -> object:
+         """
+         Resolve a handler from the module for the active file format.
+
+         Parameters
+         ----------
+         name : str
+             Attribute name to resolve (``'read'`` or ``'write'``).
+
+         Returns
+         -------
+         object
+             Callable handler exported by the module.
+
+         Raises
+         ------
+         ValueError
+             If the resolved file format is unsupported.
+         """
+         module = self._resolve_module()
+         try:
+             return getattr(module, name)
+         except AttributeError as e:
+             raise ValueError(
+                 f'Module {module.__name__} does not implement {name}()',
+             ) from e
+
+     def _resolve_module(self) -> ModuleType:
+         """
+         Resolve the IO module for the active file format.
+
+         Returns
+         -------
+         ModuleType
+             The module that implements read/write for the format.
+
+         Raises
+         ------
+         ValueError
+             If the resolved file format is unsupported.
+         """
+         fmt = self._ensure_format()
+         try:
+             return _module_for_format(fmt)
+         except ModuleNotFoundError as e:
+             raise ValueError(f'Unsupported format: {fmt}') from e
+
      # -- Instance Methods -- #
 
      def read(self) -> JSONData:
@@ -187,43 +271,18 @@ class File:
 
          Raises
          ------
-         ValueError
-             If the resolved file format is unsupported.
+         TypeError
+             If the resolved 'read' handler is not callable.
          """
          self._assert_exists()
-         fmt = self._ensure_format()
-         match fmt:
-             case FileFormat.AVRO:
-                 return avro.read(self.path)
-             case FileFormat.CSV:
-                 return csv.read(self.path)
-             case FileFormat.FEATHER:
-                 return feather.read(self.path)
-             case FileFormat.GZ:
-                 return gz.read(self.path)
-             case FileFormat.JSON:
-                 return json.read(self.path)
-             case FileFormat.NDJSON:
-                 return ndjson.read(self.path)
-             case FileFormat.ORC:
-                 return orc.read(self.path)
-             case FileFormat.PARQUET:
-                 return parquet.read(self.path)
-             case FileFormat.TSV:
-                 return tsv.read(self.path)
-             case FileFormat.TXT:
-                 return txt.read(self.path)
-             case FileFormat.XLS:
-                 return xls.read(self.path)
-             case FileFormat.XLSX:
-                 return xlsx.read(self.path)
-             case FileFormat.XML:
-                 return xml.read(self.path)
-             case FileFormat.YAML:
-                 return yaml.read(self.path)
-             case FileFormat.ZIP:
-                 return zip_.read(self.path)
-         raise ValueError(f'Unsupported format: {fmt}')
+         reader = self._resolve_handler('read')
+         if callable(reader):
+             return reader(self.path)
+         else:
+             raise TypeError(
+                 f"'read' handler for format {self.file_format} "
+                 'is not callable',
+             )
 
      def write(
          self,
@@ -249,39 +308,15 @@ class File:
 
          Raises
          ------
-         ValueError
-             If the resolved file format is unsupported.
+         TypeError
+             If the resolved 'write' handler is not callable.
          """
-         fmt = self._ensure_format()
-         match fmt:
-             case FileFormat.AVRO:
-                 return avro.write(self.path, data)
-             case FileFormat.CSV:
-                 return csv.write(self.path, data)
-             case FileFormat.FEATHER:
-                 return feather.write(self.path, data)
-             case FileFormat.GZ:
-                 return gz.write(self.path, data)
-             case FileFormat.JSON:
-                 return json.write(self.path, data)
-             case FileFormat.NDJSON:
-                 return ndjson.write(self.path, data)
-             case FileFormat.ORC:
-                 return orc.write(self.path, data)
-             case FileFormat.PARQUET:
-                 return parquet.write(self.path, data)
-             case FileFormat.TSV:
-                 return tsv.write(self.path, data)
-             case FileFormat.TXT:
-                 return txt.write(self.path, data)
-             case FileFormat.XLS:
-                 return xls.write(self.path, data)
-             case FileFormat.XLSX:
-                 return xlsx.write(self.path, data)
-             case FileFormat.XML:
-                 return xml.write(self.path, data, root_tag=root_tag)
-             case FileFormat.YAML:
-                 return yaml.write(self.path, data)
-             case FileFormat.ZIP:
-                 return zip_.write(self.path, data)
-         raise ValueError(f'Unsupported format: {fmt}')
+         writer = self._resolve_handler('write')
+         if not callable(writer):
+             raise TypeError(
+                 f"'write' handler for format {self.file_format} "
+                 'is not callable',
+             )
+         if _accepts_root_tag(writer):
+             return writer(self.path, data, root_tag=root_tag)
+         return writer(self.path, data)
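
The dispatch rewrite hinges on two small mechanisms shown above: functools.cache memoizes one import per format, and inspect.signature decides whether extra keywords like root_tag may be forwarded. The signature probe in isolation (the handlers are illustrative stand-ins, not etlplus code):

    import inspect

    def write_xml(path, data, *, root_tag='root'):  # accepts the keyword
        ...

    def write_json(path, data):  # does not
        ...

    def accepts_root_tag(handler) -> bool:
        # Mirrors _accepts_root_tag(): an explicit parameter or a
        # **kwargs catch-all both count as support for the keyword.
        params = inspect.signature(handler).parameters
        if any(p.kind is p.VAR_KEYWORD for p in params.values()):
            return True
        return 'root_tag' in params

    assert accepts_root_tag(write_xml)
    assert not accepts_root_tag(write_json)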
etlplus/file/dat.py ADDED
@@ -0,0 +1,66 @@
+ """
+ :mod:`etlplus.file.dat` module.
+
+ Helpers for reading/writing DAT (data) files.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read DAT content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the DAT file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the DAT file.
+     """
+     return stub.read(path, format_name='DAT')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to a DAT file at ``path`` and return the record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the DAT file on disk.
+     data : JSONData
+         Data to write as a DAT file. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the DAT file.
+     """
+     return stub.write(path, data, format_name='DAT')
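
The etlplus.file.stub module that dat.py delegates to is not included in this diff. A minimal implementation consistent with these call sites might look like the sketch below; this is an assumption about the stub's behavior, not its actual source:

    from pathlib import Path

    def read(path: Path, *, format_name: str):
        # Hypothetical: stubbed formats fail loudly until implemented.
        raise NotImplementedError(
            f'{format_name} reading is not implemented yet ({path})',
        )

    def write(path: Path, data, *, format_name: str) -> int:
        # Hypothetical: same contract for writes.
        raise NotImplementedError(
            f'{format_name} writing is not implemented yet ({path})',
        )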
etlplus/file/enums.py CHANGED
@@ -61,21 +61,119 @@ class FileFormat(CoercibleStrEnum):
 
      # -- Constants -- #
 
-     AVRO = 'avro'
-     CSV = 'csv'
-     FEATHER = 'feather'
-     GZ = 'gz'
-     JSON = 'json'
-     NDJSON = 'ndjson'
-     ORC = 'orc'
-     PARQUET = 'parquet'
-     TSV = 'tsv'
-     TXT = 'txt'
-     XLS = 'xls'
-     XLSX = 'xlsx'
-     ZIP = 'zip'
-     XML = 'xml'
-     YAML = 'yaml'
+     # Stubbed / placeholder
+     STUB = 'stub'  # Placeholder format for tests & future connectors
+
+     # Tabular & delimited text
+     CSV = 'csv'  # Comma-Separated Values
+     FWF = 'fwf'  # Fixed-Width Formatted
+     DAT = 'dat'  # Generic data file, often delimited or fixed-width
+     PSV = 'psv'  # Pipe-Separated Values
+     TAB = 'tab'  # Often synonymous with TSV
+     TSV = 'tsv'  # Tab-Separated Values
+     TXT = 'txt'  # Plain text, often delimited or fixed-width
+
+     # Semi-structured text
+     CFG = 'cfg'  # Config-style key-value pairs
+     CONF = 'conf'  # Config-style key-value pairs
+     INI = 'ini'  # INI-style key-value pairs
+     JSON = 'json'  # JavaScript Object Notation
+     NDJSON = 'ndjson'  # Newline-Delimited JSON
+     PROPS = 'properties'  # Java-style key-value pairs
+     TOML = 'toml'  # Tom's Obvious Minimal Language
+     XML = 'xml'  # Extensible Markup Language
+     YAML = 'yaml'  # YAML Ain't Markup Language
+
+     # Columnar / analytics-friendly
+     ARROW = 'arrow'  # Apache Arrow IPC
+     FEATHER = 'feather'  # Apache Arrow Feather
+     ORC = 'orc'  # Optimized Row Columnar; common in Hadoop
+     PARQUET = 'parquet'  # Apache Parquet; common in Big Data
+
+     # Binary serialization & interchange
+     AVRO = 'avro'  # Apache Avro
+     BSON = 'bson'  # Binary JSON; common with MongoDB exports/dumps
+     CBOR = 'cbor'  # Concise Binary Object Representation
+     ION = 'ion'  # Amazon Ion
+     MSGPACK = 'msgpack'  # MessagePack
+     PB = 'pb'  # Protocol Buffers (Google Protobuf)
+     PBF = 'pbf'  # Protocolbuffer Binary Format; often for GIS data
+     PROTO = 'proto'  # Protocol Buffers schema; often in .pb / .bin
+
+     # Databases & embedded storage
+     ACCDB = 'accdb'  # Microsoft Access database file (newer format)
+     DUCKDB = 'duckdb'  # DuckDB database file
+     MDB = 'mdb'  # Microsoft Access database file (older format)
+     SQLITE = 'sqlite'  # SQLite database file
+
+     # Spreadsheets
+     NUMBERS = 'numbers'  # Apple Numbers spreadsheet
+     ODS = 'ods'  # OpenDocument Spreadsheet
+     WKS = 'wks'  # Lotus 1-2-3 spreadsheet
+     XLS = 'xls'  # Microsoft Excel (BIFF); read-only
+     XLSM = 'xlsm'  # Microsoft Excel Macro-Enabled (Open XML)
+     XLSX = 'xlsx'  # Microsoft Excel (Open XML)
+
+     # Statistical / scientific / numeric computing
+     DTA = 'dta'  # Stata data file
+     H5 = 'h5'  # Hierarchical Data Format
+     MAT = 'mat'  # MATLAB data file
+     NC = 'nc'  # NetCDF data file
+     RDA = 'rda'  # RData workspace/object bundle
+     RDS = 'rds'  # R data file
+     SAS7BDAT = 'sas7bdat'  # SAS data file
+     SAV = 'sav'  # SPSS data file
+     SYLK = 'sylk'  # Symbolic Link (SYmbolic LinK)
+     XPT = 'xpt'  # SAS Transport file
+     ZSAV = 'zsav'  # Compressed SPSS data file
+
+     # Time series and financial data
+     CAMT = 'camt'  # ISO 20022 Cash Management messages
+     FXT = 'fxt'  # Forex time series data
+     MT940 = 'mt940'  # SWIFT MT940 bank statement format
+     MT942 = 'mt942'  # SWIFT MT942 interim transaction report format
+     OFX = 'ofx'  # Open Financial Exchange
+     QFX = 'qfx'  # Quicken Financial Exchange
+     QIF = 'qif'  # Quicken Interchange Format
+     QQQ = 'qqq'  # QuantQuote historical data
+     TRR = 'trr'  # Trade and transaction reports
+     TSDB = 'tsdb'  # Time series database export
+
+     # Geospatial data
+     GEOJSON = 'geojson'  # GeoJSON
+     GEOTIFF = 'geotiff'  # GeoTIFF
+     GML = 'gml'  # Geography Markup Language
+     GPKG = 'gpkg'  # GeoPackage
+     GPX = 'gpx'  # GPS Exchange Format
+     KML = 'kml'  # Keyhole Markup Language
+     LAS = 'las'  # LiDAR Aerial Survey
+     LAZ = 'laz'  # LASzip (compressed LAS)
+     OSM = 'osm'  # OpenStreetMap XML Data
+     SHP = 'shp'  # ESRI Shapefile
+     WKB = 'wkb'  # Well-Known Binary
+     WKT = 'wkt'  # Well-Known Text
+
+     # Logs & event streams
+     EVT = 'evt'  # Windows Event Trace Log (pre-Vista)
+     EVTX = 'evtx'  # Windows Event Trace Log (Vista and later)
+     LOG = 'log'  # Generic log file
+     PCAP = 'pcap'  # Packet Capture file
+     PCAPPNG = 'pcapng'  # Packet Capture Next Generation file
+     SLOG = 'slog'  # Structured log file
+     W3CLOG = 'w3clog'  # W3C Extended Log File Format
+
+     # “Data archives” & packaging
+     _7Z = '7z'  # 7-Zip archive
+     GZ = 'gz'  # Gzip-compressed file
+     JAR = 'jar'  # Java archive
+     RAR = 'rar'  # RAR archive
+     SIT = 'sit'  # StuffIt archive
+     SITX = 'sitx'  # StuffIt X archive
+     TAR = 'tar'  # TAR archive
+     TGZ = 'tgz'  # Gzip-compressed TAR archive
+     ZIP = 'zip'  # ZIP archive
+
+     # Domain-specific & less common
 
      # -- Class Methods -- #
 
@@ -104,6 +202,7 @@ class FileFormat(CoercibleStrEnum):
      '.orc': 'orc',
      '.parquet': 'parquet',
      '.pq': 'parquet',
+     '.stub': 'stub',
      '.tsv': 'tsv',
      '.txt': 'txt',
      '.xls': 'xls',
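
Combined with the importlib dispatch added in core.py, registering a format now takes two edits here: a new enum member and an extension mapping. The lookup chain, sketched (the coercion behavior is assumed from the CoercibleStrEnum base):

    from etlplus.file.enums import FileFormat

    fmt = FileFormat('dat')                    # str-valued member DAT
    module_path = f'etlplus.file.{fmt.value}'  # -> 'etlplus.file.dat'
    # core._module_for_format(fmt) imports exactly this module and caches
    # it with functools.cache; a missing module surfaces as ValueError
    # ('Unsupported format: ...') in File._resolve_module().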
etlplus/file/feather.py CHANGED
@@ -11,8 +11,8 @@ from typing import cast
 
  from ..types import JSONData
  from ..types import JSONList
+ from ._imports import get_pandas
  from ._io import normalize_records
- from ._pandas import get_pandas
 
  # SECTION: EXPORTS ========================================================== #