etlplus 0.12.1__py3-none-any.whl → 0.12.10__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- etlplus/file/_imports.py +141 -0
- etlplus/file/_io.py +121 -0
- etlplus/file/accdb.py +78 -0
- etlplus/file/arrow.py +78 -0
- etlplus/file/avro.py +46 -68
- etlplus/file/bson.py +77 -0
- etlplus/file/cbor.py +78 -0
- etlplus/file/cfg.py +79 -0
- etlplus/file/conf.py +80 -0
- etlplus/file/core.py +119 -84
- etlplus/file/csv.py +17 -29
- etlplus/file/dat.py +78 -0
- etlplus/file/duckdb.py +78 -0
- etlplus/file/enums.py +114 -15
- etlplus/file/feather.py +18 -51
- etlplus/file/fwf.py +77 -0
- etlplus/file/ini.py +79 -0
- etlplus/file/ion.py +78 -0
- etlplus/file/json.py +13 -1
- etlplus/file/log.py +78 -0
- etlplus/file/mdb.py +78 -0
- etlplus/file/msgpack.py +78 -0
- etlplus/file/ndjson.py +14 -15
- etlplus/file/orc.py +18 -49
- etlplus/file/parquet.py +18 -51
- etlplus/file/pb.py +78 -0
- etlplus/file/pbf.py +77 -0
- etlplus/file/properties.py +78 -0
- etlplus/file/proto.py +77 -0
- etlplus/file/psv.py +79 -0
- etlplus/file/sqlite.py +78 -0
- etlplus/file/stub.py +84 -0
- etlplus/file/tab.py +81 -0
- etlplus/file/toml.py +78 -0
- etlplus/file/tsv.py +18 -29
- etlplus/file/txt.py +13 -10
- etlplus/file/xls.py +4 -48
- etlplus/file/xlsx.py +5 -48
- etlplus/file/xml.py +12 -1
- etlplus/file/yaml.py +15 -44
- {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/METADATA +119 -1
- {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/RECORD +46 -21
- {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/WHEEL +0 -0
- {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/entry_points.txt +0 -0
- {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/licenses/LICENSE +0 -0
- {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/top_level.txt +0 -0
etlplus/file/cbor.py
ADDED
```diff
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.cbor` module.
+
+Helpers for reading/writing Concise Binary Object Representation (CBOR) files.
+
+Notes
+-----
+- A CBOR file is a binary data format designed for small code size and message
+  size, suitable for constrained environments.
+- Common cases:
+    - IoT data interchange.
+    - Efficient data serialization.
+    - Storage of structured data in a compact binary form.
+- Rule of thumb:
+    - If the file follows the CBOR specification, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read CBOR content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CBOR file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the CBOR file.
+    """
+    return stub.read(path, format_name='CBOR')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to CBOR at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CBOR file on disk.
+    data : JSONData
+        Data to write as CBOR. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the CBOR file.
+    """
+    return stub.write(path, data, format_name='CBOR')
```
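All of the new format modules in this release (accdb, arrow, bson, cbor, cfg, conf, dat, duckdb, and the rest) share this same skeleton: a docstring plus `read`/`write` functions that delegate to the new `etlplus/file/stub.py`, which is added in this diff (+84 lines) but not shown in this section. A minimal sketch of what that stub plausibly looks like, assuming the placeholder simply rejects calls until a real implementation lands; the exception type and messages are guesses, not the package's actual code:

```python
"""Hypothetical sketch of etlplus/file/stub.py; not the actual +84-line module."""
from pathlib import Path

from ..types import JSONData
from ..types import JSONList


def read(path: Path, *, format_name: str) -> JSONList:
    # Assumed placeholder behavior: fail loudly until a real parser exists.
    raise NotImplementedError(
        f'Reading {format_name} files is not supported yet: {path}',
    )


def write(path: Path, data: JSONData, *, format_name: str) -> int:
    # Assumed placeholder behavior: fail loudly until a real writer exists.
    raise NotImplementedError(
        f'Writing {format_name} files is not supported yet: {path}',
    )
```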
etlplus/file/cfg.py
ADDED
```diff
@@ -0,0 +1,79 @@
+"""
+:mod:`etlplus.file.cfg` module.
+
+Helpers for reading/writing config (CFG) files.
+
+Notes
+-----
+- A “CFG-formatted” file is a configuration file that may use various
+  syntaxes, such as INI, YAML, or custom formats.
+- Common cases:
+    - INI-style key-value pairs with sections (such as in Python ecosystems,
+      using ``configparser``).
+    - YAML-like structures with indentation.
+    - Custom formats specific to certain applications.
+- Rule of thumb:
+    - If the file follows a standard format like INI or YAML, consider using
+      dedicated parsers.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read CFG content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CFG file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the CFG file.
+    """
+    return stub.read(path, format_name='CFG')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to CFG file at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CFG file on disk.
+    data : JSONData
+        Data to write as CFG file. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the CFG file.
+    """
+    return stub.write(path, data, format_name='CFG')
```
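The docstring's rule of thumb points at dedicated parsers for INI-style content. For reference, a sketch of what a real CFG reader could look like using the stdlib ``configparser``, shaped to the same list-of-dictionaries contract the stub promises; this is an illustration, not code from the package:

```python
import configparser
from pathlib import Path


def read_ini_style(path: Path) -> list[dict[str, object]]:
    # One dict per section, keeping the section name alongside its keys,
    # so the result matches the JSONList shape that stub.read() documents.
    parser = configparser.ConfigParser()
    parser.read(path, encoding='utf-8')
    return [
        {'section': section, **dict(parser[section])}
        for section in parser.sections()
    ]
```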
etlplus/file/conf.py
ADDED
```diff
@@ -0,0 +1,80 @@
+"""
+:mod:`etlplus.file.conf` module.
+
+Helpers for reading/writing config (CONF) files.
+
+Notes
+-----
+- A “CONF-formatted” file is a configuration file that may use various
+  syntaxes, such as INI, YAML, or custom formats.
+- Common cases:
+    - INI-style key-value pairs with sections.
+    - YAML-like structures with indentation.
+    - Custom formats specific to certain applications (such as Unix-like
+      systems, where ``.conf`` is a strong convention for "This is a
+      configuration file").
+- Rule of thumb:
+    - If the file follows a standard format like INI or YAML, consider using
+      dedicated parsers.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read CONF content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CONF file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the CONF file.
+    """
+    return stub.read(path, format_name='CONF')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to CONF at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CONF file on disk.
+    data : JSONData
+        Data to write as CONF. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the CONF file.
+    """
+    return stub.write(path, data, format_name='CONF')
```
etlplus/file/core.py
CHANGED
```diff
@@ -7,25 +7,15 @@ files.
 
 from __future__ import annotations
 
+import importlib
+import inspect
 from dataclasses import dataclass
+from functools import cache
 from pathlib import Path
+from types import ModuleType
 
 from ..types import JSONData
-from . import avro
-from . import csv
-from . import feather
-from . import gz
-from . import json
-from . import ndjson
-from . import orc
-from . import parquet
-from . import tsv
-from . import txt
-from . import xls
-from . import xlsx
 from . import xml
-from . import yaml
-from . import zip
 from .enums import FileFormat
 from .enums import infer_file_format_and_compression
 
@@ -35,6 +25,53 @@ from .enums import infer_file_format_and_compression
 __all__ = ['File']
 
 
+# SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+def _accepts_root_tag(handler: object) -> bool:
+    """
+    Return True when ``handler`` supports a ``root_tag`` argument.
+
+    Parameters
+    ----------
+    handler : object
+        Callable to inspect.
+
+    Returns
+    -------
+    bool
+        True if ``root_tag`` is accepted by the handler.
+    """
+    if not callable(handler):
+        return False
+    try:
+        signature = inspect.signature(handler)
+    except (TypeError, ValueError):
+        return False
+    for param in signature.parameters.values():
+        if param.kind is param.VAR_KEYWORD:
+            return True
+    return 'root_tag' in signature.parameters
+
+
+@cache
+def _module_for_format(file_format: FileFormat) -> ModuleType:
+    """
+    Import and return the module for ``file_format``.
+
+    Parameters
+    ----------
+    file_format : FileFormat
+        File format enum value.
+
+    Returns
+    -------
+    ModuleType
+        The module implementing IO for the format.
+    """
+    return importlib.import_module(f'{__package__}.{file_format.value}')
+
+
 # SECTION: CLASSES ========================================================== #
 
 
@@ -174,6 +211,53 @@ class File:
         # Leave as None; _ensure_format() will raise on use if needed.
         return None
 
+    def _resolve_handler(self, name: str) -> object:
+        """
+        Resolve a handler from the module for the active file format.
+
+        Parameters
+        ----------
+        name : str
+            Attribute name to resolve (``'read'`` or ``'write'``).
+
+        Returns
+        -------
+        object
+            Callable handler exported by the module.
+
+        Raises
+        ------
+        ValueError
+            If the resolved file format is unsupported.
+        """
+        module = self._resolve_module()
+        try:
+            return getattr(module, name)
+        except AttributeError as e:
+            raise ValueError(
+                f'Module {module.__name__} does not implement {name}()',
+            ) from e
+
+    def _resolve_module(self) -> ModuleType:
+        """
+        Resolve the IO module for the active file format.
+
+        Returns
+        -------
+        ModuleType
+            The module that implements read/write for the format.
+
+        Raises
+        ------
+        ValueError
+            If the resolved file format is unsupported.
+        """
+        fmt = self._ensure_format()
+        try:
+            return _module_for_format(fmt)
+        except ModuleNotFoundError as e:
+            raise ValueError(f'Unsupported format: {fmt}') from e
+
     # -- Instance Methods -- #
 
     def read(self) -> JSONData:
@@ -187,43 +271,18 @@ class File:
 
         Raises
        ------
-        ValueError
-            If the resolved file format is unsupported.
+        TypeError
+            If the resolved 'read' handler is not callable.
         """
         self._assert_exists()
-        fmt = self._ensure_format()
-        match fmt:
-            case FileFormat.AVRO:
-                return avro.read(self.path)
-            case FileFormat.CSV:
-                return csv.read(self.path)
-            case FileFormat.FEATHER:
-                return feather.read(self.path)
-            case FileFormat.GZ:
-                return gz.read(self.path)
-            case FileFormat.JSON:
-                return json.read(self.path)
-            case FileFormat.NDJSON:
-                return ndjson.read(self.path)
-            case FileFormat.ORC:
-                return orc.read(self.path)
-            case FileFormat.PARQUET:
-                return parquet.read(self.path)
-            case FileFormat.TSV:
-                return tsv.read(self.path)
-            case FileFormat.TXT:
-                return txt.read(self.path)
-            case FileFormat.XLS:
-                return xls.read(self.path)
-            case FileFormat.XLSX:
-                return xlsx.read(self.path)
-            case FileFormat.XML:
-                return xml.read(self.path)
-            case FileFormat.YAML:
-                return yaml.read(self.path)
-            case FileFormat.ZIP:
-                return zip.read(self.path)
-        raise ValueError(f'Unsupported format: {fmt}')
+        reader = self._resolve_handler('read')
+        if callable(reader):
+            return reader(self.path)
+        else:
+            raise TypeError(
+                f"'read' handler for format {self.file_format} "
+                'is not callable',
+            )
 
     def write(
         self,
@@ -249,39 +308,15 @@ class File:
 
         Raises
         ------
-        ValueError
-            If the resolved file format is unsupported.
+        TypeError
+            If the resolved 'write' handler is not callable.
         """
-        fmt = self._ensure_format()
-        match fmt:
-            case FileFormat.AVRO:
-                return avro.write(self.path, data)
-            case FileFormat.CSV:
-                return csv.write(self.path, data)
-            case FileFormat.FEATHER:
-                return feather.write(self.path, data)
-            case FileFormat.GZ:
-                return gz.write(self.path, data)
-            case FileFormat.JSON:
-                return json.write(self.path, data)
-            case FileFormat.NDJSON:
-                return ndjson.write(self.path, data)
-            case FileFormat.ORC:
-                return orc.write(self.path, data)
-            case FileFormat.PARQUET:
-                return parquet.write(self.path, data)
-            case FileFormat.TSV:
-                return tsv.write(self.path, data)
-            case FileFormat.TXT:
-                return txt.write(self.path, data)
-            case FileFormat.XLS:
-                return xls.write(self.path, data)
-            case FileFormat.XLSX:
-                return xlsx.write(self.path, data)
-            case FileFormat.XML:
-                return xml.write(self.path, data, root_tag=root_tag)
-            case FileFormat.YAML:
-                return yaml.write(self.path, data)
-            case FileFormat.ZIP:
-                return zip.write(self.path, data)
-        raise ValueError(f'Unsupported format: {fmt}')
+        writer = self._resolve_handler('write')
+        if not callable(writer):
+            raise TypeError(
+                f"'write' handler for format {self.file_format} "
+                'is not callable',
+            )
+        if _accepts_root_tag(writer):
+            return writer(self.path, data, root_tag=root_tag)
+        return writer(self.path, data)
```
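The core change here swaps the hard-coded `match` dispatch for dynamic imports: `_module_for_format()` lazily imports `etlplus.file.<format>` on first use (memoized via `functools.cache`), and `_accepts_root_tag()` inspects a handler's signature so format-specific keyword arguments like XML's `root_tag` are only passed where supported. A self-contained illustration of the same pattern, using a hypothetical `codecs_pkg` package rather than etlplus itself:

```python
import importlib
import inspect
from functools import cache
from types import ModuleType


@cache
def module_for(format_name: str, package: str = 'codecs_pkg') -> ModuleType:
    # Import the per-format module lazily; @cache memoizes the lookup so
    # repeated dispatches skip the import machinery.
    return importlib.import_module(f'{package}.{format_name}')


def accepts_kwarg(handler: object, name: str) -> bool:
    # A handler takes the keyword if it declares it explicitly or absorbs
    # arbitrary keywords via **kwargs.
    if not callable(handler):
        return False
    try:
        params = inspect.signature(handler).parameters
    except (TypeError, ValueError):  # some builtins expose no signature
        return False
    if any(p.kind is p.VAR_KEYWORD for p in params.values()):
        return True
    return name in params
```

The practical win over the old `match` block: adding a format becomes a matter of dropping a module with `read`/`write` into the package, with no edits to `core.py` and no eager imports of every format's dependencies.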
etlplus/file/csv.py
CHANGED
```diff
@@ -1,18 +1,29 @@
 """
 :mod:`etlplus.file.csv` module.
 
-Helpers for reading/writing CSV files.
+Helpers for reading/writing Comma-Separated Values (CSV) files.
+
+Notes
+-----
+- A CSV file is a plain text file that uses commas to separate values.
+- Common cases:
+    - Each line in the file represents a single record.
+    - The first line often contains headers that define the column names.
+    - Values may be enclosed in quotes, especially if they contain commas
+      or special characters.
+- Rule of thumb:
+    - If the file follows the CSV specification, use this module for
+      reading and writing.
 """
 
 from __future__ import annotations
 
-import csv
 from pathlib import Path
-from typing import cast
 
 from ..types import JSONData
-from ..types import JSONDict
 from ..types import JSONList
+from ._io import read_delimited
+from ._io import write_delimited
 
 # SECTION: EXPORTS ========================================================== #
 
@@ -42,14 +53,7 @@ def read(
     JSONList
         The list of dictionaries read from the CSV file.
     """
-    with path.open('r', encoding='utf-8', newline='') as handle:
-        reader: csv.DictReader[str] = csv.DictReader(handle)
-        rows: JSONList = []
-        for row in reader:
-            if not any(row.values()):
-                continue
-            rows.append(cast(JSONDict, dict(row)))
-    return rows
+    return read_delimited(path, delimiter=',')
 
 
 def write(
@@ -72,20 +76,4 @@ def write(
     int
         The number of rows written to the CSV file.
     """
-
-    if isinstance(data, list):
-        rows = [row for row in data if isinstance(row, dict)]
-    else:
-        rows = [data]
-
-    if not rows:
-        return 0
-
-    fieldnames = sorted({key for row in rows for key in row})
-    with path.open('w', encoding='utf-8', newline='') as handle:
-        writer = csv.DictWriter(handle, fieldnames=fieldnames)
-        writer.writeheader()
-        for row in rows:
-            writer.writerow({field: row.get(field) for field in fieldnames})
-
-    return len(rows)
+    return write_delimited(path, data, delimiter=',')
```
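csv.py (and, per the file list, tsv.py and the new psv.py and tab.py) now routes through shared helpers in the new `etlplus/file/_io.py` (+121 lines), which is not shown in this section. A plausible sketch of those helpers, reconstructed from the inline code they replace; the real signatures and edge-case handling may differ:

```python
"""Plausible sketch of the shared helpers in etlplus/file/_io.py (not shown)."""
import csv
from pathlib import Path
from typing import cast

from ..types import JSONData
from ..types import JSONDict
from ..types import JSONList


def read_delimited(path: Path, *, delimiter: str) -> JSONList:
    # Same behavior as the removed inline reader: skip rows with no values.
    with path.open('r', encoding='utf-8', newline='') as handle:
        reader = csv.DictReader(handle, delimiter=delimiter)
        return [
            cast(JSONDict, dict(row))
            for row in reader
            if any(row.values())
        ]


def write_delimited(path: Path, data: JSONData, *, delimiter: str) -> int:
    # Same behavior as the removed inline writer: normalize to dict rows,
    # take the sorted union of keys as the header, return rows written.
    if isinstance(data, list):
        rows = [row for row in data if isinstance(row, dict)]
    else:
        rows = [data]
    if not rows:
        return 0
    fieldnames = sorted({key for row in rows for key in row})
    with path.open('w', encoding='utf-8', newline='') as handle:
        writer = csv.DictWriter(
            handle, fieldnames=fieldnames, delimiter=delimiter,
        )
        writer.writeheader()
        for row in rows:
            writer.writerow({field: row.get(field) for field in fieldnames})
    return len(rows)
```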
etlplus/file/dat.py
ADDED
```diff
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.dat` module.
+
+Helpers for reading/writing data (DAT) files.
+
+Notes
+-----
+- A “DAT-formatted” file is a generic data file that may use various
+  delimiters or fixed-width formats.
+- Common cases:
+    - Delimited text files (e.g., CSV, TSV).
+    - Fixed-width formatted files.
+    - Custom formats specific to certain applications.
+- Rule of thumb:
+    - If the file does not follow a specific standard format, use this module
+      for reading and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read DAT content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DAT file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the DAT file.
+    """
+    return stub.read(path, format_name='DAT')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to DAT file at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DAT file on disk.
+    data : JSONData
+        Data to write as DAT file. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the DAT file.
+    """
+    return stub.write(path, data, format_name='DAT')
```
etlplus/file/duckdb.py
ADDED
```diff
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.duckdb` module.
+
+Helpers for reading/writing DuckDB database (DUCKDB) files.
+
+Notes
+-----
+- A DUCKDB file is a self-contained, serverless database file format used by
+  DuckDB.
+- Common cases:
+    - Analytical data storage and processing.
+    - Embedded database applications.
+    - Fast querying of large datasets.
+- Rule of thumb:
+    - If the file follows the DUCKDB specification, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read DUCKDB content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DUCKDB file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the DUCKDB file.
+    """
+    return stub.read(path, format_name='DUCKDB')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to DUCKDB at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DUCKDB file on disk.
+    data : JSONData
+        Data to write as DUCKDB. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the DUCKDB file.
+    """
+    return stub.write(path, data, format_name='DUCKDB')
```