etlplus 0.12.3__py3-none-any.whl → 0.12.12__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published in their public registries.
- etlplus/file/_imports.py +141 -0
- etlplus/file/_io.py +1 -0
- etlplus/file/accdb.py +78 -0
- etlplus/file/arrow.py +78 -0
- etlplus/file/avro.py +17 -27
- etlplus/file/bson.py +77 -0
- etlplus/file/cbor.py +78 -0
- etlplus/file/cfg.py +79 -0
- etlplus/file/conf.py +80 -0
- etlplus/file/core.py +119 -84
- etlplus/file/csv.py +13 -1
- etlplus/file/dat.py +78 -0
- etlplus/file/dta.py +77 -0
- etlplus/file/duckdb.py +78 -0
- etlplus/file/enums.py +120 -15
- etlplus/file/feather.py +14 -2
- etlplus/file/fwf.py +77 -0
- etlplus/file/hbs.py +78 -0
- etlplus/file/hdf5.py +78 -0
- etlplus/file/ini.py +79 -0
- etlplus/file/ion.py +78 -0
- etlplus/file/jinja2.py +78 -0
- etlplus/file/json.py +13 -1
- etlplus/file/log.py +78 -0
- etlplus/file/mat.py +78 -0
- etlplus/file/mdb.py +78 -0
- etlplus/file/msgpack.py +78 -0
- etlplus/file/mustache.py +78 -0
- etlplus/file/nc.py +78 -0
- etlplus/file/ndjson.py +14 -15
- etlplus/file/numbers.py +75 -0
- etlplus/file/ods.py +79 -0
- etlplus/file/orc.py +14 -2
- etlplus/file/parquet.py +14 -2
- etlplus/file/pb.py +78 -0
- etlplus/file/pbf.py +77 -0
- etlplus/file/properties.py +78 -0
- etlplus/file/proto.py +77 -0
- etlplus/file/psv.py +79 -0
- etlplus/file/rda.py +78 -0
- etlplus/file/rds.py +78 -0
- etlplus/file/sas7bdat.py +78 -0
- etlplus/file/sav.py +77 -0
- etlplus/file/sqlite.py +78 -0
- etlplus/file/stub.py +84 -0
- etlplus/file/sylk.py +77 -0
- etlplus/file/tab.py +81 -0
- etlplus/file/toml.py +78 -0
- etlplus/file/tsv.py +14 -1
- etlplus/file/txt.py +13 -10
- etlplus/file/vm.py +78 -0
- etlplus/file/wks.py +77 -0
- etlplus/file/xls.py +1 -1
- etlplus/file/xlsm.py +79 -0
- etlplus/file/xlsx.py +1 -1
- etlplus/file/xml.py +12 -1
- etlplus/file/xpt.py +78 -0
- etlplus/file/yaml.py +15 -44
- etlplus/file/zsav.py +77 -0
- {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/METADATA +119 -1
- {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/RECORD +65 -23
- etlplus/file/_pandas.py +0 -58
- {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/WHEEL +0 -0
- {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/entry_points.txt +0 -0
- {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/licenses/LICENSE +0 -0
- {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/top_level.txt +0 -0
etlplus/file/core.py
CHANGED
@@ -7,25 +7,15 @@ files.
 
 from __future__ import annotations
 
+import importlib
+import inspect
 from dataclasses import dataclass
+from functools import cache
 from pathlib import Path
+from types import ModuleType
 
 from ..types import JSONData
-from . import avro
-from . import csv
-from . import feather
-from . import gz
-from . import json
-from . import ndjson
-from . import orc
-from . import parquet
-from . import tsv
-from . import txt
-from . import xls
-from . import xlsx
 from . import xml
-from . import yaml
-from . import zip as zip_
 from .enums import FileFormat
 from .enums import infer_file_format_and_compression
 
@@ -35,6 +25,53 @@ from .enums import infer_file_format_and_compression
 __all__ = ['File']
 
 
+# SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+def _accepts_root_tag(handler: object) -> bool:
+    """
+    Return True when ``handler`` supports a ``root_tag`` argument.
+
+    Parameters
+    ----------
+    handler : object
+        Callable to inspect.
+
+    Returns
+    -------
+    bool
+        True if ``root_tag`` is accepted by the handler.
+    """
+    if not callable(handler):
+        return False
+    try:
+        signature = inspect.signature(handler)
+    except (TypeError, ValueError):
+        return False
+    for param in signature.parameters.values():
+        if param.kind is param.VAR_KEYWORD:
+            return True
+    return 'root_tag' in signature.parameters
+
+
+@cache
+def _module_for_format(file_format: FileFormat) -> ModuleType:
+    """
+    Import and return the module for ``file_format``.
+
+    Parameters
+    ----------
+    file_format : FileFormat
+        File format enum value.
+
+    Returns
+    -------
+    ModuleType
+        The module implementing IO for the format.
+    """
+    return importlib.import_module(f'{__package__}.{file_format.value}')
+
+
 # SECTION: CLASSES ========================================================== #
 
 
@@ -174,6 +211,53 @@ class File:
         # Leave as None; _ensure_format() will raise on use if needed.
         return None
 
+    def _resolve_handler(self, name: str) -> object:
+        """
+        Resolve a handler from the module for the active file format.
+
+        Parameters
+        ----------
+        name : str
+            Attribute name to resolve (``'read'`` or ``'write'``).
+
+        Returns
+        -------
+        object
+            Callable handler exported by the module.
+
+        Raises
+        ------
+        ValueError
+            If the resolved file format is unsupported.
+        """
+        module = self._resolve_module()
+        try:
+            return getattr(module, name)
+        except AttributeError as e:
+            raise ValueError(
+                f'Module {module.__name__} does not implement {name}()',
+            ) from e
+
+    def _resolve_module(self) -> ModuleType:
+        """
+        Resolve the IO module for the active file format.
+
+        Returns
+        -------
+        ModuleType
+            The module that implements read/write for the format.
+
+        Raises
+        ------
+        ValueError
+            If the resolved file format is unsupported.
+        """
+        fmt = self._ensure_format()
+        try:
+            return _module_for_format(fmt)
+        except ModuleNotFoundError as e:
+            raise ValueError(f'Unsupported format: {fmt}') from e
+
     # -- Instance Methods -- #
 
     def read(self) -> JSONData:
@@ -187,43 +271,18 @@ class File:
 
         Raises
         ------
-        ValueError
-            If the resolved file format is unsupported.
+        TypeError
+            If the resolved 'read' handler is not callable.
         """
         self._assert_exists()
-        fmt = self._ensure_format()
-        match fmt:
-            case FileFormat.AVRO:
-                return avro.read(self.path)
-            case FileFormat.CSV:
-                return csv.read(self.path)
-            case FileFormat.FEATHER:
-                return feather.read(self.path)
-            case FileFormat.GZ:
-                return gz.read(self.path)
-            case FileFormat.JSON:
-                return json.read(self.path)
-            case FileFormat.NDJSON:
-                return ndjson.read(self.path)
-            case FileFormat.ORC:
-                return orc.read(self.path)
-            case FileFormat.PARQUET:
-                return parquet.read(self.path)
-            case FileFormat.TSV:
-                return tsv.read(self.path)
-            case FileFormat.TXT:
-                return txt.read(self.path)
-            case FileFormat.XLS:
-                return xls.read(self.path)
-            case FileFormat.XLSX:
-                return xlsx.read(self.path)
-            case FileFormat.XML:
-                return xml.read(self.path)
-            case FileFormat.YAML:
-                return yaml.read(self.path)
-            case FileFormat.ZIP:
-                return zip_.read(self.path)
-        raise ValueError(f'Unsupported format: {fmt}')
+        reader = self._resolve_handler('read')
+        if callable(reader):
+            return reader(self.path)
+        else:
+            raise TypeError(
+                f"'read' handler for format {self.file_format} "
+                'is not callable',
+            )
 
     def write(
         self,
@@ -249,39 +308,15 @@ class File:
 
         Raises
        ------
-        ValueError
-            If the resolved file format is unsupported.
+        TypeError
+            If the resolved 'write' handler is not callable.
        """
-        fmt = self._ensure_format()
-        match fmt:
-            case FileFormat.AVRO:
-                return avro.write(self.path, data)
-            case FileFormat.CSV:
-                return csv.write(self.path, data)
-            case FileFormat.FEATHER:
-                return feather.write(self.path, data)
-            case FileFormat.GZ:
-                return gz.write(self.path, data)
-            case FileFormat.JSON:
-                return json.write(self.path, data)
-            case FileFormat.NDJSON:
-                return ndjson.write(self.path, data)
-            case FileFormat.ORC:
-                return orc.write(self.path, data)
-            case FileFormat.PARQUET:
-                return parquet.write(self.path, data)
-            case FileFormat.TSV:
-                return tsv.write(self.path, data)
-            case FileFormat.TXT:
-                return txt.write(self.path, data)
-            case FileFormat.XLS:
-                return xls.write(self.path, data)
-            case FileFormat.XLSX:
-                return xlsx.write(self.path, data)
-            case FileFormat.XML:
-                return xml.write(self.path, data, root_tag=root_tag)
-            case FileFormat.YAML:
-                return yaml.write(self.path, data)
-            case FileFormat.ZIP:
-                return zip_.write(self.path, data)
-        raise ValueError(f'Unsupported format: {fmt}')
+        writer = self._resolve_handler('write')
+        if not callable(writer):
+            raise TypeError(
+                f"'write' handler for format {self.file_format} "
+                'is not callable',
+            )
+        if _accepts_root_tag(writer):
+            return writer(self.path, data, root_tag=root_tag)
+        return writer(self.path, data)
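Taken together, these hunks replace the hard-coded per-format match dispatch with dynamic module resolution: read() and write() now import etlplus.file.<format.value> on demand and call that module's read/write callables, forwarding root_tag only to writers whose signatures accept it. A minimal self-contained sketch of the same pattern follows; the package path and helper names are illustrative, not the library's exact layout:

    import importlib
    import inspect
    from functools import cache
    from types import ModuleType


    @cache
    def module_for(fmt: str) -> ModuleType:
        # One memoized import per format value, e.g. 'csv' -> mypkg.file.csv.
        return importlib.import_module(f'mypkg.file.{fmt}')


    def write_any(fmt, path, data, root_tag=None):
        # Resolve the writer lazily, then forward root_tag only when the
        # writer's signature declares it (mirrors _accepts_root_tag above).
        writer = module_for(fmt).write
        if 'root_tag' in inspect.signature(writer).parameters:
            return writer(path, data, root_tag=root_tag)
        return writer(path, data)

Adding a new format then only requires shipping a module that exports read/write; core.py stays untouched, which is what makes the dozens of new format modules in this release cheap to add.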
etlplus/file/csv.py
CHANGED
@@ -1,7 +1,19 @@
 """
 :mod:`etlplus.file.csv` module.
 
-Helpers for reading/writing CSV files.
+Helpers for reading/writing Comma-Separated Values (CSV) files.
+
+Notes
+-----
+- A CSV file is a plain text file that uses commas to separate values.
+- Common cases:
+    - Each line in the file represents a single record.
+    - The first line often contains headers that define the column names.
+    - Values may be enclosed in quotes, especially if they contain commas
+      or special characters.
+- Rule of thumb:
+    - If the file follows the CSV specification, use this module for
+      reading and writing.
 """
 
 from __future__ import annotations
etlplus/file/dat.py
ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.dat` module.
+
+Helpers for reading/writing data (DAT) files.
+
+Notes
+-----
+- A “DAT-formatted” file is a generic data file that may use various
+  delimiters or fixed-width formats.
+- Common cases:
+    - Delimited text files (e.g., CSV, TSV).
+    - Fixed-width formatted files.
+    - Custom formats specific to certain applications.
+- Rule of thumb:
+    - If the file does not follow a specific standard format, use this module
+      for reading and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read DAT content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DAT file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the DAT file.
+    """
+    return stub.read(path, format_name='DAT')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to DAT file at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DAT file on disk.
+    data : JSONData
+        Data to write as DAT file. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the DAT file.
+    """
+    return stub.write(path, data, format_name='DAT')
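dta.py, duckdb.py, and the other newly added format modules below repeat this exact template, varying only the docstring and the format_name argument. stub.py itself is not part of this excerpt, so the following is only a plausible sketch of the delegation target, inferred from the call sites above; the keyword-only parameters and the NotImplementedError behavior are assumptions:

    from pathlib import Path

    from ..types import JSONData, JSONList


    def read(path: Path, *, format_name: str) -> JSONList:
        # Placeholder backend: fail loudly until a real reader lands.
        raise NotImplementedError(f'{format_name} read is not implemented yet')


    def write(path: Path, data: JSONData, *, format_name: str) -> int:
        # Placeholder backend: fail loudly until a real writer lands.
        raise NotImplementedError(f'{format_name} write is not implemented yet')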
etlplus/file/dta.py
ADDED
@@ -0,0 +1,77 @@
+"""
+:mod:`etlplus.file.dta` module.
+
+Helpers for reading/writing Stata (DTA) data files.
+
+Notes
+-----
+- Stata DTA files are binary files used by Stata statistical software that
+  store datasets with variables, labels, and data types.
+- Common cases:
+    - Reading data for analysis in Python.
+    - Writing processed data back to Stata format.
+- Rule of thumb:
+    - If you need to work with Stata data files, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read DTA content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DTA file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the DTA file.
+    """
+    return stub.read(path, format_name='DTA')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to DTA file at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DTA file on disk.
+    data : JSONData
+        Data to write as DTA file. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the DTA file.
+    """
+    return stub.write(path, data, format_name='DTA')
etlplus/file/duckdb.py
ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.duckdb` module.
+
+Helpers for reading/writing DuckDB database (DUCKDB) files.
+
+Notes
+-----
+- A DUCKDB file is a self-contained, serverless database file format used by
+  DuckDB.
+- Common cases:
+    - Analytical data storage and processing.
+    - Embedded database applications.
+    - Fast querying of large datasets.
+- Rule of thumb:
+    - If the file follows the DUCKDB specification, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read DUCKDB content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DUCKDB file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the DUCKDB file.
+    """
+    return stub.read(path, format_name='DUCKDB')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to DUCKDB at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the DUCKDB file on disk.
+    data : JSONData
+        Data to write as DUCKDB. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the DUCKDB file.
+    """
+    return stub.write(path, data, format_name='DUCKDB')
etlplus/file/enums.py
CHANGED
@@ -61,21 +61,125 @@ class FileFormat(CoercibleStrEnum):
 
     # -- Constants -- #
 
-    AVRO = 'avro'
-    CSV = 'csv'
-    FEATHER = 'feather'
-    GZ = 'gz'
-    JSON = 'json'
-    NDJSON = 'ndjson'
-    ORC = 'orc'
-    PARQUET = 'parquet'
-    TSV = 'tsv'
-    TXT = 'txt'
-    XLS = 'xls'
-    XLSX = 'xlsx'
-    XML = 'xml'
-    YAML = 'yaml'
-    ZIP = 'zip'
+    # Stubbed / placeholder
+    STUB = 'stub'  # Placeholder format for tests & future connectors
+
+    # Tabular & delimited text
+    CSV = 'csv'  # Comma-Separated Values
+    DAT = 'dat'  # Generic data file, often delimited or fixed-width
+    FWF = 'fwf'  # Fixed-Width Formatted
+    PSV = 'psv'  # Pipe-Separated Values
+    TAB = 'tab'  # Often synonymous with TSV
+    TSV = 'tsv'  # Tab-Separated Values
+    TXT = 'txt'  # Plain text, often delimited or fixed-width
+
+    # Semi-structured text
+    CFG = 'cfg'  # Config-style key-value pairs
+    CONF = 'conf'  # Config-style key-value pairs
+    INI = 'ini'  # INI-style key-value pairs
+    JSON = 'json'  # JavaScript Object Notation
+    NDJSON = 'ndjson'  # Newline-Delimited JSON
+    PROPERTIES = 'properties'  # Java-style key-value pairs
+    TOML = 'toml'  # Tom's Obvious Minimal Language
+    XML = 'xml'  # Extensible Markup Language
+    YAML = 'yaml'  # YAML Ain't Markup Language
+
+    # Columnar / analytics-friendly
+    ARROW = 'arrow'  # Apache Arrow IPC
+    FEATHER = 'feather'  # Apache Arrow Feather
+    ORC = 'orc'  # Optimized Row Columnar; common in Hadoop
+    PARQUET = 'parquet'  # Apache Parquet; common in Big Data
+
+    # Binary serialization & interchange
+    AVRO = 'avro'  # Apache Avro
+    BSON = 'bson'  # Binary JSON; common with MongoDB exports/dumps
+    CBOR = 'cbor'  # Concise Binary Object Representation
+    ION = 'ion'  # Amazon Ion
+    MSGPACK = 'msgpack'  # MessagePack
+    PB = 'pb'  # Protocol Buffers (Google Protobuf)
+    PBF = 'pbf'  # Protocolbuffer Binary Format; often for GIS data
+    PROTO = 'proto'  # Protocol Buffers schema; often in .pb / .bin
+
+    # Databases & embedded storage
+    ACCDB = 'accdb'  # Microsoft Access database file (newer format)
+    DUCKDB = 'duckdb'  # DuckDB database file
+    MDB = 'mdb'  # Microsoft Access database file (older format)
+    SQLITE = 'sqlite'  # SQLite database file
+
+    # Spreadsheets
+    NUMBERS = 'numbers'  # Apple Numbers spreadsheet
+    ODS = 'ods'  # OpenDocument spreadsheet
+    WKS = 'wks'  # Lotus 1-2-3 spreadsheet
+    XLS = 'xls'  # Microsoft Excel (BIFF); read-only
+    XLSM = 'xlsm'  # Microsoft Excel Macro-Enabled (Open XML)
+    XLSX = 'xlsx'  # Microsoft Excel (Open XML)
+
+    # Statistical / scientific / numeric computing
+    DTA = 'dta'  # Stata data file
+    HDF5 = 'hdf5'  # Hierarchical Data Format
+    MAT = 'mat'  # MATLAB data file
+    NC = 'nc'  # NetCDF data file
+    RDA = 'rda'  # RData workspace/object bundle
+    RDS = 'rds'  # R data file
+    SAS7BDAT = 'sas7bdat'  # SAS data file
+    SAV = 'sav'  # SPSS data file
+    SYLK = 'sylk'  # Symbolic Link (SYmbolic LinK)
+    XPT = 'xpt'  # SAS Transport file
+    ZSAV = 'zsav'  # Compressed SPSS data file
+
+    # Time series and financial data
+    CAMT = 'camt'  # ISO 20022 Cash Management messages
+    FXT = 'fxt'  # Forex time series data
+    MT940 = 'mt940'  # SWIFT MT940 bank statement format
+    MT942 = 'mt942'  # SWIFT MT942 interim transaction report format
+    OFX = 'ofx'  # Open Financial Exchange
+    QFX = 'qfx'  # Quicken Financial Exchange
+    QIF = 'qif'  # Quicken Interchange Format
+    QQQ = 'qqq'  # QuantQuote historical data
+    TRR = 'trr'  # Trade and transaction reports
+    TSDB = 'tsdb'  # Time series database export
+
+    # Geospatial data
+    GEOJSON = 'geojson'  # GeoJSON
+    GEOTIFF = 'geotiff'  # GeoTIFF
+    GML = 'gml'  # Geography Markup Language
+    GPKG = 'gpkg'  # GeoPackage
+    GPX = 'gpx'  # GPS Exchange Format
+    KML = 'kml'  # Keyhole Markup Language
+    LAS = 'las'  # LiDAR Aerial Survey
+    LAZ = 'laz'  # LASzip (compressed LAS)
+    OSM = 'osm'  # OpenStreetMap XML Data
+    SHP = 'shp'  # ESRI Shapefile
+    WKB = 'wkb'  # Well-Known Binary
+    WKT = 'wkt'  # Well-Known Text
+
+    # Logs & event streams
+    EVT = 'evt'  # Windows Event Trace Log (pre-Vista)
+    EVTX = 'evtx'  # Windows Event Trace Log (Vista and later)
+    LOG = 'log'  # Generic log file
+    PCAP = 'pcap'  # Packet Capture file
+    PCAPPNG = 'pcapng'  # Packet Capture Next Generation file
+    SLOG = 'slog'  # Structured log file
+    W3CLOG = 'w3clog'  # W3C Extended Log File Format
+
+    # “Data archives” & packaging
+    _7Z = '7z'  # 7-Zip archive
+    GZ = 'gz'  # Gzip-compressed file
+    JAR = 'jar'  # Java archive
+    RAR = 'rar'  # RAR archive
+    SIT = 'sit'  # StuffIt archive
+    SITX = 'sitx'  # StuffIt X archive
+    TAR = 'tar'  # TAR archive
+    TGZ = 'tgz'  # Gzip-compressed TAR archive
+    ZIP = 'zip'  # ZIP archive
+
+    # Domain-specific & less common
+
+    # Templates
+    HBS = 'hbs'  # Handlebars
+    JINJA2 = 'jinja2'  # Jinja2
+    MUSTACHE = 'mustache'  # Mustache
+    VM = 'vm'  # Apache Velocity
 
     # -- Class Methods -- #
 
@@ -104,6 +208,7 @@ class FileFormat(CoercibleStrEnum):
             '.orc': 'orc',
             '.parquet': 'parquet',
             '.pq': 'parquet',
+            '.stub': 'stub',
             '.tsv': 'tsv',
             '.txt': 'txt',
             '.xls': 'xls',
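Because FileFormat is a str-backed enum, each member's value doubles as the name of the module that core.py's _module_for_format() imports, so registering a member here is the other half of adding a format. A quick illustration using only standard Enum semantics (the CoercibleStrEnum base class may add coercions not shown in this diff):

    from etlplus.file.enums import FileFormat

    # Plain lookup-by-value works because enum members are keyed by value.
    assert FileFormat('duckdb') is FileFormat.DUCKDB

    # The value is exactly what the dynamic dispatch in core.py imports:
    # importlib.import_module(f'etlplus.file.{FileFormat.DUCKDB.value}')
    assert FileFormat.DUCKDB.value == 'duckdb'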
etlplus/file/feather.py
CHANGED
@@ -1,7 +1,19 @@
 """
 :mod:`etlplus.file.feather` module.
 
-Helpers for reading/writing Feather files.
+Helpers for reading/writing Apache Arrow Feather (FEATHER) files.
+
+Notes
+-----
+- A FEATHER file is a binary file format designed for efficient
+  on-disk storage of data frames, built on top of Apache Arrow.
+- Common cases:
+    - Fast read/write operations for data frames.
+    - Interoperability between different data analysis tools.
+    - Storage of large datasets with efficient compression.
+- Rule of thumb:
+    - If the file follows the Apache Arrow Feather specification, use this
+      module for reading and writing.
 """
 
 from __future__ import annotations
@@ -11,8 +23,8 @@ from typing import cast
 
 from ..types import JSONData
 from ..types import JSONList
+from ._imports import get_pandas
 from ._io import normalize_records
-from ._pandas import get_pandas
 
 # SECTION: EXPORTS ========================================================== #
 
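Per the summary above, _pandas.py (-58 lines) has been superseded by _imports.py (+141 lines), so the lazy pandas helper now lives alongside what is presumably a broader set of optional-dependency accessors. _imports.py is not shown in this diff, so this is only a sketch of the usual shape of such a helper; the caching and the error message are assumptions:

    from functools import cache
    from types import ModuleType


    @cache
    def get_pandas() -> ModuleType:
        # Import lazily so formats that don't need pandas work without it.
        try:
            import pandas
        except ImportError as e:
            raise ImportError(
                'pandas is required for this file format; install it first',
            ) from e
        return pandas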