etlplus 0.12.1__py3-none-any.whl → 0.12.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. etlplus/file/_imports.py +141 -0
  2. etlplus/file/_io.py +121 -0
  3. etlplus/file/accdb.py +78 -0
  4. etlplus/file/arrow.py +78 -0
  5. etlplus/file/avro.py +46 -68
  6. etlplus/file/bson.py +77 -0
  7. etlplus/file/cbor.py +78 -0
  8. etlplus/file/cfg.py +79 -0
  9. etlplus/file/conf.py +80 -0
  10. etlplus/file/core.py +119 -84
  11. etlplus/file/csv.py +17 -29
  12. etlplus/file/dat.py +78 -0
  13. etlplus/file/duckdb.py +78 -0
  14. etlplus/file/enums.py +114 -15
  15. etlplus/file/feather.py +18 -51
  16. etlplus/file/fwf.py +77 -0
  17. etlplus/file/ini.py +79 -0
  18. etlplus/file/ion.py +78 -0
  19. etlplus/file/json.py +13 -1
  20. etlplus/file/log.py +78 -0
  21. etlplus/file/mdb.py +78 -0
  22. etlplus/file/msgpack.py +78 -0
  23. etlplus/file/ndjson.py +14 -15
  24. etlplus/file/orc.py +18 -49
  25. etlplus/file/parquet.py +18 -51
  26. etlplus/file/pb.py +78 -0
  27. etlplus/file/pbf.py +77 -0
  28. etlplus/file/properties.py +78 -0
  29. etlplus/file/proto.py +77 -0
  30. etlplus/file/psv.py +79 -0
  31. etlplus/file/sqlite.py +78 -0
  32. etlplus/file/stub.py +84 -0
  33. etlplus/file/tab.py +81 -0
  34. etlplus/file/toml.py +78 -0
  35. etlplus/file/tsv.py +18 -29
  36. etlplus/file/txt.py +13 -10
  37. etlplus/file/xls.py +4 -48
  38. etlplus/file/xlsx.py +5 -48
  39. etlplus/file/xml.py +12 -1
  40. etlplus/file/yaml.py +15 -44
  41. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/METADATA +119 -1
  42. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/RECORD +46 -21
  43. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/WHEEL +0 -0
  44. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/entry_points.txt +0 -0
  45. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/licenses/LICENSE +0 -0
  46. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/top_level.txt +0 -0
etlplus/file/msgpack.py ADDED
@@ -0,0 +1,78 @@
+ """
+ :mod:`etlplus.file.msgpack` module.
+
+ Helpers for reading/writing MessagePack (MSGPACK) files.
+
+ Notes
+ -----
+ - A MsgPack file is a binary serialization format that is more compact than
+   JSON.
+ - Common cases:
+     - Efficient data storage and transmission.
+     - Inter-process communication.
+     - Data serialization in performance-critical applications.
+ - Rule of thumb:
+     - If the file follows the MsgPack specification, use this module for
+       reading and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read MsgPack content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the MsgPack file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the MsgPack file.
+     """
+     return stub.read(path, format_name='MSGPACK')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to MsgPack at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the MsgPack file on disk.
+     data : JSONData
+         Data to write as MsgPack. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the MsgPack file.
+     """
+     return stub.write(path, data, format_name='MSGPACK')
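The stub-backed modules shown in this diff (msgpack, pb, pbf, properties, proto) all delegate to the new shared etlplus/file/stub.py placeholder, whose body is not included here. A minimal sketch consistent with the call sites above, assuming the placeholder simply fails loudly until a real codec is wired up, might look like:

    from __future__ import annotations

    from pathlib import Path

    from ..types import JSONData
    from ..types import JSONList


    def read(path: Path, *, format_name: str) -> JSONList:
        # Hypothetical placeholder body: reject reads until implemented.
        raise NotImplementedError(
            f'{format_name} read support is not implemented yet: {path}',
        )


    def write(path: Path, data: JSONData, *, format_name: str) -> int:
        # Hypothetical placeholder body: reject writes until implemented.
        raise NotImplementedError(
            f'{format_name} write support is not implemented yet: {path}',
        )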
etlplus/file/ndjson.py CHANGED
@@ -1,7 +1,18 @@
  """
  :mod:`etlplus.file.ndjson` module.
 
- Helpers for reading/writing NDJSON files.
+ Helpers for reading/writing Newline Delimited JSON (NDJSON) files.
+
+ Notes
+ -----
+ - An NDJSON file is a format where each line is a separate JSON object.
+ - Common cases:
+     - Streaming JSON data.
+     - Log files with JSON entries.
+     - Large datasets that are processed line-by-line.
+ - Rule of thumb:
+     - If the file follows the NDJSON specification, use this module for
+       reading and writing.
  """
 
  from __future__ import annotations
@@ -14,6 +25,7 @@ from ..types import JSONData
  from ..types import JSONDict
  from ..types import JSONList
  from ..utils import count_records
+ from ._io import normalize_records
 
  # SECTION: EXPORTS ========================================================== #
 
@@ -81,21 +93,8 @@ def write(
      -------
      int
          Number of records written.
-
-     Raises
-     ------
-     TypeError
-         If ``data`` is a list containing non-dict items.
      """
-     rows: JSONList
-     if isinstance(data, list):
-         if not all(isinstance(item, dict) for item in data):
-             raise TypeError(
-                 'NDJSON payloads must contain only objects (dicts)',
-             )
-         rows = cast(JSONList, data)
-     else:
-         rows = [cast(JSONDict, data)]
+     rows = normalize_records(data, 'NDJSON')
 
      if not rows:
          return 0
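The normalize_records helper replacing the inline validation above lives in the new shared etlplus/file/_io.py, whose body is not shown in this diff. Judging from the deleted code here and in orc.py and parquet.py below, a plausible reconstruction, with the format name threaded into the error message, is:

    from __future__ import annotations

    from typing import cast

    from ..types import JSONData
    from ..types import JSONDict
    from ..types import JSONList


    def normalize_records(data: JSONData, format_name: str) -> JSONList:
        # Coerce a JSON payload into a list of dicts: a bare dict becomes
        # a one-element list, and non-dict list items raise TypeError.
        if isinstance(data, list):
            if not all(isinstance(item, dict) for item in data):
                raise TypeError(
                    f'{format_name} payloads must contain only objects '
                    '(dicts)',
                )
            return cast(JSONList, data)
        return [cast(JSONDict, data)]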
etlplus/file/orc.py CHANGED
@@ -1,18 +1,30 @@
  """
  :mod:`etlplus.file.orc` module.
 
- Helpers for reading/writing ORC files.
+ Helpers for reading/writing Optimized Row Columnar (ORC) files.
+
+ Notes
+ -----
+ - An ORC file is a columnar storage file format optimized for Big Data
+   processing.
+ - Common cases:
+     - Efficient storage and retrieval of large datasets.
+     - Integration with big data frameworks like Apache Hive and Apache Spark.
+     - Compression and performance optimization for analytical queries.
+ - Rule of thumb:
+     - If the file follows the ORC specification, use this module for reading
+       and writing.
  """
 
  from __future__ import annotations
 
  from pathlib import Path
- from typing import Any
  from typing import cast
 
  from ..types import JSONData
- from ..types import JSONDict
  from ..types import JSONList
+ from ._imports import get_pandas
+ from ._io import normalize_records
 
  # SECTION: EXPORTS ========================================================== #
 
@@ -23,49 +35,6 @@ __all__ = [
  ]
 
 
- # SECTION: INTERNAL CONSTANTS =============================================== #
-
-
- _PANDAS_CACHE: dict[str, Any] = {}
-
-
- # SECTION: INTERNAL FUNCTIONS =============================================== #
-
-
- def _get_pandas() -> Any:
-     """
-     Return the pandas module, importing it on first use.
-
-     Raises an informative ImportError if the optional dependency is missing.
-     """
-     mod = _PANDAS_CACHE.get('mod')
-     if mod is not None:  # pragma: no cover - tiny branch
-         return mod
-     try:
-         _pd = __import__('pandas')  # type: ignore[assignment]
-     except ImportError as e:  # pragma: no cover
-         raise ImportError(
-             'ORC support requires optional dependency "pandas".\n'
-             'Install with: pip install pandas',
-         ) from e
-     _PANDAS_CACHE['mod'] = _pd
-
-     return _pd
-
-
- def _normalize_records(data: JSONData) -> JSONList:
-     """
-     Normalize JSON payloads into a list of dictionaries.
-
-     Raises TypeError when payloads contain non-dict items.
-     """
-     if isinstance(data, list):
-         if not all(isinstance(item, dict) for item in data):
-             raise TypeError('ORC payloads must contain only objects (dicts)')
-         return cast(JSONList, data)
-     return [cast(JSONDict, data)]
-
-
  # SECTION: FUNCTIONS ======================================================== #
 
 
@@ -90,7 +59,7 @@ def read(
      ImportError
          When optional dependency "pyarrow" is missing.
      """
-     pandas = _get_pandas()
+     pandas = get_pandas('ORC')
      try:
          frame = pandas.read_orc(path)
      except ImportError as e:  # pragma: no cover
@@ -125,11 +94,11 @@ def write(
      ImportError
          When optional dependency "pyarrow" is missing.
      """
-     records = _normalize_records(data)
+     records = normalize_records(data, 'ORC')
      if not records:
          return 0
 
-     pandas = _get_pandas()
+     pandas = get_pandas('ORC')
      path.parent.mkdir(parents=True, exist_ok=True)
      frame = pandas.DataFrame.from_records(records)
      try:
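Similarly, get_pandas now lives in the new shared etlplus/file/_imports.py, which this diff does not show. Based on the per-module _get_pandas helper deleted above, a plausible reconstruction, parameterized by format name so the ImportError message stays specific, is:

    from __future__ import annotations

    from typing import Any

    # Module-level cache so pandas is imported at most once per process.
    _PANDAS_CACHE: dict[str, Any] = {}


    def get_pandas(format_name: str) -> Any:
        # Return the pandas module, importing it lazily on first use and
        # raising an informative ImportError when it is missing.
        mod = _PANDAS_CACHE.get('mod')
        if mod is not None:
            return mod
        try:
            pd = __import__('pandas')
        except ImportError as e:
            raise ImportError(
                f'{format_name} support requires optional dependency '
                '"pandas".\nInstall with: pip install pandas',
            ) from e
        _PANDAS_CACHE['mod'] = pd
        return pd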
etlplus/file/parquet.py CHANGED
@@ -1,18 +1,30 @@
  """
  :mod:`etlplus.file.parquet` module.
 
- Helpers for reading/writing Parquet files.
+ Helpers for reading/writing Apache Parquet (PARQUET) files.
+
+ Notes
+ -----
+ - An Apache Parquet file is a columnar storage file format optimized for Big
+   Data processing.
+ - Common cases:
+     - Efficient storage and retrieval of large datasets.
+     - Integration with big data frameworks like Apache Hive and Apache Spark.
+     - Compression and performance optimization for analytical queries.
+ - Rule of thumb:
+     - If the file follows the Apache Parquet specification, use this module
+       for reading and writing.
  """
 
  from __future__ import annotations
 
  from pathlib import Path
- from typing import Any
  from typing import cast
 
  from ..types import JSONData
- from ..types import JSONDict
  from ..types import JSONList
+ from ._imports import get_pandas
+ from ._io import normalize_records
 
  # SECTION: EXPORTS ========================================================== #
 
@@ -23,51 +35,6 @@ __all__ = [
  ]
 
 
- # SECTION: INTERNAL CONSTANTS =============================================== #
-
-
- _PANDAS_CACHE: dict[str, Any] = {}
-
-
- # SECTION: INTERNAL FUNCTIONS =============================================== #
-
-
- def _get_pandas() -> Any:
-     """
-     Return the pandas module, importing it on first use.
-
-     Raises an informative ImportError if the optional dependency is missing.
-     """
-     mod = _PANDAS_CACHE.get('mod')
-     if mod is not None:  # pragma: no cover - tiny branch
-         return mod
-     try:
-         _pd = __import__('pandas')  # type: ignore[assignment]
-     except ImportError as e:  # pragma: no cover
-         raise ImportError(
-             'Parquet support requires optional dependency "pandas".\n'
-             'Install with: pip install pandas',
-         ) from e
-     _PANDAS_CACHE['mod'] = _pd
-
-     return _pd
-
-
- def _normalize_records(data: JSONData) -> JSONList:
-     """
-     Normalize JSON payloads into a list of dictionaries.
-
-     Raises TypeError when payloads contain non-dict items.
-     """
-     if isinstance(data, list):
-         if not all(isinstance(item, dict) for item in data):
-             raise TypeError(
-                 'Parquet payloads must contain only objects (dicts)',
-             )
-         return cast(JSONList, data)
-     return [cast(JSONDict, data)]
-
-
  # SECTION: FUNCTIONS ======================================================== #
 
 
@@ -92,7 +59,7 @@ def read(
      ImportError
          If optional dependencies for Parquet support are missing.
      """
-     pandas = _get_pandas()
+     pandas = get_pandas('Parquet')
      try:
          frame = pandas.read_parquet(path)
      except ImportError as e:  # pragma: no cover
@@ -128,11 +95,11 @@ def write(
      ImportError
          If optional dependencies for Parquet support are missing.
      """
-     records = _normalize_records(data)
+     records = normalize_records(data, 'Parquet')
      if not records:
          return 0
 
-     pandas = _get_pandas()
+     pandas = get_pandas('Parquet')
      path.parent.mkdir(parents=True, exist_ok=True)
      frame = pandas.DataFrame.from_records(records)
      try:
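The public read/write surface is unchanged by this refactor. A round trip through the Parquet helpers, using only the signatures shown above and assuming the optional pandas and pyarrow dependencies are installed, looks like:

    from pathlib import Path

    from etlplus.file import parquet

    rows = [
        {'id': 1, 'name': 'a'},
        {'id': 2, 'name': 'b'},
    ]

    # write() returns the number of records written; read() returns the
    # records as a list of dictionaries.
    count = parquet.write(Path('example.parquet'), rows)
    print(count)                                  # 2
    print(parquet.read(Path('example.parquet')))  # [{'id': 1, ...}, ...]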
etlplus/file/pb.py ADDED
@@ -0,0 +1,78 @@
+ """
+ :mod:`etlplus.file.pb` module.
+
+ Helpers for reading/writing Protocol Buffer (PB) files.
+
+ Notes
+ -----
+ - PB (a.k.a. Protobuf) is a binary serialization format developed by Google
+   for structured data.
+ - Common cases:
+     - Data interchange between services.
+     - Efficient storage of structured data.
+     - Communication in distributed systems.
+ - Rule of thumb:
+     - If the file follows the Protocol Buffer specification, use this module
+       for reading and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read PB content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PB file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the PB file.
+     """
+     return stub.read(path, format_name='PB')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to PB at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PB file on disk.
+     data : JSONData
+         Data to write as PB. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the PB file.
+     """
+     return stub.write(path, data, format_name='PB')
etlplus/file/pbf.py ADDED
@@ -0,0 +1,77 @@
+ """
+ :mod:`etlplus.file.pbf` module.
+
+ Helpers for reading/writing Protocolbuffer Binary Format (PBF) files.
+
+ Notes
+ -----
+ - PBF is a binary format used primarily for OpenStreetMap (OSM) data.
+ - Common cases:
+     - Efficient storage of large OSM datasets.
+     - Fast data interchange for mapping applications.
+     - Compression of OSM data for reduced file size.
+ - Rule of thumb:
+     - If the file follows the PBF specification, use this module for reading
+       and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read PBF content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PBF file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the PBF file.
+     """
+     return stub.read(path, format_name='PBF')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to PBF at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PBF file on disk.
+     data : JSONData
+         Data to write as PBF. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the PBF file.
+     """
+     return stub.write(path, data, format_name='PBF')
etlplus/file/properties.py ADDED
@@ -0,0 +1,78 @@
+ """
+ :mod:`etlplus.file.props` module.
+
+ Helpers for reading/writing properties (PROPS) files.
+
+ Notes
+ -----
+ - A “PROPS-formatted” file is a properties file that typically uses
+   key-value pairs, often with a simple syntax.
+ - Common cases:
+     - Java-style properties files with ``key=value`` pairs.
+     - INI-style files without sections.
+     - Custom formats specific to certain applications.
+ - Rule of thumb:
+     - If the file follows a standard format like INI, consider using
+       dedicated parsers.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read PROPS content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PROPS file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the PROPS file.
+     """
+     return stub.read(path, format_name='PROPS')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to PROPS at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PROPS file on disk.
+     data : JSONData
+         Data to write as PROPS. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the PROPS file.
+     """
+     return stub.write(path, data, format_name='PROPS')
etlplus/file/proto.py ADDED
@@ -0,0 +1,77 @@
+ """
+ :mod:`etlplus.file.proto` module.
+
+ Helpers for reading/writing Protocol Buffers schema (PROTO) files.
+
+ Notes
+ -----
+ - A PROTO file defines the structure of Protocol Buffers messages.
+ - Common cases:
+     - Defining message formats for data interchange.
+     - Generating code for serialization/deserialization.
+     - Documenting data structures in distributed systems.
+ - Rule of thumb:
+     - If the file follows the Protocol Buffers schema specification, use this
+       module for reading and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read PROTO content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PROTO file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the PROTO file.
+     """
+     return stub.read(path, format_name='PROTO')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to PROTO at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the PROTO file on disk.
+     data : JSONData
+         Data to write as PROTO. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the PROTO file.
+     """
+     return stub.write(path, data, format_name='PROTO')
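Because the stub-backed modules above share an identical read/write contract, callers can dispatch on a file's suffix without format-specific wiring. A hypothetical dispatch helper (illustrative only, not part of etlplus) could look like:

    from importlib import import_module
    from pathlib import Path


    def read_any(path: Path):
        # Map a suffix such as '.pbf' onto etlplus.file.pbf and call its
        # read(); every format module exposes read(path) -> JSONList.
        fmt = path.suffix.lstrip('.').lower()
        module = import_module(f'etlplus.file.{fmt}')
        return module.read(path)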