etlplus 0.12.1__py3-none-any.whl → 0.12.10__py3-none-any.whl

This diff shows the content changes between package versions as published to one of the supported public registries. It is provided for informational purposes only.
Files changed (46)
  1. etlplus/file/_imports.py +141 -0
  2. etlplus/file/_io.py +121 -0
  3. etlplus/file/accdb.py +78 -0
  4. etlplus/file/arrow.py +78 -0
  5. etlplus/file/avro.py +46 -68
  6. etlplus/file/bson.py +77 -0
  7. etlplus/file/cbor.py +78 -0
  8. etlplus/file/cfg.py +79 -0
  9. etlplus/file/conf.py +80 -0
  10. etlplus/file/core.py +119 -84
  11. etlplus/file/csv.py +17 -29
  12. etlplus/file/dat.py +78 -0
  13. etlplus/file/duckdb.py +78 -0
  14. etlplus/file/enums.py +114 -15
  15. etlplus/file/feather.py +18 -51
  16. etlplus/file/fwf.py +77 -0
  17. etlplus/file/ini.py +79 -0
  18. etlplus/file/ion.py +78 -0
  19. etlplus/file/json.py +13 -1
  20. etlplus/file/log.py +78 -0
  21. etlplus/file/mdb.py +78 -0
  22. etlplus/file/msgpack.py +78 -0
  23. etlplus/file/ndjson.py +14 -15
  24. etlplus/file/orc.py +18 -49
  25. etlplus/file/parquet.py +18 -51
  26. etlplus/file/pb.py +78 -0
  27. etlplus/file/pbf.py +77 -0
  28. etlplus/file/properties.py +78 -0
  29. etlplus/file/proto.py +77 -0
  30. etlplus/file/psv.py +79 -0
  31. etlplus/file/sqlite.py +78 -0
  32. etlplus/file/stub.py +84 -0
  33. etlplus/file/tab.py +81 -0
  34. etlplus/file/toml.py +78 -0
  35. etlplus/file/tsv.py +18 -29
  36. etlplus/file/txt.py +13 -10
  37. etlplus/file/xls.py +4 -48
  38. etlplus/file/xlsx.py +5 -48
  39. etlplus/file/xml.py +12 -1
  40. etlplus/file/yaml.py +15 -44
  41. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/METADATA +119 -1
  42. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/RECORD +46 -21
  43. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/WHEEL +0 -0
  44. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/entry_points.txt +0 -0
  45. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/licenses/LICENSE +0 -0
  46. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/top_level.txt +0 -0
etlplus/file/_imports.py ADDED
@@ -0,0 +1,141 @@
+"""
+:mod:`etlplus.file._imports` module.
+
+Shared helpers for optional dependency imports.
+"""
+
+from __future__ import annotations
+
+from importlib import import_module
+from typing import Any
+
+# SECTION: INTERNAL CONSTANTS =============================================== #
+
+
+_MODULE_CACHE: dict[str, Any] = {}
+
+
+# SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+def _error_message(
+    module_name: str,
+    format_name: str,
+) -> str:
+    """
+    Build an import error message for an optional dependency.
+
+    Parameters
+    ----------
+    module_name : str
+        Module name to look up.
+    format_name : str
+        Human-readable format name for templated messages.
+
+    Returns
+    -------
+    str
+        Formatted error message.
+    """
+    return (
+        f'{format_name} support requires '
+        f'optional dependency "{module_name}".\n'
+        f'Install with: pip install {module_name}'
+    )
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def get_optional_module(
+    module_name: str,
+    *,
+    error_message: str,
+) -> Any:
+    """
+    Return an optional dependency module, caching on first import.
+
+    Parameters
+    ----------
+    module_name : str
+        Name of the module to import.
+    error_message : str
+        Error message to surface when the module is missing.
+
+    Returns
+    -------
+    Any
+        The imported module.
+
+    Raises
+    ------
+    ImportError
+        If the optional dependency is missing.
+    """
+    cached = _MODULE_CACHE.get(module_name)
+    if cached is not None:  # pragma: no cover - tiny branch
+        return cached
+    try:
+        module = import_module(module_name)
+    except ImportError as e:  # pragma: no cover
+        raise ImportError(error_message) from e
+    _MODULE_CACHE[module_name] = module
+    return module
+
+
+def get_fastavro() -> Any:
+    """
+    Return the fastavro module, importing it on first use.
+
+    Raises an informative ImportError if the optional dependency is missing.
+
+    Notes
+    -----
+    Prefer :func:`get_optional_module` for new call sites.
+    """
+    return get_optional_module(
+        'fastavro',
+        error_message=_error_message('fastavro', format_name='AVRO'),
+    )
+
+
+def get_pandas(
+    format_name: str,
+) -> Any:
+    """
+    Return the pandas module, importing it on first use.
+
+    Parameters
+    ----------
+    format_name : str
+        Human-readable format name for error messages.
+
+    Returns
+    -------
+    Any
+        The pandas module.
+
+    Notes
+    -----
+    Prefer :func:`get_optional_module` for new call sites.
+    """
+    return get_optional_module(
+        'pandas',
+        error_message=_error_message('pandas', format_name=format_name),
+    )
+
+
+def get_yaml() -> Any:
+    """
+    Return the PyYAML module, importing it on first use.
+
+    Raises an informative ImportError if the optional dependency is missing.
+
+    Notes
+    -----
+    Prefer :func:`get_optional_module` for new call sites.
+    """
+    return get_optional_module(
+        'yaml',
+        error_message=_error_message('PyYAML', format_name='YAML'),
+    )
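Taken together, _imports.py centralizes the lazy-import-and-cache pattern that 0.12.1 inlined in individual format modules (see the avro.py diff below). A minimal sketch of a new call site, assuming only what this diff shows; the orjson module name is purely illustrative and not a dependency the package declares:

from etlplus.file._imports import get_optional_module

def _get_orjson():
    # Resolve and cache the module on first use; if it is not installed,
    # get_optional_module raises ImportError with the supplied message.
    return get_optional_module(
        'orjson',
        error_message='ORJSON support requires optional dependency "orjson".',
    )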
etlplus/file/_io.py ADDED
@@ -0,0 +1,121 @@
+"""
+:mod:`etlplus.file._io` module.
+
+Shared helpers for record normalization and delimited text formats.
+"""
+
+from __future__ import annotations
+
+import csv
+from pathlib import Path
+from typing import cast
+
+from ..types import JSONData
+from ..types import JSONDict
+from ..types import JSONList
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def normalize_records(
+    data: JSONData,
+    format_name: str,
+) -> JSONList:
+    """
+    Normalize payloads into a list of dictionaries.
+
+    Parameters
+    ----------
+    data : JSONData
+        Input payload to normalize.
+    format_name : str
+        Human-readable format name for error messages.
+
+    Returns
+    -------
+    JSONList
+        Normalized list of dictionaries.
+
+    Raises
+    ------
+    TypeError
+        If a list payload contains non-dict items.
+    """
+    if isinstance(data, list):
+        if not all(isinstance(item, dict) for item in data):
+            raise TypeError(
+                f'{format_name} payloads must contain only objects (dicts)',
+            )
+        return cast(JSONList, data)
+    return [cast(JSONDict, data)]
+
+
+def read_delimited(path: Path, *, delimiter: str) -> JSONList:
+    """
+    Read delimited content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the delimited file on disk.
+    delimiter : str
+        Delimiter character for parsing.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the delimited file.
+    """
+    with path.open('r', encoding='utf-8', newline='') as handle:
+        reader: csv.DictReader[str] = csv.DictReader(
+            handle,
+            delimiter=delimiter,
+        )
+        rows: JSONList = []
+        for row in reader:
+            if not any(row.values()):
+                continue
+            rows.append(cast(JSONDict, dict(row)))
+    return rows
+
+
+def write_delimited(path: Path, data: JSONData, *, delimiter: str) -> int:
+    """
+    Write ``data`` to a delimited file and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the delimited file on disk.
+    data : JSONData
+        Data to write as delimited rows.
+    delimiter : str
+        Delimiter character for writing.
+
+    Returns
+    -------
+    int
+        The number of rows written.
+    """
+    rows: list[JSONDict]
+    if isinstance(data, list):
+        rows = [row for row in data if isinstance(row, dict)]
+    else:
+        rows = [data]
+
+    if not rows:
+        return 0
+
+    fieldnames = sorted({key for row in rows for key in row})
+    path.parent.mkdir(parents=True, exist_ok=True)
+    with path.open('w', encoding='utf-8', newline='') as handle:
+        writer = csv.DictWriter(
+            handle,
+            fieldnames=fieldnames,
+            delimiter=delimiter,
+        )
+        writer.writeheader()
+        for row in rows:
+            writer.writerow({field: row.get(field) for field in fieldnames})
+
+    return len(rows)
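A quick round-trip sketch of the _io helpers, assuming the package is importable as etlplus. Note that csv.DictReader yields every value back as a string, so numeric values do not survive the round trip typed:

from pathlib import Path

from etlplus.file._io import normalize_records, read_delimited, write_delimited

records = normalize_records({'a': 1, 'b': 'x'}, 'CSV')             # [{'a': 1, 'b': 'x'}]
count = write_delimited(Path('demo.csv'), records, delimiter=',')  # 1
rows = read_delimited(Path('demo.csv'), delimiter=',')             # [{'a': '1', 'b': 'x'}]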
etlplus/file/accdb.py ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.accdb` module.
+
+Helpers for reading/writing newer Microsoft Access database (ACCDB) files.
+
+Notes
+-----
+- An ACCDB file is a proprietary database file format used by Microsoft Access
+  2007 and later.
+- Common cases:
+    - Storing relational data for small to medium-sized applications.
+    - Desktop database applications.
+    - Data management for non-enterprise solutions.
+- Rule of thumb:
+    - If the file follows the ACCDB specification, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read ACCDB content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the ACCDB file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the ACCDB file.
+    """
+    return stub.read(path, format_name='ACCDB')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to ACCDB at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the ACCDB file on disk.
+    data : JSONData
+        Data to write as ACCDB. Should be a list of dictionaries or a single
+        dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the ACCDB file.
+    """
+    return stub.write(path, data, format_name='ACCDB')
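The same read/write surface repeats for every stub-backed format in this release (arrow, bson, cbor, and so on); only the format_name argument changes. A usage sketch, with the caveat that stub.py (listed above at +84 lines) is not shown in this diff, so its exact behavior for unimplemented formats is an assumption:

from pathlib import Path

from etlplus.file import accdb

# Assumption: stub.read/stub.write signal an unimplemented format rather
# than silently succeeding; the stub module body is not included here.
rows = accdb.read(Path('contacts.accdb'))
accdb.write(Path('contacts.accdb'), rows)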
etlplus/file/arrow.py ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.arrow` module.
+
+Helpers for reading/writing Apache Arrow (ARROW) files.
+
+Notes
+-----
+- An ARROW file is a binary file format designed for efficient
+  columnar data storage and processing.
+- Common cases:
+    - High-performance data analytics.
+    - Interoperability between different data processing systems.
+    - In-memory data representation for fast computations.
+- Rule of thumb:
+    - If the file follows the Apache Arrow specification, use this module for
+      reading and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read ARROW content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the Apache Arrow file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the Apache Arrow file.
+    """
+    return stub.read(path, format_name='ARROW')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to ARROW at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the ARROW file on disk.
+    data : JSONData
+        Data to write as ARROW. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the ARROW file.
+    """
+    return stub.write(path, data, format_name='ARROW')
etlplus/file/avro.py CHANGED
@@ -1,7 +1,19 @@
 """
 :mod:`etlplus.file.avro` module.
 
-Helpers for reading/writing Avro files.
+Helpers for reading/writing Apache Avro (AVRO) files.
+
+Notes
+-----
+- An AVRO file is a binary file format designed for efficient
+  on-disk storage of data, with a schema definition.
+- Common cases:
+    - Data serialization for distributed systems.
+    - Interoperability between different programming languages.
+    - Storage of large datasets with schema evolution support.
+- Rule of thumb:
+    - If the file follows the Apache Avro specification, use this module for
+      reading and writing.
 """
 
 from __future__ import annotations
@@ -10,9 +22,12 @@ from pathlib import Path
 from typing import Any
 from typing import cast
 
+from etlplus.file._imports import get_fastavro
+
 from ..types import JSONData
 from ..types import JSONDict
 from ..types import JSONList
+from ._io import normalize_records
 
 # SECTION: EXPORTS ========================================================== #
 
@@ -26,9 +41,6 @@ __all__ = [
 # SECTION: INTERNAL CONSTANTS =============================================== #
 
 
-_FASTAVRO_CACHE: dict[str, Any] = {}
-
-
 _PRIMITIVE_TYPES: tuple[type, ...] = (
     bool,
     int,
@@ -42,38 +54,37 @@ _PRIMITIVE_TYPES: tuple[type, ...] = (
 # SECTION: INTERNAL FUNCTIONS =============================================== #
 
 
-def _get_fastavro() -> Any:
+def _infer_schema(records: JSONList) -> dict[str, Any]:
     """
-    Return the fastavro module, importing it on first use.
+    Infer a basic Avro schema from record payloads.
 
-    Raises an informative ImportError if the optional dependency is missing.
-    """
-    mod = _FASTAVRO_CACHE.get('mod')
-    if mod is not None:  # pragma: no cover - tiny branch
-        return mod
-    try:
-        _fastavro = __import__('fastavro')  # type: ignore[assignment]
-    except ImportError as e:  # pragma: no cover
-        raise ImportError(
-            'AVRO support requires optional dependency "fastavro".\n'
-            'Install with: pip install fastavro',
-        ) from e
-    _FASTAVRO_CACHE['mod'] = _fastavro
-
-    return _fastavro
-
-
-def _normalize_records(data: JSONData) -> JSONList:
+    Only primitive field values are supported; complex values raise TypeError.
     """
-    Normalize JSON payloads into a list of dictionaries.
+    field_names = sorted({key for record in records for key in record})
+    fields: list[dict[str, Any]] = []
+    for name in field_names:
+        types: list[str] = []
+        for record in records:
+            value = record.get(name)
+            if value is None:
+                types.append('null')
+                continue
+            if isinstance(value, dict | list):
+                raise TypeError(
+                    'AVRO payloads must contain only primitive values',
+                )
+            if not isinstance(value, _PRIMITIVE_TYPES):
+                raise TypeError(
+                    'AVRO payloads must contain only primitive values',
+                )
+            types.append(cast(str, _infer_value_type(value)))
+        fields.append({'name': name, 'type': _merge_types(types)})
 
-    Raises TypeError when payloads contain non-dict items.
-    """
-    if isinstance(data, list):
-        if not all(isinstance(item, dict) for item in data):
-            raise TypeError('AVRO payloads must contain only objects (dicts)')
-        return cast(JSONList, data)
-    return [cast(JSONDict, data)]
+    return {
+        'name': 'etlplus_record',
+        'type': 'record',
+        'fields': fields,
+    }
 
 
 def _infer_value_type(value: object) -> str | list[str]:
@@ -106,39 +117,6 @@ def _merge_types(types: list[str]) -> str | list[str]:
     return ordered
 
 
-def _infer_schema(records: JSONList) -> dict[str, Any]:
-    """
-    Infer a basic Avro schema from record payloads.
-
-    Only primitive field values are supported; complex values raise TypeError.
-    """
-    field_names = sorted({key for record in records for key in record})
-    fields: list[dict[str, Any]] = []
-    for name in field_names:
-        types: list[str] = []
-        for record in records:
-            value = record.get(name)
-            if value is None:
-                types.append('null')
-                continue
-            if isinstance(value, dict | list):
-                raise TypeError(
-                    'AVRO payloads must contain only primitive values',
-                )
-            if not isinstance(value, _PRIMITIVE_TYPES):
-                raise TypeError(
-                    'AVRO payloads must contain only primitive values',
-                )
-            types.append(cast(str, _infer_value_type(value)))
-        fields.append({'name': name, 'type': _merge_types(types)})
-
-    return {
-        'name': 'etlplus_record',
-        'type': 'record',
-        'fields': fields,
-    }
-
-
 # SECTION: FUNCTIONS ======================================================== #
 
 
@@ -158,7 +136,7 @@ def read(
     JSONList
         The list of dictionaries read from the AVRO file.
     """
-    fastavro = _get_fastavro()
+    fastavro = get_fastavro()
     with path.open('rb') as handle:
         reader = fastavro.reader(handle)
         return [cast(JSONDict, record) for record in reader]
@@ -183,11 +161,11 @@ def write(
     int
         Number of records written.
     """
-    records = _normalize_records(data)
+    records = normalize_records(data, 'AVRO')
     if not records:
         return 0
 
-    fastavro = _get_fastavro()
+    fastavro = get_fastavro()
     schema = _infer_schema(records)
     parsed_schema = fastavro.parse_schema(schema)
 
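For reference, a worked example of what the relocated _infer_schema produces. The bodies of _infer_value_type and _merge_types are unchanged and elided from this diff, so the 'long' scalar name and the union ordering below are assumptions rather than confirmed output:

records = [{'id': 1, 'name': 'a'}, {'id': 2}]

# _infer_schema(records) would yield roughly:
# {
#     'name': 'etlplus_record',
#     'type': 'record',
#     'fields': [
#         {'name': 'id', 'type': 'long'},                # merged from ['long', 'long']
#         {'name': 'name', 'type': ['string', 'null']},  # None in record 2 adds 'null'
#     ],
# }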
etlplus/file/bson.py ADDED
@@ -0,0 +1,77 @@
+"""
+:mod:`etlplus.file.bson` module.
+
+Helpers for reading/writing Binary JSON (BSON) files.
+
+Notes
+-----
+- A BSON file is a binary-encoded serialization of JSON-like documents.
+- Common cases:
+    - Data storage in MongoDB.
+    - Efficient data interchange between systems.
+    - Handling of complex data types not supported in standard JSON.
+- Rule of thumb:
+    - If the file follows the BSON specification, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read BSON content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the BSON file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the BSON file.
+    """
+    return stub.read(path, format_name='BSON')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to BSON at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the BSON file on disk.
+    data : JSONData
+        Data to write as BSON. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the BSON file.
+    """
+    return stub.write(path, data, format_name='BSON')