etlplus 0.11.11__py3-none-any.whl → 0.12.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
etlplus/file/core.py CHANGED
@@ -25,7 +25,7 @@ from . import xls
 from . import xlsx
 from . import xml
 from . import yaml
-from . import zip
+from . import zip as zip_
 from .enums import FileFormat
 from .enums import infer_file_format_and_compression

@@ -222,7 +222,7 @@ class File:
            case FileFormat.YAML:
                return yaml.read(self.path)
            case FileFormat.ZIP:
-                return zip.read(self.path)
+                return zip_.read(self.path)
        raise ValueError(f'Unsupported format: {fmt}')

    def write(
@@ -283,5 +283,5 @@ class File:
            case FileFormat.YAML:
                return yaml.write(self.path, data)
            case FileFormat.ZIP:
-                return zip.write(self.path, data)
+                return zip_.write(self.path, data)
        raise ValueError(f'Unsupported format: {fmt}')
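
The rename matters because `from . import zip` binds the submodule to the name `zip`, shadowing the builtin of the same name for the rest of core.py. A minimal, self-contained sketch of the failure mode the `zip_` alias avoids (illustrative, not from the package):

    import types

    zip = types.ModuleType('zip')   # simulates 'from . import zip' binding a module
    try:
        zip([1, 2], 'ab')           # the module object shadows the builtin zip()
    except TypeError as exc:
        print(exc)                  # 'module' object is not callable

    del zip                         # drop the shadowing global; the builtin is visible again
    print(list(zip([1, 2], 'ab')))  # [(1, 'a'), (2, 'b')]
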
etlplus/file/csv.py CHANGED
@@ -1,18 +1,26 @@
 """
 :mod:`etlplus.file.csv` module.

-CSV read/write helpers.
+Helpers for reading/writing CSV files.
 """

 from __future__ import annotations

-import csv
 from pathlib import Path
-from typing import cast

 from ..types import JSONData
-from ..types import JSONDict
 from ..types import JSONList
+from ._io import read_delimited
+from ._io import write_delimited
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+

 # SECTION: FUNCTIONS ======================================================== #

@@ -21,7 +29,7 @@ def read(
     path: Path,
 ) -> JSONList:
     """
-    Load CSV content as a list of dictionaries.
+    Read CSV content from ``path``.

     Parameters
     ----------
@@ -33,14 +41,7 @@
     JSONList
         The list of dictionaries read from the CSV file.
     """
-    with path.open('r', encoding='utf-8', newline='') as handle:
-        reader: csv.DictReader[str] = csv.DictReader(handle)
-        rows: JSONList = []
-        for row in reader:
-            if not any(row.values()):
-                continue
-            rows.append(cast(JSONDict, dict(row)))
-        return rows
+    return read_delimited(path, delimiter=',')


 def write(
@@ -48,7 +49,7 @@ def write(
     data: JSONData,
 ) -> int:
     """
-    Write CSV rows to ``path`` and return the number of rows.
+    Write ``data`` to CSV at ``path`` and return record count.

     Parameters
     ----------
@@ -63,20 +64,4 @@
     int
         The number of rows written to the CSV file.
     """
-    rows: list[JSONDict]
-    if isinstance(data, list):
-        rows = [row for row in data if isinstance(row, dict)]
-    else:
-        rows = [data]
-
-    if not rows:
-        return 0
-
-    fieldnames = sorted({key for row in rows for key in row})
-    with path.open('w', encoding='utf-8', newline='') as handle:
-        writer = csv.DictWriter(handle, fieldnames=fieldnames)
-        writer.writeheader()
-        for row in rows:
-            writer.writerow({field: row.get(field) for field in fieldnames})
-
-    return len(rows)
+    return write_delimited(path, data, delimiter=',')
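
Both helpers now delegate to shared delimited-file plumbing in `_io` (with `delimiter=','`), presumably so other delimited formats can reuse it. A usage sketch based on the signatures above; it assumes `read_delimited`, like the removed `csv.DictReader` code, yields all values back as strings:

    from pathlib import Path
    from etlplus.file import csv as etl_csv   # aliased to avoid clashing with stdlib csv

    n = etl_csv.write(Path('rows.csv'), [{'id': 1, 'name': 'a'}])  # -> 1
    rows = etl_csv.read(Path('rows.csv'))                          # e.g. [{'id': '1', 'name': 'a'}]
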
etlplus/file/feather.py CHANGED
@@ -1,48 +1,74 @@
 """
 :mod:`etlplus.file.feather` module.

-Stub helpers for FEATHER read/write.
+Helpers for reading/writing Feather files.
 """

 from __future__ import annotations

 from pathlib import Path
+from typing import cast

 from ..types import JSONData
+from ..types import JSONList
+from ._io import normalize_records
+from ._pandas import get_pandas

 # SECTION: EXPORTS ========================================================== #


-def read(path: Path) -> JSONData:
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
     """
-    Read FEATHER content from ``path``.
+    Read Feather content from ``path``.

     Parameters
     ----------
     path : Path
-        Path to the FEATHER file on disk.
+        Path to the Feather file on disk.

     Returns
     -------
-    JSONData
-        Parsed payload.
+    JSONList
+        The list of dictionaries read from the Feather file.

     Raises
     ------
-    NotImplementedError
-        FEATHER :func:`read` is not implemented yet.
+    ImportError
+        When optional dependency "pyarrow" is missing.
     """
-    raise NotImplementedError('FEATHER read is not implemented yet')
+    pandas = get_pandas('Feather')
+    try:
+        frame = pandas.read_feather(path)
+    except ImportError as e:  # pragma: no cover
+        raise ImportError(
+            'Feather support requires optional dependency "pyarrow".\n'
+            'Install with: pip install pyarrow',
+        ) from e
+    return cast(JSONList, frame.to_dict(orient='records'))


-def write(path: Path, data: JSONData) -> int:
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
     """
-    Write ``data`` to FEATHER at ``path``.
+    Write ``data`` to Feather at ``path`` and return record count.

     Parameters
     ----------
     path : Path
-        Path to the FEATHER file on disk.
+        Path to the Feather file on disk.
     data : JSONData
         Data to write.

@@ -53,7 +79,21 @@ def write(path: Path, data: JSONData) -> int:

     Raises
     ------
-    NotImplementedError
-        FEATHER :func:`write` is not implemented yet.
+    ImportError
+        When optional dependency "pyarrow" is missing.
     """
-    raise NotImplementedError('FEATHER write is not implemented yet')
+    records = normalize_records(data, 'Feather')
+    if not records:
+        return 0
+
+    pandas = get_pandas('Feather')
+    path.parent.mkdir(parents=True, exist_ok=True)
+    frame = pandas.DataFrame.from_records(records)
+    try:
+        frame.to_feather(path)
+    except ImportError as e:  # pragma: no cover
+        raise ImportError(
+            'Feather support requires optional dependency "pyarrow".\n'
+            'Install with: pip install pyarrow',
+        ) from e
+    return len(records)
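
The Feather stubs are now real pandas-backed implementations. A round-trip sketch assuming the optional dependencies (pandas plus pyarrow) are installed:

    from pathlib import Path
    from etlplus.file import feather

    n = feather.write(Path('frame.feather'), [{'id': 1, 'x': 2.5}])  # -> 1
    rows = feather.read(Path('frame.feather'))                       # [{'id': 1, 'x': 2.5}]

Without pyarrow, both calls surface an ImportError carrying the `pip install pyarrow` hint.
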
etlplus/file/gz.py CHANGED
@@ -1,21 +1,37 @@
 """
 :mod:`etlplus.file.gz` module.

-Stub helpers for GZ read/write.
+Helpers for reading/writing GZ files.
 """

 from __future__ import annotations

+import gzip
+import tempfile
 from pathlib import Path

 from ..types import JSONData
+from .enums import CompressionFormat
+from .enums import FileFormat
+from .enums import infer_file_format_and_compression

 # SECTION: EXPORTS ========================================================== #


-def read(path: Path) -> JSONData:
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+def _resolve_format(
+    path: Path,
+) -> FileFormat:
     """
-    Read GZ content from ``path``.
+    Resolve the inner file format from a .gz filename.

     Parameters
     ----------
@@ -24,20 +40,61 @@ def read(path: Path) -> JSONData:

     Returns
     -------
-    JSONData
-        Parsed payload.
+    FileFormat
+        The inferred inner file format.

     Raises
     ------
-    NotImplementedError
-        GZ :func:`read` is not implemented yet.
+    ValueError
+        If the file format cannot be inferred from the filename.
+    """
+    fmt, compression = infer_file_format_and_compression(path)
+    if compression is not CompressionFormat.GZ:
+        raise ValueError(f'Not a gzip file: {path}')
+    if fmt is None:
+        raise ValueError(
+            f'Cannot infer file format from compressed file {path!r}',
+        )
+    return fmt
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONData:
+    """
+    Read GZ content from ``path`` and parse the inner payload.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the GZ file on disk.
+
+    Returns
+    -------
+    JSONData
+        Parsed payload.
     """
-    raise NotImplementedError('GZ read is not implemented yet')
+    fmt = _resolve_format(path)
+    with gzip.open(path, 'rb') as handle:
+        payload = handle.read()
+
+    with tempfile.TemporaryDirectory() as tmpdir:
+        tmp_path = Path(tmpdir) / f'payload.{fmt.value}'
+        tmp_path.write_bytes(payload)
+        from .core import File

+        return File(tmp_path, fmt).read()

-def write(path: Path, data: JSONData) -> int:
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
     """
-    Write ``data`` to GZ at ``path``.
+    Write ``data`` to GZ at ``path`` and return record count.

     Parameters
     ----------
@@ -50,10 +107,17 @@ def write(path: Path, data: JSONData) -> int:
     -------
     int
         Number of records written.
-
-    Raises
-    ------
-    NotImplementedError
-        GZ :func:`write` is not implemented yet.
     """
-    raise NotImplementedError('GZ write is not implemented yet')
+    fmt = _resolve_format(path)
+    with tempfile.TemporaryDirectory() as tmpdir:
+        tmp_path = Path(tmpdir) / f'payload.{fmt.value}'
+        from .core import File
+
+        count = File(tmp_path, fmt).write(data)
+        payload = tmp_path.read_bytes()
+
+    path.parent.mkdir(parents=True, exist_ok=True)
+    with gzip.open(path, 'wb') as handle:
+        handle.write(payload)
+
+    return count
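
GZ support works by round-tripping through a temporary file: the inner format is inferred from the double extension (e.g. `.json.gz`), the payload is written or parsed by the matching `File` handler, and gzip only wraps the bytes. A usage sketch based on the code above:

    from pathlib import Path
    from etlplus.file import gz

    count = gz.write(Path('events.json.gz'), [{'id': 1}, {'id': 2}])  # inner format: JSON
    rows = gz.read(Path('events.json.gz'))                            # decompress, then parse
    gz.read(Path('archive.gz'))                                       # ValueError: no inner format in name
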
etlplus/file/json.py CHANGED
@@ -1,7 +1,7 @@
 """
 :mod:`etlplus.file.json` module.

-JSON read/write helpers.
+Helpers for reading/writing JSON files.
 """

 from __future__ import annotations
@@ -15,6 +15,15 @@ from ..types import JSONDict
 from ..types import JSONList
 from ..utils import count_records

+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
 # SECTION: FUNCTIONS ======================================================== #


@@ -22,7 +31,9 @@ def read(
     path: Path,
 ) -> JSONData:
     """
-    Load and validate JSON payloads from ``path``.
+    Read JSON content from ``path``.
+
+    Validates that the JSON root is a dict or a list of dicts.

     Parameters
     ----------
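
Beyond the docstring change, read's contract is now explicit: the JSON root must be a dict or a list of dicts. An illustrative sketch (the exact exception raised on other roots is not shown in this hunk):

    from pathlib import Path
    from etlplus.file import json as etl_json

    Path('ok.json').write_text('[{"id": 1}]', encoding='utf-8')
    rows = etl_json.read(Path('ok.json'))   # accepted: list of dicts

    Path('bad.json').write_text('[1, 2]', encoding='utf-8')
    etl_json.read(Path('bad.json'))         # rejected by the root validation
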
etlplus/file/ndjson.py CHANGED
@@ -1,19 +1,35 @@
 """
 :mod:`etlplus.file.ndjson` module.

-Stub helpers for NDJSON read/write.
+Helpers for reading/writing NDJSON files.
 """

 from __future__ import annotations

+import json
 from pathlib import Path
+from typing import cast

 from ..types import JSONData
+from ..types import JSONDict
+from ..types import JSONList
+from ..utils import count_records

 # SECTION: EXPORTS ========================================================== #


-def read(path: Path) -> JSONData:
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
     """
     Read NDJSON content from ``path``.

@@ -24,18 +40,33 @@ def read(path: Path) -> JSONData:

     Returns
     -------
-    JSONData
-        Parsed payload.
+    JSONList
+        The list of dictionaries read from the NDJSON file.

     Raises
     ------
-    NotImplementedError
-        NDJSON :func:`read` is not implemented yet.
+    TypeError
+        If any line in the NDJSON file is not a JSON object (dict).
     """
-    raise NotImplementedError('NDJSON read is not implemented yet')
+    rows: JSONList = []
+    with path.open('r', encoding='utf-8') as handle:
+        for idx, line in enumerate(handle, start=1):
+            text = line.strip()
+            if not text:
+                continue
+            payload = json.loads(text)
+            if not isinstance(payload, dict):
+                raise TypeError(
+                    f'NDJSON lines must be objects (dicts) (line {idx})',
+                )
+            rows.append(cast(JSONDict, payload))
+    return rows


-def write(path: Path, data: JSONData) -> int:
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
     """
     Write ``data`` to NDJSON at ``path``.

@@ -53,7 +84,26 @@

     Raises
     ------
-    NotImplementedError
-        NDJSON :func:`write` is not implemented yet.
+    TypeError
+        If ``data`` is a list containing non-dict items.
     """
-    raise NotImplementedError('NDJSON write is not implemented yet')
+    rows: JSONList
+    if isinstance(data, list):
+        if not all(isinstance(item, dict) for item in data):
+            raise TypeError(
+                'NDJSON payloads must contain only objects (dicts)',
+            )
+        rows = cast(JSONList, data)
+    else:
+        rows = [cast(JSONDict, data)]
+
+    if not rows:
+        return 0
+
+    path.parent.mkdir(parents=True, exist_ok=True)
+    with path.open('w', encoding='utf-8') as handle:
+        for row in rows:
+            handle.write(json.dumps(row, ensure_ascii=False))
+            handle.write('\n')
+
+    return count_records(rows)
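
A round-trip sketch of the new NDJSON helpers, based on the signatures above (one JSON object per line; blank lines are skipped on read):

    from pathlib import Path
    from etlplus.file import ndjson

    n = ndjson.write(Path('events.ndjson'), [{'id': 1}, {'id': 2}])  # -> 2
    rows = ndjson.read(Path('events.ndjson'))                        # [{'id': 1}, {'id': 2}]
    ndjson.write(Path('bad.ndjson'), [{'id': 1}, 42])                # TypeError: non-dict item
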
etlplus/file/orc.py CHANGED
@@ -1,19 +1,34 @@
 """
 :mod:`etlplus.file.orc` module.

-Stub helpers for ORC read/write.
+Helpers for reading/writing ORC files.
 """

 from __future__ import annotations

 from pathlib import Path
+from typing import cast

 from ..types import JSONData
+from ..types import JSONList
+from ._io import normalize_records
+from ._pandas import get_pandas

 # SECTION: EXPORTS ========================================================== #


-def read(path: Path) -> JSONData:
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
     """
     Read ORC content from ``path``.

@@ -24,20 +39,31 @@ def read(path: Path) -> JSONData:

     Returns
     -------
-    JSONData
-        Parsed payload.
+    JSONList
+        The list of dictionaries read from the ORC file.

     Raises
     ------
-    NotImplementedError
-        ORC :func:`read` is not implemented yet.
+    ImportError
+        When optional dependency "pyarrow" is missing.
     """
-    raise NotImplementedError('ORC read is not implemented yet')
+    pandas = get_pandas('ORC')
+    try:
+        frame = pandas.read_orc(path)
+    except ImportError as e:  # pragma: no cover
+        raise ImportError(
+            'ORC support requires optional dependency "pyarrow".\n'
+            'Install with: pip install pyarrow',
+        ) from e
+    return cast(JSONList, frame.to_dict(orient='records'))


-def write(path: Path, data: JSONData) -> int:
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
     """
-    Write ``data`` to ORC at ``path``.
+    Write ``data`` to ORC at ``path`` and return record count.

     Parameters
     ----------
@@ -53,7 +79,21 @@ def write(path: Path, data: JSONData) -> int:

     Raises
     ------
-    NotImplementedError
-        ORC :func:`write` is not implemented yet.
+    ImportError
+        When optional dependency "pyarrow" is missing.
     """
-    raise NotImplementedError('ORC write is not implemented yet')
+    records = normalize_records(data, 'ORC')
+    if not records:
+        return 0
+
+    pandas = get_pandas('ORC')
+    path.parent.mkdir(parents=True, exist_ok=True)
+    frame = pandas.DataFrame.from_records(records)
+    try:
+        frame.to_orc(path, index=False)
+    except ImportError as e:  # pragma: no cover
+        raise ImportError(
+            'ORC support requires optional dependency "pyarrow".\n'
+            'Install with: pip install pyarrow',
+        ) from e
+    return len(records)
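
ORC mirrors the Feather implementation, differing only in the pandas calls (`read_orc` / `to_orc(index=False)`). Since the pyarrow guard re-raises with an install hint, a caller-side sketch for handling the missing optional dependency:

    from pathlib import Path
    from etlplus.file import orc

    try:
        rows = orc.read(Path('data.orc'))
    except ImportError as exc:   # raised when pyarrow is absent
        print(exc)               # message includes: pip install pyarrow
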