etlplus 0.10.4__py3-none-any.whl → 0.11.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- etlplus/cli/commands.py +1 -1
- etlplus/cli/constants.py +1 -1
- etlplus/cli/io.py +2 -2
- etlplus/config/pipeline.py +2 -2
- etlplus/database/ddl.py +1 -1
- etlplus/enums.py +2 -270
- etlplus/extract.py +5 -7
- etlplus/file/__init__.py +25 -0
- etlplus/file/core.py +287 -0
- etlplus/file/csv.py +82 -0
- etlplus/file/enums.py +238 -0
- etlplus/file/json.py +87 -0
- etlplus/file/xml.py +165 -0
- etlplus/file/yaml.py +125 -0
- etlplus/load.py +9 -12
- etlplus/run.py +6 -9
- {etlplus-0.10.4.dist-info → etlplus-0.11.5.dist-info}/METADATA +1 -1
- {etlplus-0.10.4.dist-info → etlplus-0.11.5.dist-info}/RECORD +22 -16
- etlplus/file.py +0 -652
- {etlplus-0.10.4.dist-info → etlplus-0.11.5.dist-info}/WHEEL +0 -0
- {etlplus-0.10.4.dist-info → etlplus-0.11.5.dist-info}/entry_points.txt +0 -0
- {etlplus-0.10.4.dist-info → etlplus-0.11.5.dist-info}/licenses/LICENSE +0 -0
- {etlplus-0.10.4.dist-info → etlplus-0.11.5.dist-info}/top_level.txt +0 -0
etlplus/file/core.py
ADDED
@@ -0,0 +1,287 @@
+"""
+:mod:`etlplus.file.core` module.
+
+Shared helpers for reading and writing structured and semi-structured data
+files.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import StrPath
+from . import csv
+from . import json
+from . import xml
+from . import yaml
+from .enums import FileFormat
+from .enums import infer_file_format_and_compression
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = ['File']
+
+
+# SECTION: CLASSES ========================================================== #
+
+
+@dataclass(slots=True)
+class File:
+    """
+    Convenience wrapper around structured file IO.
+
+    This class encapsulates the one-off helpers in this module as convenient
+    instance methods while retaining the original function API for
+    backward compatibility (those functions delegate to this class).
+
+    Attributes
+    ----------
+    path : Path
+        Path to the file on disk.
+    file_format : FileFormat | None, optional
+        Explicit format. If omitted, the format is inferred from the file
+        extension (``.csv``, ``.json``, ``.xml``, or ``.yaml``).
+    """
+
+    # -- Attributes -- #
+
+    path: Path
+    file_format: FileFormat | None = None
+
+    # -- Magic Methods (Object Lifecycle) -- #
+
+    def __post_init__(self) -> None:
+        """
+        Auto-detect and set the file format on initialization.
+
+        If no explicit ``file_format`` is provided, attempt to infer it from
+        the file path's extension and update :attr:`file_format`. If the
+        extension is unknown, the attribute is left as ``None`` and will be
+        validated later by :meth:`_ensure_format`.
+        """
+        # Normalize incoming path (allow str in constructor) to Path.
+        if isinstance(self.path, str):
+            self.path = Path(self.path)
+
+        if self.file_format is None:
+            try:
+                self.file_format = self._guess_format()
+            except ValueError:
+                # Leave as None; _ensure_format() will raise on use if needed.
+                pass
+
+    # -- Internal Instance Methods -- #
+
+    def _assert_exists(self) -> None:
+        """
+        Raise FileNotFoundError if :attr:`path` does not exist.
+
+        This centralizes existence checks across multiple read methods.
+        """
+        if not self.path.exists():
+            raise FileNotFoundError(f'File not found: {self.path}')
+
+    def _ensure_format(self) -> FileFormat:
+        """
+        Resolve the active format, guessing from extension if needed.
+
+        Returns
+        -------
+        FileFormat
+            The resolved file format.
+        """
+        return (
+            self.file_format
+            if self.file_format is not None
+            else self._guess_format()
+        )
+
+    def _guess_format(self) -> FileFormat:
+        """
+        Infer the file format from the filename extension.
+
+        Returns
+        -------
+        FileFormat
+            The inferred file format based on the file extension.
+
+        Raises
+        ------
+        ValueError
+            If the extension is unknown or unsupported.
+        """
+        fmt, compression = infer_file_format_and_compression(self.path)
+        if fmt is not None:
+            return fmt
+        if compression is not None:
+            raise ValueError(
+                'Cannot infer file format from compressed file '
+                f'{self.path!r} with compression {compression.value!r}',
+            )
+        raise ValueError(
+            f'Cannot infer file format from extension {self.path.suffix!r}',
+        )
+
+    # -- Instance Methods (Generic API) -- #
+
+    def read(self) -> JSONData:
+        """
+        Read structured data from :attr:`path` using :attr:`file_format`.
+
+        Returns
+        -------
+        JSONData
+            The structured data read from the file.
+
+        Raises
+        ------
+        ValueError
+            If the resolved file format is unsupported.
+        """
+        self._assert_exists()
+        fmt = self._ensure_format()
+        match fmt:
+            case FileFormat.CSV:
+                return csv.read(self.path)
+            case FileFormat.JSON:
+                return json.read(self.path)
+            case FileFormat.XML:
+                return xml.read(self.path)
+            case FileFormat.YAML:
+                return yaml.read(self.path)
+        raise ValueError(f'Unsupported format: {fmt}')
+
+    def write(
+        self,
+        data: JSONData,
+        *,
+        root_tag: str = xml.DEFAULT_XML_ROOT,
+    ) -> int:
+        """
+        Write ``data`` to :attr:`path` using :attr:`file_format`.
+
+        Parameters
+        ----------
+        data : JSONData
+            Data to write to the file.
+        root_tag : str, optional
+            Root tag name to use when writing XML files. Defaults to
+            ``'root'``.
+
+        Returns
+        -------
+        int
+            The number of records written.
+
+        Raises
+        ------
+        ValueError
+            If the resolved file format is unsupported.
+        """
+        fmt = self._ensure_format()
+        match fmt:
+            case FileFormat.CSV:
+                return csv.write(self.path, data)
+            case FileFormat.JSON:
+                return json.write(self.path, data)
+            case FileFormat.XML:
+                return xml.write(self.path, data, root_tag=root_tag)
+            case FileFormat.YAML:
+                return yaml.write(self.path, data)
+        raise ValueError(f'Unsupported format: {fmt}')
+
+    # -- Class Methods -- #
+
+    @classmethod
+    def from_path(
+        cls,
+        path: StrPath,
+        *,
+        file_format: FileFormat | str | None = None,
+    ) -> File:
+        """
+        Create a :class:`File` from any path-like and optional format.
+
+        Parameters
+        ----------
+        path : StrPath
+            Path to the file on disk.
+        file_format : FileFormat | str | None, optional
+            Explicit format. If omitted, the format is inferred from the file
+            extension (``.csv``, ``.json``, ``.xml``, or ``.yaml``).
+
+        Returns
+        -------
+        File
+            The constructed :class:`File` instance.
+        """
+        resolved = Path(path)
+        ff: FileFormat | None
+        if isinstance(file_format, str):
+            ff = FileFormat.coerce(file_format)
+        else:
+            ff = file_format
+
+        return cls(resolved, ff)
+
+    @classmethod
+    def read_file(
+        cls,
+        path: StrPath,
+        file_format: FileFormat | str | None = None,
+    ) -> JSONData:
+        """
+        Read structured data.
+
+        Parameters
+        ----------
+        path : StrPath
+            Path to the file on disk.
+        file_format : FileFormat | str | None, optional
+            Explicit format. If omitted, the format is inferred from the file
+            extension (``.csv``, ``.json``, ``.xml``, or ``.yaml``).
+
+        Returns
+        -------
+        JSONData
+            The structured data read from the file.
+        """
+        return cls.from_path(path, file_format=file_format).read()
+
+    @classmethod
+    def write_file(
+        cls,
+        path: StrPath,
+        data: JSONData,
+        file_format: FileFormat | str | None = None,
+        *,
+        root_tag: str = xml.DEFAULT_XML_ROOT,
+    ) -> int:
+        """
+        Write structured data and count written records.
+
+        Parameters
+        ----------
+        path : StrPath
+            Path to the file on disk.
+        data : JSONData
+            Data to write to the file.
+        file_format : FileFormat | str | None, optional
+            Explicit format. If omitted, the format is inferred from the file
+            extension (``.csv``, ``.json``, ``.xml``, or ``.yaml``).
+        root_tag : str, optional
+            Root tag name to use when writing XML files. Defaults to
+            ``'root'``.
+
+        Returns
+        -------
+        int
+            The number of records written to the file.
+        """
+        return cls.from_path(path, file_format=file_format).write(
+            data,
+            root_tag=root_tag,
+        )
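
The `File` dataclass above takes over the dispatch role of the old monolithic `etlplus/file.py` (removed in this release, per the file list), delegating to the per-format submodules. A minimal usage sketch, not part of the diff; the file names are hypothetical, everything else comes from the methods shown above:

from etlplus.file.core import File

# Format is inferred from the '.json' extension in __post_init__().
records = File.read_file('users.json')

# An explicit format string is coerced through FileFormat.coerce();
# write() returns the number of records written.
written = File.from_path('users.csv', file_format='csv').write(records)
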
etlplus/file/csv.py
ADDED
@@ -0,0 +1,82 @@
+"""
+:mod:`etlplus.file.csv` module.
+
+CSV read/write helpers.
+"""
+
+from __future__ import annotations
+
+import csv
+from pathlib import Path
+from typing import cast
+
+from ..types import JSONData
+from ..types import JSONDict
+from ..types import JSONList
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Load CSV content as a list of dictionaries.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CSV file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the CSV file.
+    """
+    with path.open('r', encoding='utf-8', newline='') as handle:
+        reader: csv.DictReader[str] = csv.DictReader(handle)
+        rows: JSONList = []
+        for row in reader:
+            if not any(row.values()):
+                continue
+            rows.append(cast(JSONDict, dict(row)))
+    return rows
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write CSV rows to ``path`` and return the number of rows.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the CSV file on disk.
+    data : JSONData
+        Data to write as CSV. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the CSV file.
+    """
+    rows: list[JSONDict]
+    if isinstance(data, list):
+        rows = [row for row in data if isinstance(row, dict)]
+    else:
+        rows = [data]
+
+    if not rows:
+        return 0
+
+    fieldnames = sorted({key for row in rows for key in row})
+    with path.open('w', encoding='utf-8', newline='') as handle:
+        writer = csv.DictWriter(handle, fieldnames=fieldnames)
+        writer.writeheader()
+        for row in rows:
+            writer.writerow({field: row.get(field) for field in fieldnames})
+
+    return len(rows)
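
Note how `write()` builds the header from the sorted union of keys across all rows, filling gaps via `row.get(field)`, while `read()` skips rows whose values are all empty. A short sketch of the round-trip behavior (hypothetical data, not part of the diff):

from pathlib import Path

from etlplus.file import csv as csv_file

rows = [{'id': 1, 'name': 'Ada'}, {'id': 2, 'city': 'Paris'}]

# Header becomes 'city,id,name'; missing cells are written empty.
count = csv_file.write(Path('people.csv'), rows)
assert count == 2

# csv.DictReader yields strings, so values round-trip as e.g. {'id': '1', ...}.
people = csv_file.read(Path('people.csv'))
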
etlplus/file/enums.py
ADDED
@@ -0,0 +1,238 @@
+"""
+:mod:`etlplus.file.enums` module.
+
+File-specific enums and helpers.
+"""
+
+from __future__ import annotations
+
+from pathlib import PurePath
+
+from ..enums import CoercibleStrEnum
+from ..types import StrStrMap
+
+# SECTION: EXPORTS ========================================================= #
+
+__all__ = [
+    'CompressionFormat',
+    'FileFormat',
+    'infer_file_format_and_compression',
+]
+
+
+# SECTION: ENUMS ============================================================ #
+
+
+class CompressionFormat(CoercibleStrEnum):
+    """Supported compression formats."""
+
+    # -- Constants -- #
+
+    GZ = 'gz'
+    ZIP = 'zip'
+
+    # -- Class Methods -- #
+
+    @classmethod
+    def aliases(cls) -> StrStrMap:
+        """
+        Return a mapping of common aliases for each enum member.
+
+        Returns
+        -------
+        StrStrMap
+            A mapping of alias names to their corresponding enum member names.
+        """
+        return {
+            # File extensions
+            '.gz': 'gz',
+            '.gzip': 'gz',
+            '.zip': 'zip',
+            # MIME types
+            'application/gzip': 'gz',
+            'application/x-gzip': 'gz',
+            'application/zip': 'zip',
+            'application/x-zip-compressed': 'zip',
+        }
+
+
+class FileFormat(CoercibleStrEnum):
+    """Supported file formats for extraction."""
+
+    # -- Constants -- #
+
+    AVRO = 'avro'
+    CSV = 'csv'
+    FEATHER = 'feather'
+    GZ = 'gz'
+    JSON = 'json'
+    NDJSON = 'ndjson'
+    ORC = 'orc'
+    PARQUET = 'parquet'
+    TSV = 'tsv'
+    TXT = 'txt'
+    XLS = 'xls'
+    XLSX = 'xlsx'
+    ZIP = 'zip'
+    XML = 'xml'
+    YAML = 'yaml'
+
+    # -- Class Methods -- #
+
+    @classmethod
+    def aliases(cls) -> StrStrMap:
+        """
+        Return a mapping of common aliases for each enum member.
+
+        Returns
+        -------
+        StrStrMap
+            A mapping of alias names to their corresponding enum member names.
+        """
+        return {
+            # Common shorthand
+            'parq': 'parquet',
+            'yml': 'yaml',
+            # File extensions
+            '.avro': 'avro',
+            '.csv': 'csv',
+            '.feather': 'feather',
+            '.gz': 'gz',
+            '.json': 'json',
+            '.jsonl': 'ndjson',
+            '.ndjson': 'ndjson',
+            '.orc': 'orc',
+            '.parquet': 'parquet',
+            '.pq': 'parquet',
+            '.tsv': 'tsv',
+            '.txt': 'txt',
+            '.xls': 'xls',
+            '.xlsx': 'xlsx',
+            '.zip': 'zip',
+            '.xml': 'xml',
+            '.yaml': 'yaml',
+            '.yml': 'yaml',
+            # MIME types
+            'application/avro': 'avro',
+            'application/csv': 'csv',
+            'application/feather': 'feather',
+            'application/gzip': 'gz',
+            'application/json': 'json',
+            'application/jsonlines': 'ndjson',
+            'application/ndjson': 'ndjson',
+            'application/orc': 'orc',
+            'application/parquet': 'parquet',
+            'application/vnd.apache.avro': 'avro',
+            'application/vnd.apache.parquet': 'parquet',
+            'application/vnd.apache.arrow.file': 'feather',
+            'application/vnd.apache.orc': 'orc',
+            'application/vnd.ms-excel': 'xls',
+            (
+                'application/vnd.openxmlformats-'
+                'officedocument.spreadsheetml.sheet'
+            ): 'xlsx',
+            'application/x-avro': 'avro',
+            'application/x-csv': 'csv',
+            'application/x-feather': 'feather',
+            'application/x-orc': 'orc',
+            'application/x-ndjson': 'ndjson',
+            'application/x-parquet': 'parquet',
+            'application/x-yaml': 'yaml',
+            'application/xml': 'xml',
+            'application/zip': 'zip',
+            'text/csv': 'csv',
+            'text/plain': 'txt',
+            'text/tab-separated-values': 'tsv',
+            'text/tsv': 'tsv',
+            'text/xml': 'xml',
+            'text/yaml': 'yaml',
+        }
+
+
+# SECTION: INTERNAL CONSTANTS =============================================== #
+
+
+# Compression formats that are also file formats.
+_COMPRESSION_FILE_FORMATS: set[FileFormat] = {
+    FileFormat.GZ,
+    FileFormat.ZIP,
+}
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+# TODO: Convert to a method on FileFormat or CompressionFormat?
+def infer_file_format_and_compression(
+    value: object,
+    filename: object | None = None,
+) -> tuple[FileFormat | None, CompressionFormat | None]:
+    """
+    Infer data format and compression from a filename, extension, or MIME type.
+
+    Parameters
+    ----------
+    value : object
+        A filename, extension, MIME type, or existing enum member.
+    filename : object | None, optional
+        A filename to consult for extension-based inference (e.g. when
+        ``value`` is ``application/octet-stream``).
+
+    Returns
+    -------
+    tuple[FileFormat | None, CompressionFormat | None]
+        The inferred data format and compression, if any.
+    """
+    if isinstance(value, FileFormat):
+        if value in _COMPRESSION_FILE_FORMATS:
+            return None, CompressionFormat.coerce(value.value)
+        return value, None
+    if isinstance(value, CompressionFormat):
+        return None, value
+
+    text = str(value).strip()
+    if not text:
+        return None, None
+
+    normalized = text.casefold()
+    mime = normalized.split(';', 1)[0].strip()
+
+    is_octet_stream = mime == 'application/octet-stream'
+    compression = CompressionFormat.try_coerce(mime)
+    fmt = None if is_octet_stream else FileFormat.try_coerce(mime)
+
+    is_mime = mime.startswith(
+        (
+            'application/',
+            'text/',
+            'audio/',
+            'image/',
+            'video/',
+            'multipart/',
+        ),
+    )
+    suffix_source: object | None = filename if filename is not None else text
+    if is_mime and filename is None:
+        suffix_source = None
+
+    suffixes = (
+        PurePath(str(suffix_source)).suffixes
+        if suffix_source is not None
+        else []
+    )
+    if suffixes:
+        normalized_suffixes = [suffix.casefold() for suffix in suffixes]
+        compression = (
+            CompressionFormat.try_coerce(normalized_suffixes[-1])
+            or compression
+        )
+        if compression is not None:
+            normalized_suffixes = normalized_suffixes[:-1]
+        if normalized_suffixes:
+            fmt = FileFormat.try_coerce(normalized_suffixes[-1]) or fmt
+
+    if fmt in _COMPRESSION_FILE_FORMATS:
+        compression = compression or CompressionFormat.coerce(fmt.value)
+        fmt = None
+
+    return fmt, compression
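
The inference helper checks the MIME type first, then falls back to filename suffixes, peeling a trailing compression suffix off before resolving the data format. A few illustrative calls, not part of the diff, assuming `coerce`/`try_coerce` on `CoercibleStrEnum` (defined in `etlplus.enums`, outside this diff) resolve the `aliases()` tables above:

from etlplus.file.enums import CompressionFormat
from etlplus.file.enums import FileFormat
from etlplus.file.enums import infer_file_format_and_compression

# Compound suffixes: '.gz' is peeled off as compression, '.csv' is the format.
assert infer_file_format_and_compression('data.csv.gz') == (
    FileFormat.CSV,
    CompressionFormat.GZ,
)

# Plain MIME types resolve directly through the alias tables.
assert infer_file_format_and_compression('text/csv') == (FileFormat.CSV, None)

# 'application/octet-stream' carries no format; the optional filename
# argument supplies the extension instead.
assert infer_file_format_and_compression(
    'application/octet-stream',
    'report.parquet',
) == (FileFormat.PARQUET, None)
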
etlplus/file/json.py
ADDED
@@ -0,0 +1,87 @@
+"""
+:mod:`etlplus.file.json` module.
+
+JSON read/write helpers.
+"""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import cast
+
+from ..types import JSONData
+from ..types import JSONDict
+from ..types import JSONList
+from ..utils import count_records
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONData:
+    """
+    Load and validate JSON payloads from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the JSON file on disk.
+
+    Returns
+    -------
+    JSONData
+        The structured data read from the JSON file.
+
+    Raises
+    ------
+    TypeError
+        If the JSON root is not an object or an array of objects.
+    """
+    with path.open('r', encoding='utf-8') as handle:
+        loaded = json.load(handle)
+
+    if isinstance(loaded, dict):
+        return cast(JSONDict, loaded)
+    if isinstance(loaded, list):
+        if all(isinstance(item, dict) for item in loaded):
+            return cast(JSONList, loaded)
+        raise TypeError(
+            'JSON array must contain only objects (dicts) when loading file',
+        )
+    raise TypeError(
+        'JSON root must be an object or an array of objects when loading file',
+    )
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` as formatted JSON to ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the JSON file on disk.
+    data : JSONData
+        Data to serialize as JSON.
+
+    Returns
+    -------
+    int
+        The number of records written to the JSON file.
+    """
+    path.parent.mkdir(parents=True, exist_ok=True)
+    with path.open('w', encoding='utf-8') as handle:
+        json.dump(
+            data,
+            handle,
+            indent=2,
+            ensure_ascii=False,
+        )
+        handle.write('\n')
+
+    return count_records(data)
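
Unlike `csv.write()` above, `write()` here creates missing parent directories before opening the file; it pretty-prints with `indent=2`, keeps non-ASCII text (`ensure_ascii=False`), and appends a trailing newline. A small sketch (hypothetical paths, not part of the diff; `count_records` lives in `etlplus.utils`, outside this diff, and presumably counts one record per list element):

from pathlib import Path

from etlplus.file import json as json_file

payload = [{'id': 1}, {'id': 2}]

# Creates 'out/' if needed and returns count_records(payload).
json_file.write(Path('out/payload.json'), payload)

# read() validates the root: a dict, or a list containing only dicts;
# anything else raises TypeError.
assert json_file.read(Path('out/payload.json')) == payload
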