etlplus 0.12.1__py3-none-any.whl → 0.12.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. etlplus/file/_imports.py +141 -0
  2. etlplus/file/_io.py +121 -0
  3. etlplus/file/accdb.py +78 -0
  4. etlplus/file/arrow.py +78 -0
  5. etlplus/file/avro.py +46 -68
  6. etlplus/file/bson.py +77 -0
  7. etlplus/file/cbor.py +78 -0
  8. etlplus/file/cfg.py +79 -0
  9. etlplus/file/conf.py +80 -0
  10. etlplus/file/core.py +119 -84
  11. etlplus/file/csv.py +17 -29
  12. etlplus/file/dat.py +78 -0
  13. etlplus/file/duckdb.py +78 -0
  14. etlplus/file/enums.py +114 -15
  15. etlplus/file/feather.py +18 -51
  16. etlplus/file/fwf.py +77 -0
  17. etlplus/file/ini.py +79 -0
  18. etlplus/file/ion.py +78 -0
  19. etlplus/file/json.py +13 -1
  20. etlplus/file/log.py +78 -0
  21. etlplus/file/mdb.py +78 -0
  22. etlplus/file/msgpack.py +78 -0
  23. etlplus/file/ndjson.py +14 -15
  24. etlplus/file/orc.py +18 -49
  25. etlplus/file/parquet.py +18 -51
  26. etlplus/file/pb.py +78 -0
  27. etlplus/file/pbf.py +77 -0
  28. etlplus/file/properties.py +78 -0
  29. etlplus/file/proto.py +77 -0
  30. etlplus/file/psv.py +79 -0
  31. etlplus/file/sqlite.py +78 -0
  32. etlplus/file/stub.py +84 -0
  33. etlplus/file/tab.py +81 -0
  34. etlplus/file/toml.py +78 -0
  35. etlplus/file/tsv.py +18 -29
  36. etlplus/file/txt.py +13 -10
  37. etlplus/file/xls.py +4 -48
  38. etlplus/file/xlsx.py +5 -48
  39. etlplus/file/xml.py +12 -1
  40. etlplus/file/yaml.py +15 -44
  41. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/METADATA +119 -1
  42. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/RECORD +46 -21
  43. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/WHEEL +0 -0
  44. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/entry_points.txt +0 -0
  45. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/licenses/LICENSE +0 -0
  46. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/top_level.txt +0 -0
etlplus/file/enums.py CHANGED
@@ -61,21 +61,119 @@ class FileFormat(CoercibleStrEnum):
 
     # -- Constants -- #
 
-    AVRO = 'avro'
-    CSV = 'csv'
-    FEATHER = 'feather'
-    GZ = 'gz'
-    JSON = 'json'
-    NDJSON = 'ndjson'
-    ORC = 'orc'
-    PARQUET = 'parquet'
-    TSV = 'tsv'
-    TXT = 'txt'
-    XLS = 'xls'
-    XLSX = 'xlsx'
-    ZIP = 'zip'
-    XML = 'xml'
-    YAML = 'yaml'
+    # Stubbed / placeholder
+    STUB = 'stub'  # Placeholder format for tests & future connectors
+
+    # Tabular & delimited text
+    CSV = 'csv'  # Comma-Separated Values
+    DAT = 'dat'  # Generic data file, often delimited or fixed-width
+    FWF = 'fwf'  # Fixed-Width Formatted
+    PSV = 'psv'  # Pipe-Separated Values
+    TAB = 'tab'  # Often synonymous with TSV
+    TSV = 'tsv'  # Tab-Separated Values
+    TXT = 'txt'  # Plain text, often delimited or fixed-width
+
+    # Semi-structured text
+    CFG = 'cfg'  # Config-style key-value pairs
+    CONF = 'conf'  # Config-style key-value pairs
+    INI = 'ini'  # INI-style key-value pairs
+    JSON = 'json'  # JavaScript Object Notation
+    NDJSON = 'ndjson'  # Newline-Delimited JSON
+    PROPS = 'properties'  # Java-style key-value pairs
+    TOML = 'toml'  # Tom's Obvious Minimal Language
+    XML = 'xml'  # Extensible Markup Language
+    YAML = 'yaml'  # YAML Ain't Markup Language
+
+    # Columnar / analytics-friendly
+    ARROW = 'arrow'  # Apache Arrow IPC
+    FEATHER = 'feather'  # Apache Arrow Feather
+    ORC = 'orc'  # Optimized Row Columnar; common in Hadoop
+    PARQUET = 'parquet'  # Apache Parquet; common in Big Data
+
+    # Binary serialization & interchange
+    AVRO = 'avro'  # Apache Avro
+    BSON = 'bson'  # Binary JSON; common with MongoDB exports/dumps
+    CBOR = 'cbor'  # Concise Binary Object Representation
+    ION = 'ion'  # Amazon Ion
+    MSGPACK = 'msgpack'  # MessagePack
+    PB = 'pb'  # Protocol Buffers (Google Protobuf)
+    PBF = 'pbf'  # Protocolbuffer Binary Format; often for GIS data
+    PROTO = 'proto'  # Protocol Buffers schema definition
+
+    # Databases & embedded storage
+    ACCDB = 'accdb'  # Microsoft Access database file (newer format)
+    DUCKDB = 'duckdb'  # DuckDB database file
+    MDB = 'mdb'  # Microsoft Access database file (older format)
+    SQLITE = 'sqlite'  # SQLite database file
+
+    # Spreadsheets
+    NUMBERS = 'numbers'  # Apple Numbers spreadsheet
+    ODS = 'ods'  # OpenDocument Spreadsheet
+    WKS = 'wks'  # Lotus 1-2-3 spreadsheet
+    XLS = 'xls'  # Microsoft Excel (BIFF); read-only
+    XLSM = 'xlsm'  # Microsoft Excel Macro-Enabled (Open XML)
+    XLSX = 'xlsx'  # Microsoft Excel (Open XML)
+
+    # Statistical / scientific / numeric computing
+    DTA = 'dta'  # Stata data file
+    H5 = 'h5'  # Hierarchical Data Format v5 (HDF5)
+    MAT = 'mat'  # MATLAB data file
+    NC = 'nc'  # NetCDF data file
+    RDA = 'rda'  # RData workspace/object bundle
+    RDS = 'rds'  # R data file
+    SAS7BDAT = 'sas7bdat'  # SAS data file
+    SAV = 'sav'  # SPSS data file
+    SYLK = 'sylk'  # SYmbolic LinK spreadsheet format
+    XPT = 'xpt'  # SAS Transport file
+    ZSAV = 'zsav'  # Compressed SPSS data file
+
+    # Time series and financial data
+    CAMT = 'camt'  # ISO 20022 Cash Management messages
+    FXT = 'fxt'  # Forex time series data
+    MT940 = 'mt940'  # SWIFT MT940 bank statement format
+    MT942 = 'mt942'  # SWIFT MT942 interim transaction report format
+    OFX = 'ofx'  # Open Financial Exchange
+    QFX = 'qfx'  # Quicken Financial Exchange
+    QIF = 'qif'  # Quicken Interchange Format
+    QQQ = 'qqq'  # QuantQuote historical data
+    TRR = 'trr'  # Trade and transaction reports
+    TSDB = 'tsdb'  # Time series database export
+
+    # Geospatial data
+    GEOJSON = 'geojson'  # GeoJSON
+    GEOTIFF = 'geotiff'  # GeoTIFF
+    GML = 'gml'  # Geography Markup Language
+    GPKG = 'gpkg'  # GeoPackage
+    GPX = 'gpx'  # GPS Exchange Format
+    KML = 'kml'  # Keyhole Markup Language
+    LAS = 'las'  # LiDAR Aerial Survey
+    LAZ = 'laz'  # LASzip (compressed LAS)
+    OSM = 'osm'  # OpenStreetMap XML Data
+    SHP = 'shp'  # ESRI Shapefile
+    WKB = 'wkb'  # Well-Known Binary
+    WKT = 'wkt'  # Well-Known Text
+
+    # Logs & event streams
+    EVT = 'evt'  # Windows Event Log (pre-Vista)
+    EVTX = 'evtx'  # Windows XML Event Log (Vista and later)
+    LOG = 'log'  # Generic log file
+    PCAP = 'pcap'  # Packet Capture file
+    PCAPNG = 'pcapng'  # Packet Capture Next Generation file
+    SLOG = 'slog'  # Structured log file
+    W3CLOG = 'w3clog'  # W3C Extended Log File Format
+
+    # “Data archives” & packaging
+    _7Z = '7z'  # 7-Zip archive
+    GZ = 'gz'  # Gzip-compressed file
+    JAR = 'jar'  # Java archive
+    RAR = 'rar'  # RAR archive
+    SIT = 'sit'  # StuffIt archive
+    SITX = 'sitx'  # StuffIt X archive
+    TAR = 'tar'  # TAR archive
+    TGZ = 'tgz'  # Gzip-compressed TAR archive
+    ZIP = 'zip'  # ZIP archive
+
+    # Domain-specific & less common
 
     # -- Class Methods -- #
 
@@ -104,6 +202,7 @@ class FileFormat(CoercibleStrEnum):
             '.orc': 'orc',
             '.parquet': 'parquet',
             '.pq': 'parquet',
+            '.stub': 'stub',
             '.tsv': 'tsv',
             '.txt': 'txt',
             '.xls': 'xls',
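
The rewritten enum groups every supported or planned format under a category comment, and the second hunk registers the new .stub extension in the suffix-to-format table. Below is a minimal standalone sketch of how such a table can drive coercion; the _SUFFIX_MAP and from_path names are illustrative and do not appear in the etlplus code shown in this diff:

from enum import Enum
from pathlib import Path


class FileFormat(str, Enum):
    # Tiny excerpt of the members added in this release.
    CSV = 'csv'
    PARQUET = 'parquet'
    STUB = 'stub'


# Illustrative suffix map, mirroring the '.pq' -> 'parquet' style entries
# in the hunk above.
_SUFFIX_MAP = {
    '.csv': 'csv',
    '.parquet': 'parquet',
    '.pq': 'parquet',
    '.stub': 'stub',
}


def from_path(path: Path) -> FileFormat:
    # Coerce a file suffix to an enum member; unknown suffixes raise KeyError.
    return FileFormat(_SUFFIX_MAP[path.suffix.lower()])


assert from_path(Path('events.pq')) is FileFormat.PARQUET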
etlplus/file/feather.py CHANGED
@@ -1,18 +1,30 @@
 """
 :mod:`etlplus.file.feather` module.
 
-Helpers for reading/writing Feather files.
+Helpers for reading/writing Apache Arrow Feather (FEATHER) files.
+
+Notes
+-----
+- A FEATHER file is a binary file format designed for efficient
+  on-disk storage of data frames, built on top of Apache Arrow.
+- Common cases:
+    - Fast read/write operations for data frames.
+    - Interoperability between different data analysis tools.
+    - Storage of large datasets with efficient compression.
+- Rule of thumb:
+    - If the file follows the Apache Arrow Feather specification, use this
+      module for reading and writing.
 """
 
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Any
 from typing import cast
 
 from ..types import JSONData
-from ..types import JSONDict
 from ..types import JSONList
+from ._imports import get_pandas
+from ._io import normalize_records
 
 # SECTION: EXPORTS ========================================================== #
 
@@ -23,51 +35,6 @@ __all__ = [
 ]
 
 
-# SECTION: INTERNAL CONSTANTS =============================================== #
-
-
-_PANDAS_CACHE: dict[str, Any] = {}
-
-
-# SECTION: INTERNAL FUNCTIONS =============================================== #
-
-
-def _get_pandas() -> Any:
-    """
-    Return the pandas module, importing it on first use.
-
-    Raises an informative ImportError if the optional dependency is missing.
-    """
-    mod = _PANDAS_CACHE.get('mod')
-    if mod is not None:  # pragma: no cover - tiny branch
-        return mod
-    try:
-        _pd = __import__('pandas')  # type: ignore[assignment]
-    except ImportError as e:  # pragma: no cover
-        raise ImportError(
-            'Feather support requires optional dependency "pandas".\n'
-            'Install with: pip install pandas',
-        ) from e
-    _PANDAS_CACHE['mod'] = _pd
-
-    return _pd
-
-
-def _normalize_records(data: JSONData) -> JSONList:
-    """
-    Normalize JSON payloads into a list of dictionaries.
-
-    Raises TypeError when payloads contain non-dict items.
-    """
-    if isinstance(data, list):
-        if not all(isinstance(item, dict) for item in data):
-            raise TypeError(
-                'Feather payloads must contain only objects (dicts)',
-            )
-        return cast(JSONList, data)
-    return [cast(JSONDict, data)]
-
-
 # SECTION: FUNCTIONS ======================================================== #
 
 
@@ -92,7 +59,7 @@ def read(
     ImportError
         When optional dependency "pyarrow" is missing.
     """
-    pandas = _get_pandas()
+    pandas = get_pandas('Feather')
     try:
         frame = pandas.read_feather(path)
     except ImportError as e:  # pragma: no cover
@@ -127,11 +94,11 @@ def write(
     ImportError
        When optional dependency "pyarrow" is missing.
     """
-    records = _normalize_records(data)
+    records = normalize_records(data, 'Feather')
     if not records:
         return 0
 
-    pandas = _get_pandas()
+    pandas = get_pandas('Feather')
     path.parent.mkdir(parents=True, exist_ok=True)
     frame = pandas.DataFrame.from_records(records)
     try:
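
The refactor replaces feather.py's private _get_pandas() and _normalize_records() helpers with shared ones from the new _imports.py and _io.py modules (listed above but not shown in this diff), threading a format name through for error messages. A plausible sketch of those helpers, reconstructed from the inline versions removed above; the _MODULE_CACHE name and exact error wording are assumptions:

from typing import Any

# Assumed cache, mirroring the removed _PANDAS_CACHE from feather.py.
_MODULE_CACHE: dict[str, Any] = {}


def get_pandas(format_name: str) -> Any:
    # Import pandas lazily, caching the module object on first use.
    mod = _MODULE_CACHE.get('pandas')
    if mod is not None:
        return mod
    try:
        mod = __import__('pandas')
    except ImportError as e:
        # The format name personalizes the error, matching the new call
        # sites such as get_pandas('Feather').
        raise ImportError(
            f'{format_name} support requires optional dependency "pandas".\n'
            'Install with: pip install pandas',
        ) from e
    _MODULE_CACHE['pandas'] = mod
    return mod


def normalize_records(data: Any, format_name: str) -> list[dict]:
    # Coerce a dict or list-of-dicts payload into a list of dicts,
    # rejecting lists that contain non-dict items.
    if isinstance(data, list):
        if not all(isinstance(item, dict) for item in data):
            raise TypeError(
                f'{format_name} payloads must contain only objects (dicts)',
            )
        return data
    return [data]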
etlplus/file/fwf.py ADDED
@@ -0,0 +1,77 @@
+"""
+:mod:`etlplus.file.fwf` module.
+
+Helpers for reading/writing Fixed-Width Fields (FWF) files.
+
+Notes
+-----
+- An FWF file is a text file format where each field has a fixed width.
+- Common cases:
+    - Data files from legacy systems.
+    - Reports with aligned columns.
+    - Data exchange in mainframe environments.
+- Rule of thumb:
+    - If the file follows the FWF specification, use this module for
+      reading and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read FWF content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the FWF file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the FWF file.
+    """
+    return stub.read(path, format_name='FWF')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to FWF file at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the FWF file on disk.
+    data : JSONData
+        Data to write as FWF file. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the FWF file.
+    """
+    return stub.write(path, data, format_name='FWF')
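
fwf.py, like the other format modules added in this release, delegates to the new etlplus.file.stub module (+84 lines, listed above but not shown in this diff). A hypothetical sketch of such a placeholder, matching the stub.read(path, format_name=...) call sites; raising NotImplementedError is an assumption, not something the diff confirms:

from pathlib import Path
from typing import Any


def read(path: Path, *, format_name: str) -> list[dict[str, Any]]:
    # Placeholder reader: fail loudly until a real connector lands.
    raise NotImplementedError(
        f'{format_name} reading is not implemented yet (path: {path})',
    )


def write(path: Path, data: Any, *, format_name: str) -> int:
    # Placeholder writer: same contract (record count) as the real modules.
    raise NotImplementedError(
        f'{format_name} writing is not implemented yet (path: {path})',
    )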
etlplus/file/ini.py ADDED
@@ -0,0 +1,79 @@
+"""
+:mod:`etlplus.file.ini` module.
+
+Helpers for reading/writing initialization (INI) files.
+
+Notes
+-----
+- An INI file is a simple configuration file format that uses sections,
+  properties, and values.
+- Common cases:
+    - Sections are denoted by square brackets (e.g., ``[section]``).
+    - Properties are key-value pairs (e.g., ``key=value``).
+    - Comments are often indicated by semicolons (``;``) or hash symbols
+      (``#``).
+- Rule of thumb:
+    - If the file follows the INI specification, use this module for
+      reading and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read INI content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the INI file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the INI file.
+    """
+    return stub.read(path, format_name='INI')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to INI at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the INI file on disk.
+    data : JSONData
+        Data to write as INI. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the INI file.
+    """
+    return stub.write(path, data, format_name='INI')
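
ini.py documents a list-of-dicts contract but currently routes through the stub. For reference, a sketch of how that contract could eventually be met with the standard library's configparser; read_ini_records is an illustrative name, not an etlplus function:

import configparser
from pathlib import Path


def read_ini_records(path: Path) -> list[dict[str, str]]:
    # Flatten each INI section into one dict tagged with its section name,
    # matching the "list of dictionaries" shape documented above.
    parser = configparser.ConfigParser()
    parser.read(path, encoding='utf-8')
    return [
        {'section': section, **dict(parser[section])}
        for section in parser.sections()
    ]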
etlplus/file/ion.py ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.ion` module.
+
+Helpers for reading/writing Amazon Ion (ION) files.
+
+Notes
+-----
+- An ION file is a richly-typed, self-describing data format developed by
+  Amazon, designed for efficient data interchange and storage.
+- Common cases:
+    - Data serialization for distributed systems.
+    - Interoperability between different programming languages.
+    - Handling of complex data types beyond standard JSON capabilities.
+- Rule of thumb:
+    - If the file follows the Amazon Ion specification, use this module for
+      reading and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read ION content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the ION file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the ION file.
+    """
+    return stub.read(path, format_name='ION')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to ION at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the ION file on disk.
+    data : JSONData
+        Data to write as ION. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the ION file.
+    """
+    return stub.write(path, data, format_name='ION')
etlplus/file/json.py CHANGED
@@ -1,7 +1,19 @@
 """
 :mod:`etlplus.file.json` module.
 
-Helpers for reading/writing JSON files.
+Helpers for reading/writing JavaScript Object Notation (JSON) files.
+
+Notes
+-----
+- A JSON file is a widely used data interchange format that uses
+  human-readable text to represent structured data.
+- Common cases:
+    - Data interchange between web applications and servers.
+    - Configuration files for applications.
+    - Data storage for NoSQL databases.
+- Rule of thumb:
+    - If the file follows the JSON specification, use this module for
+      reading and writing.
 """
 
 from __future__ import annotations
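
This hunk only expands the docstring; json.py's read and write bodies are unchanged and not shown here. For orientation, a minimal stdlib sketch of the documented contract; read_json and write_json are illustrative names, not the module's actual functions:

import json
from pathlib import Path
from typing import Any


def read_json(path: Path) -> Any:
    # Load a JSON document (object or array) from disk.
    return json.loads(path.read_text(encoding='utf-8'))


def write_json(path: Path, data: Any) -> int:
    # Write data as JSON and return the record count, mirroring the
    # write-returns-row-count convention used across etlplus.file.
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(data, indent=2), encoding='utf-8')
    return len(data) if isinstance(data, list) else 1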
etlplus/file/log.py ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.log` module.
+
+Helpers for reading/writing generic log (LOG) files.
+
+Notes
+-----
+- A LOG file is a plain text file that contains log messages generated by
+  applications or systems.
+- Common cases:
+    - Each line in the file represents a single log entry.
+    - Log entries may include timestamps, log levels, and messages.
+    - Formats may vary widely depending on the application generating the logs.
+- Rule of thumb:
+    - If the file is a generic log file, use this module for reading and
+      writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read LOG content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the LOG file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the LOG file.
+    """
+    return stub.read(path, format_name='LOG')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to LOG at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the LOG file on disk.
+    data : JSONData
+        Data to write as LOG. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the LOG file.
+    """
+    return stub.write(path, data, format_name='LOG')
etlplus/file/mdb.py ADDED
@@ -0,0 +1,78 @@
+"""
+:mod:`etlplus.file.mdb` module.
+
+Helpers for reading/writing older Microsoft Access database (MDB) files.
+
+Notes
+-----
+- An MDB file is a proprietary database file format used by Microsoft Access
+  2003 and earlier.
+- Common cases:
+    - Storing relational data for small to medium-sized applications.
+    - Desktop database applications.
+    - Data management for non-enterprise solutions.
+- Rule of thumb:
+    - If the file follows the MDB specification, use this module for reading
+      and writing.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from ..types import JSONData
+from ..types import JSONList
+from . import stub
+
+# SECTION: EXPORTS ========================================================== #
+
+
+__all__ = [
+    'read',
+    'write',
+]
+
+
+# SECTION: FUNCTIONS ======================================================== #
+
+
+def read(
+    path: Path,
+) -> JSONList:
+    """
+    Read MDB content from ``path``.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the MDB file on disk.
+
+    Returns
+    -------
+    JSONList
+        The list of dictionaries read from the MDB file.
+    """
+    return stub.read(path, format_name='MDB')
+
+
+def write(
+    path: Path,
+    data: JSONData,
+) -> int:
+    """
+    Write ``data`` to MDB at ``path`` and return record count.
+
+    Parameters
+    ----------
+    path : Path
+        Path to the MDB file on disk.
+    data : JSONData
+        Data to write as MDB. Should be a list of dictionaries or a
+        single dictionary.
+
+    Returns
+    -------
+    int
+        The number of rows written to the MDB file.
+    """
+    return stub.write(path, data, format_name='MDB')
+ return stub.write(path, data, format_name='DAT')