etlplus 0.9.0__py3-none-any.whl → 0.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. etlplus/README.md +37 -0
  2. etlplus/__init__.py +1 -26
  3. etlplus/api/README.md +51 -3
  4. etlplus/api/__init__.py +10 -0
  5. etlplus/api/config.py +39 -28
  6. etlplus/api/endpoint_client.py +3 -3
  7. etlplus/api/enums.py +51 -0
  8. etlplus/api/pagination/client.py +1 -1
  9. etlplus/api/rate_limiting/config.py +13 -1
  10. etlplus/api/rate_limiting/rate_limiter.py +8 -11
  11. etlplus/api/request_manager.py +11 -6
  12. etlplus/api/transport.py +14 -2
  13. etlplus/api/types.py +96 -6
  14. etlplus/{run_helpers.py → api/utils.py} +209 -153
  15. etlplus/cli/README.md +40 -0
  16. etlplus/cli/commands.py +94 -61
  17. etlplus/cli/constants.py +1 -1
  18. etlplus/cli/handlers.py +40 -12
  19. etlplus/cli/io.py +2 -2
  20. etlplus/cli/main.py +1 -1
  21. etlplus/cli/state.py +4 -7
  22. etlplus/database/README.md +48 -0
  23. etlplus/database/ddl.py +1 -1
  24. etlplus/database/engine.py +19 -3
  25. etlplus/database/orm.py +2 -0
  26. etlplus/database/schema.py +1 -1
  27. etlplus/enums.py +1 -107
  28. etlplus/file/README.md +105 -0
  29. etlplus/file/__init__.py +25 -0
  30. etlplus/file/_imports.py +141 -0
  31. etlplus/file/_io.py +160 -0
  32. etlplus/file/accdb.py +78 -0
  33. etlplus/file/arrow.py +78 -0
  34. etlplus/file/avro.py +176 -0
  35. etlplus/file/bson.py +77 -0
  36. etlplus/file/cbor.py +78 -0
  37. etlplus/file/cfg.py +79 -0
  38. etlplus/file/conf.py +80 -0
  39. etlplus/file/core.py +322 -0
  40. etlplus/file/csv.py +79 -0
  41. etlplus/file/dat.py +78 -0
  42. etlplus/file/dta.py +77 -0
  43. etlplus/file/duckdb.py +78 -0
  44. etlplus/file/enums.py +343 -0
  45. etlplus/file/feather.py +111 -0
  46. etlplus/file/fwf.py +77 -0
  47. etlplus/file/gz.py +123 -0
  48. etlplus/file/hbs.py +78 -0
  49. etlplus/file/hdf5.py +78 -0
  50. etlplus/file/ini.py +79 -0
  51. etlplus/file/ion.py +78 -0
  52. etlplus/file/jinja2.py +78 -0
  53. etlplus/file/json.py +98 -0
  54. etlplus/file/log.py +78 -0
  55. etlplus/file/mat.py +78 -0
  56. etlplus/file/mdb.py +78 -0
  57. etlplus/file/msgpack.py +78 -0
  58. etlplus/file/mustache.py +78 -0
  59. etlplus/file/nc.py +78 -0
  60. etlplus/file/ndjson.py +108 -0
  61. etlplus/file/numbers.py +75 -0
  62. etlplus/file/ods.py +79 -0
  63. etlplus/file/orc.py +111 -0
  64. etlplus/file/parquet.py +113 -0
  65. etlplus/file/pb.py +78 -0
  66. etlplus/file/pbf.py +77 -0
  67. etlplus/file/properties.py +78 -0
  68. etlplus/file/proto.py +77 -0
  69. etlplus/file/psv.py +79 -0
  70. etlplus/file/rda.py +78 -0
  71. etlplus/file/rds.py +78 -0
  72. etlplus/file/sas7bdat.py +78 -0
  73. etlplus/file/sav.py +77 -0
  74. etlplus/file/sqlite.py +78 -0
  75. etlplus/file/stub.py +84 -0
  76. etlplus/file/sylk.py +77 -0
  77. etlplus/file/tab.py +81 -0
  78. etlplus/file/toml.py +78 -0
  79. etlplus/file/tsv.py +80 -0
  80. etlplus/file/txt.py +102 -0
  81. etlplus/file/vm.py +78 -0
  82. etlplus/file/wks.py +77 -0
  83. etlplus/file/xls.py +88 -0
  84. etlplus/file/xlsm.py +79 -0
  85. etlplus/file/xlsx.py +99 -0
  86. etlplus/file/xml.py +185 -0
  87. etlplus/file/xpt.py +78 -0
  88. etlplus/file/yaml.py +95 -0
  89. etlplus/file/zip.py +175 -0
  90. etlplus/file/zsav.py +77 -0
  91. etlplus/ops/README.md +50 -0
  92. etlplus/ops/__init__.py +61 -0
  93. etlplus/{extract.py → ops/extract.py} +81 -99
  94. etlplus/{load.py → ops/load.py} +78 -101
  95. etlplus/{run.py → ops/run.py} +159 -127
  96. etlplus/{transform.py → ops/transform.py} +75 -68
  97. etlplus/{validation → ops}/utils.py +53 -17
  98. etlplus/{validate.py → ops/validate.py} +22 -12
  99. etlplus/templates/README.md +46 -0
  100. etlplus/types.py +5 -4
  101. etlplus/utils.py +136 -2
  102. etlplus/workflow/README.md +52 -0
  103. etlplus/{config → workflow}/__init__.py +10 -23
  104. etlplus/{config → workflow}/connector.py +58 -44
  105. etlplus/workflow/dag.py +105 -0
  106. etlplus/{config → workflow}/jobs.py +105 -32
  107. etlplus/{config → workflow}/pipeline.py +59 -51
  108. etlplus/{config → workflow}/profile.py +8 -5
  109. etlplus/workflow/types.py +115 -0
  110. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/METADATA +210 -17
  111. etlplus-0.9.2.dist-info/RECORD +134 -0
  112. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/WHEEL +1 -1
  113. etlplus/config/types.py +0 -204
  114. etlplus/config/utils.py +0 -120
  115. etlplus/file.py +0 -657
  116. etlplus/validation/__init__.py +0 -44
  117. etlplus-0.9.0.dist-info/RECORD +0 -65
  118. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/entry_points.txt +0 -0
  119. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/licenses/LICENSE +0 -0
  120. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/top_level.txt +0 -0
etlplus/file/mdb.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.mdb` module.
3
+
4
+ Helpers for reading/writing legacy Microsoft Access database (MDB) files.
5
+
6
+ Notes
7
+ -----
8
+ - An MDB file is a proprietary database file format used by Microsoft Access
9
+ 2003 and earlier.
10
+ - Common cases:
11
+ - Storing relational data for small to medium-sized applications.
12
+ - Desktop database applications.
13
+ - Data management for non-enterprise solutions.
14
+ - Rule of thumb:
15
+ - If the file follows the MDB specification, use this module for reading
16
+ and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read MDB content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the MDB file on disk.

    Returns
    -------
    JSONList
        The list of dictionaries read from the MDB file.
    """
    # Fix: previously delegated with format_name='DAT' (and documented
    # CSV) — a copy-paste slip from the DAT module; report MDB instead.
    return stub.read(path, format_name='MDB')
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to MDB at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the MDB file on disk.
    data : JSONData
        Data to write as MDB. Should be a list of dictionaries or a
        single dictionary.

    Returns
    -------
    int
        The number of rows written to the MDB file.
    """
    # Fix: was format_name='DAT' (copy-paste from the DAT module).
    return stub.write(path, data, format_name='MDB')
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.msgpack` module.
3
+
4
+ Helpers for reading/writing MessagePack (MSGPACK) files.
5
+
6
+ Notes
7
+ -----
8
+ - A MsgPack file is a binary serialization format that is more compact than
9
+ JSON.
10
+ - Common cases:
11
+ - Efficient data storage and transmission.
12
+ - Inter-process communication.
13
+ - Data serialization in performance-critical applications.
14
+ - Rule of thumb:
15
+ - If the file follows the MsgPack specification, use this module for
16
+ reading and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read MsgPack content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the MsgPack file on disk.

    Returns
    -------
    JSONList
        Rows decoded from the MsgPack file, one dict per record.
    """
    # Delegate to the shared stub handler for this format.
    records = stub.read(path, format_name='MSGPACK')
    return records
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to MsgPack at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the MsgPack file on disk.
    data : JSONData
        A single mapping or a list of mappings to serialize as MsgPack.

    Returns
    -------
    int
        The number of rows written to the MsgPack file.
    """
    # Delegate to the shared stub handler for this format.
    written = stub.write(path, data, format_name='MSGPACK')
    return written
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.mustache` module.
3
+
4
+ Helpers for reading/writing Mustache (MUSTACHE) template files.
5
+
6
+ Notes
7
+ -----
8
+ - A MUSTACHE file is a text file used for generating HTML or other text formats
9
+ by combining templates with data.
10
+ - Common cases:
11
+ - HTML templates.
12
+ - Email templates.
13
+ - Configuration files.
14
+ - Rule of thumb:
15
+ - If you need to work with Mustache template files, use this module for
16
+ reading and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read MUSTACHE content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the MUSTACHE file on disk.

    Returns
    -------
    JSONList
        Rows read from the MUSTACHE file, one dict per record.
    """
    # Delegate to the shared stub handler for this format.
    return stub.read(
        path,
        format_name='MUSTACHE',
    )
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to a MUSTACHE file at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the MUSTACHE file on disk.
    data : JSONData
        A single mapping or a list of mappings to write.

    Returns
    -------
    int
        The number of rows written to the MUSTACHE file.
    """
    # Delegate to the shared stub handler for this format.
    return stub.write(
        path,
        data,
        format_name='MUSTACHE',
    )
etlplus/file/nc.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.nc` module.
3
+
4
+ Helpers for reading/writing NetCDF (NC) data files.
5
+
6
+ Notes
7
+ -----
8
+ - A NC file is a binary file format used for array-oriented scientific data,
9
+ particularly in meteorology, oceanography, and climate science.
10
+ - Common cases:
11
+ - Storing multi-dimensional scientific data.
12
+ - Sharing large datasets in research communities.
13
+ - Efficient data access and manipulation.
14
+ - Rule of thumb:
15
+ - If the file follows the NetCDF standard, use this module for
16
+ reading and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read NC content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the NC (NetCDF) file on disk.

    Returns
    -------
    JSONList
        Rows read from the NC file, one dict per record.
    """
    # Delegate to the shared stub handler for this format.
    rows = stub.read(path, format_name='NC')
    return rows
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to an NC file at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the NC (NetCDF) file on disk.
    data : JSONData
        A single mapping or a list of mappings to write.

    Returns
    -------
    int
        The number of rows written to the NC file.
    """
    # Delegate to the shared stub handler for this format.
    count = stub.write(path, data, format_name='NC')
    return count
etlplus/file/ndjson.py ADDED
@@ -0,0 +1,108 @@
1
+ """
2
+ :mod:`etlplus.file.ndjson` module.
3
+
4
+ Helpers for reading/writing Newline Delimited JSON (NDJSON) files.
5
+
6
+ Notes
7
+ -----
8
+ - An NDJSON file is a format where each line is a separate JSON object.
9
+ - Common cases:
10
+ - Streaming JSON data.
11
+ - Log files with JSON entries.
12
+ - Large datasets that are processed line-by-line.
13
+ - Rule of thumb:
14
+ - If the file follows the NDJSON specification, use this module for
15
+ reading and writing.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import json
21
+ from pathlib import Path
22
+ from typing import cast
23
+
24
+ from ..types import JSONData
25
+ from ..types import JSONDict
26
+ from ..types import JSONList
27
+ from ..utils import count_records
28
+ from ._io import normalize_records
29
+
30
+ # SECTION: EXPORTS ========================================================== #
31
+
32
+
33
+ __all__ = [
34
+ 'read',
35
+ 'write',
36
+ ]
37
+
38
+
39
+ # SECTION: FUNCTIONS ======================================================== #
40
+
41
+
42
def read(
    path: Path,
) -> JSONList:
    """
    Read NDJSON content from ``path``.

    Each non-blank line is parsed as one JSON object; blank lines are
    skipped silently.

    Parameters
    ----------
    path : Path
        Path to the NDJSON file on disk.

    Returns
    -------
    JSONList
        The list of dictionaries read from the NDJSON file.

    Raises
    ------
    TypeError
        If any line in the NDJSON file is not a JSON object (dict).
    """
    records: JSONList = []
    with path.open('r', encoding='utf-8') as stream:
        for line_no, raw in enumerate(stream, start=1):
            stripped = raw.strip()
            if not stripped:
                # Tolerate blank / whitespace-only lines.
                continue
            parsed = json.loads(stripped)
            if not isinstance(parsed, dict):
                raise TypeError(
                    f'NDJSON lines must be objects (dicts) (line {line_no})',
                )
            records.append(parsed)
    return records
76
+
77
+
78
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to NDJSON at ``path``.

    Parameters
    ----------
    path : Path
        Path to the NDJSON file on disk.
    data : JSONData
        Data to write.

    Returns
    -------
    int
        Number of records written.
    """
    records = normalize_records(data, 'NDJSON')
    if not records:
        # Nothing to emit; leave the filesystem untouched.
        return 0

    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open('w', encoding='utf-8') as sink:
        for record in records:
            serialized = json.dumps(record, ensure_ascii=False)
            sink.write(serialized)
            sink.write('\n')

    return count_records(records)
@@ -0,0 +1,75 @@
1
+ """
2
+ :mod:`etlplus.file.numbers` module.
3
+
4
+ Helpers for reading/writing Apple Numbers (NUMBERS) spreadsheet files.
5
+
6
+ Notes
7
+ -----
8
+ - A NUMBERS file is a spreadsheet file created by Apple Numbers.
9
+ - Common cases:
10
+ - Spreadsheet files created by Apple Numbers.
11
+ - Rule of thumb:
12
+ - If you need to read/write NUMBERS files, consider converting them to
13
+ more common formats like CSV or XLSX for better compatibility.
14
+ """
15
+
16
+ from __future__ import annotations
17
+
18
+ from pathlib import Path
19
+
20
+ from ..types import JSONData
21
+ from ..types import JSONList
22
+ from . import stub
23
+
24
+ # SECTION: EXPORTS ========================================================== #
25
+
26
+
27
+ __all__ = [
28
+ 'read',
29
+ 'write',
30
+ ]
31
+
32
+
33
+ # SECTION: FUNCTIONS ======================================================== #
34
+
35
+
36
def read(
    path: Path,
) -> JSONList:
    """
    Read NUMBERS content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the NUMBERS file on disk.

    Returns
    -------
    JSONList
        Rows read from the NUMBERS file, one dict per record.
    """
    # Delegate to the shared stub handler for this format.
    return stub.read(
        path,
        format_name='NUMBERS',
    )
53
+
54
+
55
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to a NUMBERS file at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the NUMBERS file on disk.
    data : JSONData
        A single mapping or a list of mappings to write.

    Returns
    -------
    int
        The number of rows written to the NUMBERS file.
    """
    # Delegate to the shared stub handler for this format.
    written = stub.write(path, data, format_name='NUMBERS')
    return written
etlplus/file/ods.py ADDED
@@ -0,0 +1,79 @@
1
+ """
2
+ :mod:`etlplus.file.ods` module.
3
+
4
+ Helpers for reading/writing OpenDocument (ODS) spreadsheet files.
5
+
6
+ Notes
7
+ -----
8
+ - An ODS file is a spreadsheet file created using the OpenDocument format.
9
+ - Common cases:
10
+ - Spreadsheet files created by LibreOffice Calc, Apache OpenOffice Calc, or
11
+ other applications that support the OpenDocument format.
12
+ - Spreadsheet files exchanged in open standards environments.
13
+ - Spreadsheet files used in government or educational institutions
14
+ promoting open formats.
15
+ - Rule of thumb:
16
+ - If the file follows the OpenDocument specification, use this module for
17
+ reading and writing.
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ from pathlib import Path
23
+
24
+ from ..types import JSONData
25
+ from ..types import JSONList
26
+ from . import stub
27
+
28
+ # SECTION: EXPORTS ========================================================== #
29
+
30
+
31
+ __all__ = [
32
+ 'read',
33
+ 'write',
34
+ ]
35
+
36
+
37
+ # SECTION: FUNCTIONS ======================================================== #
38
+
39
+
40
def read(
    path: Path,
) -> JSONList:
    """
    Read ODS content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the ODS file on disk.

    Returns
    -------
    JSONList
        Rows read from the ODS file, one dict per record.
    """
    # Delegate to the shared stub handler for this format.
    rows = stub.read(path, format_name='ODS')
    return rows
57
+
58
+
59
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to an ODS file at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the ODS file on disk.
    data : JSONData
        A single mapping or a list of mappings to write.

    Returns
    -------
    int
        The number of rows written to the ODS file.
    """
    # Delegate to the shared stub handler for this format.
    return stub.write(
        path,
        data,
        format_name='ODS',
    )
etlplus/file/orc.py ADDED
@@ -0,0 +1,111 @@
1
+ """
2
+ :mod:`etlplus.file.orc` module.
3
+
4
+ Helpers for reading/writing Optimized Row Columnar (ORC) files.
5
+
6
+ Notes
7
+ -----
8
+ - An ORC file is a columnar storage file format optimized for Big Data
9
+ processing.
10
+ - Common cases:
11
+ - Efficient storage and retrieval of large datasets.
12
+ - Integration with big data frameworks like Apache Hive and Apache Spark.
13
+ - Compression and performance optimization for analytical queries.
14
+ - Rule of thumb:
15
+ - If the file follows the ORC specification, use this module for reading
16
+ and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+ from typing import cast
23
+
24
+ from ..types import JSONData
25
+ from ..types import JSONList
26
+ from ._imports import get_pandas
27
+ from ._io import normalize_records
28
+
29
+ # SECTION: EXPORTS ========================================================== #
30
+
31
+
32
+ __all__ = [
33
+ 'read',
34
+ 'write',
35
+ ]
36
+
37
+
38
+ # SECTION: FUNCTIONS ======================================================== #
39
+
40
+
41
def read(
    path: Path,
) -> JSONList:
    """
    Read ORC content from ``path``.

    Parameters
    ----------
    path : Path
        Path to the ORC file on disk.

    Returns
    -------
    JSONList
        The list of dictionaries read from the ORC file.

    Raises
    ------
    ImportError
        When optional dependency "pyarrow" is missing.
    """
    pd = get_pandas('ORC')
    try:
        df = pd.read_orc(path)
    except ImportError as exc:  # pragma: no cover
        # Surface a clear install hint instead of the raw import failure.
        raise ImportError(
            'ORC support requires optional dependency "pyarrow".\n'
            'Install with: pip install pyarrow',
        ) from exc
    return cast(JSONList, df.to_dict(orient='records'))
71
+
72
+
73
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` to ORC at ``path`` and return record count.

    Parameters
    ----------
    path : Path
        Path to the ORC file on disk.
    data : JSONData
        Data to write.

    Returns
    -------
    int
        Number of records written.

    Raises
    ------
    ImportError
        When optional dependency "pyarrow" is missing.
    """
    rows = normalize_records(data, 'ORC')
    if not rows:
        # Nothing to serialize; leave the filesystem untouched.
        return 0

    pd = get_pandas('ORC')
    path.parent.mkdir(parents=True, exist_ok=True)
    df = pd.DataFrame.from_records(rows)
    try:
        df.to_orc(path, index=False)
    except ImportError as exc:  # pragma: no cover
        # Surface a clear install hint instead of the raw import failure.
        raise ImportError(
            'ORC support requires optional dependency "pyarrow".\n'
            'Install with: pip install pyarrow',
        ) from exc
    return len(rows)