etlplus 0.9.0__py3-none-any.whl → 0.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. etlplus/README.md +37 -0
  2. etlplus/__init__.py +1 -26
  3. etlplus/api/README.md +51 -3
  4. etlplus/api/__init__.py +10 -0
  5. etlplus/api/config.py +39 -28
  6. etlplus/api/endpoint_client.py +3 -3
  7. etlplus/api/enums.py +51 -0
  8. etlplus/api/pagination/client.py +1 -1
  9. etlplus/api/rate_limiting/config.py +13 -1
  10. etlplus/api/rate_limiting/rate_limiter.py +8 -11
  11. etlplus/api/request_manager.py +11 -6
  12. etlplus/api/transport.py +14 -2
  13. etlplus/api/types.py +96 -6
  14. etlplus/{run_helpers.py → api/utils.py} +209 -153
  15. etlplus/cli/README.md +40 -0
  16. etlplus/cli/commands.py +94 -61
  17. etlplus/cli/constants.py +1 -1
  18. etlplus/cli/handlers.py +40 -12
  19. etlplus/cli/io.py +2 -2
  20. etlplus/cli/main.py +1 -1
  21. etlplus/cli/state.py +4 -7
  22. etlplus/database/README.md +48 -0
  23. etlplus/database/ddl.py +1 -1
  24. etlplus/database/engine.py +19 -3
  25. etlplus/database/orm.py +2 -0
  26. etlplus/database/schema.py +1 -1
  27. etlplus/enums.py +1 -107
  28. etlplus/file/README.md +105 -0
  29. etlplus/file/__init__.py +25 -0
  30. etlplus/file/_imports.py +141 -0
  31. etlplus/file/_io.py +160 -0
  32. etlplus/file/accdb.py +78 -0
  33. etlplus/file/arrow.py +78 -0
  34. etlplus/file/avro.py +176 -0
  35. etlplus/file/bson.py +77 -0
  36. etlplus/file/cbor.py +78 -0
  37. etlplus/file/cfg.py +79 -0
  38. etlplus/file/conf.py +80 -0
  39. etlplus/file/core.py +322 -0
  40. etlplus/file/csv.py +79 -0
  41. etlplus/file/dat.py +78 -0
  42. etlplus/file/dta.py +77 -0
  43. etlplus/file/duckdb.py +78 -0
  44. etlplus/file/enums.py +343 -0
  45. etlplus/file/feather.py +111 -0
  46. etlplus/file/fwf.py +77 -0
  47. etlplus/file/gz.py +123 -0
  48. etlplus/file/hbs.py +78 -0
  49. etlplus/file/hdf5.py +78 -0
  50. etlplus/file/ini.py +79 -0
  51. etlplus/file/ion.py +78 -0
  52. etlplus/file/jinja2.py +78 -0
  53. etlplus/file/json.py +98 -0
  54. etlplus/file/log.py +78 -0
  55. etlplus/file/mat.py +78 -0
  56. etlplus/file/mdb.py +78 -0
  57. etlplus/file/msgpack.py +78 -0
  58. etlplus/file/mustache.py +78 -0
  59. etlplus/file/nc.py +78 -0
  60. etlplus/file/ndjson.py +108 -0
  61. etlplus/file/numbers.py +75 -0
  62. etlplus/file/ods.py +79 -0
  63. etlplus/file/orc.py +111 -0
  64. etlplus/file/parquet.py +113 -0
  65. etlplus/file/pb.py +78 -0
  66. etlplus/file/pbf.py +77 -0
  67. etlplus/file/properties.py +78 -0
  68. etlplus/file/proto.py +77 -0
  69. etlplus/file/psv.py +79 -0
  70. etlplus/file/rda.py +78 -0
  71. etlplus/file/rds.py +78 -0
  72. etlplus/file/sas7bdat.py +78 -0
  73. etlplus/file/sav.py +77 -0
  74. etlplus/file/sqlite.py +78 -0
  75. etlplus/file/stub.py +84 -0
  76. etlplus/file/sylk.py +77 -0
  77. etlplus/file/tab.py +81 -0
  78. etlplus/file/toml.py +78 -0
  79. etlplus/file/tsv.py +80 -0
  80. etlplus/file/txt.py +102 -0
  81. etlplus/file/vm.py +78 -0
  82. etlplus/file/wks.py +77 -0
  83. etlplus/file/xls.py +88 -0
  84. etlplus/file/xlsm.py +79 -0
  85. etlplus/file/xlsx.py +99 -0
  86. etlplus/file/xml.py +185 -0
  87. etlplus/file/xpt.py +78 -0
  88. etlplus/file/yaml.py +95 -0
  89. etlplus/file/zip.py +175 -0
  90. etlplus/file/zsav.py +77 -0
  91. etlplus/ops/README.md +50 -0
  92. etlplus/ops/__init__.py +61 -0
  93. etlplus/{extract.py → ops/extract.py} +81 -99
  94. etlplus/{load.py → ops/load.py} +78 -101
  95. etlplus/{run.py → ops/run.py} +159 -127
  96. etlplus/{transform.py → ops/transform.py} +75 -68
  97. etlplus/{validation → ops}/utils.py +53 -17
  98. etlplus/{validate.py → ops/validate.py} +22 -12
  99. etlplus/templates/README.md +46 -0
  100. etlplus/types.py +5 -4
  101. etlplus/utils.py +136 -2
  102. etlplus/workflow/README.md +52 -0
  103. etlplus/{config → workflow}/__init__.py +10 -23
  104. etlplus/{config → workflow}/connector.py +58 -44
  105. etlplus/workflow/dag.py +105 -0
  106. etlplus/{config → workflow}/jobs.py +105 -32
  107. etlplus/{config → workflow}/pipeline.py +59 -51
  108. etlplus/{config → workflow}/profile.py +8 -5
  109. etlplus/workflow/types.py +115 -0
  110. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/METADATA +210 -17
  111. etlplus-0.9.2.dist-info/RECORD +134 -0
  112. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/WHEEL +1 -1
  113. etlplus/config/types.py +0 -204
  114. etlplus/config/utils.py +0 -120
  115. etlplus/file.py +0 -657
  116. etlplus/validation/__init__.py +0 -44
  117. etlplus-0.9.0.dist-info/RECORD +0 -65
  118. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/entry_points.txt +0 -0
  119. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/licenses/LICENSE +0 -0
  120. {etlplus-0.9.0.dist-info → etlplus-0.9.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,113 @@
1
+ """
2
+ :mod:`etlplus.file.parquet` module.
3
+
4
+ Helpers for reading/writing Apache Parquet (PARQUET) files.
5
+
6
+ Notes
7
+ -----
8
+ - An Apache Parquet file is a columnar storage file format optimized for Big
9
+ Data processing.
10
+ - Common cases:
11
+ - Efficient storage and retrieval of large datasets.
12
+ - Integration with big data frameworks like Apache Hive and Apache Spark.
13
+ - Compression and performance optimization for analytical queries.
14
+ - Rule of thumb:
15
+ - If the file follows the Apache Parquet specification, use this module for
16
+ reading and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+ from typing import cast
23
+
24
+ from ..types import JSONData
25
+ from ..types import JSONList
26
+ from ._imports import get_pandas
27
+ from ._io import normalize_records
28
+
29
+ # SECTION: EXPORTS ========================================================== #
30
+
31
+
32
+ __all__ = [
33
+ 'read',
34
+ 'write',
35
+ ]
36
+
37
+
38
+ # SECTION: FUNCTIONS ======================================================== #
39
+
40
+
41
def read(
    path: Path,
) -> JSONList:
    """
    Load row-oriented records from the Parquet file at ``path``.

    Parameters
    ----------
    path : Path
        Location of the Parquet file on disk.

    Returns
    -------
    JSONList
        Records decoded from the Parquet file, one dict per row.

    Raises
    ------
    ImportError
        If no Parquet engine (``pyarrow``/``fastparquet``) is installed.
    """
    pd = get_pandas('Parquet')
    try:
        frame = pd.read_parquet(path)
    except ImportError as exc:  # pragma: no cover
        # pandas only imports its Parquet engine lazily; surface an
        # actionable install hint instead of the raw engine error.
        raise ImportError(
            'Parquet support requires optional dependency '
            '"pyarrow" or "fastparquet".\n'
            'Install with: pip install pyarrow',
        ) from exc
    return cast(JSONList, frame.to_dict(orient='records'))
72
+
73
+
74
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Serialize ``data`` to a Parquet file at ``path``.

    Parameters
    ----------
    path : Path
        Destination of the Parquet file on disk.
    data : JSONData
        Data to write.

    Returns
    -------
    int
        Number of records written (0 when ``data`` is empty).

    Raises
    ------
    ImportError
        If no Parquet engine (``pyarrow``/``fastparquet``) is installed.
    """
    rows = normalize_records(data, 'Parquet')
    if not rows:
        # Nothing to serialize; skip directory creation and engine import.
        return 0

    pd = get_pandas('Parquet')
    path.parent.mkdir(parents=True, exist_ok=True)
    frame = pd.DataFrame.from_records(rows)
    try:
        frame.to_parquet(path, index=False)
    except ImportError as exc:  # pragma: no cover
        # pandas only imports its Parquet engine lazily; surface an
        # actionable install hint instead of the raw engine error.
        raise ImportError(
            'Parquet support requires optional dependency '
            '"pyarrow" or "fastparquet".\n'
            'Install with: pip install pyarrow',
        ) from exc
    return len(rows)
etlplus/file/pb.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.pb` module.
3
+
4
+ Helpers for reading/writing Protocol Buffer (PB) files.
5
+
6
+ Notes
7
+ -----
8
+ - PB (a.k.a. Protobuf) is a binary serialization format developed by Google
9
+ for structured data.
10
+ - Common cases:
11
+ - Data interchange between services.
12
+ - Efficient storage of structured data.
13
+ - Communication in distributed systems.
14
+ - Rule of thumb:
15
+ - If the file follows the Protocol Buffer specification, use this module
16
+ for reading and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read Protocol Buffer (PB) content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the PB file on disk.

    Returns
    -------
    JSONList
        Records read from the PB file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='PB')
    return records
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as Protocol Buffer (PB) content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the PB file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the PB file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='PB')
    return written
etlplus/file/pbf.py ADDED
@@ -0,0 +1,77 @@
1
+ """
2
+ :mod:`etlplus.file.pbf` module.
3
+
4
+ Helpers for reading/writing Protocolbuffer Binary Format (PBF) files.
5
+
6
+ Notes
7
+ -----
8
+ - PBF is a binary format used primarily for OpenStreetMap (OSM) data.
9
+ - Common cases:
10
+ - Efficient storage of large OSM datasets.
11
+ - Fast data interchange for mapping applications.
12
+ - Compression of OSM data for reduced file size.
13
+ - Rule of thumb:
14
+ - If the file follows the PBF specification, use this module for reading
15
+ and writing.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ from pathlib import Path
21
+
22
+ from ..types import JSONData
23
+ from ..types import JSONList
24
+ from . import stub
25
+
26
+ # SECTION: EXPORTS ========================================================== #
27
+
28
+
29
+ __all__ = [
30
+ 'read',
31
+ 'write',
32
+ ]
33
+
34
+
35
+ # SECTION: FUNCTIONS ======================================================== #
36
+
37
+
38
def read(
    path: Path,
) -> JSONList:
    """
    Read Protocolbuffer Binary Format (PBF) content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the PBF file on disk.

    Returns
    -------
    JSONList
        Records read from the PBF file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='PBF')
    return records
55
+
56
+
57
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as Protocolbuffer Binary Format (PBF) content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the PBF file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the PBF file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='PBF')
    return written
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.properties` module.
3
+
4
+ Helpers for reading/writing properties (PROPERTIES) files.
5
+
6
+ Notes
7
+ -----
8
+ - A PROPERTIES file is a properties file that typically uses key-value pairs,
9
+ often with a simple syntax.
10
+ - Common cases:
11
+ - Java-style properties files with ``key=value`` pairs.
12
+ - INI-style files without sections.
13
+ - Custom formats specific to certain applications.
14
+ - Rule of thumb:
15
+ - If the file follows a standard format like INI, consider using
16
+ dedicated parsers.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read PROPERTIES content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the PROPERTIES file on disk.

    Returns
    -------
    JSONList
        Records read from the PROPERTIES file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='PROPERTIES')
    return records
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as PROPERTIES content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the PROPERTIES file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the PROPERTIES file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='PROPERTIES')
    return written
etlplus/file/proto.py ADDED
@@ -0,0 +1,77 @@
1
+ """
2
+ :mod:`etlplus.file.proto` module.
3
+
4
+ Helpers for reading/writing Protocol Buffers schema (PROTO) files.
5
+
6
+ Notes
7
+ -----
8
+ - A PROTO file defines the structure of Protocol Buffers messages.
9
+ - Common cases:
10
+ - Defining message formats for data interchange.
11
+ - Generating code for serialization/deserialization.
12
+ - Documenting data structures in distributed systems.
13
+ - Rule of thumb:
14
+ - If the file follows the Protocol Buffers schema specification, use this
15
+ module for reading and writing.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ from pathlib import Path
21
+
22
+ from ..types import JSONData
23
+ from ..types import JSONList
24
+ from . import stub
25
+
26
+ # SECTION: EXPORTS ========================================================== #
27
+
28
+
29
+ __all__ = [
30
+ 'read',
31
+ 'write',
32
+ ]
33
+
34
+
35
+ # SECTION: FUNCTIONS ======================================================== #
36
+
37
+
38
def read(
    path: Path,
) -> JSONList:
    """
    Read Protocol Buffers schema (PROTO) content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the PROTO file on disk.

    Returns
    -------
    JSONList
        Records read from the PROTO file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='PROTO')
    return records
55
+
56
+
57
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as Protocol Buffers schema (PROTO) content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the PROTO file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the PROTO file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='PROTO')
    return written
etlplus/file/psv.py ADDED
@@ -0,0 +1,79 @@
1
+ """
2
+ :mod:`etlplus.file.psv` module.
3
+
4
+ Helpers for reading/writing Pipe-Separated Values (PSV) files.
5
+
6
+ Notes
7
+ -----
8
+ - A PSV file is a plain text file that uses the pipe character (`|`) to
9
+ separate values.
10
+ - Common cases:
11
+ - Each line in the file represents a single record.
12
+ - The first line often contains headers that define the column names.
13
+ - Values may be enclosed in quotes, especially if they contain pipes
14
+ or special characters.
15
+ - Rule of thumb:
16
+ - If the file follows the PSV specification, use this module for
17
+ reading and writing.
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ from pathlib import Path
23
+
24
+ from ..types import JSONData
25
+ from ..types import JSONList
26
+ from . import stub
27
+
28
+ # SECTION: EXPORTS ========================================================== #
29
+
30
+
31
+ __all__ = [
32
+ 'read',
33
+ 'write',
34
+ ]
35
+
36
+
37
+ # SECTION: FUNCTIONS ======================================================== #
38
+
39
+
40
def read(
    path: Path,
) -> JSONList:
    """
    Read Pipe-Separated Values (PSV) content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the PSV file on disk.

    Returns
    -------
    JSONList
        Records read from the PSV file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='PSV')
    return records
57
+
58
+
59
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as Pipe-Separated Values (PSV) content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the PSV file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the PSV file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='PSV')
    return written
etlplus/file/rda.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.rda` module.
3
+
4
+ Helpers for reading/writing RData workspace/object bundle (RDA) files.
5
+
6
+ Notes
7
+ -----
8
+ - An RDA file is a binary file format used by R to store workspace objects,
9
+ including data frames, lists, and other R objects.
10
+ - Common cases:
11
+ - Storing R data objects for later use.
12
+ - Sharing R datasets between users.
13
+ - Loading R data into Python for analysis.
14
+ - Rule of thumb:
15
+ - If the file follows the RDA specification, use this module for reading
16
+ and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read RData bundle (RDA) content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the RDA file on disk.

    Returns
    -------
    JSONList
        Records read from the RDA file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='RDA')
    return records
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as RData bundle (RDA) content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the RDA file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the RDA file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='RDA')
    return written
etlplus/file/rds.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.rds` module.
3
+
4
+ Helpers for reading/writing R (RDS) data files.
5
+
6
+ Notes
7
+ -----
8
+ - An RDS file is a binary file format used by R to store a single R object,
9
+ such as a data frame, list, or vector.
10
+ - Common cases:
11
+ - Storing R objects for later use.
12
+ - Sharing R data between users.
13
+ - Loading R data into Python for analysis.
14
+ - Rule of thumb:
15
+ - If the file follows the RDS specification, use this module for reading
16
+ and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(
    path: Path,
) -> JSONList:
    """
    Read R data (RDS) content from ``path``.

    Parameters
    ----------
    path : Path
        Location of the RDS file on disk.

    Returns
    -------
    JSONList
        Records read from the RDS file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    records: JSONList = stub.read(path, format_name='RDS')
    return records
56
+
57
+
58
def write(
    path: Path,
    data: JSONData,
) -> int:
    """
    Write ``data`` as R data (RDS) content to ``path``.

    Parameters
    ----------
    path : Path
        Location of the RDS file on disk.
    data : JSONData
        Data to persist — a list of dictionaries or a single dictionary.

    Returns
    -------
    int
        Count of rows written to the RDS file.
    """
    # Delegate to the shared stub handler for not-yet-implemented formats.
    written = stub.write(path, data, format_name='RDS')
    return written