etlplus 0.12.1__py3-none-any.whl → 0.12.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. etlplus/file/_imports.py +141 -0
  2. etlplus/file/_io.py +121 -0
  3. etlplus/file/accdb.py +78 -0
  4. etlplus/file/arrow.py +78 -0
  5. etlplus/file/avro.py +46 -68
  6. etlplus/file/bson.py +77 -0
  7. etlplus/file/cbor.py +78 -0
  8. etlplus/file/cfg.py +79 -0
  9. etlplus/file/conf.py +80 -0
  10. etlplus/file/core.py +119 -84
  11. etlplus/file/csv.py +17 -29
  12. etlplus/file/dat.py +78 -0
  13. etlplus/file/duckdb.py +78 -0
  14. etlplus/file/enums.py +114 -15
  15. etlplus/file/feather.py +18 -51
  16. etlplus/file/fwf.py +77 -0
  17. etlplus/file/ini.py +79 -0
  18. etlplus/file/ion.py +78 -0
  19. etlplus/file/json.py +13 -1
  20. etlplus/file/log.py +78 -0
  21. etlplus/file/mdb.py +78 -0
  22. etlplus/file/msgpack.py +78 -0
  23. etlplus/file/ndjson.py +14 -15
  24. etlplus/file/orc.py +18 -49
  25. etlplus/file/parquet.py +18 -51
  26. etlplus/file/pb.py +78 -0
  27. etlplus/file/pbf.py +77 -0
  28. etlplus/file/properties.py +78 -0
  29. etlplus/file/proto.py +77 -0
  30. etlplus/file/psv.py +79 -0
  31. etlplus/file/sqlite.py +78 -0
  32. etlplus/file/stub.py +84 -0
  33. etlplus/file/tab.py +81 -0
  34. etlplus/file/toml.py +78 -0
  35. etlplus/file/tsv.py +18 -29
  36. etlplus/file/txt.py +13 -10
  37. etlplus/file/xls.py +4 -48
  38. etlplus/file/xlsx.py +5 -48
  39. etlplus/file/xml.py +12 -1
  40. etlplus/file/yaml.py +15 -44
  41. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/METADATA +119 -1
  42. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/RECORD +46 -21
  43. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/WHEEL +0 -0
  44. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/entry_points.txt +0 -0
  45. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/licenses/LICENSE +0 -0
  46. {etlplus-0.12.1.dist-info → etlplus-0.12.10.dist-info}/top_level.txt +0 -0
etlplus/file/psv.py ADDED
@@ -0,0 +1,79 @@
1
+ """
2
+ :mod:`etlplus.file.psv` module.
3
+
4
+ Helpers for reading/writing Pipe-Separated Values (PSV) files.
5
+
6
+ Notes
7
+ -----
8
+ - A PSV file is a plain text file that uses the pipe character (`|`) to
9
+ separate values.
10
+ - Common cases:
11
+ - Each line in the file represents a single record.
12
+ - The first line often contains headers that define the column names.
13
+ - Values may be enclosed in quotes, especially if they contain pipes
14
+ or special characters.
15
+ - Rule of thumb:
16
+ - If the file follows the PSV specification, use this module for
17
+ reading and writing.
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ from pathlib import Path
23
+
24
+ from ..types import JSONData
25
+ from ..types import JSONList
26
+ from . import stub
27
+
28
+ # SECTION: EXPORTS ========================================================== #
29
+
30
+
31
+ __all__ = [
32
+ 'read',
33
+ 'write',
34
+ ]
35
+
36
+
37
+ # SECTION: FUNCTIONS ======================================================== #
38
+
39
+
40
def read(path: Path) -> JSONList:
    """
    Load records from a PSV file.

    Parameters
    ----------
    path : Path
        Location of the PSV file on disk.

    Returns
    -------
    JSONList
        One dictionary per PSV record.
    """
    # Delegate to the shared stub until a real PSV reader lands.
    return stub.read(path, format_name='PSV')
57
+
58
+
59
def write(path: Path, data: JSONData) -> int:
    """
    Serialize ``data`` to a PSV file.

    Parameters
    ----------
    path : Path
        Destination PSV file on disk.
    data : JSONData
        Either a single mapping or a list of mappings to persist.

    Returns
    -------
    int
        Count of rows written to the PSV file.
    """
    # Delegate to the shared stub until a real PSV writer lands.
    return stub.write(path, data, format_name='PSV')
etlplus/file/sqlite.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.sqlite` module.
3
+
4
+ Helpers for reading/writing SQLite database (SQLITE) files.
5
+
6
+ Notes
7
+ -----
8
+ - A SQLITE file is a self-contained, serverless database file format used by
9
+ SQLite.
10
+ - Common cases:
11
+ - Lightweight database applications.
12
+ - Embedded database solutions.
13
+ - Mobile and desktop applications requiring local data storage.
14
+ - Rule of thumb:
15
+ - If the file follows the SQLITE specification, use this module for reading
16
+ and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(path: Path) -> JSONList:
    """
    Load records from a SQLite database file.

    Parameters
    ----------
    path : Path
        Location of the SQLite database file on disk.

    Returns
    -------
    JSONList
        One dictionary per record read from the database file.
    """
    # Delegate to the shared stub until a real SQLite reader lands.
    return stub.read(path, format_name='SQLITE')
56
+
57
+
58
def write(path: Path, data: JSONData) -> int:
    """
    Serialize ``data`` to a SQLite database file.

    Parameters
    ----------
    path : Path
        Destination SQLite database file on disk.
    data : JSONData
        Either a single mapping or a list of mappings to persist.

    Returns
    -------
    int
        Count of rows written to the SQLite database file.
    """
    # Delegate to the shared stub until a real SQLite writer lands.
    return stub.write(path, data, format_name='SQLITE')
etlplus/file/stub.py ADDED
@@ -0,0 +1,84 @@
1
+ """
2
+ :mod:`etlplus.file.stub` module.
3
+
4
+ Helpers for reading/writing stubbed files.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from pathlib import Path
10
+
11
+ from ..types import JSONData
12
+ from ..types import JSONList
13
+
14
+ # SECTION: EXPORTS ========================================================== #
15
+
16
+
17
+ __all__ = [
18
+ 'read',
19
+ 'write',
20
+ ]
21
+
22
+
23
+ # SECTION: FUNCTIONS ======================================================== #
24
+
25
+
26
def read(path: Path, format_name: str = 'Stubbed') -> JSONList:
    """
    Placeholder reader for formats without a real implementation yet.

    Parameters
    ----------
    path : Path
        Location of the file that would have been read.
    format_name : str
        Human-readable format name interpolated into the error message.

    Returns
    -------
    JSONList
        Never actually returned; documented only for interface parity
        with the concrete ``read`` implementations.

    Raises
    ------
    NotImplementedError
        Always raised, since this is a stub implementation.
    """
    del path  # Unused: the stub only reports the missing implementation.
    message = f'{format_name} read is not implemented yet'
    raise NotImplementedError(message)
52
+
53
+
54
def write(path: Path, data: JSONData, format_name: str = 'Stubbed') -> int:
    """
    Placeholder writer for formats without a real implementation yet.

    Parameters
    ----------
    path : Path
        Location of the file that would have been written.
    data : JSONData
        Payload (single mapping or list of mappings) that would have
        been serialized.
    format_name : str
        Human-readable format name interpolated into the error message.

    Returns
    -------
    int
        Never actually returned; documented only for interface parity
        with the concrete ``write`` implementations.

    Raises
    ------
    NotImplementedError
        Always raised, since this is a stub implementation.
    """
    del path  # Unused: the stub only reports the missing implementation.
    del data  # Unused: nothing is serialized.
    message = f'{format_name} write is not implemented yet'
    raise NotImplementedError(message)
etlplus/file/tab.py ADDED
@@ -0,0 +1,81 @@
1
+ """
2
+ :mod:`etlplus.file.tab` module.
3
+
4
+ Helpers for reading/writing "tab"-formatted (TAB) files.
5
+
6
+ Notes
7
+ -----
8
+ - A TAB file is not necessarily a TSV file when tabs aren’t actually the
9
+ delimiter that defines the fields, even if the text looks column-aligned.
10
+ - Common cases:
11
+ - Fixed-width text (FWF) that uses tabs for alignment.
12
+ - Mixed whitespace (tabs + spaces) as “pretty printing”.
13
+ - Tabs embedded inside quoted fields (or unescaped tabs in free text).
14
+ - Header/metadata lines or multi-line records that break TSV assumptions.
15
+ - Not actually tab-delimited despite the name.
16
+ - Rule of thumb:
17
+ - If the file is truly tab-delimited, use :mod:`etlplus.file.tsv`.
18
+ - If the file has fixed-width fields, use :mod:`etlplus.file.fwf`.
19
+ - Otherwise, use :mod:`etlplus.file.tab` (i.e., this module).
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ from pathlib import Path
25
+
26
+ from ..types import JSONData
27
+ from ..types import JSONList
28
+ from . import stub
29
+
30
+ # SECTION: EXPORTS ========================================================== #
31
+
32
+
33
+ __all__ = [
34
+ 'read',
35
+ 'write',
36
+ ]
37
+
38
+
39
+ # SECTION: FUNCTIONS ======================================================== #
40
+
41
+
42
def read(path: Path) -> JSONList:
    """
    Load records from a TAB-formatted file.

    Parameters
    ----------
    path : Path
        Location of the TAB file on disk.

    Returns
    -------
    JSONList
        One dictionary per TAB record.
    """
    # Delegate to the shared stub until a real TAB reader lands.
    return stub.read(path, format_name='TAB')
59
+
60
+
61
def write(path: Path, data: JSONData) -> int:
    """
    Serialize ``data`` to a TAB-formatted file.

    Parameters
    ----------
    path : Path
        Destination TAB file on disk.
    data : JSONData
        Either a single mapping or a list of mappings to persist.

    Returns
    -------
    int
        Count of rows written to the TAB file.
    """
    # Delegate to the shared stub until a real TAB writer lands.
    return stub.write(path, data, format_name='TAB')
etlplus/file/toml.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ :mod:`etlplus.file.toml` module.
3
+
4
+ Helpers for reading/writing Tom's Obvious Minimal Language (TOML) files.
5
+
6
+ Notes
7
+ -----
8
+ - A “TOML-formatted” file is a configuration file that uses the TOML syntax.
9
+ - Common cases:
10
+ - Simple key-value pairs.
11
+ - Nested tables and arrays.
12
+ - Data types such as strings, integers, floats, booleans, dates, and
13
+ arrays.
14
+ - Rule of thumb:
15
+ - If the file follows the TOML specification, use this module for
16
+ reading and writing.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ from pathlib import Path
22
+
23
+ from ..types import JSONData
24
+ from ..types import JSONList
25
+ from . import stub
26
+
27
+ # SECTION: EXPORTS ========================================================== #
28
+
29
+
30
+ __all__ = [
31
+ 'read',
32
+ 'write',
33
+ ]
34
+
35
+
36
+ # SECTION: FUNCTIONS ======================================================== #
37
+
38
+
39
def read(path: Path) -> JSONList:
    """
    Load records from a TOML file.

    Parameters
    ----------
    path : Path
        Location of the TOML file on disk.

    Returns
    -------
    JSONList
        One dictionary per record read from the TOML file.
    """
    # Delegate to the shared stub until a real TOML reader lands.
    return stub.read(path, format_name='TOML')
56
+
57
+
58
def write(path: Path, data: JSONData) -> int:
    """
    Serialize ``data`` to a TOML file.

    Parameters
    ----------
    path : Path
        Destination TOML file on disk.
    data : JSONData
        Either a single mapping or a list of mappings to persist.

    Returns
    -------
    int
        Count of rows written to the TOML file.
    """
    # Delegate to the shared stub until a real TOML writer lands.
    return stub.write(path, data, format_name='TOML')
etlplus/file/tsv.py CHANGED
@@ -1,18 +1,30 @@
1
1
  """
2
2
  :mod:`etlplus.file.tsv` module.
3
3
 
4
- Helpers for reading/writing TSV files.
4
+ Helpers for reading/writing Tab-Separated Values (TSV) files.
5
+
6
+ Notes
7
+ -----
8
+ - A TSV file is a plain text file that uses the tab character (``\t``) to
9
+ separate values.
10
+ - Common cases:
11
+ - Each line in the file represents a single record.
12
+ - The first line often contains headers that define the column names.
13
+ - Values may be enclosed in quotes, especially if they contain tabs
14
+ or special characters.
15
+ - Rule of thumb:
16
+ - If the file follows the TSV specification, use this module for
17
+ reading and writing.
5
18
  """
6
19
 
7
20
  from __future__ import annotations
8
21
 
9
- import csv
10
22
  from pathlib import Path
11
- from typing import cast
12
23
 
13
24
  from ..types import JSONData
14
- from ..types import JSONDict
15
25
  from ..types import JSONList
26
+ from ._io import read_delimited
27
+ from ._io import write_delimited
16
28
 
17
29
  # SECTION: EXPORTS ========================================================== #
18
30
 
@@ -42,14 +54,7 @@ def read(
42
54
  JSONList
43
55
  The list of dictionaries read from the TSV file.
44
56
  """
45
- with path.open('r', encoding='utf-8', newline='') as handle:
46
- reader: csv.DictReader[str] = csv.DictReader(handle, delimiter='\t')
47
- rows: JSONList = []
48
- for row in reader:
49
- if not any(row.values()):
50
- continue
51
- rows.append(cast(JSONDict, dict(row)))
52
- return rows
57
+ return read_delimited(path, delimiter='\t')
53
58
 
54
59
 
55
60
  def write(
@@ -72,20 +77,4 @@ def write(
72
77
  int
73
78
  The number of rows written to the TSV file.
74
79
  """
75
- rows: list[JSONDict]
76
- if isinstance(data, list):
77
- rows = [row for row in data if isinstance(row, dict)]
78
- else:
79
- rows = [data]
80
-
81
- if not rows:
82
- return 0
83
-
84
- fieldnames = sorted({key for row in rows for key in row})
85
- with path.open('w', encoding='utf-8', newline='') as handle:
86
- writer = csv.DictWriter(handle, fieldnames=fieldnames, delimiter='\t')
87
- writer.writeheader()
88
- for row in rows:
89
- writer.writerow({field: row.get(field) for field in fieldnames})
90
-
91
- return len(rows)
80
+ return write_delimited(path, data, delimiter='\t')
etlplus/file/txt.py CHANGED
@@ -1,18 +1,27 @@
1
1
  """
2
2
  :mod:`etlplus.file.txt` module.
3
3
 
4
- Helpers for reading/writing text files.
4
+ Helpers for reading/writing text (TXT) files.
5
+
6
+ Notes
7
+ -----
8
+ - A TXT file is a plain text file that contains unformatted text.
9
+ - Common cases:
10
+ - Each line in the file represents a single piece of text.
11
+ - Lines may vary in length and content.
12
+ - Rule of thumb:
13
+ - If the file is a simple text file without specific formatting
14
+ requirements, use this module for reading and writing.
5
15
  """
6
16
 
7
17
  from __future__ import annotations
8
18
 
9
19
  from pathlib import Path
10
- from typing import cast
11
20
 
12
21
  from ..types import JSONData
13
- from ..types import JSONDict
14
22
  from ..types import JSONList
15
23
  from ..utils import count_records
24
+ from ._io import normalize_records
16
25
 
17
26
  # SECTION: EXPORTS ========================================================== #
18
27
 
@@ -77,13 +86,7 @@ def write(
77
86
  If any item in ``data`` is not a dictionary or if any dictionary
78
87
  does not contain a ``'text'`` key.
79
88
  """
80
- rows: JSONList
81
- if isinstance(data, list):
82
- if not all(isinstance(item, dict) for item in data):
83
- raise TypeError('TXT payloads must contain only objects (dicts)')
84
- rows = cast(JSONList, data)
85
- else:
86
- rows = [cast(JSONDict, data)]
89
+ rows = normalize_records(data, 'TXT')
87
90
 
88
91
  if not rows:
89
92
  return 0
etlplus/file/xls.py CHANGED
@@ -7,12 +7,11 @@ Helpers for reading/writing Excel XLS files.
7
7
  from __future__ import annotations
8
8
 
9
9
  from pathlib import Path
10
- from typing import Any
11
10
  from typing import cast
12
11
 
13
12
  from ..types import JSONData
14
- from ..types import JSONDict
15
13
  from ..types import JSONList
14
+ from ._imports import get_pandas
16
15
 
17
16
  # SECTION: EXPORTS ========================================================== #
18
17
 
@@ -23,49 +22,6 @@ __all__ = [
23
22
  ]
24
23
 
25
24
 
26
- # SECTION: INTERNAL CONSTANTS =============================================== #
27
-
28
-
29
- _PANDAS_CACHE: dict[str, Any] = {}
30
-
31
-
32
- # SECTION: INTERNAL FUNCTIONS =============================================== #
33
-
34
-
35
- def _get_pandas() -> Any:
36
- """
37
- Return the pandas module, importing it on first use.
38
-
39
- Raises an informative ImportError if the optional dependency is missing.
40
- """
41
- mod = _PANDAS_CACHE.get('mod')
42
- if mod is not None: # pragma: no cover - tiny branch
43
- return mod
44
- try:
45
- _pd = __import__('pandas') # type: ignore[assignment]
46
- except ImportError as e: # pragma: no cover
47
- raise ImportError(
48
- 'XLS support requires optional dependency "pandas".\n'
49
- 'Install with: pip install pandas',
50
- ) from e
51
- _PANDAS_CACHE['mod'] = _pd
52
-
53
- return _pd
54
-
55
-
56
- def _normalize_records(data: JSONData) -> JSONList:
57
- """
58
- Normalize JSON payloads into a list of dictionaries.
59
-
60
- Raises TypeError when payloads contain non-dict items.
61
- """
62
- if isinstance(data, list):
63
- if not all(isinstance(item, dict) for item in data):
64
- raise TypeError('XLS payloads must contain only objects (dicts)')
65
- return cast(JSONList, data)
66
- return [cast(JSONDict, data)]
67
-
68
-
69
25
  # SECTION: FUNCTIONS ======================================================== #
70
26
 
71
27
 
@@ -90,7 +46,7 @@ def read(
90
46
  ImportError
91
47
  If the optional dependency "xlrd" is not installed.
92
48
  """
93
- pandas = _get_pandas()
49
+ pandas = get_pandas('XLS')
94
50
  try:
95
51
  frame = pandas.read_excel(path, engine='xlrd')
96
52
  except ImportError as e: # pragma: no cover
@@ -126,7 +82,7 @@ def write(
126
82
 
127
83
  Raises
128
84
  ------
129
- ImportError
130
- If the optional dependency "xlwt" is not installed.
85
+ RuntimeError
86
+ If XLS writing is attempted.
131
87
  """
132
88
  raise RuntimeError('XLS write is not supported; use XLSX instead')
etlplus/file/xlsx.py CHANGED
@@ -7,12 +7,12 @@ Helpers for reading/writing Excel XLSX files.
7
7
  from __future__ import annotations
8
8
 
9
9
  from pathlib import Path
10
- from typing import Any
11
10
  from typing import cast
12
11
 
13
12
  from ..types import JSONData
14
- from ..types import JSONDict
15
13
  from ..types import JSONList
14
+ from ._imports import get_pandas
15
+ from ._io import normalize_records
16
16
 
17
17
  # SECTION: EXPORTS ========================================================== #
18
18
 
@@ -23,49 +23,6 @@ __all__ = [
23
23
  ]
24
24
 
25
25
 
26
- # SECTION: INTERNAL CONSTANTS =============================================== #
27
-
28
-
29
- _PANDAS_CACHE: dict[str, Any] = {}
30
-
31
-
32
- # SECTION: INTERNAL FUNCTIONS =============================================== #
33
-
34
-
35
- def _get_pandas() -> Any:
36
- """
37
- Return the pandas module, importing it on first use.
38
-
39
- Raises an informative ImportError if the optional dependency is missing.
40
- """
41
- mod = _PANDAS_CACHE.get('mod')
42
- if mod is not None: # pragma: no cover - tiny branch
43
- return mod
44
- try:
45
- _pd = __import__('pandas') # type: ignore[assignment]
46
- except ImportError as e: # pragma: no cover
47
- raise ImportError(
48
- 'XLSX support requires optional dependency "pandas".\n'
49
- 'Install with: pip install pandas',
50
- ) from e
51
- _PANDAS_CACHE['mod'] = _pd
52
-
53
- return _pd
54
-
55
-
56
- def _normalize_records(data: JSONData) -> JSONList:
57
- """
58
- Normalize JSON payloads into a list of dictionaries.
59
-
60
- Raises TypeError when payloads contain non-dict items.
61
- """
62
- if isinstance(data, list):
63
- if not all(isinstance(item, dict) for item in data):
64
- raise TypeError('XLSX payloads must contain only objects (dicts)')
65
- return cast(JSONList, data)
66
- return [cast(JSONDict, data)]
67
-
68
-
69
26
  # SECTION: FUNCTIONS ======================================================== #
70
27
 
71
28
 
@@ -90,7 +47,7 @@ def read(
90
47
  ImportError
91
48
  If optional dependencies for XLSX support are missing.
92
49
  """
93
- pandas = _get_pandas()
50
+ pandas = get_pandas('XLSX')
94
51
  try:
95
52
  frame = pandas.read_excel(path)
96
53
  except ImportError as e: # pragma: no cover
@@ -125,11 +82,11 @@ def write(
125
82
  ImportError
126
83
  If optional dependencies for XLSX support are missing.
127
84
  """
128
- records = _normalize_records(data)
85
+ records = normalize_records(data, 'XLSX')
129
86
  if not records:
130
87
  return 0
131
88
 
132
- pandas = _get_pandas()
89
+ pandas = get_pandas('XLSX')
133
90
  path.parent.mkdir(parents=True, exist_ok=True)
134
91
  frame = pandas.DataFrame.from_records(records)
135
92
  try:
etlplus/file/xml.py CHANGED
@@ -1,7 +1,18 @@
1
1
  """
2
2
  :mod:`etlplus.file.xml` module.
3
3
 
4
- Helpers for reading/writing XML files.
4
+ Helpers for reading/writing Extensible Markup Language (XML) files.
5
+
6
+ Notes
7
+ -----
8
+ - An XML file is a markup language file that uses tags to define elements.
9
+ - Common cases:
10
+ - Configuration files.
11
+ - Data interchange between systems.
12
+ - Document formatting.
13
+ - Rule of thumb:
14
+ - If the file follows the XML specification, use this module for
15
+ reading and writing.
5
16
  """
6
17
 
7
18
  from __future__ import annotations