etlplus 0.12.3__py3-none-any.whl → 0.12.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. etlplus/file/_imports.py +141 -0
  2. etlplus/file/_io.py +1 -0
  3. etlplus/file/accdb.py +78 -0
  4. etlplus/file/arrow.py +78 -0
  5. etlplus/file/avro.py +17 -27
  6. etlplus/file/bson.py +77 -0
  7. etlplus/file/cbor.py +78 -0
  8. etlplus/file/cfg.py +79 -0
  9. etlplus/file/conf.py +80 -0
  10. etlplus/file/core.py +119 -84
  11. etlplus/file/csv.py +13 -1
  12. etlplus/file/dat.py +78 -0
  13. etlplus/file/dta.py +77 -0
  14. etlplus/file/duckdb.py +78 -0
  15. etlplus/file/enums.py +120 -15
  16. etlplus/file/feather.py +14 -2
  17. etlplus/file/fwf.py +77 -0
  18. etlplus/file/hbs.py +78 -0
  19. etlplus/file/hdf5.py +78 -0
  20. etlplus/file/ini.py +79 -0
  21. etlplus/file/ion.py +78 -0
  22. etlplus/file/jinja2.py +78 -0
  23. etlplus/file/json.py +13 -1
  24. etlplus/file/log.py +78 -0
  25. etlplus/file/mat.py +78 -0
  26. etlplus/file/mdb.py +78 -0
  27. etlplus/file/msgpack.py +78 -0
  28. etlplus/file/mustache.py +78 -0
  29. etlplus/file/nc.py +78 -0
  30. etlplus/file/ndjson.py +14 -15
  31. etlplus/file/numbers.py +75 -0
  32. etlplus/file/ods.py +79 -0
  33. etlplus/file/orc.py +14 -2
  34. etlplus/file/parquet.py +14 -2
  35. etlplus/file/pb.py +78 -0
  36. etlplus/file/pbf.py +77 -0
  37. etlplus/file/properties.py +78 -0
  38. etlplus/file/proto.py +77 -0
  39. etlplus/file/psv.py +79 -0
  40. etlplus/file/rda.py +78 -0
  41. etlplus/file/rds.py +78 -0
  42. etlplus/file/sas7bdat.py +78 -0
  43. etlplus/file/sav.py +77 -0
  44. etlplus/file/sqlite.py +78 -0
  45. etlplus/file/stub.py +84 -0
  46. etlplus/file/sylk.py +77 -0
  47. etlplus/file/tab.py +81 -0
  48. etlplus/file/toml.py +78 -0
  49. etlplus/file/tsv.py +14 -1
  50. etlplus/file/txt.py +13 -10
  51. etlplus/file/vm.py +78 -0
  52. etlplus/file/wks.py +77 -0
  53. etlplus/file/xls.py +1 -1
  54. etlplus/file/xlsm.py +79 -0
  55. etlplus/file/xlsx.py +1 -1
  56. etlplus/file/xml.py +12 -1
  57. etlplus/file/xpt.py +78 -0
  58. etlplus/file/yaml.py +15 -44
  59. etlplus/file/zsav.py +77 -0
  60. {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/METADATA +119 -1
  61. {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/RECORD +65 -23
  62. etlplus/file/_pandas.py +0 -58
  63. {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/WHEEL +0 -0
  64. {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/entry_points.txt +0 -0
  65. {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/licenses/LICENSE +0 -0
  66. {etlplus-0.12.3.dist-info → etlplus-0.12.12.dist-info}/top_level.txt +0 -0
etlplus/file/_imports.py ADDED
@@ -0,0 +1,141 @@
+ """
+ :mod:`etlplus.file._imports` module.
+
+ Shared helpers for optional dependency imports.
+ """
+
+ from __future__ import annotations
+
+ from importlib import import_module
+ from typing import Any
+
+ # SECTION: INTERNAL CONSTANTS =============================================== #
+
+
+ _MODULE_CACHE: dict[str, Any] = {}
+
+
+ # SECTION: INTERNAL FUNCTIONS =============================================== #
+
+
+ def _error_message(
+     module_name: str,
+     format_name: str,
+ ) -> str:
+     """
+     Build an import error message for an optional dependency.
+
+     Parameters
+     ----------
+     module_name : str
+         Module name to look up.
+     format_name : str
+         Human-readable format name for templated messages.
+
+     Returns
+     -------
+     str
+         Formatted error message.
+     """
+     return (
+         f'{format_name} support requires '
+         f'optional dependency "{module_name}".\n'
+         f'Install with: pip install {module_name}'
+     )
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def get_optional_module(
+     module_name: str,
+     *,
+     error_message: str,
+ ) -> Any:
+     """
+     Return an optional dependency module, caching on first import.
+
+     Parameters
+     ----------
+     module_name : str
+         Name of the module to import.
+     error_message : str
+         Error message to surface when the module is missing.
+
+     Returns
+     -------
+     Any
+         The imported module.
+
+     Raises
+     ------
+     ImportError
+         If the optional dependency is missing.
+     """
+     cached = _MODULE_CACHE.get(module_name)
+     if cached is not None:  # pragma: no cover - tiny branch
+         return cached
+     try:
+         module = import_module(module_name)
+     except ImportError as e:  # pragma: no cover
+         raise ImportError(error_message) from e
+     _MODULE_CACHE[module_name] = module
+     return module
+
+
+ def get_fastavro() -> Any:
+     """
+     Return the fastavro module, importing it on first use.
+
+     Raises an informative ImportError if the optional dependency is missing.
+
+     Notes
+     -----
+     Prefer :func:`get_optional_module` for new call sites.
+     """
+     return get_optional_module(
+         'fastavro',
+         error_message=_error_message('fastavro', format_name='AVRO'),
+     )
+
+
+ def get_pandas(
+     format_name: str,
+ ) -> Any:
+     """
+     Return the pandas module, importing it on first use.
+
+     Parameters
+     ----------
+     format_name : str
+         Human-readable format name for error messages.
+
+     Returns
+     -------
+     Any
+         The pandas module.
+
+     Notes
+     -----
+     Prefer :func:`get_optional_module` for new call sites.
+     """
+     return get_optional_module(
+         'pandas',
+         error_message=_error_message('pandas', format_name=format_name),
+     )
+
+
+ def get_yaml() -> Any:
+     """
+     Return the PyYAML module, importing it on first use.
+
+     Raises an informative ImportError if the optional dependency is missing.
+
+     Notes
+     -----
+     Prefer :func:`get_optional_module` for new call sites.
+     """
+     return get_optional_module(
+         'yaml',
+         error_message=_error_message('PyYAML', format_name='YAML'),
+     )
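For orientation (not part of the diff): a minimal sketch of how a call site could use the new shared helper. The lxml/XML pairing below is hypothetical and only illustrates the get_optional_module signature added in this file.

    from etlplus.file._imports import get_optional_module

    # Hypothetical call site: lazily import an optional dependency and fail
    # with an actionable message when it is not installed. The module is
    # cached on first import, so repeated calls reuse the same object.
    lxml = get_optional_module(
        'lxml',
        error_message=(
            'XML support requires optional dependency "lxml".\n'
            'Install with: pip install lxml'
        ),
    )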
etlplus/file/_io.py CHANGED
@@ -107,6 +107,7 @@ def write_delimited(path: Path, data: JSONData, *, delimiter: str) -> int:
          return 0
 
      fieldnames = sorted({key for row in rows for key in row})
+     path.parent.mkdir(parents=True, exist_ok=True)
      with path.open('w', encoding='utf-8', newline='') as handle:
          writer = csv.DictWriter(
              handle,
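A short caller-side sketch (not part of the diff) of what the added mkdir line enables, assuming write_delimited is called with an output path whose parent directory does not exist yet; the path and rows below are illustrative.

    from pathlib import Path

    from etlplus.file._io import write_delimited

    # Before this change, opening the file would raise FileNotFoundError when
    # 'out/reports/' was missing; mkdir(parents=True, exist_ok=True) now
    # creates the directory tree first.
    count = write_delimited(
        Path('out/reports/sales.csv'),
        [{'region': 'east', 'total': 120}],
        delimiter=',',
    )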
etlplus/file/accdb.py ADDED
@@ -0,0 +1,78 @@
+ """
+ :mod:`etlplus.file.accdb` module.
+
+ Helpers for reading/writing newer Microsoft Access database (ACCDB) files.
+
+ Notes
+ -----
+ - An ACCDB file is a proprietary database file format used by Microsoft Access
+   2007 and later.
+ - Common cases:
+     - Storing relational data for small to medium-sized applications.
+     - Desktop database applications.
+     - Data management for non-enterprise solutions.
+ - Rule of thumb:
+     - If the file follows the ACCDB specification, use this module for reading
+       and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read ACCDB content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the ACCDB file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the ACCDB file.
+     """
+     return stub.read(path, format_name='ACCDB')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to ACCDB at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the ACCDB file on disk.
+     data : JSONData
+         Data to write as ACCDB. Should be a list of dictionaries or a single
+         dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the ACCDB file.
+     """
+     return stub.write(path, data, format_name='ACCDB')
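The other stub-backed modules shown below (arrow, bson, cbor, cfg, conf) follow the same shape: a documented read/write pair that forwards to the shared stub helpers with the matching format_name. A minimal caller-side sketch, with a hypothetical file name:

    from pathlib import Path

    from etlplus.file import accdb

    # Each stub-backed format module exposes this same surface; only the
    # format_name passed through to stub.read/stub.write differs.
    rows = accdb.read(Path('legacy.accdb'))
    written = accdb.write(Path('legacy.accdb'), rows)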
etlplus/file/arrow.py ADDED
@@ -0,0 +1,78 @@
+ """
+ :mod:`etlplus.file.arrow` module.
+
+ Helpers for reading/writing Apache Arrow (ARROW) files.
+
+ Notes
+ -----
+ - An ARROW file is a binary file format designed for efficient
+   columnar data storage and processing.
+ - Common cases:
+     - High-performance data analytics.
+     - Interoperability between different data processing systems.
+     - In-memory data representation for fast computations.
+ - Rule of thumb:
+     - If the file follows the Apache Arrow specification, use this module for
+       reading and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read ARROW content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the Apache Arrow file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the Apache Arrow file.
+     """
+     return stub.read(path, format_name='ARROW')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to ARROW at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the ARROW file on disk.
+     data : JSONData
+         Data to write as ARROW. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the ARROW file.
+     """
+     return stub.write(path, data, format_name='ARROW')
etlplus/file/avro.py CHANGED
@@ -1,7 +1,19 @@
  """
  :mod:`etlplus.file.avro` module.
 
- Helpers for reading/writing Avro files.
+ Helpers for reading/writing Apache Avro (AVRO) files.
+
+ Notes
+ -----
+ - An AVRO file is a binary file format designed for efficient
+   on-disk storage of data, with a schema definition.
+ - Common cases:
+     - Data serialization for distributed systems.
+     - Interoperability between different programming languages.
+     - Storage of large datasets with schema evolution support.
+ - Rule of thumb:
+     - If the file follows the Apache Avro specification, use this module for
+       reading and writing.
  """
 
  from __future__ import annotations
@@ -10,6 +22,8 @@ from pathlib import Path
  from typing import Any
  from typing import cast
 
+ from etlplus.file._imports import get_fastavro
+
  from ..types import JSONData
  from ..types import JSONDict
  from ..types import JSONList
@@ -27,9 +41,6 @@ __all__ = [
  # SECTION: INTERNAL CONSTANTS =============================================== #
 
 
- _FASTAVRO_CACHE: dict[str, Any] = {}
-
-
  _PRIMITIVE_TYPES: tuple[type, ...] = (
      bool,
      int,
@@ -43,27 +54,6 @@ _PRIMITIVE_TYPES: tuple[type, ...] = (
  # SECTION: INTERNAL FUNCTIONS =============================================== #
 
 
- def _get_fastavro() -> Any:
-     """
-     Return the fastavro module, importing it on first use.
-
-     Raises an informative ImportError if the optional dependency is missing.
-     """
-     mod = _FASTAVRO_CACHE.get('mod')
-     if mod is not None:  # pragma: no cover - tiny branch
-         return mod
-     try:
-         _fastavro = __import__('fastavro')  # type: ignore[assignment]
-     except ImportError as e:  # pragma: no cover
-         raise ImportError(
-             'AVRO support requires optional dependency "fastavro".\n'
-             'Install with: pip install fastavro',
-         ) from e
-     _FASTAVRO_CACHE['mod'] = _fastavro
-
-     return _fastavro
-
-
  def _infer_schema(records: JSONList) -> dict[str, Any]:
      """
      Infer a basic Avro schema from record payloads.
@@ -146,7 +136,7 @@ def read(
      JSONList
          The list of dictionaries read from the AVRO file.
      """
-     fastavro = _get_fastavro()
+     fastavro = get_fastavro()
      with path.open('rb') as handle:
          reader = fastavro.reader(handle)
          return [cast(JSONDict, record) for record in reader]
@@ -175,7 +165,7 @@ def write(
      if not records:
          return 0
 
-     fastavro = _get_fastavro()
+     fastavro = get_fastavro()
      schema = _infer_schema(records)
      parsed_schema = fastavro.parse_schema(schema)
 
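A short roundtrip sketch (not part of the diff) of the unchanged public avro surface after this refactor, assuming the optional fastavro dependency is installed; the path and records are illustrative.

    from pathlib import Path

    from etlplus.file import avro

    records = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
    written = avro.write(Path('events.avro'), records)  # infers a basic schema
    roundtrip = avro.read(Path('events.avro'))          # list of dicts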
etlplus/file/bson.py ADDED
@@ -0,0 +1,77 @@
+ """
+ :mod:`etlplus.file.bson` module.
+
+ Helpers for reading/writing Binary JSON (BSON) files.
+
+ Notes
+ -----
+ - A BSON file is a binary-encoded serialization of JSON-like documents.
+ - Common cases:
+     - Data storage in MongoDB.
+     - Efficient data interchange between systems.
+     - Handling of complex data types not supported in standard JSON.
+ - Rule of thumb:
+     - If the file follows the BSON specification, use this module for reading
+       and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read BSON content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the BSON file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the BSON file.
+     """
+     return stub.read(path, format_name='BSON')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to BSON at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the BSON file on disk.
+     data : JSONData
+         Data to write as BSON. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the BSON file.
+     """
+     return stub.write(path, data, format_name='BSON')
etlplus/file/cbor.py ADDED
@@ -0,0 +1,78 @@
+ """
+ :mod:`etlplus.file.cbor` module.
+
+ Helpers for reading/writing Concise Binary Object Representation (CBOR) files.
+
+ Notes
+ -----
+ - A CBOR file is a binary data format designed for small code size and message
+   size, suitable for constrained environments.
+ - Common cases:
+     - IoT data interchange.
+     - Efficient data serialization.
+     - Storage of structured data in a compact binary form.
+ - Rule of thumb:
+     - If the file follows the CBOR specification, use this module for reading
+       and writing.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read CBOR content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the CBOR file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the CBOR file.
+     """
+     return stub.read(path, format_name='CBOR')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to CBOR at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the CBOR file on disk.
+     data : JSONData
+         Data to write as CBOR. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the CBOR file.
+     """
+     return stub.write(path, data, format_name='CBOR')
etlplus/file/cfg.py ADDED
@@ -0,0 +1,79 @@
+ """
+ :mod:`etlplus.file.cfg` module.
+
+ Helpers for reading/writing config (CFG) files.
+
+ Notes
+ -----
+ - A CFG file is a configuration file that may use various syntaxes, such as
+   INI, YAML, or custom formats.
+ - Common cases:
+     - INI-style key-value pairs with sections (such as in Python ecosystems,
+       using ``configparser``).
+     - YAML-like structures with indentation.
+     - Custom formats specific to certain applications.
+ - Rule of thumb:
+     - If the file follows a standard format like INI or YAML, consider using
+       dedicated parsers.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read CFG content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the CFG file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the CFG file.
+     """
+     return stub.read(path, format_name='CFG')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to CFG file at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the CFG file on disk.
+     data : JSONData
+         Data to write as CFG file. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the CFG file.
+     """
+     return stub.write(path, data, format_name='CFG')
etlplus/file/conf.py ADDED
@@ -0,0 +1,80 @@
+ """
+ :mod:`etlplus.file.conf` module.
+
+ Helpers for reading/writing config (CONF) files.
+
+ Notes
+ -----
+ - A CONF file is a configuration file that may use various syntaxes, such as
+   INI, YAML, or custom formats.
+ - Common cases:
+     - INI-style key-value pairs with sections.
+     - YAML-like structures with indentation.
+     - Custom formats specific to certain applications (such as Unix-like
+       systems, where ``.conf`` is a strong convention for "This is a
+       configuration file").
+ - Rule of thumb:
+     - If the file follows a standard format like INI or YAML, consider using
+       dedicated parsers.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ..types import JSONData
+ from ..types import JSONList
+ from . import stub
+
+ # SECTION: EXPORTS ========================================================== #
+
+
+ __all__ = [
+     'read',
+     'write',
+ ]
+
+
+ # SECTION: FUNCTIONS ======================================================== #
+
+
+ def read(
+     path: Path,
+ ) -> JSONList:
+     """
+     Read CONF content from ``path``.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the CONF file on disk.
+
+     Returns
+     -------
+     JSONList
+         The list of dictionaries read from the CONF file.
+     """
+     return stub.read(path, format_name='CONF')
+
+
+ def write(
+     path: Path,
+     data: JSONData,
+ ) -> int:
+     """
+     Write ``data`` to CONF at ``path`` and return record count.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the CONF file on disk.
+     data : JSONData
+         Data to write as CONF. Should be a list of dictionaries or a
+         single dictionary.
+
+     Returns
+     -------
+     int
+         The number of rows written to the CONF file.
+     """
+     return stub.write(path, data, format_name='CONF')
+ return stub.write(path, data, format_name='CONF')