etlplus 0.16.10__py3-none-any.whl → 0.17.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70) hide show
  1. etlplus/file/README.md +33 -0
  2. etlplus/file/_imports.py +35 -20
  3. etlplus/file/_io.py +138 -15
  4. etlplus/file/_r.py +48 -0
  5. etlplus/file/_sql.py +224 -0
  6. etlplus/file/accdb.py +7 -6
  7. etlplus/file/arrow.py +29 -10
  8. etlplus/file/avro.py +13 -10
  9. etlplus/file/bson.py +94 -10
  10. etlplus/file/cbor.py +29 -17
  11. etlplus/file/cfg.py +7 -6
  12. etlplus/file/conf.py +7 -6
  13. etlplus/file/core.py +1 -1
  14. etlplus/file/csv.py +8 -7
  15. etlplus/file/dat.py +52 -11
  16. etlplus/file/dta.py +36 -16
  17. etlplus/file/duckdb.py +72 -11
  18. etlplus/file/enums.py +29 -0
  19. etlplus/file/feather.py +15 -30
  20. etlplus/file/fwf.py +44 -10
  21. etlplus/file/gz.py +12 -7
  22. etlplus/file/hbs.py +7 -6
  23. etlplus/file/hdf5.py +71 -8
  24. etlplus/file/ini.py +60 -17
  25. etlplus/file/ion.py +7 -6
  26. etlplus/file/jinja2.py +7 -6
  27. etlplus/file/json.py +10 -11
  28. etlplus/file/log.py +7 -6
  29. etlplus/file/mat.py +7 -6
  30. etlplus/file/mdb.py +7 -6
  31. etlplus/file/msgpack.py +27 -15
  32. etlplus/file/mustache.py +7 -6
  33. etlplus/file/nc.py +69 -11
  34. etlplus/file/ndjson.py +10 -6
  35. etlplus/file/numbers.py +7 -6
  36. etlplus/file/ods.py +48 -11
  37. etlplus/file/orc.py +15 -30
  38. etlplus/file/parquet.py +10 -6
  39. etlplus/file/pb.py +36 -24
  40. etlplus/file/pbf.py +7 -6
  41. etlplus/file/properties.py +44 -18
  42. etlplus/file/proto.py +24 -18
  43. etlplus/file/psv.py +12 -11
  44. etlplus/file/rda.py +57 -15
  45. etlplus/file/rds.py +50 -14
  46. etlplus/file/sas7bdat.py +26 -16
  47. etlplus/file/sav.py +34 -16
  48. etlplus/file/sqlite.py +70 -10
  49. etlplus/file/stub.py +8 -6
  50. etlplus/file/sylk.py +7 -6
  51. etlplus/file/tab.py +13 -13
  52. etlplus/file/toml.py +56 -17
  53. etlplus/file/tsv.py +8 -7
  54. etlplus/file/txt.py +10 -7
  55. etlplus/file/vm.py +7 -6
  56. etlplus/file/wks.py +7 -6
  57. etlplus/file/xls.py +8 -5
  58. etlplus/file/xlsm.py +48 -10
  59. etlplus/file/xlsx.py +10 -6
  60. etlplus/file/xml.py +11 -9
  61. etlplus/file/xpt.py +46 -10
  62. etlplus/file/yaml.py +10 -11
  63. etlplus/file/zip.py +10 -5
  64. etlplus/file/zsav.py +7 -6
  65. {etlplus-0.16.10.dist-info → etlplus-0.17.3.dist-info}/METADATA +44 -26
  66. {etlplus-0.16.10.dist-info → etlplus-0.17.3.dist-info}/RECORD +70 -68
  67. {etlplus-0.16.10.dist-info → etlplus-0.17.3.dist-info}/WHEEL +0 -0
  68. {etlplus-0.16.10.dist-info → etlplus-0.17.3.dist-info}/entry_points.txt +0 -0
  69. {etlplus-0.16.10.dist-info → etlplus-0.17.3.dist-info}/licenses/LICENSE +0 -0
  70. {etlplus-0.16.10.dist-info → etlplus-0.17.3.dist-info}/top_level.txt +0 -0
etlplus/file/README.md CHANGED
@@ -9,6 +9,12 @@ and writing data files.
9
9
  types
10
10
  - Exposes a `File` class with instance methods for reading and writing data
11
11
 
12
+ Some formats require optional dependencies. Install with:
13
+
14
+ ```bash
15
+ pip install -e ".[file]"
16
+ ```
17
+
12
18
  Back to project overview: see the top-level [README](../../README.md).
13
19
 
14
20
  - [`etlplus.file` Subpackage](#etlplusfile-subpackage)
@@ -29,21 +35,48 @@ matrix across all `FileFormat` values, see the top-level [README](../../README.m
29
35
  | Format | Description |
30
36
  |-----------|---------------------------------------------|
31
37
  | avro | Apache Avro binary serialization |
38
+ | arrow | Apache Arrow IPC |
39
+ | bson | Binary JSON (BSON) |
40
+ | cbor | Concise Binary Object Representation |
32
41
  | csv | Comma-separated values text files |
42
+ | dat | Generic data files (delimited) |
43
+ | dta | Stata datasets |
44
+ | duckdb | DuckDB database file |
33
45
  | feather | Apache Arrow Feather columnar format |
46
+ | fwf | Fixed-width formatted text files |
34
47
  | gz | Gzip-compressed files (see Compression) |
48
+ | hdf5 | Hierarchical Data Format |
49
+ | ini | INI config files |
35
50
  | json | Standard JSON files |
51
+ | msgpack | MessagePack binary serialization |
52
+ | nc | NetCDF datasets |
36
53
  | ndjson | Newline-delimited JSON (JSON Lines) |
54
+ | ods | OpenDocument spreadsheets |
37
55
  | orc | Apache ORC columnar format |
38
56
  | parquet | Apache Parquet columnar format |
57
+ | pb | Protocol Buffers binary |
58
+ | properties | Java-style properties |
59
+ | proto | Protocol Buffers schema |
60
+ | psv | Pipe-separated values text files |
61
+ | rda | RData workspace bundles |
62
+ | rds | RDS datasets |
63
+ | sas7bdat | SAS datasets |
64
+ | sav | SPSS datasets |
65
+ | sqlite | SQLite database file |
66
+ | tab | Tab-delimited text files |
67
+ | toml | TOML config files |
39
68
  | tsv | Tab-separated values text files |
40
69
  | txt | Plain text files |
41
70
  | xls | Microsoft Excel (legacy .xls; read-only) |
71
+ | xlsm | Microsoft Excel Macro-Enabled (XLSM) |
42
72
  | xlsx | Microsoft Excel (modern .xlsx) |
73
+ | xpt | SAS transport files |
43
74
  | zip | ZIP-compressed files (see Compression) |
44
75
  | xml | XML files |
45
76
  | yaml | YAML files |
46
77
 
78
+ Note: HDF5 support is read-only; writing is currently disabled.
79
+
47
80
  Compression formats (gz, zip) are also supported as wrappers for other formats. Formats not listed
48
81
  here are currently stubbed and will raise `NotImplementedError` on read/write.
49
82
 
etlplus/file/_imports.py CHANGED
@@ -22,6 +22,7 @@ _MODULE_CACHE: dict[str, Any] = {}
22
22
  def _error_message(
23
23
  module_name: str,
24
24
  format_name: str,
25
+ pip_name: str | None = None,
25
26
  ) -> str:
26
27
  """
27
28
  Build an import error message for an optional dependency.
@@ -32,16 +33,19 @@ def _error_message(
32
33
  Module name to look up.
33
34
  format_name : str
34
35
  Human-readable format name for templated messages.
36
+ pip_name : str | None, optional
37
+ Package name to suggest for installation. Defaults to *module_name*.
35
38
 
36
39
  Returns
37
40
  -------
38
41
  str
39
42
  Formatted error message.
40
43
  """
44
+ install_name = pip_name or module_name
41
45
  return (
42
46
  f'{format_name} support requires '
43
- f'optional dependency "{module_name}".\n'
44
- f'Install with: pip install {module_name}'
47
+ f'optional dependency "{install_name}".\n'
48
+ f'Install with: pip install {install_name}'
45
49
  )
46
50
 
47
51
 
@@ -84,19 +88,36 @@ def get_optional_module(
84
88
  return module
85
89
 
86
90
 
87
- def get_fastavro() -> Any:
91
+ def get_dependency(
92
+ module_name: str,
93
+ *,
94
+ format_name: str,
95
+ pip_name: str | None = None,
96
+ ) -> Any:
88
97
  """
89
- Return the fastavro module, importing it on first use.
98
+ Return an optional dependency module with a standardized error message.
90
99
 
91
- Raises an informative ImportError if the optional dependency is missing.
100
+ Parameters
101
+ ----------
102
+ module_name : str
103
+ Name of the module to import.
104
+ format_name : str
105
+ Human-readable format name for error messages.
106
+ pip_name : str | None, optional
107
+ Package name to suggest for installation (defaults to *module_name*).
92
108
 
93
- Notes
94
- -----
95
- Prefer :func:`get_optional_module` for new call sites.
109
+ Returns
110
+ -------
111
+ Any
112
+ The imported module.
96
113
  """
97
114
  return get_optional_module(
98
- 'fastavro',
99
- error_message=_error_message('fastavro', format_name='AVRO'),
115
+ module_name,
116
+ error_message=_error_message(
117
+ module_name,
118
+ format_name=format_name,
119
+ pip_name=pip_name,
120
+ ),
100
121
  )
101
122
 
102
123
 
@@ -118,12 +139,9 @@ def get_pandas(
118
139
 
119
140
  Notes
120
141
  -----
121
- Prefer :func:`get_optional_module` for new call sites.
142
+ Prefer :func:`get_dependency` for new call sites.
122
143
  """
123
- return get_optional_module(
124
- 'pandas',
125
- error_message=_error_message('pandas', format_name=format_name),
126
- )
144
+ return get_dependency('pandas', format_name=format_name)
127
145
 
128
146
 
129
147
  def get_yaml() -> Any:
@@ -134,9 +152,6 @@ def get_yaml() -> Any:
134
152
 
135
153
  Notes
136
154
  -----
137
- Prefer :func:`get_optional_module` for new call sites.
155
+ Prefer :func:`get_dependency` for new call sites.
138
156
  """
139
- return get_optional_module(
140
- 'yaml',
141
- error_message=_error_message('PyYAML', format_name='YAML'),
142
- )
157
+ return get_dependency('yaml', format_name='YAML', pip_name='PyYAML')
etlplus/file/_io.py CHANGED
@@ -14,10 +14,30 @@ from typing import cast
14
14
  from ..types import JSONData
15
15
  from ..types import JSONDict
16
16
  from ..types import JSONList
17
+ from ..types import StrPath
17
18
 
18
19
  # SECTION: FUNCTIONS ======================================================== #
19
20
 
20
21
 
22
def coerce_path(
    path: StrPath,
) -> Path:
    """
    Normalize a path-like value into a :class:`~pathlib.Path`.

    Parameters
    ----------
    path : StrPath
        String or path object to normalize.

    Returns
    -------
    Path
        The same object when *path* is already a ``Path``; otherwise a
        new :class:`~pathlib.Path` built from it.
    """
    if isinstance(path, Path):
        return path
    return Path(path)
39
+
40
+
21
41
  def coerce_record_payload(
22
42
  payload: Any,
23
43
  *,
@@ -56,6 +76,21 @@ def coerce_record_payload(
56
76
  )
57
77
 
58
78
 
79
def ensure_parent_dir(
    path: StrPath,
) -> None:
    """
    Create the parent directory of *path* if it is missing.

    Parameters
    ----------
    path : StrPath
        Target file path whose parent directory must exist.
    """
    # Accept both strings and Path objects without extra helpers.
    target = path if isinstance(path, Path) else Path(path)
    # parents=True / exist_ok=True makes the call idempotent.
    target.parent.mkdir(parents=True, exist_ok=True)
92
+
93
+
59
94
  def normalize_records(
60
95
  data: JSONData,
61
96
  format_name: str,
@@ -78,7 +113,7 @@ def normalize_records(
78
113
  Raises
79
114
  ------
80
115
  TypeError
81
- If a list payload contains non-dict items.
116
+ If the payload is not a dict or a list of dicts.
82
117
  """
83
118
  if isinstance(data, list):
84
119
  if not all(isinstance(item, dict) for item in data):
@@ -86,11 +121,15 @@ def normalize_records(
86
121
  f'{format_name} payloads must contain only objects (dicts)',
87
122
  )
88
123
  return cast(JSONList, data)
89
- return [cast(JSONDict, data)]
124
+ if isinstance(data, dict):
125
+ return [cast(JSONDict, data)]
126
+ raise TypeError(
127
+ f'{format_name} payloads must be an object or an array of objects',
128
+ )
90
129
 
91
130
 
92
131
  def read_delimited(
93
- path: Path,
132
+ path: StrPath,
94
133
  *,
95
134
  delimiter: str,
96
135
  ) -> JSONList:
@@ -99,7 +138,7 @@ def read_delimited(
99
138
 
100
139
  Parameters
101
140
  ----------
102
- path : Path
141
+ path : StrPath
103
142
  Path to the delimited file on disk.
104
143
  delimiter : str
105
144
  Delimiter character for parsing.
@@ -109,6 +148,7 @@ def read_delimited(
109
148
  JSONList
110
149
  The list of dictionaries read from the delimited file.
111
150
  """
151
+ path = coerce_path(path)
112
152
  with path.open('r', encoding='utf-8', newline='') as handle:
113
153
  reader: csv.DictReader[str] = csv.DictReader(
114
154
  handle,
@@ -122,40 +162,123 @@ def read_delimited(
122
162
  return rows
123
163
 
124
164
 
165
def require_dict_payload(
    data: JSONData,
    *,
    format_name: str,
) -> JSONDict:
    """
    Validate that *data* is a dictionary payload.

    Parameters
    ----------
    data : JSONData
        Input payload to validate.
    format_name : str
        Human-readable format name for error messages.

    Returns
    -------
    JSONDict
        Validated dictionary payload.

    Raises
    ------
    TypeError
        If the payload is anything other than a dictionary.
    """
    # Lists and all other non-dict types fail the same check, so one
    # isinstance test covers every rejection case.
    if not isinstance(data, dict):
        raise TypeError(f'{format_name} payloads must be a dict')
    # Forward-reference form keeps the cast a pure runtime no-op.
    return cast('JSONDict', data)
193
+
194
+
195
def require_str_key(
    payload: JSONDict,
    *,
    format_name: str,
    key: str,
) -> str:
    """
    Extract *key* from *payload* and require a string value.

    Parameters
    ----------
    payload : JSONDict
        Dictionary payload to inspect.
    format_name : str
        Human-readable format name for error messages.
    key : str
        Key to extract.

    Returns
    -------
    str
        The string stored under *key*.

    Raises
    ------
    TypeError
        If *key* is absent or maps to a non-string value.
    """
    candidate = payload.get(key)
    # A missing key yields None, which fails the same isinstance check
    # as any non-string value.
    if isinstance(candidate, str):
        return candidate
    raise TypeError(
        f'{format_name} payloads must include a "{key}" string',
    )
229
+
230
+
231
def stringify_value(value: Any) -> str:
    """
    Render configuration-like values as strings.

    Parameters
    ----------
    value : Any
        Value to render.

    Returns
    -------
    str
        ``''`` when *value* is ``None``; otherwise ``str(value)``.
    """
    return '' if value is None else str(value)
248
+
249
+
125
250
  def write_delimited(
126
- path: Path,
251
+ path: StrPath,
127
252
  data: JSONData,
128
253
  *,
129
254
  delimiter: str,
255
+ format_name: str = 'Delimited',
130
256
  ) -> int:
131
257
  """
132
258
  Write *data* to a delimited file and return record count.
133
259
 
134
260
  Parameters
135
261
  ----------
136
- path : Path
262
+ path : StrPath
137
263
  Path to the delimited file on disk.
138
264
  data : JSONData
139
265
  Data to write as delimited rows.
140
266
  delimiter : str
141
267
  Delimiter character for writing.
268
+ format_name : str, optional
269
+ Human-readable format name for error messages. Defaults to
270
+ ``'Delimited'``.
142
271
 
143
272
  Returns
144
273
  -------
145
274
  int
146
275
  The number of rows written.
147
276
  """
148
- rows: list[JSONDict]
149
- if isinstance(data, list):
150
- rows = [row for row in data if isinstance(row, dict)]
151
- else:
152
- rows = [data]
153
-
154
- if not rows:
155
- return 0
277
+ path = coerce_path(path)
278
+ rows = normalize_records(data, format_name)
156
279
 
157
280
  fieldnames = sorted({key for row in rows for key in row})
158
- path.parent.mkdir(parents=True, exist_ok=True)
281
+ ensure_parent_dir(path)
159
282
  with path.open('w', encoding='utf-8', newline='') as handle:
160
283
  writer = csv.DictWriter(
161
284
  handle,
etlplus/file/_r.py ADDED
@@ -0,0 +1,48 @@
1
+ """
2
+ :mod:`etlplus.file._r` module.
3
+
4
+ Shared helpers for R-related file formats.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from typing import Any
10
+
11
+ from ..types import JSONData
12
+
13
+ # SECTION: EXPORTS ========================================================== #
14
+
15
+
16
# Public names exported by this helper module.
__all__ = [
    'coerce_r_object',
]
19
+
20
+
21
+ # SECTION: FUNCTIONS ======================================================== #
22
+
23
+
24
def coerce_r_object(value: Any, pandas: Any) -> JSONData:
    """
    Convert an object loaded via ``pyreadr`` into JSON-friendly data.

    Parameters
    ----------
    value : Any
        Object returned by ``pyreadr``.
    pandas : Any
        The pandas module, injected so this helper needs no import.

    Returns
    -------
    JSONData
        Row records for DataFrames; dicts and lists of dicts pass
        through unchanged; anything else is wrapped as
        ``{'value': ...}``.
    """
    if isinstance(value, pandas.DataFrame):
        # One dict per row matches the shape of the other file readers.
        return value.to_dict(orient='records')
    if isinstance(value, dict):
        return value
    if isinstance(value, list):
        # Only homogeneous lists of objects pass through unchanged.
        if all(isinstance(item, dict) for item in value):
            return value
    return {'value': value}
etlplus/file/_sql.py ADDED
@@ -0,0 +1,224 @@
1
+ """
2
+ :mod:`etlplus.file._sql` module.
3
+
4
+ Shared helpers for lightweight SQL-backed file formats.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import json
10
+ from dataclasses import dataclass
11
+ from typing import Any
12
+
13
+ from ..types import JSONList
14
+
15
+ # SECTION: EXPORTS ========================================================== #
16
+
17
+
18
# Public API, grouped by kind and alphabetized within each group.
# ('coerce_sql_value' sorts before 'collect_column_values'.)
__all__ = [
    # Constants
    'DEFAULT_TABLE',
    'DUCKDB_DIALECT',
    'SQLITE_DIALECT',
    # Data Classes
    'SqlDialect',
    # Functions
    'coerce_sql_value',
    'collect_column_values',
    'infer_column_type',
    'quote_identifier',
    'resolve_table',
]
32
+
33
+
34
+ # SECTION: DATA CLASSES ===================================================== #
35
+
36
+
37
@dataclass(frozen=True, slots=True)
class SqlDialect:
    """
    Simple SQL type mapping for inferred column types.

    Attributes
    ----------
    text : str
        Textual column type name.
    integer : str
        Integer column type name.
    floating : str
        Floating-point column type name.
    boolean : str
        Boolean column type name.
    """

    # frozen + slots: instances are immutable, hashable, and compact.
    text: str
    integer: str
    floating: str
    boolean: str
58
+
59
+
60
+ # SECTION: CONSTANTS ======================================================== #
61
+
62
+
63
# Table name preferred on read when several tables exist.
DEFAULT_TABLE = 'data'

# SQLite type names; boolean columns are typed INTEGER.
SQLITE_DIALECT = SqlDialect(
    text='TEXT',
    integer='INTEGER',
    floating='REAL',
    boolean='INTEGER',
)

# DuckDB type names, including a native BOOLEAN column type.
DUCKDB_DIALECT = SqlDialect(
    text='VARCHAR',
    integer='BIGINT',
    floating='DOUBLE',
    boolean='BOOLEAN',
)
78
+
79
+
80
+ # SECTION: FUNCTIONS ======================================================== #
81
+
82
+
83
def coerce_sql_value(value: Any) -> Any:
    """
    Normalize values into SQL-compatible scalar types.

    Parameters
    ----------
    value : Any
        Value to normalize.

    Returns
    -------
    Any
        *value* unchanged when it is ``None`` or a plain scalar; a
        serialized JSON string for lists, dicts, and other objects.
    """
    is_scalar = value is None or isinstance(value, (str, int, float, bool))
    if is_scalar:
        return value
    # ensure_ascii keeps the stored text safe for any DB encoding.
    return json.dumps(value, ensure_ascii=True)
100
+
101
+
102
def collect_column_values(
    records: JSONList,
) -> tuple[list[str], dict[str, list[Any]]]:
    """
    Gather the union of column names and per-column value lists.

    Parameters
    ----------
    records : JSONList
        Record payloads to scan.

    Returns
    -------
    tuple[list[str], dict[str, list[Any]]]
        Sorted column names, and a mapping of column name to that
        column's values across all records (``None`` where absent).
    """
    names: set[str] = set()
    for record in records:
        names.update(record)
    columns = sorted(names)
    # dict.get(name) yields None for missing keys, keeping every
    # column list aligned with the record order.
    column_values: dict[str, list[Any]] = {
        name: [record.get(name) for record in records]
        for name in columns
    }
    return columns, column_values
124
+
125
+
126
def infer_column_type(values: list[Any], dialect: SqlDialect) -> str:
    """
    Infer a SQL column type for the provided values.

    Parameters
    ----------
    values : list[Any]
        Sample values for a column; ``None`` entries are ignored.
    dialect : SqlDialect
        Dialect mapping for type names.

    Returns
    -------
    str
        Dialect-specific type name. Text beats float, float beats
        int, int beats bool; all-``None`` columns fall back to text.
    """
    kinds: set[str] = set()
    for item in values:
        if item is None:
            continue
        # bool must be tested before int: bool is an int subclass.
        if isinstance(item, bool):
            kinds.add('bool')
        elif isinstance(item, int):
            kinds.add('int')
        elif isinstance(item, float):
            kinds.add('float')
        else:
            # Any non-numeric value forces a text column; stop scanning.
            kinds.add('other')
            break
    if 'other' in kinds:
        return dialect.text
    if 'float' in kinds:
        return dialect.floating
    if 'int' in kinds:
        return dialect.integer
    if 'bool' in kinds:
        return dialect.boolean
    return dialect.text
167
+
168
+
169
def quote_identifier(value: str) -> str:
    """
    Return a safely double-quoted SQL identifier.

    Parameters
    ----------
    value : str
        Identifier to quote.

    Returns
    -------
    str
        Identifier wrapped in double quotes, with any embedded double
        quotes doubled per the SQL standard.
    """
    return '"{}"'.format(value.replace('"', '""'))
185
+
186
+
187
def resolve_table(
    tables: list[str],
    *,
    engine_name: str,
    default_table: str = DEFAULT_TABLE,
) -> str | None:
    """
    Pick a table name for read operations.

    Parameters
    ----------
    tables : list[str]
        Table names available in the database.
    engine_name : str
        Engine name used in error messages.
    default_table : str, optional
        Preferred table name to look for first.

    Returns
    -------
    str | None
        Selected table name, or ``None`` for an empty database.

    Raises
    ------
    ValueError
        If several tables exist and none matches *default_table*.
    """
    selected: str | None = None
    if default_table in tables:
        # The conventional table name always wins when present.
        selected = default_table
    elif len(tables) == 1:
        selected = tables[0]
    elif tables:
        # Two or more tables with no recognizable default is ambiguous.
        raise ValueError(
            f'Multiple tables found in {engine_name} file; expected '
            f'"{default_table}" or a single table',
        )
    return selected