table2db 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. table2db-0.1.0/LICENSE +21 -0
  2. table2db-0.1.0/PKG-INFO +451 -0
  3. table2db-0.1.0/README.md +421 -0
  4. table2db-0.1.0/pyproject.toml +51 -0
  5. table2db-0.1.0/setup.cfg +4 -0
  6. table2db-0.1.0/table2db/__init__.py +15 -0
  7. table2db-0.1.0/table2db/cli.py +187 -0
  8. table2db-0.1.0/table2db/converter.py +122 -0
  9. table2db-0.1.0/table2db/describe.py +150 -0
  10. table2db-0.1.0/table2db/errors.py +14 -0
  11. table2db-0.1.0/table2db/loaders/__init__.py +4 -0
  12. table2db-0.1.0/table2db/loaders/base.py +22 -0
  13. table2db-0.1.0/table2db/loaders/sqlite_loader.py +187 -0
  14. table2db-0.1.0/table2db/models.py +70 -0
  15. table2db-0.1.0/table2db/pipeline/__init__.py +0 -0
  16. table2db-0.1.0/table2db/pipeline/cleaner.py +183 -0
  17. table2db-0.1.0/table2db/pipeline/island_detector.py +113 -0
  18. table2db-0.1.0/table2db/pipeline/loader.py +11 -0
  19. table2db-0.1.0/table2db/pipeline/reader.py +272 -0
  20. table2db-0.1.0/table2db/pipeline/relator.py +119 -0
  21. table2db-0.1.0/table2db/pipeline/structure.py +296 -0
  22. table2db-0.1.0/table2db/pipeline/typer.py +188 -0
  23. table2db-0.1.0/table2db.egg-info/PKG-INFO +451 -0
  24. table2db-0.1.0/table2db.egg-info/SOURCES.txt +39 -0
  25. table2db-0.1.0/table2db.egg-info/dependency_links.txt +1 -0
  26. table2db-0.1.0/table2db.egg-info/entry_points.txt +2 -0
  27. table2db-0.1.0/table2db.egg-info/requires.txt +6 -0
  28. table2db-0.1.0/table2db.egg-info/top_level.txt +1 -0
  29. table2db-0.1.0/tests/test_async.py +20 -0
  30. table2db-0.1.0/tests/test_cleaner.py +108 -0
  31. table2db-0.1.0/tests/test_cli.py +68 -0
  32. table2db-0.1.0/tests/test_converter.py +223 -0
  33. table2db-0.1.0/tests/test_describe.py +92 -0
  34. table2db-0.1.0/tests/test_island_detector.py +55 -0
  35. table2db-0.1.0/tests/test_loader.py +156 -0
  36. table2db-0.1.0/tests/test_loaders.py +58 -0
  37. table2db-0.1.0/tests/test_models.py +59 -0
  38. table2db-0.1.0/tests/test_reader.py +127 -0
  39. table2db-0.1.0/tests/test_relator.py +107 -0
  40. table2db-0.1.0/tests/test_structure.py +214 -0
  41. table2db-0.1.0/tests/test_typer.py +111 -0
table2db-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 swenyang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,451 @@
1
+ Metadata-Version: 2.4
2
+ Name: table2db
3
+ Version: 0.1.0
4
+ Summary: Convert tabular files (Excel, CSV, TSV) into clean, queryable SQLite databases
5
+ Author: swenyang
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/swenyang/table2db
8
+ Project-URL: Repository, https://github.com/swenyang/table2db
9
+ Project-URL: Issues, https://github.com/swenyang/table2db/issues
10
+ Keywords: excel,sqlite,csv,table,database,etl,data-cleaning
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.10
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Topic :: Database
20
+ Classifier: Topic :: Office/Business :: Financial :: Spreadsheet
21
+ Requires-Python: >=3.10
22
+ Description-Content-Type: text/markdown
23
+ License-File: LICENSE
24
+ Requires-Dist: openpyxl>=3.1.0
25
+ Requires-Dist: xlrd>=2.0.0
26
+ Provides-Extra: dev
27
+ Requires-Dist: pytest>=7.0; extra == "dev"
28
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
29
+ Dynamic: license-file
30
+
31
+ # table2db
32
+
33
+ Convert messy Excel files into clean, queryable SQLite databases — merged cells, multi-level headers, subtotals, mixed types, and all.
34
+
35
+ ## Installation
36
+
37
+ ```bash
38
+ pip install table2db
39
+ ```
40
+
41
+ Requires Python 3.10+. Dependencies (`openpyxl`, `xlrd`) are installed automatically.
42
+
43
+ ## Quick Start
44
+
45
+ ### As a Library
46
+
47
+ ```python
48
+ from table2db import TableConverter
49
+
50
+ converter = TableConverter()
51
+
52
+ # Use as a context manager — temp DB is auto-cleaned on exit
53
+ with converter.convert("sales_report.xlsx") as result:
54
+     print(result.db_path)       # path to the SQLite file
55
+     print(result.tables)        # list of TableInfo objects
56
+     print(result.relationships) # detected foreign keys
57
+     print(result.warnings)      # any processing warnings
58
+
59
+ # Query with your favorite SQLite tool
60
+ import sqlite3
61
+ conn = sqlite3.connect(result.db_path)
62
+ rows = conn.execute("SELECT * FROM sheet1 LIMIT 10").fetchall()
63
+
64
+ # From a file-like object (e.g., FastAPI UploadFile)
65
+ import io
66
+ with open("data.xlsx", "rb") as f:
67
+     stream = io.BytesIO(f.read())
68
+ with converter.convert(stream, file_name="data.xlsx") as result:
69
+     print(result.db_path)
70
+ ```
71
+
72
+ To persist the database to a specific path:
73
+
74
+ ```python
75
+ from table2db import TableConverter, SqliteLoader
76
+
77
+ converter = TableConverter()
78
+ loader = SqliteLoader(output_path="output.db")
79
+ result = converter.convert("sales_report.xlsx", loader=loader)
80
+
81
+ # result.db_path == "output.db", file persists after the program exits
82
+ print(f"Created {len(result.tables)} tables")
83
+ ```
84
+
85
+ ### As a CLI
86
+
87
+ ```bash
88
+ # Convert an Excel file to SQLite
89
+ table2db convert report.xlsx -o report.db
90
+
91
+ # Convert with a summary printed to stdout
92
+ table2db convert report.xlsx -o report.db --summary
93
+
94
+ # Describe an existing database (generates LLM-friendly Markdown)
95
+ table2db describe report.db
96
+
97
+ # Save the description to a file
98
+ table2db describe report.db -o report_summary.md
99
+ ```
100
+
101
+ **CLI options for `convert`:**
102
+
103
+ | Flag | Description |
104
+ |------|-------------|
105
+ | `-o`, `--output` | Output SQLite path (default: `<input_name>.db` in current directory) |
106
+ | `--summary` | Print a Markdown summary after conversion |
107
+ | `--sample-rows N` | Number of sample rows in the summary (default: 3) |
108
+ | `--type-threshold F` | Type inference majority threshold (default: 0.8) |
109
+ | `--fk-threshold F` | FK detection confidence threshold (default: 0.8) |
110
+
111
+ **CLI options for `describe`:**
112
+
113
+ | Flag | Description |
114
+ |------|-------------|
115
+ | `-o`, `--output` | Output Markdown file path (default: stdout) |
116
+ | `--sample-rows N` | Number of sample rows to include (default: 3) |
117
+
118
+ ## Features
119
+
120
+ | Category | Feature |
121
+ |----------|---------|
122
+ | **File Formats** | `.xlsx` / `.xlsm` (openpyxl), `.xls` (xlrd), `.csv` / `.tsv` (built-in csv) |
123
+ | **Multi-Sheet** | Each sheet → separate SQLite table; auto name normalization and conflict handling |
124
+ | **Multi-Table in One Sheet** | Auto-detects multiple tables via island detection (connected-component analysis); splits into separate DB tables |
125
+ | **Merged Cells** | Auto-detect and expand; top-left value fills all merged cells (labels and numeric subtotals) |
126
+ | **Header Detection** | Auto-locate header row (supports offset / non-A1 start); multi-level headers merged as `level1_level2` |
127
+ | **Subtotal Filtering** | Three-signal weighted detection (keyword + sum verification + style); built-in Chinese & English keywords, extensible |
128
+ | **Type Inference** | Majority vote (≥80%); INTEGER/REAL merged counting; supports `INTEGER`, `REAL`, `TEXT`, `DATE`; numbers-as-text auto-correction; bool → integer |
129
+ | **Error Values** | `#REF!`, `#N/A`, `#DIV/0!`, etc. → `NULL` |
130
+ | **Formulas** | Takes computed values; warns on uncalculated formulas |
131
+ | **Primary Key Inference** | Column name pattern + uniqueness + non-null detection |
132
+ | **Cross-Sheet FK** | Column name matching + value containment (≥90%), cardinality protection (≥10 distinct values); FK written to SQLite DDL + `_meta` table |
133
+ | **Hidden Rows/Cols/Sheets** | Hidden sheets skipped by default (configurable); hidden rows/cols data preserved but flagged in metadata |
134
+ | **Data Cleaning** | Auto-remove empty rows; deduplicate identical rows |
135
+ | **DB Summary** | Generates LLM-friendly Markdown with table structure, sample data, and column stats — no LLM needed |
136
+ | **Async Support** | `convert_async()` / `process_async()` for non-blocking FastAPI integration |
137
+ | **Pluggable Loaders** | `BaseLoader` ABC; default `SqliteLoader` with optional output path; bring your own DB backend |
138
+ | **Lifecycle Management** | Temp SQLite file + context manager for auto-cleanup |
139
+ | **Error Handling** | Full exception hierarchy: `FileReadError`, `NoDataError`, `UnsupportedFormatError`, `SchemaError` |
140
+ | **Observability** | Per-stage logging via Python `logging`; warnings collection during processing |
141
+
142
+ ### Explicitly Not Supported
143
+
144
+ | Exclusion | Note |
145
+ |-----------|------|
146
+ | `.xlsb` format | Binary Excel; would require extra dependencies |
147
+ | Password-protected files | Raises `FileReadError` |
148
+
149
+ ## Architecture
150
+
151
+ table2db processes files through a **6-stage pipeline**:
152
+
153
+ ```
154
+ Input File (.xlsx / .xlsm / .xls / .csv / .tsv)
155
+
156
+ ├─ Stage 1: Raw Reading — Parse cells, merged regions, hidden state
157
+ ├─ Stage 2: Structure Detection — Island detection, find headers, multi-table split
158
+ ├─ Stage 3: Data Cleaning — Strip subtotals, empty rows, deduplicate
159
+ ├─ Stage 4: Type Inference — Majority-vote column types, coerce values
160
+ ├─ Stage 5: Relationship Inference — Detect PKs and cross-sheet FKs
161
+ └─ Stage 6: Database Loading — Create tables, insert data, add constraints
162
+
163
+
164
+ ConversionResult (SQLite DB + metadata + warnings)
165
+ ```
166
+
167
+ Stages 1–5 are handled by `TableConverter.process()`, which returns an intermediate `WorkbookData` object. Stage 6 is handled by a pluggable `BaseLoader` implementation. Calling `TableConverter.convert()` runs all six stages end-to-end.
168
+
169
+ ## Pluggable Loaders
170
+
171
+ The default loader writes to SQLite, but you can implement your own by subclassing `BaseLoader`:
172
+
173
+ ### Using SqliteLoader
174
+
175
+ ```python
176
+ from table2db import TableConverter, SqliteLoader
177
+
178
+ converter = TableConverter()
179
+ loader = SqliteLoader(output_path="my_data.db")
180
+ result = converter.convert("input.xlsx", loader=loader)
181
+ ```
182
+
183
+ ### Writing a Custom Loader
184
+
185
+ ```python
186
+ from table2db import BaseLoader, ConversionResult, WorkbookData
187
+
188
+ class PostgresLoader(BaseLoader):
189
+     def __init__(self, connection_string: str):
190
+         self.connection_string = connection_string
191
+
192
+     def load(self, wb: WorkbookData) -> ConversionResult:
193
+         # 1. Connect to your database
194
+         # 2. Create tables from wb.tables
195
+         # 3. Insert data
196
+         # 4. Return a ConversionResult
197
+         ...
198
+
199
+ # Use with TableConverter
200
+ converter = TableConverter()
201
+ loader = PostgresLoader("postgresql://localhost/mydb")
202
+ result = converter.convert("input.xlsx", loader=loader)
203
+ ```
204
+
205
+ ### Two-Phase Usage (Process + Load Separately)
206
+
207
+ ```python
208
+ converter = TableConverter()
209
+
210
+ # Stages 1-5 only: parse, detect structure, clean, infer types & relationships
211
+ workbook_data, warnings = converter.process("input.xlsx")
212
+
213
+ # Inspect intermediate results before loading
214
+ for sheet in workbook_data.sheets:
215
+     print(f"{sheet.name}: {len(sheet.headers)} columns, {len(sheet.rows)} rows")
216
+
217
+ # Stage 6: load into SQLite (or any loader)
218
+ loader = SqliteLoader(output_path="output.db")
219
+ result = loader.load(workbook_data)
220
+ ```
221
+
222
+ ## Async Support
223
+
224
+ For non-blocking usage in FastAPI or other async frameworks:
225
+
226
+ ```python
227
+ # Async support for FastAPI / asyncio
228
+ result = await converter.convert_async("data.xlsx")
229
+
230
+ wb, warnings = await converter.process_async("data.xlsx")
231
+ ```
232
+
233
+ Both methods use `asyncio.to_thread()` internally to avoid blocking the event loop.
234
+
235
+ ## DB Summary (LLM Context)
236
+
237
+ Generate a Markdown summary of the converted database — useful for providing schema context to LLMs:
238
+
239
+ ```python
240
+ from table2db import TableConverter
241
+ from table2db.describe import generate_db_summary
242
+
243
+ with TableConverter().convert("data.xlsx") as result:
244
+     summary = generate_db_summary(result, sample_rows=3)
245
+     print(summary)
246
+ ```
247
+
248
+ Or via the CLI:
249
+
250
+ ```bash
251
+ table2db describe my_database.db --sample-rows 5
252
+ ```
253
+
254
+ The summary includes table schemas, column types, sample rows, and basic column statistics.
255
+
256
+ ## Configuration
257
+
258
+ All configuration is passed to the `TableConverter` constructor:
259
+
260
+ | Parameter | Type | Default | Description |
261
+ |-----------|------|---------|-------------|
262
+ | `subtotal_keywords` | `list[str] \| None` | `None` | Additional keywords for subtotal/total row detection. Built-in keywords (Chinese & English) are always active. |
263
+ | `type_threshold` | `float` | `0.8` | Minimum fraction of non-null values that must share a type for the column to be inferred as that type. Range: 0.0–1.0. |
264
+ | `skip_hidden_sheets` | `bool` | `True` | Whether to skip hidden sheets during processing. |
265
+ | `fk_confidence_threshold` | `float` | `0.8` | Minimum confidence for cross-sheet foreign key detection. Higher values reduce false positives. |
266
+ | `header_min_fill_ratio` | `float` | `0.5` | Min ratio of non-empty cells for header row detection. Lower values = more lenient. |
267
+ | `header_min_string_ratio` | `float` | `0.7` | Min ratio of string cells for header row detection. Lower values = more lenient. |
268
+
269
+ ```python
270
+ converter = TableConverter(
271
+     subtotal_keywords=["subtotal", "grand total"],
272
+     type_threshold=0.7,
273
+     skip_hidden_sheets=False,
274
+     fk_confidence_threshold=0.9,
275
+     header_min_fill_ratio=0.4,
276
+     header_min_string_ratio=0.6,
277
+ )
278
+ ```
279
+
280
+ ## What It Handles
281
+
282
+ ### Structural Complexity (fully handled)
283
+
284
+ Merged cells (labels & numeric) · multi-level headers · offset data regions (not starting at A1) · **multiple tables in one sheet** (island detection) · multi-sheet → multi-table · cross-sheet foreign keys · hidden rows, columns, and sheets.
285
+
286
+ ### Data-Level Complexity (key focus)
287
+
288
+ Formula cells (computed values) · uncalculated formulas (with warnings) · Excel error values (`#REF!`, `#N/A`, etc.) · empty rows/columns interspersed in data · manual subtotal/total rows · mixed-type columns · numbers stored as text · inconsistent date formats · boolean values · implicit type coercion · duplicate rows.
289
+
290
+ ### Formatting (ignored — does not affect data)
291
+
292
+ Data validation (dropdowns) · conditional formatting · named ranges · comments and notes · embedded objects (charts, images).
293
+
294
+ ## API Reference
295
+
296
+ ### `TableConverter`
297
+
298
+ The main entry point for converting Excel files.
299
+
300
+ ```python
301
+ class TableConverter:
302
+     def __init__(
303
+         self,
304
+         subtotal_keywords: list[str] | None = None,
305
+         type_threshold: float = 0.8,
306
+         skip_hidden_sheets: bool = True,
307
+         fk_confidence_threshold: float = 0.8,
308
+         header_min_fill_ratio: float = 0.5,
309
+         header_min_string_ratio: float = 0.7,
310
+     ): ...
311
+
312
+     def process(
313
+         self, file_path: str | BinaryIO, *, file_name: str | None = None
314
+     ) -> tuple[WorkbookData, list[str]]:
315
+         """Run stages 1-5. Returns (WorkbookData, warnings).
316
+         Accepts a file path or file-like object (BytesIO).
317
+         file_name is required for streams to detect format."""
318
+
319
+     def convert(
320
+         self, file_path: str | BinaryIO, *, file_name: str | None = None,
321
+         loader: BaseLoader | None = None,
322
+     ) -> ConversionResult:
323
+         """Run the full 6-stage pipeline. Uses SqliteLoader by default."""
324
+
325
+     async def convert_async(
326
+         self, file_path: str | BinaryIO, *, file_name: str | None = None,
327
+         loader: BaseLoader | None = None,
328
+     ) -> ConversionResult:
329
+         """Async version of convert(). Uses asyncio.to_thread()."""
330
+
331
+     async def process_async(
332
+         self, file_path: str | BinaryIO, *, file_name: str | None = None
333
+     ) -> tuple[WorkbookData, list[str]]:
334
+         """Async version of process(). Uses asyncio.to_thread()."""
335
+ ```
336
+
337
+ ### `ConversionResult`
338
+
339
+ Returned by `convert()` and loader `load()` methods. Supports context manager protocol for automatic cleanup.
340
+
341
+ ```python
342
+ @dataclass
343
+ class ConversionResult:
344
+     db_path: str                    # Path to the SQLite database
345
+     tables: list[TableInfo]         # Table metadata
346
+     relationships: list[ForeignKey] # Detected foreign keys
347
+     warnings: list[str]             # Processing warnings
348
+     metadata: dict                  # Additional metadata
349
+
350
+     def cleanup(self) -> None:
351
+         """Delete the database file (for temp DBs)."""
352
+
353
+     def __enter__(self) -> ConversionResult: ...
354
+     def __exit__(self, *args) -> None: ...
355
+ ```
356
+
357
+ ### `SqliteLoader`
358
+
359
+ The built-in loader that writes to a SQLite database.
360
+
361
+ ```python
362
+ class SqliteLoader(BaseLoader):
363
+     def __init__(self, output_path: str | None = None):
364
+         """If output_path is None, creates a temporary file."""
365
+
366
+     def load(self, wb: WorkbookData) -> ConversionResult: ...
367
+ ```
368
+
369
+ ### `BaseLoader`
370
+
371
+ Abstract base class for implementing custom loaders.
372
+
373
+ ```python
374
+ class BaseLoader(ABC):
375
+     @abstractmethod
376
+     def load(self, wb: WorkbookData) -> ConversionResult: ...
377
+ ```
378
+
379
+ ### Key Data Classes
380
+
381
+ - **`WorkbookData`** — Intermediate representation of the parsed workbook (stages 1–5 output).
382
+ - **`TableInfo`** — Metadata for a single converted table (name, columns, row count, `confidence` float from island detection — 1.0 for single-table sheets).
383
+ - **`ForeignKey`** — A detected cross-sheet foreign key relationship.
384
+
385
+ ### Exceptions
386
+
387
+ All exceptions inherit from `ExcelToDbError`:
388
+
389
+ | Exception | When Raised |
390
+ |-----------|-------------|
391
+ | `ExcelToDbError` | Base exception for all library errors |
392
+ | `FileReadError` | File not found, permission denied, password-protected, or corrupt |
393
+ | `NoDataError` | Sheet or workbook contains no usable data |
394
+ | `UnsupportedFormatError` | File format is not `.xlsx`, `.xlsm`, `.xls`, `.csv`, or `.tsv` |
395
+ | `SchemaError` | Cannot construct a valid schema (e.g., no columns detected) |
396
+
397
+ ## Development
398
+
399
+ ```bash
400
+ git clone https://github.com/swenyang/table2db.git
401
+ cd table2db
402
+ pip install -e ".[dev]"
403
+ pytest tests/ -v
404
+ ```
405
+
406
+ ### Test Fixtures & Outputs
407
+
408
+ Test fixtures (Excel/CSV/TSV) are auto-generated when you run `pytest` for the first time. To manually regenerate:
409
+
410
+ ```bash
411
+ # Regenerate test fixture files (tests/fixtures/)
412
+ python tests/generate_fixtures.py
413
+
414
+ # Regenerate output DBs and Markdown summaries (tests/output_dbs/)
415
+ python tests/generate_outputs.py
416
+ ```
417
+
418
+ Both directories are in `.gitignore` since they are derived artifacts.
419
+
420
+ ### Project Structure
421
+
422
+ ```
423
+ table2db/ # project root
424
+ ├── pyproject.toml
425
+ ├── README.md
426
+ ├── table2db/ # package
427
+ │ ├── __init__.py # Public API exports
428
+ │ ├── converter.py # TableConverter
429
+ │ ├── models.py # Data classes (WorkbookData, TableInfo, etc.)
430
+ │ ├── errors.py # Exception hierarchy
431
+ │ ├── loaders/
432
+ │ │ ├── base.py # BaseLoader ABC
433
+ │ │ └── sqlite_loader.py # SqliteLoader
434
+ │ ├── pipeline/
435
+ │ │ ├── reader.py # Stage 1: Raw Reading
436
+ │ │ ├── structure.py # Stage 2: Structure Detection
437
+ │ │ ├── cleaner.py # Stage 3: Data Cleaning
438
+ │ │ ├── typer.py # Stage 4: Type Inference
439
+ │ │ ├── relator.py # Stage 5: Relationship Inference
440
+ │ │ └── island_detector.py # Multi-table detection
441
+ │ ├── describe.py # DB summary generation
442
+ │ └── cli.py # CLI entry point
443
+ └── tests/
444
+ ├── conftest.py
445
+ ├── generate_fixtures.py # Auto-generates test fixtures
446
+ └── test_*.py
447
+ ```
448
+
449
+ ## License
450
+
451
+ MIT