datablade 0.0.0__tar.gz → 0.0.5__tar.gz
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- {datablade-0.0.0 → datablade-0.0.5}/LICENSE +20 -20
- datablade-0.0.5/PKG-INFO +351 -0
- datablade-0.0.5/pyproject.toml +83 -0
- datablade-0.0.5/readme.md +288 -0
- {datablade-0.0.0 → datablade-0.0.5}/setup.cfg +4 -4
- datablade-0.0.5/src/datablade/__init__.py +41 -0
- datablade-0.0.5/src/datablade/blade.py +153 -0
- datablade-0.0.5/src/datablade/core/__init__.py +28 -0
- datablade-0.0.5/src/datablade/core/frames.py +23 -0
- datablade-0.0.5/src/datablade/core/json.py +5 -0
- datablade-0.0.5/src/datablade/core/lists.py +5 -0
- datablade-0.0.5/src/datablade/core/messages.py +23 -0
- datablade-0.0.5/src/datablade/core/strings.py +5 -0
- datablade-0.0.5/src/datablade/core/zip.py +5 -0
- datablade-0.0.5/src/datablade/dataframes/__init__.py +43 -0
- datablade-0.0.5/src/datablade/dataframes/frames.py +485 -0
- datablade-0.0.5/src/datablade/dataframes/readers.py +540 -0
- datablade-0.0.5/src/datablade/io/__init__.py +15 -0
- datablade-0.0.5/src/datablade/io/json.py +33 -0
- datablade-0.0.5/src/datablade/io/zip.py +73 -0
- datablade-0.0.5/src/datablade/sql/__init__.py +32 -0
- datablade-0.0.5/src/datablade/sql/bulk_load.py +405 -0
- datablade-0.0.5/src/datablade/sql/ddl.py +227 -0
- datablade-0.0.5/src/datablade/sql/ddl_pyarrow.py +287 -0
- datablade-0.0.5/src/datablade/sql/dialects.py +10 -0
- datablade-0.0.5/src/datablade/sql/quoting.py +42 -0
- datablade-0.0.5/src/datablade/utils/__init__.py +37 -0
- datablade-0.0.5/src/datablade/utils/lists.py +29 -0
- datablade-0.0.5/src/datablade/utils/logging.py +159 -0
- datablade-0.0.5/src/datablade/utils/messages.py +29 -0
- datablade-0.0.5/src/datablade/utils/strings.py +86 -0
- datablade-0.0.5/src/datablade.egg-info/PKG-INFO +351 -0
- datablade-0.0.5/src/datablade.egg-info/SOURCES.txt +40 -0
- datablade-0.0.5/src/datablade.egg-info/requires.txt +36 -0
- datablade-0.0.5/tests/test_dataframes.py +356 -0
- datablade-0.0.5/tests/test_integration.py +221 -0
- datablade-0.0.5/tests/test_io.py +121 -0
- datablade-0.0.5/tests/test_readers.py +375 -0
- datablade-0.0.5/tests/test_sql.py +977 -0
- datablade-0.0.5/tests/test_utils.py +186 -0
- datablade-0.0.0/PKG-INFO +0 -13
- datablade-0.0.0/pyproject.toml +0 -3
- datablade-0.0.0/setup.py +0 -12
- datablade-0.0.0/src/datablade/__init__.py +0 -1
- datablade-0.0.0/src/datablade/core/__init__.py +0 -7
- datablade-0.0.0/src/datablade/core/frames.py +0 -236
- datablade-0.0.0/src/datablade/core/json.py +0 -10
- datablade-0.0.0/src/datablade/core/lists.py +0 -10
- datablade-0.0.0/src/datablade/core/messages.py +0 -11
- datablade-0.0.0/src/datablade/core/strings.py +0 -43
- datablade-0.0.0/src/datablade/core/zip.py +0 -24
- datablade-0.0.0/src/datablade.egg-info/PKG-INFO +0 -13
- datablade-0.0.0/src/datablade.egg-info/SOURCES.txt +0 -16
- datablade-0.0.0/src/datablade.egg-info/requires.txt +0 -5
- {datablade-0.0.0 → datablade-0.0.5}/src/datablade.egg-info/dependency_links.txt +0 -0
- {datablade-0.0.0 → datablade-0.0.5}/src/datablade.egg-info/top_level.txt +0 -0

{datablade-0.0.0 → datablade-0.0.5}/LICENSE

```diff
@@ -1,21 +1,21 @@
-MIT License
-
-Copyright (c) 2024 Brent Carpenetti
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+MIT License
+
+Copyright (c) 2024 Brent Carpenetti
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
```

datablade-0.0.5/PKG-INFO
ADDED
@@ -0,0 +1,351 @@

```text
Metadata-Version: 2.4
Name: datablade
Version: 0.0.5
Summary: datablade is a suite of functions to provide standard syntax across data engineering projects.
Author-email: Brent Carpenetti <brentwc.git@pm.me>
License: MIT License

Copyright (c) 2024 Brent Carpenetti

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Requires-Python: >=3.12
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: pandas
Requires-Dist: pyarrow
Requires-Dist: numpy
Requires-Dist: openpyxl
Requires-Dist: requests
Provides-Extra: performance
Requires-Dist: polars; extra == "performance"
Requires-Dist: psutil; extra == "performance"
Provides-Extra: test
Requires-Dist: pytest>=7.0.0; extra == "test"
Requires-Dist: pytest-cov>=4.0.0; extra == "test"
Requires-Dist: pytest-mock>=3.10.0; extra == "test"
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
Requires-Dist: polars; extra == "dev"
Requires-Dist: psutil; extra == "dev"
Requires-Dist: black; extra == "dev"
Requires-Dist: flake8; extra == "dev"
Requires-Dist: mypy; extra == "dev"
Requires-Dist: isort; extra == "dev"
Provides-Extra: all
Requires-Dist: polars; extra == "all"
Requires-Dist: psutil; extra == "all"
Requires-Dist: pytest>=7.0.0; extra == "all"
Requires-Dist: pytest-cov>=4.0.0; extra == "all"
Requires-Dist: pytest-mock>=3.10.0; extra == "all"
Requires-Dist: black; extra == "all"
Requires-Dist: flake8; extra == "all"
Requires-Dist: mypy; extra == "all"
Requires-Dist: isort; extra == "all"
Dynamic: license-file
```

# datablade

[Tests](https://github.com/brentwc/data-prep/actions/workflows/test.yml)
[Python 3.12+](https://www.python.org/downloads/)
[License: MIT](https://opensource.org/licenses/MIT)

**datablade** is a small, single-machine Python toolkit for data engineers who need reliable “file → DataFrame/Parquet → SQL DDL” workflows.

It focuses on:

- Reading common file formats with memory-aware heuristics
- Streaming large files in chunks (without concatenating)
- Normalizing DataFrame columns for downstream systems
- Generating `CREATE TABLE` DDL across a small set of SQL dialects
- Producing bulk-load commands (and executing BCP for SQL Server)

## What datablade Does

datablade helps data engineers:

- **Load data efficiently** from common file formats with automatic memory heuristics
- **Standardize data cleaning** with consistent column naming and type inference
- **Generate database schemas** for multiple SQL dialects from DataFrames or Parquet schemas
- **Handle datasets that don't fit in memory** using chunked iteration and optional Polars acceleration
- **Work across databases** with cross-dialect DDL and bulk-load command generation
- **Maintain data quality** with built-in validation and logging

## When to Use datablade

datablade is ideal for:

✅ **ETL/ELT Pipelines** - Building reproducible data ingestion workflows across multiple source formats

✅ **Multi-Database Projects** - Deploying the same schema to SQL Server, PostgreSQL, MySQL, or DuckDB

✅ **Large File Processing** - Streaming CSV/TSV/TXT/Parquet without concatenating

✅ **Data Lake to Warehouse** - Converting raw files to Parquet with optimized schemas

✅ **Ad-hoc Data Analysis** - Quickly exploring and preparing datasets with consistent patterns

✅ **Legacy System Integration** - Standardizing messy column names and data types from external sources

## When datablade is not the right tool

- Real-time streaming ingestion (Kafka, Spark Structured Streaming)
- Distributed compute / cluster execution (Spark, Dask)
- Warehouse-native transformations and modeling (dbt)
- A full-featured schema migration tool (Alembic, Flyway)
- Direct database connectivity/transactions (datablade generates SQL; it does not manage connections)

## Installation

```bash
pip install git+https://github.com/brentwc/data-prep.git
```

**Optional dependencies:**

```bash
# For high-performance file reading with Polars
pip install "git+https://github.com/brentwc/data-prep.git#egg=datablade[performance]"

# For development and testing
pip install "git+https://github.com/brentwc/data-prep.git#egg=datablade[dev]"

# All optional dependencies
pip install "git+https://github.com/brentwc/data-prep.git#egg=datablade[all]"
```

## Features

datablade provides four main modules:

### 📊 `datablade.dataframes`

DataFrame operations and transformations:

- Clean and normalize DataFrame columns
- Auto-detect and convert data types
- Generate optimized Parquet schemas
- Convert pandas DataFrames to PyArrow tables
- Generate multi-dialect SQL DDL statements
- **Memory-aware file reading** with automatic chunking
- **Polars integration** for high-performance large file processing
- Partitioned Parquet writing for datasets that don't fit in memory
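
As a hedged illustration of the cleaning path, using the two calls from the Quick Start below (the exact normalization rules are not documented in this diff, and the assumption that `pandas_to_parquet_table` returns a `pyarrow.Table` is inferred from the feature list):

```python
# Sketch only: exact normalized column names depend on datablade's rules.
import pandas as pd

from datablade.dataframes import clean_dataframe_columns, pandas_to_parquet_table

df = pd.DataFrame({" Order ID ": [1, 2], "Unit Price ($)": ["1.50", "2.75"]})

df = clean_dataframe_columns(df, verbose=True)     # normalize messy headers
table = pandas_to_parquet_table(df, convert=True)  # type-converted PyArrow table

print(table.schema)  # inspect the inferred Parquet schema
```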

### 🌐 `datablade.io`

Input/output operations for external data:

- Fetch JSON data from URLs
- Download and extract ZIP files
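
`get_json` appears in the Quick Start below; the ZIP helper lives in `datablade.io.zip`, but its name is not visible in this diff, so it is sketched here as a clearly hypothetical call:

```python
# get_json is shown in the Quick Start; the commented call is hypothetical --
# check datablade.io.zip for the real download-and-extract helper's name.
from datablade.io import get_json

data = get_json("https://api.example.com/data.json")  # parsed JSON payload

# extract_zip("https://example.com/archive.zip", output_dir="data/")  # hypothetical
```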

### 🛠️ `datablade.utils`

General utility functions:

- SQL name quoting
- Path standardization
- List flattening
- **Configurable logging** via the Python `logging` module
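
A minimal sketch of the two utilities whose names appear elsewhere in this README (`configure_logging` and `sql_quotename`); the assumption that `sql_quotename` takes a bare identifier string is mine, not documented in this diff:

```python
# Sketch: both names are imported in the Quick Start below; sql_quotename's
# exact signature and output format are assumptions, not documented here.
import logging

from datablade import configure_logging
from datablade.utils import sql_quotename

configure_logging(level=logging.DEBUG)  # log_file="..." is also supported

print(sql_quotename("My Table"))  # e.g. a bracketed/quoted SQL identifier
```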

### 🗄️ `datablade.sql`

Multi-dialect SQL utilities:

- **Multi-dialect support**: SQL Server, PostgreSQL, MySQL, DuckDB
- Dialect-aware identifier quoting
- CREATE TABLE generation for all dialects (from pandas DataFrames)
- CREATE TABLE generation from Parquet schemas (schema-only, via PyArrow)
- Bulk loading helpers:
  - SQL Server: executes `bcp` via subprocess
  - PostgreSQL/MySQL/DuckDB: returns command strings you can run in your environment
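
The bulk-load helper names in `datablade.sql.bulk_load` are not shown in this diff; as a hedged sketch of the division of labor described above, here is one plausible shape of the command string the PostgreSQL path might hand back, built by hand:

```python
# Illustration only: datablade's real helpers live in datablade.sql.bulk_load,
# whose names are not visible in this diff. psql's \copy runs client-side, so
# the CSV only needs to exist on the machine running the command.
table = "events"
csv_path = "partitioned/part_000.csv"

cmd = (
    f"psql -c \"\\copy {table} FROM '{csv_path}' "
    f"WITH (FORMAT csv, HEADER true)\""
)

print(cmd)  # review, then run it yourself (e.g. subprocess.run(cmd, shell=True))
```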

## Quick Start

```python
import logging

import pandas as pd

from datablade import configure_logging, read_file_smart
from datablade.dataframes import clean_dataframe_columns, pandas_to_parquet_table
from datablade.io import get_json
from datablade.utils import sql_quotename
from datablade.sql import Dialect, generate_create_table, generate_create_table_from_parquet

# Configure logging
configure_logging(level=logging.INFO, log_file="datablade.log")

# Read a file into a single DataFrame (materializes)
df = read_file_smart('large_dataset.csv', verbose=True)

# Clean DataFrame columns
df = clean_dataframe_columns(df, verbose=True)

# Convert to a Parquet-ready PyArrow table
table = pandas_to_parquet_table(df, convert=True)

# Generate SQL DDL for multiple dialects
sql_sqlserver = generate_create_table(df, table='my_table', dialect=Dialect.SQLSERVER)
sql_postgres = generate_create_table(df, table='my_table', dialect=Dialect.POSTGRES)

# Generate SQL DDL directly from an existing Parquet schema (no data materialization).
# Note: nested Parquet types (struct/list/map/union) are dropped with a warning.
ddl_from_parquet = generate_create_table_from_parquet(
    "events.parquet",
    table="events",
    dialect=Dialect.POSTGRES,
)

# Fetch JSON data
data = get_json('https://api.example.com/data.json')
```

### Memory-Aware File Reading

```python
from datablade.dataframes import read_file_chunked, read_file_iter, read_file_to_parquets, stream_to_parquets

# Read large files in chunks
for chunk in read_file_chunked('huge_file.csv', memory_fraction=0.5):
    process(chunk)

# Stream without ever concatenating/materializing
for chunk in read_file_iter('huge_file.csv', memory_fraction=0.3, verbose=True):
    process(chunk)

# Parquet is also supported for streaming (single .parquet files)
for chunk in read_file_iter('huge_file.parquet', memory_fraction=0.3, verbose=True):
    process(chunk)

# Partition large files to multiple Parquets
files = read_file_to_parquets(
    'large_file.csv',
    output_dir='partitioned/',
    convert_types=True,
    verbose=True,
)

# Stream to Parquet partitions without materializing
files = stream_to_parquets(
    'large_file.csv',
    output_dir='partitioned_streamed/',
    rows_per_file=200_000,
    convert_types=True,
    verbose=True,
)
```

## Blade (Optional Facade)

The canonical API is module-level functions (for example, `datablade.dataframes.read_file_iter`).

If you prefer an object-style entrypoint with shared defaults, you can use the optional `Blade` facade:

```python
from datablade import Blade
from datablade.sql import Dialect

blade = Blade(memory_fraction=0.3, verbose=True, convert_types=True)

for chunk in blade.iter("huge.csv"):
    process(chunk)

files = blade.stream_to_parquets("huge.csv", output_dir="partitioned/")

# Generate DDL (CREATE TABLE); df is any pandas DataFrame, e.g. from read_file_smart
ddl = blade.create_table_sql(
    df,
    table="my_table",
    dialect=Dialect.POSTGRES,
)

# Generate DDL from an existing Parquet file (schema-only)
ddl2 = blade.create_table_sql_from_parquet(
    "events.parquet",
    table="events",
    dialect=Dialect.POSTGRES,
)
```

## Documentation

- [Docs Home](docs/README.md) - Documentation landing page
- [Usage Guide](docs/USAGE.md) - File reading (including streaming), SQL, IO, logging
- [Testing Guide](docs/TESTING.md) - How to run tests locally
- [Test Suite](tests/README.md) - Testing documentation and coverage

## Testing

Run the test suite:

```bash
# Install with test dependencies
pip install -e ".[test]"

# Run all tests
pytest

# Run with coverage report
pytest --cov=datablade --cov-report=html
```

See [tests/README.md](tests/README.md) for detailed testing documentation.

## Backward Compatibility

All functions are available through the legacy `datablade.core` module for backward compatibility:

```python
# Legacy imports (still supported)
from datablade.core.frames import clean_dataframe_columns
from datablade.core.json import get
```
## Requirements

**Core dependencies:**

- pandas
- pyarrow
- numpy
- openpyxl
- requests

**Optional dependencies:**

- polars (for high-performance file reading)
- psutil (for memory-aware operations)
- pytest (for testing)

## Design choices and limitations

- **Single-machine focus**: datablade is designed for laptop/VM/server execution, not clusters.
- **Streaming vs. materializing**:
  - Use `read_file_iter()` to process arbitrarily large files chunk by chunk.
  - `read_file_smart()` returns a single DataFrame and may still be memory-intensive.
- **Parquet support**:
  - Streaming reads support single `.parquet` files.
  - Parquet “dataset directories” (Hive partitions / directory-of-parquets) are not a primary target API.
- **Parquet → SQL DDL**:
  - Uses the Parquet schema (PyArrow) without scanning data (illustrated below).
  - Complex/nested columns (struct/list/map/union) are dropped and logged as warnings.
- **DDL scope**: `CREATE TABLE` generation is column/type oriented (no indexes/constraints).
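
The schema-only point above can be seen with PyArrow directly (this is plain `pyarrow`, not a datablade API): `read_schema` reads only the Parquet footer metadata, never the row groups.

```python
# Plain PyArrow, not datablade: read a Parquet schema without scanning data --
# only the file footer is touched.
import pyarrow.parquet as pq

schema = pq.read_schema("events.parquet")
for field in schema:
    # Nested types (struct/list/map) are the ones datablade drops with a warning.
    print(field.name, field.type)
```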
## License

MIT

datablade-0.0.5/pyproject.toml
ADDED
@@ -0,0 +1,83 @@

```toml
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "datablade"
dynamic = ["version"]
description = "datablade is a suite of functions to provide standard syntax across data engineering projects."
readme = "readme.md"
requires-python = ">=3.12"
license = { file = "LICENSE" }
authors = [
    { name = "Brent Carpenetti", email = "brentwc.git@pm.me" },
]
dependencies = [
    "pandas",
    "pyarrow",
    "numpy",
    "openpyxl",
    "requests",
]

[project.optional-dependencies]
performance = [
    "polars",
    "psutil",
]
test = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-mock>=3.10.0",
]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-mock>=3.10.0",
    "polars",
    "psutil",
    "black",
    "flake8",
    "mypy",
    "isort",
]
all = [
    "polars",
    "psutil",
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-mock>=3.10.0",
    "black",
    "flake8",
    "mypy",
    "isort",
]

[tool.setuptools]
include-package-data = true

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.dynamic]
version = { attr = "datablade.__version__" }

[tool.black]
line-length = 88
target-version = ['py312', 'py313', 'py314']
include = '\.pyi?$'

[tool.isort]
profile = "black"
line_length = 88
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true

[tool.mypy]
python_version = "3.12"
warn_return_any = true
warn_unused_configs = true
ignore_missing_imports = true
```
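
Because `version` is declared dynamic, setuptools imports `datablade.__version__` at build time. A minimal sketch of the matching line in `src/datablade/__init__.py` (that file's 41 lines are not shown in this diff; the value comes from the PKG-INFO above):

```python
# src/datablade/__init__.py (sketch) -- read at build time via
# [tool.setuptools.dynamic]; the value matches Version: 0.0.5 in PKG-INFO.
__version__ = "0.0.5"
```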