datablade 0.0.0__tar.gz → 0.0.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {datablade-0.0.0 → datablade-0.0.6}/LICENSE +20 -20
  2. datablade-0.0.6/PKG-INFO +406 -0
  3. datablade-0.0.6/pyproject.toml +87 -0
  4. datablade-0.0.6/readme.md +343 -0
  5. {datablade-0.0.0 → datablade-0.0.6}/setup.cfg +4 -4
  6. datablade-0.0.6/src/datablade/__init__.py +49 -0
  7. datablade-0.0.6/src/datablade/blade.py +322 -0
  8. datablade-0.0.6/src/datablade/core/__init__.py +28 -0
  9. datablade-0.0.6/src/datablade/core/frames.py +23 -0
  10. datablade-0.0.6/src/datablade/core/json.py +5 -0
  11. datablade-0.0.6/src/datablade/core/lists.py +5 -0
  12. datablade-0.0.6/src/datablade/core/messages.py +23 -0
  13. datablade-0.0.6/src/datablade/core/strings.py +5 -0
  14. datablade-0.0.6/src/datablade/core/zip.py +5 -0
  15. datablade-0.0.6/src/datablade/dataframes/__init__.py +51 -0
  16. datablade-0.0.6/src/datablade/dataframes/frames.py +585 -0
  17. datablade-0.0.6/src/datablade/dataframes/readers.py +1367 -0
  18. datablade-0.0.6/src/datablade/docs/ARCHITECTURE.md +102 -0
  19. datablade-0.0.6/src/datablade/docs/OBJECT_REGISTRY.md +194 -0
  20. datablade-0.0.6/src/datablade/docs/README.md +57 -0
  21. datablade-0.0.6/src/datablade/docs/TESTING.md +37 -0
  22. datablade-0.0.6/src/datablade/docs/USAGE.md +409 -0
  23. datablade-0.0.6/src/datablade/docs/__init__.py +87 -0
  24. datablade-0.0.6/src/datablade/docs/__main__.py +6 -0
  25. datablade-0.0.6/src/datablade/io/__init__.py +15 -0
  26. datablade-0.0.6/src/datablade/io/json.py +70 -0
  27. datablade-0.0.6/src/datablade/io/zip.py +111 -0
  28. datablade-0.0.6/src/datablade/registry.py +581 -0
  29. datablade-0.0.6/src/datablade/sql/__init__.py +56 -0
  30. datablade-0.0.6/src/datablade/sql/bulk_load.py +665 -0
  31. datablade-0.0.6/src/datablade/sql/ddl.py +402 -0
  32. datablade-0.0.6/src/datablade/sql/ddl_pyarrow.py +411 -0
  33. datablade-0.0.6/src/datablade/sql/dialects.py +12 -0
  34. datablade-0.0.6/src/datablade/sql/quoting.py +44 -0
  35. datablade-0.0.6/src/datablade/sql/schema_spec.py +65 -0
  36. datablade-0.0.6/src/datablade/sql/sqlserver.py +390 -0
  37. datablade-0.0.6/src/datablade/utils/__init__.py +38 -0
  38. datablade-0.0.6/src/datablade/utils/lists.py +32 -0
  39. datablade-0.0.6/src/datablade/utils/logging.py +204 -0
  40. datablade-0.0.6/src/datablade/utils/messages.py +29 -0
  41. datablade-0.0.6/src/datablade/utils/strings.py +249 -0
  42. datablade-0.0.6/src/datablade.egg-info/PKG-INFO +406 -0
  43. datablade-0.0.6/src/datablade.egg-info/SOURCES.txt +51 -0
  44. datablade-0.0.6/src/datablade.egg-info/requires.txt +36 -0
  45. datablade-0.0.6/tests/test_dataframes.py +380 -0
  46. datablade-0.0.6/tests/test_integration.py +230 -0
  47. datablade-0.0.6/tests/test_io.py +121 -0
  48. datablade-0.0.6/tests/test_readers.py +491 -0
  49. datablade-0.0.6/tests/test_registry.py +118 -0
  50. datablade-0.0.6/tests/test_sql.py +1364 -0
  51. datablade-0.0.6/tests/test_utils.py +222 -0
  52. datablade-0.0.0/PKG-INFO +0 -13
  53. datablade-0.0.0/pyproject.toml +0 -3
  54. datablade-0.0.0/setup.py +0 -12
  55. datablade-0.0.0/src/datablade/__init__.py +0 -1
  56. datablade-0.0.0/src/datablade/core/__init__.py +0 -7
  57. datablade-0.0.0/src/datablade/core/frames.py +0 -236
  58. datablade-0.0.0/src/datablade/core/json.py +0 -10
  59. datablade-0.0.0/src/datablade/core/lists.py +0 -10
  60. datablade-0.0.0/src/datablade/core/messages.py +0 -11
  61. datablade-0.0.0/src/datablade/core/strings.py +0 -43
  62. datablade-0.0.0/src/datablade/core/zip.py +0 -24
  63. datablade-0.0.0/src/datablade.egg-info/PKG-INFO +0 -13
  64. datablade-0.0.0/src/datablade.egg-info/SOURCES.txt +0 -16
  65. datablade-0.0.0/src/datablade.egg-info/requires.txt +0 -5
  66. {datablade-0.0.0 → datablade-0.0.6}/src/datablade.egg-info/dependency_links.txt +0 -0
  67. {datablade-0.0.0 → datablade-0.0.6}/src/datablade.egg-info/top_level.txt +0 -0
{datablade-0.0.0 → datablade-0.0.6}/LICENSE
@@ -1,21 +1,21 @@
- MIT License
-
- Copyright (c) 2024 Brent Carpenetti
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ MIT License
+
+ Copyright (c) 2024 Brent Carpenetti
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
datablade-0.0.6/PKG-INFO
@@ -0,0 +1,406 @@
+ Metadata-Version: 2.4
+ Name: datablade
+ Version: 0.0.6
+ Summary: datablade is a suite of functions to provide standard syntax across data engineering projects.
+ Author-email: Brent Carpenetti <brentwc.git@pm.me>
+ License: MIT License
+
+ Copyright (c) 2024 Brent Carpenetti
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pandas
+ Requires-Dist: pyarrow
+ Requires-Dist: numpy
+ Requires-Dist: openpyxl
+ Requires-Dist: requests
+ Provides-Extra: performance
+ Requires-Dist: polars; extra == "performance"
+ Requires-Dist: psutil; extra == "performance"
+ Provides-Extra: test
+ Requires-Dist: pytest>=7.0.0; extra == "test"
+ Requires-Dist: pytest-cov>=4.0.0; extra == "test"
+ Requires-Dist: pytest-mock>=3.10.0; extra == "test"
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
+ Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
+ Requires-Dist: polars; extra == "dev"
+ Requires-Dist: psutil; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: flake8; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
+ Requires-Dist: isort; extra == "dev"
+ Provides-Extra: all
+ Requires-Dist: polars; extra == "all"
+ Requires-Dist: psutil; extra == "all"
+ Requires-Dist: pytest>=7.0.0; extra == "all"
+ Requires-Dist: pytest-cov>=4.0.0; extra == "all"
+ Requires-Dist: pytest-mock>=3.10.0; extra == "all"
+ Requires-Dist: black; extra == "all"
+ Requires-Dist: flake8; extra == "all"
+ Requires-Dist: mypy; extra == "all"
+ Requires-Dist: isort; extra == "all"
+ Dynamic: license-file
+
+ # datablade
+
+ [![Python 3.12+](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+ **datablade** is a small, single-machine Python toolkit for data engineers who need reliable “file → DataFrame/Parquet → SQL DDL” workflows.
+
+ It focuses on:
+
+ - Reading common file formats with memory-aware heuristics
+ - Streaming large files in chunks (without concatenating)
+ - Normalizing DataFrame columns for downstream systems
+ - Generating `CREATE TABLE` DDL across a small set of SQL dialects
+ - Producing bulk-load commands (and executing BCP for SQL Server)
+
+ ## What datablade Does
+
+ datablade helps data engineers:
+
+ - **Load data efficiently** from common file formats with automatic memory heuristics
+ - **Standardize data cleaning** with consistent column naming and type inference
+ - **Generate database schemas** for multiple SQL dialects from DataFrames or Parquet schemas
+ - **Handle datasets that don't fit in memory** using chunked iteration and optional Polars acceleration
+ - **Work across databases** with cross-dialect DDL and bulk-load command generation
+ - **Maintain data quality** with built-in validation and logging
+
+ ## When to Use datablade
+
+ datablade is ideal for:
+
+ ✅ **ETL/ELT Pipelines** - Building reproducible data ingestion workflows across multiple source formats
+
+ ✅ **Multi-Database Projects** - Deploying the same schema to SQL Server, PostgreSQL, MySQL, or DuckDB
+
+ ✅ **Large File Processing** - Streaming CSV/TSV/TXT/Parquet without concatenating
+
+ ✅ **Data Lake to Warehouse** - Converting raw files to Parquet with optimized schemas
+
+ ✅ **Ad-hoc Data Analysis** - Quickly exploring and preparing datasets with consistent patterns
+
+ ✅ **Legacy System Integration** - Standardizing messy column names and data types from external sources
+
+ ## When Not to Use datablade
+
+ - Real-time streaming ingestion (Kafka, Spark Structured Streaming)
+ - Distributed compute / cluster execution (Spark, Dask)
+ - Warehouse-native transformations and modeling (dbt)
+ - A full-featured schema migration tool (Alembic, Flyway)
+ - Direct database connectivity/transactions (datablade generates SQL; it does not manage connections)
+
+ ## Installation
+
+ ```bash
+ pip install datablade
+ ```
+
+ **Optional dependencies:**
+
+ ```bash
+ # For high-performance file reading with Polars
+ pip install "datablade[performance]"
+
+ # For testing
+ pip install "datablade[test]"
+
+ # For development (includes testing + lint/format tooling)
+ pip install "datablade[dev]"
+
+ # All optional dependencies
+ pip install "datablade[all]"
+ ```
+
+ ## Features
+
+ datablade provides four main modules:
+
+ ### 📊 `datablade.dataframes`
+
+ DataFrame operations and transformations:
+
+ - Clean and normalize DataFrame columns
+ - Auto-detect and convert data types
+ - Generate optimized Parquet schemas
+ - Convert pandas DataFrames to PyArrow tables
+ - Generate multi-dialect SQL DDL statements
+ - **Memory-aware file reading** with automatic chunking
+ - **Polars integration** for high-performance large file processing
+ - Partitioned Parquet writing for datasets that don't fit in memory
+
+ ### 🌐 `datablade.io`
+
+ Input/output operations for external data:
+
+ - Fetch JSON data from URLs
+ - Download and extract ZIP files
+
+ ### 🛠️ `datablade.utils`
+
+ General utility functions:
+
+ - SQL name quoting (see the sketch below)
+ - Path standardization
+ - List flattening
+ - **Configurable logging** with Python logging module
+
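+ The quoting helper can be used on its own. A minimal sketch (the bracket-style
+ output in the comment is an assumption, not confirmed output; see the USAGE doc
+ for specifics):
+
+ ```python
+ from datablade.utils import sql_quotename
+
+ # Quote an identifier so spaces or reserved words are safe to embed in SQL:
+ quoted = sql_quotename("My Table")
+ print(quoted)  # assumed output in the T-SQL style: [My Table]
+ ```
+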
+ ### 🗄️ `datablade.sql`
+
+ Multi-dialect SQL utilities:
+
+ - **Multi-dialect support**: SQL Server, PostgreSQL, MySQL, DuckDB
+ - Dialect-aware identifier quoting
+ - CREATE TABLE generation for all dialects (from pandas DataFrames)
+ - CREATE TABLE generation from Parquet schemas (schema-only, via PyArrow; see the sketch below)
+ - Optional `schema_spec` overrides for column types, nullability, and string sizing
+ - Bulk loading helpers:
+   - SQL Server: executes `bcp` via subprocess
+   - PostgreSQL/MySQL/DuckDB: returns command strings you can run in your environment
+
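+ Because the Parquet DDL path is schema-only, you can preview which columns the
+ generator would drop using plain PyArrow. A sketch (pure PyArrow, not a
+ datablade API; `events.parquet` is a placeholder path):
+
+ ```python
+ import pyarrow.parquet as pq
+ import pyarrow.types as pat
+
+ # Read only the schema; no row data is loaded.
+ schema = pq.read_schema("events.parquet")
+
+ # Nested types (struct/list/map/union) are the ones dropped with a warning.
+ nested = [field.name for field in schema if pat.is_nested(field.type)]
+ print(nested)
+ ```
+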
+ ## Quick Start
+
+ ```python
+ import logging
+
+ import pandas as pd
+ from datablade import configure_logging, read_file_smart
+ from datablade.dataframes import clean_dataframe_columns, pandas_to_parquet_table
+ from datablade.io import get_json
+ from datablade.utils import sql_quotename
+ from datablade.sql import Dialect, generate_create_table, generate_create_table_from_parquet
+
+ # Configure logging
+ configure_logging(level=logging.INFO, log_file="datablade.log")
+
+ # Read a file into a single DataFrame (materializes)
+ df = read_file_smart('large_dataset.csv', verbose=True)
+
+ # Clean DataFrame
+ df = clean_dataframe_columns(df, verbose=True)
+
+ # Convert to Parquet
+ table = pandas_to_parquet_table(df, convert=True)
+
+ # Generate SQL DDL for multiple dialects
+ sql_sqlserver = generate_create_table(df, table='my_table', dialect=Dialect.SQLSERVER)
+ sql_postgres = generate_create_table(df, table='my_table', dialect=Dialect.POSTGRES)
+
+ # Generate SQL DDL directly from an existing Parquet schema (no data materialization)
+ # Note: nested Parquet types (struct/list/map/union) are dropped with a warning.
+ ddl_from_parquet = generate_create_table_from_parquet(
+     "events.parquet",
+     table="events",
+     dialect=Dialect.POSTGRES,
+ )
+
+ # Fetch JSON data
+ data = get_json('https://api.example.com/data.json')
+ ```
+
+ Most file path parameters accept `str` or `pathlib.Path`. To treat case mismatches
+ as errors on case-insensitive filesystems, use `configure_paths(path_strict=True)`.
+
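+ A minimal sketch of that strict mode (this assumes `configure_paths` is exported
+ from the top-level package, like `configure_logging`):
+
+ ```python
+ from datablade import configure_paths
+
+ # Raise instead of silently accepting 'Data.CSV' for 'data.csv'
+ # on case-insensitive filesystems:
+ configure_paths(path_strict=True)
+ ```
+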
+ ### Memory-Aware File Reading
+
+ See the file format support matrix in the bundled USAGE doc:
+
+ ```bash
+ python -m datablade.docs --show USAGE
+ ```
+
+ ```python
+ from datablade.dataframes import (
+     excel_to_parquets,
+     read_file_chunked,
+     read_file_iter,
+     read_file_to_parquets,
+     stream_to_parquets,
+ )
+
+ # Read large files in chunks
+ for chunk in read_file_chunked('huge_file.csv', memory_fraction=0.5):
+     process(chunk)
+
+ # Stream without ever concatenating/materializing
+ for chunk in read_file_iter('huge_file.csv', memory_fraction=0.3, verbose=True):
+     process(chunk)
+
+ # Parquet is also supported for streaming (single .parquet files)
+ for chunk in read_file_iter('huge_file.parquet', memory_fraction=0.3, verbose=True):
+     process(chunk)
+
+ # Excel streaming is available with openpyxl installed (read-only mode)
+ for chunk in read_file_iter('large.xlsx', chunksize=25_000, verbose=True):
+     process(chunk)
+
+ # Partition large files to multiple Parquets
+ files = read_file_to_parquets(
+     'large_file.csv',
+     output_dir='partitioned/',
+     convert_types=True,
+     verbose=True,
+ )
+
+ # Stream to Parquet partitions without materializing
+ files = stream_to_parquets(
+     'large_file.csv',
+     output_dir='partitioned_streamed/',
+     rows_per_file=200_000,
+     convert_types=True,
+     verbose=True,
+ )
+
+ # Excel streaming to Parquet partitions
+ files = excel_to_parquets(
+     'large.xlsx',
+     output_dir='partitioned_excel/',
+     rows_per_file=200_000,
+     convert_types=True,
+     verbose=True,
+ )
+ ```
+
+ ## Blade (Optional Facade)
+
+ The canonical API is module-level functions (for example, `datablade.dataframes.read_file_iter`).
+
+ If you prefer an object-style entrypoint with shared defaults, you can use the optional `Blade` facade:
+
+ ```python
+ from datablade import Blade
+ from datablade.sql import Dialect
+
+ blade = Blade(memory_fraction=0.3, verbose=True, convert_types=True)
+
+ for chunk in blade.iter("huge.csv"):
+     process(chunk)
+
+ files = blade.stream_to_parquets("huge.csv", output_dir="partitioned/")
+
+ # Generate DDL (CREATE TABLE)
+ ddl = blade.create_table_sql(
+     df,
+     table="my_table",
+     dialect=Dialect.POSTGRES,
+ )
+
+ # Generate DDL from an existing Parquet file (schema-only)
+ ddl2 = blade.create_table_sql_from_parquet(
+     "events.parquet",
+     table="events",
+     dialect=Dialect.POSTGRES,
+ )
+ ```
+
+ ## Documentation
+
+ Docs are bundled with the installed package:
+
+ ```bash
+ python -m datablade.docs --list
+ python -m datablade.docs --show USAGE
+ python -m datablade.docs --write-dir ./datablade-docs
+ ```
+
+ After writing docs to disk, open the markdown files locally:
+
+ - README (docs landing page)
+ - USAGE (file reading, streaming, SQL, IO, logging)
+ - TESTING (how to run tests locally)
+ - ARCHITECTURE (pipeline overview)
+ - OBJECT_REGISTRY (registry reference)
+
+ ## Testing
+
+ Run the test suite:
+
+ ```bash
+ # Install with test dependencies
+ pip install -e ".[test]"
+
+ # Run all tests
+ pytest
+
+ # Run with coverage report
+ pytest --cov=datablade --cov-report=html
+ ```
+
+ For detailed testing documentation, use the bundled TESTING doc:
+
+ ```bash
+ python -m datablade.docs --show TESTING
+ ```
+
+ ## Backward Compatibility
+
+ All functions remain importable through the legacy `datablade.core` module:
+
+ ```python
+ # Legacy imports (still supported)
+ from datablade.core.frames import clean_dataframe_columns
+ from datablade.core.json import get
+ ```
+
+ ## Requirements
+
+ **Core dependencies:**
+
+ - pandas
+ - pyarrow
+ - numpy
+ - openpyxl
+ - requests
+
+ **Optional dependencies:**
+
+ - polars (for high-performance file reading)
+ - psutil (for memory-aware operations)
+ - pytest (for testing)
+
+ ## Design choices and limitations
+
+ - **Single-machine focus**: datablade is designed for laptop/VM/server execution, not clusters.
+ - **Streaming vs materializing**:
+   - Use `read_file_iter()` to process arbitrarily large files chunk-by-chunk.
+   - `read_file_smart()` returns a single DataFrame and may still be memory-intensive.
+ - **Chunk concatenation**: the large-file pandas fallback in `read_file_smart()` can
+   temporarily spike memory usage during concat. Use `read_file_iter()` or
+   `return_type="iterator"` to avoid concatenation.
+ - **Polars materialization**: when returning a pandas DataFrame, Polars still
+   collects into memory; use `return_type="polars"` or `"polars_lazy"` to keep
+   Polars frames (see the sketch after this list).
+ - **Parquet support**:
+   - Streaming reads support single `.parquet` files.
+   - Parquet “dataset directories” (Hive partitions / directory-of-parquets) are not a primary target API.
+ - **Parquet → SQL DDL**:
+   - Uses the Parquet schema (PyArrow) without scanning data.
+   - Complex/nested columns (struct/list/map/union) are dropped and logged as warnings.
+ - **DDL scope**: `CREATE TABLE` generation is column/type oriented (no indexes/constraints).
+ - **SQL Server bulk load**: the SQL Server helpers use the `bcp` CLI and require it
+   to be installed and available on PATH. When using `-U`/`-P`, credentials are
+   passed via process args (logs are redacted); prefer `-T` or `-G` where possible.
+
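+ A short sketch of the Polars-native path described above (requires the optional
+ `polars` extra; the `return_type` value is taken from the notes above, but check
+ the USAGE doc for the exact signature):
+
+ ```python
+ import polars as pl
+
+ from datablade import read_file_smart
+
+ # Stay in Polars' lazy engine instead of materializing a pandas DataFrame:
+ lf = read_file_smart("huge_file.csv", return_type="polars_lazy")
+ print(lf.select(pl.len()).collect())  # row count without loading all columns
+ ```
+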
+ ## License
+
+ MIT
datablade-0.0.6/pyproject.toml
@@ -0,0 +1,87 @@
+ [build-system]
+ requires = ["setuptools>=61.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "datablade"
+ dynamic = ["version"]
+ description = "datablade is a suite of functions to provide standard syntax across data engineering projects."
+ readme = { file = "readme.md", content-type = "text/markdown" }
+ requires-python = ">=3.12"
+ license = { file = "LICENSE" }
+ authors = [
+     { name = "Brent Carpenetti", email = "brentwc.git@pm.me" },
+ ]
+ dependencies = [
+     "pandas",
+     "pyarrow",
+     "numpy",
+     "openpyxl",
+     "requests",
+ ]
+
+ [project.optional-dependencies]
+ performance = [
+     "polars",
+     "psutil",
+ ]
+ test = [
+     "pytest>=7.0.0",
+     "pytest-cov>=4.0.0",
+     "pytest-mock>=3.10.0",
+ ]
+ dev = [
+     "pytest>=7.0.0",
+     "pytest-cov>=4.0.0",
+     "pytest-mock>=3.10.0",
+     "polars",
+     "psutil",
+     "black",
+     "flake8",
+     "mypy",
+     "isort",
+ ]
+ all = [
+     "polars",
+     "psutil",
+     "pytest>=7.0.0",
+     "pytest-cov>=4.0.0",
+     "pytest-mock>=3.10.0",
+     "black",
+     "flake8",
+     "mypy",
+     "isort",
+ ]
+
+ [tool.setuptools]
+ include-package-data = true
+
+ [tool.setuptools.package-data]
+ datablade = ["docs/*.md"]
+ "datablade.docs" = ["*.md"]
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
+
+ [tool.setuptools.dynamic]
+ version = { attr = "datablade.__version__" }
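+ # The version is resolved at build time from the package itself; a sketch of
+ # what src/datablade/__init__.py would define (assumed, matching this release):
+ #   __version__ = "0.0.6"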
+
+ [tool.black]
+ line-length = 88
+ target-version = ['py312', 'py313', 'py314']
+ include = '\.pyi?$'
+
+ [tool.isort]
+ profile = "black"
+ line_length = 88
+ multi_line_output = 3
+ include_trailing_comma = true
+ force_grid_wrap = 0
+ use_parentheses = true
+ ensure_newline_before_comments = true
+
+ [tool.mypy]
+ python_version = "3.12"
+ warn_return_any = true
+ warn_unused_configs = true
+ ignore_missing_imports = true