dlt-iceberg 0.1.4-py3-none-any.whl → 0.2.0-py3-none-any.whl

This diff shows the changes between the two publicly released package versions as they appear in their public registry.
--- /dev/null
+++ dlt_iceberg/sql_client.py
@@ -0,0 +1,222 @@
+ """
+ DuckDB SQL client for Iceberg tables.
+
+ Provides queryable access to Iceberg tables via DuckDB views using iceberg_scan().
+ This enables pipeline.dataset() to work with the Iceberg destination.
+ """
+
+ import logging
+ from typing import TYPE_CHECKING, Any, List, Tuple
+
+ import duckdb
+ from packaging.version import Version
+
+ from dlt.common.destination.exceptions import DestinationUndefinedEntity
+ from dlt.common.destination.typing import PreparedTableSchema
+ from dlt.destinations.impl.duckdb.sql_client import WithTableScanners
+ from dlt.destinations.impl.duckdb.factory import DuckDbCredentials
+ from dlt.destinations.sql_client import raise_database_error
+
+ if TYPE_CHECKING:
+     from dlt_iceberg.destination_client import IcebergRestClient
+ else:
+     IcebergRestClient = Any
+
+ logger = logging.getLogger(__name__)
+
+
+ class IcebergSqlClient(WithTableScanners):
+     """SQL client that maps Iceberg tables as DuckDB views.
+
+     Creates DuckDB views using iceberg_scan() that point to the Iceberg
+     table metadata, enabling SQL queries over Iceberg tables.
+     """
+
+     def __init__(
+         self,
+         remote_client: "IcebergRestClient",
+         dataset_name: str,
+         cache_db: DuckDbCredentials = None,
+         persist_secrets: bool = False,
+     ) -> None:
+         super().__init__(remote_client, dataset_name, cache_db, persist_secrets=persist_secrets)
+         self.remote_client: "IcebergRestClient" = remote_client
+         self.iceberg_initialized = False
+
+     def can_create_view(self, table_schema: PreparedTableSchema) -> bool:
+         """Check if a view can be created for this table."""
+         # All Iceberg tables can have views created
+         return True
+
+     def should_replace_view(self, view_name: str, table_schema: PreparedTableSchema) -> bool:
+         """Determine if view should be replaced to get fresh data."""
+         # Always replace to get latest snapshot
+         # TODO: Could optimize with configuration option
+         return True
+
+     @raise_database_error
+     def open_connection(self) -> duckdb.DuckDBPyConnection:
+         """Open DuckDB connection and set up for Iceberg access."""
+         with self.credentials.conn_pool._conn_lock:
+             first_connection = self.credentials.conn_pool.never_borrowed
+             super().open_connection()
+
+             if first_connection:
+                 # Set up storage credentials if available
+                 self._setup_storage_credentials()
+
+         return self._conn
+
+     def _setup_storage_credentials(self) -> None:
+         """Set up DuckDB secrets for storage access."""
+         config = self.remote_client.config
+
+         # S3 credentials
+         if config.s3_access_key_id and config.s3_secret_access_key:
+             secret_sql = f"""
+                 CREATE SECRET IF NOT EXISTS iceberg_s3_secret (
+                     TYPE S3,
+                     KEY_ID '{config.s3_access_key_id}',
+                     SECRET '{config.s3_secret_access_key}'
+             """
+             if config.s3_region:
+                 secret_sql += f", REGION '{config.s3_region}'"
+             if config.s3_endpoint:
+                 # Handle endpoint URL
+                 endpoint = config.s3_endpoint
+                 if endpoint.startswith("http://"):
+                     secret_sql += f", ENDPOINT '{endpoint[7:]}', USE_SSL false"
+                 elif endpoint.startswith("https://"):
+                     secret_sql += f", ENDPOINT '{endpoint[8:]}'"
+                 else:
+                     secret_sql += f", ENDPOINT '{endpoint}'"
+             secret_sql += ")"
+
+             try:
+                 self._conn.execute(secret_sql)
+                 logger.info("Created DuckDB S3 secret for Iceberg access")
+             except Exception as e:
+                 logger.warning(f"Failed to create S3 secret: {e}")
+
+     @raise_database_error
+     def create_view(self, view_name: str, table_schema: PreparedTableSchema) -> None:
+         """Create a DuckDB view for an Iceberg table using iceberg_scan()."""
+         table_name = table_schema["name"]
+
+         # Get table location from catalog
+         try:
+             table_location = self.remote_client.get_open_table_location("iceberg", table_name)
+         except Exception as e:
+             raise DestinationUndefinedEntity(table_name) from e
+
+         if not table_location:
+             raise DestinationUndefinedEntity(table_name)
+
+         # Ensure iceberg extension is loaded
+         if not self.iceberg_initialized:
+             self._setup_iceberg(self._conn)
+             self.iceberg_initialized = True
+
+         # Get metadata file path
+         metadata_file = self._get_metadata_file(table_location)
+         if not metadata_file:
+             raise DestinationUndefinedEntity(table_name)
+
+         # Check for gzip compression
+         compression = ""
+         if ".gz." in metadata_file:
+             compression = ", metadata_compression_codec = 'gzip'"
+
+         # Scanner options based on DuckDB version
+         if Version(duckdb.__version__) > Version("1.3.0"):
+             scanner_options = "union_by_name=true"
+         else:
+             scanner_options = "skip_schema_inference=false"
+
+         # Build column selection from schema
+         columns = list(self.schema.get_table_columns(table_name).keys())
+         escaped_columns = [self.escape_column_name(c) for c in columns]
+         columns_sql = ", ".join(escaped_columns) if columns else "*"
+
+         # Create the view
+         view_sql = f"""
+             CREATE OR REPLACE VIEW {self.escape_column_name(view_name)} AS
+             SELECT {columns_sql}
+             FROM iceberg_scan('{metadata_file}'{compression}, {scanner_options})
+         """
+
+         logger.info(f"Creating view {view_name} for Iceberg table {table_name}")
+         self._conn.execute(view_sql)
+
+     def _get_metadata_file(self, table_location: str) -> str:
+         """Get the latest metadata file path for an Iceberg table.
+
+         Args:
+             table_location: Base location of the Iceberg table
+
+         Returns:
+             Path to the latest metadata JSON file
+         """
+         # Try to get metadata from the catalog directly
+         try:
+             # Load the table through PyIceberg to get metadata location
+             catalog = self.remote_client._get_catalog()
+             namespace = self.remote_client.config.namespace
+
+             # Extract table name from location
+             table_name = table_location.rstrip("/").split("/")[-1]
+             identifier = f"{namespace}.{table_name}"
+
+             iceberg_table = catalog.load_table(identifier)
+             metadata_location = iceberg_table.metadata_location
+
+             if metadata_location:
+                 return metadata_location
+         except Exception as e:
+             logger.debug(f"Could not get metadata from catalog: {e}")
+
+         # Fallback: scan metadata directory
+         metadata_path = f"{table_location.rstrip('/')}/metadata"
+         return self._find_latest_metadata(metadata_path)
+
+     def _find_latest_metadata(self, metadata_path: str) -> str:
+         """Find the latest metadata file in a metadata directory.
+
+         Args:
+             metadata_path: Path to the metadata directory
+
+         Returns:
+             Path to the latest metadata file or empty string
+         """
+         import os
+         from urllib.parse import urlparse
+
+         parsed = urlparse(metadata_path)
+
+         # Only support local filesystem for fallback scan
+         if parsed.scheme and parsed.scheme not in ("file", ""):
+             logger.warning(
+                 f"Cannot scan metadata directory for {parsed.scheme} storage, "
+                 "use catalog for metadata location"
+             )
+             return ""
+
+         local_path = parsed.path if parsed.scheme == "file" else metadata_path
+
+         if not os.path.exists(local_path):
+             return ""
+
+         # Find latest metadata file
+         metadata_files = [
+             f for f in os.listdir(local_path)
+             if f.endswith(".metadata.json")
+         ]
+
+         if not metadata_files:
+             return ""
+
+         # Sort by version number (format: v1.metadata.json, v2.metadata.json, etc.);
+         # a plain string sort would rank "v9" above "v10", so use the numeric part
+         # when present and fall back to the name for UUID-based files.
+         def sort_key(name: str) -> tuple:
+             stem = name.split(".", 1)[0]
+             return (1, int(stem[1:]), name) if stem.startswith("v") and stem[1:].isdigit() else (0, 0, name)
+
+         metadata_files.sort(key=sort_key, reverse=True)
+
+         return f"{metadata_path}/{metadata_files[0]}"
--- /dev/null
+++ dlt_iceberg-0.2.0.dist-info/METADATA
@@ -0,0 +1,442 @@
+ Metadata-Version: 2.4
+ Name: dlt-iceberg
+ Version: 0.2.0
+ Summary: dlt destination for Apache Iceberg with atomic multi-file commits via REST catalogs
+ Project-URL: Homepage, https://github.com/sidequery/dlt-iceberg
+ Project-URL: Repository, https://github.com/sidequery/dlt-iceberg
+ Project-URL: Issues, https://github.com/sidequery/dlt-iceberg/issues
+ Author-email: Sidequery <hello@sidequery.com>
+ License: MIT
+ License-File: LICENSE
+ Keywords: data-engineering,data-pipeline,dlt,elt,etl,iceberg
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Database
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.11
+ Requires-Dist: boto3>=1.40.50
+ Requires-Dist: dlt>=1.17.1
+ Requires-Dist: duckdb>=1.4.3
+ Requires-Dist: pandas>=2.3.3
+ Requires-Dist: pyarrow>=21.0.0
+ Requires-Dist: pydantic<2.11
+ Requires-Dist: pyiceberg[pyiceberg-core]>=0.10.0
+ Requires-Dist: requests>=2.32.5
+ Requires-Dist: s3fs>=0.4.2
+ Requires-Dist: sqlalchemy>=2.0.44
+ Description-Content-Type: text/markdown
+
+ # dlt-iceberg
+
+ A [dlt](https://dlthub.com/) destination for [Apache Iceberg](https://iceberg.apache.org/) tables using REST catalogs.
+
+ ## Features
+
+ - **Atomic Multi-File Commits**: Multiple parquet files committed as a single Iceberg snapshot per table
+ - **REST Catalog Support**: Works with Nessie, Polaris, AWS Glue, Unity Catalog
+ - **Credential Vending**: Most REST catalogs vend storage credentials automatically
+ - **Partitioning**: Full support for Iceberg partition transforms via `iceberg_adapter()`
+ - **Merge Strategies**: Delete-insert and upsert with hard delete support
+ - **DuckDB Integration**: Query loaded data via `pipeline.dataset()`
+ - **Schema Evolution**: Automatic schema updates when adding columns
+
+ ## Installation
+
+ ```bash
+ pip install dlt-iceberg
+ ```
+
+ Or with uv:
+
+ ```bash
+ uv add dlt-iceberg
+ ```
+
+ ## Quick Start
+
+ ```python
+ import dlt
+ from dlt_iceberg import iceberg_rest
+
+ @dlt.resource(name="events", write_disposition="append")
+ def generate_events():
+     yield {"event_id": 1, "value": 100}
+
+ pipeline = dlt.pipeline(
+     pipeline_name="my_pipeline",
+     destination=iceberg_rest(
+         catalog_uri="https://my-catalog.example.com/api/catalog",
+         namespace="analytics",
+         warehouse="my_warehouse",
+         credential="client-id:client-secret",
+         oauth2_server_uri="https://my-catalog.example.com/oauth/tokens",
+     ),
+ )
+
+ pipeline.run(generate_events())
+ ```
+
+ ### Query Loaded Data
+
+ ```python
+ # Query data via DuckDB
+ dataset = pipeline.dataset()
+
+ # Access as dataframe
+ df = dataset["events"].df()
+
+ # Run SQL queries
+ result = dataset.query("SELECT * FROM events WHERE value > 50").fetchall()
+
+ # Get Arrow table
+ arrow_table = dataset["events"].arrow()
+ ```
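+
+ Under the hood, the destination's SQL client (`sql_client.py` in this wheel) exposes each Iceberg table to DuckDB as a view built with `iceberg_scan()` over the table's current metadata file. The sketch below is a rough, hand-written equivalent of what `pipeline.dataset()` sets up, not the client's exact SQL; the metadata path is a made-up placeholder, since the real one is resolved through the REST catalog:
+
+ ```python
+ import duckdb
+
+ con = duckdb.connect()
+ con.execute("INSTALL iceberg")  # iceberg_scan() comes from DuckDB's iceberg extension
+ con.execute("LOAD iceberg")
+
+ # Hypothetical metadata location; the destination looks up the real path in the catalog.
+ metadata_file = "warehouse/analytics/events/metadata/00002-example.metadata.json"
+ con.execute(f"""
+     CREATE OR REPLACE VIEW events AS
+     SELECT * FROM iceberg_scan('{metadata_file}')
+ """)
+ print(con.execute("SELECT count(*) FROM events").fetchone())
+ ```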
+
+ ### Merge/Upsert
+
+ ```python
+ @dlt.resource(
+     name="users",
+     write_disposition="merge",
+     primary_key="user_id"
+ )
+ def generate_users():
+     yield {"user_id": 1, "name": "Alice", "status": "active"}
+
+ pipeline.run(generate_users())
+ ```
+
+ ## Configuration
+
+ ### Required Options
+
+ ```python
+ iceberg_rest(
+     catalog_uri="...",   # REST catalog endpoint (or sqlite:// for local)
+     namespace="...",     # Iceberg namespace (database)
+ )
+ ```
+
+ ### Authentication
+
+ Choose based on your catalog:
+
+ | Catalog | Auth Method |
+ |---------|-------------|
+ | Polaris, Lakekeeper | `credential` + `oauth2_server_uri` |
+ | Unity Catalog | `token` |
+ | AWS Glue | `sigv4_enabled` + `signing_region` |
+ | Local SQLite | None needed |
+
+ Most REST catalogs (Polaris, Lakekeeper, etc.) **vend storage credentials automatically** via the catalog API. You typically don't need to configure S3/GCS/Azure credentials manually.
+
+ <details>
+ <summary><b>Advanced Options</b></summary>
+
+ ```python
+ iceberg_rest(
+     # ... required options ...
+
+     # Manual storage credentials (usually not needed with credential vending)
+     s3_endpoint="...",
+     s3_access_key_id="...",
+     s3_secret_access_key="...",
+     s3_region="...",
+
+     # Performance tuning
+     max_retries=5,                # Retry attempts for transient failures
+     retry_backoff_base=2.0,       # Exponential backoff multiplier
+     merge_batch_size=500000,      # Rows per batch for merge operations
+     strict_casting=False,         # When True, fail on casts that could lose data
+
+     # Table management
+     table_location_layout=None,   # Custom table location pattern
+     register_new_tables=False,    # When True, register existing tables found in storage
+     hard_delete_column="_dlt_deleted_at",  # Column used to mark rows for hard deletion
+ )
+ ```
+
+ </details>
+
+ ## Catalog Examples
+
+ <details>
+ <summary><b>Polaris / Lakekeeper</b></summary>
+
+ ```python
+ iceberg_rest(
+     catalog_uri="https://polaris.example.com/api/catalog",
+     warehouse="my_warehouse",
+     namespace="production",
+     credential="client-id:client-secret",
+     oauth2_server_uri="https://polaris.example.com/api/catalog/v1/oauth/tokens",
+ )
+ ```
+
+ Storage credentials are vended automatically by the catalog.
+
+ </details>
+
+ <details>
+ <summary><b>Unity Catalog (Databricks)</b></summary>
+
+ ```python
+ iceberg_rest(
+     catalog_uri="https://<workspace>.cloud.databricks.com/api/2.1/unity-catalog/iceberg-rest",
+     warehouse="<catalog-name>",
+     namespace="<schema-name>",
+     token="<databricks-token>",
+ )
+ ```
+
+ </details>
+
+ <details>
+ <summary><b>AWS Glue</b></summary>
+
+ ```python
+ iceberg_rest(
+     catalog_uri="https://glue.us-east-1.amazonaws.com/iceberg",
+     warehouse="<account-id>:s3tablescatalog/<bucket>",
+     namespace="my_database",
+     sigv4_enabled=True,
+     signing_region="us-east-1",
+ )
+ ```
+
+ Requires AWS credentials in environment (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`).
+
+ </details>
+
+ <details>
+ <summary><b>Local SQLite Catalog</b></summary>
+
+ ```python
+ iceberg_rest(
+     catalog_uri="sqlite:///catalog.db",
+     warehouse="file:///path/to/warehouse",
+     namespace="my_namespace",
+ )
+ ```
+
+ Great for local development and testing.
+
+ </details>
+
+ <details>
+ <summary><b>Nessie (Docker)</b></summary>
+
+ ```python
+ iceberg_rest(
+     catalog_uri="http://localhost:19120/iceberg/main",
+     namespace="my_namespace",
+     s3_endpoint="http://localhost:9000",
+     s3_access_key_id="minioadmin",
+     s3_secret_access_key="minioadmin",
+     s3_region="us-east-1",
+ )
+ ```
+
+ Start Nessie + MinIO with `docker compose up -d` (see docker-compose.yml in repo).
+
+ </details>
+
+ ## Partitioning
+
+ ### Using iceberg_adapter (Recommended)
+
+ The `iceberg_adapter` function provides a clean API for configuring Iceberg partitioning:
+
+ ```python
+ from dlt_iceberg import iceberg_adapter, iceberg_partition
+
+ @dlt.resource(name="events")
+ def events():
+     yield {"event_date": "2024-01-01", "user_id": 123, "region": "US"}
+
+ # Single partition
+ adapted = iceberg_adapter(events, partition="region")
+
+ # Multiple partitions with transforms
+ adapted = iceberg_adapter(
+     events,
+     partition=[
+         iceberg_partition.month("event_date"),
+         iceberg_partition.bucket(10, "user_id"),
+         "region",  # identity partition
+     ]
+ )
+
+ pipeline.run(adapted)
+ ```
+
+ ### Partition Transforms
+
+ ```python
+ # Temporal transforms (for timestamp/date columns)
+ iceberg_partition.year("created_at")
+ iceberg_partition.month("created_at")
+ iceberg_partition.day("created_at")
+ iceberg_partition.hour("created_at")
+
+ # Identity (no transformation)
+ iceberg_partition.identity("region")
+
+ # Bucket (hash into N buckets)
+ iceberg_partition.bucket(10, "user_id")
+
+ # Truncate (truncate values to a fixed width)
+ iceberg_partition.truncate(4, "email")
+
+ # Custom partition field names
+ iceberg_partition.month("created_at", "event_month")
+ iceberg_partition.bucket(8, "user_id", "user_bucket")
+ ```
+
+ ### Using Column Hints
+
+ You can also use dlt column hints for partitioning:
+
+ ```python
+ @dlt.resource(
+     name="events",
+     columns={
+         "event_date": {
+             "data_type": "date",
+             "partition": True,
+             "partition_transform": "day",
+         },
+         "user_id": {
+             "data_type": "bigint",
+             "partition": True,
+             "partition_transform": "bucket[10]",
+         }
+     }
+ )
+ def events():
+     ...
+ ```
+
+ ## Write Dispositions
+
+ ### Append
+ ```python
+ write_disposition="append"
+ ```
+ Adds new data without modifying existing rows.
+
+ ### Replace
+ ```python
+ write_disposition="replace"
+ ```
+ Truncates the table and inserts the new data.
+
+ ### Merge
+
+ #### Delete-Insert Strategy (Default)
+ ```python
+ @dlt.resource(
+     write_disposition={"disposition": "merge", "strategy": "delete-insert"},
+     primary_key="user_id"
+ )
+ ```
+ Deletes matching rows, then inserts the new data, in a single atomic transaction.
+
+ #### Upsert Strategy
+ ```python
+ @dlt.resource(
+     write_disposition={"disposition": "merge", "strategy": "upsert"},
+     primary_key="user_id"
+ )
+ ```
+ Updates existing rows and inserts new ones.
+
+ #### Hard Deletes
+
+ Mark rows for deletion by setting the `_dlt_deleted_at` column:
+
+ ```python
+ @dlt.resource(
+     write_disposition={"disposition": "merge", "strategy": "delete-insert"},
+     primary_key="user_id"
+ )
+ def users_with_deletes():
+     from datetime import datetime
+     yield {"user_id": 1, "name": "alice", "_dlt_deleted_at": None}           # Keep
+     yield {"user_id": 2, "name": "bob", "_dlt_deleted_at": datetime.now()}   # Delete
+ ```
+
+ ## Development
+
+ ### Run Tests
+
+ ```bash
+ # Start Docker services (for Nessie tests)
+ docker compose up -d
+
+ # Run all tests
+ uv run pytest tests/ -v
+
+ # Run only unit tests (no Docker required)
+ uv run pytest tests/ --ignore=tests/nessie -v
+
+ # Run Nessie integration tests
+ uv run pytest tests/nessie/ -v
+ ```
+
+ ### Project Structure
+
+ ```
+ dlt-iceberg/
+ ├── src/dlt_iceberg/
+ │   ├── __init__.py              # Public API
+ │   ├── destination_client.py    # Class-based destination (atomic commits)
+ │   ├── destination.py           # Function-based destination (legacy)
+ │   ├── adapter.py               # iceberg_adapter() for partitioning
+ │   ├── sql_client.py            # DuckDB integration for dataset()
+ │   ├── schema_converter.py      # dlt → Iceberg schema conversion
+ │   ├── schema_casting.py        # Arrow table casting
+ │   ├── schema_evolution.py      # Schema updates
+ │   ├── partition_builder.py     # Partition specs
+ │   └── error_handling.py        # Retry logic
+ ├── tests/
+ │   ├── test_adapter.py          # iceberg_adapter tests
+ │   ├── test_capabilities.py     # Hard delete, partition names tests
+ │   ├── test_dataset.py          # DuckDB integration tests
+ │   ├── test_merge_disposition.py
+ │   ├── test_schema_evolution.py
+ │   └── ...
+ ├── examples/
+ │   ├── incremental_load.py      # CSV incremental loading
+ │   ├── merge_load.py            # CSV merge/upsert
+ │   └── data/                    # Sample CSV files
+ └── docker-compose.yml           # Nessie + MinIO for testing
+ ```
+
+ ## How It Works
+
+ The class-based destination uses dlt's `JobClientBase` interface to accumulate parquet files during a load and commit them atomically in `complete_load()`:
+
+ 1. dlt extracts data and writes parquet files
+ 2. Each file is registered in module-level global state
+ 3. After all files complete, `complete_load()` is called
+ 4. All files for a table are combined and committed as a single Iceberg snapshot
+ 5. Each table gets one snapshot per load
+
+ This ensures atomic commits even though dlt creates multiple client instances.
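+
+ A minimal sketch of the accumulate-then-commit idea (simplified and illustrative, not the package's actual internals; it assumes PyIceberg's `Table.add_files` API and a configured catalog):
+
+ ```python
+ from collections import defaultdict
+
+ from pyiceberg.catalog import load_catalog
+
+ # Module-level state survives across the client instances dlt creates during one load.
+ _pending_files: dict[str, list[str]] = defaultdict(list)  # table identifier -> parquet paths
+
+
+ def register_file(table_identifier: str, parquet_path: str) -> None:
+     # Called once per finished load job: only record the path, do not commit yet.
+     _pending_files[table_identifier].append(parquet_path)
+
+
+ def complete_load(catalog_name: str) -> None:
+     # Called once at the end of the load: one add_files() call per table registers
+     # all of its parquet files in a single Iceberg snapshot.
+     catalog = load_catalog(catalog_name)
+     for identifier, paths in _pending_files.items():
+         table = catalog.load_table(identifier)
+         table.add_files(file_paths=paths)
+     _pending_files.clear()
+ ```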
+
+ ## License
+
+ MIT License - see LICENSE file
+
+ ## Resources
+
+ - [dlt Documentation](https://dlthub.com/docs)
+ - [Apache Iceberg](https://iceberg.apache.org/)
+ - [PyIceberg](https://py.iceberg.apache.org/)
+ - [Iceberg REST Spec](https://iceberg.apache.org/rest-catalog-spec/)
--- /dev/null
+++ dlt_iceberg-0.2.0.dist-info/RECORD
@@ -0,0 +1,14 @@
+ dlt_iceberg/__init__.py,sha256=lwTBzF5tUzh1inDwqqutwcniQzgaK0_H27oRxCiRh5Y,968
+ dlt_iceberg/adapter.py,sha256=mpwnBz07B84yX2jbixv33-LJOt3XZi6lt_8yP3X6LTM,9674
+ dlt_iceberg/destination.py,sha256=oE0J8mcuCmR9KX0E6nEJ9kOTLPwPb6lq3ZHVorMlOAc,19409
+ dlt_iceberg/destination_client.py,sha256=Br-uCIM7QGumRbgw1ehNraRUjeNcbZRlE0uP5KvSwVI,41060
+ dlt_iceberg/error_handling.py,sha256=k6Kkldi9BDRsXQ63VEBMMSw1xx2-b1BMjsgRFKI2iB0,7852
+ dlt_iceberg/partition_builder.py,sha256=ERAewxVXbqXh0XX92KXt4a6h9bnKmf4D-uTFSoExbm8,10401
+ dlt_iceberg/schema_casting.py,sha256=oSQrnOcCMFcinMS65N8YQ1uzrqnQmN50mCCuQyE3794,15247
+ dlt_iceberg/schema_converter.py,sha256=ImpxvUY4oEietOgycqQZaJJ0mISqVyH4IkQ-fQ_lf6Y,5717
+ dlt_iceberg/schema_evolution.py,sha256=ieOkCA9ngQdJ5lbZLYQ09deTLZEW8whxDn2arpoH-aM,8326
+ dlt_iceberg/sql_client.py,sha256=EIHpsH0k4XoEffLbzobm4NJvr0Se6fA7pkc97DQqT88,8202
+ dlt_iceberg-0.2.0.dist-info/METADATA,sha256=O1oR3OQkMoYdnJBH5iUJgvoYqPA-h1PkLCarsNo7zRI,11747
+ dlt_iceberg-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ dlt_iceberg-0.2.0.dist-info/licenses/LICENSE,sha256=0amGlcH0msYju3WUhlsuUxO4aj3ZODkkIZ0MKOq9fQ4,1066
+ dlt_iceberg-0.2.0.dist-info/RECORD,,
--- dlt_iceberg-0.1.4.dist-info/WHEEL
+++ dlt_iceberg-0.2.0.dist-info/WHEEL
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: hatchling 1.27.0
+ Generator: hatchling 1.28.0
  Root-Is-Purelib: true
  Tag: py3-none-any