duckrun 0.2.16.dev2-py3-none-any.whl → 0.2.19.dev5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
duckrun/__init__.py CHANGED
@@ -1,10 +1,12 @@
  """Duckrun - Lakehouse task runner powered by DuckDB"""

  from duckrun.core import Duckrun
+ from duckrun.notebook import import_notebook_from_web, import_notebook
+ from duckrun import rle

- __version__ = "0.2.14.dev2"
+ __version__ = "0.2.18"

  # Expose unified connect method at module level
  connect = Duckrun.connect

- __all__ = ["Duckrun", "connect"]
+ __all__ = ["Duckrun", "connect", "import_notebook_from_web", "import_notebook", "rle"]
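
The two new imports re-export the notebook helpers and the rle analysis module at package level. A minimal usage sketch of the module-level helper; the workspace name, notebook name, and URL are placeholders, and the keyword arguments mirror the ones the mixin in core.py forwards to duckrun.notebook:

    import duckrun

    # Module-level re-export from duckrun.notebook (new in this release).
    # All argument values here are placeholders.
    result = duckrun.import_notebook_from_web(
        url="https://raw.githubusercontent.com/user/repo/main/notebook.ipynb",
        notebook_name="My Notebook",   # optional; derived from the URL if omitted
        overwrite=False,               # default: do not replace an existing notebook
        workspace_name="My Workspace",
    )
    print(result)                      # dictionary describing the import result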
duckrun/core.py CHANGED
@@ -12,7 +12,71 @@ from .runner import run as _run
  from .files import copy as _copy, download as _download
  from .writer import QueryResult

- class Duckrun:
+
+ class WorkspaceOperationsMixin:
+     """
+     Mixin class for workspace-level operations that work for both
+     full Duckrun connections and workspace-only connections.
+     """
+
+     def import_notebook_from_web(self, url: str,
+                                  notebook_name: Optional[str] = None,
+                                  overwrite: bool = False) -> dict:
+         """
+         Import a Jupyter notebook from a web URL into the workspace.
+
+         Args:
+             url: URL to the notebook file (e.g., GitHub raw URL). Required.
+             notebook_name: Name for the imported notebook. Optional - derived from URL if not provided.
+             overwrite: Whether to overwrite if notebook already exists (default: False)
+
+         Returns:
+             Dictionary with import result
+
+         Examples:
+             con = duckrun.connect("workspace/lakehouse.lakehouse")
+             result = con.import_notebook_from_web(
+                 url="https://raw.githubusercontent.com/user/repo/main/notebook.ipynb"
+             )
+
+             ws = duckrun.connect("workspace")
+             result = ws.import_notebook_from_web(
+                 url="https://raw.githubusercontent.com/user/repo/main/notebook.ipynb"
+             )
+         """
+         from .notebook import import_notebook_from_web as _import_notebook_from_web
+
+         # Get workspace name from either self.workspace or self.workspace_name
+         workspace_name = getattr(self, 'workspace', None) or getattr(self, 'workspace_name', None)
+
+         return _import_notebook_from_web(
+             url=url,
+             notebook_name=notebook_name,
+             overwrite=overwrite,
+             workspace_name=workspace_name
+         )
+
+     def _get_workspace_id_by_name(self, token: str, workspace_name: str) -> Optional[str]:
+         """Helper method to get workspace ID from name"""
+         try:
+             url = "https://api.fabric.microsoft.com/v1/workspaces"
+             headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
+
+             response = requests.get(url, headers=headers)
+             response.raise_for_status()
+
+             workspaces = response.json().get("value", [])
+             for workspace in workspaces:
+                 if workspace.get("displayName") == workspace_name:
+                     return workspace.get("id")
+
+             return None
+
+         except Exception:
+             return None
+
+
+ class Duckrun(WorkspaceOperationsMixin):
      """
      OneLake task runner with clean tuple-based API.
      Supports lakehouses, warehouses, databases, and other OneLake items.
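
The mixin resolves the workspace name with a getattr fallback, so the same method works whether the host object exposes `workspace` (full connection) or `workspace_name` (workspace-only connection). A toy sketch of that lookup; the two stand-in classes are hypothetical, not the real connection types:

    # Illustrative stand-ins only; the real classes are Duckrun and WorkspaceConnection.
    class FullConnectionStub:
        workspace = "Analytics"        # attribute name used by one connection type

    class WorkspaceOnlyStub:
        workspace_name = "Analytics"   # attribute name used by the other

    def resolve_workspace(obj):
        # Same fallback expression used in import_notebook_from_web above
        return getattr(obj, 'workspace', None) or getattr(obj, 'workspace_name', None)

    print(resolve_workspace(FullConnectionStub()))   # -> Analytics
    print(resolve_workspace(WorkspaceOnlyStub()))    # -> Analytics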
@@ -971,33 +1035,44 @@ class Duckrun:
          """Get underlying DuckDB connection"""
          return self.con

-     def get_stats(self, source: str):
+     def get_stats(self, source: str = None, detailed = False):
          """
          Get comprehensive statistics for Delta Lake tables.

          Args:
-             source: Can be one of:
+             source: Optional. Can be one of:
+                 - None: Use all tables in the connection's schema (default)
                  - Table name: 'table_name' (uses current schema)
                  - Schema.table: 'schema.table_name' (specific table in schema)
                  - Schema only: 'schema' (all tables in schema)
+             detailed: Optional. Controls the level of detail in statistics:
+                 - False (default): Aggregated table-level stats
+                 - True: Row group level statistics with compression details

          Returns:
-             Arrow table with statistics including total rows, file count, row groups,
-             average row group size, file sizes, VORDER status, and timestamp
+             DataFrame with statistics based on detailed parameter:
+                 - If detailed=False: Aggregated table-level summary
+                 - If detailed=True: Granular file and row group level stats

          Examples:
              con = duckrun.connect("tmp/data.lakehouse/aemo")

-             # Single table in current schema
+             # All tables in current schema (aemo) - aggregated
+             stats = con.get_stats()
+
+             # Single table in current schema - aggregated
              stats = con.get_stats('price')

+             # Single table with detailed row group statistics
+             stats_detailed = con.get_stats('price', detailed=True)
+
              # Specific table in different schema
              stats = con.get_stats('aemo.price')

              # All tables in a schema
              stats = con.get_stats('aemo')
          """
-         return _get_stats(self, source)
+         return _get_stats(self, source, detailed)

      def list_lakehouses(self) -> List[str]:
          """
@@ -1111,7 +1186,7 @@ class Duckrun:
              return False

      def deploy(self, bim_url: str, dataset_name: Optional[str] = None,
-                wait_seconds: int = 5) -> int:
+                wait_seconds: int = 5, refresh: str = "full") -> int:
          """
          Deploy a semantic model from a BIM file using DirectLake mode.

@@ -1120,8 +1195,11 @@ class Duckrun:
                  - URL: "https://raw.githubusercontent.com/.../model.bim"
                  - Local file: "model.bim"
                  - Workspace/Model: "workspace_name/model_name"
-             dataset_name: Name for the semantic model (default: source model name if workspace/model format, else lakehouse_schema)
+             dataset_name: Name for the semantic model (default: schema name)
              wait_seconds: Seconds to wait for permission propagation (default: 5)
+             refresh: Refresh strategy:
+                 - "full": Clear values and process full refresh (default)
+                 - "ignore": Skip refresh entirely

          Returns:
              1 for success, 0 for failure
@@ -1129,14 +1207,17 @@ class Duckrun:
          Examples:
              dr = Duckrun.connect("My Workspace/My Lakehouse.lakehouse/dbo")

+             # Deploy with schema name as dataset name (dbo)
+             dr.deploy("https://github.com/.../model.bim")
+
              # Deploy from workspace/model (uses same name by default)
              dr.deploy("Source Workspace/Source Model") # Creates "Source Model"

              # Deploy with custom name
-             dr.deploy("Source Workspace/Source Model", dataset_name="Sales Model Copy")
+             dr.deploy("https://github.com/.../model.bim", dataset_name="Sales Model")

-             # Deploy from URL or local file
-             dr.deploy("https://raw.githubusercontent.com/.../model.bim", dataset_name="My Model")
+             # Deploy without refresh
+             dr.deploy("https://github.com/.../model.bim", refresh="ignore")
          """
          from .semantic_model import deploy_semantic_model

@@ -1148,9 +1229,9 @@ class Duckrun:
              if len(parts) == 2:
                  dataset_name = parts[1] # Use the model name
              else:
-                 dataset_name = f"{self.lakehouse_name}_{self.schema}"
+                 dataset_name = self.schema # Use schema name
          else:
-             dataset_name = f"{self.lakehouse_name}_{self.schema}"
+             dataset_name = self.schema # Use schema name

          # Call the deployment function (DirectLake only)
          return deploy_semantic_model(
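
Note the default-name change: older builds fell back to `{lakehouse_name}_{schema}`, while this version defaults to the schema name alone. A short migration sketch; the workspace, lakehouse, and URL are placeholders:

    import duckrun

    dr = duckrun.connect("My Workspace/My Lakehouse.lakehouse/dbo")

    # Previously the default dataset name would have been "My Lakehouse_dbo";
    # with this change the default is just the schema, i.e. "dbo".
    dr.deploy("https://raw.githubusercontent.com/user/repo/main/model.bim")

    # To keep the old naming, pass it explicitly:
    dr.deploy("https://raw.githubusercontent.com/user/repo/main/model.bim",
              dataset_name="My Lakehouse_dbo")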
@@ -1159,36 +1240,203 @@ class Duckrun:
              schema_name=self.schema,
              dataset_name=dataset_name,
              bim_url_or_path=bim_url,
-             wait_seconds=wait_seconds
+             wait_seconds=wait_seconds,
+             refresh=refresh
          )

-     def _get_workspace_id_by_name(self, token: str, workspace_name: str) -> Optional[str]:
-         """Helper method to get workspace ID from name"""
-         try:
-             url = "https://api.fabric.microsoft.com/v1/workspaces"
-             headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
+     def rle(self, table_name: str = None, mode = "natural",
+             min_distinct_threshold: int = 2, max_cardinality_pct: float = 0.01,
+             max_ordering_depth: int = 3, limit: int = None):
+         """
+         Analyze RLE (Run-Length Encoding) compression potential for Delta Lake tables.
+
+         Args:
+             table_name: Name of the table to analyze. Can be:
+                 - 'table_name' (uses current schema)
+                 - 'schema.table_name' (specific schema)
+             mode: Analysis mode or column ordering:
+                 - "natural": Calculate RLE for natural order only (fastest)
+                 - "auto": Natural order + cardinality-based ordering (recommended)
+                 - "advanced": Natural + cardinality + greedy incremental search (most thorough)
+                 - List[str]: Specific column ordering to test, e.g., ['date', 'duid']
+             min_distinct_threshold: Exclude columns with fewer distinct values (default: 2)
+             max_cardinality_pct: Exclude columns with cardinality above this % (default: 0.01 = 1%)
+             max_ordering_depth: Maximum depth for greedy search in "advanced" mode (default: 3)
+             limit: Optional row limit for testing/development (default: None, analyzes all rows)
+
+         Returns:
+             DataFrame with RLE analysis results
+
+         Examples:
+             # Natural order only (baseline)
+             con = duckrun.connect("workspace/lakehouse.lakehouse/schema")
+             con.rle("mytable") # same as con.rle("mytable", "natural")

-             response = requests.get(url, headers=headers)
-             response.raise_for_status()
+             # Auto optimization (natural + cardinality-based)
+             con.rle("mytable", "auto")

-             workspaces = response.json().get("value", [])
-             for workspace in workspaces:
-                 if workspace.get("displayName") == workspace_name:
-                     return workspace.get("id")
+             # Advanced optimization (greedy incremental search)
+             con.rle("mytable", "advanced")
+
+             # Test specific column ordering
+             con.rle("mytable", ["date", "duid"])
+             con.rle("mytable", ["cutoff", "time", "DUID", "date"])

+             # Advanced with custom depth
+             con.rle("mytable", "advanced", max_ordering_depth=4)
+
+             # Analyze table from different schema
+             con.rle("otherschema.mytable", "auto")
+
+             # Custom thresholds for small tables
+             con.rle("mytable", "auto", max_cardinality_pct=0.05)
+
+             # Limit rows for testing
+             con.rle("mytable", "auto", limit=10000)
+         """
+         from .rle import (
+             calculate_cardinality_ratio,
+             test_column_orderings_smart,
+             calculate_rle_for_columns
+         )
+         from deltalake import DeltaTable
+
+         # Parse table name and construct path
+         if table_name is None:
+             if mode != "summary":
+                 print("⚠️ Table name is required for 'smart' and 'full' modes")
+                 return None
+             # TODO: Implement all-tables summary
+             print("⚠️ All-tables summary not yet implemented. Please specify a table name.")
              return None
+
+         # Parse schema.table or just table
+         if '.' in table_name:
+             schema_name, tbl = table_name.split('.', 1)
+         else:
+             schema_name = self.schema
+             tbl = table_name
+
+         # Construct the full table path using the same logic as get_stats
+         table_path = f"{self.table_base_url}{schema_name}/{tbl}"
+
+         # Verify table exists and is not empty
+         print(f"📊 Analyzing table: {schema_name}.{tbl}")
+
+         try:
+             dt = DeltaTable(table_path)
+             delta_files = dt.files()

-         except Exception:
+             if not delta_files:
+                 print("⚠️ Table is empty (no files)")
+                 return None
+
+         except Exception as e:
+             print(f"❌ Error accessing Delta table: {e}")
              return None
+
+         # Check if mode is a list of columns (custom ordering)
+         if isinstance(mode, list):
+             # User wants to test a specific column ordering
+             print(f"Testing custom column ordering: {', '.join(mode)}")
+
+             # Calculate cardinality for NDV values
+             card_stats = calculate_cardinality_ratio(self.con, table_name if table_name else f"delta_scan('{table_path}')", is_parquet=False)
+
+             # Calculate RLE for the specified ordering
+             rle_counts = calculate_rle_for_columns(self.con, table_path, mode, limit)
+
+             total_rle_all = sum(rle_counts.values())
+
+             print(f"\nResults:")
+             print(f" Custom ordering: [{', '.join(mode)}]")
+             print(f" Total RLE (all columns): {total_rle_all:,} runs")
+
+             # Return as DataFrame for consistency
+             import pandas as pd
+             results = [{
+                 'schema': schema_name,
+                 'table': tbl,
+                 'sort_order': 'custom',
+                 'columns_used': ', '.join(mode),
+                 'total_rle_all': total_rle_all,
+                 **rle_counts
+             }]
+
+             df = pd.DataFrame(results)
+
+             # Transform to long format
+             long_format_results = []
+
+             for _, row in df.iterrows():
+                 schema_val = row['schema']
+                 table_val = row['table']
+                 sort_order = row['sort_order']
+                 columns_used = row['columns_used']
+                 total_rle_all_val = row['total_rle_all']
+
+                 # Get all column names except metadata columns
+                 metadata_cols = ['schema', 'table', 'sort_order', 'columns_used', 'total_rle_all']
+                 data_columns = [col for col in df.columns if col not in metadata_cols]
+
+                 # Get total rows from card_stats if available
+                 total_rows = card_stats[data_columns[0]]['total_rows'] if card_stats and data_columns else None
+
+                 # Parse the columns_used to get ordering
+                 sort_columns_list = [c.strip() for c in columns_used.split(',')]
+
+                 # Create one row per data column
+                 for col in data_columns:
+                     rle_value = row[col]
+
+                     # Get NDV from card_stats
+                     ndv_value = card_stats[col]['distinct_values'] if card_stats and col in card_stats else None
+
+                     # Determine if column was included in the sort and its position
+                     is_in_sort = col in sort_columns_list
+                     order_position = sort_columns_list.index(col) + 1 if is_in_sort else None
+                     comment = '' if is_in_sort else 'not included in the sort'
+
+                     long_format_results.append({
+                         'schema': schema_val,
+                         'table': table_val,
+                         'sort_type': sort_order,
+                         'column': col,
+                         'order': order_position,
+                         'RLE': rle_value,
+                         'NDV': ndv_value,
+                         'total_rows': total_rows,
+                         'total_RLE': total_rle_all_val,
+                         'comments': comment
+                     })
+
+             long_df = pd.DataFrame(long_format_results)
+
+             return long_df
+
+         # All modes now use test_column_orderings_smart with the mode parameter
+         return test_column_orderings_smart(
+             self.con,
+             table_path,
+             table_name=table_name, # Pass table name for cardinality calculation on full dataset
+             mode=mode,
+             limit=limit,
+             min_distinct_threshold=min_distinct_threshold,
+             max_cardinality_pct=max_cardinality_pct,
+             max_ordering_depth=max_ordering_depth,
+             schema_name=schema_name,
+             table_display_name=tbl
+         )

      def close(self):
          """Close DuckDB connection"""
+
          if self.con:
              self.con.close()
              print("Connection closed")


- class WorkspaceConnection:
+ class WorkspaceConnection(WorkspaceOperationsMixin):
      """
      Simple workspace connection for lakehouse management operations.
      """
@@ -1428,23 +1676,4 @@ class WorkspaceConnection:
              print(f"❌ Error downloading semantic model: {e}")
              import traceback
              traceback.print_exc()
-             return None
-
-     def _get_workspace_id_by_name(self, token: str, workspace_name: str) -> Optional[str]:
-         """Helper method to get workspace ID from name"""
-         try:
-             url = "https://api.fabric.microsoft.com/v1/workspaces"
-             headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
-
-             response = requests.get(url, headers=headers)
-             response.raise_for_status()
-
-             workspaces = response.json().get("value", [])
-             for workspace in workspaces:
-                 if workspace.get("displayName") == workspace_name:
-                     return workspace.get("id")
-
-             return None
-
-         except Exception:
              return None
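
Because WorkspaceConnection now inherits the same mixin, notebook import also works on a workspace-only connection, with no lakehouse segment in the path. A short sketch; the workspace name and URL are placeholders:

    import duckrun

    ws = duckrun.connect("My Workspace")   # workspace-only connection

    result = ws.import_notebook_from_web(
        url="https://raw.githubusercontent.com/user/repo/main/notebook.ipynb",
        overwrite=True,                    # replace an existing notebook of the same name
    )
    print(result)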