dbt-cube-sync 0.1.0a1__tar.gz → 0.1.0a2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dbt-cube-sync
-Version: 0.1.0a1
+Version: 0.1.0a2
 Summary: Synchronization tool for dbt models to Cube.js schemas and BI tools
 Author: Ponder
 Requires-Python: >=3.9,<4.0
@@ -191,15 +191,15 @@ class SupersetConnector(BaseConnector):
 
         cube_name = cube_name_match.group(1)
 
-        # Extract schema and table name from SQL query
+        # Use public schema and cube name for Superset dataset
+        schema_name = "public"
+        table_name = cube_name  # Use cube name (not database table name)
+
+        # Extract actual database table for reference (but don't use it for dataset)
         sql_match = re.search(r'sql:\s*[`"\']\s*SELECT\s+.*FROM\s+(\w+\.\w+)', content, re.IGNORECASE)
+        actual_db_table = None
         if sql_match:
-            schema_table = sql_match.group(1)
-            schema_name, table_name = schema_table.split('.')
-        else:
-            # Fallback to cube name
-            schema_name = "public"
-            table_name = cube_name
+            actual_db_table = sql_match.group(1)
 
         print(f" Cube: {cube_name}")
         print(f" Schema: {schema_name}")
@@ -214,7 +214,8 @@ class SupersetConnector(BaseConnector):
         return {
             'cube_name': cube_name,
             'schema': schema_name,
-            'table_name': table_name,
+            'table_name': table_name,  # This is now the cube name for dataset creation
+            'actual_db_table': actual_db_table,  # This is the real DB table
             'dimensions': dimensions,
             'measures': measures
         }
@@ -408,13 +409,27 @@ class SupersetConnector(BaseConnector):
         """Create a new dataset in Superset"""
         dataset_url = f"{self.base_url}/api/v1/dataset/"
 
-        payload = {
-            "database": self.database_id,
-            "schema": schema_info['schema'],
-            "table_name": schema_info['table_name'],
-            "normalize_columns": False,
-            "always_filter_main_dttm": False
-        }
+        # If we have actual DB table info, use custom SQL, otherwise use table reference
+        if schema_info.get('actual_db_table'):
+            # Create a custom SQL dataset that references the actual table but is named with cube name
+            sql_query = f"SELECT * FROM {schema_info['actual_db_table']}"
+            payload = {
+                "database": self.database_id,
+                "schema": schema_info['schema'],  # "public"
+                "table_name": schema_info['table_name'],  # cube name like "CoursePerformanceSummary"
+                "sql": sql_query,
+                "normalize_columns": False,
+                "always_filter_main_dttm": False
+            }
+        else:
+            # Fallback to direct table reference
+            payload = {
+                "database": self.database_id,
+                "schema": schema_info['schema'],
+                "table_name": schema_info['table_name'],
+                "normalize_columns": False,
+                "always_filter_main_dttm": False
+            }
 
         print(f"\n📊 Creating new dataset: {schema_info['table_name']}")
         response = self.session.post(dataset_url, json=payload)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "dbt-cube-sync"
-version = "0.1.0a1"
+version = "0.1.0a2"
 description = "Synchronization tool for dbt models to Cube.js schemas and BI tools"
 authors = ["Ponder"]
 readme = "README.md"
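
For reference, a minimal sketch of the payload shape this release switches to: a virtual Superset dataset named after the cube, backed by custom SQL over the real table, posted to the /api/v1/dataset/ endpoint. The host, database id, and table names below are placeholders, and authentication on the requests.Session is assumed to be handled elsewhere; this is not the package's own public API.

import requests

base_url = "http://localhost:8088"   # assumed Superset host
session = requests.Session()         # auth headers assumed to be set already
database_id = 1                      # assumed Superset database id

schema_info = {
    "schema": "public",
    "table_name": "CoursePerformanceSummary",               # cube name
    "actual_db_table": "analytics.course_performance",      # hypothetical real DB table
}

payload = {
    "database": database_id,
    "schema": schema_info["schema"],
    "table_name": schema_info["table_name"],
    "sql": f"SELECT * FROM {schema_info['actual_db_table']}",
    "normalize_columns": False,
    "always_filter_main_dttm": False,
}

# Create the dataset; Superset returns the new dataset's id on success
response = session.post(f"{base_url}/api/v1/dataset/", json=payload)
response.raise_for_status()
print(response.json()["id"])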