dbt-cube-sync 0.1.0a2.tar.gz → 0.1.0a3.tar.gz

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dbt-cube-sync
-Version: 0.1.0a2
+Version: 0.1.0a3
 Summary: Synchronization tool for dbt models to Cube.js schemas and BI tools
 Author: Ponder
 Requires-Python: >=3.9,<4.0
@@ -191,19 +191,7 @@ class SupersetConnector(BaseConnector):
 
         cube_name = cube_name_match.group(1)
 
-        # Use public schema and cube name for Superset dataset
-        schema_name = "public"
-        table_name = cube_name  # Use cube name (not database table name)
-
-        # Extract actual database table for reference (but don't use it for dataset)
-        sql_match = re.search(r'sql:\s*[`"\']\s*SELECT\s+.*FROM\s+(\w+\.\w+)', content, re.IGNORECASE)
-        actual_db_table = None
-        if sql_match:
-            actual_db_table = sql_match.group(1)
-
         print(f" Cube: {cube_name}")
-        print(f" Schema: {schema_name}")
-        print(f" Table: {table_name}")
 
         # Parse dimensions
         dimensions = self._parse_dimensions(content)
@@ -213,9 +201,8 @@ class SupersetConnector(BaseConnector):
 
         return {
             'cube_name': cube_name,
-            'schema': schema_name,
-            'table_name': table_name,  # This is now the cube name for dataset creation
-            'actual_db_table': actual_db_table,  # This is the real DB table
+            'schema': 'public',  # Always use public schema for Cube.js
+            'table_name': cube_name,  # Use cube name as table name (e.g., CoursePerformanceSummary)
             'dimensions': dimensions,
             'measures': measures
         }
@@ -409,27 +396,14 @@ class SupersetConnector(BaseConnector):
         """Create a new dataset in Superset"""
         dataset_url = f"{self.base_url}/api/v1/dataset/"
 
-        # If we have actual DB table info, use custom SQL, otherwise use table reference
-        if schema_info.get('actual_db_table'):
-            # Create a custom SQL dataset that references the actual table but is named with cube name
-            sql_query = f"SELECT * FROM {schema_info['actual_db_table']}"
-            payload = {
-                "database": self.database_id,
-                "schema": schema_info['schema'],  # "public"
-                "table_name": schema_info['table_name'],  # cube name like "CoursePerformanceSummary"
-                "sql": sql_query,
-                "normalize_columns": False,
-                "always_filter_main_dttm": False
-            }
-        else:
-            # Fallback to direct table reference
-            payload = {
-                "database": self.database_id,
-                "schema": schema_info['schema'],
-                "table_name": schema_info['table_name'],
-                "normalize_columns": False,
-                "always_filter_main_dttm": False
-            }
+        # Create a simple table dataset (Cube.js will handle the actual data source)
+        payload = {
+            "database": self.database_id,
+            "schema": schema_info['schema'],  # "public"
+            "table_name": schema_info['table_name'],  # cube name like "CoursePerformanceSummary"
+            "normalize_columns": False,
+            "always_filter_main_dttm": False
+        }
 
         print(f"\n📊 Creating new dataset: {schema_info['table_name']}")
         response = self.session.post(dataset_url, json=payload)
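
For reference, below is a minimal standalone sketch (not taken from the package) of the simplified dataset-creation request that the new code path sends to Superset's /api/v1/dataset/ endpoint. The payload fields mirror the diff above; the already-authenticated requests.Session, BASE_URL, and DATABASE_ID values are placeholder assumptions.

    import requests

    BASE_URL = "http://localhost:8088"   # placeholder Superset URL
    DATABASE_ID = 1                      # placeholder Superset database id

    session = requests.Session()
    # Login / CSRF-token handling omitted; assume the session is already authenticated.

    # Same fields as the new payload in the diff: the dataset is named after the cube
    # and points at the "public" schema used for Cube.js.
    payload = {
        "database": DATABASE_ID,
        "schema": "public",
        "table_name": "CoursePerformanceSummary",  # example cube name from the diff
        "normalize_columns": False,
        "always_filter_main_dttm": False,
    }

    response = session.post(f"{BASE_URL}/api/v1/dataset/", json=payload)
    response.raise_for_status()
    print(response.json())
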
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "dbt-cube-sync"
-version = "0.1.0a2"
+version = "0.1.0a3"
 description = "Synchronization tool for dbt models to Cube.js schemas and BI tools"
 authors = ["Ponder"]
 readme = "README.md"