iflow-mcp_niclasolofsson-dbt-core-mcp 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. dbt_core_mcp/__init__.py +18 -0
  2. dbt_core_mcp/__main__.py +436 -0
  3. dbt_core_mcp/context.py +459 -0
  4. dbt_core_mcp/cte_generator.py +601 -0
  5. dbt_core_mcp/dbt/__init__.py +1 -0
  6. dbt_core_mcp/dbt/bridge_runner.py +1361 -0
  7. dbt_core_mcp/dbt/manifest.py +781 -0
  8. dbt_core_mcp/dbt/runner.py +67 -0
  9. dbt_core_mcp/dependencies.py +50 -0
  10. dbt_core_mcp/server.py +381 -0
  11. dbt_core_mcp/tools/__init__.py +77 -0
  12. dbt_core_mcp/tools/analyze_impact.py +78 -0
  13. dbt_core_mcp/tools/build_models.py +190 -0
  14. dbt_core_mcp/tools/demo/__init__.py +1 -0
  15. dbt_core_mcp/tools/demo/hello.html +267 -0
  16. dbt_core_mcp/tools/demo/ui_demo.py +41 -0
  17. dbt_core_mcp/tools/get_column_lineage.py +1988 -0
  18. dbt_core_mcp/tools/get_lineage.py +89 -0
  19. dbt_core_mcp/tools/get_project_info.py +96 -0
  20. dbt_core_mcp/tools/get_resource_info.py +134 -0
  21. dbt_core_mcp/tools/install_deps.py +102 -0
  22. dbt_core_mcp/tools/list_resources.py +84 -0
  23. dbt_core_mcp/tools/load_seeds.py +179 -0
  24. dbt_core_mcp/tools/query_database.py +459 -0
  25. dbt_core_mcp/tools/run_models.py +234 -0
  26. dbt_core_mcp/tools/snapshot_models.py +120 -0
  27. dbt_core_mcp/tools/test_models.py +238 -0
  28. dbt_core_mcp/utils/__init__.py +1 -0
  29. dbt_core_mcp/utils/env_detector.py +186 -0
  30. dbt_core_mcp/utils/process_check.py +130 -0
  31. dbt_core_mcp/utils/tool_utils.py +411 -0
  32. dbt_core_mcp/utils/warehouse_adapter.py +82 -0
  33. dbt_core_mcp/utils/warehouse_databricks.py +297 -0
  34. iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/METADATA +784 -0
  35. iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/RECORD +38 -0
  36. iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/WHEEL +4 -0
  37. iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/entry_points.txt +2 -0
  38. iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/licenses/LICENSE +21 -0
dbt_core_mcp/cte_generator.py
@@ -0,0 +1,601 @@
"""
CTE Test Generator for dbt unit tests.

Automatically generates isolated CTE test models and tests from unit tests
marked with `config: cte_test: true`.
"""

import csv
import hashlib
import logging
import re
import shutil
from io import StringIO
from pathlib import Path
from typing import Any

import yaml

logger = logging.getLogger(__name__)


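# --- Editorial illustration (not part of the released file) -----------------
# A minimal sketch of the unit-test YAML this module scans for. The model,
# CTE, column names, and values are hypothetical; the field conventions
# ("model: <base_model>::<cte>", the `cte_test: true` config flag, and the
# '::' prefix that marks a CTE mock) follow the functions defined below.
_EXAMPLE_CTE_TEST_YAML = """
unit_tests:
  - name: orders_enriched_handles_nulls     # hypothetical test name
    model: fct_orders::orders_enriched      # "<base model>::<target CTE>"
    config:
      cte_test: true                        # picked up by generate_cte_tests()
    given:
      - input: ref('stg_orders')            # regular dbt fixture, kept as-is
        rows:
          - {order_id: 1, amount: 10}
      - input: "::upstream_cte"             # '::' prefix mocks a CTE instead
        format: csv
        rows: |
          order_id,amount
          1,10
    expect:
      rows:
        - {order_id: 1, amount: 10}
"""
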
def rows_to_sql(rows: list[dict[str, Any]], columns: list[str] | None = None) -> str:
    """Convert list of row dicts (or empty with known columns) to SQL SELECT statements joined by UNION ALL."""
    # Determine column order
    if columns is None:
        cols_union: set[str] = set()
        for row in rows:
            cols_union.update(row.keys())
        columns = sorted(cols_union)

    if not columns:
        return "SELECT NULL WHERE FALSE"  # no columns known

    if not rows:
        col_exprs = [f"NULL as {c}" for c in columns]
        return f"SELECT {', '.join(col_exprs)} WHERE 1=0"

    selects = []
    for row in rows:
        exprs = []
        for col in columns:
            v = row.get(col)
            if v is None:
                exprs.append(f"NULL as {col}")
            elif isinstance(v, str):
                # Try to detect numeric strings from CSV parsing
                # If it looks like a number, use it as-is; otherwise escape as string
                if v.isdigit() or (v.replace(".", "", 1).replace("-", "", 1).isdigit()):
                    # It's a number string from CSV - use as numeric literal
                    exprs.append(f"{v} as {col}")
                else:
                    # SQL standard: escape single quotes by doubling them
                    escaped = v.replace("'", "''")
                    exprs.append(f"'{escaped}' as {col}")
            else:
                exprs.append(f"{v} as {col}")
        selects.append(f"SELECT {', '.join(exprs)}")

    return "\nUNION ALL\n".join(selects)


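# --- Editorial illustration (not part of the released file) -----------------
# A quick sketch of what rows_to_sql() renders; the fixture values are made up.
def _demo_rows_to_sql() -> None:
    sql = rows_to_sql([{"id": 1, "name": "a"}, {"id": 2, "name": None}], columns=["id", "name"])
    # Produces:
    #   SELECT 1 as id, 'a' as name
    #   UNION ALL
    #   SELECT 2 as id, NULL as name
    assert sql.count("UNION ALL") == 1
    # With no rows but known columns, an empty relation is emitted instead:
    assert rows_to_sql([], columns=["id"]) == "SELECT NULL as id WHERE 1=0"
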
def parse_csv_fixture(csv_text: str) -> tuple[list[str], list[dict[str, Any]]]:
    """Parse a csv fixture string into (columns, rows_as_dicts)."""
    sio = StringIO(csv_text.strip("\n"))
    reader = csv.DictReader(line for line in sio if line.strip() != "")
    columns = list(reader.fieldnames) if reader.fieldnames else []
    rows = [dict(row) for row in reader]
    return columns, rows


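# --- Editorial illustration (not part of the released file) -----------------
# csv.DictReader returns every value as a string, which is why rows_to_sql()
# above tries to detect numeric-looking strings. The fixture content is made up.
def _demo_parse_csv_fixture() -> None:
    columns, rows = parse_csv_fixture("order_id,status\n1,shipped\n")
    assert columns == ["order_id", "status"]
    assert rows == [{"order_id": "1", "status": "shipped"}]
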
def is_position_in_comment(sql: str, pos: int) -> bool:
    """Check if a position in SQL is inside a comment (SQL or Jinja)."""
    # Check for line comment: is there a '--' before pos on the same line?
    line_start = sql.rfind("\n", 0, pos) + 1  # Start of current line
    line_content = sql[line_start:pos]
    if "--" in line_content:
        return True

    # Check for SQL block comment: count /* and */ before pos
    block_comment_depth = 0
    jinja_comment_depth = 0
    i = 0
    while i < pos:
        if i + 1 < len(sql):
            two_char = sql[i : i + 2]
            # SQL block comments
            if two_char == "/*":
                block_comment_depth += 1
                i += 2
                continue
            elif two_char == "*/":
                block_comment_depth -= 1
                i += 2
                continue
            # Jinja comments {# ... #}
            elif two_char == "{#":
                jinja_comment_depth += 1
                i += 2
                continue
            elif two_char == "#}":
                jinja_comment_depth -= 1
                i += 2
                continue
        i += 1

    return block_comment_depth > 0 or jinja_comment_depth > 0


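# --- Editorial illustration (not part of the released file) -----------------
# The helper only treats a position as commented when it sits behind a line
# comment, an open /* */ block, or an open {# #} Jinja comment. The example
# SQL is made up.
def _demo_is_position_in_comment() -> None:
    sql = "-- orders as (\nwith orders as (select 1)"
    assert is_position_in_comment(sql, sql.index("orders"))       # behind '--'
    assert not is_position_in_comment(sql, sql.rindex("orders"))  # real CTE
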
def replace_cte_with_mock(sql: str, cte_name: str, rows: list[dict[str, Any]], columns: list[str] | None = None) -> str:
    """Replace a CTE definition with a mocked version from fixture rows."""

    # Find the CTE definition (skip commented matches)
    pattern = rf"\b{cte_name}\s+as\s*\("
    matches = list(re.finditer(pattern, sql, re.IGNORECASE))

    if not matches:
        logger.warning(f"Could not find CTE '{cte_name}' to mock")
        return sql

    # Find first match that's not in a comment
    match = None
    for m in matches:
        if not is_position_in_comment(sql, m.start()):
            match = m
            break

    if not match:
        logger.warning(f"CTE '{cte_name}' only found in comments")
        return sql

    # Find matching closing paren
    paren_pos = sql.index("(", match.start())
    paren_count = 1
    end_pos = paren_pos + 1
    in_string = False
    string_char = None
    in_line_comment = False
    in_block_comment = False

    while end_pos < len(sql) and paren_count > 0:
        char = sql[end_pos]
        next_char = sql[end_pos + 1] if end_pos + 1 < len(sql) else ""

        # Handle line comments
        if not in_string and not in_block_comment and char == "-" and next_char == "-":
            in_line_comment = True
            end_pos += 2
            continue

        if in_line_comment:
            if char == "\n":
                in_line_comment = False
            end_pos += 1
            continue

        # Handle block comments
        if not in_string and not in_line_comment and char == "/" and next_char == "*":
            in_block_comment = True
            end_pos += 2
            continue

        if in_block_comment:
            if char == "*" and next_char == "/":
                in_block_comment = False
                end_pos += 2
            else:
                end_pos += 1
            continue

        # Handle string literals (both single and double quotes)
        if char in ('"', "'"):
            if not in_string:
                in_string = True
                string_char = char
            elif char == string_char:
                in_string = False
                string_char = None

        # Count parens only outside strings and comments
        if not in_string and not in_line_comment and not in_block_comment:
            if char == "(":
                paren_count += 1
            elif char == ")":
                paren_count -= 1

        end_pos += 1

    # Replace with mocked CTE
    mock_sql = rows_to_sql(rows, columns=columns)
    mocked_cte = f"{cte_name} AS (\n {mock_sql}\n)"

    # Replace in original SQL
    original_cte = sql[match.start() : end_pos]
    return sql.replace(original_cte, mocked_cte)


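# --- Editorial illustration (not part of the released file) -----------------
# Sketch of a CTE body being swapped for fixture rows; the SQL and fixture
# values are hypothetical.
def _demo_replace_cte_with_mock() -> None:
    sql = "with src as (select * from raw_events), final as (select * from src) select * from final"
    mocked = replace_cte_with_mock(sql, "src", [{"id": 1}], columns=["id"])
    # The body of `src` is replaced by a literal SELECT built by rows_to_sql().
    assert "SELECT 1 as id" in mocked
    assert "raw_events" not in mocked
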
def generate_cte_model(base_model_path: Path, cte_name: str, test_given: list[dict[str, Any]], output_path: Path) -> bool:
    """Generate a truncated model that selects from the target CTE.

    Extracts from start of SQL through target CTE's closing paren,
    preserving all upstream CTEs and WITH clause.

    Also applies any CTE mocks from test fixtures.
    """

    # Read original model
    sql = base_model_path.read_text()

    # Find "cte_name [AS] (" (skip commented matches)
    # AS is optional in Spark SQL/Databricks, so allow it but do not require it
    # Require whitespace before the opening paren to avoid matching function calls like name(...)
    pattern = rf"\b{re.escape(cte_name)}(?:\s+AS)?\s+\("
    matches = list(re.finditer(pattern, sql, re.IGNORECASE))

    if not matches:
        logger.error(f"Could not find CTE '{cte_name}' in {base_model_path}")
        return False

    # Find first match that's not in a comment
    match = None
    for m in matches:
        if not is_position_in_comment(sql, m.start()):
            match = m
            break

    if not match:
        logger.error(f"CTE '{cte_name}' only found in comments in {base_model_path}")
        return False

    # Position of opening paren
    paren_pos = sql.index("(", match.start())

    # Count parens to find matching closing paren
    # Track strings and comments to avoid counting parens inside them
    paren_count = 1
    i = paren_pos + 1
    in_string = False
    string_char = None
    in_line_comment = False
    in_block_comment = False

    while i < len(sql) and paren_count > 0:
        char = sql[i]
        next_char = sql[i + 1] if i + 1 < len(sql) else ""

        # Handle line comments: -- until newline
        if not in_string and not in_block_comment and char == "-" and next_char == "-":
            in_line_comment = True
            i += 2  # Skip both dashes
            continue

        if in_line_comment:
            if char == "\n":
                in_line_comment = False
            i += 1
            continue

        # Handle block comments: /* until */
        if not in_string and not in_line_comment and char == "/" and next_char == "*":
            in_block_comment = True
            i += 2  # Skip /*
            continue

        if in_block_comment:
            if char == "*" and next_char == "/":
                in_block_comment = False
                i += 2  # Skip */
            else:
                i += 1
            continue

        # Handle string literals (both single and double quotes)
        if char in ('"', "'") and (i == 0 or sql[i - 1] != "\\"):
            if not in_string:
                in_string = True
                string_char = char
            elif char == string_char:
                in_string = False
                string_char = None

        # Count parens only outside strings and comments
        if not in_string and not in_line_comment and not in_block_comment:
            if char == "(":
                paren_count += 1
            elif char == ")":
                paren_count -= 1

        i += 1

    if paren_count != 0:
        logger.error(f"Could not find matching closing paren for CTE '{cte_name}'")
        return False

    # Extract from start of SQL to closing paren (includes everything automatically)
    upstream_sql = sql[:i].rstrip()

    # Apply CTE mocks from test fixtures
    # Convention: any given with input starting with '::' denotes a CTE mock
    for given in test_given:
        inp = given.get("input")
        if isinstance(inp, str) and inp.startswith("::"):
            mock_cte_name = inp.lstrip(":")
            fmt = given.get("format", "dict")
            if fmt == "csv":
                columns, mock_rows = parse_csv_fixture(given.get("rows", ""))
            else:
                columns, mock_rows = None, given.get("rows", [])
            logger.debug(f"Mocking CTE: {mock_cte_name}")
            upstream_sql = replace_cte_with_mock(upstream_sql, mock_cte_name, mock_rows, columns)

    # Add final SELECT
    generated_sql = f"{upstream_sql}\n\nselect * from {cte_name}"

    # Add SQLFluff disable directive at the top
    final_sql = f"-- sqlfluff:disable\n{generated_sql}"

    # Write generated model
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(final_sql)
    logger.debug(f"Generated CTE model: {output_path}")

    return True


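# --- Editorial illustration (not part of the released file) -----------------
# End-to-end sketch of the truncation: everything up to the target CTE's
# closing paren is kept and a trailing `select * from <cte>` is appended.
# Paths and SQL are hypothetical.
def _demo_generate_cte_model() -> None:
    import tempfile

    tmp = Path(tempfile.mkdtemp())
    model = tmp / "fct_orders.sql"
    model.write_text(
        "with orders as (select * from {{ ref('stg_orders') }}),\n"
        "enriched as (select *, amount * 2 as doubled from orders)\n"
        "select * from enriched"
    )
    out = tmp / "fct_orders__orders.sql"
    assert generate_cte_model(model, "orders", [], out)
    generated = out.read_text()
    assert generated.startswith("-- sqlfluff:disable")
    assert generated.rstrip().endswith("select * from orders")
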
def generate_cte_test(
    test_yaml_path: Path,
    test_name: str,
    base_model: str,
    cte_name: str,
    generated_model: str,
    gen_model_path: Path,
    output_path: Path,
) -> bool:
    """Generate an enabled test file targeting the generated model.

    Auto-detects missing inputs and adds empty fixtures for them.
    """

    # Parse original YAML
    with open(test_yaml_path) as f:
        test_data = yaml.safe_load(f)

    # Find the test
    target_test = None
    for test in test_data.get("unit_tests", []):
        if test["name"] == test_name:
            target_test = test.copy()
            break

    if not target_test:
        logger.error(f"Could not find test '{test_name}' in {test_yaml_path}")
        return False

    # Read generated model to find all refs/sources
    generated_sql = gen_model_path.read_text()

    # Find all ref() calls
    ref_pattern = r"ref\(['\"](\w+)['\"]\)"
    refs = re.findall(ref_pattern, generated_sql)

    # Find all source() calls
    source_pattern = r"source\(['\"](\w+)['\"],\s*['\"](\w+)['\"]\)"
    sources = re.findall(source_pattern, generated_sql)

    # Build set of actually used inputs from the final model
    actually_used = set()
    for ref_name in refs:
        actually_used.add(f"ref('{ref_name}')")
    for source_name, table_name in sources:
        actually_used.add(f"source('{source_name}', '{table_name}')")

    # Filter given to keep only actually used inputs
    clean_given = []
    for given in target_test.get("given", []):
        if "input" in given:
            input_ref = given["input"]
            if input_ref in actually_used:
                clean_given.append(given)

    target_test["given"] = clean_given
    existing_inputs = set()
    for given in target_test.get("given", []):
        existing_inputs.add(given.get("input", ""))

    # Auto-add missing ref inputs as empty fixtures
    for ref_name in refs:
        ref_input = f"ref('{ref_name}')"
        if ref_input not in existing_inputs:
            logger.debug(f"Auto-adding empty fixture: {ref_input}")
            target_test["given"].append({"input": ref_input, "rows": []})
            existing_inputs.add(ref_input)

    # Auto-add missing source inputs as empty fixtures
    for source_name, table_name in sources:
        source_input = f"source('{source_name}', '{table_name}')"
        if source_input not in existing_inputs:
            logger.debug(f"Auto-adding empty fixture: {source_input}")
            target_test["given"].append({"input": source_input, "rows": []})
            existing_inputs.add(source_input)

    # Modify: update model, and ensure generated test is enabled and schema-clean
    target_test["model"] = generated_model
    # Drop any config (enabled/cte_test) from the generated test to avoid skips and non-standard fields
    if "config" in target_test:
        del target_test["config"]

    # Build new YAML
    output_data = {"version": 2, "unit_tests": [target_test]}

    # Write YAML with proper indentation for lists
    output_path.parent.mkdir(parents=True, exist_ok=True)

    class IndentDumper(yaml.SafeDumper):
        def increase_indent(self, flow: bool = False, indentless: bool = False) -> int | None:  # type: ignore[override]
            return super(IndentDumper, self).increase_indent(flow, False)

    with open(output_path, "w") as f:
        yaml.dump(output_data, f, Dumper=IndentDumper, default_flow_style=False, sort_keys=False, width=120, indent=2)

    logger.debug(f"Generated CTE test: {output_path}")
    return True


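# --- Editorial illustration (not part of the released file) -----------------
# The fixture auto-detection above is driven by the two regexes copied here;
# a quick sketch of what they pull out of a generated model (SQL is made up).
def _demo_ref_source_detection() -> None:
    sql = "select * from {{ ref('stg_orders') }} join {{ source('raw', 'payments') }} using (order_id)"
    assert re.findall(r"ref\(['\"](\w+)['\"]\)", sql) == ["stg_orders"]
    assert re.findall(r"source\(['\"](\w+)['\"],\s*['\"](\w+)['\"]\)", sql) == [("raw", "payments")]
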
def _load_project_config(project_dir: Path) -> dict[str, Any]:
    """Load dbt_project.yml configuration.

    Args:
        project_dir: Path to dbt project root

    Returns:
        Dict with project configuration
    """
    project_file = project_dir / "dbt_project.yml"
    if not project_file.exists():
        logger.warning(f"dbt_project.yml not found at {project_file}, using defaults")
        return {}

    with open(project_file) as f:
        config = yaml.safe_load(f)

    return config or {}


def generate_cte_tests(project_dir: Path) -> int:
    """Scan project and generate all CTE tests.

    Args:
        project_dir: Path to dbt project root

    Returns:
        Number of CTE tests generated
    """
    logger.info("Generating CTE tests...")

    # Load project configuration
    config = _load_project_config(project_dir)

    # Get configured paths (use first element if multiple)
    test_paths = config.get("test-paths", ["tests"])
    model_paths = config.get("model-paths", ["models"])

    # For unit tests, check both test-paths and unit_tests directory
    # (unit_tests is a common convention even if not in test-paths)
    unit_tests_dirs = []
    for test_path in test_paths:
        unit_tests_dirs.append(project_dir / test_path)
    # Also check for unit_tests directory as fallback
    if (project_dir / "unit_tests").exists():
        unit_tests_dirs.append(project_dir / "unit_tests")

    # Use first model path for generated models
    models_dir = project_dir / model_paths[0]

    # Output directories (generated files go in first model path and preferred test path)
    gen_models_dir = project_dir / model_paths[0] / "marts" / "__cte_tests"

    # Determine output test directory:
    # - If unit_tests exists, use it (common convention)
    # - Otherwise use first configured test path
    if (project_dir / "unit_tests").exists():
        gen_tests_dir = project_dir / "unit_tests" / "marts" / "__cte_tests"
    else:
        gen_tests_dir = project_dir / test_paths[0] / "marts" / "__cte_tests"

    # Clean up old generated files
    if gen_models_dir.exists():
        shutil.rmtree(gen_models_dir)
        logger.debug(f"Cleaned up {gen_models_dir}")
    if gen_tests_dir.exists():
        shutil.rmtree(gen_tests_dir)
        logger.debug(f"Cleaned up {gen_tests_dir}")

    # Discover all unit test YAML files from all test directories
    test_files = []
    for unit_tests_dir in unit_tests_dirs:
        if unit_tests_dir.exists():
            test_files.extend(list(unit_tests_dir.rglob("*_unit_tests.yml")))
    logger.debug(f"Found {len(test_files)} unit test files")

    cte_tests_found = 0

    # Process each test file
    for test_file in test_files:
        # Read test YAML
        with open(test_file) as f:
            test_yaml = yaml.safe_load(f)

        # Find CTE tests (marked with cte_test: true config)
        for test in test_yaml.get("unit_tests", []):
            config = test.get("config", {})
            if config.get("cte_test") is True:
                cte_tests_found += 1
                test_name = test["name"]
                model_spec = test["model"]
                base_model, cte_name = model_spec.split("::")

                logger.debug(f"Found CTE test: {test_name} (model: {base_model}, CTE: {cte_name})")

                # Generate short hash from test name for unique filenames
                test_hash = hashlib.md5(test_name.encode()).hexdigest()[:6]

                # Determine model file path from test file structure
                # Find which test directory this file belongs to
                test_dir = None
                for candidate_dir in unit_tests_dirs:
                    try:
                        # Check if test_file is relative to this directory
                        relative_path = test_file.relative_to(candidate_dir)
                        test_dir = candidate_dir
                        break
                    except ValueError:
                        # Not relative to this directory, try next
                        continue

                if not test_dir:
                    logger.warning(f"Could not determine test directory for {test_file}")
                    continue

                # Assume tests mirror model structure: <test_dir>/marts/X.yml -> models/marts/X.sql
                relative_path = test_file.relative_to(test_dir)
                model_subdir = relative_path.parent
                model_file = models_dir / model_subdir / f"{base_model}.sql"

                if not model_file.exists():
                    logger.warning(f"Model file not found: {model_file}")
                    continue

                # Generate model name with hash suffix
                gen_model_name = f"{base_model}__{cte_name}__{test_hash}"
                gen_model_path = gen_models_dir / f"{gen_model_name}.sql"
                gen_test_path = gen_tests_dir / f"{gen_model_name}_unit_tests.yml"

                # Generate files
                if generate_cte_model(model_file, cte_name, test.get("given", []), gen_model_path):
                    if generate_cte_test(test_file, test_name, base_model, cte_name, gen_model_name, gen_model_path, gen_test_path):
                        logger.debug(f"Generated CTE test: {gen_model_name}")
                    else:
                        logger.error(f"Failed to generate CTE test YAML for {test_name}")
                else:
                    logger.error(f"Failed to generate CTE model for {test_name}")

    if cte_tests_found > 0:
        logger.info(f"Generated {cte_tests_found} CTE test(s)")
    else:
        logger.debug("No CTE tests found (tests with config: cte_test: true)")

    return cte_tests_found


def cleanup_cte_tests(project_dir: Path) -> None:
    """Clean up all __cte_tests directories.

    Recursively searches model paths for __cte_tests directories and removes them.

    Args:
        project_dir: Path to dbt project root
    """
    logger.debug("Cleaning up CTE test files...")

    # Load project configuration
    config = _load_project_config(project_dir)
    model_paths = config.get("model-paths", ["models"])

    # Find and remove all __cte_tests directories
    removed_count = 0
    for model_path in model_paths:
        search_dir = project_dir / model_path
        if not search_dir.exists():
            continue

        # Find all __cte_tests directories recursively
        for cte_tests_dir in search_dir.rglob("__cte_tests"):
            if cte_tests_dir.is_dir():
                shutil.rmtree(cte_tests_dir)
                logger.debug(f"Removed {cte_tests_dir}")
                removed_count += 1

    if removed_count > 0:
        logger.info(f"Cleaned up {removed_count} __cte_tests director{'y' if removed_count == 1 else 'ies'}")
    else:
        logger.debug("No __cte_tests directories to clean up")
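

# --- Editorial illustration (not part of the released file) -----------------
# Hypothetical end-to-end call. generate_cte_tests() expects unit-test YAML
# files named *_unit_tests.yml under the configured test paths (or unit_tests/),
# mirroring the model directory layout, e.g.:
#
#   my_project/models/marts/fct_orders.sql
#   my_project/unit_tests/marts/fct_orders_unit_tests.yml  (cte_test: true tests)
def _demo_generate_and_cleanup(project_dir: Path) -> None:
    generated = generate_cte_tests(project_dir)
    logger.info("generated %d isolated CTE test(s)", generated)
    # ...build and test the generated __cte_tests models with dbt here...
    cleanup_cte_tests(project_dir)  # removes every models/**/__cte_tests directory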
@@ -0,0 +1 @@
"""dbt utilities for dbt-core-mcp."""