iflow-mcp_niclasolofsson-dbt-core-mcp 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbt_core_mcp/__init__.py +18 -0
- dbt_core_mcp/__main__.py +436 -0
- dbt_core_mcp/context.py +459 -0
- dbt_core_mcp/cte_generator.py +601 -0
- dbt_core_mcp/dbt/__init__.py +1 -0
- dbt_core_mcp/dbt/bridge_runner.py +1361 -0
- dbt_core_mcp/dbt/manifest.py +781 -0
- dbt_core_mcp/dbt/runner.py +67 -0
- dbt_core_mcp/dependencies.py +50 -0
- dbt_core_mcp/server.py +381 -0
- dbt_core_mcp/tools/__init__.py +77 -0
- dbt_core_mcp/tools/analyze_impact.py +78 -0
- dbt_core_mcp/tools/build_models.py +190 -0
- dbt_core_mcp/tools/demo/__init__.py +1 -0
- dbt_core_mcp/tools/demo/hello.html +267 -0
- dbt_core_mcp/tools/demo/ui_demo.py +41 -0
- dbt_core_mcp/tools/get_column_lineage.py +1988 -0
- dbt_core_mcp/tools/get_lineage.py +89 -0
- dbt_core_mcp/tools/get_project_info.py +96 -0
- dbt_core_mcp/tools/get_resource_info.py +134 -0
- dbt_core_mcp/tools/install_deps.py +102 -0
- dbt_core_mcp/tools/list_resources.py +84 -0
- dbt_core_mcp/tools/load_seeds.py +179 -0
- dbt_core_mcp/tools/query_database.py +459 -0
- dbt_core_mcp/tools/run_models.py +234 -0
- dbt_core_mcp/tools/snapshot_models.py +120 -0
- dbt_core_mcp/tools/test_models.py +238 -0
- dbt_core_mcp/utils/__init__.py +1 -0
- dbt_core_mcp/utils/env_detector.py +186 -0
- dbt_core_mcp/utils/process_check.py +130 -0
- dbt_core_mcp/utils/tool_utils.py +411 -0
- dbt_core_mcp/utils/warehouse_adapter.py +82 -0
- dbt_core_mcp/utils/warehouse_databricks.py +297 -0
- iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/METADATA +784 -0
- iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/RECORD +38 -0
- iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/WHEEL +4 -0
- iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/entry_points.txt +2 -0
- iflow_mcp_niclasolofsson_dbt_core_mcp-1.7.0.dist-info/licenses/LICENSE +21 -0
dbt_core_mcp/__init__.py
ADDED
@@ -0,0 +1,18 @@
"""
dbt Core MCP Server.

This package provides an MCP server for interacting with dbt projects
via the Model Context Protocol.
"""

from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as pkg_version

from .server import create_server

try:
    __version__ = pkg_version("dbt-core-mcp")
except PackageNotFoundError:  # pragma: no cover - when not installed
    __version__ = "0.0.0"

__all__ = ["create_server", "__version__"]
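A minimal embedding sketch based only on the calls that __main__.py below actually makes (create_server(project_dir=..., timeout=...) and a blocking server.run(stateless=...)); any behavior beyond that surface is an assumption:

# Embedding sketch -- assumes only the API surface used by __main__.py below.
from dbt_core_mcp import __version__, create_server

print(f"dbt-core-mcp {__version__}")

# project_dir=None lets the server auto-detect the dbt project; timeout=None
# disables the dbt command timeout (mirroring the CLI defaults).
server = create_server(project_dir=None, timeout=None)
server.run(stateless=False)  # blocks until interrupted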
dbt_core_mcp/__main__.py
ADDED
@@ -0,0 +1,436 @@
#!/usr/bin/env python3
"""
Main entry point for the dbt Core MCP Server.

This script provides the command-line interface to run the MCP server
for interacting with dbt projects.
"""

import argparse
import asyncio
import contextlib
import logging
import os
import signal
import sys
from pathlib import Path
from typing import Any

from .server import create_server

# Platform-specific imports for process management
if sys.platform == "win32":
    import ctypes

# Unix-specific imports
if sys.platform != "win32":
    import ctypes.util


def setup_logging(debug: bool = False) -> None:
    """Set up logging configuration."""
    import tempfile

    level = logging.DEBUG if debug else logging.INFO

    # Simpler format for stderr (VS Code adds timestamps)
    stderr_formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
    # Full format for file logging
    file_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(stderr_formatter)

    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.addHandler(stderr_handler)

    # Suppress FastMCP's internal INFO logs unless debug is enabled
    fastmcp_level = logging.DEBUG if debug else logging.WARNING
    logging.getLogger("fastmcp").setLevel(fastmcp_level)
    logging.getLogger("fakeredis").setLevel(logging.WARNING)
    logging.getLogger("docket").setLevel(logging.WARNING)

    # Add file logging
    try:
        temp_log_dir = os.path.join(tempfile.gettempdir(), "dbt_core_mcp_logs")
        os.makedirs(temp_log_dir, exist_ok=True)
        log_path = os.path.join(temp_log_dir, "dbt_core_mcp.log")

        file_handler = logging.FileHandler(log_path, encoding="utf-8")
        file_handler.setFormatter(file_formatter)
        root_logger.addHandler(file_handler)

        print(f"[dbt Core MCP] Log file: {log_path}", file=sys.stderr)
    except Exception:
        pass


def parse_arguments() -> argparse.Namespace:
    """Parse command-line arguments."""
    from . import __version__

    parser = argparse.ArgumentParser(
        description="dbt Core MCP Server - Interact with dbt projects via MCP",
        prog="dbt-core-mcp",
    )

    parser.add_argument("--debug", action="store_true", help="Enable debug logging")

    parser.add_argument(
        "--project-dir",
        type=str,
        help="Optional: Path to dbt project directory (auto-detects from workspace if not provided)",
    )

    parser.add_argument(
        "--dbt-command-timeout",
        type=float,
        default=None,
        help="Timeout in seconds for dbt commands (default: None for no timeout; 0 or negative values also mean no timeout)",
    )

    parser.add_argument(
        "--reload",
        action="store_true",
        help="Enable auto-reload on file changes (development mode)",
    )

    parser.add_argument(
        "--reload-dir",
        type=str,
        action="append",
        help="Directories to watch for changes (default: src/dbt_core_mcp)",
    )

    parser.add_argument(
        "--stateless",
        action="store_true",
        help="Enable stateless mode (automatically enabled with --reload)",
    )

    parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")

    return parser.parse_args()


def _source_file_filter(change: object, path: str) -> bool:
    """Filter for source files (Python and HTML)."""
    # Test reload - iteration 6 (checking loop entry)
    return path.endswith(".py") or path.endswith(".html")


def _setup_windows_job_object() -> Any:
    """Create a Windows job object that kills all processes when closed.

    Returns:
        Job handle (or None on error)
    """
    if sys.platform != "win32":
        return None

    try:
        # Windows API constants
        JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x2000
        JOB_OBJECT_EXTENDED_LIMIT_INFORMATION = 9

        # Create job object
        job = ctypes.windll.kernel32.CreateJobObjectW(None, None)
        if not job:
            logging.warning("Failed to create Windows job object")
            return None

        # Use a simple byte buffer approach for the job info structure
        # JOBOBJECT_EXTENDED_LIMIT_INFORMATION is 144 bytes on 64-bit Windows
        job_info = ctypes.create_string_buffer(144)

        # LimitFlags is at offset 16 in JOBOBJECT_BASIC_LIMIT_INFORMATION
        # which is the first member of JOBOBJECT_EXTENDED_LIMIT_INFORMATION
        limit_flags_offset = 16
        ctypes.c_ulong.from_buffer(job_info, limit_flags_offset).value = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE

        result = ctypes.windll.kernel32.SetInformationJobObject(
            job,
            JOB_OBJECT_EXTENDED_LIMIT_INFORMATION,
            ctypes.byref(job_info),
            len(job_info),
        )

        if not result:
            error_code = ctypes.windll.kernel32.GetLastError()
            logging.warning(f"Failed to set job object information (error code: {error_code})")
            ctypes.windll.kernel32.CloseHandle(job)
            return None

        logging.debug("Created Windows job object for automatic child process cleanup")
        return job
    except Exception as e:
        logging.warning(f"Error setting up Windows job object: {e}")
        return None


def _add_process_to_job(job: Any, process_handle: int) -> bool:
    """Add a process to a Windows job object.

    Args:
        job: Job object handle
        process_handle: Process handle

    Returns:
        True if successful
    """
    if not job or sys.platform != "win32":
        return False

    try:
        result = ctypes.windll.kernel32.AssignProcessToJobObject(job, process_handle)
        if result:
            logging.debug("Added process to job object")
            return True
        else:
            logging.warning("Failed to add process to job object")
            return False
    except Exception as e:
        logging.warning(f"Error adding process to job: {e}")
        return False


def _set_pdeathsig_preexec() -> None:
    """Preexec function for Unix to set parent death signal.

    This function is called in the child process before exec.
    It sets the process to receive SIGKILL when the parent dies.
    """
    if sys.platform == "win32":
        return

    try:
        # Load libc
        libc_name = ctypes.util.find_library("c")
        if not libc_name:
            return

        libc = ctypes.CDLL(libc_name)

        # PR_SET_PDEATHSIG = 1, SIGKILL = 9
        PR_SET_PDEATHSIG = 1
        SIGKILL = 9

        # Set parent death signal
        libc.prctl(PR_SET_PDEATHSIG, SIGKILL)
    except Exception:
        # Silently fail - this is a best-effort cleanup mechanism
        pass


async def _terminate_process(process: asyncio.subprocess.Process) -> None:
    """Terminate a subprocess gracefully."""
    try:
        process.terminate()
        await asyncio.wait_for(process.wait(), timeout=5.0)
    except asyncio.TimeoutError:
        process.kill()
        await process.wait()


async def run_with_reload(
    cmd: list[str],
    reload_dirs: list[Path] | None = None,
) -> None:
    """Run a command with file watching and auto-reload.

    Args:
        cmd: Command to run as subprocess
        reload_dirs: Directories to watch for changes (default: src/dbt_core_mcp)
    """
    from watchfiles import awatch

    # Default to watching src/dbt_core_mcp directory
    if reload_dirs is None:
        src_dir = Path(__file__).parent
        watch_paths = [src_dir]
    else:
        watch_paths = reload_dirs

    process: asyncio.subprocess.Process | None = None
    first_run = True

    # Create Windows job object for automatic child process cleanup
    job = _setup_windows_job_object() if sys.platform == "win32" else None

    logging.info("Reload mode enabled - watching for file changes...")
    for watch_path in watch_paths:
        logging.info(f" Watching: {watch_path}")

    # Handle SIGTERM/SIGINT gracefully with proper asyncio integration
    shutdown_event = asyncio.Event()
    loop = asyncio.get_running_loop()

    def signal_handler() -> None:
        logging.info("Received shutdown signal, stopping...")
        shutdown_event.set()

    # Windows doesn't support add_signal_handler
    if sys.platform != "win32":
        loop.add_signal_handler(signal.SIGTERM, signal_handler)
        loop.add_signal_handler(signal.SIGINT, signal_handler)

    try:
        while not shutdown_event.is_set():
            # Start the subprocess with platform-specific cleanup mechanisms
            if sys.platform == "win32":
                # On Windows, start process and add to job object
                process = await asyncio.create_subprocess_exec(
                    *cmd,
                    stdin=None,
                    stdout=None,
                    stderr=None,
                )
                # Add to job object for automatic cleanup
                if job and process.pid:
                    # Get process handle (Windows-specific)
                    PROCESS_ALL_ACCESS = 0x1F0FFF
                    process_handle = ctypes.windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, False, process.pid)
                    if process_handle:
                        _add_process_to_job(job, process_handle)
                        ctypes.windll.kernel32.CloseHandle(process_handle)
                    else:
                        logging.warning(f"Failed to open process handle for PID {process.pid}")
            else:
                # On Unix, use prctl to set parent death signal
                process = await asyncio.create_subprocess_exec(
                    *cmd,
                    stdin=None,
                    stdout=None,
                    stderr=None,
                    preexec_fn=_set_pdeathsig_preexec,
                )

            if first_run:
                logging.info("Server started - watching for changes...")
                first_run = False

            # Watch for either: file changes OR process death
            async def watch_for_changes() -> set[Any]:
                return await anext(aiter(awatch(*watch_paths, watch_filter=_source_file_filter)))

            watch_task = asyncio.create_task(watch_for_changes())
            wait_task = asyncio.create_task(process.wait())
            shutdown_task = asyncio.create_task(shutdown_event.wait())

            done, pending = await asyncio.wait(
                [watch_task, wait_task, shutdown_task],
                return_when=asyncio.FIRST_COMPLETED,
            )

            for task in pending:
                task.cancel()
                with contextlib.suppress(asyncio.CancelledError):
                    await task

            if shutdown_task in done:
                # User requested shutdown
                break

            if wait_task in done:
                # Server died on its own - wait for file change before restart
                code = wait_task.result()
                if code != 0:
                    logging.error(f"Server exited with code {code}, waiting for file change...")
                else:
                    logging.info("Server exited, waiting for file change...")

                # Wait for file change or shutdown (avoid hot loop on crash)
                async def watch_for_changes() -> set[Any]:
                    return await anext(aiter(awatch(*watch_paths, watch_filter=_source_file_filter)))

                watch_task = asyncio.create_task(watch_for_changes())
                shutdown_task = asyncio.create_task(shutdown_event.wait())
                done, pending = await asyncio.wait(
                    [watch_task, shutdown_task],
                    return_when=asyncio.FIRST_COMPLETED,
                )
                for task in pending:
                    task.cancel()
                    with contextlib.suppress(asyncio.CancelledError):
                        await task
                if shutdown_task in done:
                    break
                logging.info("Detected changes, restarting...")
            else:
                # File changed - restart server
                changes = watch_task.result()
                logging.info(f"Detected changes in {len(changes)} file(s), restarting...")
                await _terminate_process(process)

    except KeyboardInterrupt:
        # Handle Ctrl+C on Windows (where add_signal_handler isn't available)
        logging.info("Received shutdown signal, stopping...")

    finally:
        # Clean up signal handlers
        if sys.platform != "win32":
            loop.remove_signal_handler(signal.SIGTERM)
            loop.remove_signal_handler(signal.SIGINT)
        if process and process.returncode is None:
            await _terminate_process(process)
        # Clean up Windows job object
        if job and sys.platform == "win32":
            try:
                ctypes.windll.kernel32.CloseHandle(job)
                logging.debug("Closed Windows job object")
            except Exception:
                pass


def main() -> None:
    """Main entry point."""
    args = parse_arguments()
    setup_logging(args.debug)

    from . import __version__

    logging.info(f"Running version {__version__}")

    # Handle reload mode
    if args.reload:
        # Build command to run without reload flag (prevent infinite spawning)
        # Always include --stateless for seamless restarts
        cmd = [sys.executable, "-m", "dbt_core_mcp", "--stateless"]
        if args.debug:
            cmd.append("--debug")
        if args.project_dir:
            cmd.extend(["--project-dir", args.project_dir])
        if args.dbt_command_timeout is not None:
            cmd.extend(["--dbt-command-timeout", str(args.dbt_command_timeout)])

        # Parse reload directories
        reload_dirs = None
        if args.reload_dir:
            reload_dirs = [Path(d).resolve() for d in args.reload_dir]

        # Run with reload
        try:
            asyncio.run(run_with_reload(cmd, reload_dirs))
        except KeyboardInterrupt:
            logging.info("Server stopped by user")
        return

    # Normal run mode (no reload)
    # Pass project_dir if specified, otherwise let server auto-detect from workspace roots
    # Treat timeout <= 0 as None (no timeout)
    timeout = args.dbt_command_timeout if args.dbt_command_timeout and args.dbt_command_timeout > 0 else None
    server = create_server(project_dir=args.project_dir, timeout=timeout)

    try:
        # Enable stateless mode if requested (automatically enabled by --reload)
        server.run(stateless=args.stateless)
    except KeyboardInterrupt:
        logging.info("Server stopped by user")
    except Exception as e:
        logging.error(f"Server error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
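For reference, a quick sanity check of the CLI surface defined by parse_arguments() above; this is an illustrative sketch that simulates argv in-process rather than a real shell invocation, and the project path it uses is hypothetical:

# Hypothetical smoke test of the argument parsing and timeout normalization above.
import sys

from dbt_core_mcp.__main__ import parse_arguments

sys.argv = [
    "dbt-core-mcp",
    "--project-dir", "./my_dbt_project",  # hypothetical path
    "--dbt-command-timeout", "600",
    "--debug",
]
args = parse_arguments()
assert args.debug is True
assert args.project_dir == "./my_dbt_project"

# main() treats 0 or negative timeouts as "no timeout" (None); positive values pass through.
timeout = args.dbt_command_timeout if args.dbt_command_timeout and args.dbt_command_timeout > 0 else None
print(timeout)  # 600.0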