databricks-advanced-mcp 0.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. databricks_advanced_mcp-0.0.2/.env.example +20 -0
  2. databricks_advanced_mcp-0.0.2/.github/workflows/workflow.yml +32 -0
  3. databricks_advanced_mcp-0.0.2/.gitignore +26 -0
  4. databricks_advanced_mcp-0.0.2/LICENSE +21 -0
  5. databricks_advanced_mcp-0.0.2/PKG-INFO +346 -0
  6. databricks_advanced_mcp-0.0.2/README.md +314 -0
  7. databricks_advanced_mcp-0.0.2/infra/INSTALL.md +451 -0
  8. databricks_advanced_mcp-0.0.2/infra/deploy.ps1 +193 -0
  9. databricks_advanced_mcp-0.0.2/infra/main.bicep +62 -0
  10. databricks_advanced_mcp-0.0.2/infra/main.parameters.json +18 -0
  11. databricks_advanced_mcp-0.0.2/pyproject.toml +68 -0
  12. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/__init__.py +7 -0
  13. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/client.py +31 -0
  14. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/config.py +42 -0
  15. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/graph/__init__.py +1 -0
  16. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/graph/builder.py +392 -0
  17. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/graph/cache.py +106 -0
  18. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/graph/models.py +209 -0
  19. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/parsers/__init__.py +1 -0
  20. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/parsers/dlt_parser.py +172 -0
  21. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/parsers/notebook_parser.py +257 -0
  22. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/parsers/sql_parser.py +185 -0
  23. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/reviewers/__init__.py +1 -0
  24. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/reviewers/performance.py +210 -0
  25. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/reviewers/standards.py +183 -0
  26. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/reviewers/suggestions.py +187 -0
  27. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/server.py +31 -0
  28. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/__init__.py +33 -0
  29. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/dependency_scanner.py +375 -0
  30. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/impact_analysis.py +408 -0
  31. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/job_pipeline_ops.py +431 -0
  32. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/notebook_reviewer.py +116 -0
  33. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/sql_executor.py +97 -0
  34. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/table_info.py +245 -0
  35. databricks_advanced_mcp-0.0.2/src/databricks_advanced_mcp/tools/workspace_listing.py +154 -0
  36. databricks_advanced_mcp-0.0.2/tests/conftest.py +170 -0
  37. databricks_advanced_mcp-0.0.2/tests/test_graph.py +105 -0
  38. databricks_advanced_mcp-0.0.2/tests/test_impact_analysis.py +142 -0
  39. databricks_advanced_mcp-0.0.2/tests/test_notebook_parser.py +139 -0
  40. databricks_advanced_mcp-0.0.2/tests/test_reviewers.py +113 -0
  41. databricks_advanced_mcp-0.0.2/tests/test_spec_compliance.py +608 -0
  42. databricks_advanced_mcp-0.0.2/tests/test_sql_parser.py +105 -0
  43. databricks_advanced_mcp-0.0.2/tests/test_tools_integration.py +100 -0
  44. databricks_advanced_mcp-0.0.2/tests/test_workspace_listing.py +314 -0
  45. databricks_advanced_mcp-0.0.2/uv.lock +1749 -0
databricks_advanced_mcp-0.0.2/.env.example
@@ -0,0 +1,20 @@
+ # Databricks Configuration (Required)
+ # Azure Databricks: https://adb-xxxx.azuredatabricks.net
+ # Databricks on AWS: https://dbc-xxxx.cloud.databricks.com
+ DATABRICKS_HOST=https://your-workspace.azuredatabricks.net
+
+ # Authentication — provide ONE of the following:
+ DATABRICKS_TOKEN=dapi_your_personal_access_token
+ # OR use Azure CLI / managed identity (no env vars needed if already configured)
+ # ARM_CLIENT_ID=your-service-principal-client-id
+ # ARM_TENANT_ID=your-azure-tenant-id
+ # ARM_CLIENT_SECRET=your-service-principal-secret
+
+ # SQL Warehouse (Required for SQL execution tools)
+ DATABRICKS_WAREHOUSE_ID=your-sql-warehouse-id
+
+ # Optional — defaults for unqualified table names
+ # Azure Databricks: typically "main"
+ # Databricks on AWS/GCP: typically "workspace"
+ DATABRICKS_CATALOG=main
+ DATABRICKS_SCHEMA=default
databricks_advanced_mcp-0.0.2/.github/workflows/workflow.yml
@@ -0,0 +1,32 @@
+ name: Publish to PyPI
+
+ on:
+   release:
+     types: [published]
+
+ permissions:
+   id-token: write
+
+ jobs:
+   pypi-publish:
+     name: Upload release to PyPI
+     runs-on: ubuntu-latest
+     environment:
+       name: pypi
+       url: https://pypi.org/p/databricks-advanced-mcp
+     steps:
+       - uses: actions/checkout@v4
+
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: "3.11"
+
+       - name: Install build dependencies
+         run: pip install build
+
+       - name: Build package
+         run: python -m build
+
+       - name: Publish to PyPI
+         uses: pypa/gh-action-pypi-publish@release/v1
databricks_advanced_mcp-0.0.2/.gitignore
@@ -0,0 +1,26 @@
+ # Environment
+ .env
+ .env_*
+ .venv/
+
+ # Python
+ __pycache__/
+ *.pyc
+ *.pyo
+ *.egg-info/
+ dist/
+ build/
+
+ # Testing
+ .pytest_cache/
+ .coverage
+ htmlcov/
+
+ # IDE
+ .idea/
+ *.swp
+ *.swo
+
+ # OS
+ .DS_Store
+ Thumbs.db
databricks_advanced_mcp-0.0.2/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Henry Bravo
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
databricks_advanced_mcp-0.0.2/PKG-INFO
@@ -0,0 +1,346 @@
+ Metadata-Version: 2.4
+ Name: databricks-advanced-mcp
+ Version: 0.0.2
+ Summary: Advanced MCP server for Databricks workspace intelligence — dependency scanning, impact analysis, notebook review, and job/pipeline operations.
+ Project-URL: Homepage, https://github.com/henrybravo/databricks-advanced-mcp-server
+ Project-URL: Repository, https://github.com/henrybravo/databricks-advanced-mcp-server
+ Project-URL: Issues, https://github.com/henrybravo/databricks-advanced-mcp-server/issues
+ Author: Henry Bravo
+ License: MIT
+ License-File: LICENSE
+ Keywords: aws-databricks,azure-databricks,claude,copilot,databricks,databricks-cloud,fastmcp,mcp,mcp-server
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.11
+ Requires-Dist: databricks-sdk>=0.30.0
+ Requires-Dist: fastmcp>=2.0.0
+ Requires-Dist: networkx>=3.0
+ Requires-Dist: pydantic-settings>=2.0.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: sqlglot>=25.0.0
+ Provides-Extra: dev
+ Requires-Dist: mypy>=1.10; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
+ Requires-Dist: pytest>=8.0; extra == 'dev'
+ Requires-Dist: ruff>=0.5.0; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # Databricks Advanced MCP Server
+
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11%2B-blue.svg)](https://www.python.org/downloads/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
+ [![MCP](https://img.shields.io/badge/MCP-compatible-purple.svg)](https://modelcontextprotocol.io)
+
+ An advanced [Model Context Protocol (MCP)](https://modelcontextprotocol.io) server that gives AI assistants deep visibility into your Databricks workspace: dependency scanning, impact analysis, notebook review, job/pipeline operations, SQL execution, and table metadata inspection.
+
+ ## Features
+
+ | Domain | What it does |
+ |---|---|
+ | **SQL Execution** | Run SQL queries against Databricks SQL warehouses with configurable result limits |
+ | **Table Information** | Inspect table metadata, schemas, column details, row counts, and storage info |
+ | **Dependency Scanning** | Scan notebooks, jobs, and DLT pipelines to build a workspace dependency graph (DAG) |
+ | **Impact Analysis** | Predict downstream breakage from column drops, schema changes, or pipeline failures |
+ | **Notebook Review** | Detect performance anti-patterns and coding-standard violations, and suggest optimizations |
+ | **Job & Pipeline Ops** | List jobs/pipelines, get run status with error diagnostics, trigger reruns |
+
+ ## Quick Start
+
+ ### Prerequisites
+
+ - **Python 3.11+**
+ - **[uv](https://docs.astral.sh/uv/)** — fast Python package manager
+ - A **Databricks workspace** with a SQL warehouse
+ - A Databricks **personal access token**
+
+ > **Other auth methods:** The Databricks SDK supports [unified authentication](https://docs.databricks.com/en/dev-tools/auth/unified-auth.html) — if you don't set `DATABRICKS_TOKEN`, it will fall back to Azure CLI, managed identity, or `.databrickscfg`. The `.env` setup below uses a PAT for simplicity.
+ >
+ > **Don't have a Databricks workspace yet?** See [`infra/INSTALL.md`](infra/INSTALL.md) for a one-command Azure deployment using Bicep.
+
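+ For a quick credentials check outside the server, the SDK itself can be smoke-tested (a minimal sketch; `WorkspaceClient` reads the same environment variables):
+
+ ```python
+ from databricks.sdk import WorkspaceClient
+
+ # With DATABRICKS_HOST and DATABRICKS_TOKEN set, this uses the PAT;
+ # with no token set, the SDK falls back to Azure CLI, managed identity,
+ # or ~/.databrickscfg (unified authentication).
+ w = WorkspaceClient()
+ print(w.current_user.me().user_name)
+ ```
+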
+ ### 1. Install
+
+ #### Option A: Install from PyPI (recommended)
+
+ ```bash
+ uv pip install databricks-advanced-mcp
+ ```
+
+ Or with pip:
+
+ ```bash
+ pip install databricks-advanced-mcp
+ ```
+
+ #### Option B: Install from source
+
+ ```bash
+ git clone https://github.com/henrybravo/databricks-advanced-mcp-server.git
+ cd databricks-advanced-mcp-server
+ ```
+
+ Create and activate a virtual environment:
+
+ **Windows (PowerShell)**
+ ```powershell
+ uv venv .venv
+ .\.venv\Scripts\Activate.ps1
+ uv pip install -e .
+ ```
+
+ **macOS / Linux**
+ ```bash
+ uv venv .venv
+ source .venv/bin/activate
+ uv pip install -e .
+ ```
+
+ ### 2. Configure
+
+ ```bash
+ cp .env.example .env
+ ```
+
+ Edit `.env` with your Databricks credentials:
+
+ ```dotenv
+ # Azure Databricks:
+ DATABRICKS_HOST=https://adb-xxxx.azuredatabricks.net
+ # Databricks on AWS / GCP:
+ # DATABRICKS_HOST=https://dbc-xxxx.cloud.databricks.com
+
+ DATABRICKS_TOKEN=dapi_your_token
+ DATABRICKS_WAREHOUSE_ID=your_warehouse_id
+
+ # Optional (defaults shown)
+ # Azure workspaces typically use "main"; AWS/GCP workspaces use "workspace"
+ DATABRICKS_CATALOG=main
+ DATABRICKS_SCHEMA=default
+ ```
+
+ ### 3. Add to your IDE
+
+ Create `.vscode/mcp.json` in your project to register the MCP server with VS Code / GitHub Copilot.
+
+ #### Option A: PyPI install (recommended)
+
+ If you installed from PyPI (`pip install databricks-advanced-mcp`), the `databricks-mcp` CLI is available on your PATH:
+
+ ```jsonc
+ {
+   "servers": {
+     "databricks-mcp": {
+       "type": "stdio",
+       "command": "databricks-mcp",
+       "env": {
+         "DATABRICKS_HOST": "https://adb-xxxx.azuredatabricks.net",
+         "DATABRICKS_TOKEN": "dapi_your_token",
+         "DATABRICKS_WAREHOUSE_ID": "your_warehouse_id"
+       }
+     }
+   }
+ }
+ ```
+
+ #### Option B: Virtual environment (source install)
+
+ If you cloned the repo and installed into a local `.venv`, point directly to the Python interpreter:
+
+ **Windows**
+ ```jsonc
+ {
+   "servers": {
+     "databricks-mcp": {
+       "type": "stdio",
+       "command": "${workspaceFolder}/.venv/Scripts/python.exe",
+       "args": ["-m", "databricks_advanced_mcp.server"],
+       "envFile": "${workspaceFolder}/.env"
+     }
+   }
+ }
+ ```
+
+ **macOS / Linux**
+ ```jsonc
+ {
+   "servers": {
+     "databricks-mcp": {
+       "type": "stdio",
+       "command": "${workspaceFolder}/.venv/bin/python",
+       "args": ["-m", "databricks_advanced_mcp.server"],
+       "envFile": "${workspaceFolder}/.env"
+     }
+   }
+ }
+ ```
+
+ #### Multiple Workspaces
+
+ Each MCP server instance connects to exactly one Databricks workspace. To work with multiple workspaces simultaneously, register a separate server entry per workspace — each with its own credentials:
+
+ ```jsonc
+ {
+   "servers": {
+     // AWS / GCP workspace
+     "databricks-cloud": {
+       "type": "stdio",
+       "command": "databricks-mcp",
+       "env": {
+         "DATABRICKS_HOST": "https://dbc-xxxx.cloud.databricks.com",
+         "DATABRICKS_TOKEN": "dapi_cloud_token",
+         "DATABRICKS_WAREHOUSE_ID": "cloud_warehouse_id",
+         "DATABRICKS_CATALOG": "workspace"
+       }
+     },
+     // Azure workspace
+     "databricks-azure": {
+       "type": "stdio",
+       "command": "databricks-mcp",
+       "env": {
+         "DATABRICKS_HOST": "https://adb-xxxx.azuredatabricks.net",
+         "DATABRICKS_TOKEN": "dapi_azure_token",
+         "DATABRICKS_WAREHOUSE_ID": "azure_warehouse_id",
+         "DATABRICKS_CATALOG": "main"
+       }
+     }
+   }
+ }
+ ```
+
+ Alternatively, with a source install you can use separate `.env` files per workspace:
+
+ ```jsonc
+ {
+   "servers": {
+     "databricks-cloud": {
+       "type": "stdio",
+       "command": "${workspaceFolder}/.venv/bin/python",
+       "args": ["-m", "databricks_advanced_mcp.server"],
+       "envFile": "${workspaceFolder}/.env"
+     },
+     "databricks-azure": {
+       "type": "stdio",
+       "command": "${workspaceFolder}/.venv/bin/python",
+       "args": ["-m", "databricks_advanced_mcp.server"],
+       "envFile": "${workspaceFolder}/.env_azure"
+     }
+   }
+ }
+ ```
+
+ ### 4. Start using
+
+ Once configured, your AI assistant can call any of the tools below. Try prompts like:
+
+ - *"List all tables in the `analytics` schema"*
+ - *"Review the notebook at `/Users/me/etl_pipeline` for performance issues"*
+ - *"What would break if I drop the `customer_id` column from `main.sales.orders`?"*
+ - *"Show me the status of job 12345"*
+
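+ Impact questions like the last prompt reduce to reachability queries over the workspace dependency graph. A minimal sketch using networkx (a declared dependency; the node names are illustrative):
+
+ ```python
+ import networkx as nx
+
+ # Tables and notebooks become nodes; edges point from a source
+ # to whatever reads from it, so "downstream" means "reachable".
+ g = nx.DiGraph()
+ g.add_edge("main.raw.orders_staging", "main.sales.orders")
+ g.add_edge("main.sales.orders", "/Users/me/etl_pipeline")
+ g.add_edge("main.sales.orders", "main.reports.daily_revenue")
+
+ # Everything reachable from the changed table is potentially affected.
+ print(nx.descendants(g, "main.sales.orders"))
+ # -> {'/Users/me/etl_pipeline', 'main.reports.daily_revenue'}
+ ```
+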
+ ## MCP Tools
+
+ | Tool | Description |
+ |---|---|
+ | `execute_query` | Execute SQL against a Databricks SQL warehouse |
+ | `get_table_info` | Get table metadata — columns, row count, properties, storage |
+ | `list_tables` | List tables in a catalog.schema |
+ | `scan_notebook` | Scan a notebook for table/column references |
+ | `scan_jobs` | Scan all jobs for table dependencies |
+ | `scan_dlt_pipelines` | Scan all DLT pipelines for source/target tables |
+ | `build_dependency_graph` | Build the full workspace dependency graph |
+ | `get_table_dependencies` | Get upstream/downstream dependencies for a table |
+ | `refresh_graph` | Invalidate and rebuild the dependency graph cache |
+ | `analyze_impact` | Analyze impact of column drop / schema change / pipeline failure |
+ | `review_notebook` | Review a notebook for issues, anti-patterns, and optimizations |
+ | `list_jobs` | List jobs with status and schedule info |
+ | `get_job_status` | Get detailed job run status with error diagnostics |
+ | `list_pipelines` | List DLT pipelines with state and update status |
+ | `get_pipeline_status` | Get pipeline update details with event log |
+ | `trigger_rerun` | Trigger a job rerun (requires confirmation) |
+
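+ Outside an IDE, these tools can also be driven programmatically with FastMCP's client. A rough sketch (the `execute_query` argument name is an assumption, not taken from the tool schema):
+
+ ```python
+ import asyncio
+ from fastmcp import Client
+
+ async def main():
+     # fastmcp infers a stdio transport from a Python server script path.
+     async with Client("src/databricks_advanced_mcp/server.py") as client:
+         print([t.name for t in await client.list_tools()])
+         result = await client.call_tool("execute_query", {"query": "SELECT 1"})
+         print(result)
+
+ asyncio.run(main())
+ ```
+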
+ ## Configuration Reference
+
+ | Variable | Required | Default | Description |
+ |---|---|---|---|
+ | `DATABRICKS_HOST` | Yes | — | Workspace URL (`https://adb-xxx.azuredatabricks.net` for Azure, `https://dbc-xxx.cloud.databricks.com` for AWS/GCP) |
+ | `DATABRICKS_TOKEN` | Yes* | — | Personal access token or service principal token. *Not needed if the SDK's unified auth (Azure CLI, managed identity, `.databrickscfg`) is already configured |
+ | `DATABRICKS_WAREHOUSE_ID` | Yes | — | SQL warehouse ID for query execution |
+ | `DATABRICKS_CATALOG` | No | `main` | Default catalog for unqualified table names — use `workspace` for AWS/GCP |
+ | `DATABRICKS_SCHEMA` | No | `default` | Default schema for unqualified table names |
+
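+ Internally these variables are loaded with pydantic-settings (`config.py` in the architecture below). A minimal sketch of what such a settings class can look like; the field names here are assumptions:
+
+ ```python
+ from pydantic import Field
+ from pydantic_settings import BaseSettings, SettingsConfigDict
+
+ class DatabricksSettings(BaseSettings):
+     # env_prefix maps host -> DATABRICKS_HOST, warehouse_id -> DATABRICKS_WAREHOUSE_ID, etc.
+     model_config = SettingsConfigDict(
+         env_prefix="DATABRICKS_", env_file=".env", extra="ignore"
+     )
+
+     host: str
+     token: str | None = None  # optional under SDK unified auth
+     warehouse_id: str
+     catalog: str = "main"
+     # A field named "schema" would shadow a BaseModel attribute, so alias it explicitly.
+     default_schema: str = Field("default", validation_alias="DATABRICKS_SCHEMA")
+ ```
+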
+ ### Cloud Provider Notes
+
+ This server is tested against **Azure Databricks** and **Databricks on AWS** (`.cloud.databricks.com`). Key differences:
+
+ | Aspect | Azure | AWS / GCP |
+ |---|---|---|
+ | Host format | `https://adb-xxx.azuredatabricks.net` | `https://dbc-xxx.cloud.databricks.com` |
+ | Default catalog | `main` | `workspace` |
+ | Workspace root objects | `DIRECTORY` | `DIRECTORY` and `REPO` |
+
+ All tools work on both platforms. Set `DATABRICKS_CATALOG` to match your workspace's default catalog.
+
+ ## Infrastructure (Optional)
+
+ If you need to provision a new Azure Databricks workspace, the `infra/` directory contains:
+
+ - **`main.bicep`** — Azure Bicep template (Premium SKU, Unity Catalog enabled)
+ - **`deploy.ps1`** — One-command PowerShell deployment script
+ - **`INSTALL.md`** — Detailed step-by-step deployment guide
+
+ ```powershell
+ cd infra
+ ./deploy.ps1 -ResourceGroupName rg-databricks-mcp -Location eastus2
+ ```
+
+ ## Development
+
+ ```bash
+ # Install with dev dependencies
+ uv pip install -e ".[dev]"
+
+ # Run tests
+ uv run pytest
+
+ # Lint
+ uv run ruff check src/ tests/
+
+ # Type check
+ uv run mypy src/
+ ```
+
+ ## Architecture
+
+ ```
+ src/databricks_advanced_mcp/
+ ├── server.py                  # FastMCP server + CLI entry point
+ ├── config.py                  # Pydantic settings from env vars
+ ├── client.py                  # Databricks SDK client factory
+ ├── tools/                     # MCP tool implementations
+ │   ├── sql_executor.py
+ │   ├── table_info.py
+ │   ├── dependency_scanner.py
+ │   ├── impact_analysis.py
+ │   ├── notebook_reviewer.py
+ │   └── job_pipeline_ops.py
+ ├── parsers/                   # Code parsing engines
+ │   ├── sql_parser.py          # sqlglot-based SQL extraction
+ │   ├── notebook_parser.py     # Databricks notebook cell parsing
+ │   └── dlt_parser.py          # DLT pipeline definition parsing
+ ├── graph/                     # Dependency graph
+ │   ├── models.py              # Node, Edge, DependencyGraph data models
+ │   ├── builder.py             # Graph builder (orchestrates scans)
+ │   └── cache.py               # In-memory graph cache with TTL
+ └── reviewers/                 # Notebook review rule engines
+     ├── performance.py         # Performance anti-patterns
+     ├── standards.py           # Coding standards checks
+     └── suggestions.py         # Optimization suggestions
+ ```
+
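+ As a taste of how a parser like `sql_parser.py` can extract lineage with sqlglot (a rough sketch of the idea, not the actual parser code):
+
+ ```python
+ import sqlglot
+ from sqlglot import exp
+
+ sql = "INSERT INTO main.sales.orders SELECT * FROM main.raw.orders_staging"
+
+ # Parse with the Databricks dialect, then collect every table
+ # reference in the AST.
+ tables = {t.sql() for t in sqlglot.parse_one(sql, read="databricks").find_all(exp.Table)}
+ print(tables)  # {'main.sales.orders', 'main.raw.orders_staging'}
+ ```
+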
+ ## License
+
+ [MIT](LICENSE)