mcp-automl 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_automl-0.1.4/.dockerignore +66 -0
- mcp_automl-0.1.4/.github/workflows/docker-publish.yml +62 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/.github/workflows/publish.yml +8 -0
- mcp_automl-0.1.4/Dockerfile +37 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/PKG-INFO +35 -8
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/README.md +34 -7
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/pyproject.toml +1 -1
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/.github/workflows/test.yml +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/.gitignore +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/.python-version +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/LICENSE +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/skill/data-science-workflow/SKILL.md +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/src/mcp_automl/__init__.py +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/src/mcp_automl/__main__.py +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/src/mcp_automl/server.py +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/tests/test_server.py +0 -0
- {mcp_automl-0.1.2 → mcp_automl-0.1.4}/uv.lock +0 -0
mcp_automl-0.1.4/.dockerignore (new file)
@@ -0,0 +1,66 @@
+# Git
+.git
+.gitignore
+.gitattributes
+
+# Python
+__pycache__
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+.venv
+venv/
+ENV/
+env/
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+.tox/
+.hypothesis/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# Project specific
+logs.log
+*.log
+.python-version
+uv.lock
+
+# CI/CD
+.github/
+.dockerignore
+
+# Documentation
+*.md
+!README.md
+
+# MacOS
+.DS_Store
+
+# User data
+experiments/
mcp_automl-0.1.4/.github/workflows/docker-publish.yml (new file)
@@ -0,0 +1,62 @@
+name: Build and Publish Docker Image
+
+on:
+  workflow_dispatch:
+  push:
+    tags:
+      - 'v*'  # Trigger on version tags like v0.1.0, v1.2.3, etc.
+  release:
+    types: [published]
+
+jobs:
+  docker:
+    name: Build and Push Docker Image
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels)
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: idea7766/mcp-automl
+          tags: |
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=raw,value=latest,enable={{is_default_branch}}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          file: ./Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=registry,ref=idea7766/mcp-automl:buildcache
+          cache-to: type=registry,ref=idea7766/mcp-automl:buildcache,mode=max
+
+      - name: Update Docker Hub description
+        uses: peter-evans/dockerhub-description@v4
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          repository: idea7766/mcp-automl
+          short-description: "MCP server for end-to-end machine learning with AutoML"
+          readme-filepath: ./README.md
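For context, this workflow fires on `v*` tags (and on published releases or manual dispatch), so a release would typically be cut by pushing a version tag. A minimal sketch, assuming the usual git-tag flow (the tag name is illustrative):

```bash
# Pushing a version tag triggers docker-publish.yml
git tag v0.1.4
git push origin v0.1.4
```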
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/.github/workflows/publish.yml
@@ -88,3 +88,11 @@ jobs:
 
       - name: Publish distributions to PyPI
        run: uv publish
+
+      - name: Trigger Docker Build
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          NEW_VERSION: ${{ inputs.version }}
+        run: |
+          gh workflow run docker-publish.yml \
+            --ref "v$NEW_VERSION"
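The added step dispatches docker-publish.yml through the GitHub CLI after the PyPI publish succeeds. A manual equivalent from a developer machine, assuming an authenticated `gh` session and an existing tag, would be roughly:

```bash
# Hand-run counterpart of the new "Trigger Docker Build" step (ref is illustrative)
gh workflow run docker-publish.yml --ref v0.1.4
```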
mcp_automl-0.1.4/Dockerfile (new file)
@@ -0,0 +1,37 @@
+# Multi-stage build for smaller final image
+FROM python:3.11-slim as builder
+
+# Install uv
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
+
+# Set working directory
+WORKDIR /app
+
+# Copy project files
+COPY pyproject.toml README.md ./
+COPY src/mcp_automl ./src/mcp_automl
+
+# Install dependencies and build
+RUN uv pip install --system --no-cache .
+
+# Final stage
+FROM python:3.11-slim
+
+# Install system dependencies for LightGBM (includes OpenMP)
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    libgomp1 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy installed packages from builder
+COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
+COPY --from=builder /usr/local/bin /usr/local/bin
+
+# Create directory for experiments
+RUN mkdir -p /root/.mcp-automl/experiments
+
+# Set working directory
+WORKDIR /workspace
+
+# Run the MCP server
+ENTRYPOINT ["mcp-automl"]
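To sanity-check the image locally before the CI publish, a build-and-run sketch (the local image tag is illustrative; the volume mounts mirror the Docker configuration shown in the README diff below):

```bash
# Build from the repository root, then run the stdio MCP server interactively
docker build -t mcp-automl .
docker run -i --rm \
  -v "$PWD:/workspace" \
  -v "$HOME/.mcp-automl:/root/.mcp-automl" \
  mcp-automl
```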
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-automl
-Version: 0.1.2
+Version: 0.1.4
 Summary: MCP server for end-to-end machine learning
 Author-email: ke <idea7766@gmail.com>
 License-File: LICENSE
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/PKG-INFO
@@ -39,12 +39,26 @@ Add to your MCP client configuration (e.g., Claude Desktop, Gemini CLI, Cursor,
   "mcpServers": {
     "mcp-automl": {
       "command": "uvx",
-      "args": ["--
+      "args": ["--python", "3.11", "mcp-automl"]
     }
   }
 }
 ```
 
+**Or using Docker:**
+
+```json
+{
+  "mcpServers": {
+    "mcp-automl": {
+      "command": "docker",
+      "args": ["run", "-i", "--rm", "-v", "${PWD}:/workspace", "-v", "${HOME}/.mcp-automl:/root/.mcp-automl", "idea7766/mcp-automl:latest"]
+    }
+  }
+}
+```
+
+
 ### Available Tools
 
 | Tool | Description |
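The updated uvx entry pins Python 3.11. Outside of an MCP client, the same invocation can be smoke-tested directly; a sketch assuming uv/uvx is installed (the server then waits for MCP messages on stdin):

```bash
# Command-line equivalent of the uvx-based client configuration
uvx --python 3.11 mcp-automl
```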
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/PKG-INFO
@@ -67,16 +81,18 @@ MCP AutoML includes an **data science workflow skill** that guides AI agents thr
 
 ### Installing the Skill
 
-
+**For Gemini CLI:**
 
 ```bash
-
-
+gemini skills install https://github.com/idea7766/mcp-automl --path skill/data-science-workflow
+```
 
-
-cp -r skill/data-science-workflow ~/.claude/skills/
+**For Claude Code:**
 
-
+```bash
+# Clone the repo and copy the skill
+git clone https://github.com/idea7766/mcp-automl.git
+cp -r mcp-automl/skill/data-science-workflow ~/.claude/skills/
 ```
 
 The skill file is located at `skill/data-science-workflow/SKILL.md`.
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/PKG-INFO
@@ -84,6 +100,17 @@ The skill file is located at `skill/data-science-workflow/SKILL.md`.
 ## Configuration
 
 Models and experiments are saved to `~/.mcp-automl/experiments/` by default.
+## Troubleshooting
+
+### macOS: LightGBM OpenMP Error
+
+If you encounter an error like `Library not loaded: @rpath/libomp.dylib`, you need to install OpenMP:
+
+```bash
+brew install libomp
+```
+
+This is a system-level dependency required by LightGBM on macOS. Linux and Windows users typically don't need this step.
 
 ## Dependencies
 
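After installing libomp, a quick way to confirm the fix is to import LightGBM in the same environment the server uses; a sketch assuming the `lightgbm` package is installed there:

```bash
# Should print a version string instead of the libomp.dylib load error
python -c "import lightgbm; print(lightgbm.__version__)"
```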
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/README.md
@@ -23,12 +23,26 @@ Add to your MCP client configuration (e.g., Claude Desktop, Gemini CLI, Cursor,
   "mcpServers": {
     "mcp-automl": {
       "command": "uvx",
-      "args": ["--
+      "args": ["--python", "3.11", "mcp-automl"]
     }
   }
 }
 ```
 
+**Or using Docker:**
+
+```json
+{
+  "mcpServers": {
+    "mcp-automl": {
+      "command": "docker",
+      "args": ["run", "-i", "--rm", "-v", "${PWD}:/workspace", "-v", "${HOME}/.mcp-automl:/root/.mcp-automl", "idea7766/mcp-automl:latest"]
+    }
+  }
+}
+```
+
+
 ### Available Tools
 
 | Tool | Description |
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/README.md
@@ -51,16 +65,18 @@ MCP AutoML includes an **data science workflow skill** that guides AI agents thr
 
 ### Installing the Skill
 
-
+**For Gemini CLI:**
 
 ```bash
-
-
+gemini skills install https://github.com/idea7766/mcp-automl --path skill/data-science-workflow
+```
 
-
-cp -r skill/data-science-workflow ~/.claude/skills/
+**For Claude Code:**
 
-
+```bash
+# Clone the repo and copy the skill
+git clone https://github.com/idea7766/mcp-automl.git
+cp -r mcp-automl/skill/data-science-workflow ~/.claude/skills/
 ```
 
 The skill file is located at `skill/data-science-workflow/SKILL.md`.
{mcp_automl-0.1.2 → mcp_automl-0.1.4}/README.md
@@ -68,6 +84,17 @@ The skill file is located at `skill/data-science-workflow/SKILL.md`.
 ## Configuration
 
 Models and experiments are saved to `~/.mcp-automl/experiments/` by default.
+## Troubleshooting
+
+### macOS: LightGBM OpenMP Error
+
+If you encounter an error like `Library not loaded: @rpath/libomp.dylib`, you need to install OpenMP:
+
+```bash
+brew install libomp
+```
+
+This is a system-level dependency required by LightGBM on macOS. Linux and Windows users typically don't need this step.
 
 ## Dependencies
 
Files without changes: .github/workflows/test.yml, .gitignore, .python-version, LICENSE, skill/data-science-workflow/SKILL.md, src/mcp_automl/__init__.py, src/mcp_automl/__main__.py, src/mcp_automl/server.py, tests/test_server.py, uv.lock