mcpswitch-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcpswitch_cli-0.1.0/PKG-INFO +130 -0
- mcpswitch_cli-0.1.0/README.md +106 -0
- mcpswitch_cli-0.1.0/mcpswitch/__init__.py +3 -0
- mcpswitch_cli-0.1.0/mcpswitch/auto.py +353 -0
- mcpswitch_cli-0.1.0/mcpswitch/billing.py +173 -0
- mcpswitch_cli-0.1.0/mcpswitch/cli.py +1289 -0
- mcpswitch_cli-0.1.0/mcpswitch/community.py +209 -0
- mcpswitch_cli-0.1.0/mcpswitch/config.py +62 -0
- mcpswitch_cli-0.1.0/mcpswitch/email.py +204 -0
- mcpswitch_cli-0.1.0/mcpswitch/hooks.py +269 -0
- mcpswitch_cli-0.1.0/mcpswitch/profiles.py +96 -0
- mcpswitch_cli-0.1.0/mcpswitch/sync.py +237 -0
- mcpswitch_cli-0.1.0/mcpswitch/team.py +588 -0
- mcpswitch_cli-0.1.0/mcpswitch/tier.py +170 -0
- mcpswitch_cli-0.1.0/mcpswitch/tokens.py +426 -0
- mcpswitch_cli-0.1.0/mcpswitch/usage.py +232 -0
- mcpswitch_cli-0.1.0/mcpswitch_cli.egg-info/PKG-INFO +130 -0
- mcpswitch_cli-0.1.0/mcpswitch_cli.egg-info/SOURCES.txt +22 -0
- mcpswitch_cli-0.1.0/mcpswitch_cli.egg-info/dependency_links.txt +1 -0
- mcpswitch_cli-0.1.0/mcpswitch_cli.egg-info/entry_points.txt +2 -0
- mcpswitch_cli-0.1.0/mcpswitch_cli.egg-info/requires.txt +7 -0
- mcpswitch_cli-0.1.0/mcpswitch_cli.egg-info/top_level.txt +1 -0
- mcpswitch_cli-0.1.0/pyproject.toml +44 -0
- mcpswitch_cli-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: mcpswitch-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Smart MCP profile manager for Claude Code — save 30-40% of your context window
|
|
5
|
+
License: MIT
|
|
6
|
+
Project-URL: Homepage, https://mcpswitch.dev
|
|
7
|
+
Project-URL: Repository, https://github.com/sathibabunaidu58/mcpswitch
|
|
8
|
+
Project-URL: Issues, https://github.com/sathibabunaidu58/mcpswitch/issues
|
|
9
|
+
Keywords: claude,mcp,claude-code,context-window,tokens,llm
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
16
|
+
Requires-Python: >=3.10
|
|
17
|
+
Description-Content-Type: text/markdown
|
|
18
|
+
Requires-Dist: click>=8.0
|
|
19
|
+
Requires-Dist: rich>=13.0
|
|
20
|
+
Requires-Dist: tiktoken>=0.5
|
|
21
|
+
Provides-Extra: server
|
|
22
|
+
Requires-Dist: flask>=3.0; extra == "server"
|
|
23
|
+
Requires-Dist: requests>=2.31; extra == "server"
|
|
24
|
+
|
|
25
|
+
# MCPSwitch
|
|
26
|
+
|
|
27
|
+
**Smart MCP profile manager for Claude Code.**
|
|
28
|
+
Stop wasting 30-40% of your context window on MCP tools you never use.
|
|
29
|
+
|
|
30
|
+
---
|
|
31
|
+
|
|
32
|
+
## The Problem
|
|
33
|
+
|
|
34
|
+
Every MCP server you install adds its tool schemas to Claude's context window at session start.
|
|
35
|
+
84 tools across 6 servers = **15,540 tokens consumed before you type a single message.**
|
|
36
|
+
That's up to 40% of your context window gone — every session, every time.
|
|
37
|
+
|
|
38
|
+
MCPSwitch lets you define lean profiles for different workflows and switch between them instantly.
|
|
39
|
+
|
|
40
|
+
---
|
|
41
|
+
|
|
42
|
+
## Install
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install mcpswitch-cli
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
Or run from source:
|
|
49
|
+
```bash
|
|
50
|
+
git clone https://github.com/sathibabunaidu58/mcpswitch
|
|
51
|
+
cd mcpswitch
|
|
52
|
+
pip install -e .
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
---
|
|
56
|
+
|
|
57
|
+
## Quick Start
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
# See what your current MCP config is costing you
|
|
61
|
+
mcpswitch analyze
|
|
62
|
+
|
|
63
|
+
# Save your current config as a profile
|
|
64
|
+
mcpswitch import --name full
|
|
65
|
+
|
|
66
|
+
# Create a lean profile
|
|
67
|
+
mcpswitch create python-backend
|
|
68
|
+
|
|
69
|
+
# Add only the servers you need for Python work
|
|
70
|
+
mcpswitch add python-backend github
|
|
71
|
+
mcpswitch add python-backend context7
|
|
72
|
+
|
|
73
|
+
# Switch to it before starting a Claude Code session
|
|
74
|
+
mcpswitch use python-backend
|
|
75
|
+
|
|
76
|
+
# See all profiles and their token costs
|
|
77
|
+
mcpswitch list
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
---
|
|
81
|
+
|
|
82
|
+
## Commands
|
|
83
|
+
|
|
84
|
+
| Command | What it does |
|
|
85
|
+
|---------|-------------|
|
|
86
|
+
| `mcpswitch status` | Show active profile + token cost of loaded servers |
|
|
87
|
+
| `mcpswitch analyze` | Break down token cost by server, show savings potential |
|
|
88
|
+
| `mcpswitch list` | List all profiles with token estimates |
|
|
89
|
+
| `mcpswitch use <profile>` | Switch to a profile (rewrites Claude config) |
|
|
90
|
+
| `mcpswitch import --name <n>` | Save current config as a named profile |
|
|
91
|
+
| `mcpswitch create <profile>` | Create a new empty profile |
|
|
92
|
+
| `mcpswitch add <profile> <server>` | Add a server to a profile |
|
|
93
|
+
| `mcpswitch remove <profile> <server>` | Remove a server from a profile |
|
|
94
|
+
| `mcpswitch save <profile>` | Save current active config as profile |
|
|
95
|
+
| `mcpswitch delete <profile>` | Delete a profile |
|
|
96
|
+
|
|
97
|
+
---
|
|
98
|
+
|
|
99
|
+
## Real-World Savings
|
|
100
|
+
|
|
101
|
+
| Scenario | All MCPs | Lean Profile | Saved |
|
|
102
|
+
|----------|----------|-------------|-------|
|
|
103
|
+
| Python backend work | 15,540 tokens | 3,200 tokens | 12,340 tokens (79%) |
|
|
104
|
+
| Frontend work | 15,540 tokens | 4,100 tokens | 11,440 tokens (74%) |
|
|
105
|
+
| Writing/docs | 15,540 tokens | 900 tokens | 14,640 tokens (94%) |
|
|
106
|
+
|
|
107
|
+
Fewer wasted tokens = more context for your actual code = better Claude responses.
|
|
108
|
+
|
|
109
|
+
---
|
|
110
|
+
|
|
111
|
+
## How It Works
|
|
112
|
+
|
|
113
|
+
MCPSwitch reads and writes the Claude MCP config files:
|
|
114
|
+
- **Claude Code CLI**: `~/.claude/claude_desktop_config.json`
|
|
115
|
+
- **Claude Desktop**: `~/AppData/Roaming/Claude/claude_desktop_config.json` (Windows)
|
|
116
|
+
|
|
117
|
+
Profiles are stored in `~/.mcpswitch/profiles.json`. Every config change is backed up automatically before overwriting.
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## Pricing
|
|
122
|
+
|
|
123
|
+
- **Free**: unlimited profiles, all commands, open source
|
|
124
|
+
- **Pro** ($19/month): coming soon — auto-detect project type and switch profiles automatically, usage analytics, team profile sharing
|
|
125
|
+
|
|
126
|
+
---
|
|
127
|
+
|
|
128
|
+
## License
|
|
129
|
+
|
|
130
|
+
MIT
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# MCPSwitch
|
|
2
|
+
|
|
3
|
+
**Smart MCP profile manager for Claude Code.**
|
|
4
|
+
Stop wasting 30-40% of your context window on MCP tools you never use.
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
## The Problem
|
|
9
|
+
|
|
10
|
+
Every MCP server you install adds its tool schemas to Claude's context window at session start.
|
|
11
|
+
84 tools across 6 servers = **15,540 tokens consumed before you type a single message.**
|
|
12
|
+
That's up to 40% of your context window gone — every session, every time.
|
|
13
|
+
|
|
14
|
+
MCPSwitch lets you define lean profiles for different workflows and switch between them instantly.
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## Install
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
pip install mcpswitch-cli
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
Or run from source:
|
|
25
|
+
```bash
|
|
26
|
+
git clone https://github.com/sathibabunaidu58/mcpswitch
|
|
27
|
+
cd mcpswitch
|
|
28
|
+
pip install -e .
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
---
|
|
32
|
+
|
|
33
|
+
## Quick Start
|
|
34
|
+
|
|
35
|
+
```bash
|
|
36
|
+
# See what your current MCP config is costing you
|
|
37
|
+
mcpswitch analyze
|
|
38
|
+
|
|
39
|
+
# Save your current config as a profile
|
|
40
|
+
mcpswitch import --name full
|
|
41
|
+
|
|
42
|
+
# Create a lean profile
|
|
43
|
+
mcpswitch create python-backend
|
|
44
|
+
|
|
45
|
+
# Add only the servers you need for Python work
|
|
46
|
+
mcpswitch add python-backend github
|
|
47
|
+
mcpswitch add python-backend context7
|
|
48
|
+
|
|
49
|
+
# Switch to it before starting a Claude Code session
|
|
50
|
+
mcpswitch use python-backend
|
|
51
|
+
|
|
52
|
+
# See all profiles and their token costs
|
|
53
|
+
mcpswitch list
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
---
|
|
57
|
+
|
|
58
|
+
## Commands
|
|
59
|
+
|
|
60
|
+
| Command | What it does |
|
|
61
|
+
|---------|-------------|
|
|
62
|
+
| `mcpswitch status` | Show active profile + token cost of loaded servers |
|
|
63
|
+
| `mcpswitch analyze` | Break down token cost by server, show savings potential |
|
|
64
|
+
| `mcpswitch list` | List all profiles with token estimates |
|
|
65
|
+
| `mcpswitch use <profile>` | Switch to a profile (rewrites Claude config) |
|
|
66
|
+
| `mcpswitch import --name <n>` | Save current config as a named profile |
|
|
67
|
+
| `mcpswitch create <profile>` | Create a new empty profile |
|
|
68
|
+
| `mcpswitch add <profile> <server>` | Add a server to a profile |
|
|
69
|
+
| `mcpswitch remove <profile> <server>` | Remove a server from a profile |
|
|
70
|
+
| `mcpswitch save <profile>` | Save current active config as profile |
|
|
71
|
+
| `mcpswitch delete <profile>` | Delete a profile |
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## Real-World Savings
|
|
76
|
+
|
|
77
|
+
| Scenario | All MCPs | Lean Profile | Saved |
|
|
78
|
+
|----------|----------|-------------|-------|
|
|
79
|
+
| Python backend work | 15,540 tokens | 3,200 tokens | 12,340 tokens (79%) |
|
|
80
|
+
| Frontend work | 15,540 tokens | 4,100 tokens | 11,440 tokens (74%) |
|
|
81
|
+
| Writing/docs | 15,540 tokens | 900 tokens | 14,640 tokens (94%) |
|
|
82
|
+
|
|
83
|
+
Fewer wasted tokens = more context for your actual code = better Claude responses.
|
|
84
|
+
|
|
85
|
+
---
|
|
86
|
+
|
|
87
|
+
## How It Works
|
|
88
|
+
|
|
89
|
+
MCPSwitch reads and writes the Claude MCP config files:
|
|
90
|
+
- **Claude Code CLI**: `~/.claude/claude_desktop_config.json`
|
|
91
|
+
- **Claude Desktop**: `~/AppData/Roaming/Claude/claude_desktop_config.json` (Windows)
|
|
92
|
+
|
|
93
|
+
Profiles are stored in `~/.mcpswitch/profiles.json`. Every config change is backed up automatically before overwriting.
|
|
94
|
+
|
|
95
|
+
---
|
|
96
|
+
|
|
97
|
+
## Pricing
|
|
98
|
+
|
|
99
|
+
- **Free**: unlimited profiles, all commands, open source
|
|
100
|
+
- **Pro** ($19/month): coming soon — auto-detect project type and switch profiles automatically, usage analytics, team profile sharing
|
|
101
|
+
|
|
102
|
+
---
|
|
103
|
+
|
|
104
|
+
## License
|
|
105
|
+
|
|
106
|
+
MIT
|
|
@@ -0,0 +1,353 @@
|
|
|
1
|
+
"""Context-aware automatic profile selection.
|
|
2
|
+
|
|
3
|
+
How it works:
|
|
4
|
+
1. Read conversation context from ~/.claude/history.jsonl (last N messages)
|
|
5
|
+
2. Scan project directory for tech stack signals
|
|
6
|
+
3. Score each profile against detected context
|
|
7
|
+
4. Switch to the best match — silently if confident, ask if not
|
|
8
|
+
|
|
9
|
+
This runs at session start via: mcpswitch auto
|
|
10
|
+
Or manually: mcpswitch auto --dir /path/to/project
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import json
|
|
14
|
+
import os
|
|
15
|
+
import re
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Optional
|
|
18
|
+
|
|
19
|
+
from .profiles import load_profiles, get_active_profile, set_active_profile
|
|
20
|
+
from .config import get_claude_code_config_path, set_mcp_servers
|
|
21
|
+
from .tokens import estimate_total_tokens
|
|
22
|
+
|
|
23
|
+
# ── Tech stack signals → MCP server names ────────────────────────────────────
|
|
24
|
+
|
|
25
|
+
# File presence signals: if these files exist, these servers are likely needed.
# Each entry is (path pattern, [relevant MCP server names]). Patterns that
# contain "*" are globbed against the project root; plain names are checked
# with a direct existence test (see _scan_project_signals).
FILE_SIGNALS: list[tuple[str, list[str]]] = [
    # (glob pattern, [relevant mcp server names])
    ("package.json", ["github", "playwright", "context7"]),    # Node.js project marker
    ("requirements.txt", ["github", "context7", "postgres"]),  # Python project marker
    ("pyproject.toml", ["github", "context7", "postgres"]),    # Python project marker
    ("go.mod", ["github", "context7"]),                        # Go module
    ("Cargo.toml", ["github", "context7"]),                    # Rust crate
    ("docker-compose.yml", ["github", "postgres", "sqlite"]),  # containerized stack
    (".github/workflows/*.yml", ["github"]),                   # GitHub Actions CI
    ("prisma/schema.prisma", ["github", "postgres"]),          # Prisma ORM schema
    ("supabase/**", ["github", "postgres"]),                   # Supabase project dir
    ("*.test.*", ["playwright", "github"]),                    # test files present
    ("playwright.config.*", ["playwright"]),                   # Playwright e2e config
    ("*.md", ["filesystem", "fetch"]),                         # markdown docs present
    ("docs/**", ["filesystem", "fetch"]),                      # docs directory
]
|
|
42
|
+
|
|
43
|
+
# Conversation keyword signals: words in recent messages → relevant servers.
# Keys are matched as lower-case substrings of the recent conversation text
# (see select_best_profile); values are the MCP server names they imply.
KEYWORD_SIGNALS: dict[str, list[str]] = {
    # Code & git
    "pull request": ["github"],
    "pr review": ["github"],
    "github": ["github"],
    "commit": ["github"],
    "branch": ["github"],
    "merge": ["github"],

    # Browser / frontend
    "browser": ["playwright"],
    "screenshot": ["playwright"],
    "click": ["playwright"],
    "scrape": ["playwright"],
    "selenium": ["playwright"],
    "test": ["playwright", "github"],

    # Search
    "search": ["brave-search"],
    "google": ["brave-search"],
    "find online": ["brave-search"],
    "look up": ["brave-search"],

    # Database
    "database": ["postgres", "sqlite"],
    "sql": ["postgres", "sqlite"],
    "postgres": ["postgres"],
    "sqlite": ["sqlite"],
    "query": ["postgres", "sqlite"],

    # Docs / files
    # BUG FIX: "documentation" was listed twice in the original literal —
    # once here as ["filesystem", "context7"] and once under "API docs" as
    # ["context7"]. In a dict literal the later key silently wins, so the
    # filesystem signal was lost. Merged into a single entry.
    "documentation": ["filesystem", "context7"],
    "readme": ["filesystem"],
    "file": ["filesystem"],
    "read file": ["filesystem"],

    # API docs
    "api docs": ["context7"],
    "latest docs": ["context7"],
    "how to use": ["context7"],

    # Notes / knowledge
    "obsidian": ["obsidian"],
    "notes": ["obsidian"],
    "vault": ["obsidian"],
}

# Switch silently at or above this confidence score; ask the user below it.
CONFIDENCE_THRESHOLD = 0.4
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _read_recent_history(n_messages: int = 20) -> str:
    """Collect text from the most recent Claude Code messages.

    Reads ``~/.claude/history.jsonl``, walks its last 200 lines newest-first,
    and gathers message text until at least *n_messages* pieces have been
    collected. Returns the concatenated, lower-cased text, or an empty string
    when the history file is missing or unreadable.
    """
    history_file = Path.home() / ".claude" / "history.jsonl"
    if not history_file.exists():
        return ""

    try:
        with open(history_file, "r", encoding="utf-8") as fh:
            raw_lines = fh.readlines()
    except Exception:
        # Best-effort read: any failure just means "no conversation context".
        return ""

    collected: list[str] = []
    # Newest entries are at the end of the file; cap the scan at 200 lines.
    for raw_line in reversed(raw_lines[-200:]):
        try:
            record = json.loads(raw_line.strip())
        except Exception:
            continue  # skip malformed lines

        if isinstance(record, dict):
            payload = record.get("message", "") or record.get("content", "")
            if isinstance(payload, list):
                # Structured content: pull out only the text blocks.
                collected.extend(
                    block.get("text", "")
                    for block in payload
                    if isinstance(block, dict) and block.get("type") == "text"
                )
            elif isinstance(payload, str):
                collected.append(payload)

        if len(collected) >= n_messages:
            break

    return " ".join(collected).lower()
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def _scan_project_signals(directory: str) -> list[str]:
    """Scan *directory* for tech-stack marker files.

    Each FILE_SIGNALS pattern containing ``*`` is globbed against the
    directory; plain names are checked for direct existence. Returns a
    de-duplicated, **sorted** list of the matched MCP server names.

    (FIX: the original returned ``list(set(...))``, whose order varies
    between runs, which made downstream "reason" strings nondeterministic.
    Sorting makes the output stable without changing its set of elements.)
    """
    root = Path(directory)
    if not root.exists():
        return []

    detected: set[str] = set()
    for pattern, servers in FILE_SIGNALS:
        # A single "*" test suffices: every "**" pattern also contains "*".
        if "*" in pattern:
            # any() stops at the first glob hit instead of materializing all.
            hit = any(root.glob(pattern))
        else:
            hit = (root / pattern).exists()
        if hit:
            detected.update(servers)

    return sorted(detected)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def _score_profile(
|
|
152
|
+
profile_name: str,
|
|
153
|
+
profile_servers: dict,
|
|
154
|
+
context_servers: list[str],
|
|
155
|
+
conversation_servers: list[str],
|
|
156
|
+
) -> float:
|
|
157
|
+
"""Score a profile 0.0–1.0 based on how well it matches detected context.
|
|
158
|
+
|
|
159
|
+
Higher = better match.
|
|
160
|
+
"""
|
|
161
|
+
if not profile_servers:
|
|
162
|
+
return 0.0
|
|
163
|
+
|
|
164
|
+
loaded = set(profile_servers.keys())
|
|
165
|
+
needed = set(context_servers + conversation_servers)
|
|
166
|
+
|
|
167
|
+
if not needed:
|
|
168
|
+
return 0.0
|
|
169
|
+
|
|
170
|
+
# Coverage: what % of needed servers does this profile have?
|
|
171
|
+
coverage = len(loaded & needed) / len(needed)
|
|
172
|
+
|
|
173
|
+
# Precision: what % of loaded servers are actually needed? (penalize bloat)
|
|
174
|
+
precision = len(loaded & needed) / len(loaded) if loaded else 0.0
|
|
175
|
+
|
|
176
|
+
# Weighted: coverage matters more than precision
|
|
177
|
+
score = (coverage * 0.7) + (precision * 0.3)
|
|
178
|
+
return round(score, 3)
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def select_best_profile(
    directory: Optional[str] = None,
    conversation_text: Optional[str] = None,
) -> dict:
    """Pick the profile that best matches the current working context.

    Signals come from two sources: marker files in *directory* (defaults to
    the current working directory) and keywords in recent conversation text
    (*conversation_text* overrides the history-file read — useful for tests).

    Returns:
        {
            "recommended": str | None,    # best profile name, None if no profiles
            "confidence": float,          # 0.0 - 1.0
            "reason": str,                # human-readable explanation
            "scores": {name: score},      # all profile scores
            "context_servers": [...],     # servers detected from project files
            "conversation_servers": [...],# servers detected from conversation
            "current_profile": str,
        }
    """
    profiles = load_profiles()
    current = get_active_profile()

    if not profiles:
        return {
            "recommended": None,
            "confidence": 0.0,
            "reason": "No profiles configured. Run: mcpswitch import --name <name>",
            "scores": {},
            "context_servers": [],
            "conversation_servers": [],
            "current_profile": current,
        }

    # 1. Detect signals from the project's files.
    project_dir = directory or os.getcwd()
    context_servers = _scan_project_signals(project_dir)

    # 2. Detect signals from recent conversation text.
    if conversation_text is None:
        conversation_text = _read_recent_history(20)

    conversation_servers: list[str] = []
    for keyword, servers in KEYWORD_SIGNALS.items():
        if keyword in conversation_text:
            conversation_servers.extend(servers)
    conversation_servers = list(set(conversation_servers))

    # 3. Score every profile against the combined signals.
    scores = {
        name: _score_profile(name, servers, context_servers, conversation_servers)
        for name, servers in profiles.items()
    }

    if not any(scores.values()):
        # No signals detected — recommend the profile with fewest tokens
        # (lean default). FIX: removed a dead `config_path =
        # get_claude_code_config_path()` assignment whose result was never
        # used in this branch.
        best = min(profiles, key=lambda n: estimate_total_tokens(profiles[n])["total"])
        return {
            "recommended": best,
            "confidence": 0.2,
            "reason": "No project signals detected. Recommending leanest profile.",
            "scores": scores,
            "context_servers": context_servers,
            "conversation_servers": conversation_servers,
            "current_profile": current,
        }

    best = max(scores, key=lambda k: scores[k])
    confidence = scores[best]

    # Build a human-readable explanation from the strongest signals.
    reason_parts = []
    if context_servers:
        reason_parts.append(f"project signals: {', '.join(context_servers[:3])}")
    if conversation_servers:
        reason_parts.append(f"conversation context: {', '.join(conversation_servers[:3])}")
    reason = "Detected " + " | ".join(reason_parts) if reason_parts else "Best available match"

    return {
        "recommended": best,
        "confidence": confidence,
        "current_profile": current,
        "scores": scores,
        "context_servers": context_servers,
        "conversation_servers": conversation_servers,
        "reason": reason,
    }
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def auto_switch(
    directory: Optional[str] = None,
    confirm_threshold: float = CONFIDENCE_THRESHOLD,
    force: bool = False,
    silent: bool = False,
) -> dict:
    """Auto-select a profile for the current context and switch if confident.

    Args:
        directory: Project directory to scan; defaults to the cwd (via
            select_best_profile).
        confirm_threshold: Below this confidence the switch is NOT applied
            and ``needs_confirmation`` is returned True instead.
        force: Apply the switch even if already on the recommended profile
            or the confidence is below the threshold.
        silent: Accepted for interface compatibility; this function itself
            produces no output, so the flag is currently unused here.

    Returns:
        {
            "switched": bool,
            "from_profile": str,
            "to_profile": str | None,
            "confidence": float,
            "needs_confirmation": bool,  # True if confidence below threshold
            "reason": str,
        }
    """
    result = select_best_profile(directory=directory)
    recommended = result["recommended"]
    confidence = result["confidence"]
    current = result["current_profile"]

    # No profiles configured at all — nothing to switch to.
    if not recommended:
        return {
            "switched": False,
            "from_profile": current,
            "to_profile": None,
            "confidence": 0.0,
            "needs_confirmation": False,
            "reason": result["reason"],
        }

    # Already on the best profile — no-op unless forced.
    if recommended == current and not force:
        return {
            "switched": False,
            "from_profile": current,
            "to_profile": recommended,
            "confidence": confidence,
            "needs_confirmation": False,
            "reason": f"Already on best profile '{current}'",
        }

    # Low confidence — report back so the caller can ask the user.
    if confidence < confirm_threshold and not force:
        return {
            "switched": False,
            "from_profile": current,
            "to_profile": recommended,
            "confidence": confidence,
            "needs_confirmation": True,
            "reason": result["reason"],
        }

    # High confidence — switch silently. Imported lazily, matching the
    # original's function-scope imports.
    from .profiles import get_profile
    from .config import get_claude_desktop_config_path

    servers = get_profile(recommended)
    # FIX: the original guard was `if not servers and servers != {}`, a
    # roundabout way of saying "profile missing". An empty dict is a valid
    # (empty) profile; only None means not found. NOTE(review): assumes
    # get_profile returns a dict or None — confirm in profiles.py.
    if servers is None:
        return {
            "switched": False,
            "from_profile": current,
            "to_profile": recommended,
            "confidence": confidence,
            "needs_confirmation": False,
            "reason": f"Profile '{recommended}' not found",
        }

    # Write the profile's servers into the Claude Code config...
    config_path = get_claude_code_config_path()
    set_mcp_servers(config_path, servers)

    # ...and mirror them into the Claude Desktop config when it exists.
    desktop_path = get_claude_desktop_config_path()
    if desktop_path and desktop_path.exists():
        set_mcp_servers(desktop_path, servers)

    set_active_profile(recommended)

    return {
        "switched": True,
        "from_profile": current,
        "to_profile": recommended,
        "confidence": confidence,
        "needs_confirmation": False,
        "reason": result["reason"],
    }
|