msapling-cli 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ """MSapling MCP Server - Expose MSapling features to Claude Code, Cursor, and other MCP-compatible tools."""
@@ -0,0 +1,411 @@
1
+ """
2
+ MSapling MCP Server
3
+
4
+ Exposes MSapling capabilities as MCP tools that Claude Code, Cursor,
5
+ Windsurf, and other MCP-compatible AI tools can call.
6
+
7
+ Tools exposed:
8
+ - msapling_chat: Send a message to any LLM via MSapling (multi-model)
9
+ - msapling_diff: Generate unified diff between two texts
10
+ - msapling_apply_diff: Apply a unified diff to file content
11
+ - msapling_search_files: Semantic search across MDrive files
12
+ - msapling_read_file: Read a file from MDrive
13
+ - msapling_write_file: Write a file to MDrive with versioning
14
+ - msapling_project_context: Scan local project and build LLM context
15
+ - msapling_cost_check: Check remaining fuel credits and usage
16
+ - msapling_models: List available models with pricing
17
+
18
+ Usage in Claude Code:
19
+ Add to ~/.claude/settings.json:
20
+ {
21
+ "mcpServers": {
22
+ "msapling": {
23
+ "command": "msapling",
24
+ "args": ["mcp-serve"],
25
+ "env": {
26
+ "MSAPLING_TOKEN": "your-jwt-token",
27
+ "MSAPLING_API_URL": "https://api.msapling.com"
28
+ }
29
+ }
30
+ }
31
+ }
32
+
33
+ Usage in Cursor:
34
+ Add to .cursor/mcp.json:
35
+ {
36
+ "msapling": {
37
+ "command": "msapling",
38
+ "args": ["mcp-serve"]
39
+ }
40
+ }
41
+ """
42
+ from __future__ import annotations
43
+
44
+ import asyncio
45
+ import json
46
+ import sys
47
+ from typing import Any, Dict, List, Optional
48
+
49
+ # MCP protocol: communicate over stdin/stdout with JSON-RPC 2.0
50
+
51
+
52
+ def _write_message(msg: dict) -> None:
53
+ """Write a JSON-RPC message to stdout."""
54
+ raw = json.dumps(msg)
55
+ sys.stdout.write(f"Content-Length: {len(raw)}\r\n\r\n{raw}")
56
+ sys.stdout.flush()
57
+
58
+
59
+ def _read_message() -> Optional[dict]:
60
+ """Read a JSON-RPC message from stdin."""
61
+ # Read headers
62
+ headers = {}
63
+ while True:
64
+ line = sys.stdin.readline()
65
+ if not line or line.strip() == "":
66
+ break
67
+ if ":" in line:
68
+ key, val = line.split(":", 1)
69
+ headers[key.strip().lower()] = val.strip()
70
+
71
+ content_length = int(headers.get("content-length", 0))
72
+ if content_length == 0:
73
+ return None
74
+
75
+ # Read exactly content_length bytes, handling partial reads
76
+ body = ""
77
+ remaining = content_length
78
+ while remaining > 0:
79
+ chunk = sys.stdin.read(remaining)
80
+ if not chunk:
81
+ break
82
+ body += chunk
83
+ remaining -= len(chunk)
84
+ if len(body) < content_length:
85
+ return None
86
+ return json.loads(body)
87
+
88
+
89
+ # ─── Tool Definitions ─────────────────────────────────────────────────
90
+
91
# MCP tool manifest returned verbatim by the `tools/list` handler in serve().
# Each entry follows the MCP tool schema: name, human description, and a
# JSON-Schema `inputSchema` describing the arguments. Dispatch for these
# names happens in _handle_tool().
TOOLS = [
    # Single-model chat via the MSapling gateway.
    {
        "name": "msapling_chat",
        "description": "Send a message to any LLM through MSapling. Supports 200+ models via OpenRouter, OpenAI, Anthropic, Google, Ollama. Tracks costs automatically.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "prompt": {"type": "string", "description": "The message to send"},
                "model": {"type": "string", "description": "Model ID (e.g. 'google/gemini-flash-1.5', 'anthropic/claude-3.5-sonnet')", "default": "google/gemini-flash-1.5"},
            },
            "required": ["prompt"],
        },
    },
    # Diff generation / application (server-side MLineage engine).
    {
        "name": "msapling_diff",
        "description": "Generate a unified diff between old and new content. Uses MSapling's MLineage engine for validated, structured diffs.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "old_content": {"type": "string", "description": "Original file content"},
                "new_content": {"type": "string", "description": "Modified file content"},
                "filename": {"type": "string", "description": "Filename for diff headers", "default": "file"},
            },
            "required": ["old_content", "new_content"],
        },
    },
    {
        "name": "msapling_apply_diff",
        "description": "Apply a unified diff to original content. Returns the patched content. Safe: validates diff format before applying.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "original_content": {"type": "string", "description": "Original file content"},
                "diff_text": {"type": "string", "description": "Unified diff to apply"},
            },
            "required": ["original_content", "diff_text"],
        },
    },
    # MDrive remote-storage operations.
    {
        "name": "msapling_search_files",
        "description": "Search files in an MDrive project by keyword relevance. Returns ranked results with content previews.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "MDrive project ID"},
                "query": {"type": "string", "description": "Search query"},
                "limit": {"type": "integer", "description": "Max results", "default": 5},
            },
            "required": ["project_id", "query"],
        },
    },
    {
        "name": "msapling_read_file",
        "description": "Read a file from MDrive storage. Returns file content with version info.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "MDrive project ID"},
                "file_path": {"type": "string", "description": "Path to file"},
            },
            "required": ["project_id", "file_path"],
        },
    },
    {
        "name": "msapling_write_file",
        "description": "Write a file to MDrive storage with automatic versioning and conflict detection.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "MDrive project ID"},
                "file_path": {"type": "string", "description": "Path to file"},
                "content": {"type": "string", "description": "File content to write"},
            },
            "required": ["project_id", "file_path", "content"],
        },
    },
    # Local-only tool: no API client or network access required.
    {
        "name": "msapling_project_context",
        "description": "Scan a local directory and build an LLM-ready context block with file tree and contents. Works offline, no server needed.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "Project directory path", "default": "."},
                "max_files": {"type": "integer", "description": "Maximum files to include", "default": 30},
                "max_file_size_kb": {"type": "integer", "description": "Skip files larger than this (KB)", "default": 50},
            },
        },
    },
    # Account introspection tools (no arguments).
    {
        "name": "msapling_cost_check",
        "description": "Check your MSapling account: remaining fuel credits, tier, and usage stats.",
        "inputSchema": {"type": "object", "properties": {}},
    },
    {
        "name": "msapling_models",
        "description": "List available LLM models with pricing info. Includes OpenRouter, OpenAI, Anthropic, Google, Ollama models.",
        "inputSchema": {"type": "object", "properties": {}},
    },
    # Pro-tier tools: fan a prompt out to several models at once.
    {
        "name": "msapling_multi_chat",
        "description": "Send the same prompt to multiple LLM models IN PARALLEL. Returns all responses for comparison. Useful for getting diverse perspectives or benchmarking models.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "prompt": {"type": "string", "description": "The prompt to send to all models"},
                "models": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of model IDs to query in parallel",
                    "default": ["google/gemini-flash-1.5", "anthropic/claude-3-haiku", "openai/gpt-4o-mini"],
                },
            },
            "required": ["prompt"],
        },
    },
    {
        "name": "msapling_swarm",
        "description": "Run a multi-model swarm: sends prompt to N models in parallel, then a judge model synthesizes the best answer. Returns both individual responses and the synthesis.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "prompt": {"type": "string", "description": "The task for the swarm"},
                "models": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Models to run in parallel (default: gemini, claude, gpt)",
                },
                "judge_model": {"type": "string", "description": "Model to synthesize final answer"},
            },
            "required": ["prompt"],
        },
    },
]
224
+
225
+
226
+ # ─── Tool Handlers ────────────────────────────────────────────────────
227
+
228
async def _handle_tool(name: str, args: Dict[str, Any]) -> Dict[str, Any]:
    """Dispatch one MCP tool call to its handler and return an MCP result.

    Every branch returns a dict of the MCP ``tools/call`` result shape:
    ``{"content": [{"type": "text", "text": ...}]}`` plus ``"isError": True``
    for failures. All remote tools share one MSaplingClient that is always
    closed in the ``finally`` block, even when a handler raises.
    """
    # Imported lazily so the MCP server can start without these modules'
    # import-time costs/side effects being paid before the first tool call.
    from ..api import MSaplingClient
    from ..local import detect_project_root, build_file_tree, read_files_as_context

    if name == "msapling_project_context":
        # Local only — no server needed
        path = args.get("path", ".")
        root, info = detect_project_root(path)
        files = build_file_tree(root, max_files=args.get("max_files", 30))
        context = read_files_as_context(root, files, max_size_kb=args.get("max_file_size_kb", 50))
        return {
            "content": [{"type": "text", "text": context}],
            # tokens_est uses the rough ~4-chars-per-token heuristic.
            "metadata": {"root": root, "type": info["type"], "files": len(files), "tokens_est": len(context) // 4},
        }

    # All other tools need the API client
    client = MSaplingClient()
    try:
        if name == "msapling_chat":
            import uuid
            # Collect the streamed chunks into a single text response;
            # a fresh UUID gives each MCP call its own chat session.
            parts = []
            async for chunk in client.stream_chat(
                chat_id=str(uuid.uuid4()),
                prompt=args["prompt"],
                model=args.get("model", "google/gemini-flash-1.5"),
            ):
                content = chunk.get("content", "")
                if content:
                    parts.append(content)
            return {"content": [{"type": "text", "text": "".join(parts)}]}

        elif name == "msapling_diff":
            result = await client.generate_diff(
                args["old_content"], args["new_content"], args.get("filename", "file"),
            )
            # The server has used both "diff" and "unified_diff" keys;
            # accept either for compatibility.
            diff_text = result.get("diff", result.get("unified_diff", ""))
            return {"content": [{"type": "text", "text": diff_text}]}

        elif name == "msapling_apply_diff":
            result = await client.apply_diff(args["original_content"], args["diff_text"])
            # Same dual-key tolerance as msapling_diff above.
            applied = result.get("applied_content", result.get("content", ""))
            return {"content": [{"type": "text", "text": applied}]}

        elif name == "msapling_search_files":
            # Direct HTTP call for search
            # NOTE(review): reaches into client._get_client() (private API);
            # consider a public search method on MSaplingClient.
            http_client = await client._get_client()
            resp = await http_client.post("/api/mdrive/search", json={
                "project_id": args["project_id"],
                "query": args["query"],
                "limit": args.get("limit", 5),
            })
            resp.raise_for_status()
            return {"content": [{"type": "text", "text": json.dumps(resp.json(), indent=2)}]}

        elif name == "msapling_read_file":
            content = await client.read_file(args["project_id"], args["file_path"])
            return {"content": [{"type": "text", "text": content}]}

        elif name == "msapling_write_file":
            result = await client.write_file(args["project_id"], args["file_path"], args["content"])
            return {"content": [{"type": "text", "text": json.dumps(result)}]}

        elif name == "msapling_cost_check":
            user = await client.me()
            text = (
                f"Tier: {user.get('tier', 'free')}\n"
                f"Fuel Credits: ${user.get('fuel_credits', 0):.4f}\n"
                f"Pro: {user.get('is_pro', False)}"
            )
            return {"content": [{"type": "text", "text": text}]}

        elif name == "msapling_models":
            models = await client.get_models()
            # Cap the listing at 30 models to keep the tool output small.
            lines = [f"{m.get('id', '?')} (ctx: {m.get('context_length', '?')})" for m in models[:30]]
            return {"content": [{"type": "text", "text": "\n".join(lines)}]}

        elif name == "msapling_multi_chat":
            # Tier check: multi requires pro
            user = await client.me()
            tier = str(user.get("tier", "free")).lower()
            if tier not in ("pro", "monthly", "lifetime", "enterprise") and not user.get("is_pro"):
                return {"content": [{"type": "text", "text": "multi_chat requires Pro subscription. Upgrade at https://msapling.com/pricing"}], "isError": True}
            default_models = ["google/gemini-flash-1.5", "anthropic/claude-3-haiku", "openai/gpt-4o-mini"]
            models = args.get("models", default_models)
            results = await client.multi_chat(args["prompt"], models)
            lines = []
            for r in results:
                status = "OK" if r["status"] == "ok" else f"FAILED: {r.get('error', '')}"
                # Assumes failed results still carry a "response" key —
                # TODO confirm against MSaplingClient.multi_chat's schema.
                lines.append(f"### {r['model']} ({status})\n{r['response'][:2000]}")
            return {"content": [{"type": "text", "text": "\n\n---\n\n".join(lines)}]}

        elif name == "msapling_swarm":
            # Tier check: swarm requires pro
            user = await client.me()
            tier = str(user.get("tier", "free")).lower()
            if tier not in ("pro", "monthly", "lifetime", "enterprise") and not user.get("is_pro"):
                return {"content": [{"type": "text", "text": "swarm requires Pro subscription. Upgrade at https://msapling.com/pricing"}], "isError": True}
            models = args.get("models")
            judge = args.get("judge_model")
            result = await client.swarm(args["prompt"], models=models, synthesize_model=judge)
            # Individual agent answers first (truncated), then the judge's synthesis.
            parts = []
            for r in result["agent_responses"]:
                parts.append(f"**{r['model']}**: {r['response'][:1000]}")
            parts.append(f"\n---\n## Synthesis ({result['judge_model']})\n{result['synthesis']}")
            return {"content": [{"type": "text", "text": "\n\n".join(parts)}]}

        else:
            return {"content": [{"type": "text", "text": f"Unknown tool: {name}"}], "isError": True}

    finally:
        await client.close()
340
+
341
+
342
+ # ─── MCP Protocol Handler ─────────────────────────────────────────────
343
+
344
def serve():
    """Run the MCP server: a blocking stdin/stdout JSON-RPC loop.

    Handles the MCP handshake (``initialize`` / ``notifications/initialized``),
    ``tools/list``, and ``tools/call``; any other method with an id gets a
    JSON-RPC -32601 error. Exits when stdin yields no further message.

    Note: the redundant function-local ``import sys`` was removed — ``sys``
    is already imported at module level.
    """
    sys.stderr.write("MSapling MCP Server starting...\n")

    # Single event loop for the entire server lifetime
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    try:
        while True:
            msg = _read_message()
            if msg is None:
                # EOF or an unreadable frame: shut down cleanly.
                break

            method = msg.get("method", "")
            msg_id = msg.get("id")
            params = msg.get("params", {})

            if method == "initialize":
                _write_message({
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "result": {
                        "protocolVersion": "2024-11-05",
                        "capabilities": {"tools": {}},
                        "serverInfo": {"name": "msapling", "version": "0.1.0"},
                    },
                })

            elif method == "notifications/initialized":
                pass  # No response needed

            elif method == "tools/list":
                _write_message({
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "result": {"tools": TOOLS},
                })

            elif method == "tools/call":
                tool_name = params.get("name", "")
                tool_args = params.get("arguments", {})
                try:
                    result = loop.run_until_complete(_handle_tool(tool_name, tool_args))
                    _write_message({
                        "jsonrpc": "2.0",
                        "id": msg_id,
                        "result": result,
                    })
                except Exception as e:
                    # Tool failures are reported as an MCP tool-result error
                    # (isError), not a JSON-RPC protocol error, so the client
                    # can surface the message to the model.
                    _write_message({
                        "jsonrpc": "2.0",
                        "id": msg_id,
                        "result": {
                            "content": [{"type": "text", "text": f"Error: {e}"}],
                            "isError": True,
                        },
                    })

            elif msg_id is not None:
                # Unknown request (has an id): answer with "method not found".
                # Unknown notifications (no id) are silently ignored.
                _write_message({
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "error": {"code": -32601, "message": f"Method not found: {method}"},
                })
    finally:
        loop.close()
msapling_cli/memory.py ADDED
@@ -0,0 +1,97 @@
1
+ """Persistent memory for MSapling CLI.
2
+
3
+ Auto-memory that survives across sessions — like Claude Code's memory system.
4
+ Stores user preferences, project notes, and learned patterns.
5
+
6
+ Memory is stored per-project at .msapling/memory.json
7
+ Global memory at ~/.msapling/memory.json
8
+ """
9
+ from __future__ import annotations
10
+
11
+ import json
12
+ import time
13
+ from pathlib import Path
14
+ from typing import Dict, List, Optional
15
+
16
+
17
+ _GLOBAL_MEMORY_FILE = Path.home() / ".msapling" / "memory.json"
18
+
19
+
20
+ def _project_memory_file(project_root: str) -> Path:
21
+ return Path(project_root) / ".msapling" / "memory.json"
22
+
23
+
24
+ def _load(path: Path) -> List[Dict]:
25
+ if path.exists():
26
+ try:
27
+ return json.loads(path.read_text(encoding="utf-8"))
28
+ except Exception:
29
+ return []
30
+ return []
31
+
32
+
33
+ def _save(path: Path, entries: List[Dict]):
34
+ path.parent.mkdir(parents=True, exist_ok=True)
35
+ path.write_text(json.dumps(entries, indent=2), encoding="utf-8")
36
+ try:
37
+ path.chmod(0o600)
38
+ except OSError:
39
+ pass
40
+
41
+
42
def add_memory(text: str, *, project_root: Optional[str] = None, scope: str = "project") -> str:
    """Append a memory entry and return a short confirmation string.

    Project-scoped entries go to <project_root>/.msapling/memory.json when a
    project root is given; everything else lands in the global store.
    """
    if project_root and scope == "project":
        target = _project_memory_file(project_root)
    else:
        target = _GLOBAL_MEMORY_FILE

    records = _load(target)
    records.append({
        "text": text.strip(),
        "created_at": time.time(),
        "scope": scope,
    })
    # Keep only the 100 most recent entries (no-op while under the cap).
    del records[:-100]
    _save(target, records)
    return f"Saved ({scope}): {text[:80]}"
57
+
58
+
59
def get_memories(project_root: Optional[str] = None) -> str:
    """Render stored memories as a text block for LLM context injection.

    Emits a "[Global memories]" section and, when a project root is given,
    a "[Project memories]" section — each limited to the 20 newest entries.
    Returns an empty string when nothing is stored.
    """
    sections: List[str] = []

    global_records = _load(_GLOBAL_MEMORY_FILE)
    if global_records:
        bullets = "\n".join(f"- {e['text']}" for e in global_records[-20:])
        sections.append(f"[Global memories]\n{bullets}")

    if project_root:
        project_records = _load(_project_memory_file(project_root))
        if project_records:
            bullets = "\n".join(f"- {e['text']}" for e in project_records[-20:])
            sections.append(f"[Project memories]\n{bullets}")

    return "\n\n".join(sections)
77
+
78
+
79
def list_memories(project_root: Optional[str] = None) -> List[Dict]:
    """Return all memory entries (global first, then project), each tagged
    with a "_scope" key indicating where it came from."""
    sources = [("global", _GLOBAL_MEMORY_FILE)]
    if project_root:
        sources.append(("project", _project_memory_file(project_root)))

    collected: List[Dict] = []
    for scope_name, location in sources:
        for record in _load(location):
            record["_scope"] = scope_name
            collected.append(record)
    return collected
90
+
91
+
92
def clear_memories(project_root: Optional[str] = None, scope: str = "all"):
    """Erase stored memories for the requested scope ("global", "project",
    or "all"). Project memories are only touched when a root is supplied."""
    wipe_global = scope in ("all", "global")
    wipe_project = scope in ("all", "project") and bool(project_root)
    if wipe_global:
        _save(_GLOBAL_MEMORY_FILE, [])
    if wipe_project:
        _save(_project_memory_file(project_root), [])
@@ -0,0 +1,102 @@
1
+ """Session persistence for MSapling CLI.
2
+
3
+ Saves and resumes conversation history, project context, and settings
4
+ so users can pick up where they left off.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import json
9
+ import time
10
+ from pathlib import Path
11
+ from typing import Any, Dict, List, Optional
12
+
13
+ _SESSION_DIR = Path.home() / ".msapling" / "sessions"
14
+
15
+
16
def _ensure_dir():
    """Create the session directory if needed, owner-only where supported."""
    _SESSION_DIR.mkdir(parents=True, exist_ok=True)
    try:
        # Sessions may hold sensitive conversation data.
        _SESSION_DIR.chmod(0o700)
    except OSError:
        pass  # Windows doesn't support Unix permissions
22
+
23
+
24
def save_session(
    session_id: str,
    messages: List[Dict[str, str]],
    model: str,
    project_root: Optional[str] = None,
    cost_total: float = 0.0,
    tokens_total: int = 0,
    metadata: Optional[Dict[str, Any]] = None,
) -> Path:
    """Write a session snapshot to ~/.msapling/sessions/<id>.json.

    Returns the path written. The snapshot records the conversation history,
    model, optional project root, running cost/token totals, a timestamp,
    and any extra metadata.
    """
    _ensure_dir()
    target = _SESSION_DIR / f"{session_id}.json"
    snapshot = {
        "id": session_id,
        "messages": messages,
        "model": model,
        "project_root": project_root,
        "cost_total": cost_total,
        "tokens_total": tokens_total,
        "updated_at": time.time(),
        "metadata": metadata or {},
    }
    target.write_text(json.dumps(snapshot, indent=2), encoding="utf-8")
    try:
        # Transcripts can contain sensitive prompts; keep owner-only.
        target.chmod(0o600)
    except OSError:
        pass  # Windows doesn't support Unix permissions
    return target
51
+
52
+
53
def load_session(session_id: str) -> Optional[Dict[str, Any]]:
    """Return the saved session dict for *session_id*, or None when the
    file is absent or unreadable."""
    _ensure_dir()
    target = _SESSION_DIR / f"{session_id}.json"
    try:
        # EAFP: covers both a missing file and corrupt JSON in one place.
        return json.loads(target.read_text(encoding="utf-8"))
    except Exception:
        return None
62
+
63
+
64
def list_sessions(limit: int = 20) -> List[Dict[str, Any]]:
    """Return up to *limit* sessions, newest first, annotated with a
    "_path" and a one-line "_summary" (message count + last user message).

    Unreadable or corrupt session files are skipped silently.
    """
    _ensure_dir()
    newest_first = sorted(
        _SESSION_DIR.glob("*.json"),
        key=lambda f: f.stat().st_mtime,
        reverse=True,
    )
    loaded: List[Dict[str, Any]] = []
    for candidate in newest_first:
        try:
            record = json.loads(candidate.read_text(encoding="utf-8"))
            record["_path"] = str(candidate)
            history = record.get("messages", [])
            preview = ""
            # Most recent user message, truncated, as the summary preview.
            for message in reversed(history):
                if message.get("role") == "user":
                    preview = (message.get("content", "") or "")[:80]
                    break
            record["_summary"] = f"{len(history)} msgs | {preview}"
            loaded.append(record)
        except Exception:
            continue
        if len(loaded) >= limit:
            break
    return loaded
84
+
85
+
86
def delete_session(session_id: str) -> bool:
    """Delete a session file; True on success (or if it vanished mid-way).

    Retries briefly on PermissionError to cope with transient Windows file
    locks (antivirus scanners, indexers).
    """
    target = _SESSION_DIR / f"{session_id}.json"
    if not target.exists():
        return False
    try:
        target.chmod(0o666)  # Ensure writable before delete (Windows compat)
    except OSError:
        pass
    attempts = 5
    while attempts > 0:
        attempts -= 1
        try:
            target.unlink()
        except PermissionError:
            # Briefly back off and retry; the lock is usually transient.
            time.sleep(0.05)
        except FileNotFoundError:
            # Someone else removed it — the goal is achieved either way.
            return True
        else:
            return True
    return False
+ return False