vplex-memory 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +146 -0
- package/bin.js +2 -0
- package/package.json +28 -0
- package/vplex-mcp-server.mjs +1187 -0
package/README.md
ADDED
@@ -0,0 +1,146 @@
# vplex-memory

**Persistent cross-session memory for AI coding tools via MCP (Model Context Protocol).**

Works with Claude Code, Cursor, VS Code Copilot, Windsurf, Codex, and any MCP-compatible tool. No VPLEX Desktop required.

## Quick Start

```json
{
  "mcpServers": {
    "vplex-memory": {
      "command": "npx",
      "args": ["-y", "vplex-memory@2.3.0"]
    }
  }
}
```

That's it. Add this to your MCP config and the server handles everything — authentication, project detection, and memory persistence.

## Setup by Tool

### Claude Code

Add to `.mcp.json` in your project root or `~/.claude/settings.json` globally:

```json
{
  "mcpServers": {
    "vplex-memory": {
      "command": "npx",
      "args": ["-y", "vplex-memory@2.3.0"]
    }
  }
}
```

### Cursor

Add to `.cursor/mcp.json`:

```json
{
  "mcpServers": {
    "vplex-memory": {
      "type": "stdio",
      "command": "npx",
      "args": ["-y", "vplex-memory@2.3.0"]
    }
  }
}
```

### VS Code (Copilot)

Add to `.vscode/mcp.json`:

```json
{
  "servers": {
    "vplex-memory": {
      "type": "stdio",
      "command": "npx",
      "args": ["-y", "vplex-memory@2.3.0"]
    }
  }
}
```

### Windsurf

Add to `~/.windsurf/mcp.json`:

```json
{
  "mcpServers": {
    "vplex-memory": {
      "type": "stdio",
      "command": "npx",
      "args": ["-y", "vplex-memory@2.3.0"]
    }
  }
}
```

## Authentication

On first use, the server detects that no session exists and initiates a browser-based auth flow:

1. The MCP server returns an error with a URL and a code: `Open https://vplex-web.vercel.app/auth/cli and enter code: XXXX`
2. Your AI tool displays this message — open the URL and enter the code
3. After approval, a session token is saved to `~/.vplex/session.json`
4. All subsequent tool calls work automatically

The token refreshes automatically. You only need to authenticate once per machine.
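
For reference, the saved session is a small JSON file written with `0600` permissions. A minimal sketch, with field names taken from the server source (`token`, `refresh_token`, `user`) and placeholder values; the exact shape of the `user` object is whatever the API returns:

```json
{
  "token": "<JWT access token>",
  "refresh_token": "<opaque refresh token>",
  "user": {}
}
```
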
## How It Works

- **stdio-only** — no TCP/HTTP listener, no network attack surface
- **Project-scoped** — memories are grouped by project (hashed from `process.cwd()`)
- **Persistent** — memories survive across sessions, tools, and devices
- **Semantic search** — find past decisions by meaning, not just keywords
- **Zero dependencies** — single `.mjs` file, runs on Node.js 18+

## Available Tools (15)

| Tool | Description |
|------|-------------|
| `memory_store` | Store a memory with type, importance, tags, and related files |
| `memory_search` | Semantic + keyword search across project memories |
| `memory_list` | List all memories for the current project |
| `memory_list_projects` | List all projects with stored memories |
| `memory_modify` | Update, archive, delete, or reactivate memories |
| `memory_expand` | Get full content of memories by IDs |
| `memory_session_recap` | Summary of recent activity across sessions |
| `memory_get_rules` | Retrieve project-specific behavioral rules |
| `memory_projects` | Manage project documents (briefs, PRDs, plans) |
| `memory_tasks` | Create, complete, and list project tasks |
| `memory_start_thinking` | Start a structured thinking sequence |
| `memory_add_thought` | Add thoughts to a sequence; use "conclusion" to finalize |
| `memory_export` | Export all memories as JSON (requires Max plan) |
| `memory_insights` | Discover clusters of related memories (requires Base plan) |
| `memory_upload_document` | Upload PDF/MD/TXT into searchable memory chunks (requires Base plan) |
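
For a concrete sense of the wire format, here is a minimal sketch of a standard MCP `tools/call` request for `memory_store`, sent as a single JSON-RPC 2.0 message over stdio. The framing follows the MCP spec; the argument names come from the tool's input schema, and the values are illustrative:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "memory_store",
    "arguments": {
      "content": "Decided to keep the server stdio-only so there is no network attack surface.",
      "type": "decision",
      "tags": ["architecture", "security"],
      "importance_score": 0.8
    }
  }
}
```
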
## Memory Types

`feature` `code-snippet` `debug` `design` `decision` `rule` `learning` `research` `discussion` `progress` `task` `working-notes` `pattern` `context` `bug` `document-chunk`

## Security

- stdio-only transport — no network listener
- Auth token stored in `~/.vplex/session.json` with `0600` permissions (Unix)
- All API calls over HTTPS to `termplex-api.vercel.app`
- Input validation: length limits, type checks, ID format validation
- Rate limiting: per-tool fixed window (30 stores/min, 60 searches/min)
- Content quality gate rejects noise (raw errors, stack traces, install logs)

## Requirements

- Node.js 18+
- VPLEX account (free tier available at [vplex-web.vercel.app](https://vplex-web.vercel.app))

## License

MIT

package/bin.js
ADDED
package/package.json
ADDED
@@ -0,0 +1,28 @@
{
  "name": "vplex-memory",
  "version": "2.3.0",
  "description": "VPLEX Memory MCP Server — persistent cross-session memory for AI coding tools",
  "type": "module",
  "bin": {
    "vplex-memory": "bin.js"
  },
  "files": [
    "vplex-mcp-server.mjs",
    "bin.js",
    "README.md"
  ],
  "keywords": [
    "mcp",
    "memory",
    "ai",
    "cursor",
    "claude",
    "copilot",
    "model-context-protocol"
  ],
  "license": "MIT",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/0xGUCCIFER/VPLEX.git"
  }
}

package/vplex-mcp-server.mjs
ADDED
@@ -0,0 +1,1187 @@
#!/usr/bin/env node
/**
 * VPLEX MCP Server — Secure Memory Bridge for AI CLIs
 *
 * Exposes VPLEX Memory system via MCP Protocol (stdio only — no network listener)
 * Compatible with Claude Code, Cursor, Codex, and other MCP-enabled tools
 *
 * Security model:
 * - stdio-only: no TCP/HTTP listener, no network attack surface
 * - Auth: reads Bearer token from ~/.vplex/session.json (0o600 on Unix)
 * - All API calls go to VPLEX backend over HTTPS
 * - Input validation: length limits, type checks, ID format validation
 * - Rate limiting: per-tool fixed window
 * - Error sanitization: no API internals leaked to caller
 * - Token cached in-memory with TTL, re-read from disk on expiry
 *
 * v2.1 — Security hardened
 */

import { readFileSync, writeFileSync, statSync, mkdirSync, realpathSync } from "fs";
import { homedir, platform } from "os";
import { join, resolve, relative } from "path";
import { createInterface } from "readline";

const API_URL = "https://termplex-api.vercel.app";
const WEB_URL = "https://vplex-web.vercel.app";
const SERVER_NAME = "vplex-memory";
const SERVER_VERSION = "2.3.0";

// ── Security Constants ──────────────────────────────────────────────

const MAX_CONTENT_LENGTH = 50_000;       // 50KB max memory content
const MAX_TAG_LENGTH = 100;              // per tag
const MAX_TAGS = 50;                     // max tags per memory
const MAX_FILE_PATH_LENGTH = 500;        // related file path limit
const MAX_FILES = 50;                    // max related files
const MAX_QUERY_LENGTH = 500;            // search query limit (must match API)
const MAX_SESSION_NAME_LENGTH = 200;     // session name limit
const MAX_SEARCH_LIMIT = 50;             // max search results
const ID_PATTERN = /^[a-f0-9-]{8,64}$/i; // UUID-ish IDs only
const HASH_PATTERN = /^[a-z0-9]{1,20}$/; // project hash format

// Rate limiting: max calls per tool per window
const RATE_LIMITS = {
  memory_store: { max: 30, windowMs: 60_000 },  // 30 stores/min
  memory_search: { max: 60, windowMs: 60_000 }, // 60 searches/min
  memory_modify: { max: 20, windowMs: 60_000 }, // 20 modifies/min
  _default: { max: 120, windowMs: 60_000 },     // 120/min for reads
};

const rateBuckets = new Map(); // tool -> { count, resetAt }

function checkRateLimit(tool) {
  const config = RATE_LIMITS[tool] || RATE_LIMITS._default;
  const now = Date.now();
  let bucket = rateBuckets.get(tool);

  if (!bucket || now >= bucket.resetAt) {
    bucket = { count: 0, resetAt: now + config.windowMs };
    rateBuckets.set(tool, bucket);
  }

  bucket.count++;
  if (bucket.count > config.max) {
    throw new Error(`Rate limit exceeded for ${tool}. Try again in ${Math.ceil((bucket.resetAt - now) / 1000)}s.`);
  }
}

// ── Input Validation ────────────────────────────────────────────────

function validateString(val, name, maxLen) {
  if (typeof val !== "string") throw new Error(`${name} must be a string`);
  if (val.length === 0) throw new Error(`${name} cannot be empty`);
  if (val.length > maxLen) throw new Error(`${name} exceeds maximum length of ${maxLen}`);
  return val;
}

function validateId(val, name) {
  if (typeof val !== "string") throw new Error(`${name} must be a string`);
  if (!ID_PATTERN.test(val)) throw new Error(`${name} has invalid format`);
  return val;
}

function validateHash(val, name) {
  if (typeof val !== "string") throw new Error(`${name} must be a string`);
  if (!HASH_PATTERN.test(val)) throw new Error(`${name} has invalid format`);
  return val;
}

function validateTags(tags) {
  if (!Array.isArray(tags)) return [];
  return tags
    .filter((t) => typeof t === "string" && t.length > 0)
    .slice(0, MAX_TAGS)
    .map((t) => t.slice(0, MAX_TAG_LENGTH).replace(/[<>{}]/g, ""));
}

function validateFiles(files) {
  if (!Array.isArray(files)) return undefined;
  return files
    .filter((f) => typeof f === "string" && f.length > 0)
    .slice(0, MAX_FILES)
    .map((f) => f.slice(0, MAX_FILE_PATH_LENGTH));
}

function validateImportance(val) {
  if (val === undefined || val === null) return undefined;
  const n = Number(val);
  if (isNaN(n)) return undefined;
  return Math.max(0, Math.min(1, n));
}

function validateMemoryType(val) {
  if (!MEMORY_TYPES.includes(val)) throw new Error(`Invalid memory type: ${val}`);
  return val;
}

// ── Content Quality Gate ────────────────────────────────────────────

const NOISE_PATTERNS = [
  // Generic errors / stack traces (not project-specific insights)
  /^(error|Error|ERROR)[:\s].*?(ENOENT|EACCES|EPERM|ETIMEDOUT|ECONNREFUSED|ECONNRESET|MODULE_NOT_FOUND)/i,
  // Raw stack traces without analysis
  /^\s*at\s+[\w.]+\s+\(.*:\d+:\d+\)/m,
  // npm/pnpm install output
  /^(added|removed|up to date)\s+\d+\s+packages?\s+in/i,
  // Git status noise
  /^(On branch|Your branch is|nothing to commit|Changes not staged)/,
  // CLI startup/version banners
  /^(v?\d+\.\d+\.\d+|Welcome to|Starting|Initializing|Loading)\s/i,
  // Compiler warnings without context
  /^warning\[?\w*\]?:\s+unused/i,
  // Raw file listings
  /^(total\s+\d+|-[rwx-]{9})\s/,
  // Process exit/signal noise
  /^(Process exited|Killed|Terminated|Segmentation fault)/i,
  // Empty-ish content after trimming
  /^\s*$/,
];

// Secret patterns — reject content that contains credentials or private keys
const SECRET_PATTERNS = [
  { pattern: /-----BEGIN\s+(RSA |EC |DSA |OPENSSH )?PRIVATE KEY-----/, label: "private key" },
  { pattern: /AKIA[0-9A-Z]{16}/, label: "AWS access key" },
  { pattern: /ghp_[a-zA-Z0-9]{36}/, label: "GitHub personal access token" },
  { pattern: /gho_[a-zA-Z0-9]{36}/, label: "GitHub OAuth token" },
  { pattern: /github_pat_[a-zA-Z0-9]{22}_[a-zA-Z0-9]{59}/, label: "GitHub fine-grained token" },
  { pattern: /sk-[a-zA-Z0-9]{20,}/, label: "API secret key (OpenAI/Stripe)" },
  { pattern: /sk-proj-[a-zA-Z0-9_-]{40,}/, label: "OpenAI project key" },
  { pattern: /xox[bpsar]-[a-zA-Z0-9-]{10,}/, label: "Slack token" },
  { pattern: /npm_[a-zA-Z0-9]{36}/, label: "npm token" },
  { pattern: /glpat-[a-zA-Z0-9_-]{20,}/, label: "GitLab token" },
  { pattern: /eyJ[a-zA-Z0-9_-]{20,}\.eyJ[a-zA-Z0-9_-]{20,}\./, label: "JWT token" },
];

const MIN_CONTENT_LENGTH = 15; // memories shorter than this are almost always noise
const MIN_WORD_COUNT = 3;      // at least 3 words for meaningful content

/**
 * Rejects low-quality or noisy content before it reaches the API.
 * Returns null if content is acceptable, or an error message explaining rejection.
 */
function checkContentQuality(content, type) {
  const trimmed = content.trim();

  // Too short
  if (trimmed.length < MIN_CONTENT_LENGTH) {
    return `Content too short (${trimmed.length} chars). Memories should be complete, descriptive thoughts — not fragments.`;
  }

  // Too few words
  const wordCount = trimmed.split(/\s+/).length;
  if (wordCount < MIN_WORD_COUNT) {
    return `Content has only ${wordCount} word(s). Write a complete sentence describing the insight, decision, or finding.`;
  }

  // Secret detection — never store credentials
  for (const { pattern, label } of SECRET_PATTERNS) {
    if (pattern.test(trimmed)) {
      return `Content contains what appears to be a ${label}. Storing secrets in memory is a security risk. Remove the credential and store only the insight (e.g., "Auth uses JWT tokens stored in ~/.vplex/session.json").`;
    }
  }

  // Noise pattern detection
  for (const pattern of NOISE_PATTERNS) {
    if (pattern.test(trimmed)) {
      return `Content looks like terminal noise, not a project insight. Memories should capture decisions, learnings, or patterns — not raw output. If this IS valuable, rephrase it as an insight (e.g., "Learned that X fails because Y — fix is Z").`;
    }
  }

  // Type-content mismatch: "pattern" type should describe actual patterns, not errors
  if (type === "pattern" && /\b(error|fail|crash|exception|traceback)\b/i.test(trimmed) && !/\b(pattern|always|whenever|every time|consistently)\b/i.test(trimmed)) {
    return `Type "pattern" is for recurring codebase patterns (e.g., "All stores use create() with devtools"). For errors, use type "debug" instead and describe the root cause + fix.`;
  }

  // Type "rule" should be prescriptive
  if (type === "rule" && !/\b(always|never|must|should|shall|don't|do not|prefer|avoid)\b/i.test(trimmed)) {
    return `Type "rule" should contain a prescriptive guideline (e.g., "Never use .unwrap() in production"). Consider "learning" or "decision" for observations.`;
  }

  return null; // content is fine
}

// ── Session & Token Management ──────────────────────────────────────

let cachedToken = null;
let tokenReadAt = 0;
const TOKEN_CACHE_MS = 30_000;      // re-read from disk every 30s
const TOKEN_REFRESH_MARGIN_S = 120; // refresh if < 2 min until expiry

// CLI auth flow state
let cliAuthPending = null; // { sessionId, userCode, verificationUrl, pollInterval, expiresAt }

function getSessionPath() {
  return join(homedir(), ".vplex", "session.json");
}

function readSession() {
  try {
    const sessionPath = getSessionPath();

    // Verify file permissions on Unix (skip on Windows — no chmod)
    if (platform() !== "win32") {
      const stat = statSync(sessionPath);
      const mode = stat.mode & 0o777;
      if (mode !== 0o600) {
        process.stderr.write(`[vplex-mcp] Warning: ${sessionPath} has permissions ${mode.toString(8)}, expected 600\n`);
      }
    }

    return JSON.parse(readFileSync(sessionPath, "utf-8"));
  } catch {
    return null;
  }
}

function saveSession(session) {
  const dir = join(homedir(), ".vplex");
  try { mkdirSync(dir, { recursive: true }); } catch { /* exists */ }
  const sessionPath = join(dir, "session.json");
  writeFileSync(sessionPath, JSON.stringify(session, null, 2), { mode: 0o600 });
}

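// Note: this decodes the JWT payload purely to read its `exp` claim; no
// signature verification happens here, since the token is only inspected
// locally to decide when a refresh is due.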
function getTokenExpiry(token) {
  try {
    const payload = JSON.parse(Buffer.from(token.split(".")[1], "base64url").toString());
    return payload.exp || 0;
  } catch { return 0; }
}

async function refreshTokenFromAPI(refreshTokenStr) {
  try {
    const response = await fetch(`${API_URL}/auth/refresh`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ refresh_token: refreshTokenStr }),
    });
    if (!response.ok) return null;
    const data = await response.json();
    if (!data.token) return null;
    return { token: data.token, refresh_token: data.refresh_token, user: data.user };
  } catch { return null; }
}

async function getToken() {
  const now = Date.now();

  // Return cached token if still fresh
  if (cachedToken && (now - tokenReadAt) < TOKEN_CACHE_MS) {
    // Even with cache, check if close to expiry and refresh proactively
    const exp = getTokenExpiry(cachedToken);
    if (exp > 0 && (exp - now / 1000) < TOKEN_REFRESH_MARGIN_S) {
      const session = readSession();
      if (session?.refresh_token) {
        const refreshed = await refreshTokenFromAPI(session.refresh_token);
        if (refreshed) {
          saveSession({ ...session, token: refreshed.token, refresh_token: refreshed.refresh_token, user: refreshed.user || session.user });
          cachedToken = refreshed.token;
          tokenReadAt = Date.now();
          return cachedToken;
        }
      }
    }
    return cachedToken;
  }

  // Read from disk
  const session = readSession();
  if (!session?.token || typeof session.token !== "string") {
    cachedToken = null;
    return null;
  }

  // Check if token is expired or close to expiry
  const exp = getTokenExpiry(session.token);
  const nowSec = now / 1000;

  if (exp > 0 && (exp - nowSec) < TOKEN_REFRESH_MARGIN_S) {
    // Token is close to expiry or already expired — try refresh
    if (session.refresh_token) {
      const refreshed = await refreshTokenFromAPI(session.refresh_token);
      if (refreshed) {
        saveSession({ ...session, token: refreshed.token, refresh_token: refreshed.refresh_token, user: refreshed.user || session.user });
        cachedToken = refreshed.token;
        tokenReadAt = Date.now();
        return cachedToken;
      }
    }
    // Refresh failed — if token is fully expired, return null
    if (exp > 0 && exp < nowSec) {
      cachedToken = null;
      return null;
    }
  }

  cachedToken = session.token;
  tokenReadAt = now;
  return cachedToken;
}

// ── CLI Auth Flow (Browser-based) ───────────────────────────────────

async function initCliAuth() {
  try {
    const response = await fetch(`${API_URL}/auth/cli/init`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
    });
    if (!response.ok) return null;
    const data = await response.json();
    return data;
  } catch { return null; }
}

async function pollCliAuth(sessionId) {
  try {
    const response = await fetch(`${API_URL}/auth/cli/poll/${sessionId}`);
    if (!response.ok) return null;
    return response.json();
  } catch { return null; }
}

function startCliAuthPolling(sessionId) {
  if (cliAuthPending?.pollInterval) {
    clearInterval(cliAuthPending.pollInterval);
  }

  const interval = setInterval(async () => {
    const result = await pollCliAuth(sessionId);
    if (!result) return;

    if (result.status === "approved" && result.token) {
      clearInterval(interval);
      const session = readSession() || {};
      saveSession({
        ...session,
        token: result.token,
        refresh_token: result.refresh_token,
      });
      cachedToken = result.token;
      tokenReadAt = Date.now();
      cliAuthPending = null;
      process.stderr.write("[vplex-mcp] CLI auth approved — token saved.\n");
    } else if (result.status === "expired") {
      clearInterval(interval);
      cliAuthPending = null;
      process.stderr.write("[vplex-mcp] CLI auth session expired.\n");
    }
  }, 2000);

  return interval;
}

/**
 * Starts CLI auth flow if no token is available.
 * Returns an error message with the auth URL and code for the user.
 */
async function startCliAuthFlow() {
  // Don't start if already pending
  if (cliAuthPending && Date.now() < cliAuthPending.expiresAt) {
    return `Not authenticated. Open ${cliAuthPending.verificationUrl} and enter code: ${cliAuthPending.userCode}`;
  }

  const result = await initCliAuth();
  if (!result?.cli_session_id) {
    return "Not authenticated. Please log in via VPLEX Desktop App.";
  }

  const pollInterval = startCliAuthPolling(result.cli_session_id);

  cliAuthPending = {
    sessionId: result.cli_session_id,
    userCode: result.user_code,
    verificationUrl: result.verification_url || `${WEB_URL}/auth/cli`,
    pollInterval,
    expiresAt: Date.now() + (result.expires_in || 600) * 1000,
  };

  return `Not authenticated. Open ${cliAuthPending.verificationUrl} and enter code: ${cliAuthPending.userCode}`;
}

// ── Project Identification ──────────────────────────────────────────

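// Classic 31-multiplier string hash: `(hash << 5) - hash` is `hash * 31`,
// `hash |= 0` truncates to a 32-bit integer, and the result is rendered in
// base-36 to give a short, stable per-directory project ID.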
function hashString(str) {
|
|
405
|
+
let hash = 0;
|
|
406
|
+
for (let i = 0; i < str.length; i++) {
|
|
407
|
+
const char = str.charCodeAt(i);
|
|
408
|
+
hash = ((hash << 5) - hash) + char;
|
|
409
|
+
hash |= 0;
|
|
410
|
+
}
|
|
411
|
+
return Math.abs(hash).toString(36);
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
const projectHash = hashString(process.cwd());
|
|
415
|
+
const projectName = process.cwd().replace(/\\/g, "/").split("/").pop() || process.cwd();
|
|
416
|
+
const projectPath = process.cwd();
|
|
417
|
+
|
|
418
|
+
// ── HTTP Helper ─────────────────────────────────────────────────────
|
|
419
|
+
|
|
420
|
+
async function apiFetch(path, options = {}) {
|
|
421
|
+
const token = await getToken();
|
|
422
|
+
if (!token) {
|
|
423
|
+
// Start or return existing CLI auth flow message
|
|
424
|
+
const authMessage = await startCliAuthFlow();
|
|
425
|
+
throw new Error(authMessage);
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
// Validate path is a safe relative API path (allows %-encoding for query params)
|
|
429
|
+
if (!/^\/[a-zA-Z0-9/_?&=.%-]+$/.test(path)) {
|
|
430
|
+
throw new Error("Invalid API path");
|
|
431
|
+
}
|
|
432
|
+
|
|
433
|
+
const url = `${API_URL}${path}`;
|
|
434
|
+
const headers = {
|
|
435
|
+
"Content-Type": "application/json",
|
|
436
|
+
Authorization: `Bearer ${token}`,
|
|
437
|
+
...(options.headers || {}),
|
|
438
|
+
};
|
|
439
|
+
|
|
440
|
+
const response = await fetch(url, { ...options, headers });
|
|
441
|
+
if (!response.ok) {
|
|
442
|
+
// Sanitize error — don't leak API response body to caller
|
|
443
|
+
const status = response.status;
|
|
444
|
+
if (status === 401) throw new Error("Authentication expired. Please re-login in VPLEX.");
|
|
445
|
+
if (status === 403) throw new Error("Access denied. Check your VPLEX plan.");
|
|
446
|
+
if (status === 404) throw new Error("Resource not found.");
|
|
447
|
+
if (status === 429) throw new Error("API rate limit reached. Please wait.");
|
|
448
|
+
throw new Error(`Request failed (${status}). Please try again.`);
|
|
449
|
+
}
|
|
450
|
+
const contentType = response.headers.get("content-type") || "";
|
|
451
|
+
if (!contentType.includes("application/json")) {
|
|
452
|
+
throw new Error(`Unexpected response format from API (${contentType || "no content-type"})`);
|
|
453
|
+
}
|
|
454
|
+
return response.json();
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
// ── MCP Tool Definitions ────────────────────────────────────────────
|
|
458
|
+
|
|
459
|
+
const MEMORY_TYPES = [
|
|
460
|
+
"feature", "code-snippet", "debug", "design", "decision", "rule",
|
|
461
|
+
"learning", "research", "discussion", "progress", "task", "working-notes",
|
|
462
|
+
"pattern", "context", "bug", "document-chunk",
|
|
463
|
+
];
|
|
464
|
+
|
|
465
|
+
const TOOLS = [
|
|
466
|
+
{
|
|
467
|
+
name: "memory_store",
|
|
468
|
+
description: "Store a memory in VPLEX for cross-session context. Memories persist across all AI tools (VPLEX, Claude Code, Cursor). IMPORTANT: Automatically call this after completing tasks, making decisions, fixing bugs, or learning something new.",
|
|
469
|
+
inputSchema: {
|
|
470
|
+
type: "object",
|
|
471
|
+
properties: {
|
|
472
|
+
content: { type: "string", description: "The memory content — what to remember (max 50KB)" },
|
|
473
|
+
type: { type: "string", enum: MEMORY_TYPES, description: "Memory type" },
|
|
474
|
+
tags: { type: "array", items: { type: "string" }, description: "Tags for categorization (max 50)" },
|
|
475
|
+
related_files: { type: "array", items: { type: "string" }, description: "Related file paths (max 50)" },
|
|
476
|
+
importance_score: { type: "number", description: "Importance 0.0-1.0 (0.9=critical, 0.5=normal, 0.3=minor)" },
|
|
477
|
+
session_name: { type: "string", description: "Session grouping (e.g. 'sprint-1')" },
|
|
478
|
+
related_memory_ids: { type: "array", items: { type: "string" }, description: "IDs of related memories" },
|
|
479
|
+
},
|
|
480
|
+
required: ["content", "type"],
|
|
481
|
+
},
|
|
482
|
+
},
|
|
483
|
+
{
|
|
484
|
+
name: "memory_search",
|
|
485
|
+
description: "Search VPLEX memories for relevant context from previous sessions. Use this BEFORE making code changes to find past decisions, patterns, and bug fixes.",
|
|
486
|
+
inputSchema: {
|
|
487
|
+
type: "object",
|
|
488
|
+
properties: {
|
|
489
|
+
query: { type: "string", description: "Search query (max 500 chars)" },
|
|
490
|
+
limit: { type: "number", description: "Max results 1-50 (default 10)" },
|
|
491
|
+
memory_type: { type: "string", enum: MEMORY_TYPES, description: "Filter by type" },
|
|
492
|
+
file_path: { type: "string", description: "Filter by related file" },
|
|
493
|
+
recent_only: { type: "boolean", description: "Only last 30 days" },
|
|
494
|
+
search_mode: { type: "string", enum: ["fulltext", "keyword", "hybrid"], description: "Search mode (default: hybrid)" },
|
|
495
|
+
},
|
|
496
|
+
required: ["query"],
|
|
497
|
+
},
|
|
498
|
+
},
|
|
499
|
+
{
|
|
500
|
+
name: "memory_list",
|
|
501
|
+
description: "List all memories for the current project.",
|
|
502
|
+
inputSchema: { type: "object", properties: {} },
|
|
503
|
+
},
|
|
504
|
+
{
|
|
505
|
+
name: "memory_list_projects",
|
|
506
|
+
description: "List all projects that have stored memories.",
|
|
507
|
+
inputSchema: { type: "object", properties: {} },
|
|
508
|
+
},
|
|
509
|
+
{
|
|
510
|
+
name: "memory_modify",
|
|
511
|
+
description: "Update, archive (soft delete), permanently delete, unarchive, or reactivate memories.",
|
|
512
|
+
inputSchema: {
|
|
513
|
+
type: "object",
|
|
514
|
+
properties: {
|
|
515
|
+
action: { type: "string", enum: ["update", "inactivate", "reactivate", "delete", "unarchive"], description: "Action" },
|
|
516
|
+
memory_id: { type: "string", description: "Memory ID" },
|
|
517
|
+
content: { type: "string", description: "New content (for update)" },
|
|
518
|
+
memory_type: { type: "string", enum: MEMORY_TYPES, description: "New type (for update)" },
|
|
519
|
+
tags: { type: "array", items: { type: "string" }, description: "New tags (for update)" },
|
|
520
|
+
importance_score: { type: "number", description: "New importance 0-1 (for update)" },
|
|
521
|
+
related_files: { type: "array", items: { type: "string" }, description: "New related files (for update)" },
|
|
522
|
+
reason: { type: "string", description: "Reason (required for inactivate)" },
|
|
523
|
+
},
|
|
524
|
+
required: ["action", "memory_id"],
|
|
525
|
+
},
|
|
526
|
+
},
|
|
527
|
+
{
|
|
528
|
+
name: "memory_expand",
|
|
529
|
+
description: "Get full content of memories by IDs (max 10).",
|
|
530
|
+
inputSchema: {
|
|
531
|
+
type: "object",
|
|
532
|
+
properties: {
|
|
533
|
+
memory_ids: { type: "array", items: { type: "string" }, description: "Memory IDs (max 10)" },
|
|
534
|
+
},
|
|
535
|
+
required: ["memory_ids"],
|
|
536
|
+
},
|
|
537
|
+
},
|
|
538
|
+
{
|
|
539
|
+
name: "memory_session_recap",
|
|
540
|
+
description: "Get a summary of recent activity across sessions. Use at the START of each session to resume context.",
|
|
541
|
+
inputSchema: {
|
|
542
|
+
type: "object",
|
|
543
|
+
properties: {
|
|
544
|
+
project_hash: { type: "string", description: "Project hash (default: current)" },
|
|
545
|
+
days_back: { type: "number", description: "Days to look back 1-30 (default 7)" },
|
|
546
|
+
},
|
|
547
|
+
},
|
|
548
|
+
},
|
|
549
|
+
{
|
|
550
|
+
name: "memory_get_rules",
|
|
551
|
+
description: "Get behavioral rules and guardrails for a project.",
|
|
552
|
+
inputSchema: {
|
|
553
|
+
type: "object",
|
|
554
|
+
properties: {
|
|
555
|
+
project_hash: { type: "string", description: "Project hash (default: current)" },
|
|
556
|
+
include_global: { type: "boolean", description: "Include global rules (default true)" },
|
|
557
|
+
},
|
|
558
|
+
},
|
|
559
|
+
},
|
|
560
|
+
{
|
|
561
|
+
name: "memory_projects",
|
|
562
|
+
description: "Manage project documents (briefs, PRDs, plans).",
|
|
563
|
+
inputSchema: {
|
|
564
|
+
type: "object",
|
|
565
|
+
properties: {
|
|
566
|
+
action: { type: "string", enum: ["list_all", "create", "update", "get"], description: "Action" },
|
|
567
|
+
project_hash: { type: "string", description: "Project hash" },
|
|
568
|
+
doc_type: { type: "string", enum: ["brief", "prd", "plan"], description: "Doc type (for create)" },
|
|
569
|
+
content: { type: "string", description: "Content (for create/update)" },
|
|
570
|
+
doc_id: { type: "string", description: "Doc ID (for update)" },
|
|
571
|
+
},
|
|
572
|
+
required: ["action"],
|
|
573
|
+
},
|
|
574
|
+
},
|
|
575
|
+
{
|
|
576
|
+
name: "memory_tasks",
|
|
577
|
+
description: "Manage tasks: create, complete, or list.",
|
|
578
|
+
inputSchema: {
|
|
579
|
+
type: "object",
|
|
580
|
+
properties: {
|
|
581
|
+
action: { type: "string", enum: ["create", "complete", "get"], description: "Action" },
|
|
582
|
+
project_hash: { type: "string", description: "Project hash (default: current)" },
|
|
583
|
+
task_description: { type: "string", description: "Description (for create)" },
|
|
584
|
+
task_id: { type: "string", description: "Task ID (for complete)" },
|
|
585
|
+
include_completed: { type: "boolean", description: "Include completed (default false)" },
|
|
586
|
+
},
|
|
587
|
+
required: ["action"],
|
|
588
|
+
},
|
|
589
|
+
},
|
|
590
|
+
{
|
|
591
|
+
name: "memory_start_thinking",
|
|
592
|
+
description: "Start a structured thinking sequence for complex problems.",
|
|
593
|
+
inputSchema: {
|
|
594
|
+
type: "object",
|
|
595
|
+
properties: {
|
|
596
|
+
goal: { type: "string", description: "Problem or goal" },
|
|
597
|
+
project_hash: { type: "string", description: "Project hash (default: current)" },
|
|
598
|
+
},
|
|
599
|
+
required: ["goal"],
|
|
600
|
+
},
|
|
601
|
+
},
|
|
602
|
+
{
|
|
603
|
+
name: "memory_add_thought",
|
|
604
|
+
description: "Add a thought to a thinking sequence. Use 'conclusion' to finalize.",
|
|
605
|
+
inputSchema: {
|
|
606
|
+
type: "object",
|
|
607
|
+
properties: {
|
|
608
|
+
sequence_id: { type: "string", description: "Sequence ID" },
|
|
609
|
+
thought: { type: "string", description: "Thought content" },
|
|
610
|
+
thought_type: {
|
|
611
|
+
type: "string",
|
|
612
|
+
enum: ["observation", "hypothesis", "question", "reasoning", "analysis", "conclusion", "branch", "general"],
|
|
613
|
+
description: "Thought type (default: general)",
|
|
614
|
+
},
|
|
615
|
+
branch_name: { type: "string", description: "Branch name (for branch type)" },
|
|
616
|
+
},
|
|
617
|
+
required: ["sequence_id", "thought"],
|
|
618
|
+
},
|
|
619
|
+
},
|
|
620
|
+
{
|
|
621
|
+
name: "memory_export",
|
|
622
|
+
description: "Export all memories for a project as JSON (requires Max plan).",
|
|
623
|
+
inputSchema: {
|
|
624
|
+
type: "object",
|
|
625
|
+
properties: {
|
|
626
|
+
project_hash: { type: "string", description: "Project hash (default: current)" },
|
|
627
|
+
},
|
|
628
|
+
},
|
|
629
|
+
},
|
|
630
|
+
{
|
|
631
|
+
name: "memory_insights",
|
|
632
|
+
description: "Discover clusters of related memories and knowledge gaps. Groups semantically similar memories to reveal patterns and consolidation opportunities. Requires Base plan.",
|
|
633
|
+
inputSchema: {
|
|
634
|
+
type: "object",
|
|
635
|
+
properties: {
|
|
636
|
+
project_hash: { type: "string", description: "Project hash (default: current)" },
|
|
637
|
+
threshold: { type: "number", description: "Similarity threshold 0.5-0.99 (default: 0.85)" },
|
|
638
|
+
min_cluster: { type: "number", description: "Minimum cluster size 2-10 (default: 3)" },
|
|
639
|
+
limit: { type: "number", description: "Max clusters 1-20 (default: 5)" },
|
|
640
|
+
},
|
|
641
|
+
},
|
|
642
|
+
},
|
|
643
|
+
{
|
|
644
|
+
name: "memory_upload_document",
|
|
645
|
+
description: "Upload and process a document (PDF, Markdown, or plain text) into memory chunks. Reads the file from disk and stores each chunk as a searchable memory. Max 4MB. Requires Base plan.",
|
|
646
|
+
inputSchema: {
|
|
647
|
+
type: "object",
|
|
648
|
+
properties: {
|
|
649
|
+
file_path: { type: "string", description: "Absolute path to the file to upload" },
|
|
650
|
+
tags: { type: "array", items: { type: "string" }, description: "Tags for all chunks" },
|
|
651
|
+
},
|
|
652
|
+
required: ["file_path"],
|
|
653
|
+
},
|
|
654
|
+
},
|
|
655
|
+
];
|
|
656
|
+
|
|
657
|
+
// ── Plan Verification (cached) ──────────────────────────────────────
|
|
658
|
+
|
|
659
|
+
let mcpAccessCache = null;
|
|
660
|
+
let mcpAccessCheckedAt = 0;
|
|
661
|
+
const MCP_ACCESS_CACHE_MS = 300_000; // cache plan check for 5min
|
|
662
|
+
|
|
663
|
+
async function checkMcpAccess() {
|
|
664
|
+
const now = Date.now();
|
|
665
|
+
if (mcpAccessCache && (now - mcpAccessCheckedAt) < MCP_ACCESS_CACHE_MS) {
|
|
666
|
+
return mcpAccessCache;
|
|
667
|
+
}
|
|
668
|
+
|
|
669
|
+
try {
|
|
670
|
+
const features = await apiFetch("/memory/features");
|
|
671
|
+
if (!features.mcp_access) {
|
|
672
|
+
mcpAccessCache = {
|
|
673
|
+
allowed: false,
|
|
674
|
+
plan: features.plan,
|
|
675
|
+
error: `MCP access requires Pro plan or higher (current: ${features.plan}).`,
|
|
676
|
+
};
|
|
677
|
+
} else {
|
|
678
|
+
mcpAccessCache = { allowed: true, plan: features.plan };
|
|
679
|
+
}
|
|
680
|
+
mcpAccessCheckedAt = now;
|
|
681
|
+
return mcpAccessCache;
|
|
682
|
+
} catch {
|
|
683
|
+
// On network error, allow access (don't block offline usage)
|
|
684
|
+
return { allowed: true, plan: "unknown" };
|
|
685
|
+
}
|
|
686
|
+
}
|
|
687
|
+
|
|
688
|
+
// ── Tool Handlers ───────────────────────────────────────────────────
|
|
689
|
+
|
|
690
|
+
async function handleToolCall(name, args) {
|
|
691
|
+
// Rate limit check
|
|
692
|
+
checkRateLimit(name);
|
|
693
|
+
|
|
694
|
+
// Plan access check
|
|
695
|
+
const access = await checkMcpAccess();
|
|
696
|
+
if (!access.allowed) {
|
|
697
|
+
return { content: [{ type: "text", text: access.error }], isError: true };
|
|
698
|
+
}
|
|
699
|
+
|
|
700
|
+
switch (name) {
|
|
701
|
+
case "memory_store": {
|
|
702
|
+
const content = validateString(args.content, "content", MAX_CONTENT_LENGTH);
|
|
703
|
+
const type = validateMemoryType(args.type);
|
|
704
|
+
const tags = validateTags(args.tags);
|
|
705
|
+
const related_files = validateFiles(args.related_files);
|
|
706
|
+
const importance_score = validateImportance(args.importance_score);
|
|
707
|
+
const session_name = args.session_name ? validateString(args.session_name, "session_name", MAX_SESSION_NAME_LENGTH) : undefined;
|
|
708
|
+
const related_memory_ids = Array.isArray(args.related_memory_ids)
|
|
709
|
+
? args.related_memory_ids.filter((id) => ID_PATTERN.test(id)).slice(0, 20)
|
|
710
|
+
: undefined;
|
|
711
|
+
|
|
712
|
+
// Quality gate — reject noise before it reaches the API
|
|
713
|
+
const qualityIssue = checkContentQuality(content, type);
|
|
714
|
+
if (qualityIssue) {
|
|
715
|
+
return {
|
|
716
|
+
content: [{
|
|
717
|
+
type: "text",
|
|
718
|
+
text: `Memory rejected: ${qualityIssue}\n\nPlease rephrase and try again. Good memories are complete thoughts that will be useful in future sessions.`,
|
|
719
|
+
}],
|
|
720
|
+
isError: true,
|
|
721
|
+
};
|
|
722
|
+
}
|
|
723
|
+
|
|
724
|
+
const memory = await apiFetch("/memory/store", {
|
|
725
|
+
method: "POST",
|
|
726
|
+
body: JSON.stringify({
|
|
727
|
+
content, type, tags, source: "mcp",
|
|
728
|
+
projectHash, projectName, projectPath,
|
|
729
|
+
relatedFiles: related_files, importanceScore: importance_score,
|
|
730
|
+
sessionName: session_name, relatedMemoryIds: related_memory_ids,
|
|
731
|
+
}),
|
|
732
|
+
});
|
|
733
|
+
const dedupNote = memory.deduplicated
|
|
734
|
+
? ` [DEDUPLICATED — merged with existing ${memory.id.slice(0, 8)}, tags merged: ${(memory.tags || []).join(", ")}]`
|
|
735
|
+
: "";
|
|
736
|
+
return {
|
|
737
|
+
content: [{
|
|
738
|
+
type: "text",
|
|
739
|
+
text: `Stored [${memory.type}] memory (${memory.id.slice(0, 8)})${dedupNote}. Importance: ${memory.importanceScore}. Files: ${(memory.relatedFiles || []).join(", ") || "none"}.`,
|
|
740
|
+
}],
|
|
741
|
+
};
|
|
742
|
+
}
|
|
743
|
+
|
|
744
|
+
case "memory_search": {
|
|
745
|
+
const query = validateString(args.query, "query", MAX_QUERY_LENGTH);
|
|
746
|
+
const limit = Math.max(1, Math.min(MAX_SEARCH_LIMIT, Number(args.limit) || 10));
|
|
747
|
+
const memory_type = args.memory_type ? validateMemoryType(args.memory_type) : undefined;
|
|
748
|
+
const file_path = args.file_path ? validateString(args.file_path, "file_path", MAX_FILE_PATH_LENGTH) : undefined;
|
|
749
|
+
|
|
750
|
+
const memories = await apiFetch("/memory/search", {
|
|
751
|
+
method: "POST",
|
|
752
|
+
body: JSON.stringify({
|
|
753
|
+
query, limit, projectHash,
|
|
754
|
+
memoryType: memory_type, filePath: file_path,
|
|
755
|
+
recentOnly: !!args.recent_only,
|
|
756
|
+
searchMode: args.search_mode === "keyword" ? "keyword" : args.search_mode === "fulltext" ? "fulltext" : "hybrid",
|
|
757
|
+
}),
|
|
758
|
+
});
|
|
759
|
+
|
|
760
|
+
if (memories.length === 0) {
|
|
761
|
+
return { content: [{ type: "text", text: `No memories found for: "${query}"` }] };
|
|
762
|
+
}
|
|
763
|
+
|
|
764
|
+
const formatted = memories
|
|
765
|
+
.map((m, i) => {
|
|
766
|
+
const files = (m.relatedFiles || []).length > 0 ? `\n Files: ${m.relatedFiles.join(", ")}` : "";
|
|
767
|
+
const relevance = m.relevance !== undefined ? ` (score: ${Number(m.relevance).toFixed(3)})` : "";
|
|
768
|
+
return `${i + 1}. [${m.type}]${relevance} ${m.content}\n ID: ${m.id} | Tags: ${(m.tags || []).join(", ") || "none"}${files}`;
|
|
769
|
+
})
|
|
770
|
+
.join("\n\n");
|
|
771
|
+
|
|
772
|
+
return { content: [{ type: "text", text: `Found ${memories.length} memories:\n\n${formatted}` }] };
|
|
773
|
+
}
|
|
774
|
+
|
|
775
|
+
case "memory_list": {
|
|
776
|
+
const memories = await apiFetch(`/memory/project/${projectHash}`);
|
|
777
|
+
if (memories.length === 0) {
|
|
778
|
+
return { content: [{ type: "text", text: "No memories for this project." }] };
|
|
779
|
+
}
|
|
780
|
+
|
|
781
|
+
const grouped = memories.reduce((acc, m) => {
|
|
782
|
+
if (!acc[m.type]) acc[m.type] = [];
|
|
783
|
+
acc[m.type].push(m);
|
|
784
|
+
return acc;
|
|
785
|
+
}, {});
|
|
786
|
+
|
|
787
|
+
const sections = Object.entries(grouped)
|
|
788
|
+
.map(([type, items]) => {
|
|
789
|
+
const list = items.map((m) => ` - ${m.content}${(m.relatedFiles || []).length ? ` [${m.relatedFiles.length} files]` : ""}`).join("\n");
|
|
790
|
+
return `${type.toUpperCase()} (${items.length}):\n${list}`;
|
|
791
|
+
})
|
|
792
|
+
.join("\n\n");
|
|
793
|
+
|
|
794
|
+
return { content: [{ type: "text", text: `Project Memories (${memories.length}):\n\n${sections}` }] };
|
|
795
|
+
}
|
|
796
|
+
|
|
797
|
+
case "memory_list_projects": {
|
|
798
|
+
const data = await apiFetch("/memory/projects");
|
|
799
|
+
const projects = data.projects;
|
|
800
|
+
if (projects.length === 0) {
|
|
801
|
+
return { content: [{ type: "text", text: "No projects with memories." }] };
|
|
802
|
+
}
|
|
803
|
+
const list = projects
|
|
804
|
+
.map((p) => `- ${p.project_name ?? p.project_hash}: ${p.memory_count} memories`)
|
|
805
|
+
.join("\n");
|
|
806
|
+
return { content: [{ type: "text", text: `Projects (${projects.length}):\n\n${list}` }] };
|
|
807
|
+
}
|
|
808
|
+
|
|
809
|
+
case "memory_modify": {
|
|
810
|
+
const { action } = args;
|
|
811
|
+
const memory_id = validateId(args.memory_id, "memory_id");
|
|
812
|
+
|
|
813
|
+
if (action === "update") {
|
|
814
|
+
const updates = {};
|
|
815
|
+
if (args.content !== undefined) updates.content = validateString(args.content, "content", MAX_CONTENT_LENGTH);
|
|
816
|
+
if (args.memory_type !== undefined) updates.type = validateMemoryType(args.memory_type);
|
|
817
|
+
if (args.tags !== undefined) updates.tags = validateTags(args.tags);
|
|
818
|
+
if (args.importance_score !== undefined) updates.importanceScore = validateImportance(args.importance_score);
|
|
819
|
+
if (args.related_files !== undefined) updates.relatedFiles = validateFiles(args.related_files);
|
|
820
|
+
|
|
821
|
+
const result = await apiFetch(`/memory/${memory_id}`, { method: "PATCH", body: JSON.stringify(updates) });
|
|
822
|
+
return { content: [{ type: "text", text: `Memory ${memory_id.slice(0, 8)} updated. Type: ${result.type}` }] };
|
|
823
|
+
}
|
|
824
|
+
|
|
825
|
+
if (action === "inactivate") {
|
|
826
|
+
if (!args.reason || typeof args.reason !== "string" || args.reason.trim().length === 0) {
|
|
827
|
+
return { content: [{ type: "text", text: "Error: reason required for inactivation" }], isError: true };
|
|
828
|
+
}
|
|
829
|
+
const reason = validateString(args.reason, "reason", 500);
|
|
830
|
+
await apiFetch(`/memory/${memory_id}/inactivate`, { method: "POST", body: JSON.stringify({ reason }) });
|
|
831
|
+
return { content: [{ type: "text", text: `Memory ${memory_id.slice(0, 8)} archived.` }] };
|
|
832
|
+
}
|
|
833
|
+
|
|
834
|
+
if (action === "reactivate") {
|
|
835
|
+
await apiFetch(`/memory/${memory_id}/reactivate`, { method: "POST" });
|
|
836
|
+
return { content: [{ type: "text", text: `Memory ${memory_id.slice(0, 8)} reactivated.` }] };
|
|
837
|
+
}
|
|
838
|
+
|
|
839
|
+
if (action === "delete") {
|
|
840
|
+
await apiFetch(`/memory/${memory_id}`, { method: "DELETE" });
|
|
841
|
+
return { content: [{ type: "text", text: `Memory ${memory_id.slice(0, 8)} permanently deleted.` }] };
|
|
842
|
+
}
|
|
843
|
+
|
|
844
|
+
if (action === "unarchive") {
|
|
845
|
+
const result = await apiFetch(`/memory/${memory_id}/unarchive`, { method: "POST" });
|
|
846
|
+
const swapped = result.swapped ? " (oldest active memory was swapped out to stay within plan limits)" : "";
|
|
847
|
+
return { content: [{ type: "text", text: `Memory ${memory_id.slice(0, 8)} unarchived.${swapped}` }] };
|
|
848
|
+
}
|
|
849
|
+
|
|
850
|
+
return { content: [{ type: "text", text: `Unknown action: ${action}` }], isError: true };
|
|
851
|
+
}
|
|
852
|
+
|
|
853
|
+
case "memory_expand": {
|
|
854
|
+
const ids = (args.memory_ids || []).filter((id) => ID_PATTERN.test(id)).slice(0, 10);
|
|
855
|
+
if (ids.length === 0) return { content: [{ type: "text", text: "Error: valid memory_ids required" }], isError: true };
|
|
856
|
+
|
|
857
|
+
const memories = await apiFetch("/memory/expand", { method: "POST", body: JSON.stringify({ memoryIds: ids }) });
|
|
858
|
+
if (memories.length === 0) return { content: [{ type: "text", text: "No memories found." }] };
|
|
859
|
+
|
|
860
|
+
const formatted = memories
|
|
861
|
+
.map((m) => `--- ${m.id} [${m.type}] ---\nImportance: ${m.importanceScore} | Status: ${m.status}\nTags: ${(m.tags || []).join(", ") || "none"}\nFiles: ${(m.relatedFiles || []).join(", ") || "none"}\n\n${m.content}`)
|
|
862
|
+
.join("\n\n");
|
|
863
|
+
|
|
864
|
+
return { content: [{ type: "text", text: formatted }] };
|
|
865
|
+
}
|
|
866
|
+
|
|
867
|
+
case "memory_session_recap": {
|
|
868
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
869
|
+
const daysBack = Math.min(Math.max(Number(args.days_back) || 7, 1), 30);
|
|
870
|
+
|
|
871
|
+
const result = await apiFetch("/memory/session-recap", {
|
|
872
|
+
method: "POST",
|
|
873
|
+
body: JSON.stringify({ projectHash: hash, daysBack }),
|
|
874
|
+
});
|
|
875
|
+
|
|
876
|
+
if (result.totalMemories === 0) {
|
|
877
|
+
return { content: [{ type: "text", text: `No activity in the last ${daysBack} days.` }] };
|
|
878
|
+
}
|
|
879
|
+
|
|
880
|
+
const sections = result.projects
|
|
881
|
+
.map((p) => {
|
|
882
|
+
const name = p.projectName || p.projectHash || "Global";
|
|
883
|
+
const list = p.recentMemories
|
|
884
|
+
.map((m) => ` - [${m.type}] ${m.content.substring(0, 120)}${m.content.length > 120 ? "..." : ""}`)
|
|
885
|
+
.join("\n");
|
|
886
|
+
return `${name} (${p.memoryCount}, types: ${p.types.join(", ")}):\n${list}`;
|
|
887
|
+
})
|
|
888
|
+
.join("\n\n");
|
|
889
|
+
|
|
890
|
+
return { content: [{ type: "text", text: `Recap (${daysBack}d, ${result.totalMemories} memories):\n\n${sections}` }] };
|
|
891
|
+
}
|
|
892
|
+
|
|
893
|
+
case "memory_get_rules": {
|
|
894
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
895
|
+
const includeGlobal = args.include_global !== false;
|
|
896
|
+
const result = await apiFetch(`/memory/rules/${hash}?includeGlobal=${includeGlobal}`);
|
|
897
|
+
|
|
898
|
+
const fmt = (rules, label) => {
|
|
899
|
+
if (rules.length === 0) return `${label}: none`;
|
|
900
|
+
return `${label} (${rules.length}):\n${rules.map((r) => ` - ${r.content}`).join("\n")}`;
|
|
901
|
+
};
|
|
902
|
+
|
|
903
|
+
const text = [
|
|
904
|
+
fmt(result.projectRules, "Project Rules"),
|
|
905
|
+
includeGlobal ? fmt(result.globalRules, "Global Rules") : null,
|
|
906
|
+
].filter(Boolean).join("\n\n");
|
|
907
|
+
|
|
908
|
+
return { content: [{ type: "text", text }] };
|
|
909
|
+
}
|
|
910
|
+
|
|
911
|
+
case "memory_projects": {
|
|
912
|
+
const { action } = args;
|
|
913
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
914
|
+
|
|
915
|
+
if (action === "list_all") {
|
|
916
|
+
const data = await apiFetch("/memory/projects");
|
|
917
|
+
if (data.projects.length === 0) return { content: [{ type: "text", text: "No projects." }] };
|
|
918
|
+
const list = data.projects.map((p) => `- ${p.project_name ?? p.project_hash}: ${p.memory_count} memories`).join("\n");
|
|
919
|
+
return { content: [{ type: "text", text: `Projects:\n\n${list}` }] };
|
|
920
|
+
}
|
|
921
|
+
|
|
922
|
+
if (action === "get") {
|
|
923
|
+
const docs = await apiFetch(`/memory/project-docs/${hash}`);
|
|
924
|
+
if (docs.length === 0) return { content: [{ type: "text", text: "No documents." }] };
|
|
925
|
+
const formatted = docs.map((d) => `--- ${d.docType.toUpperCase()} (${d.id}) ---\n${d.content}`).join("\n\n");
|
|
926
|
+
return { content: [{ type: "text", text: formatted }] };
|
|
927
|
+
}
|
|
928
|
+
|
|
929
|
+
if (action === "create") {
|
|
930
|
+
if (!args.doc_type || !args.content) return { content: [{ type: "text", text: "Error: doc_type and content required" }], isError: true };
|
|
931
|
+
const content = validateString(args.content, "content", MAX_CONTENT_LENGTH);
|
|
932
|
+
const doc = await apiFetch("/memory/project-docs", {
|
|
933
|
+
method: "POST",
|
|
934
|
+
body: JSON.stringify({ projectHash: hash, docType: args.doc_type, content }),
|
|
935
|
+
});
|
|
936
|
+
return { content: [{ type: "text", text: `Doc created: ${doc.id} (${doc.docType})` }] };
|
|
937
|
+
}
|
|
938
|
+
|
|
939
|
+
if (action === "update") {
|
|
940
|
+
const doc_id = validateId(args.doc_id, "doc_id");
|
|
941
|
+
const content = validateString(args.content, "content", MAX_CONTENT_LENGTH);
|
|
942
|
+
await apiFetch(`/memory/project-docs/${doc_id}`, { method: "PATCH", body: JSON.stringify({ content }) });
|
|
943
|
+
return { content: [{ type: "text", text: `Doc ${doc_id.slice(0, 8)} updated.` }] };
|
|
944
|
+
}
|
|
945
|
+
|
|
946
|
+
return { content: [{ type: "text", text: `Unknown action: ${action}` }], isError: true };
|
|
947
|
+
}
|
|
948
|
+
|
|
949
|
+
case "memory_tasks": {
|
|
950
|
+
const { action } = args;
|
|
951
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
952
|
+
|
|
953
|
+
if (action === "get") {
|
|
954
|
+
const qs = args.include_completed ? "?includeCompleted=true" : "";
|
|
955
|
+
const tasks = await apiFetch(`/memory/tasks/${hash}${qs}`);
|
|
956
|
+
if (tasks.length === 0) return { content: [{ type: "text", text: "No tasks." }] };
|
|
957
|
+
const list = tasks.map((t) => `- [${t.isCompleted ? "DONE" : "TODO"}] ${t.content} (${t.id.slice(0, 8)})`).join("\n");
|
|
958
|
+
return { content: [{ type: "text", text: `Tasks (${tasks.length}):\n\n${list}` }] };
|
|
959
|
+
}
|
|
960
|
+
|
|
961
|
+
if (action === "create") {
|
|
962
|
+
const desc = validateString(args.task_description, "task_description", MAX_CONTENT_LENGTH);
|
|
963
|
+
const task = await apiFetch("/memory/tasks", { method: "POST", body: JSON.stringify({ projectHash: hash, description: desc }) });
|
|
964
|
+
return { content: [{ type: "text", text: `Task created: ${task.id.slice(0, 8)}` }] };
|
|
965
|
+
}
|
|
966
|
+
|
|
967
|
+
if (action === "complete") {
|
|
968
|
+
const task_id = validateId(args.task_id, "task_id");
|
|
969
|
+
await apiFetch(`/memory/tasks/${task_id}/complete`, { method: "POST" });
|
|
970
|
+
return { content: [{ type: "text", text: `Task ${task_id.slice(0, 8)} completed.` }] };
|
|
971
|
+
}
|
|
972
|
+
|
|
973
|
+
return { content: [{ type: "text", text: `Unknown action: ${action}` }], isError: true };
|
|
974
|
+
}
|
|
975
|
+
|
|
976
|
+
case "memory_start_thinking": {
|
|
977
|
+
const goal = validateString(args.goal, "goal", MAX_CONTENT_LENGTH);
|
|
978
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
979
|
+
const result = await apiFetch("/memory/thinking", { method: "POST", body: JSON.stringify({ projectHash: hash, goal }) });
|
|
980
|
+
return {
|
|
981
|
+
content: [{
|
|
982
|
+
type: "text",
|
|
983
|
+
text: `Thinking started: ${result.id}\nGoal: ${result.goal}\n\nUse memory_add_thought with this sequence_id. Use thought_type "conclusion" to finalize.`,
|
|
984
|
+
}],
|
|
985
|
+
};
|
|
986
|
+
}
|
|
987
|
+
|
|
988
|
+
case "memory_add_thought": {
|
|
989
|
+
const sequence_id = validateId(args.sequence_id, "sequence_id");
|
|
990
|
+
const thought = validateString(args.thought, "thought", MAX_CONTENT_LENGTH);
|
|
991
|
+
const thought_type = args.thought_type || "general";
|
|
992
|
+
|
|
993
|
+
const result = await apiFetch(`/memory/thinking/${sequence_id}/thought`, {
|
|
994
|
+
method: "POST",
|
|
995
|
+
body: JSON.stringify({ thought, thoughtType: thought_type, branchName: args.branch_name }),
|
|
996
|
+
});
|
|
997
|
+
|
|
998
|
+
const done = result.status === "completed" ? " [CONCLUDED — stored as decision]" : "";
|
|
999
|
+
return { content: [{ type: "text", text: `Thought #${result.thoughtCount} (${result.thought.type})${done}` }] };
|
|
1000
|
+
}
|
|
1001
|
+
|
|
1002
|
+
case "memory_export": {
|
|
1003
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
1004
|
+
const result = await apiFetch(`/memory/export/${hash}`);
|
|
1005
|
+
if (result.total === 0) {
|
|
1006
|
+
return { content: [{ type: "text", text: "No memories to export." }] };
|
|
1007
|
+
}
|
|
1008
|
+
const summary = result.memories
|
|
1009
|
+
.map((m) => `- [${m.type}] ${m.content.substring(0, 100)}${m.content.length > 100 ? "..." : ""} (${m.archived ? "archived" : "active"})`)
|
|
1010
|
+
.join("\n");
|
|
1011
|
+
return { content: [{ type: "text", text: `Exported ${result.total} memories (${result.project_hash}):\n\n${summary}` }] };
|
|
1012
|
+
}
|
|
1013
|
+
|
|
1014
|
+
case "memory_insights": {
|
|
1015
|
+
const hash = args.project_hash ? validateHash(args.project_hash, "project_hash") : projectHash;
|
|
1016
|
+
const threshold = Math.max(0.5, Math.min(0.99, Number(args.threshold) || 0.85));
|
|
1017
|
+
const minCluster = Math.max(2, Math.min(10, Number(args.min_cluster) || 3));
|
|
1018
|
+
const limit = Math.max(1, Math.min(20, Number(args.limit) || 5));
|
|
1019
|
+
|
|
1020
|
+
const result = await apiFetch("/memory/insights", {
|
|
1021
|
+
method: "POST",
|
|
1022
|
+
body: JSON.stringify({ projectHash: hash, threshold, minCluster, limit }),
|
|
1023
|
+
});
|
|
1024
|
+
|
|
1025
|
+
if (result.totalClusters === 0) {
|
|
1026
|
+
return { content: [{ type: "text", text: "No memory clusters found. Try lowering the threshold or adding more memories." }] };
|
|
1027
|
+
}
|
|
1028
|
+
|
|
1029
|
+
const formatted = result.clusters
|
|
1030
|
+
.map((c, i) => {
|
|
1031
|
+
const previews = c.memberPreviews
|
|
1032
|
+
.map((p, j) => ` ${j + 1}. [${c.memberTypes[j]}] ${p}`)
|
|
1033
|
+
.join("\n");
|
|
1034
|
+
return `Cluster ${i + 1} (${c.clusterSize} memories):\n${previews}`;
|
|
1035
|
+
})
|
|
1036
|
+
.join("\n\n");
|
|
1037
|
+
|
|
1038
|
+
return { content: [{ type: "text", text: `Found ${result.totalClusters} clusters:\n\n${formatted}` }] };
|
|
1039
|
+
}
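
    // The clamps above bound server load. Example (illustrative): an input of
    // { "threshold": 2, "min_cluster": 50, "limit": 0 } becomes
    // threshold 0.99, minCluster 10, limit 5 (a limit of 0 is falsy in
    // `Number(args.limit) || 5`, so the default applies before clamping).
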
case "memory_upload_document": {
|
|
1042
|
+
const filePath = validateString(args.file_path, "file_path", MAX_FILE_PATH_LENGTH);
|
|
1043
|
+
|
|
1044
|
+
// Security: restrict file access to project directory (cwd) to prevent data exfiltration
|
|
1045
|
+
const cwd = process.cwd();
|
|
1046
|
+
const absPath = resolve(filePath);
|
|
1047
|
+
let realPath;
|
|
1048
|
+
try { realPath = realpathSync(absPath); } catch { realPath = absPath; }
|
|
1049
|
+
const normalizedCwd = cwd.replace(/\\/g, "/").toLowerCase();
|
|
1050
|
+
const normalizedReal = realPath.replace(/\\/g, "/").toLowerCase();
|
|
1051
|
+
if (!normalizedReal.startsWith(normalizedCwd + "/") && normalizedReal !== normalizedCwd) {
|
|
1052
|
+
return { content: [{ type: "text", text: `Security: file must be within the project directory (${cwd}). Access to files outside the project is denied to prevent data exfiltration.` }], isError: true };
|
|
1053
|
+
}
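
      // Illustrative effect of the check above, assuming cwd = /home/user/project:
      //   "./docs/notes.md"  -> allowed (resolves inside cwd)
      //   "../.ssh/id_rsa"   -> denied  (resolves outside cwd)
      //   "/etc/passwd"      -> denied
      // Because realpathSync runs first, a symlink inside the project that
      // points outside it is also denied.
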
      // Read file from disk
      let fileBuffer;
      let filename;
      try {
        fileBuffer = readFileSync(realPath);
        filename = realPath.replace(/\\/g, "/").split("/").pop() || "document";
      } catch (err) {
        return { content: [{ type: "text", text: `Error reading file: ${err.message}` }], isError: true };
      }

      // Determine MIME type
      const ext = filename.split(".").pop()?.toLowerCase();
      const mimeMap = { pdf: "application/pdf", md: "text/markdown", txt: "text/plain", text: "text/plain" };
      const mimeType = mimeMap[ext] || "text/plain";

      if (!["pdf", "md", "txt", "text"].includes(ext || "")) {
        return { content: [{ type: "text", text: `Unsupported file type: .${ext}. Supported: .pdf, .md, .txt` }], isError: true };
      }

      if (fileBuffer.length > 4 * 1024 * 1024) {
        return { content: [{ type: "text", text: `File too large (${(fileBuffer.length / 1024 / 1024).toFixed(1)}MB). Maximum: 4MB.` }], isError: true };
      }

      // Build multipart form data
      const tags = validateTags(args.tags);
      const blob = new Blob([fileBuffer], { type: mimeType });
      const formData = new FormData();
      formData.append("file", blob, filename);
      formData.append("projectHash", projectHash);
      if (tags.length > 0) formData.append("tags", tags.join(","));

      // Send to API (custom headers for multipart)
      const token = await getToken();
      if (!token) {
        const authMessage = await startCliAuthFlow();
        return { content: [{ type: "text", text: authMessage }], isError: true };
      }
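
      // Design note (assumption: the shared apiFetch helper sets a JSON
      // Content-Type): the upload calls fetch() directly so the runtime can
      // generate the multipart boundary in the Content-Type header itself;
      // only Authorization is supplied by hand.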
      const response = await fetch(`${API_URL}/memory/documents/upload`, {
        method: "POST",
        headers: { Authorization: `Bearer ${token}` },
        body: formData,
      });

      if (!response.ok) {
        const errBody = await response.json().catch(() => ({}));
        return { content: [{ type: "text", text: `Upload failed (${response.status}): ${errBody.error || "Unknown error"}` }], isError: true };
      }

      const result = await response.json();
      return {
        content: [{
          type: "text",
          text: `Document "${result.filename}" processed: ${result.chunkCount} chunks created.\nUpload ID: ${result.uploadId}`,
        }],
      };
    }

    default:
      return { content: [{ type: "text", text: `Unknown tool: ${name}` }], isError: true };
  }
}

// ── MCP JSON-RPC Handler ────────────────────────────────────────────

async function handleRequest(request) {
  const { method, params, id } = request;

  // Validate JSON-RPC structure
  if (typeof method !== "string") {
    return { jsonrpc: "2.0", id: id ?? null, error: { code: -32600, message: "Invalid request: method required" } };
  }

  try {
    switch (method) {
      case "initialize":
        return {
          jsonrpc: "2.0", id,
          result: {
            protocolVersion: "2024-11-05",
            capabilities: { tools: {} },
            serverInfo: { name: SERVER_NAME, version: SERVER_VERSION },
          },
        };

      case "initialized":
        return null;

      case "tools/list":
        return { jsonrpc: "2.0", id, result: { tools: TOOLS } };

      case "tools/call": {
        if (!params || typeof params.name !== "string") {
          return { jsonrpc: "2.0", id, error: { code: -32602, message: "Invalid params: name required" } };
        }
        const result = await handleToolCall(params.name, params.arguments || {});
        return { jsonrpc: "2.0", id, result };
      }

      default:
        return { jsonrpc: "2.0", id, error: { code: -32601, message: `Unknown method: ${method}` } };
    }
  } catch (error) {
    // Sanitize: only return our own error messages, never raw stack traces
    const safeMessage = error.message || "Internal error";
    return { jsonrpc: "2.0", id, error: { code: -32603, message: safeMessage } };
  }
}
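
// Example exchange (illustrative; one JSON object per line over stdio):
//   -> {"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}
//   <- {"jsonrpc":"2.0","id":1,"result":{"protocolVersion":"2024-11-05",...}}
//   -> {"jsonrpc":"2.0","id":2,"method":"tools/call",
//       "params":{"name":"memory_tasks","arguments":{"action":"list"}}}
//   <- {"jsonrpc":"2.0","id":2,"result":{"content":[{"type":"text","text":"Tasks (...)"}]}}
// Methods that return null here (e.g. "initialized") are notifications and
// produce no line on stdout.
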
// ── stdio Communication Loop ────────────────────────────────────────

const rl = createInterface({ input: process.stdin, output: process.stdout, terminal: false });

rl.on("line", async (line) => {
  if (!line.trim()) return;

  try {
    const request = JSON.parse(line);
    const response = await handleRequest(request);
    if (response !== null) {
      // Write to stdout only — stderr is reserved for diagnostics
      process.stdout.write(JSON.stringify(response) + "\n");
    }
  } catch {
    process.stdout.write(JSON.stringify({
      jsonrpc: "2.0", id: null,
      error: { code: -32700, message: "Parse error" },
    }) + "\n");
  }
});

rl.on("close", () => process.exit(0));
process.on("SIGINT", () => process.exit(0));
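
// Quick manual smoke test (illustrative): pipe one request through the server
// and inspect stdout, e.g.
//   echo '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' | node vplex-mcp-server.mjs
// The process exits when stdin closes (the "close" handler) or on Ctrl-C.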