vektori-cortex 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +46 -0
- package/dist/api.d.ts +9 -0
- package/dist/api.js +47 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +223 -0
- package/dist/types.d.ts +51 -0
- package/dist/types.js +1 -0
- package/package.json +43 -0
package/README.md
ADDED
@@ -0,0 +1,46 @@
+# Vektori Cortex
+
+Sync your AI coding decisions across Claude, Cursor, and other MCP tools.
+
+## Quick Setup
+
+### 1. Add to Claude Desktop
+
+Open config file:
+- **Windows:** `%APPDATA%\Claude\claude_desktop_config.json`
+- **Mac:** `~/Library/Application Support/Claude/claude_desktop_config.json`
+
+Paste this:
+```json
+{
+  "mcpServers": {
+    "vektori-cortex": {
+      "command": "npx",
+      "args": ["-y", "vektori-cortex"],
+      "env": {
+        "VEKTORI_TOKEN": "YOUR_TOKEN"
+      }
+    }
+  }
+}
+```
+
+### 2. Get Your Token
+
+Copy your JWT from the Vektori browser extension (Settings → Copy Token).
+
+### 3. Restart Claude Desktop
+
+Done. Claude now remembers your decisions across sessions.
+
+---
+
+## What It Does
+
+| You say | Claude does |
+|---------|-------------|
+| "Let's use PostgreSQL" | Saves the decision |
+| "What did we decide about auth?" | Searches your memory |
+| *(new session starts)* | Loads recent context |
+
+Decisions sync across all your MCP tools.
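The three rows in the table above map onto the `save_decision`, `search_memory`, and `get_context` tools defined in `dist/index.js` below. A minimal smoke-test sketch of the first row using the MCP SDK's stdio client, assuming `VEKTORI_TOKEN` is exported in your shell and that the client entry points behave as in recent `@modelcontextprotocol/sdk` releases (a normal host such as Claude Desktop issues these calls for you):

```typescript
// Sketch only: spawn the published server over stdio and call save_decision.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const transport = new StdioClientTransport({
  command: "npx",
  args: ["-y", "vektori-cortex"],
  // Assumption: your SDK version accepts a plain env map here; some versions
  // replace rather than merge the child process environment.
  env: { ...(process.env as Record<string, string>) },
});
const client = new Client({ name: "smoke-test", version: "0.0.1" }, { capabilities: {} });

await client.connect(transport);
console.log(await client.listTools()); // should list the three tools above
console.log(await client.callTool({
  name: "save_decision",
  arguments: { content: "Use PostgreSQL for primary storage", topic: "database", type: "decision" },
}));
await client.close();
```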
package/dist/api.d.ts
ADDED
@@ -0,0 +1,9 @@
+import { SaveDecisionRequest, SaveDecisionResponse, SearchMemoryRequest, SearchMemoryResponse, ContextResponse } from "./types.js";
+export declare class VektoriAPI {
+    private token;
+    constructor(token: string);
+    private request;
+    saveDecision(data: SaveDecisionRequest): Promise<SaveDecisionResponse>;
+    searchMemory(data: SearchMemoryRequest): Promise<SearchMemoryResponse>;
+    getContext(projectId?: string, limit?: number): Promise<ContextResponse>;
+}
package/dist/api.js
ADDED
@@ -0,0 +1,47 @@
+const BASE_URL = "https://vektori-memory.vektori-cloud.workers.dev";
+export class VektoriAPI {
+    token;
+    constructor(token) {
+        this.token = token;
+    }
+    async request(endpoint, options = {}) {
+        const url = `${BASE_URL}${endpoint}`;
+        const response = await fetch(url, {
+            ...options,
+            headers: {
+                "Content-Type": "application/json",
+                Authorization: `Bearer ${this.token}`,
+                ...options.headers,
+            },
+        });
+        if (!response.ok) {
+            const error = await response.text();
+            throw new Error(`API Error ${response.status}: ${error}`);
+        }
+        return response.json();
+    }
+    async saveDecision(data) {
+        return this.request("/api/cortex/save", {
+            method: "POST",
+            body: JSON.stringify(data),
+        });
+    }
+    async searchMemory(data) {
+        return this.request("/api/cortex/search", {
+            method: "POST",
+            body: JSON.stringify(data),
+        });
+    }
+    async getContext(projectId, limit) {
+        const params = new URLSearchParams();
+        if (projectId)
+            params.set("project_id", projectId);
+        if (limit)
+            params.set("limit", limit.toString());
+        const query = params.toString();
+        const endpoint = `/api/cortex/context${query ? `?${query}` : ""}`;
+        return this.request(endpoint, {
+            method: "GET",
+        });
+    }
+}
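`VektoriAPI` above is a thin `fetch` wrapper around the Workers endpoint, so it can also be driven directly from your own scripts. A minimal usage sketch, assuming you run it as an ES module with a valid JWT in `VEKTORI_TOKEN`, and noting that the deep import path works only because the package ships no `exports` map (an implementation detail that could change):

```typescript
// Sketch only: exercise the REST wrapper without going through MCP.
import { VektoriAPI } from "vektori-cortex/dist/api.js";

const api = new VektoriAPI(process.env.VEKTORI_TOKEN ?? "");

// POST /api/cortex/save
const saved = await api.saveDecision({
  content: "Use Redis for session caching",
  topic: "caching",
  type: "decision",
  reasoning: "Low-latency reads with built-in TTLs",
});
console.log(saved.id, saved.message);

// POST /api/cortex/search
const hits = await api.searchMemory({ query: "caching decisions", limit: 5 });
for (const r of hits.results) {
  console.log(`[${r.topic}] ${r.content} (score ${r.score})`);
}

// GET /api/cortex/context?project_id=...&limit=...
const ctx = await api.getContext("my-project", 10);
console.log(`${ctx.recent_decisions.length} recent decisions`);
```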
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
@@ -0,0 +1,223 @@
+#!/usr/bin/env node
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { CallToolRequestSchema, ListToolsRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
+import { VektoriAPI } from "./api.js";
+// Get auth token from environment
+const VEKTORI_TOKEN = process.env.VEKTORI_TOKEN;
+if (!VEKTORI_TOKEN) {
+    console.error("Error: VEKTORI_TOKEN environment variable is required");
+    process.exit(1);
+}
+const api = new VektoriAPI(VEKTORI_TOKEN);
+// Create MCP server
+const server = new Server({
+    name: "vektori-cortex",
+    version: "1.0.0",
+}, {
+    capabilities: {
+        tools: {},
+    },
+});
+// Define available tools
+server.setRequestHandler(ListToolsRequestSchema, async () => {
+    return {
+        tools: [
+            {
+                name: "save_decision",
+                description: "Save a technical decision or insight to Vektori memory. Use this when the user makes an important architectural choice, picks a technology, or establishes a pattern.",
+                inputSchema: {
+                    type: "object",
+                    properties: {
+                        content: {
+                            type: "string",
+                            description: "The decision in one clear sentence, e.g. 'Use Redis for session caching'",
+                        },
+                        topic: {
+                            type: "string",
+                            description: "Single word topic, e.g. 'caching', 'auth', 'database'",
+                        },
+                        reasoning: {
+                            type: "string",
+                            description: "Brief explanation of why this decision was made",
+                        },
+                        type: {
+                            type: "string",
+                            enum: ["decision", "fact", "insight"],
+                            description: "Type of memory: decision (choice made), fact (established truth), insight (learned observation)",
+                        },
+                        project_id: {
+                            type: "string",
+                            description: "Optional project identifier to scope this decision",
+                        },
+                    },
+                    required: ["content", "topic", "type"],
+                },
+            },
+            {
+                name: "search_memory",
+                description: "Search the user's past decisions, facts, and insights from previous sessions. Use this to recall what was decided before or to check for existing patterns.",
+                inputSchema: {
+                    type: "object",
+                    properties: {
+                        query: {
+                            type: "string",
+                            description: "Natural language search query, e.g. 'authentication decisions'",
+                        },
+                        project_id: {
+                            type: "string",
+                            description: "Optional project ID to scope search",
+                        },
+                        types: {
+                            type: "array",
+                            items: {
+                                type: "string",
+                                enum: ["decision", "fact", "insight"],
+                            },
+                            description: "Filter by memory types",
+                        },
+                        limit: {
+                            type: "number",
+                            description: "Maximum results to return (default 5)",
+                        },
+                    },
+                    required: ["query"],
+                },
+            },
+            {
+                name: "get_context",
+                description: "Get recent decisions and active features for a project. Call this at the start of a session to understand what the user has been working on.",
+                inputSchema: {
+                    type: "object",
+                    properties: {
+                        project_id: {
+                            type: "string",
+                            description: "Project identifier to get context for",
+                        },
+                        limit: {
+                            type: "number",
+                            description: "Maximum number of recent decisions to return",
+                        },
+                    },
+                    required: [],
+                },
+            },
+        ],
+    };
+});
+// Handle tool calls
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+    const { name, arguments: args } = request.params;
+    if (!args) {
+        return {
+            content: [{ type: "text", text: "Error: No arguments provided" }],
+            isError: true,
+        };
+    }
+    try {
+        switch (name) {
+            case "save_decision": {
+                const result = await api.saveDecision({
+                    content: args.content,
+                    topic: args.topic,
+                    reasoning: args.reasoning,
+                    type: args.type,
+                    project_id: args.project_id,
+                });
+                return {
+                    content: [
+                        {
+                            type: "text",
+                            text: `✓ Saved ${args.type}: "${args.content}" (ID: ${result.id})`,
+                        },
+                    ],
+                };
+            }
+            case "search_memory": {
+                const result = await api.searchMemory({
+                    query: args.query,
+                    project_id: args.project_id,
+                    types: args.types,
+                    limit: args.limit,
+                });
+                if (result.results.length === 0) {
+                    return {
+                        content: [
+                            {
+                                type: "text",
+                                text: "No matching memories found.",
+                            },
+                        ],
+                    };
+                }
+                const formatted = result.results
+                    .map((r, i) => `${i + 1}. [${r.topic}] ${r.content}${r.reasoning ? `\n   Reason: ${r.reasoning}` : ""}`)
+                    .join("\n\n");
+                return {
+                    content: [
+                        {
+                            type: "text",
+                            text: `Found ${result.results.length} memories:\n\n${formatted}`,
+                        },
+                    ],
+                };
+            }
+            case "get_context": {
+                const result = await api.getContext(args.project_id, args.limit);
+                let text = `Project: ${result.project_id || "default"}\n\n`;
+                if (result.warnings.length > 0) {
+                    text += `⚠️ Warnings:\n${result.warnings.map((w) => `- ${w}`).join("\n")}\n\n`;
+                }
+                if (result.active_features.length > 0) {
+                    text += `Active Features:\n`;
+                    text += result.active_features
+                        .map((f) => `- ${f.name} (${f.status})`)
+                        .join("\n");
+                    text += "\n\n";
+                }
+                if (result.recent_decisions.length > 0) {
+                    text += `Recent Decisions:\n`;
+                    text += result.recent_decisions
+                        .map((d) => `- [${d.topic}] ${d.decision}`)
+                        .join("\n");
+                }
+                if (result.active_features.length === 0 &&
+                    result.recent_decisions.length === 0) {
+                    text += "No context available yet for this project.";
+                }
+                return {
+                    content: [
+                        {
+                            type: "text",
+                            text,
+                        },
+                    ],
+                };
+            }
+            default:
+                throw new Error(`Unknown tool: ${name}`);
+        }
+    }
+    catch (error) {
+        const message = error instanceof Error ? error.message : String(error);
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: `Error: ${message}`,
+                },
+            ],
+            isError: true,
+        };
+    }
+});
+// Start server
+async function main() {
+    const transport = new StdioServerTransport();
+    await server.connect(transport);
+    console.error("Vektori Cortex MCP server running");
+}
+main().catch((error) => {
+    console.error("Fatal error:", error);
+    process.exit(1);
+});
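On the wire, each tool invocation reaches the `CallToolRequestSchema` handler above as a JSON-RPC 2.0 `tools/call` request framed over stdio. An illustrative message shape (values invented for the example; the MCP host constructs and frames this for you):

```typescript
// Sketch only: the request a host sends for the README's
// "What did we decide about auth?" row; the handler reads
// request.params.name and request.params.arguments.
const exampleToolCall = {
  jsonrpc: "2.0",
  id: 7,
  method: "tools/call",
  params: {
    name: "search_memory",
    arguments: { query: "authentication decisions", types: ["decision"], limit: 5 },
  },
} as const;

console.log(JSON.stringify(exampleToolCall, null, 2));
```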
package/dist/types.d.ts
ADDED
@@ -0,0 +1,51 @@
+export interface SaveDecisionRequest {
+    content: string;
+    topic: string;
+    reasoning?: string;
+    type: "decision" | "fact" | "insight";
+    project_id?: string;
+}
+export interface SaveDecisionResponse {
+    success: boolean;
+    id: string;
+    message: string;
+}
+export interface SearchMemoryRequest {
+    query: string;
+    project_id?: string;
+    types?: ("decision" | "fact" | "insight")[];
+    limit?: number;
+}
+export interface SearchResult {
+    id: string;
+    content: string;
+    topic: string;
+    reasoning?: string;
+    score: number;
+    created_at: string;
+}
+export interface SearchMemoryResponse {
+    results: SearchResult[];
+}
+export interface Decision {
+    topic: string;
+    decision: string;
+    reasoning?: string;
+    created_at: string;
+}
+export interface ActiveFeature {
+    name: string;
+    status: "planning" | "in_progress" | "completed" | "abandoned";
+    files?: string[];
+    last_updated: string;
+}
+export interface ContextResponse {
+    project_id: string;
+    recent_decisions: Decision[];
+    active_features: ActiveFeature[];
+    warnings: string[];
+}
+export interface ApiError {
+    error: string;
+    message: string;
+}
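The handler in `dist/index.js` passes tool arguments into these shapes without runtime checks. If you reuse the types in your own code, a hypothetical guard like the one below (not part of this package) can narrow untrusted input to `SaveDecisionRequest` before it reaches the API:

```typescript
// Sketch only: a hypothetical runtime guard for SaveDecisionRequest.
import type { SaveDecisionRequest } from "vektori-cortex/dist/types.js";

const MEMORY_TYPES = ["decision", "fact", "insight"] as const;

function isSaveDecisionRequest(value: unknown): value is SaveDecisionRequest {
  if (typeof value !== "object" || value === null) return false;
  const v = value as Record<string, unknown>;
  return (
    typeof v.content === "string" &&
    typeof v.topic === "string" &&
    MEMORY_TYPES.includes(v.type as (typeof MEMORY_TYPES)[number]) &&
    (v.reasoning === undefined || typeof v.reasoning === "string") &&
    (v.project_id === undefined || typeof v.project_id === "string")
  );
}

// Usage: validate arguments before calling VektoriAPI.saveDecision.
const incoming: unknown = { content: "Use PostgreSQL", topic: "database", type: "decision" };
if (isSaveDecisionRequest(incoming)) {
  console.log("valid request:", incoming.content);
}
```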
package/dist/types.js
ADDED
@@ -0,0 +1 @@
+export {};
package/package.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "name": "vektori-cortex",
+  "version": "1.0.0",
+  "description": "MCP server for syncing AI coding decisions across tools",
+  "type": "module",
+  "main": "dist/index.js",
+  "bin": {
+    "vektori-cortex": "dist/index.js"
+  },
+  "files": [
+    "dist"
+  ],
+  "scripts": {
+    "build": "tsc",
+    "start": "node dist/index.js",
+    "dev": "tsx src/index.ts",
+    "prepublishOnly": "npm run build"
+  },
+  "keywords": [
+    "mcp",
+    "vektori",
+    "ai",
+    "memory",
+    "claude",
+    "cursor",
+    "context"
+  ],
+  "author": "Vektori",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/vektori-ai/vektori-cortex"
+  },
+  "dependencies": {
+    "@modelcontextprotocol/sdk": "^1.0.0",
+    "@qdrant/js-client-rest": "^1.9.0"
+  },
+  "devDependencies": {
+    "@types/node": "^20.10.0",
+    "tsx": "^4.7.0",
+    "typescript": "^5.3.0"
+  }
+}