@cybermem/mcp 0.5.1 → 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,75 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const execa_1 = __importDefault(require("execa"));
+const fs_1 = __importDefault(require("fs"));
+const backup_1 = require("../backup");
+// Mock dependencies
+jest.mock('execa');
+jest.mock('fs');
+jest.mock('path', () => ({
+    ...jest.requireActual('path'),
+    resolve: jest.fn((...args) => args.join('/')),
+    basename: jest.fn((p) => p.split('/').pop() || ''),
+    dirname: jest.fn((p) => p.split('/').slice(0, -1).join('/') || '/'),
+}));
+// Mock console to keep output clean
+const originalConsoleLog = console.log;
+const originalConsoleError = console.error;
+describe('backup command', () => {
+    beforeAll(() => {
+        console.log = jest.fn();
+        console.error = jest.fn();
+    });
+    afterAll(() => {
+        console.log = originalConsoleLog;
+        console.error = originalConsoleError;
+    });
+    beforeEach(() => {
+        jest.clearAllMocks();
+        // Default process.cwd
+        jest.spyOn(process, 'cwd').mockReturnValue('/mock/cwd');
+    });
+    it('should create a backup successfully when openmemory container exists', async () => {
+        // Mock docker inspect success
+        execa_1.default.mockResolvedValueOnce({ exitCode: 0 });
+        // Mock docker run success
+        execa_1.default.mockResolvedValueOnce({ exitCode: 0 });
+        // Mock fs.existsSync to true (file created)
+        fs_1.default.existsSync.mockReturnValue(true);
+        // Mock fs.statSync
+        fs_1.default.statSync.mockReturnValue({ size: 5 * 1024 * 1024 }); // 5MB
+        await (0, backup_1.backup)({});
+        // Expect docker inspect check
+        expect(execa_1.default).toHaveBeenCalledWith('docker', ['inspect', 'cybermem-openmemory']);
+        // Expect docker run tar
+        const tarCall = execa_1.default.mock.calls[1];
+        expect(tarCall[0]).toBe('docker');
+        expect(tarCall[1]).toContain('run');
+        expect(tarCall[1]).toContain('tar');
+        expect(tarCall[1]).toContain('czf');
+        // Check volumes from
+        expect(tarCall[1]).toContain('--volumes-from');
+        expect(tarCall[1]).toContain('cybermem-openmemory');
+    });
+    it('should fail if openmemory container does not exist', async () => {
+        // Mock inspect failure
+        execa_1.default.mockRejectedValueOnce(new Error('No such container'));
+        const exitSpy = jest.spyOn(process, 'exit').mockImplementation((() => { }));
+        await (0, backup_1.backup)({});
+        expect(console.error).toHaveBeenCalledWith(expect.stringContaining('not found'));
+        expect(exitSpy).toHaveBeenCalledWith(1);
+    });
+    it('should error if backup file is not created', async () => {
+        execa_1.default.mockResolvedValue({ exitCode: 0 }); // inspect OK
+        execa_1.default.mockResolvedValue({ exitCode: 0 }); // run OK
+        // Mock file check false
+        fs_1.default.existsSync.mockReturnValue(false);
+        const exitSpy = jest.spyOn(process, 'exit').mockImplementation((() => { }));
+        await (0, backup_1.backup)({});
+        expect(console.error).toHaveBeenCalledWith(expect.stringContaining('Backup failed'));
+        expect(exitSpy).toHaveBeenCalledWith(1);
+    });
+});
@@ -0,0 +1,70 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const execa_1 = __importDefault(require("execa"));
+const fs_1 = __importDefault(require("fs"));
+const restore_1 = require("../restore");
+jest.mock('execa');
+jest.mock('fs');
+jest.mock('path', () => ({
+    ...jest.requireActual('path'),
+    resolve: jest.fn((...args) => args.join('/')),
+    basename: jest.fn((p) => p.split('/').pop() || ''),
+    dirname: jest.fn((p) => p.split('/').slice(0, -1).join('/') || '/'),
+}));
+const originalConsoleLog = console.log;
+const originalConsoleError = console.error;
+describe('restore command', () => {
+    beforeAll(() => {
+        console.log = jest.fn();
+        console.error = jest.fn();
+    });
+    afterAll(() => {
+        console.log = originalConsoleLog;
+        console.error = originalConsoleError;
+    });
+    beforeEach(() => {
+        jest.clearAllMocks();
+        jest.spyOn(process, 'cwd').mockReturnValue('/mock/cwd');
+    });
+    it('should restore successfully', async () => {
+        // Mock file exists
+        fs_1.default.existsSync.mockReturnValue(true);
+        // Mock docker stop (success)
+        execa_1.default.mockResolvedValueOnce({ exitCode: 0 });
+        // Mock docker run tar (success)
+        execa_1.default.mockResolvedValueOnce({ exitCode: 0 });
+        // Mock docker start (success)
+        execa_1.default.mockResolvedValueOnce({ exitCode: 0 });
+        await (0, restore_1.restore)('backup.tar.gz', {});
+        // Check docker stop
+        expect(execa_1.default).toHaveBeenCalledWith('docker', ['stop', 'cybermem-openmemory']);
+        // Check docker run tar
+        const tarCall = execa_1.default.mock.calls[1];
+        expect(tarCall[0]).toBe('docker');
+        expect(tarCall[1]).toContain('run');
+        expect(tarCall[1]).toContain('tar xzf /backup/backup.tar.gz -C / && chown -R 1001:1001 /data');
+        // Check docker start
+        expect(execa_1.default).toHaveBeenCalledWith('docker', ['start', 'cybermem-openmemory']);
+    });
+    it('should ignore docker stop error (if container not running)', async () => {
+        fs_1.default.existsSync.mockReturnValue(true);
+        // Mock docker stop FAIL
+        execa_1.default.mockRejectedValueOnce(new Error('No such container'));
+        // Mock succeeding calls
+        execa_1.default.mockResolvedValue({ exitCode: 0 });
+        await (0, restore_1.restore)('backup.tar.gz', {});
+        // Should still proceed to restore
+        expect(execa_1.default).toHaveBeenCalledWith('docker', expect.arrayContaining(['run']));
+        expect(execa_1.default).toHaveBeenCalledWith('docker', ['start', 'cybermem-openmemory']);
+    });
+    it('should fail if backup file missing', async () => {
+        fs_1.default.existsSync.mockReturnValue(false);
+        const exitSpy = jest.spyOn(process, 'exit').mockImplementation((() => { }));
+        await (0, restore_1.restore)('mia.tar.gz', {});
+        expect(console.error).toHaveBeenCalledWith(expect.stringContaining('not found'));
+        expect(exitSpy).toHaveBeenCalledWith(1);
+    });
+});
@@ -0,0 +1,52 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.backup = backup;
+const chalk_1 = __importDefault(require("chalk"));
+const execa_1 = __importDefault(require("execa"));
+const fs_1 = __importDefault(require("fs"));
+const path_1 = __importDefault(require("path"));
+async function backup(options) {
+    const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
+    const filename = `cybermem-backup-${timestamp}.tar.gz`;
+    const outputPath = path_1.default.resolve(process.cwd(), filename);
+    console.log(chalk_1.default.blue(`📦 Creating backup: ${filename}...`));
+    try {
+        // Check if container exists
+        try {
+            await (0, execa_1.default)('docker', ['inspect', 'cybermem-openmemory']);
+        }
+        catch (e) {
+            console.error(chalk_1.default.red('Error: cybermem-openmemory container not found. Is CyberMem installed?'));
+            process.exit(1);
+        }
+        // Use a transient alpine container to tar the /data volume
+        // We mount the current working directory to /backup in the container
+        // And we use --volumes-from to access the data volume of the running service
+        const cmd = [
+            'run', '--rm',
+            '--volumes-from', 'cybermem-openmemory',
+            '-v', `${process.cwd()}:/backup`,
+            'alpine',
+            'tar', 'czf', `/backup/${filename}`, '-C', '/', 'data'
+        ];
+        console.log(chalk_1.default.gray(`Running: docker ${cmd.join(' ')}`));
+        await (0, execa_1.default)('docker', cmd, { stdio: 'inherit' });
+        if (fs_1.default.existsSync(outputPath)) {
+            const stats = fs_1.default.statSync(outputPath);
+            const sizeMb = (stats.size / 1024 / 1024).toFixed(2);
+            console.log(chalk_1.default.green(`\n✅ Backup created successfully!`));
+            console.log(`File: ${chalk_1.default.bold(outputPath)}`);
+            console.log(`Size: ${sizeMb} MB`);
+        }
+        else {
+            throw new Error('Backup file not found after generation.');
+        }
+    }
+    catch (error) {
+        console.error(chalk_1.default.red('Backup failed:'), error);
+        process.exit(1);
+    }
+}
@@ -3,43 +3,36 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.deployCommand = void 0;
+exports.deploy = deploy;
 const chalk_1 = __importDefault(require("chalk"));
-const commander_1 = require("commander");
 const crypto_1 = __importDefault(require("crypto"));
 const execa_1 = __importDefault(require("execa"));
 const fs_1 = __importDefault(require("fs"));
 const inquirer_1 = __importDefault(require("inquirer"));
 const os_1 = __importDefault(require("os"));
 const path_1 = __importDefault(require("path"));
-exports.deployCommand = new commander_1.Command('deploy')
-    .description('Deploy CyberMem services')
-    .option('-t, --target <target>', 'Deployment target (local, rpi, vps)', 'local')
-    .option('-h, --host <host>', 'SSH Host (user@ip) for remote deployment')
-    .option('--remote-access', 'Enable remote access via Tailscale Funnel (HTTPS)', false)
-    .option('--tailscale', 'Alias for --remote-access (deprecated)', false)
-    .option('--caddy', 'Use Caddy for automatic HTTPS (VPS only)')
-    .action(async (options) => {
-    const target = options.target;
-    const useTailscale = options.remoteAccess || options.tailscale;
-    const useCaddy = options.caddy;
-    console.log(chalk_1.default.blue(`Deploying to ${target}...`));
+async function deploy(options) {
+    // Determine target from flags
+    let target = 'local';
+    if (options.rpi)
+        target = 'rpi';
+    if (options.vps)
+        target = 'vps';
+    const useTailscale = options.remoteAccess;
+    console.log(chalk_1.default.blue(`Deploying CyberMem (${target})...`));
     try {
         // Resolve Template Directory (Support both Dev and Prod)
-        // Resolve Template Directory (Support both Dev and Prod)
-        // In Prod: __dirname is dist/commands, so templates is ../templates (dist/templates)
-        // In Dev (ts-node): __dirname is src/commands, so templates is ../../templates (root/packages/cli/templates)
-        // Try production path first (dist/templates)
-        let templateDir = path_1.default.resolve(__dirname, '../templates');
-        // If not found, try development path (src/../../templates)
+        let templateDir = path_1.default.resolve(__dirname, '../../templates');
         if (!fs_1.default.existsSync(templateDir)) {
-            templateDir = path_1.default.resolve(__dirname, '../../templates');
+            templateDir = path_1.default.resolve(__dirname, '../../../templates');
         }
-        // Final sanity check
         if (!fs_1.default.existsSync(templateDir)) {
-            // Fallback for when running from root with ts-node directly (unlikely but possible)
             templateDir = path_1.default.resolve(process.cwd(), 'packages/cli/templates');
         }
+        if (!fs_1.default.existsSync(templateDir)) {
+            // Fallback for different build structures
+            templateDir = path_1.default.resolve(__dirname, '../templates');
+        }
         if (!fs_1.default.existsSync(templateDir)) {
             throw new Error(`Templates not found at ${templateDir}. Please ensure package is built correctly.`);
         }
@@ -68,8 +61,6 @@ exports.deployCommand = new commander_1.Command('deploy')
                 console.log(chalk_1.default.green(`Created .env at ${envFile}`));
             }
             console.log(chalk_1.default.blue('Starting CyberMem services in Local Mode...'));
-            // Execute docker-compose with internal file and USER HOME env
-            // Note: We pass CYBERMEM_API_KEY="" explicitly for local mode to trigger keyless bypass
             await (0, execa_1.default)('docker-compose', [
                 '-f', composeFile,
                 '--env-file', envFile,
@@ -81,7 +72,7 @@ exports.deployCommand = new commander_1.Command('deploy')
                    ...process.env,
                    DATA_DIR: dataDir,
                    CYBERMEM_ENV_PATH: envFile,
-                    CYBERMEM_API_KEY: ''
+                    OM_API_KEY: ''
                 }
             });
             console.log(chalk_1.default.green('\n🎉 CyberMem Installed!'));
@@ -110,8 +101,40 @@ exports.deployCommand = new commander_1.Command('deploy')
             console.log(chalk_1.default.blue(`Remote deploying to ${sshHost}...`));
             // 1. Create remote directory
             await (0, execa_1.default)('ssh', [sshHost, 'mkdir -p ~/.cybermem/data']);
+            // 1.5 Check and fix Docker architecture (64-bit kernel with 32-bit Docker)
+            console.log(chalk_1.default.blue('Checking Docker architecture...'));
+            try {
+                const { stdout: kernelArch } = await (0, execa_1.default)('ssh', [sshHost, 'uname -m']);
+                const { stdout: dockerArch } = await (0, execa_1.default)('ssh', [sshHost, 'docker version --format "{{.Server.Arch}}" 2>/dev/null || echo "unknown"']);
+                if (kernelArch.trim() === 'aarch64' && dockerArch.trim() !== 'arm64') {
+                    console.log(chalk_1.default.yellow(`⚠️ Docker is ${dockerArch.trim()}, kernel is aarch64. Installing arm64 Docker...`));
+                    const installCmd = `
+sudo systemctl stop docker docker.socket 2>/dev/null || true
+curl -fsSL https://download.docker.com/linux/static/stable/aarch64/docker-27.5.1.tgz -o /tmp/docker.tgz
+sudo tar -xzf /tmp/docker.tgz -C /usr/local/bin --strip-components=1
+sudo /usr/local/bin/dockerd &
+sleep 5
+docker version --format "{{.Server.Arch}}"
+`;
+                    const { stdout } = await (0, execa_1.default)('ssh', [sshHost, installCmd], { shell: true });
+                    if (stdout.includes('arm64')) {
+                        console.log(chalk_1.default.green('✅ Docker arm64 installed successfully'));
+                    }
+                    else {
+                        console.log(chalk_1.default.yellow('⚠️ Docker arm64 install may need manual verification'));
+                    }
+                }
+                else if (dockerArch.trim() === 'arm64') {
+                    console.log(chalk_1.default.green(`✅ Docker is already arm64`));
+                }
+                else {
+                    console.log(chalk_1.default.gray(`Docker arch: ${dockerArch.trim()}, kernel: ${kernelArch.trim()}`));
+                }
+            }
+            catch (e) {
+                console.log(chalk_1.default.yellow(`⚠️ Docker arch check skipped: ${e.message}`));
+            }
             // 2. Initial Env Setup (if missing)
-            // We read remote file check using ssh
             try {
                 await (0, execa_1.default)('ssh', [sshHost, '[ -f ~/.cybermem/.env ]']);
                 console.log(chalk_1.default.gray('Remote .env exists, skipping generation.'));
@@ -120,10 +143,13 @@ exports.deployCommand = new commander_1.Command('deploy')
                console.log(chalk_1.default.yellow('Generating remote .env...'));
                let envContent = fs_1.default.readFileSync(internalEnvExample, 'utf-8');
                const newKey = `sk-${crypto_1.default.randomBytes(16).toString('hex')}`;
-                if (envContent.includes('CYBERMEM_API_KEY=')) {
-                    envContent = envContent.replace(/CYBERMEM_API_KEY=.*/, `CYBERMEM_API_KEY=${newKey}`);
+                // Replace OM_API_KEY with generated key
+                if (envContent.includes('OM_API_KEY=')) {
+                    envContent = envContent.replace(/OM_API_KEY=.*/, `OM_API_KEY=${newKey}`);
+                }
+                else {
+                    envContent += `\nOM_API_KEY=${newKey}\n`;
                 }
-                // Write to temp file then scp
                const tempEnv = path_1.default.join(os_1.default.tmpdir(), 'cybermem-rpi.env');
                fs_1.default.writeFileSync(tempEnv, envContent);
                await (0, execa_1.default)('scp', [tempEnv, `${sshHost}:~/.cybermem/.env`]);
@@ -134,17 +160,15 @@ exports.deployCommand = new commander_1.Command('deploy')
             await (0, execa_1.default)('scp', [composeFile, `${sshHost}:~/.cybermem/docker-compose.yml`]);
             // 4. Run Docker Compose Remotely
             console.log(chalk_1.default.blue('Starting services on RPi...'));
-            // We pass CYBERMEM_ENV_PATH explicitly as ~/.cybermem/.env and DATA_DIR as ~/.cybermem/data
-            // The template uses ${CYBERMEM_ENV_PATH} and maps volumes.
-            // We need to set these vars in the shell when running docker-compose
+            // DOCKER_DEFAULT_PLATFORM=linux/arm64 forces arm64 images on RPi with 64-bit kernel but 32-bit Docker
             const remoteCmd = `
 export CYBERMEM_ENV_PATH=~/.cybermem/.env
 export DATA_DIR=~/.cybermem/data
+export DOCKER_DEFAULT_PLATFORM=linux/arm64
 docker-compose -f ~/.cybermem/docker-compose.yml up -d --remove-orphans
 `;
             await (0, execa_1.default)('ssh', [sshHost, remoteCmd], { stdio: 'inherit' });
             console.log(chalk_1.default.green('\n✅ RPi deployment successful!'));
-            // Parse host from ssh string for convenience
             const hostIp = sshHost.split('@')[1];
             console.log(chalk_1.default.bold('Access Points (LAN):'));
             console.log(` - Dashboard: ${chalk_1.default.underline(`http://${hostIp}:3000`)} (admin/admin)`);
@@ -153,7 +177,6 @@ exports.deployCommand = new commander_1.Command('deploy')
             if (useTailscale) {
                 console.log(chalk_1.default.blue('\n🔗 Setting up Remote Access (Tailscale Funnel)...'));
                 try {
-                    // 1. Check/Install Tailscale
                     try {
                         await (0, execa_1.default)('ssh', [sshHost, 'which tailscale']);
                     }
@@ -161,29 +184,21 @@ exports.deployCommand = new commander_1.Command('deploy')
                        console.log(chalk_1.default.yellow(' Tailscale not found. Installing...'));
                        await (0, execa_1.default)('ssh', [sshHost, 'curl -fsSL https://tailscale.com/install.sh | sh'], { stdio: 'inherit' });
                    }
-                    // 2. Auth (interactive if needed)
                    console.log(chalk_1.default.blue(' Ensuring Tailscale is up...'));
                    try {
-                        // Check status first to avoid re-auth if already up
                        await (0, execa_1.default)('ssh', [sshHost, 'tailscale status']);
                    }
                    catch (e) {
-                        // Interactive auth
                        console.log(chalk_1.default.yellow(' ⚠️ Tailscale authentication required. Please follow the prompts:'));
                        await (0, execa_1.default)('ssh', [sshHost, 'sudo tailscale up'], { stdio: 'inherit' });
                    }
-                    // 3. Configure Funnel (Verified commands)
                    console.log(chalk_1.default.blue(' Configuring HTTPS Funnel (requires sudo access)...'));
                    console.log(chalk_1.default.gray(' You may be prompted for your RPi password.'));
-                    // Routes:
-                    // - / -> Dashboard (3000)
-                    // - /cybermem/mcp -> MCP API (8626/mcp)
                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale serve reset'], { stdio: 'inherit' }).catch(() => { });
                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale serve --bg --set-path /cybermem http://127.0.0.1:8626'], { stdio: 'inherit' });
                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale serve --bg http://127.0.0.1:3000'], { stdio: 'inherit' });
                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale funnel --bg 443'], { stdio: 'inherit' });
-                    // Get DNS name
-                    const { stdout } = await (0, execa_1.default)('ssh', [sshHost, "tailscale status --json | jq -r '.Self.DNSName' | sed 's/\\.$//'"], { shell: true });
+                    const { stdout } = await (0, execa_1.default)('ssh', [sshHost, "tailscale status --json | jq -r '.Self.DNSName' | sed 's/\\.$//'"]);
                    const dnsName = stdout.trim();
                    console.log(chalk_1.default.green('\n🌐 Remote Access Active (HTTPS):'));
                    console.log(` - Dashboard: ${chalk_1.default.underline(`https://${dnsName}/`)}`);
@@ -196,15 +211,15 @@ exports.deployCommand = new commander_1.Command('deploy')
                }
            }
            else {
-                console.log(chalk_1.default.gray('\n💡 For remote access, re-run with: cybermem deploy --target rpi --remote-access'));
+                console.log(chalk_1.default.gray('\n💡 For remote access, re-run with: npx @cybermem/mcp --rpi --remote-access'));
            }
        }
        else if (target === 'vps') {
            console.log(chalk_1.default.yellow('VPS deployment is similar to RPi.'));
            console.log(chalk_1.default.blue('\n📋 VPS Deployment Steps:'));
-            console.log('1. Run: cybermem deploy --target rpi --host user@your-vps-ip');
+            console.log('1. Run: npx @cybermem/mcp --rpi --host user@your-vps-ip');
            console.log('2. For HTTPS, choose one of:');
-            console.log(chalk_1.default.gray(' a) Tailscale Funnel: --tailscale flag'));
+            console.log(chalk_1.default.gray(' a) Tailscale Funnel: --remote-access flag'));
            console.log(chalk_1.default.gray(' b) Caddy (recommended for public VPS):'));
            console.log(chalk_1.default.gray(' - Install Caddy: sudo apt install caddy'));
            console.log(chalk_1.default.gray(' - Configure /etc/caddy/Caddyfile:'));
@@ -219,12 +234,9 @@ exports.deployCommand = new commander_1.Command('deploy')
            console.log(chalk_1.default.gray(' - Restart: sudo systemctl restart caddy'));
            console.log(chalk_1.default.green('\n📚 Full docs: https://cybermem.dev/docs#https'));
        }
-        else {
-            console.error(chalk_1.default.red(`Unknown target: ${target}. Use: local, rpi, or vps`));
-        }
    }
    catch (error) {
        console.error(chalk_1.default.red('Deployment failed:'), error);
        process.exit(1);
    }
-});
+}
@@ -0,0 +1,62 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.restore = restore;
+const chalk_1 = __importDefault(require("chalk"));
+const execa_1 = __importDefault(require("execa"));
+const fs_1 = __importDefault(require("fs"));
+const path_1 = __importDefault(require("path"));
+async function restore(file, options) {
+    if (!file) {
+        console.error(chalk_1.default.red('Error: Please specify the backup file to restore.'));
+        console.log(`Usage: npx @cybermem/mcp restore <file>`);
+        process.exit(1);
+    }
+    const backupPath = path_1.default.resolve(process.cwd(), file);
+    if (!fs_1.default.existsSync(backupPath)) {
+        console.error(chalk_1.default.red(`Error: File not found at ${backupPath}`));
+        process.exit(1);
+    }
+    console.log(chalk_1.default.blue(`♻️ Restoring from: ${path_1.default.basename(backupPath)}...`));
+    console.log(chalk_1.default.yellow('⚠️ This will overwrite current data!'));
+    try {
+        // 1. Stop the OpenMemory service to safely write to DB
+        console.log(chalk_1.default.blue('Stopping OpenMemory service...'));
+        try {
+            await (0, execa_1.default)('docker', ['stop', 'cybermem-openmemory']);
+        }
+        catch (e) {
+            console.log(chalk_1.default.gray('Container not running (or not found), proceeding...'));
+        }
+        // 2. Restore data using transient alpine container
+        console.log(chalk_1.default.blue('Extracting data to volume...'));
+        // We handle both absolute paths (by mounting dir) or relative
+        const dir = path_1.default.dirname(backupPath);
+        const filename = path_1.default.basename(backupPath);
+        const cmd = [
+            'run', '--rm',
+            '--volumes-from', 'cybermem-openmemory', // Access the volume even if container is stopped
+            '-v', `${dir}:/backup`,
+            'alpine',
+            'sh', '-c',
+            // Extract to root / (since backup was relative to /data we need to be careful how it was packed)
+            // In backup we did: tar czf ... -C / data
+            // So it contains "data/..."
+            // Extracting to / will put it in /data
+            `tar xzf /backup/${filename} -C / && chown -R 1001:1001 /data`
+        ];
+        await (0, execa_1.default)('docker', cmd, { stdio: 'inherit' });
+        // 3. Restart the service
+        console.log(chalk_1.default.blue('Restarting OpenMemory service...'));
+        await (0, execa_1.default)('docker', ['start', 'cybermem-openmemory']);
+        console.log(chalk_1.default.green(`\n✅ Restore completed successfully!`));
+        console.log('Your memory has been recovered.');
+    }
+    catch (error) {
+        console.error(chalk_1.default.red('Restore failed:'), error);
+        console.log(chalk_1.default.yellow('Suggestion: Check if Docker is running and "cybermem-openmemory" container exists.'));
+        process.exit(1);
+    }
+}
package/dist/index.js CHANGED
@@ -1,212 +1,30 @@
-#!/usr/bin/env node
 "use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
-const chalk_1 = __importDefault(require("chalk"));
 const commander_1 = require("commander");
-const crypto_1 = __importDefault(require("crypto"));
-const execa_1 = __importDefault(require("execa"));
-const fs_1 = __importDefault(require("fs"));
-const inquirer_1 = __importDefault(require("inquirer"));
-const os_1 = __importDefault(require("os"));
-const path_1 = __importDefault(require("path"));
+const backup_1 = require("./commands/backup");
+const deploy_1 = require("./commands/deploy");
+const restore_1 = require("./commands/restore");
 const program = new commander_1.Command();
 program
     .name('mcp')
     .description('CyberMem - Deploy your AI memory server in one command')
-    .version('1.0.0')
+    .version('1.0.0');
+// Default Command: Deploy
+program
+    .command('deploy', { isDefault: true })
+    .description('Deploy CyberMem (Default)')
     .option('--rpi', 'Deploy to Raspberry Pi (default: local)')
     .option('--vps', 'Deploy to VPS/Cloud server')
     .option('-h, --host <host>', 'SSH Host (user@ip) for remote deployment')
     .option('--remote-access', 'Enable Tailscale Funnel for HTTPS remote access')
-    .action(async (options) => {
-    // Determine target from flags
-    let target = 'local';
-    if (options.rpi)
-        target = 'rpi';
-    if (options.vps)
-        target = 'vps';
-    const useTailscale = options.remoteAccess;
-    console.log(chalk_1.default.blue(`Deploying CyberMem (${target})...`));
-    try {
-        // Resolve Template Directory (Support both Dev and Prod)
-        let templateDir = path_1.default.resolve(__dirname, '../templates');
-        if (!fs_1.default.existsSync(templateDir)) {
-            templateDir = path_1.default.resolve(__dirname, '../../templates');
-        }
-        if (!fs_1.default.existsSync(templateDir)) {
-            templateDir = path_1.default.resolve(process.cwd(), 'packages/cli/templates');
-        }
-        if (!fs_1.default.existsSync(templateDir)) {
-            throw new Error(`Templates not found at ${templateDir}. Please ensure package is built correctly.`);
-        }
-        if (target === 'local') {
-            const composeFile = path_1.default.join(templateDir, 'docker-compose.yml');
-            const internalEnvExample = path_1.default.join(templateDir, 'envs/local.example');
-            if (!fs_1.default.existsSync(composeFile)) {
-                console.error(chalk_1.default.red(`Internal Error: Template not found at ${composeFile}`));
-                process.exit(1);
-            }
-            // Home Directory Config
-            const homeDir = os_1.default.homedir();
-            const configDir = path_1.default.join(homeDir, '.cybermem');
-            const envFile = path_1.default.join(configDir, '.env');
-            const dataDir = path_1.default.join(configDir, 'data');
-            // 1. Ensure ~/.cybermem exists
-            if (!fs_1.default.existsSync(configDir)) {
-                fs_1.default.mkdirSync(configDir, { recursive: true });
-                fs_1.default.mkdirSync(dataDir, { recursive: true });
-            }
-            // 2. Local Mode: Simplified setup without mandatory API key
-            if (!fs_1.default.existsSync(envFile)) {
-                console.log(chalk_1.default.yellow(`Initializing local configuration in ${configDir}...`));
-                const envContent = fs_1.default.readFileSync(internalEnvExample, 'utf-8');
-                fs_1.default.writeFileSync(envFile, envContent);
-                console.log(chalk_1.default.green(`Created .env at ${envFile}`));
-            }
-            console.log(chalk_1.default.blue('Starting CyberMem services in Local Mode...'));
-            await (0, execa_1.default)('docker-compose', [
-                '-f', composeFile,
-                '--env-file', envFile,
-                '--project-name', 'cybermem',
-                'up', '-d', '--remove-orphans'
-            ], {
-                stdio: 'inherit',
-                env: {
-                    ...process.env,
-                    DATA_DIR: dataDir,
-                    CYBERMEM_ENV_PATH: envFile,
-                    CYBERMEM_API_KEY: ''
-                }
-            });
-            console.log(chalk_1.default.green('\n🎉 CyberMem Installed!'));
-            console.log('');
-            console.log(chalk_1.default.bold('Next Steps:'));
-            console.log(` 1. Open ${chalk_1.default.underline('http://localhost:3000/client-connect')} to connect your MCP clients`);
-            console.log(` 2. Default password: ${chalk_1.default.bold('admin')} (you'll be prompted to change it)`);
-            console.log('');
-            console.log(chalk_1.default.dim('Local mode is active: No API key required for connections from this laptop.'));
-        }
-        else if (target === 'rpi') {
-            const composeFile = path_1.default.join(templateDir, 'docker-compose.yml');
-            const internalEnvExample = path_1.default.join(templateDir, 'envs/rpi.example');
-            let sshHost = options.host;
-            if (!sshHost) {
-                const answers = await inquirer_1.default.prompt([
-                    {
-                        type: 'input',
-                        name: 'host',
-                        message: 'Enter SSH Host (e.g. pi@raspberrypi.local):',
-                        validate: (input) => input.includes('@') ? true : 'Format must be user@host'
-                    }
-                ]);
-                sshHost = answers.host;
-            }
-            console.log(chalk_1.default.blue(`Remote deploying to ${sshHost}...`));
-            // 1. Create remote directory
-            await (0, execa_1.default)('ssh', [sshHost, 'mkdir -p ~/.cybermem/data']);
-            // 2. Initial Env Setup (if missing)
-            try {
-                await (0, execa_1.default)('ssh', [sshHost, '[ -f ~/.cybermem/.env ]']);
-                console.log(chalk_1.default.gray('Remote .env exists, skipping generation.'));
-            }
-            catch (e) {
-                console.log(chalk_1.default.yellow('Generating remote .env...'));
-                let envContent = fs_1.default.readFileSync(internalEnvExample, 'utf-8');
-                const newKey = `sk-${crypto_1.default.randomBytes(16).toString('hex')}`;
-                if (envContent.includes('CYBERMEM_API_KEY=')) {
-                    envContent = envContent.replace(/CYBERMEM_API_KEY=.*/, `CYBERMEM_API_KEY=${newKey}`);
-                }
-                const tempEnv = path_1.default.join(os_1.default.tmpdir(), 'cybermem-rpi.env');
-                fs_1.default.writeFileSync(tempEnv, envContent);
-                await (0, execa_1.default)('scp', [tempEnv, `${sshHost}:~/.cybermem/.env`]);
-                fs_1.default.unlinkSync(tempEnv);
-            }
-            // 3. Copy Docker Compose
-            console.log(chalk_1.default.blue('Uploading templates...'));
-            await (0, execa_1.default)('scp', [composeFile, `${sshHost}:~/.cybermem/docker-compose.yml`]);
-            // 4. Run Docker Compose Remotely
-            console.log(chalk_1.default.blue('Starting services on RPi...'));
-            // DOCKER_DEFAULT_PLATFORM=linux/arm64 forces arm64 images on RPi with 64-bit kernel but 32-bit Docker
-            const remoteCmd = `
-export CYBERMEM_ENV_PATH=~/.cybermem/.env
-export DATA_DIR=~/.cybermem/data
-export DOCKER_DEFAULT_PLATFORM=linux/arm64
-docker-compose -f ~/.cybermem/docker-compose.yml up -d --remove-orphans
-`;
-            await (0, execa_1.default)('ssh', [sshHost, remoteCmd], { stdio: 'inherit' });
-            console.log(chalk_1.default.green('\n✅ RPi deployment successful!'));
-            const hostIp = sshHost.split('@')[1];
-            console.log(chalk_1.default.bold('Access Points (LAN):'));
-            console.log(` - Dashboard: ${chalk_1.default.underline(`http://${hostIp}:3000`)} (admin/admin)`);
-            console.log(` - OpenMemory: ${chalk_1.default.underline(`http://${hostIp}:8080`)}`);
-            // Tailscale Funnel setup
-            if (useTailscale) {
-                console.log(chalk_1.default.blue('\n🔗 Setting up Remote Access (Tailscale Funnel)...'));
-                try {
-                    try {
-                        await (0, execa_1.default)('ssh', [sshHost, 'which tailscale']);
-                    }
-                    catch (e) {
-                        console.log(chalk_1.default.yellow(' Tailscale not found. Installing...'));
-                        await (0, execa_1.default)('ssh', [sshHost, 'curl -fsSL https://tailscale.com/install.sh | sh'], { stdio: 'inherit' });
-                    }
-                    console.log(chalk_1.default.blue(' Ensuring Tailscale is up...'));
-                    try {
-                        await (0, execa_1.default)('ssh', [sshHost, 'tailscale status']);
-                    }
-                    catch (e) {
-                        console.log(chalk_1.default.yellow(' ⚠️ Tailscale authentication required. Please follow the prompts:'));
-                        await (0, execa_1.default)('ssh', [sshHost, 'sudo tailscale up'], { stdio: 'inherit' });
-                    }
-                    console.log(chalk_1.default.blue(' Configuring HTTPS Funnel (requires sudo access)...'));
-                    console.log(chalk_1.default.gray(' You may be prompted for your RPi password.'));
-                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale serve reset'], { stdio: 'inherit' }).catch(() => { });
-                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale serve --bg --set-path /cybermem http://127.0.0.1:8626'], { stdio: 'inherit' });
-                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale serve --bg http://127.0.0.1:3000'], { stdio: 'inherit' });
-                    await (0, execa_1.default)('ssh', ['-t', sshHost, 'sudo tailscale funnel --bg 443'], { stdio: 'inherit' });
-                    const { stdout } = await (0, execa_1.default)('ssh', [sshHost, "tailscale status --json | jq -r '.Self.DNSName' | sed 's/\\.$//'"]);
-                    const dnsName = stdout.trim();
-                    console.log(chalk_1.default.green('\n🌐 Remote Access Active (HTTPS):'));
-                    console.log(` - Dashboard: ${chalk_1.default.underline(`https://${dnsName}/`)}`);
-                    console.log(` - MCP API: ${chalk_1.default.underline(`https://${dnsName}/cybermem/mcp`)}`);
-                }
-                catch (e) {
-                    console.log(chalk_1.default.red('\n❌ Remote Access setup failed:'));
-                    console.error(e);
-                    console.log(chalk_1.default.gray('Manual setup: curl -fsSL https://tailscale.com/install.sh | sh && sudo tailscale up'));
-                }
-            }
-            else {
-                console.log(chalk_1.default.gray('\n💡 For remote access, re-run with: npx @cybermem/mcp --rpi --remote-access'));
-            }
-        }
-        else if (target === 'vps') {
-            console.log(chalk_1.default.yellow('VPS deployment is similar to RPi.'));
-            console.log(chalk_1.default.blue('\n📋 VPS Deployment Steps:'));
-            console.log('1. Run: npx @cybermem/mcp --rpi --host user@your-vps-ip');
-            console.log('2. For HTTPS, choose one of:');
-            console.log(chalk_1.default.gray(' a) Tailscale Funnel: --remote-access flag'));
-            console.log(chalk_1.default.gray(' b) Caddy (recommended for public VPS):'));
-            console.log(chalk_1.default.gray(' - Install Caddy: sudo apt install caddy'));
-            console.log(chalk_1.default.gray(' - Configure /etc/caddy/Caddyfile:'));
-            console.log(chalk_1.default.cyan(`
-cybermem.yourdomain.com {
-reverse_proxy localhost:3000
-}
-api.cybermem.yourdomain.com {
-reverse_proxy localhost:8080
-}
-`));
-            console.log(chalk_1.default.gray(' - Restart: sudo systemctl restart caddy'));
-            console.log(chalk_1.default.green('\n📚 Full docs: https://cybermem.dev/docs#https'));
-        }
-    }
-    catch (error) {
-        console.error(chalk_1.default.red('Deployment failed:'), error);
-        process.exit(1);
-    }
-});
+    .action(deploy_1.deploy);
+program
+    .command('backup')
+    .description('Backup CyberMem data to a tarball')
+    .action(backup_1.backup);
+program
+    .command('restore')
+    .description('Restore CyberMem data from a backup file')
+    .argument('<file>', 'Backup file to restore')
+    .action(restore_1.restore);
 program.parse(process.argv);
@@ -20,13 +20,30 @@ services:
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock:ro
       - ./monitoring/traefik/traefik.yml:/etc/traefik/traefik.yml:ro
+      - ./monitoring/traefik/dynamic:/etc/traefik/dynamic:ro
       - traefik-logs:/var/log/traefik
     labels:
       - traefik.enable=true
     restart: unless-stopped
 
+  # Workaround: responds 200 on GET /mcp for Perplexity validation
+  mcp-responder:
+    build:
+      context: ./mcp-responder
+      dockerfile: Dockerfile
+    container_name: cybermem-mcp-responder
+    labels:
+      - traefik.enable=true
+      - traefik.http.routers.mcp-get.entrypoints=web
+      - traefik.http.routers.mcp-get.rule=Method(`GET`) && Path(`/mcp`)
+      - traefik.http.routers.mcp-get.priority=200
+      - traefik.http.services.mcp-get.loadbalancer.server.port=8081
+    restart: unless-stopped
+
   openmemory:
-    image: ghcr.io/mikhailkogan17/cybermem-openmemory:latest
+    build:
+      context: ./openmemory
+      dockerfile: Dockerfile
     container_name: cybermem-openmemory
     ports: [] # Access via Traefik on 8626
     volumes:
@@ -19,7 +19,7 @@ PG_USER=openmemory
 PG_PASSWORD=change-me
 
 # OpenMemory API key (Optional for local mode)
-# CYBERMEM_API_KEY=
+# OM_API_KEY=
 
 # Monitoring
 PROM_RETENTION=7d
@@ -19,7 +19,7 @@ PG_USER=openmemory
 PG_PASSWORD=not-used
 
 # OpenMemory
-CYBERMEM_API_KEY=key-change-me
+OM_API_KEY=key-change-me
 
 # Monitoring (short retention for disk space)
 PROM_RETENTION=3d
@@ -17,7 +17,7 @@ PG_USER=openmemory
 PG_PASSWORD=change-me-in-production-use-secrets
 
 # OpenMemory
-CYBERMEM_API_KEY=change-me-in-production-use-secrets
+OM_API_KEY=change-me-in-production-use-secrets
 
 # Monitoring
 PROM_RETENTION=30d
@@ -0,0 +1,6 @@
+FROM node:20-alpine
+WORKDIR /app
+COPY server.js .
+USER node
+EXPOSE 8081
+CMD ["node", "server.js"]
@@ -0,0 +1,22 @@
+const http = require('http');
+
+const server = http.createServer((req, res) => {
+  if (req.method === 'GET' && req.url === '/mcp') {
+    res.writeHead(200, { 'Content-Type': 'application/json' });
+    res.end(JSON.stringify({
+      jsonrpc: '2.0',
+      result: {
+        serverInfo: { name: 'openmemory-mcp', version: '1.3.2' },
+        protocolVersion: '2025-06-18',
+        capabilities: { tools: {}, resources: {}, logging: {} },
+        message: 'Use POST /mcp for MCP requests'
+      },
+      id: null
+    }));
+  } else {
+    res.writeHead(404);
+    res.end();
+  }
+});
+
+server.listen(8081, () => console.log('MCP responder on :8081'));
@@ -0,0 +1,19 @@
+# OpenMemory using official npm package
+FROM node:20-alpine
+
+WORKDIR /app
+
+# Install openmemory-js from npm (waiting for release with MCP fix)
+RUN npm install openmemory-js@1.3.2
+
+# Create data directory
+RUN mkdir -p /data && chown -R node:node /data /app
+
+USER node
+
+EXPOSE 8080
+
+HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
+  CMD node -e "require('http').get('http://localhost:8080/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"
+
+CMD ["npm", "start", "--prefix", "/app/node_modules/openmemory-js"]
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@cybermem/mcp",
-  "version": "0.5.1",
+  "version": "0.5.3",
   "description": "CyberMem — Universal Long-Term Memory for AI Agents",
   "homepage": "https://cybermem.dev",
   "repository": {
@@ -20,13 +20,30 @@ services:
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./monitoring/traefik/traefik.yml:/etc/traefik/traefik.yml:ro
+      - ./monitoring/traefik/dynamic:/etc/traefik/dynamic:ro
      - traefik-logs:/var/log/traefik
    labels:
      - traefik.enable=true
    restart: unless-stopped
 
+  # Workaround: responds 200 on GET /mcp for Perplexity validation
+  mcp-responder:
+    build:
+      context: ./mcp-responder
+      dockerfile: Dockerfile
+    container_name: cybermem-mcp-responder
+    labels:
+      - traefik.enable=true
+      - traefik.http.routers.mcp-get.entrypoints=web
+      - traefik.http.routers.mcp-get.rule=Method(`GET`) && Path(`/mcp`)
+      - traefik.http.routers.mcp-get.priority=200
+      - traefik.http.services.mcp-get.loadbalancer.server.port=8081
+    restart: unless-stopped
+
   openmemory:
-    image: ghcr.io/mikhailkogan17/cybermem-openmemory:latest
+    build:
+      context: ./openmemory
+      dockerfile: Dockerfile
     container_name: cybermem-openmemory
     ports: [] # Access via Traefik on 8626
     volumes:
@@ -19,7 +19,7 @@ PG_USER=openmemory
 PG_PASSWORD=change-me
 
 # OpenMemory API key (Optional for local mode)
-# CYBERMEM_API_KEY=
+# OM_API_KEY=
 
 # Monitoring
 PROM_RETENTION=7d
@@ -19,7 +19,7 @@ PG_USER=openmemory
 PG_PASSWORD=not-used
 
 # OpenMemory
-CYBERMEM_API_KEY=key-change-me
+OM_API_KEY=key-change-me
 
 # Monitoring (short retention for disk space)
 PROM_RETENTION=3d
@@ -17,7 +17,7 @@ PG_USER=openmemory
 PG_PASSWORD=change-me-in-production-use-secrets
 
 # OpenMemory
-CYBERMEM_API_KEY=change-me-in-production-use-secrets
+OM_API_KEY=change-me-in-production-use-secrets
 
 # Monitoring
 PROM_RETENTION=30d
@@ -0,0 +1,6 @@
+FROM node:20-alpine
+WORKDIR /app
+COPY server.js .
+USER node
+EXPOSE 8081
+CMD ["node", "server.js"]
@@ -0,0 +1,22 @@
+const http = require('http');
+
+const server = http.createServer((req, res) => {
+  if (req.method === 'GET' && req.url === '/mcp') {
+    res.writeHead(200, { 'Content-Type': 'application/json' });
+    res.end(JSON.stringify({
+      jsonrpc: '2.0',
+      result: {
+        serverInfo: { name: 'openmemory-mcp', version: '1.3.2' },
+        protocolVersion: '2025-06-18',
+        capabilities: { tools: {}, resources: {}, logging: {} },
+        message: 'Use POST /mcp for MCP requests'
+      },
+      id: null
+    }));
+  } else {
+    res.writeHead(404);
+    res.end();
+  }
+});
+
+server.listen(8081, () => console.log('MCP responder on :8081'));
@@ -0,0 +1,19 @@
+# OpenMemory using official npm package
+FROM node:20-alpine
+
+WORKDIR /app
+
+# Install openmemory-js from npm (waiting for release with MCP fix)
+RUN npm install openmemory-js@1.3.2
+
+# Create data directory
+RUN mkdir -p /data && chown -R node:node /data /app
+
+USER node
+
+EXPOSE 8080
+
+HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
+  CMD node -e "require('http').get('http://localhost:8080/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"
+
+CMD ["npm", "start", "--prefix", "/app/node_modules/openmemory-js"]