@levalicious/server-memory 0.0.12 → 0.0.14

This diff reflects the contents of publicly released package versions as published to a supported registry and is provided for informational purposes only.
@@ -0,0 +1,343 @@
+/*
+ * Memory File - mmap-based arena allocator
+ *
+ * Originally from biscuit/server; adapted for MCP memory server.
+ * Added: flock-based concurrency, read/write helpers.
+ */
+
+#include "memoryfile.h"
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+
+/* =========================================================================
+ * Pointer conversion
+ * ========================================================================= */
+
+void *memfile_ptr(memfile_t *mf, u64 offset) {
+    if (offset == 0 || offset >= mf->mmap_size) return NULL;
+    return (u8*)mf->mmap_base + offset;
+}
+
+/* =========================================================================
+ * Direct read/write at offset
+ * ========================================================================= */
+
+int memfile_read(memfile_t *mf, u64 offset, void *buf, u64 len) {
+    if (offset + len > mf->mmap_size) return -1;
+    void *src = memfile_ptr(mf, offset);
+    if (!src) return -1;
+    memcpy(buf, src, len);
+    return 0;
+}
+
+int memfile_write(memfile_t *mf, u64 offset, const void *buf, u64 len) {
+    if (offset + len > mf->mmap_size) return -1;
+    void *dst = memfile_ptr(mf, offset);
+    if (!dst) return -1;
+    memcpy(dst, buf, len);
+    return 0;
+}
+
+/* =========================================================================
+ * File growth via mremap
+ * ========================================================================= */
+
+static int memfile_remap(memfile_t *mf, size_t new_size) {
+    if (ftruncate(mf->fd, new_size) < 0) {
+        return -1;
+    }
+
+    void *new_base = mremap(mf->mmap_base, mf->mmap_size, new_size, MREMAP_MAYMOVE);
+    if (new_base == MAP_FAILED) {
+        return -1;
+    }
+
+    mf->mmap_base = new_base;
+    mf->mmap_size = new_size;
+    mf->header = (memfile_header_t*)new_base;
+    mf->header->file_size = new_size;
+
+    return 0;
+}
+
+static int memfile_ensure_space(memfile_t *mf, u64 needed) {
+    if (mf->header->allocated + needed <= mf->header->file_size) {
+        return 0;
+    }
+
+    size_t new_size = mf->mmap_size * 2;
+    if (new_size < mf->header->allocated + needed) {
+        new_size = mf->header->allocated + needed + 4096;
+    }
+
+    return memfile_remap(mf, new_size);
+}
+
+/* =========================================================================
+ * Allocation
+ * ========================================================================= */
+
+u64 memfile_alloc(memfile_t *mf, u64 size) {
+    u64 total_size = size + sizeof(memfile_alloc_t);
+
+    /* Align to 8 bytes */
+    total_size = (total_size + 7) & ~7ULL;
+
+    /* Try free list first (first-fit) */
+    u64 prev_offset = 0;
+    u64 free_offset = mf->header->free_list_head;
+
+    while (free_offset != 0) {
+        memfile_free_t *free_block = (memfile_free_t*)memfile_ptr(mf, free_offset);
+
+        if (free_block->size >= total_size) {
+            u64 remaining = free_block->size - total_size;
+
+            if (remaining >= sizeof(memfile_free_t) + 8) {
+                /* Split the block */
+                u64 new_free_offset = free_offset + total_size;
+                memfile_free_t *new_free = (memfile_free_t*)memfile_ptr(mf, new_free_offset);
+                new_free->size = remaining;
+                new_free->next = free_block->next;
+
+                if (prev_offset == 0) {
+                    mf->header->free_list_head = new_free_offset;
+                } else {
+                    memfile_free_t *prev = (memfile_free_t*)memfile_ptr(mf, prev_offset);
+                    prev->next = new_free_offset;
+                }
+            } else {
+                /* Use entire block (avoid tiny leftover) */
+                total_size = free_block->size;
+
+                if (prev_offset == 0) {
+                    mf->header->free_list_head = free_block->next;
+                } else {
+                    memfile_free_t *prev = (memfile_free_t*)memfile_ptr(mf, prev_offset);
+                    prev->next = free_block->next;
+                }
+            }
+
+            memfile_alloc_t *alloc = (memfile_alloc_t*)memfile_ptr(mf, free_offset);
+            alloc->size = total_size;
+
+            return free_offset + sizeof(memfile_alloc_t);
+        }
+
+        prev_offset = free_offset;
+        free_offset = free_block->next;
+    }
+
+    /* No suitable free block - bump allocate from end */
+    if (memfile_ensure_space(mf, total_size) < 0) {
+        return 0;
+    }
+
+    u64 offset = mf->header->allocated;
+    memfile_alloc_t *alloc = (memfile_alloc_t*)memfile_ptr(mf, offset);
+    alloc->size = total_size;
+    mf->header->allocated += total_size;
+
+    return offset + sizeof(memfile_alloc_t);
+}
+
+void memfile_free(memfile_t *mf, u64 offset) {
+    if (offset == 0) return;
+
+    u64 alloc_offset = offset - sizeof(memfile_alloc_t);
+    memfile_alloc_t *alloc = (memfile_alloc_t*)memfile_ptr(mf, alloc_offset);
+
+    /* Free list node lives in the freed space itself */
+    memfile_free_t *free_block = (memfile_free_t*)alloc;
+    free_block->size = alloc->size;
+    free_block->next = mf->header->free_list_head;
+
+    mf->header->free_list_head = alloc_offset;
+}
+
+/* =========================================================================
+ * Coalescing - merge adjacent free blocks
+ * ========================================================================= */
+
+void memfile_coalesce(memfile_t *mf) {
+    if (mf->header->free_list_head == 0) return;
+
+    /* Count free blocks */
+    u32 free_count = 0;
+    u64 offset = mf->header->free_list_head;
+    while (offset != 0) {
+        free_count++;
+        memfile_free_t *block = (memfile_free_t*)memfile_ptr(mf, offset);
+        offset = block->next;
+    }
+
+    if (free_count < 2) return;
+
+    /* Collect into temp array */
+    struct { u64 offset; u64 size; } *blocks = malloc(free_count * sizeof(*blocks));
+
+    offset = mf->header->free_list_head;
+    for (u32 i = 0; i < free_count; i++) {
+        memfile_free_t *block = (memfile_free_t*)memfile_ptr(mf, offset);
+        blocks[i].offset = offset;
+        blocks[i].size = block->size;
+        offset = block->next;
+    }
+
+    /* Sort by offset (insertion sort - fine for expected small N) */
+    for (u32 i = 1; i < free_count; i++) {
+        u64 key_off = blocks[i].offset;
+        u64 key_size = blocks[i].size;
+        int j = i - 1;
+        while (j >= 0 && blocks[j].offset > key_off) {
+            blocks[j + 1] = blocks[j];
+            j--;
+        }
+        blocks[j + 1].offset = key_off;
+        blocks[j + 1].size = key_size;
+    }
+
+    /* Merge adjacent */
+    u32 write_idx = 0;
+    for (u32 i = 0; i < free_count; i++) {
+        if (write_idx > 0 &&
+            blocks[write_idx - 1].offset + blocks[write_idx - 1].size == blocks[i].offset) {
+            blocks[write_idx - 1].size += blocks[i].size;
+        } else {
+            if (write_idx != i) {
+                blocks[write_idx] = blocks[i];
+            }
+            write_idx++;
+        }
+    }
+
+    /* Rebuild free list in offset order */
+    mf->header->free_list_head = blocks[0].offset;
+    for (u32 i = 0; i < write_idx; i++) {
+        memfile_free_t *block = (memfile_free_t*)memfile_ptr(mf, blocks[i].offset);
+        block->size = blocks[i].size;
+        block->next = (i + 1 < write_idx) ? blocks[i + 1].offset : 0;
+    }
+
+    free(blocks);
+}
+
+/* =========================================================================
+ * Refresh mapping after another process grows the file
+ * ========================================================================= */
+
+int memfile_refresh(memfile_t *mf) {
+    struct stat st;
+    if (fstat(mf->fd, &st) < 0) return -1;
+
+    size_t actual_size = (size_t)st.st_size;
+    if (actual_size <= mf->mmap_size) return 0; /* No growth detected */
+
+    /* File grew - remap to cover the new size */
+    void *new_base = mremap(mf->mmap_base, mf->mmap_size, actual_size, MREMAP_MAYMOVE);
+    if (new_base == MAP_FAILED) return -1;
+
+    mf->mmap_base = new_base;
+    mf->mmap_size = actual_size;
+    mf->header = (memfile_header_t*)new_base;
+
+    return 0;
+}
+
+/* =========================================================================
+ * Concurrency - POSIX flock
+ * ========================================================================= */
+
+int memfile_lock_shared(memfile_t *mf) {
+    return flock(mf->fd, LOCK_SH);
+}
+
+int memfile_lock_exclusive(memfile_t *mf) {
+    return flock(mf->fd, LOCK_EX);
+}
+
+int memfile_unlock(memfile_t *mf) {
+    return flock(mf->fd, LOCK_UN);
+}
+
+/* =========================================================================
+ * Open/close
+ * ========================================================================= */
+
+memfile_t *memfile_open(const char *path, size_t initial_size) {
+    memfile_t *mf = calloc(1, sizeof(memfile_t));
+    if (!mf) return NULL;
+    mf->path = strdup(path);
+
+    struct stat st;
+    int exists = (stat(path, &st) == 0 && st.st_size > 0);
+
+    if (exists) {
+        mf->fd = open(path, O_RDWR);
+        if (mf->fd < 0) goto fail;
+
+        mf->mmap_size = st.st_size;
+        mf->mmap_base = mmap(NULL, mf->mmap_size, PROT_READ | PROT_WRITE,
+                             MAP_SHARED, mf->fd, 0);
+        if (mf->mmap_base == MAP_FAILED) goto fail_fd;
+
+        mf->header = (memfile_header_t*)mf->mmap_base;
+
+        if (mf->header->magic != MEMFILE_MAGIC) {
+            munmap(mf->mmap_base, mf->mmap_size);
+            goto fail_fd;
+        }
+    } else {
+        mf->fd = open(path, O_RDWR | O_CREAT, 0644);
+        if (mf->fd < 0) goto fail;
+
+        if (initial_size < sizeof(memfile_header_t) + 64) {
+            initial_size = 4096;
+        }
+
+        if (ftruncate(mf->fd, initial_size) < 0) goto fail_fd;
+
+        mf->mmap_size = initial_size;
+        mf->mmap_base = mmap(NULL, mf->mmap_size, PROT_READ | PROT_WRITE,
+                             MAP_SHARED, mf->fd, 0);
+        if (mf->mmap_base == MAP_FAILED) {
+            unlink(path);
+            goto fail_fd;
+        }
+
+        mf->header = (memfile_header_t*)mf->mmap_base;
+        mf->header->magic = MEMFILE_MAGIC;
+        mf->header->version = MEMFILE_VERSION;
+        mf->header->file_size = initial_size;
+        mf->header->allocated = sizeof(memfile_header_t);
+        mf->header->free_list_head = 0;
+    }
+
+    return mf;
+
+fail_fd:
+    close(mf->fd);
+fail:
+    free(mf->path);
+    free(mf);
+    return NULL;
+}
+
+void memfile_sync(memfile_t *mf) {
+    if (!mf || mf->closed || !mf->mmap_base) return;
+    msync(mf->mmap_base, mf->mmap_size, MS_SYNC);
+}
+
+void memfile_close(memfile_t *mf) {
+    if (!mf || mf->closed) return;
+    memfile_sync(mf);   /* sync before marking closed, or the guard above skips it */
+    mf->closed = 1;
+    munmap(mf->mmap_base, mf->mmap_size);
+    close(mf->fd);
+    free(mf->path);
+}
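
The allocator above hands out offsets rather than pointers because memfile_alloc can grow the file with mremap and move the entire mapping. The sketch below (not part of the package) illustrates the calling discipline this implies: keep offsets across allocations and only re-derive pointers with memfile_ptr afterwards; the file name example.mem is arbitrary.

/* Sketch only: offsets stay valid across growth, raw pointers do not. */
#include "memoryfile.h"
#include <stdio.h>
#include <string.h>

int main(void) {
    memfile_t *mf = memfile_open("example.mem", 4096);   /* path chosen for illustration */
    if (!mf) return 1;

    u64 a = memfile_alloc(mf, 32);          /* store the offset, not memfile_ptr(mf, a) */
    if (a == 0) { memfile_close(mf); return 1; }
    memfile_write(mf, a, "hello", 6);

    u64 b = memfile_alloc(mf, 1 << 20);     /* large enough that mremap may move mmap_base */
    (void)b;

    char buf[8] = {0};
    memfile_read(mf, a, buf, 6);            /* offset 'a' is still valid after growth */
    printf("%s\n", buf);

    memfile_sync(mf);
    memfile_close(mf);
    return 0;
}
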
@@ -0,0 +1,82 @@
1
+ /*
2
+ * Memory File - mmap-based arena allocator with automatic growth
3
+ *
4
+ * The file header IS the arena struct (packed, at offset 0).
5
+ * All allocations return offsets, not pointers.
6
+ * Pointers become invalid after mremap, offsets remain valid.
7
+ *
8
+ * Originally from biscuit/server; adapted for MCP memory server.
9
+ */
10
+
11
+ #ifndef MEMORYFILE_H
12
+ #define MEMORYFILE_H
13
+
14
+ #include <stdint.h>
15
+ #include <stddef.h>
16
+
17
+ typedef uint8_t u8;
18
+ typedef uint16_t u16;
19
+ typedef uint32_t u32;
20
+ typedef uint64_t u64;
21
+
22
+ #define MEMFILE_MAGIC 0x4D454D46 /* "MEMF" */
23
+ #define MEMFILE_VERSION 1
24
+
25
+ /* File header - lives at offset 0, IS the arena */
26
+ typedef struct __attribute__((packed)) {
27
+ u32 magic;
28
+ u32 version;
29
+ u64 file_size; /* Current file size */
30
+ u64 allocated; /* Bump pointer: next allocation from end */
31
+ u64 free_list_head; /* Offset to first free block (0=none) */
32
+ } memfile_header_t;
33
+
34
+ /* Free block header - lives IN the free space it describes */
35
+ typedef struct __attribute__((packed)) {
36
+ u64 size; /* Size of this free block (including header) */
37
+ u64 next; /* Offset to next free block (0=none) */
38
+ } memfile_free_t;
39
+
40
+ /* Allocation header - immediately before each allocated block */
41
+ typedef struct __attribute__((packed)) {
42
+ u64 size; /* Size of allocation (including this header) */
43
+ } memfile_alloc_t;
44
+
45
+ /* Handle for working with memory file */
46
+ typedef struct {
47
+ int fd;
48
+ char *path;
49
+ void *mmap_base; /* Base address of mmap */
50
+ size_t mmap_size; /* Current mmap size */
51
+ memfile_header_t *header; /* Points to offset 0 */
52
+ int closed; /* Set after close to prevent double-free */
53
+ } memfile_t;
54
+
55
+ /* Lifecycle */
56
+ memfile_t *memfile_open(const char *path, size_t initial_size);
57
+ void memfile_close(memfile_t *mf);
58
+ void memfile_sync(memfile_t *mf);
59
+
60
+ /* Allocation - returns offset from file start (0 = failed) */
61
+ u64 memfile_alloc(memfile_t *mf, u64 size);
62
+ void memfile_free(memfile_t *mf, u64 offset);
63
+
64
+ /* Defragmentation */
65
+ void memfile_coalesce(memfile_t *mf);
66
+
67
+ /* Direct read/write at offset */
68
+ int memfile_read(memfile_t *mf, u64 offset, void *buf, u64 len);
69
+ int memfile_write(memfile_t *mf, u64 offset, const void *buf, u64 len);
70
+
71
+ /* Convert offset to pointer (CAUTION: invalid after alloc that triggers remap) */
72
+ void *memfile_ptr(memfile_t *mf, u64 offset);
73
+
74
+ /* Refresh mapping if the file was grown by another process */
75
+ int memfile_refresh(memfile_t *mf);
76
+
77
+ /* Concurrency - POSIX flock on the underlying fd */
78
+ int memfile_lock_shared(memfile_t *mf);
79
+ int memfile_lock_exclusive(memfile_t *mf);
80
+ int memfile_unlock(memfile_t *mf);
81
+
82
+ #endif /* MEMORYFILE_H */
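
The header declares flock-based locking and memfile_refresh for coordinating multiple processes on one file, but not how they compose. One plausible write path, sketched here as an assumption rather than the package's actual usage, is: take the exclusive lock, refresh the mapping in case another process grew the file, allocate and write, sync, then unlock. The helper name append_record is invented for illustration.

/* Sketch only: one way the locking, refresh, and sync helpers could be combined. */
#include "memoryfile.h"

/* Returns the new record's offset, or 0 on failure (matching memfile_alloc). */
u64 append_record(memfile_t *mf, const void *data, u64 len) {
    if (memfile_lock_exclusive(mf) < 0) return 0;  /* serialize writers across processes */

    memfile_refresh(mf);                           /* adopt growth done by another process */

    u64 off = memfile_alloc(mf, len);
    if (off != 0 && memfile_write(mf, off, data, len) != 0) off = 0;

    memfile_sync(mf);                              /* flush dirty pages before releasing the lock */
    memfile_unlock(mf);
    return off;
}
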
package/package.json CHANGED
@@ -1,21 +1,26 @@
 {
   "name": "@levalicious/server-memory",
-  "version": "0.0.12",
+  "version": "0.0.14",
   "description": "MCP server for enabling memory for Claude through a knowledge graph",
   "license": "MIT",
   "author": "Levalicious",
   "homepage": "https://modelcontextprotocol.io",
   "bugs": "https://github.com/modelcontextprotocol/servers/issues",
   "type": "module",
+  "gypfile": true,
   "bin": {
     "mcp-server-memory": "dist/index.js"
   },
   "files": [
-    "dist"
+    "dist",
+    "binding.gyp",
+    "native"
   ],
   "scripts": {
     "build": "node-gyp rebuild && tsc && shx chmod +x dist/*.js",
     "build:native": "node-gyp rebuild",
+    "lint": "eslint .",
+    "lint:fix": "eslint . --fix",
     "prepare": "husky && npm run build",
     "watch": "tsc --watch",
     "test": "NODE_OPTIONS='--experimental-vm-modules' jest"
@@ -28,10 +33,12 @@
     "@types/jest": "^30.0.0",
     "@types/node": "^25",
     "@types/proper-lockfile": "^4.1.4",
+    "eslint": "^10.0.0",
     "husky": "^9.1.7",
     "jest": "^30.2.0",
     "shx": "^0.4.0",
     "ts-jest": "^29.4.5",
-    "typescript": "^5.6.2"
+    "typescript": "^5.6.2",
+    "typescript-eslint": "^8.55.0"
   }
 }