@levalicious/server-memory 0.0.12 → 0.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/binding.gyp +16 -0
- package/dist/server.js +1 -1
- package/dist/src/memoryfile.js +15 -2
- package/native/binding.c +340 -0
- package/native/memoryfile.c +343 -0
- package/native/memoryfile.h +82 -0
- package/package.json +5 -2
package/binding.gyp
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
{
|
|
2
|
+
"targets": [
|
|
3
|
+
{
|
|
4
|
+
"target_name": "memoryfile",
|
|
5
|
+
"sources": [
|
|
6
|
+
"native/memoryfile.c",
|
|
7
|
+
"native/binding.c"
|
|
8
|
+
],
|
|
9
|
+
"include_dirs": [
|
|
10
|
+
"native"
|
|
11
|
+
],
|
|
12
|
+
"cflags": ["-std=c11", "-Wall", "-Wextra", "-O2"],
|
|
13
|
+
"defines": ["_GNU_SOURCE"]
|
|
14
|
+
}
|
|
15
|
+
]
|
|
16
|
+
}
|
package/dist/server.js
CHANGED
package/dist/src/memoryfile.js
CHANGED
|
@@ -5,12 +5,25 @@
|
|
|
5
5
|
* Buffers passed to/from the native layer are Node Buffers.
|
|
6
6
|
*/
|
|
7
7
|
import { createRequire } from 'module';
|
|
8
|
+
import { existsSync } from 'fs';
|
|
8
9
|
import { dirname, join } from 'path';
|
|
9
10
|
import { fileURLToPath } from 'url';
|
|
10
11
|
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
11
12
|
const require = createRequire(import.meta.url);
|
|
12
|
-
//
|
|
13
|
-
|
|
13
|
+
// Walk up from __dirname to find the package root containing build/Release/memoryfile.node.
|
|
14
|
+
// Works from source (src/), compiled (dist/src/), and npx cache contexts.
|
|
15
|
+
// Locate the compiled native addon (build/Release/memoryfile.node) by
// scanning each ancestor directory of this module until the filesystem
// root. Covers source (src/), compiled (dist/src/), and npx cache layouts.
function findNative() {
  const { root } = require('path').parse(__dirname);
  for (let dir = __dirname; dir !== root; dir = dirname(dir)) {
    const candidate = join(dir, 'build', 'Release', 'memoryfile.node');
    if (existsSync(candidate)) {
      return candidate;
    }
  }
  throw new Error('Could not find native memoryfile.node — was the C addon built? Run: node-gyp rebuild');
}
|
|
26
|
+
const native = require(findNative());
|
|
14
27
|
export class MemoryFile {
|
|
15
28
|
handle;
|
|
16
29
|
closed = false;
|
package/native/binding.c
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* N-API binding for memoryfile
|
|
3
|
+
*
|
|
4
|
+
* Exposes the C memoryfile allocator to Node.js.
|
|
5
|
+
* Each MemoryFile handle is wrapped in a pointerless external.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
#define NAPI_VERSION 8
|
|
9
|
+
#include <node_api.h>
|
|
10
|
+
#include <stdlib.h>
|
|
11
|
+
#include <string.h>
|
|
12
|
+
#include "memoryfile.h"
|
|
13
|
+
|
|
14
|
+
/* =========================================================================
|
|
15
|
+
* Helpers
|
|
16
|
+
* ========================================================================= */
|
|
17
|
+
|
|
18
|
+
#define NAPI_CALL(call) \
|
|
19
|
+
do { \
|
|
20
|
+
napi_status status = (call); \
|
|
21
|
+
if (status != napi_ok) { \
|
|
22
|
+
napi_throw_error(env, NULL, "N-API call failed: " #call); \
|
|
23
|
+
return NULL; \
|
|
24
|
+
} \
|
|
25
|
+
} while (0)
|
|
26
|
+
|
|
27
|
+
static napi_value make_u64(napi_env env, u64 val) {
|
|
28
|
+
/* Use BigInt for u64 to avoid precision loss */
|
|
29
|
+
napi_value result;
|
|
30
|
+
NAPI_CALL(napi_create_bigint_uint64(env, val, &result));
|
|
31
|
+
return result;
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/*
 * Extract a u64 from a JS BigInt argument.
 *
 * Returns 0 when the value is not a BigInt or does not fit losslessly
 * in 64 bits. The original ignored the napi status and the `lossless`
 * flag, so a bad argument returned an *uninitialized* stack value;
 * failing closed to 0 is safe because callers treat offset 0 as "null".
 */
static u64 get_u64(napi_env env, napi_value val) {
  bool lossless = false;
  uint64_t result = 0;
  if (napi_get_value_bigint_uint64(env, val, &result, &lossless) != napi_ok ||
      !lossless) {
    return 0;
  }
  return result;
}
|
|
40
|
+
|
|
41
|
+
static void mf_release(napi_env env, void *data, void *hint) {
|
|
42
|
+
(void)env; (void)hint;
|
|
43
|
+
memfile_t *mf = (memfile_t*)data;
|
|
44
|
+
if (mf) {
|
|
45
|
+
memfile_close(mf); /* no-op if already closed */
|
|
46
|
+
mf->mmap_base = NULL;
|
|
47
|
+
mf->header = NULL;
|
|
48
|
+
free(mf);
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
static memfile_t *unwrap_mf(napi_env env, napi_value val) {
|
|
53
|
+
memfile_t *mf;
|
|
54
|
+
napi_get_value_external(env, val, (void**)&mf);
|
|
55
|
+
return mf;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
/* =========================================================================
|
|
59
|
+
* memfile_open(path: string, initialSize: number) => external
|
|
60
|
+
* ========================================================================= */
|
|
61
|
+
|
|
62
|
+
static napi_value n_memfile_open(napi_env env, napi_callback_info info) {
|
|
63
|
+
size_t argc = 2;
|
|
64
|
+
napi_value argv[2];
|
|
65
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
66
|
+
|
|
67
|
+
/* Get path string */
|
|
68
|
+
char path[4096];
|
|
69
|
+
size_t path_len;
|
|
70
|
+
NAPI_CALL(napi_get_value_string_utf8(env, argv[0], path, sizeof(path), &path_len));
|
|
71
|
+
|
|
72
|
+
/* Get initial size */
|
|
73
|
+
uint32_t initial_size;
|
|
74
|
+
NAPI_CALL(napi_get_value_uint32(env, argv[1], &initial_size));
|
|
75
|
+
|
|
76
|
+
memfile_t *mf = memfile_open(path, (size_t)initial_size);
|
|
77
|
+
if (!mf) {
|
|
78
|
+
napi_throw_error(env, NULL, "memfile_open failed");
|
|
79
|
+
return NULL;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
napi_value result;
|
|
83
|
+
NAPI_CALL(napi_create_external(env, mf, mf_release, NULL, &result));
|
|
84
|
+
return result;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
/* =========================================================================
|
|
88
|
+
* memfile_close(handle: external) => void
|
|
89
|
+
* ========================================================================= */
|
|
90
|
+
|
|
91
|
+
static napi_value n_memfile_close(napi_env env, napi_callback_info info) {
|
|
92
|
+
size_t argc = 1;
|
|
93
|
+
napi_value argv[1];
|
|
94
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
95
|
+
|
|
96
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
97
|
+
memfile_close(mf);
|
|
98
|
+
|
|
99
|
+
return NULL;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
/* =========================================================================
|
|
103
|
+
* memfile_sync(handle: external) => void
|
|
104
|
+
* ========================================================================= */
|
|
105
|
+
|
|
106
|
+
static napi_value n_memfile_sync(napi_env env, napi_callback_info info) {
|
|
107
|
+
size_t argc = 1;
|
|
108
|
+
napi_value argv[1];
|
|
109
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
110
|
+
|
|
111
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
112
|
+
memfile_sync(mf);
|
|
113
|
+
|
|
114
|
+
return NULL;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
/* =========================================================================
|
|
118
|
+
* memfile_alloc(handle: external, size: bigint) => bigint (offset)
|
|
119
|
+
* ========================================================================= */
|
|
120
|
+
|
|
121
|
+
static napi_value n_memfile_alloc(napi_env env, napi_callback_info info) {
|
|
122
|
+
size_t argc = 2;
|
|
123
|
+
napi_value argv[2];
|
|
124
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
125
|
+
|
|
126
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
127
|
+
u64 size = get_u64(env, argv[1]);
|
|
128
|
+
|
|
129
|
+
u64 offset = memfile_alloc(mf, size);
|
|
130
|
+
return make_u64(env, offset);
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
/* =========================================================================
|
|
134
|
+
* memfile_free(handle: external, offset: bigint) => void
|
|
135
|
+
* ========================================================================= */
|
|
136
|
+
|
|
137
|
+
static napi_value n_memfile_free(napi_env env, napi_callback_info info) {
|
|
138
|
+
size_t argc = 2;
|
|
139
|
+
napi_value argv[2];
|
|
140
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
141
|
+
|
|
142
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
143
|
+
u64 offset = get_u64(env, argv[1]);
|
|
144
|
+
|
|
145
|
+
memfile_free(mf, offset);
|
|
146
|
+
|
|
147
|
+
return NULL;
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
/* =========================================================================
|
|
151
|
+
* memfile_coalesce(handle: external) => void
|
|
152
|
+
* ========================================================================= */
|
|
153
|
+
|
|
154
|
+
static napi_value n_memfile_coalesce(napi_env env, napi_callback_info info) {
|
|
155
|
+
size_t argc = 1;
|
|
156
|
+
napi_value argv[1];
|
|
157
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
158
|
+
|
|
159
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
160
|
+
memfile_coalesce(mf);
|
|
161
|
+
|
|
162
|
+
return NULL;
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
/* =========================================================================
|
|
166
|
+
* memfile_read(handle: external, offset: bigint, length: bigint) => Buffer
|
|
167
|
+
* ========================================================================= */
|
|
168
|
+
|
|
169
|
+
static napi_value n_memfile_read(napi_env env, napi_callback_info info) {
|
|
170
|
+
size_t argc = 3;
|
|
171
|
+
napi_value argv[3];
|
|
172
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
173
|
+
|
|
174
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
175
|
+
u64 offset = get_u64(env, argv[1]);
|
|
176
|
+
u64 len = get_u64(env, argv[2]);
|
|
177
|
+
|
|
178
|
+
/* Create a Node Buffer and copy data into it */
|
|
179
|
+
void *buf_data;
|
|
180
|
+
napi_value result;
|
|
181
|
+
NAPI_CALL(napi_create_buffer(env, (size_t)len, &buf_data, &result));
|
|
182
|
+
|
|
183
|
+
if (memfile_read(mf, offset, buf_data, len) < 0) {
|
|
184
|
+
napi_throw_error(env, NULL, "memfile_read: offset/length out of bounds");
|
|
185
|
+
return NULL;
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
return result;
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
/* =========================================================================
|
|
192
|
+
* memfile_write(handle: external, offset: bigint, data: Buffer) => void
|
|
193
|
+
* ========================================================================= */
|
|
194
|
+
|
|
195
|
+
static napi_value n_memfile_write(napi_env env, napi_callback_info info) {
|
|
196
|
+
size_t argc = 3;
|
|
197
|
+
napi_value argv[3];
|
|
198
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
199
|
+
|
|
200
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
201
|
+
u64 offset = get_u64(env, argv[1]);
|
|
202
|
+
|
|
203
|
+
/* Get Buffer data */
|
|
204
|
+
void *buf_data;
|
|
205
|
+
size_t buf_len;
|
|
206
|
+
NAPI_CALL(napi_get_buffer_info(env, argv[2], &buf_data, &buf_len));
|
|
207
|
+
|
|
208
|
+
if (memfile_write(mf, offset, buf_data, buf_len) < 0) {
|
|
209
|
+
napi_throw_error(env, NULL, "memfile_write: offset/length out of bounds");
|
|
210
|
+
return NULL;
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
return NULL;
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
/* =========================================================================
|
|
217
|
+
* memfile_lock_shared(handle: external) => void
|
|
218
|
+
* ========================================================================= */
|
|
219
|
+
|
|
220
|
+
static napi_value n_memfile_lock_shared(napi_env env, napi_callback_info info) {
|
|
221
|
+
size_t argc = 1;
|
|
222
|
+
napi_value argv[1];
|
|
223
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
224
|
+
|
|
225
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
226
|
+
if (memfile_lock_shared(mf) < 0) {
|
|
227
|
+
napi_throw_error(env, NULL, "memfile_lock_shared failed");
|
|
228
|
+
return NULL;
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
return NULL;
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
/* =========================================================================
|
|
235
|
+
* memfile_lock_exclusive(handle: external) => void
|
|
236
|
+
* ========================================================================= */
|
|
237
|
+
|
|
238
|
+
static napi_value n_memfile_lock_exclusive(napi_env env, napi_callback_info info) {
|
|
239
|
+
size_t argc = 1;
|
|
240
|
+
napi_value argv[1];
|
|
241
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
242
|
+
|
|
243
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
244
|
+
if (memfile_lock_exclusive(mf) < 0) {
|
|
245
|
+
napi_throw_error(env, NULL, "memfile_lock_exclusive failed");
|
|
246
|
+
return NULL;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
return NULL;
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
/* =========================================================================
|
|
253
|
+
* memfile_unlock(handle: external) => void
|
|
254
|
+
* ========================================================================= */
|
|
255
|
+
|
|
256
|
+
static napi_value n_memfile_unlock(napi_env env, napi_callback_info info) {
|
|
257
|
+
size_t argc = 1;
|
|
258
|
+
napi_value argv[1];
|
|
259
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
260
|
+
|
|
261
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
262
|
+
if (memfile_unlock(mf) < 0) {
|
|
263
|
+
napi_throw_error(env, NULL, "memfile_unlock failed");
|
|
264
|
+
return NULL;
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
return NULL;
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
/* =========================================================================
|
|
271
|
+
* memfile_stats(handle: external) => { fileSize, allocated, freeListHead }
|
|
272
|
+
* ========================================================================= */
|
|
273
|
+
|
|
274
|
+
static napi_value n_memfile_stats(napi_env env, napi_callback_info info) {
|
|
275
|
+
size_t argc = 1;
|
|
276
|
+
napi_value argv[1];
|
|
277
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
278
|
+
|
|
279
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
280
|
+
|
|
281
|
+
napi_value result;
|
|
282
|
+
NAPI_CALL(napi_create_object(env, &result));
|
|
283
|
+
|
|
284
|
+
napi_value v;
|
|
285
|
+
NAPI_CALL(napi_create_bigint_uint64(env, mf->header->file_size, &v));
|
|
286
|
+
NAPI_CALL(napi_set_named_property(env, result, "fileSize", v));
|
|
287
|
+
|
|
288
|
+
NAPI_CALL(napi_create_bigint_uint64(env, mf->header->allocated, &v));
|
|
289
|
+
NAPI_CALL(napi_set_named_property(env, result, "allocated", v));
|
|
290
|
+
|
|
291
|
+
NAPI_CALL(napi_create_bigint_uint64(env, mf->header->free_list_head, &v));
|
|
292
|
+
NAPI_CALL(napi_set_named_property(env, result, "freeListHead", v));
|
|
293
|
+
|
|
294
|
+
return result;
|
|
295
|
+
}
|
|
296
|
+
|
|
297
|
+
/* =========================================================================
|
|
298
|
+
* memfile_refresh(handle: external) => void
|
|
299
|
+
* ========================================================================= */
|
|
300
|
+
|
|
301
|
+
static napi_value n_memfile_refresh(napi_env env, napi_callback_info info) {
|
|
302
|
+
size_t argc = 1;
|
|
303
|
+
napi_value argv[1];
|
|
304
|
+
NAPI_CALL(napi_get_cb_info(env, info, &argc, argv, NULL, NULL));
|
|
305
|
+
|
|
306
|
+
memfile_t *mf = unwrap_mf(env, argv[0]);
|
|
307
|
+
if (memfile_refresh(mf) < 0) {
|
|
308
|
+
napi_throw_error(env, NULL, "memfile_refresh failed");
|
|
309
|
+
return NULL;
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
return NULL;
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
/* =========================================================================
|
|
316
|
+
* Module init
|
|
317
|
+
* ========================================================================= */
|
|
318
|
+
|
|
319
|
+
#define EXPORT_FN(name, fn) do { \
|
|
320
|
+
napi_value _fn; \
|
|
321
|
+
napi_create_function(env, name, NAPI_AUTO_LENGTH, fn, NULL, &_fn); \
|
|
322
|
+
napi_set_named_property(env, exports, name, _fn); \
|
|
323
|
+
} while(0)
|
|
324
|
+
|
|
325
|
+
NAPI_MODULE_INIT(/* napi_env env, napi_value exports */) {
|
|
326
|
+
EXPORT_FN("open", n_memfile_open);
|
|
327
|
+
EXPORT_FN("close", n_memfile_close);
|
|
328
|
+
EXPORT_FN("sync", n_memfile_sync);
|
|
329
|
+
EXPORT_FN("alloc", n_memfile_alloc);
|
|
330
|
+
EXPORT_FN("free", n_memfile_free);
|
|
331
|
+
EXPORT_FN("coalesce", n_memfile_coalesce);
|
|
332
|
+
EXPORT_FN("read", n_memfile_read);
|
|
333
|
+
EXPORT_FN("write", n_memfile_write);
|
|
334
|
+
EXPORT_FN("lockShared", n_memfile_lock_shared);
|
|
335
|
+
EXPORT_FN("lockExclusive", n_memfile_lock_exclusive);
|
|
336
|
+
EXPORT_FN("unlock", n_memfile_unlock);
|
|
337
|
+
EXPORT_FN("stats", n_memfile_stats);
|
|
338
|
+
EXPORT_FN("refresh", n_memfile_refresh);
|
|
339
|
+
return exports;
|
|
340
|
+
}
|
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* Memory File - mmap-based arena allocator
|
|
3
|
+
*
|
|
4
|
+
* Originally from biscuit/server; adapted for MCP memory server.
|
|
5
|
+
* Added: flock-based concurrency, read/write helpers.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
#include "memoryfile.h"
|
|
9
|
+
#include <stdlib.h>
|
|
10
|
+
#include <string.h>
|
|
11
|
+
#include <unistd.h>
|
|
12
|
+
#include <fcntl.h>
|
|
13
|
+
#include <sys/mman.h>
|
|
14
|
+
#include <sys/stat.h>
|
|
15
|
+
#include <sys/file.h>
|
|
16
|
+
|
|
17
|
+
/* =========================================================================
|
|
18
|
+
* Pointer conversion
|
|
19
|
+
* ========================================================================= */
|
|
20
|
+
|
|
21
|
+
void *memfile_ptr(memfile_t *mf, u64 offset) {
|
|
22
|
+
if (offset == 0 || offset >= mf->mmap_size) return NULL;
|
|
23
|
+
return (u8*)mf->mmap_base + offset;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
/* =========================================================================
|
|
27
|
+
* Direct read/write at offset
|
|
28
|
+
* ========================================================================= */
|
|
29
|
+
|
|
30
|
+
/*
 * Copy `len` bytes from file offset `offset` into caller-owned `buf`.
 * Returns 0 on success, -1 when the range is out of bounds or offset is 0.
 */
int memfile_read(memfile_t *mf, u64 offset, void *buf, u64 len) {
  /* Overflow-safe bounds check: `offset + len` can wrap u64, which would
   * let a huge offset pass the naive `offset + len > mmap_size` test and
   * read out of bounds. */
  if (len > mf->mmap_size || offset > mf->mmap_size - len) return -1;
  void *src = memfile_ptr(mf, offset);
  if (!src) return -1; /* rejects offset 0 and offsets past the mapping */
  memcpy(buf, src, len);
  return 0;
}
|
|
37
|
+
|
|
38
|
+
/*
 * Copy `len` bytes from caller-owned `buf` to file offset `offset`.
 * Returns 0 on success, -1 when the range is out of bounds or offset is 0.
 */
int memfile_write(memfile_t *mf, u64 offset, const void *buf, u64 len) {
  /* Overflow-safe bounds check: `offset + len` can wrap u64; the naive
   * comparison would then accept the range and write out of bounds. */
  if (len > mf->mmap_size || offset > mf->mmap_size - len) return -1;
  void *dst = memfile_ptr(mf, offset);
  if (!dst) return -1; /* rejects offset 0 and offsets past the mapping */
  memcpy(dst, buf, len);
  return 0;
}
|
|
45
|
+
|
|
46
|
+
/* =========================================================================
|
|
47
|
+
* File growth via mremap
|
|
48
|
+
* ========================================================================= */
|
|
49
|
+
|
|
50
|
+
static int memfile_remap(memfile_t *mf, size_t new_size) {
|
|
51
|
+
if (ftruncate(mf->fd, new_size) < 0) {
|
|
52
|
+
return -1;
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
void *new_base = mremap(mf->mmap_base, mf->mmap_size, new_size, MREMAP_MAYMOVE);
|
|
56
|
+
if (new_base == MAP_FAILED) {
|
|
57
|
+
return -1;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
mf->mmap_base = new_base;
|
|
61
|
+
mf->mmap_size = new_size;
|
|
62
|
+
mf->header = (memfile_header_t*)new_base;
|
|
63
|
+
mf->header->file_size = new_size;
|
|
64
|
+
|
|
65
|
+
return 0;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
/*
 * Make sure the arena can satisfy a `needed`-byte bump allocation.
 * Grows by doubling the current mapping, or to exactly-required plus
 * one page of slack when doubling is not enough. Returns 0 on success,
 * -1 when the remap fails.
 */
static int memfile_ensure_space(memfile_t *mf, u64 needed) {
  u64 required = mf->header->allocated + needed;
  if (required <= mf->header->file_size) {
    return 0; /* already enough headroom */
  }

  size_t target = mf->mmap_size * 2;
  if (target < required) {
    target = required + 4096;
  }
  return memfile_remap(mf, target);
}
|
|
80
|
+
|
|
81
|
+
/* =========================================================================
|
|
82
|
+
* Allocation
|
|
83
|
+
* ========================================================================= */
|
|
84
|
+
|
|
85
|
+
/*
 * Allocate `size` usable bytes inside the arena.
 *
 * Returns the offset of the usable region (0 = failure). Layout: a
 * memfile_alloc_t header immediately precedes the returned offset.
 * Strategy: first-fit scan of the free list, splitting oversized blocks;
 * otherwise bump-allocate from the end, growing the file if needed.
 *
 * Fixes vs. previous version:
 *  - guards u64 overflow in `size + sizeof(header)` + alignment;
 *  - enforces a minimum block size of sizeof(memfile_free_t): without it
 *    a tiny allocation (e.g. size 0 -> 8-byte block) is too small to hold
 *    the 16-byte free-list node that memfile_free() writes in place,
 *    which corrupted the neighboring block on free;
 *  - reads the free block's `next` link before any writes into the block.
 */
u64 memfile_alloc(memfile_t *mf, u64 size) {
  /* Reject sizes whose header + alignment arithmetic would wrap u64. */
  if (size > UINT64_MAX - sizeof(memfile_alloc_t) - 7) return 0;

  u64 total_size = (size + sizeof(memfile_alloc_t) + 7) & ~7ULL;

  /* Floor: a block must be able to hold a free-list node when freed. */
  if (total_size < sizeof(memfile_free_t)) {
    total_size = (sizeof(memfile_free_t) + 7) & ~7ULL;
  }

  /* Try free list first (first-fit) */
  u64 prev_offset = 0;
  u64 free_offset = mf->header->free_list_head;

  while (free_offset != 0) {
    memfile_free_t *free_block = (memfile_free_t*)memfile_ptr(mf, free_offset);

    if (free_block->size >= total_size) {
      u64 remaining = free_block->size - total_size;
      /* Capture the link before writing into this block's memory. */
      u64 next_free = free_block->next;

      if (remaining >= sizeof(memfile_free_t) + 8) {
        /* Split the block: leftover becomes a new free block. */
        u64 new_free_offset = free_offset + total_size;
        memfile_free_t *new_free =
            (memfile_free_t*)memfile_ptr(mf, new_free_offset);
        new_free->size = remaining;
        new_free->next = next_free;
        next_free = new_free_offset; /* predecessor now links to the remainder */
      } else {
        /* Use entire block (avoid tiny leftover) */
        total_size = free_block->size;
      }

      if (prev_offset == 0) {
        mf->header->free_list_head = next_free;
      } else {
        memfile_free_t *prev = (memfile_free_t*)memfile_ptr(mf, prev_offset);
        prev->next = next_free;
      }

      memfile_alloc_t *alloc = (memfile_alloc_t*)memfile_ptr(mf, free_offset);
      alloc->size = total_size;

      return free_offset + sizeof(memfile_alloc_t);
    }

    prev_offset = free_offset;
    free_offset = free_block->next;
  }

  /* No suitable free block - bump allocate from end */
  if (memfile_ensure_space(mf, total_size) < 0) {
    return 0;
  }

  u64 offset = mf->header->allocated;
  memfile_alloc_t *alloc = (memfile_alloc_t*)memfile_ptr(mf, offset);
  alloc->size = total_size;
  mf->header->allocated += total_size;

  return offset + sizeof(memfile_alloc_t);
}
|
|
148
|
+
|
|
149
|
+
void memfile_free(memfile_t *mf, u64 offset) {
|
|
150
|
+
if (offset == 0) return;
|
|
151
|
+
|
|
152
|
+
u64 alloc_offset = offset - sizeof(memfile_alloc_t);
|
|
153
|
+
memfile_alloc_t *alloc = (memfile_alloc_t*)memfile_ptr(mf, alloc_offset);
|
|
154
|
+
|
|
155
|
+
/* Free list node lives in the freed space itself */
|
|
156
|
+
memfile_free_t *free_block = (memfile_free_t*)alloc;
|
|
157
|
+
free_block->size = alloc->size;
|
|
158
|
+
free_block->next = mf->header->free_list_head;
|
|
159
|
+
|
|
160
|
+
mf->header->free_list_head = alloc_offset;
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
/* =========================================================================
|
|
164
|
+
* Coalescing - merge adjacent free blocks
|
|
165
|
+
* ========================================================================= */
|
|
166
|
+
|
|
167
|
+
/*
 * Merge physically adjacent free blocks to reduce fragmentation.
 *
 * Strategy: snapshot the free list into a temp array, sort by offset,
 * merge runs where one block's end abuts the next block's start, then
 * rebuild the free list in offset order.
 *
 * Fix vs. previous version: the malloc of the temp array was unchecked,
 * so an OOM caused a NULL dereference. Coalescing is best-effort, so on
 * OOM we simply skip this pass and leave the list untouched.
 */
void memfile_coalesce(memfile_t *mf) {
  if (mf->header->free_list_head == 0) return;

  /* Count free blocks */
  u32 free_count = 0;
  u64 offset = mf->header->free_list_head;
  while (offset != 0) {
    free_count++;
    memfile_free_t *block = (memfile_free_t*)memfile_ptr(mf, offset);
    offset = block->next;
  }

  if (free_count < 2) return;

  /* Collect into temp array */
  struct { u64 offset; u64 size; } *blocks = malloc(free_count * sizeof(*blocks));
  if (!blocks) return; /* OOM: skip this pass; list stays valid, just unmerged */

  offset = mf->header->free_list_head;
  for (u32 i = 0; i < free_count; i++) {
    memfile_free_t *block = (memfile_free_t*)memfile_ptr(mf, offset);
    blocks[i].offset = offset;
    blocks[i].size = block->size;
    offset = block->next;
  }

  /* Sort by offset (insertion sort - fine for expected small N) */
  for (u32 i = 1; i < free_count; i++) {
    u64 key_off = blocks[i].offset;
    u64 key_size = blocks[i].size;
    int j = i - 1;
    while (j >= 0 && blocks[j].offset > key_off) {
      blocks[j + 1] = blocks[j];
      j--;
    }
    blocks[j + 1].offset = key_off;
    blocks[j + 1].size = key_size;
  }

  /* Merge adjacent: a block ending exactly where the next begins fuses. */
  u32 write_idx = 0;
  for (u32 i = 0; i < free_count; i++) {
    if (write_idx > 0 &&
        blocks[write_idx - 1].offset + blocks[write_idx - 1].size == blocks[i].offset) {
      blocks[write_idx - 1].size += blocks[i].size;
    } else {
      if (write_idx != i) {
        blocks[write_idx] = blocks[i];
      }
      write_idx++;
    }
  }

  /* Rebuild free list in offset order */
  mf->header->free_list_head = blocks[0].offset;
  for (u32 i = 0; i < write_idx; i++) {
    memfile_free_t *block = (memfile_free_t*)memfile_ptr(mf, blocks[i].offset);
    block->size = blocks[i].size;
    block->next = (i + 1 < write_idx) ? blocks[i + 1].offset : 0;
  }

  free(blocks);
}
|
|
229
|
+
|
|
230
|
+
/* =========================================================================
|
|
231
|
+
* Refresh mapping after another process grows the file
|
|
232
|
+
* ========================================================================= */
|
|
233
|
+
|
|
234
|
+
int memfile_refresh(memfile_t *mf) {
|
|
235
|
+
struct stat st;
|
|
236
|
+
if (fstat(mf->fd, &st) < 0) return -1;
|
|
237
|
+
|
|
238
|
+
size_t actual_size = (size_t)st.st_size;
|
|
239
|
+
if (actual_size <= mf->mmap_size) return 0; /* No growth detected */
|
|
240
|
+
|
|
241
|
+
/* File grew — remap to cover the new size */
|
|
242
|
+
void *new_base = mremap(mf->mmap_base, mf->mmap_size, actual_size, MREMAP_MAYMOVE);
|
|
243
|
+
if (new_base == MAP_FAILED) return -1;
|
|
244
|
+
|
|
245
|
+
mf->mmap_base = new_base;
|
|
246
|
+
mf->mmap_size = actual_size;
|
|
247
|
+
mf->header = (memfile_header_t*)new_base;
|
|
248
|
+
|
|
249
|
+
return 0;
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
/* =========================================================================
|
|
253
|
+
* Concurrency - POSIX flock
|
|
254
|
+
* ========================================================================= */
|
|
255
|
+
|
|
256
|
+
int memfile_lock_shared(memfile_t *mf) {
|
|
257
|
+
return flock(mf->fd, LOCK_SH);
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
int memfile_lock_exclusive(memfile_t *mf) {
|
|
261
|
+
return flock(mf->fd, LOCK_EX);
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
int memfile_unlock(memfile_t *mf) {
|
|
265
|
+
return flock(mf->fd, LOCK_UN);
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
/* =========================================================================
|
|
269
|
+
* Open/close
|
|
270
|
+
* ========================================================================= */
|
|
271
|
+
|
|
272
|
+
memfile_t *memfile_open(const char *path, size_t initial_size) {
|
|
273
|
+
memfile_t *mf = calloc(1, sizeof(memfile_t));
|
|
274
|
+
if (!mf) return NULL;
|
|
275
|
+
mf->path = strdup(path);
|
|
276
|
+
|
|
277
|
+
struct stat st;
|
|
278
|
+
int exists = (stat(path, &st) == 0 && st.st_size > 0);
|
|
279
|
+
|
|
280
|
+
if (exists) {
|
|
281
|
+
mf->fd = open(path, O_RDWR);
|
|
282
|
+
if (mf->fd < 0) goto fail;
|
|
283
|
+
|
|
284
|
+
mf->mmap_size = st.st_size;
|
|
285
|
+
mf->mmap_base = mmap(NULL, mf->mmap_size, PROT_READ | PROT_WRITE,
|
|
286
|
+
MAP_SHARED, mf->fd, 0);
|
|
287
|
+
if (mf->mmap_base == MAP_FAILED) goto fail_fd;
|
|
288
|
+
|
|
289
|
+
mf->header = (memfile_header_t*)mf->mmap_base;
|
|
290
|
+
|
|
291
|
+
if (mf->header->magic != MEMFILE_MAGIC) {
|
|
292
|
+
munmap(mf->mmap_base, mf->mmap_size);
|
|
293
|
+
goto fail_fd;
|
|
294
|
+
}
|
|
295
|
+
} else {
|
|
296
|
+
mf->fd = open(path, O_RDWR | O_CREAT, 0644);
|
|
297
|
+
if (mf->fd < 0) goto fail;
|
|
298
|
+
|
|
299
|
+
if (initial_size < sizeof(memfile_header_t) + 64) {
|
|
300
|
+
initial_size = 4096;
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
if (ftruncate(mf->fd, initial_size) < 0) goto fail_fd;
|
|
304
|
+
|
|
305
|
+
mf->mmap_size = initial_size;
|
|
306
|
+
mf->mmap_base = mmap(NULL, mf->mmap_size, PROT_READ | PROT_WRITE,
|
|
307
|
+
MAP_SHARED, mf->fd, 0);
|
|
308
|
+
if (mf->mmap_base == MAP_FAILED) {
|
|
309
|
+
unlink(path);
|
|
310
|
+
goto fail_fd;
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
mf->header = (memfile_header_t*)mf->mmap_base;
|
|
314
|
+
mf->header->magic = MEMFILE_MAGIC;
|
|
315
|
+
mf->header->version = MEMFILE_VERSION;
|
|
316
|
+
mf->header->file_size = initial_size;
|
|
317
|
+
mf->header->allocated = sizeof(memfile_header_t);
|
|
318
|
+
mf->header->free_list_head = 0;
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
return mf;
|
|
322
|
+
|
|
323
|
+
fail_fd:
|
|
324
|
+
close(mf->fd);
|
|
325
|
+
fail:
|
|
326
|
+
free(mf->path);
|
|
327
|
+
free(mf);
|
|
328
|
+
return NULL;
|
|
329
|
+
}
|
|
330
|
+
|
|
331
|
+
void memfile_sync(memfile_t *mf) {
|
|
332
|
+
if (!mf || mf->closed || !mf->mmap_base) return;
|
|
333
|
+
msync(mf->mmap_base, mf->mmap_size, MS_SYNC);
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
void memfile_close(memfile_t *mf) {
|
|
337
|
+
if (!mf || mf->closed) return;
|
|
338
|
+
mf->closed = 1;
|
|
339
|
+
memfile_sync(mf);
|
|
340
|
+
munmap(mf->mmap_base, mf->mmap_size);
|
|
341
|
+
close(mf->fd);
|
|
342
|
+
free(mf->path);
|
|
343
|
+
}
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* Memory File - mmap-based arena allocator with automatic growth
|
|
3
|
+
*
|
|
4
|
+
* The file header IS the arena struct (packed, at offset 0).
|
|
5
|
+
* All allocations return offsets, not pointers.
|
|
6
|
+
* Pointers become invalid after mremap, offsets remain valid.
|
|
7
|
+
*
|
|
8
|
+
* Originally from biscuit/server; adapted for MCP memory server.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
#ifndef MEMORYFILE_H
|
|
12
|
+
#define MEMORYFILE_H
|
|
13
|
+
|
|
14
|
+
#include <stdint.h>
|
|
15
|
+
#include <stddef.h>
|
|
16
|
+
|
|
17
|
+
typedef uint8_t u8;
|
|
18
|
+
typedef uint16_t u16;
|
|
19
|
+
typedef uint32_t u32;
|
|
20
|
+
typedef uint64_t u64;
|
|
21
|
+
|
|
22
|
+
#define MEMFILE_MAGIC 0x4D454D46 /* "MEMF" */
|
|
23
|
+
#define MEMFILE_VERSION 1
|
|
24
|
+
|
|
25
|
+
/* File header - lives at offset 0, IS the arena */
|
|
26
|
+
typedef struct __attribute__((packed)) {
|
|
27
|
+
u32 magic;
|
|
28
|
+
u32 version;
|
|
29
|
+
u64 file_size; /* Current file size */
|
|
30
|
+
u64 allocated; /* Bump pointer: next allocation from end */
|
|
31
|
+
u64 free_list_head; /* Offset to first free block (0=none) */
|
|
32
|
+
} memfile_header_t;
|
|
33
|
+
|
|
34
|
+
/* Free block header - lives IN the free space it describes */
|
|
35
|
+
typedef struct __attribute__((packed)) {
|
|
36
|
+
u64 size; /* Size of this free block (including header) */
|
|
37
|
+
u64 next; /* Offset to next free block (0=none) */
|
|
38
|
+
} memfile_free_t;
|
|
39
|
+
|
|
40
|
+
/* Allocation header - immediately before each allocated block */
|
|
41
|
+
typedef struct __attribute__((packed)) {
|
|
42
|
+
u64 size; /* Size of allocation (including this header) */
|
|
43
|
+
} memfile_alloc_t;
|
|
44
|
+
|
|
45
|
+
/* Handle for working with memory file */
|
|
46
|
+
typedef struct {
|
|
47
|
+
int fd;
|
|
48
|
+
char *path;
|
|
49
|
+
void *mmap_base; /* Base address of mmap */
|
|
50
|
+
size_t mmap_size; /* Current mmap size */
|
|
51
|
+
memfile_header_t *header; /* Points to offset 0 */
|
|
52
|
+
int closed; /* Set after close to prevent double-free */
|
|
53
|
+
} memfile_t;
|
|
54
|
+
|
|
55
|
+
/* Lifecycle */
|
|
56
|
+
memfile_t *memfile_open(const char *path, size_t initial_size);
|
|
57
|
+
void memfile_close(memfile_t *mf);
|
|
58
|
+
void memfile_sync(memfile_t *mf);
|
|
59
|
+
|
|
60
|
+
/* Allocation - returns offset from file start (0 = failed) */
|
|
61
|
+
u64 memfile_alloc(memfile_t *mf, u64 size);
|
|
62
|
+
void memfile_free(memfile_t *mf, u64 offset);
|
|
63
|
+
|
|
64
|
+
/* Defragmentation */
|
|
65
|
+
void memfile_coalesce(memfile_t *mf);
|
|
66
|
+
|
|
67
|
+
/* Direct read/write at offset */
|
|
68
|
+
int memfile_read(memfile_t *mf, u64 offset, void *buf, u64 len);
|
|
69
|
+
int memfile_write(memfile_t *mf, u64 offset, const void *buf, u64 len);
|
|
70
|
+
|
|
71
|
+
/* Convert offset to pointer (CAUTION: invalid after alloc that triggers remap) */
|
|
72
|
+
void *memfile_ptr(memfile_t *mf, u64 offset);
|
|
73
|
+
|
|
74
|
+
/* Refresh mapping if the file was grown by another process */
|
|
75
|
+
int memfile_refresh(memfile_t *mf);
|
|
76
|
+
|
|
77
|
+
/* Concurrency - POSIX flock on the underlying fd */
|
|
78
|
+
int memfile_lock_shared(memfile_t *mf);
|
|
79
|
+
int memfile_lock_exclusive(memfile_t *mf);
|
|
80
|
+
int memfile_unlock(memfile_t *mf);
|
|
81
|
+
|
|
82
|
+
#endif /* MEMORYFILE_H */
|
package/package.json
CHANGED
|
@@ -1,17 +1,20 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@levalicious/server-memory",
|
|
3
|
-
"version": "0.0.
|
|
3
|
+
"version": "0.0.13",
|
|
4
4
|
"description": "MCP server for enabling memory for Claude through a knowledge graph",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"author": "Levalicious",
|
|
7
7
|
"homepage": "https://modelcontextprotocol.io",
|
|
8
8
|
"bugs": "https://github.com/modelcontextprotocol/servers/issues",
|
|
9
9
|
"type": "module",
|
|
10
|
+
"gypfile": true,
|
|
10
11
|
"bin": {
|
|
11
12
|
"mcp-server-memory": "dist/index.js"
|
|
12
13
|
},
|
|
13
14
|
"files": [
|
|
14
|
-
"dist"
|
|
15
|
+
"dist",
|
|
16
|
+
"binding.gyp",
|
|
17
|
+
"native"
|
|
15
18
|
],
|
|
16
19
|
"scripts": {
|
|
17
20
|
"build": "node-gyp rebuild && tsc && shx chmod +x dist/*.js",
|