localmemcache 0.0.1
- data/AUTHORS +1 -0
- data/COPYING +21 -0
- data/LICENSE +3 -0
- data/Makefile.in +18 -0
- data/README +83 -0
- data/Rakefile +78 -0
- data/VERSION +1 -0
- data/aclocal.m4 +3 -0
- data/configure +5254 -0
- data/configure.in +42 -0
- data/site/index.html +70 -0
- data/site/style.css +37 -0
- data/src/Makefile.in +53 -0
- data/src/lmc_config.h.in +4 -0
- data/src/lmc_error.c +18 -0
- data/src/lmc_error.h +19 -0
- data/src/lmc_hashtable.c +104 -0
- data/src/lmc_hashtable.h +33 -0
- data/src/lmc_lock.c +65 -0
- data/src/lmc_lock.h +22 -0
- data/src/lmc_shm.c +92 -0
- data/src/lmc_shm.h +22 -0
- data/src/lmc_valloc.c +324 -0
- data/src/lmc_valloc.h +31 -0
- data/src/localmemcache.c +130 -0
- data/src/localmemcache.h +33 -0
- data/src/ruby-binding/extconf.rb +14 -0
- data/src/ruby-binding/localmemcache.rb +32 -0
- data/src/ruby-binding/rblocalmemcache.c +119 -0
- data/src/tests/alloc +11 -0
- data/src/tests/alloc.rb +61 -0
- data/src/tests/bacon.rb +301 -0
- data/src/tests/bench +11 -0
- data/src/tests/bench.rb +46 -0
- data/src/tests/extconf.rb +14 -0
- data/src/tests/lmc +11 -0
- data/src/tests/lmc.rb +85 -0
- data/src/tests/lmctestapi.c +162 -0
- data/src/tests/runtest.sh +9 -0
- data/src/tests/shm +11 -0
- data/src/tests/shm.rb +20 -0
- data/src/tests/torture.rb +56 -0
- data/src/tests/ttalloc +11 -0
- data/src/tests/ttalloc.rb +47 -0
- data/src/tests/ttlmc +11 -0
- data/src/tests/ttlmc.rb +21 -0
- metadata +99 -0
data/src/lmc_valloc.c
ADDED
@@ -0,0 +1,324 @@
/*
 * Copyright (c) 2009, Sven C. Koehler
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#include "lmc_valloc.h"

#include "lmc_lock.h"

typedef struct {
  size_t next;
  size_t size;
} mem_chunk_descriptor_t;

mem_chunk_descriptor_t *md_first_free(void *base) {
  mem_descriptor_t *md = base;
  return md->first_free == 0 ? 0 : base + md->first_free;
}

void lmc_dump_chunk(void *base, mem_chunk_descriptor_t* c) {
  size_t va_c = (void *)c - base;
  printf("chunk %zd:\n"
      "  start: %zd\n"
      "  end  : %zd\n"
      "  size : %zd\n"
      "  next : %zd\n"
      " ------------------------\n"
      , va_c, va_c, va_c + c->size, c->size, c->next);
}

void lmc_dump_chunk_brief(char *who, void *base, mem_chunk_descriptor_t* c) {
  if (!c) { return; }
  size_t va_c = (void *)c - base;
  printf("[%s] chunk %zd:\n", who, va_c);
}


void lmc_dump(void *base) {
  mem_chunk_descriptor_t* c = md_first_free(base);
  size_t free = 0;
  long chunks = 0;
  while (c) {
    lmc_dump_chunk(base, c);
    free += c->size;
    chunks++;
    if (c->next == 0) { c = 0; } else { c = base + c->next; }
  }
}

int is_va_valid(void *base, size_t va) {
  mem_descriptor_t *md = base;
  mem_chunk_descriptor_t* c = base + va;
  return !(((void *)c < base) || (base + md->total_size + sizeof(mem_descriptor_t)) < (void *)c);
}

mem_status_t lmc_status(void *base, char *where) {
  mem_descriptor_t *md = base;
  mem_chunk_descriptor_t* c = md_first_free(base);
  mem_status_t ms;
  size_t free = 0;
  size_t largest_chunk = 0;
  long chunks = 0;
  ms.total_mem = md->total_size;
  while (c) {
    if (!is_va_valid(base, (void *)c - base)) {
      printf("[%s] invalid pointer detected: %ld...\n", where, (void *)c - base);
      lmc_dump(base);
      abort();
    }
    free += c->size;
    if (c->size > largest_chunk) { largest_chunk = c->size; }
    chunks++;
    if (c->next == 0) { c = 0; } else { c = base + c->next; }
  }
  ms.total_free_mem = free;
  ms.free_mem = free > 0 ? free - sizeof(size_t) : 0;
  ms.largest_chunk = largest_chunk;
  ms.free_chunks = chunks;
  return ms;
}

void lmc_show_status(void *base) {
  mem_status_t ms = lmc_status(base, "lmc_ss");
  printf("total: %zu\n", ms.total_mem);
  printf("chunks: %zu, free: %zu\n", ms.free_chunks, ms.free_mem);
}

int is_lmc_already_initialized(void *base) {
  mem_descriptor_t *md = base;
  if (md->magic == 0xF00D) {
#ifdef LMC_DEBUG_ALLOC
    printf("memory already initialized, skipping...\n");
#endif
    return 1;
  }
  return 0;
}

void lmc_init_memory(void *ptr, size_t size) {
  mem_descriptor_t *md = ptr;
  size_t s = size - sizeof(mem_descriptor_t);
  // size: enough space for mem_descriptor_t + mem_chunk_descriptor_t
  md->first_free = sizeof(mem_descriptor_t);
  md->magic = 0xF00D;
  md->locked = 0;
  md->total_size = s;
  mem_chunk_descriptor_t *c = ptr + sizeof(mem_descriptor_t);
  c->next = 0;
  c->size = s;
}

size_t lmc_max(size_t a, size_t b) {
  return a > b ? a : b;
}

size_t __s(char *where, mem_status_t ms, size_t mem_before, size_t expected_diff) {
  size_t free = ms.total_free_mem;
  printf("(%s) ", where);
  if (mem_before) { printf("[%ld:%zd] ", free - mem_before, expected_diff); }
  printf("mem_free: %zu, chunks: %zu\n", free, ms.free_chunks);
  if (expected_diff && expected_diff != free - mem_before) {
    printf("expected_diff (%zu) != diff (%ld)\n", expected_diff,
        free - mem_before);
    abort();
  }
  return free;
}

size_t lmc_valloc(void *base, size_t size) {
  mem_descriptor_t *md = base;
  // MOD by power of 2
  size_t s = lmc_max(size + sizeof(size_t),
      sizeof(mem_chunk_descriptor_t) + sizeof(size_t));
  // larger than available space?
  mem_chunk_descriptor_t *c = md_first_free(base);
  mem_chunk_descriptor_t *p = NULL;
  if (size == 0) { return 0; }
  while (c && c->size < s) {
    p = c;
    if (c->next == 0) {
      c = 0;
      break;
    }
    c = base + c->next;
  }
  if (!c) {
    //fprintf(stderr, "lmc_valloc: Failed to allocate %d bytes!\n", size);
    return 0;
  }
  size_t r = 0;
  if (c->size - s < sizeof(mem_chunk_descriptor_t)) { s = c->size; }
  // -----------------          -------------------
  // | chunk         |  wanted: |                 |
  // -----------------          -------------------
  if (c->size == s) {
    if (p) { p->next = c->next; }
    else { md->first_free = c->next; }
    r = (size_t)((void*)c - (void*)base);
  } else {
  // -----------------          -------------------
  // | chunk         |  wanted: |                 |
  // |               |          -------------------
  // -----------------
    c->size -= s;
    r = (size_t)((void*)c - base) + c->size;
  }
  *(size_t *)(r + base) = s;
  return r + sizeof(size_t);
}

// compact_chunks,
void lmc_check_coalesce(void *base, size_t va_chunk) {
  mem_descriptor_t *md = base;
  mem_chunk_descriptor_t *chunk = base + va_chunk;
  size_t c_size = chunk->size;
  size_t va_chunk_p = 0;
  size_t va_c_free_chunk = 0;
  mem_chunk_descriptor_t* c_free_chunk = base + va_c_free_chunk;
  size_t va_previous = 0;
  size_t merge1_chunk = 0;
  int merge1 = 0;
  while (c_free_chunk) {
    va_c_free_chunk = (void *)c_free_chunk - base;
    if (va_c_free_chunk != va_chunk) {
      if (c_free_chunk->next == va_chunk) { va_chunk_p = va_c_free_chunk; }
      else if (!merge1) {
        // ----------------------
        // | a_free_chunk       |
        // ---------------------- <---- if (...)
        // | chunk              |
        // ----------------------
        if (va_c_free_chunk + c_free_chunk->size == va_chunk) {
          merge1 = 1;
          merge1_chunk = va_c_free_chunk;
        } else
        // ----------------------
        // | chunk              |
        // ---------------------- <---- if (...)
        // | a_free_chunk       |
        // ----------------------
        if (va_chunk + c_size == va_c_free_chunk) {
          chunk->size += c_free_chunk->size;
          if (chunk->next == va_c_free_chunk) { va_previous = va_chunk; }
          mem_chunk_descriptor_t *p = va_previous ? base + va_previous : 0;
          if (p) { p->next = c_free_chunk->next; }
          break;
        }
      }
      va_previous = va_c_free_chunk;
    }
    va_c_free_chunk = c_free_chunk->next;
    if (va_c_free_chunk == 0) { c_free_chunk = NULL; }
    else { c_free_chunk = base + va_c_free_chunk; }
  }
  // ----------------------
  // | a_free_chunk       |
  // ---------------------- <---- if (...)
  // | chunk              |
  // ----------------------
  if (merge1) {
    mem_chunk_descriptor_t *cd = base + merge1_chunk;
    mem_chunk_descriptor_t *p = va_chunk_p ? base + va_chunk_p : 0;
    mem_chunk_descriptor_t *vacd = va_chunk ? base + va_chunk : 0;
    if (p) { p->next = vacd->next; }
    if (md->first_free == va_chunk) { md->first_free = chunk->next; }
    cd->size += c_size;
  }
}


void lmc_free(void *base, size_t chunk) {
#ifdef LMC_DEBUG_ALLOC
  size_t mb = __s("free1", lmc_status(base, "lmc_free1"), 0, 0);
#endif
  if (chunk == 0) { return; }
  mem_descriptor_t *md = base;
  size_t va_used_chunk = chunk - sizeof(size_t);
  void *used_chunk_p = base + va_used_chunk;
  mem_chunk_descriptor_t *mcd_used_chunk = used_chunk_p;
  size_t uc_size = *(size_t *)used_chunk_p;
  size_t va_c_free_chunk = 0;
  mem_chunk_descriptor_t* c_free_chunk = base + va_c_free_chunk;
  size_t va_previous = 0;
  size_t va_c_free_end = 0;
  if (!(chunk >= sizeof(mem_descriptor_t) + sizeof(size_t)) ||
      !is_va_valid(base, chunk)) {
    printf("lmc_free: Invalid pointer: %zd\n", chunk);
    return;
  }
#ifdef LMC_DEBUG_ALLOC
  if (uc_size == 0) {
    printf("SIZE is 0!\n");
    lmc_dump(base);
    abort();
  }
  memset(base + chunk, 0xF9, uc_size - sizeof(size_t));
#endif
  int freed = 0;
  while (c_free_chunk) {
    va_c_free_chunk = (void *)c_free_chunk - base;
    va_c_free_end = va_c_free_chunk + c_free_chunk->size;
    // ----------------------
    // | c_free_chunk       |
    // ---------------------- <---- if (...)
    // | used_chunk         |
    // ----------------------
    if (va_c_free_end == va_used_chunk) {
      freed = 1;
      c_free_chunk->size += uc_size;
      lmc_check_coalesce(base, va_c_free_chunk);
      break;
    } else
    // ----------------------
    // | used_chunk         |
    // ---------------------- <---- if (...)
    // | c_free_chunk       |
    // ----------------------
    if (va_used_chunk + uc_size == va_c_free_chunk) {
      freed = 1;
      mem_chunk_descriptor_t *p = base + va_previous;
      mcd_used_chunk->next = c_free_chunk->next;
      mcd_used_chunk->size = uc_size + c_free_chunk->size;
      p->next = va_used_chunk;
      lmc_check_coalesce(base, va_used_chunk);
      break;
    }
    if (va_used_chunk >= va_c_free_chunk && va_used_chunk <= va_c_free_end) {
      fprintf(stderr, "Was pointer already freed?\n");
      return;
    }
    va_previous = va_c_free_chunk;
    va_c_free_chunk = c_free_chunk->next;
    if (va_c_free_chunk == 0) { c_free_chunk = NULL; }
    else { c_free_chunk = base + va_c_free_chunk; }
  }
  // ----------------------
  // | otherwise allocated |
  // ----------------------
  // | used_chunk          |
  // ----------------------
  // | otherwise allocated |
  // ----------------------
  if (!freed) {
    mcd_used_chunk->next = md->first_free;
    mcd_used_chunk->size = uc_size;
    md->first_free = va_used_chunk;
  }
#ifdef LMC_DEBUG_ALLOC
  __s("free2", lmc_status(base, "lmc_free2"), mb, uc_size);
#endif
}

void lmc_realloc(void *base, size_t chunk) {
  // check if enough reserved space, true: resize; otherwise: alloc new and
  // then free
}
data/src/lmc_valloc.h
ADDED
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2009, Sven C. Koehler
 */

#ifndef _LMC_VALLOC_H_INCLUDED_
#define _LMC_VALLOC_H_INCLUDED_
#undef LMC_DEBUG_ALLOC
typedef struct {
  size_t free_chunks;
  size_t total_mem;
  size_t total_free_mem;
  size_t free_mem;
  size_t largest_chunk;
} mem_status_t;

typedef struct {
  size_t first_free;
  size_t dummy2;
  size_t total_size;
  size_t magic;
  size_t va_hash;
  int locked;
} mem_descriptor_t;


size_t lmc_valloc(void *base, size_t size);
void lmc_free(void *base, size_t chunk);
mem_status_t lmc_status(void *base, char *where);
int is_lmc_already_initialized(void *base);
void lmc_init_memory(void *ptr, size_t size);
#endif
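The allocator above hands out offsets ("virtual addresses") relative to the base of a single region rather than raw pointers, so the same free list works for processes that map the region at different addresses. As a minimal sketch (not part of the gem), it could be driven against an ordinary heap buffer instead of a shared memory segment; the buffer size and the stored string below are made up for illustration.

/* sketch only: exercises lmc_valloc.h against a calloc'd buffer */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lmc_valloc.h"

int main(void) {
  size_t size = 1024 * 1024;
  void *base = calloc(1, size);            /* stands in for the mmap'ed region */
  if (!base) return 1;
  if (!is_lmc_already_initialized(base)) lmc_init_memory(base, size);

  size_t va = lmc_valloc(base, 100);       /* offset relative to base, 0 on failure */
  if (va == 0) { fprintf(stderr, "allocation failed\n"); return 1; }
  strcpy((char *)base + va, "hello");      /* data lives at base + offset */

  mem_status_t ms = lmc_status(base, "example");
  printf("free: %zu, largest chunk: %zu\n", ms.free_mem, ms.largest_chunk);

  lmc_free(base, va);                      /* returns the chunk to the free list */
  free(base);
  return 0;
}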
data/src/localmemcache.c
ADDED
@@ -0,0 +1,130 @@
#include "localmemcache.h"
#include <stdio.h>
#include <string.h>
#include "lmc_valloc.h"
#include "lmc_shm.h"

int lmc_set_lock_flag(void *base, lmc_error_t *e) {
  mem_descriptor_t *md = base;
  if (md->locked != 0) {
    strncpy(e->error_str, "Failed to lock shared memory region--"
        "may be corrupt.", 1023);
    return 0;
  } else {
    md->locked = 1;
  }
  return 1;
}

int lmc_release_lock_flag(void *base, lmc_error_t *e) {
  mem_descriptor_t *md = base;
  if (md->locked != 1) {
    strncpy(e->error_str, "Shared memory region appears to be unlocked already"
        "--may be corrupt.", 1023);
    return 0;
  } else {
    md->locked = 0;
  }
  return 1;
}

int local_memcache_clear_namespace(const char *namespace, int repair,
    lmc_error_t *e) {
  lmc_clean_namespace(namespace, e);
  if (repair) {
    lmc_lock_t *l = lmc_lock_init(namespace, 1, e);
    lmc_lock_repair(l);
    free(l);
  }
  return 1;
}

local_memcache_t *local_memcache_create(const char *namespace, size_t size,
    lmc_error_t* e) {
  local_memcache_t *lmc = calloc(1, sizeof(local_memcache_t));
  if (!lmc || (lmc->namespace = strdup(namespace)) == NULL) return NULL;
  lmc->size = size;
  if ((lmc->lock = lmc_lock_init(lmc->namespace, 1, e)) == NULL) goto failed;
  if (!lmc_is_lock_working(lmc->lock, e)) {
    strncpy(e->error_str, "Failed to lock shared memory!", 1023);
    goto failed;
  }
  {
    if (!lmc_lock_obtain("local_memcache_create", lmc->lock, &lmc->error))
      goto failed;
    if ((lmc->shm = lmc_shm_create(lmc->namespace, lmc->size, 0, e)) == NULL)
      goto release_and_fail;
    lmc->base = lmc->shm->base;
    if (is_lmc_already_initialized(lmc->base)) {
      if (!lmc_set_lock_flag(lmc->base, e)) goto release_and_fail;
      mem_descriptor_t *md = lmc->base;
      lmc->va_hash = md->va_hash;
    } else {
      lmc_init_memory(lmc->base, lmc->size);
      mem_descriptor_t *md = lmc->base;
      if ((md->va_hash = ht_hash_create(lmc->base, e)) == 0)
        goto unlock_and_fail;
      lmc->va_hash = md->va_hash;
    }
    lmc_release_lock_flag(lmc->base, e);
    lmc_lock_release("local_memcache_create", lmc->lock, e);
  }
  return lmc;

unlock_and_fail:
  lmc_release_lock_flag(lmc->base, e);
release_and_fail:
  lmc_lock_release("local_memcache_create", lmc->lock, e);
failed:
  free(lmc);
  return NULL;
}

int lmc_lock_shm_region(const char *who, local_memcache_t *lmc) {
  if (!lmc_lock_obtain(who, lmc->lock, &lmc->error)) return 0;
  if (!lmc_set_lock_flag(lmc->base, &lmc->error)) {
    lmc_lock_release(who, lmc->lock, &lmc->error);
    return 0;
  }
  return 1;
}

int lmc_unlock_shm_region(const char *who, local_memcache_t *lmc) {
  int r = 1;
  if (!lmc_release_lock_flag(lmc->base, &lmc->error)) r = 0;
  lmc_lock_release(who, lmc->lock, &lmc->error);
  return r;
}

char *local_memcache_get(local_memcache_t *lmc, const char *key) {
  if (!lmc_lock_shm_region("local_memcache_get", lmc)) return 0;
  char *r = ht_get(lmc->base, lmc->va_hash, key);
  if (!lmc_unlock_shm_region("local_memcache_get", lmc)) return 0;
  return r;
}

int local_memcache_set(local_memcache_t *lmc,
    const char *key, const char* value) {
  if (!lmc_lock_shm_region("local_memcache_set", lmc)) return 0;
  int r = ht_set(lmc->base, lmc->va_hash, key, value, &lmc->error);
  if (!lmc_unlock_shm_region("local_memcache_get", lmc)) return 0;
  return r;
}

int local_memcache_delete(local_memcache_t *lmc, char *key) {
  if (!lmc_lock_shm_region("local_memcache_delete", lmc)) return 0;
  int r = ht_delete(lmc->base, lmc->va_hash, key);
  if (!lmc_unlock_shm_region("local_memcache_delete", lmc)) return 0;
  return r;
}

int local_memcache_free(local_memcache_t *lmc) {
  lmc_error_t e;
  if (!lmc_lock_shm_region("local_memcache_free", lmc)) return 0;
  int r = ht_hash_destroy(lmc->base, lmc->va_hash);
  if (!lmc_unlock_shm_region("local_memcache_free", lmc)) return 0;
  lmc_shm_destroy(lmc->shm, &e);
  free(lmc->namespace);
  free(lmc->lock);
  return r;
}
data/src/localmemcache.h
ADDED
@@ -0,0 +1,33 @@
#ifndef _LOCAL_MEMCACHE_INCLUDED_
#define _LOCAL_MEMCACHE_INCLUDED_

#include <stdlib.h>
#include "lmc_hashtable.h"
#include "lmc_shm.h"
#include "lmc_lock.h"
#include "lmc_error.h"

#define LOCAL_MEMCACHE_FAILED 0
#define LOCAL_MEMCACHE_SUCCESS 1

typedef struct {
  char *namespace;
  size_t size;
  lmc_shm_t *shm;
  size_t va_hash;
  lmc_lock_t *lock;
  lmc_lock_t *root_lock;
  void* base;
  lmc_error_t error;
} local_memcache_t;

local_memcache_t *local_memcache_create(const char *namespace, size_t size,
    lmc_error_t *e);
char *local_memcache_get(local_memcache_t *lmc, const char *key);
int local_memcache_set(local_memcache_t *lmc, const char *key, const char* value);
int local_memcache_delete(local_memcache_t *lmc, char *key);
int local_memcache_free(local_memcache_t *lmc);
int local_memcache_clear_namespace(const char *namespace, int repair,
    lmc_error_t *e);

#endif
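For orientation, a minimal sketch (not shipped with the gem) of how the C API declared above might be called from a host program. The namespace "viewcounters", the 1 MB size, and the key/value strings are made up; the assumption that lmc_error_t exposes an error_str buffer follows from its use in localmemcache.c above.

/* sketch only: create a namespace, set/get/delete a key, tear it down */
#include <stdio.h>
#include "localmemcache.h"

int main(void) {
  lmc_error_t e;
  local_memcache_t *lmc = local_memcache_create("viewcounters", 1024 * 1024, &e);
  if (!lmc) { fprintf(stderr, "create failed: %s\n", e.error_str); return 1; }

  if (!local_memcache_set(lmc, "index.html", "42"))        /* returns 0 on failure */
    fprintf(stderr, "set failed: %s\n", lmc->error.error_str);

  char *value = local_memcache_get(lmc, "index.html");     /* 0 if missing or on error */
  printf("index.html -> %s\n", value ? value : "(nil)");

  char key[] = "index.html";                               /* delete takes a non-const key */
  local_memcache_delete(lmc, key);
  local_memcache_free(lmc);                                /* destroys hashtable and region */
  return 0;
}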
data/src/ruby-binding/extconf.rb
ADDED
@@ -0,0 +1,14 @@
require 'mkmf'

dir = File.dirname(__FILE__)

$defs << "-DRUBY_VERSION_CODE=#{RUBY_VERSION.gsub(/\D/, '')}"

$srcs = ['rblocalmemcache.c']
$objs = ['rblocalmemcache.o']

$CFLAGS << " -g -I .."
$LDFLAGS << " ../liblmc.a -lpthread -lrt "

dir_config('rblocalmemcache')
create_makefile('rblocalmemcache')
data/src/ruby-binding/localmemcache.rb
ADDED
@@ -0,0 +1,32 @@
require 'rblocalmemcache'

# == Overview
# TestRDocUsage: A useless file
#
# == Example
#
# Usage: ruby testRDocUsage.rb [options]
#
class LocalMemCache

  # Creates a new handle for accessing a shared memory region.
  #
  #   LocalMemCache.new :namespace => "foo", :size_mb => 1
  #
  # The namespace parameter is mandatory.
  # The size_mb defaults to 1024 (1 GB).
  #
  def self.new(options)
    o = { :size_mb => 1024 }.update(options || {})
    raise "Missing mandatory option ':namespace'" if !o[:namespace]
    _new(o[:namespace].gsub("/", "-"), (o[:size_mb].to_f * 1024 * 1024).to_i);
  end

  # Deletes the given namespace, removing semaphores if necessary.
  # Only use this if you are sure the namespace is no longer used by other
  # processes.
  #
  def self.clear_namespace(namespace, repair = false)
    _clear_namespace(namespace, repair)
  end
end
data/src/ruby-binding/rblocalmemcache.c
ADDED
@@ -0,0 +1,119 @@
/*
 * Copyright (C) 2009, Sven C. Koehler
 */

#include <ruby.h>
#include "localmemcache.h"

/* :nodoc: */
long long_value(VALUE i) { return NUM2LONG(rb_Integer(i)); }
/* :nodoc: */
VALUE num2string(long i) { return rb_big2str(rb_int2big(i), 10); }
/* :nodoc: */
char *rstring_ptr(VALUE s) {
  char* r = NIL_P(s) ? "nil" : RSTRING_PTR(rb_String(s));
  return r ? r : "nil";
}
/* :nodoc: */
static VALUE ruby_string(char *s) { return s ? rb_str_new2(s) : Qnil; }
/* :nodoc: */
int bool_value(VALUE v) { return v == Qtrue; }

static VALUE LocalMemCacheError;

/* :nodoc: */
void raise_exception(VALUE error_klass, lmc_error_t *e) {
  rb_raise(error_klass, e->error_str);
}

/* :nodoc: */
static VALUE LocalMemCache__new2(VALUE klass, VALUE namespace, VALUE size) {
  lmc_error_t e;
  local_memcache_t *lmc = local_memcache_create(rstring_ptr(namespace),
      long_value(size), &e);
  if (!lmc) { raise_exception(LocalMemCacheError, &e); }
  return Data_Wrap_Struct(klass, NULL, local_memcache_free, lmc);
}

/* :nodoc: */
static VALUE LocalMemCache__clear_namespace(VALUE klass, VALUE ns, VALUE repair) {
  lmc_error_t e;
  if (!local_memcache_clear_namespace(rstring_ptr(ns), bool_value(repair), &e)) {
    raise_exception(LocalMemCacheError, &e);
  }
  return Qnil;
}

/* :nodoc: */
local_memcache_t *get_LocalMemCache(VALUE obj) {
  local_memcache_t *lmc;
  Data_Get_Struct(obj, local_memcache_t, lmc);
  return lmc;
}

/*
 * call-seq:
 *   lmc.get(key) -> Qnil
 *   lmc[key] -> Qnil
 *
 * Retrieve value from hashtable.
 */
static VALUE LocalMemCache__get(VALUE obj, VALUE key) {
  return ruby_string(local_memcache_get(get_LocalMemCache(obj),
      rstring_ptr(key)));
}

/*
 * call-seq:
 *   lmc.set(key, value) -> Qnil
 *   lmc[key]=value -> Qnil
 *
 * Set value for key in hashtable.
 */

static VALUE LocalMemCache__set(VALUE obj, VALUE key, VALUE value) {
  local_memcache_t *lmc = get_LocalMemCache(obj);
  if (!local_memcache_set(lmc, rstring_ptr(key), rstring_ptr(value))) {
    raise_exception(LocalMemCacheError, &lmc->error);
  }
  return Qnil;
}

/*
 * call-seq:
 *   lmc.delete(key) -> Qnil
 *
 * Deletes key from hashtable.
 */
static VALUE LocalMemCache__delete(VALUE obj, VALUE key) {
  return local_memcache_delete(get_LocalMemCache(obj),
      rstring_ptr(key));
  return Qnil;
}

/*
 * call-seq:
 *   lmc.close() -> Qnil
 *
 * Releases hashtable.
 */
static VALUE LocalMemCache__close(VALUE obj) {
  local_memcache_free(get_LocalMemCache(obj));
  return Qnil;
}

static VALUE LocalMemCache;

void Init_rblocalmemcache() {
  LocalMemCacheError = rb_define_class("LocalMemCacheError", rb_eStandardError);
  LocalMemCache = rb_define_class("LocalMemCache", rb_cObject);
  rb_define_singleton_method(LocalMemCache, "_new", LocalMemCache__new2, 2);
  rb_define_singleton_method(LocalMemCache, "_clear_namespace",
      LocalMemCache__clear_namespace, 2);
  rb_define_method(LocalMemCache, "get", LocalMemCache__get, 1);
  rb_define_method(LocalMemCache, "[]", LocalMemCache__get, 1);
  rb_define_method(LocalMemCache, "delete", LocalMemCache__delete, 1);
  rb_define_method(LocalMemCache, "set", LocalMemCache__set, 2);
  rb_define_method(LocalMemCache, "[]=", LocalMemCache__set, 2);
  rb_define_method(LocalMemCache, "close", LocalMemCache__close, 0);
}