pogocache-ruby 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1990 @@
+ // https://github.com/tidwall/pogocache
+ //
+ // Copyright 2025 Polypoint Labs, LLC. All rights reserved.
+ // This file is part of the Pogocache project.
+ // Use of this source code is governed by the AGPL that can be found in
+ // the LICENSE file.
+ //
+ // For alternative licensing options or general questions, please contact
+ // us at licensing@polypointlabs.com.
+ //
+ // Unit pogocache.c is the primary caching engine library, which is designed
+ // to be standalone and embeddable.
+ #include <stdbool.h>
+ #include <inttypes.h>
+ #include <stdatomic.h>
+ #include <errno.h>
+ #include <assert.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <ctype.h>
+ #include <stdlib.h>
+ #include <time.h>
+ #include <math.h>
+ #include "pogocache.h"
+
+ #define MINLOADFACTOR_RH 55 // 55%
+ #define MAXLOADFACTOR_RH 95 // 95%
+ #define DEFLOADFACTOR 75 // 75%
+ #define SHRINKAT 10 // 10%
+ #define DEFSHARDS 4096 // default number of shards
+ #define INITCAP 64 // initial number of buckets per shard
+
+ // #define DBGCHECKENTRY
+ // #define EVICTONITER
+ // #define HALFSECONDTIME
+ // #define NO48BITPTRS
+
+ #if INTPTR_MAX == INT64_MAX
+ #ifdef NO48BITPTRS
+ #define PTRSIZE 8
+ #else
+ #define PTRSIZE 6
+ #endif
+ #elif INTPTR_MAX == INT32_MAX
+ #define PTRSIZE 4
+ #else
+ #error Unknown pointer size
+ #endif
+
+ static struct pogocache_count_opts defcountopts = { 0 };
+ static struct pogocache_total_opts deftotalopts = { 0 };
+ static struct pogocache_size_opts defsizeopts = { 0 };
+ static struct pogocache_sweep_opts defsweepopts = { 0 };
+ static struct pogocache_clear_opts defclearopts = { 0 };
+ static struct pogocache_store_opts defstoreopts = { 0 };
+ static struct pogocache_load_opts defloadopts = { 0 };
+ static struct pogocache_delete_opts defdeleteopts = { 0 };
+ static struct pogocache_iter_opts defiteropts = { 0 };
+ static struct pogocache_sweep_poll_opts defsweeppollopts = { 0 };
+
+ static int64_t nanotime(struct timespec *ts) {
+     int64_t x = ts->tv_sec;
+     x *= 1000000000;
+     x += ts->tv_nsec;
+     return x;
+ }
+
+ // Returns the current monotonic clock time in nanoseconds.
+ static int64_t gettime(void) {
+     struct timespec now = { 0 };
+ #ifdef __linux__
+     clock_gettime(CLOCK_BOOTTIME, &now);
+ #elif defined(__APPLE__)
+     clock_gettime(CLOCK_UPTIME_RAW, &now);
+ #else
+     clock_gettime(CLOCK_MONOTONIC, &now);
+ #endif
+     return nanotime(&now);
+ }
+
+ // Returns the current time, as reported by the monotonic clock.
+ static int64_t getnow(void) {
+     return gettime();
+ }
+
+ // https://github.com/tidwall/th64
+ static uint64_t th64(const void *data, size_t len, uint64_t seed) {
+     uint8_t*p=(uint8_t*)data,*e=p+len;
+     uint64_t r=0x14020a57acced8b7,x,h=seed;
+     while(p+8<=e)memcpy(&x,p,8),x*=r,p+=8,x=x<<31|x>>33,h=h*r^x,h=h<<31|h>>33;
+     while(p<e)h=h*r^*(p++);
+     return(h=h*r+len,h^=h>>31,h*=r,h^=h>>31,h*=r,h^=h>>31,h*=r,h);
+ }
+
+ // Load a pointer from unaligned memory.
+ static void *load_ptr(const uint8_t data[PTRSIZE]) {
+ #if PTRSIZE == 4
+     uint32_t uptr;
+     memcpy(&uptr, data, 4);
+     return (void*)(uintptr_t)uptr;
+ #elif PTRSIZE == 6
+     uint64_t uptr = 0;
+     uptr |= ((uint64_t)data[0])<<0;
+     uptr |= ((uint64_t)data[1])<<8;
+     uptr |= ((uint64_t)data[2])<<16;
+     uptr |= ((uint64_t)data[3])<<24;
+     uptr |= ((uint64_t)data[4])<<32;
+     uptr |= ((uint64_t)data[5])<<40;
+     return (void*)(uintptr_t)uptr;
+ #elif PTRSIZE == 8
+     uint64_t uptr;
+     memcpy(&uptr, data, 8);
+     return (void*)(uintptr_t)uptr;
+ #endif
+ }
+
+ // Store a pointer into unaligned memory.
+ static void store_ptr(uint8_t data[PTRSIZE], void *ptr) {
+ #if PTRSIZE == 4
+     uint32_t uptr = (uintptr_t)(void*)ptr;
+     memcpy(data, &uptr, 4);
+ #elif PTRSIZE == 6
+     uint64_t uptr = (uintptr_t)(void*)ptr;
+     data[0] = (uptr>>0)&0xFF;
+     data[1] = (uptr>>8)&0xFF;
+     data[2] = (uptr>>16)&0xFF;
+     data[3] = (uptr>>24)&0xFF;
+     data[4] = (uptr>>32)&0xFF;
+     data[5] = (uptr>>40)&0xFF;
+ #elif PTRSIZE == 8
+     uint64_t uptr = (uintptr_t)(void*)ptr;
+     memcpy(data, &uptr, 8);
+ #endif
+ }
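+
+ // A note on the 48-bit scheme above (illustrative; not in the original
+ // source): with PTRSIZE == 6 only the low 48 bits of a pointer are kept,
+ // which assumes user-space addresses fit in 48 bits, as they do on
+ // today's common 64-bit platforms. Define NO48BITPTRS to keep the full
+ // 8 bytes instead. A round-trip looks like:
+ //
+ //     uint8_t buf[PTRSIZE];
+ //     int v = 42;
+ //     store_ptr(buf, &v);       // packs the low 48 bits of &v
+ //     int *p = load_ptr(buf);   // zero-extends back to a full pointer
+ //     assert(*p == 42);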
+
+ // https://zimbry.blogspot.com/2011/09/better-bit-mixing-improving-on.html
+ static uint64_t mix13(uint64_t key) {
+     key ^= (key >> 30);
+     key *= UINT64_C(0xbf58476d1ce4e5b9);
+     key ^= (key >> 27);
+     key *= UINT64_C(0x94d049bb133111eb);
+     key ^= (key >> 31);
+     return key;
+ }
+
+ // Sixpack compression algorithm
+ // - Converts a simple 8-bit string into a 6-bit string.
+ // - Intended to be used on small strings that only use characters commonly
+ //   used for keys in KV data stores.
+ // - Allows the following 64-item character set:
+ //   -.0123456789:ABCDEFGHIJKLMNOPRSTUVWXY_abcdefghijklmnopqrstuvwxy
+ //   Note that the characters "QZz" are not included.
+ // - Sortable and comparable using memcmp.
+ static char tosix[256] = {
+     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0-15
+     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16-31
+     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, // 32-47
+     3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 0, 0, // 48-63
+     0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, // 64-79
+     29, 0, 30, 31, 32, 33, 34, 35, 36, 37, 0, 0, 0, 0, 0, 38, // 80-95
+     0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, // 96-111
+     54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0, 0, 0, 0, 0, 0, // 112-127
+ };
+
+ static char fromsix[] = {
+     0, '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':',
+     'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
+     'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', '_', 'a', 'b', 'c',
+     'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
+     'r', 's', 't', 'u', 'v', 'w', 'x', 'y'
+ };
+
+ // 0: [000000..] bitpos: 0
+ // 1: [00000011][1111....] bitpos: 6
+ // 2: [00000011][11112222][22......] bitpos: 12
+ // 3: [00000011][11112222][22333333] bitpos: 18
+
+ // Sixpack data.
+ // Fills the data in dst and returns the number of bytes filled.
+ // Returns 0 if the data is not sixpackable.
+ // The dst array must be large enough to hold the packed value.
+ static int sixpack(const char *data, int len, char dst[]){
+     const unsigned char *bytes = (unsigned char*)data;
+     int j = 0;
+     for (int i = 0; i < len; i++) {
+         int k6v = tosix[bytes[i]];
+         if (k6v == 0) {
+             return 0;
+         }
+         if (i%4 == 0) {
+             dst[j++] = k6v<<2;
+         } else if (i%4 == 1) {
+             dst[j-1] |= k6v>>4;
+             dst[j++] = k6v<<4;
+         } else if (i%4 == 2) {
+             dst[j-1] |= k6v>>2;
+             dst[j++] = k6v<<6;
+         } else {
+             dst[j-1] |= k6v;
+         }
+     }
+     return j;
+ }
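+
+ // Worked example (illustrative; not in the original source): the 9-byte
+ // key "user:1001" uses only charset characters, so sixpack() packs it
+ // into ceil(9*6/8) = 7 bytes. A key containing any byte outside the
+ // charset (e.g. 'z' or '!') is left unpacked, since sixpack() returns 0.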
+
+ // Unsixpack data.
+ // Fills the data in dst and returns the length of the original data.
+ // The data must be sixpacked and len must be > 0.
+ // The dst array must be large enough to hold the unpacked value.
+ static int unsixpack(const char *data, int len, char dst[]) {
+     const unsigned char *bytes = (unsigned char*)data;
+     int j = 0;
+     int k = 0;
+     for (int i = 0; i < len; i++) {
+         if (k == 0) {
+             dst[j++] = fromsix[bytes[i]>>2];
+             k++;
+         } else if (k == 1) {
+             dst[j++] = fromsix[((bytes[i-1]<<4)|(bytes[i]>>4))&63];
+             k++;
+         } else {
+             dst[j++] = fromsix[((bytes[i-1]<<2)|(bytes[i]>>6))&63];
+             dst[j++] = fromsix[bytes[i]&63];
+             k = 0;
+         }
+     }
+     if (j > 0 && dst[j-1] == 0) {
+         j--;
+     }
+     return j;
+ }
+
+ // Safely adds two int64_t values, clamping on overflow.
+ static int64_t int64_add_clamp(int64_t a, int64_t b) {
+     if (!((a ^ b) < 0)) { // Opposite signs can't overflow
+         if (a > 0) {
+             if (b > INT64_MAX - a) {
+                 return INT64_MAX;
+             }
+         } else if (b < INT64_MIN - a) {
+             return INT64_MIN;
+         }
+     }
+     return a + b;
+ }
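+
+ // For example (illustrative): int64_add_clamp(INT64_MAX-1, 5) returns
+ // INT64_MAX rather than wrapping around. This is used below to compute
+ // expiration times from a caller-provided TTL.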
+
+ /// https://github.com/tidwall/varint.c
+ static int varint_write_u64(void *data, uint64_t x) {
+     uint8_t *bytes = data;
+     if (x < 128) {
+         *bytes = x;
+         return 1;
+     }
+     int n = 0;
+     do {
+         bytes[n++] = (uint8_t)x | 128;
+         x >>= 7;
+     } while (x >= 128);
+     bytes[n++] = (uint8_t)x;
+     return n;
+ }
+
+ static int varint_read_u64(const void *data, size_t len, uint64_t *x) {
+     const uint8_t *bytes = data;
+     if (len > 0 && bytes[0] < 128) {
+         *x = bytes[0];
+         return 1;
+     }
+     uint64_t b;
+     *x = 0;
+     size_t i = 0;
+     while (i < len && i < 10) {
+         b = bytes[i];
+         *x |= (b & 127) << (7 * i);
+         if (b < 128) {
+             return i + 1;
+         }
+         i++;
+     }
+     return i == 10 ? -1 : 0;
+ }
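+
+ // Worked example (illustrative; not in the original source): the varint
+ // stores 7 bits per byte, setting the high bit on every byte except the
+ // last. So 300 (0x12C) encodes to two bytes:
+ //
+ //     uint8_t buf[10];
+ //     int n = varint_write_u64(buf, 300);   // buf = {0xAC, 0x02}, n == 2
+ //     uint64_t x;
+ //     varint_read_u64(buf, n, &x);          // x == 300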
+
+ #ifdef HALFSECONDTIME
+ typedef uint32_t etime_t;
+ #else
+ typedef int64_t etime_t;
+ #endif
+
+
+ // Mostly a copy of pogocache_opts, but used internally.
+ // See the opts_to_ctx function for translation.
+ struct pgctx {
+     void *(*malloc)(size_t);
+     void (*free)(void*);
+     size_t (*malloc_size)(void*);
+     void (*yield)(void *udata);
+     void (*evicted)(int shard, int reason, int64_t time, const void *key,
+         size_t keylen, const void *val, size_t vallen, int64_t expires,
+         uint32_t flags, uint64_t cas, void *udata);
+     void *udata;
+     bool usecas;
+     bool nosixpack;
+     bool noevict;
+     bool allowshrink;
+     bool usethreadbatch;
+     int nshards;
+     double loadfactor;
+     double shrinkfactor;
+     uint64_t seed;
+ };
+
+ // The entry structure is a simple allocation with all the fields, being
+ // variable in size, slammed together contiguously. There's a one-byte header
+ // that provides information about what is available in the structure.
+ // The format is: (header,time,expires?,flags?,cas?,key,value)
+ // The expires, flags, and cas fields are optional. The optionality depends on
+ // header bit flags.
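+ //
+ // For illustration (not in the original source), an entry that has an
+ // expire time and a sixpacked key, but no flags and no cas, is laid
+ // out as:
+ //
+ //     [hdr=0b1001][time][expires:8][keylen:varint][packed key][vallen:varint][value]
+ //
+ // where header bit 0 = expires present, bit 1 = flags present,
+ // bit 2 = cas present, and bit 3 = key is sixpacked.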
+ struct entry;
+
+ // Returns the size of the entry struct, which takes up no space at all.
+ // This would be like doing a sizeof(struct entry), if entry had a structure.
+ static size_t entry_struct_size(void) {
+     return 0;
+ }
+
+ // Returns the data portion of the entry, which is the entire allocation.
+ static const uint8_t *entry_data(const struct entry *entry) {
+     return (uint8_t*)entry;
+ }
+
+ static int64_t entry_expires(const struct entry *entry) {
+     const uint8_t *p = entry_data(entry);
+     uint8_t hdr = *(p++); // hdr
+     p += sizeof(etime_t); // time
+     int64_t x = 0;
+     if ((hdr>>0)&1) {
+         memcpy(&x, p, 8);
+     }
+     return x;
+ }
+
+ static int64_t entry_time(struct entry *entry) {
+     const uint8_t *p = entry_data(entry);
+     etime_t etime;
+     memcpy(&etime, p+1, sizeof(etime_t));
+ #ifdef HALFSECONDTIME
+     int64_t time = (int64_t)etime * INT64_C(500000000);
+ #else
+     int64_t time = etime;
+ #endif
+     return time;
+ }
+
+ static void entry_settime(struct entry *entry, int64_t time) {
+     const uint8_t *p = entry_data(entry);
+ #ifdef HALFSECONDTIME
+     // Eviction time is stored as half seconds.
+     etime_t etime = time / INT64_C(500000000);
+     etime = etime > UINT32_MAX ? UINT32_MAX : etime;
+ #else
+     etime_t etime = time;
+ #endif
+     memcpy((uint8_t*)(p+1), &etime, sizeof(etime_t));
+ }
+
+ static int entry_alive_exp(int64_t expires, int64_t etime, int64_t now,
+     int64_t cleartime)
+ {
+     return etime < cleartime ? POGOCACHE_REASON_CLEARED :
+         expires > 0 && expires <= now ? POGOCACHE_REASON_EXPIRED :
+         0;
+ }
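+
+ // Note that the cleared check takes precedence: an entry written before
+ // the shard's last clear reports POGOCACHE_REASON_CLEARED even if its
+ // expiration has also lapsed.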
+
+ static int entry_alive(struct entry *entry, int64_t now, int64_t cleartime) {
+     int64_t etime = entry_time(entry);
+     int64_t expires = entry_expires(entry);
+     return entry_alive_exp(expires, etime, now, cleartime);
+ }
+
+ static uint64_t entry_cas(const struct entry *entry) {
+     const uint8_t *p = entry_data(entry);
+     uint8_t hdr = *(p++); // hdr
+     p += sizeof(etime_t); // time
+     if ((hdr>>0)&1) {
+         p += 8; // expires
+     }
+     if ((hdr>>1)&1) {
+         p += 4; // flags
+     }
+     uint64_t x = 0;
+     if ((hdr>>2)&1) {
+         memcpy(&x, p, 8);
+     }
+     return x;
+ }
+
+ // Returns the key. If using sixpack make sure to copy the result asap.
+ static const char *entry_key(const struct entry *entry, size_t *keylen_out,
+     char buf[128])
+ {
+     const uint8_t *p = entry_data(entry);
+     const uint8_t hdr = *(p++); // hdr
+     p += sizeof(etime_t); // time
+     if ((hdr>>0)&1) {
+         p += 8; // expires
+     }
+     if ((hdr>>1)&1) {
+         p += 4; // flags
+     }
+     if ((hdr>>2)&1) {
+         p += 8; // cas
+     }
+     uint64_t x;
+     p += varint_read_u64(p, 10, &x); // keylen
+     size_t keylen = x;
+     char *key = (char*)p;
+     if ((hdr>>3)&1) {
+         keylen = unsixpack(key, (int)keylen, buf);
+         key = buf;
+     }
+     *keylen_out = keylen;
+     return key;
+ }
+
+ // Returns the raw key. A sixpacked key is returned in its packed format.
+ static const char *entry_rawkey(const struct entry *entry, size_t *keylen_out) {
+     const uint8_t *p = entry_data(entry);
+     const uint8_t hdr = *(p++); // hdr
+     p += sizeof(etime_t); // time
+     if ((hdr>>0)&1) {
+         p += 8; // expires
+     }
+     if ((hdr>>1)&1) {
+         p += 4; // flags
+     }
+     if ((hdr>>2)&1) {
+         p += 8; // cas
+     }
+     uint64_t x;
+     p += varint_read_u64(p, 10, &x); // keylen
+     size_t keylen = x;
+     char *key = (char*)p;
+     *keylen_out = keylen;
+     return key;
+ }
+
+ static bool entry_sixpacked(const struct entry *entry) {
+     const uint8_t *p = entry_data(entry);
+     uint8_t hdr = *(p);
+     return (hdr>>3)&1;
+ }
+
+ static size_t entry_extract(const struct entry *entry, const char **key,
+     size_t *keylen, char buf[128], const char **val, size_t *vallen,
+     int64_t *expires, uint32_t *flags, uint64_t *cas,
+     struct pgctx *ctx)
+ {
+     const uint8_t *p = entry_data(entry);
+     uint8_t hdr = *(p++); // hdr
+     p += sizeof(etime_t); // time
+     if ((hdr>>0)&1) {
+         if (expires) {
+             memcpy(expires, p, 8);
+         }
+         p += 8; // expires
+     } else {
+         if (expires) {
+             *expires = 0;
+         }
+     }
+     if ((hdr>>1)&1) {
+         if (flags) {
+             memcpy(flags, p, 4);
+         }
+         p += 4; // flags
+     } else {
+         if (flags) {
+             *flags = 0;
+         }
+     }
+     if (ctx->usecas) {
+         if (cas) {
+             memcpy(cas, p, 8);
+         }
+         p += 8; // cas
+     } else {
+         if (cas) {
+             *cas = 0;
+         }
+     }
+     uint64_t x;
+     p += varint_read_u64(p, 10, &x); // keylen
+     if (key) {
+         *key = (char*)p;
+         *keylen = x;
+         if ((hdr>>3)&1) {
+             *keylen = unsixpack(*key, (int)*keylen, buf);
+             *key = buf;
+         }
+     }
+     p += x; // key
+     p += varint_read_u64(p, 10, &x); // vallen
+     if (val) {
+         *val = (char*)p;
+         *vallen = x;
+     }
+     p += x; // val
+     return entry_struct_size()+(p-(uint8_t*)entry);
+ }
+
+ static size_t entry_memsize(const struct entry *entry,
+     struct pgctx *ctx)
+ {
+     const uint8_t *p = entry_data(entry);
+     uint8_t hdr = *(p++); // hdr
+     p += sizeof(etime_t); // time
+     if ((hdr>>0)&1) {
+         p += 8; // expires
+     }
+     if ((hdr>>1)&1) {
+         p += 4; // flags
+     }
+     if (ctx->usecas) {
+         p += 8; // cas
+     }
+     uint64_t x;
+     p += varint_read_u64(p, 10, &x); // keylen
+     p += x; // key
+     p += varint_read_u64(p, 10, &x); // vallen
+     p += x; // val
+     return entry_struct_size()+(p-(uint8_t*)entry);
+ }
+
+ // The 'cas' param should always be set to zero unless loading from disk.
+ // Setting to zero will set a new unique cas to the entry.
+ static struct entry *entry_new(const char *key, size_t keylen, const char *val,
+     size_t vallen, int64_t expires, uint32_t flags, uint64_t cas,
+     struct pgctx *ctx)
+ {
+     bool usesixpack = !ctx->nosixpack;
+ #ifdef DBGCHECKENTRY
+     // printf("entry_new(key=[%.*s], keylen=%zu, val=[%.*s], vallen=%zu, "
+     //     "expires=%" PRId64 ", flags=%" PRId32 ", cas=%" PRIu64 ", "
+     //     "usesixpack=%d\n", (int)keylen, key, keylen, (int)vallen, key, vallen,
+     //     expires, flags, cas, usesixpack);
+     int64_t oexpires = expires;
+     uint32_t oflags = flags;
+     uint64_t ocas = cas;
+     const char *okey = key;
+     size_t okeylen = keylen;
+     const char *oval = val;
+     size_t ovallen = vallen;
+ #endif
+     uint8_t hdr = 0;
+     uint8_t keylenbuf[10];
+     uint8_t vallenbuf[10];
+     int nexplen, nflagslen, ncaslen, nkeylen, nvallen;
+     if (expires > 0) {
+         hdr |= 1;
+         nexplen = 8;
+     } else {
+         nexplen = 0;
+     }
+     if (flags > 0) {
+         hdr |= 2;
+         nflagslen = 4;
+     } else {
+         nflagslen = 0;
+     }
+     if (ctx->usecas) {
+         hdr |= 4;
+         ncaslen = 8;
+     } else {
+         ncaslen = 0;
+     }
+     char buf[128];
+     if (usesixpack && keylen <= 128) {
+         size_t len = sixpack(key, keylen, buf);
+         if (len > 0) {
+             hdr |= 8;
+             keylen = len;
+             key = buf;
+         }
+     }
+     nkeylen = varint_write_u64(keylenbuf, keylen);
+     nvallen = varint_write_u64(vallenbuf, vallen);
+     struct entry *entry_out = 0;
+     size_t size = entry_struct_size()+1+sizeof(etime_t)+nexplen+nflagslen+
+         ncaslen+nkeylen+keylen+nvallen+vallen;
+     // printf("malloc=%p size=%zu, ctx=%p\n", ctx->malloc, size, ctx);
+     void *mem = ctx->malloc(size);
+     struct entry *entry = mem;
+     if (!entry) {
+         return 0;
+     }
+     uint8_t *p = (void*)entry_data(entry);
+     *(p++) = hdr;
+     memset(p, 0, sizeof(etime_t));
+     p += sizeof(etime_t); // time
+     if (nexplen > 0) {
+         memcpy(p, &expires, nexplen);
+         p += nexplen;
+     }
+     if (nflagslen > 0) {
+         memcpy(p, &flags, nflagslen);
+         p += nflagslen;
+     }
+     if (ncaslen > 0) {
+         memcpy(p, &cas, ncaslen);
+         p += ncaslen;
+     }
+     memcpy(p, keylenbuf, nkeylen);
+     p += nkeylen;
+     memcpy(p, key, keylen);
+     p += keylen;
+     memcpy(p, vallenbuf, nvallen);
+     p += nvallen;
+     memcpy(p, val, vallen);
+     p += vallen;
+     entry_out = entry;
+ #ifdef DBGCHECKENTRY
+     // check the key
+     const char *key2, *val2;
+     size_t keylen2, vallen2;
+     int64_t expires2;
+     uint32_t flags2;
+     uint64_t cas2;
+     char buf1[256];
+     entry_extract(entry_out, &key2, &keylen2, buf1, &val2, &vallen2, &expires2,
+         &flags2, &cas2, ctx);
+     assert(expires2 == oexpires);
+     assert(flags2 == oflags);
+     assert(cas2 == ocas);
+     assert(keylen2 == okeylen);
+     assert(memcmp(key2, okey, okeylen) == 0);
+     assert(vallen2 == ovallen);
+     assert(memcmp(val2, oval, ovallen) == 0);
+ #endif
+     return entry_out;
+ }
+
+ static void entry_free(struct entry *entry, struct pgctx *ctx) {
+     ctx->free(entry);
+ }
+
+ static int entry_compare(const struct entry *a, const struct entry *b) {
+     size_t akeylen, bkeylen;
+     char buf1[256], buf2[256];
+     const char *akey;
+     const char *bkey;
+     if (entry_sixpacked(a) == entry_sixpacked(b)) {
+         akey = entry_rawkey(a, &akeylen);
+         bkey = entry_rawkey(b, &bkeylen);
+     } else {
+         akey = entry_key(a, &akeylen, buf1);
+         bkey = entry_key(b, &bkeylen, buf2);
+     }
+     size_t size = akeylen < bkeylen ? akeylen : bkeylen;
+     int cmp = memcmp(akey, bkey, size);
+     if (cmp == 0) {
+         cmp = akeylen < bkeylen ? -1 : akeylen > bkeylen;
+     }
+     return cmp;
+ }
+
+ #ifndef HASHSIZE
+ #define HASHSIZE 3
+ #endif
+ #if HASHSIZE < 1 || HASHSIZE > 4
+ #error bad hash size
+ #endif
+
+ struct bucket {
+     uint8_t entry[PTRSIZE]; // 48-bit pointer
+     uint8_t hash[HASHSIZE]; // 24-bit hash
+     uint8_t dib; // distance to bucket
+ };
+
+ static_assert(sizeof(struct bucket) == PTRSIZE+HASHSIZE+1, "bad bucket size");
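+
+ // With the defaults (PTRSIZE 6, HASHSIZE 3) a bucket is 6+3+1 = 10
+ // bytes, so a freshly initialized shard of INITCAP (64) buckets costs
+ // only 640 bytes of table space before any entries are stored.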
+
+ struct map {
+     int cap; // initial capacity
+     int nbuckets; // number of buckets
+     int count; // current entry count
+     int mask; // bit mask for bucket index
+     int growat;
+     int shrinkat;
+     struct bucket *buckets;
+     uint64_t total; // total entries ever stored
+     size_t entsize; // memory size of all entries
+ };
+
+ struct shard {
+     atomic_uintptr_t lock; // spinlock (batch pointer)
+     uint64_t cas; // compare and store value
+     int64_t cleartime; // last clear time
+     int clearcount; // number of items cleared
+     struct map map; // robinhood hashmap
+     // for batch linked list only
+     struct shard *next;
+ };
+
+ static void lock_init(struct shard *shard) {
+     atomic_init(&shard->lock, 0);
+ }
+
+ struct batch {
+     struct pogocache *cache; // associated cache.
+     struct shard *shard; // first locked shard
+     int64_t time; // timestamp
+ };
+
+ struct pogocache {
+     bool isbatch;
+     union {
+         struct pgctx ctx;
+         struct batch batch;
+     };
+     struct shard shards[];
+ };
+
+ static struct entry *get_entry(struct bucket *bucket) {
+     return load_ptr(bucket->entry);
+ }
+
+ static void set_entry(struct bucket *bucket, struct entry *entry) {
+     store_ptr(bucket->entry, entry);
+ }
+
+ #if HASHSIZE == 1
+ static uint32_t clip_hash(uint32_t hash) {
+     return hash&0xFF;
+ }
+ static void write_hash(uint8_t data[1], uint32_t hash) {
+     data[0] = (hash>>0)&0xFF;
+ }
+
+ static uint32_t read_hash(uint8_t data[1]) {
+     uint32_t hash = 0;
+     hash |= ((uint64_t)data[0])<<0;
+     return hash;
+ }
+ #elif HASHSIZE == 2
+ static uint32_t clip_hash(uint32_t hash) {
+     return hash&0xFFFF;
+ }
+ static void write_hash(uint8_t data[2], uint32_t hash) {
+     data[0] = (hash>>0)&0xFF;
+     data[1] = (hash>>8)&0xFF;
+ }
+
+ static uint32_t read_hash(uint8_t data[2]) {
+     uint32_t hash = 0;
+     hash |= ((uint64_t)data[0])<<0;
+     hash |= ((uint64_t)data[1])<<8;
+     return hash;
+ }
+ #elif HASHSIZE == 3
+ static uint32_t clip_hash(uint32_t hash) {
+     return hash&0xFFFFFF;
+ }
+ static void write_hash(uint8_t data[3], uint32_t hash) {
+     data[0] = (hash>>0)&0xFF;
+     data[1] = (hash>>8)&0xFF;
+     data[2] = (hash>>16)&0xFF;
+ }
+
+ static uint32_t read_hash(uint8_t data[3]) {
+     uint32_t hash = 0;
+     hash |= ((uint64_t)data[0])<<0;
+     hash |= ((uint64_t)data[1])<<8;
+     hash |= ((uint64_t)data[2])<<16;
+     return hash;
+ }
+ #else
+ static uint32_t clip_hash(uint32_t hash) {
+     return hash;
+ }
+ static void write_hash(uint8_t data[4], uint32_t hash) {
+     data[0] = (hash>>0)&0xFF;
+     data[1] = (hash>>8)&0xFF;
+     data[2] = (hash>>16)&0xFF;
+     data[3] = (hash>>24)&0xFF;
+ }
+
+ static uint32_t read_hash(uint8_t data[4]) {
+     uint32_t hash = 0;
+     hash |= ((uint64_t)data[0])<<0;
+     hash |= ((uint64_t)data[1])<<8;
+     hash |= ((uint64_t)data[2])<<16;
+     hash |= ((uint64_t)data[3])<<24;
+     return hash;
+ }
+ #endif
+
+ static uint32_t get_hash(struct bucket *bucket) {
+     return read_hash(bucket->hash);
+ }
+
+ static void set_hash(struct bucket *bucket, uint32_t hash) {
+     write_hash(bucket->hash, hash);
+ }
+
+ static uint8_t get_dib(struct bucket *bucket) {
+     return bucket->dib;
+ }
+
+ static void set_dib(struct bucket *bucket, uint8_t dib) {
+     bucket->dib = dib;
+ }
+
+ static bool map_init(struct map *map, size_t cap, struct pgctx *ctx) {
+     map->cap = cap;
+     map->nbuckets = cap;
+     map->count = 0;
+     map->mask = map->nbuckets-1;
+     map->growat = map->nbuckets * ctx->loadfactor;
+     map->shrinkat = map->nbuckets * ctx->shrinkfactor;
+     size_t size = sizeof(struct bucket)*map->nbuckets;
+     map->buckets = ctx->malloc(size);
+     if (!map->buckets) {
+         // nomem
+         memset(map, 0, sizeof(struct map));
+         return false;
+     }
+     memset(map->buckets, 0, size);
+     return true;
+ }
+
+ static bool resize(struct map *map, size_t new_cap, struct pgctx *ctx) {
+     struct map map2;
+     if (!map_init(&map2, new_cap, ctx)) {
+         return false;
+     }
+     for (int i = 0; i < map->nbuckets; i++) {
+         struct bucket ebkt = map->buckets[i];
+         if (get_dib(&ebkt)) {
+             set_dib(&ebkt, 1);
+             size_t j = get_hash(&ebkt) & map2.mask;
+             while (1) {
+                 if (get_dib(&map2.buckets[j]) == 0) {
+                     map2.buckets[j] = ebkt;
+                     break;
+                 }
+                 if (get_dib(&map2.buckets[j]) < get_dib(&ebkt)) {
+                     struct bucket tmp = map2.buckets[j];
+                     map2.buckets[j] = ebkt;
+                     ebkt = tmp;
+                 }
+                 j = (j + 1) & map2.mask;
+                 set_dib(&ebkt, get_dib(&ebkt)+1);
+             }
+         }
+     }
+     int org_cap = map->cap;
+     int org_count = map->count;
+     ctx->free(map->buckets);
+     memcpy(map, &map2, sizeof(struct map));
+     map->cap = org_cap;
+     map->count = org_count;
+     return true;
+ }
+
+ static bool map_insert(struct map *map, struct entry *entry, uint32_t hash,
+     struct entry **old, struct pgctx *ctx)
+ {
+     hash = clip_hash(hash);
+     if (map->count >= map->growat) {
+         if (!resize(map, map->nbuckets*2, ctx)) {
+             *old = 0;
+             return false;
+         }
+     }
+     map->entsize += entry_memsize(entry, ctx);
+     struct bucket ebkt;
+     set_entry(&ebkt, entry);
+     set_hash(&ebkt, hash);
+     set_dib(&ebkt, 1);
+     size_t i = hash & map->mask;
+     while (1) {
+         if (get_dib(&map->buckets[i]) == 0) {
+             // new entry
+             map->buckets[i] = ebkt;
+             map->count++;
+             map->total++;
+             *old = 0;
+             return true;
+         }
+         if (get_hash(&ebkt) == get_hash(&map->buckets[i]) &&
+             entry_compare(get_entry(&ebkt), get_entry(&map->buckets[i])) == 0)
+         {
+             // replaced
+             *old = get_entry(&map->buckets[i]);
+             map->entsize -= entry_memsize(*old, ctx);
+             set_entry(&map->buckets[i], get_entry(&ebkt));
+             return true;
+         }
+         if (get_dib(&map->buckets[i]) < get_dib(&ebkt)) {
+             struct bucket tmp = map->buckets[i];
+             map->buckets[i] = ebkt;
+             ebkt = tmp;
+         }
+         i = (i + 1) & map->mask;
+         set_dib(&ebkt, get_dib(&ebkt)+1);
+     }
+ }
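+
+ // A note on the probing scheme above (robin hood hashing): each occupied
+ // bucket records its probe distance (dib). An entry being inserted that
+ // has probed farther than the resident entry swaps places with it and
+ // continues probing with the displaced bucket, which keeps probe lengths
+ // short and roughly uniform across the table.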
+
+ static bool bucket_eq(struct map *map, size_t i, const char *key,
+     size_t keylen, uint32_t hash)
+ {
+     if (get_hash(&map->buckets[i]) != hash) {
+         return false;
+     }
+     size_t keylen2;
+     char buf[128];
+     const char *key2 = entry_key(get_entry(&map->buckets[i]), &keylen2, buf);
+     return keylen == keylen2 && memcmp(key, key2, keylen) == 0;
+ }
+
+ // Returns the bucket index for key, or -1 if not found.
+ static int map_get_bucket(struct map *map, const char *key, size_t keylen,
+     uint32_t hash)
+ {
+     hash = clip_hash(hash);
+     size_t i = hash & map->mask;
+     while (1) {
+         struct bucket *bkt = &map->buckets[i];
+         if (get_dib(bkt) == 0) {
+             return -1;
+         }
+         if (bucket_eq(map, i, key, keylen, hash)) {
+             return i;
+         }
+         i = (i + 1) & map->mask;
+     }
+ }
+
+ static struct entry *map_get_entry(struct map *map, const char *key,
+     size_t keylen, uint32_t hash, int *bkt_idx_out)
+ {
+     int i = map_get_bucket(map, key, keylen, hash);
+     *bkt_idx_out = i;
+     return i >= 0 ? get_entry(&map->buckets[i]) : 0;
+ }
+
+ // This deletes the entry at bucket i and adjusts the dibs of the buckets to
+ // the right, as needed.
+ static void delbkt(struct map *map, size_t i) {
+     set_dib(&map->buckets[i], 0);
+     while (1) {
+         size_t h = i;
+         i = (i + 1) & map->mask;
+         if (get_dib(&map->buckets[i]) <= 1) {
+             set_dib(&map->buckets[h], 0);
+             break;
+         }
+         map->buckets[h] = map->buckets[i];
+         set_dib(&map->buckets[h], get_dib(&map->buckets[h])-1);
+     }
+     map->count--;
+ }
+
+ static bool needsshrink(struct map *map, struct pgctx *ctx) {
+     return ctx->allowshrink && map->nbuckets > map->cap &&
+         map->count <= map->shrinkat;
+ }
+
+ // Try to shrink the hashmap. If needed, this will allocate a new hashmap that
+ // has fewer buckets and move all existing entries into the smaller map.
+ // The 'multi' param is a hint that multiple entries may have been deleted,
+ // such as with the iter or clear operations.
+ // If the resize fails due to an allocation error then the existing hashmap
+ // will be retained.
+ static void tryshrink(struct map *map, bool multi, struct pgctx *ctx) {
+     if (!needsshrink(map, ctx)) {
+         return;
+     }
+     int cap;
+     if (multi) {
+         // Determine how many buckets are needed to store all entries.
+         cap = map->cap;
+         int growat = cap * ctx->loadfactor;
+         while (map->count >= growat) {
+             cap *= 2;
+             growat = cap * ctx->loadfactor;
+         }
+     } else {
+         // Just half the buckets
+         cap = map->nbuckets / 2;
+     }
+     resize(map, cap, ctx);
+ }
+
+ // Delete an entry at a bucket position. Not called directly.
+ static struct entry *delentry_at_bkt(struct map *map, size_t i,
+     struct pgctx *ctx)
+ {
+     struct entry *old = get_entry(&map->buckets[i]);
+     assert(old);
+     map->entsize -= entry_memsize(old, ctx);
+     delbkt(map, i);
+     return old;
+ }
+
+ static struct entry *map_delete(struct map *map, const char *key,
+     size_t keylen, uint32_t hash, struct pgctx *ctx)
+ {
+     hash = clip_hash(hash);
+     int i = hash & map->mask;
+     while (1) {
+         if (get_dib(&map->buckets[i]) == 0) {
+             return 0;
+         }
+         if (bucket_eq(map, i, key, keylen, hash)) {
+             return delentry_at_bkt(map, i, ctx);
+         }
+         i = (i + 1) & map->mask;
+     }
+ }
+
+ static size_t evict_entry(struct shard *shard, int shardidx,
+     struct entry *entry, int64_t now, int reason, struct pgctx *ctx)
+ {
+     char buf[128];
+     size_t keylen;
+     const char *key = entry_key(entry, &keylen, buf);
+     uint32_t hash = th64(key, keylen, ctx->seed);
+     struct entry *del = map_delete(&shard->map, key, keylen, hash, ctx);
+     assert(del == entry); (void)del;
+     if (ctx->evicted) {
+         // Notify user that an entry was evicted.
+         const char *val;
+         size_t vallen;
+         int64_t expires = 0;
+         uint32_t flags = 0;
+         uint64_t cas = 0;
+         entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,
+             ctx);
+         ctx->evicted(shardidx, reason, now, key, keylen, val,
+             vallen, expires, flags, cas, ctx->udata);
+     }
+     shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);
+     size_t size = entry_memsize(entry, ctx);
+     entry_free(entry, ctx);
+     return size;
+ }
+
+ // Evict an entry using the 2-random algorithm.
+ // Pick two random entries and delete the one with the oldest access time.
+ // Do not evict the entry if it matches the provided hash.
+ static void auto_evict_entry(struct shard *shard, int shardidx, uint32_t hash,
+     int64_t now, struct pgctx *ctx)
+ {
+     hash = clip_hash(hash);
+     struct map *map = &shard->map;
+     struct entry *entries[2];
+     int count = 0;
+     for (int i = 1; i < map->nbuckets && count < 2; i++) {
+         size_t j = (i+hash)&(map->nbuckets-1);
+         struct bucket *bkt = &map->buckets[j];
+         if (get_dib(bkt) == 0) {
+             continue;
+         }
+         struct entry *entry = get_entry(bkt);
+         int reason = entry_alive(entry, now, shard->cleartime);
+         if (reason) {
+             // Entry has expired. Evict this one instead.
+             evict_entry(shard, shardidx, entry, now, reason, ctx);
+             return;
+         }
+         if (get_hash(bkt) == hash) {
+             continue;
+         }
+         entries[count++] = entry;
+     }
+     int choose;
+     if (count == 1) {
+         choose = 0;
+     } else if (count == 2) {
+         // We now have two candidates.
+         if (entry_time(entries[0]) < entry_time(entries[1])) {
+             choose = 0;
+         } else {
+             choose = 1;
+         }
+     } else {
+         return;
+     }
+     evict_entry(shard, shardidx, entries[choose], now, POGOCACHE_REASON_LOWMEM,
+         ctx);
+ }
+
+ static void shard_deinit(struct shard *shard, struct pgctx *ctx) {
+     struct map *map = &shard->map;
+     if (!map->buckets) {
+         return;
+     }
+     for (int i = 0; i < map->nbuckets; i++) {
+         struct bucket *bkt = &map->buckets[i];
+         if (get_dib(bkt) == 0) {
+             continue;
+         }
+         struct entry *entry = get_entry(bkt);
+         entry_free(entry, ctx);
+     }
+     ctx->free(map->buckets);
+ }
+
+ static bool shard_init(struct shard *shard, struct pgctx *ctx) {
+     memset(shard, 0, sizeof(struct shard));
+     lock_init(shard);
+     if (!map_init(&shard->map, INITCAP, ctx)) {
+         // nomem
+         shard_deinit(shard, ctx);
+         return false;
+     }
+     return true;
+ }
+
+ /// Free all cache and shard hashmap allocations.
+ /// This does not access the value data in any of the entries. If further
+ /// cleanup at the entry-value level is needed, use pogocache_iter to perform
+ /// the cleanup on each entry before calling this operation.
+ /// Also, this is not threadsafe. Make sure that no other threads are using
+ /// the cache while or after this function is called.
+ void pogocache_free(struct pogocache *cache) {
+     if (!cache) {
+         return;
+     }
+     struct pgctx *ctx = &cache->ctx;
+     for (int i = 0; i < cache->ctx.nshards; i++) {
+         shard_deinit(&cache->shards[i], ctx);
+     }
+     cache->ctx.free(cache);
+ }
+
+ static void opts_to_ctx(int nshards, struct pogocache_opts *opts,
+     struct pgctx *ctx)
+ {
+     ctx->nshards = nshards;
+     int loadfactor = 0;
+     if (opts) {
+         ctx->yield = opts->yield;
+         ctx->evicted = opts->evicted;
+         ctx->udata = opts->udata;
+         ctx->usecas = opts->usecas;
+         ctx->nosixpack = opts->nosixpack;
+         ctx->noevict = opts->noevict;
+         ctx->seed = opts->seed;
+         loadfactor = opts->loadfactor;
+         ctx->allowshrink = opts->allowshrink;
+         ctx->usethreadbatch = opts->usethreadbatch;
+     }
+     // Clamp the load factor and convert it to a fraction.
+     loadfactor = loadfactor == 0 ? DEFLOADFACTOR :
+         loadfactor < MINLOADFACTOR_RH ? MINLOADFACTOR_RH :
+         loadfactor > MAXLOADFACTOR_RH ? MAXLOADFACTOR_RH :
+         loadfactor;
+     ctx->loadfactor = ((double)loadfactor/100.0);
+     ctx->shrinkfactor = ((double)SHRINKAT/100.0);
+ }
+
+ static struct pogocache_opts newdefopts = { 0 };
+
+ /// Returns a new cache or null if there is not enough memory available.
+ /// See 'pogocache_opts' for all options.
+ struct pogocache *pogocache_new(struct pogocache_opts *opts) {
+     if (!opts) {
+         opts = &newdefopts;
+     }
+     void *(*_malloc)(size_t) = opts->malloc ? opts->malloc : malloc;
+     void (*_free)(void*) = opts->free ? opts->free : free;
+     int shards = !opts || opts->nshards <= 0 ? DEFSHARDS : opts->nshards;
+     size_t size = sizeof(struct pogocache)+shards*sizeof(struct shard);
+     struct pogocache *cache = _malloc(size);
+     if (!cache) {
+         return 0;
+     }
+     memset(cache, 0, sizeof(struct pogocache));
+     struct pgctx *ctx = &cache->ctx;
+     opts_to_ctx(shards, opts, ctx);
+     ctx->malloc = _malloc;
+     ctx->free = _free;
+     for (int i = 0; i < ctx->nshards; i++) {
+         if (!shard_init(&cache->shards[i], ctx)) {
+             // nomem
+             pogocache_free(cache);
+             return 0;
+         }
+     }
+     return cache;
+ }
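+
+ // A minimal embedding sketch (illustrative only; see pogocache.h for the
+ // authoritative declarations, option structs, and callback prototypes;
+ // my_entry_callback is a hypothetical user function):
+ //
+ //     struct pogocache *cache = pogocache_new(0);   // default options
+ //     pogocache_store(cache, "greeting", 8, "hello", 5, 0);
+ //     pogocache_load(cache, "greeting", 8, &(struct pogocache_load_opts){
+ //         .entry = my_entry_callback,   // receives the stored value
+ //     });
+ //     pogocache_free(cache);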
+
+ static int shard_index(struct pogocache *cache, uint64_t hash) {
+     return (hash>>32)%cache->ctx.nshards;
+ }
+
+ static struct shard *shard_get(struct pogocache *cache, int index) {
+     return &cache->shards[index];
+ }
+
+ /// Returns a timestamp.
+ int64_t pogocache_now(void) {
+     return getnow();
+ }
+
+ static __thread struct pogocache thbatch;
+
+ struct pogocache *pogocache_begin(struct pogocache *cache) {
+     struct pogocache *batch;
+     if (cache->ctx.usethreadbatch) {
+         batch = &thbatch;
+     } else {
+         batch = cache->ctx.malloc(sizeof(struct pogocache));
+         if (!batch) {
+             return 0;
+         }
+     }
+     batch->isbatch = true;
+     batch->batch.cache = cache;
+     batch->batch.shard = 0;
+     batch->batch.time = 0;
+     return batch;
+ }
+
+ void pogocache_end(struct pogocache *batch) {
+     assert(batch->isbatch);
+     struct shard *shard = batch->batch.shard;
+     while (shard) {
+         struct shard *next = shard->next;
+         shard->next = 0;
+         atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE);
+         shard = next;
+     }
+     if (!batch->batch.cache->ctx.usethreadbatch) {
+         batch->batch.cache->ctx.free(batch);
+     }
+ }
+
+ static void lock(struct batch *batch, struct shard *shard, struct pgctx *ctx) {
+     if (batch) {
+         while (1) {
+             uintptr_t val = 0;
+             if (atomic_compare_exchange_weak_explicit(&shard->lock, &val,
+                 (uintptr_t)(void*)batch, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+             {
+                 shard->next = batch->shard;
+                 batch->shard = shard;
+                 break;
+             }
+             if (val == (uintptr_t)(void*)batch) {
+                 break;
+             }
+             if (ctx->yield) {
+                 ctx->yield(ctx->udata);
+             }
+         }
+     } else {
+         while (1) {
+             uintptr_t val = 0;
+             if (atomic_compare_exchange_weak_explicit(&shard->lock, &val,
+                 UINTPTR_MAX, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+             {
+                 break;
+             }
+             if (ctx->yield) {
+                 ctx->yield(ctx->udata);
+             }
+         }
+     }
+ }
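+
+ // The lock word above doubles as an owner tag: 0 means unlocked,
+ // UINTPTR_MAX marks a plain single-operation lock, and any other value
+ // is the address of the batch that holds the shard. That is what lets a
+ // batch re-enter a shard it already owns: the failed CAS reports the
+ // batch's own pointer and the loop treats the lock as already acquired.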
+
+ static bool acquire_for_scan(int shardidx, struct shard **shard_out,
+     struct pogocache **cache_inout)
+ {
+     struct pogocache *cache = *cache_inout;
+     struct batch *batch = 0;
+     if (cache->isbatch) {
+         // use batch
+         batch = &cache->batch;
+         cache = batch->cache;
+     }
+     struct pgctx *ctx = &cache->ctx;
+     struct shard *shard = shard_get(cache, shardidx);
+     lock(batch, shard, ctx);
+     *shard_out = shard;
+     *cache_inout = cache;
+     return batch != 0;
+ }
+
+ // Acquire a lock for the key.
+ static bool acquire_for_key(const char *key, size_t keylen, uint32_t *hash_out,
+     struct shard **shard_out, int *shardidx_out, struct pogocache **cache_inout)
+ {
+     struct pogocache *cache = *cache_inout;
+     struct batch *batch = 0;
+     if (cache->isbatch) {
+         // use batch
+         batch = &cache->batch;
+         cache = batch->cache;
+     }
+     struct pgctx *ctx = &cache->ctx;
+     uint64_t fhash = th64(key, keylen, cache->ctx.seed);
+     int shardidx = shard_index(cache, fhash);
+     struct shard *shard = shard_get(cache, shardidx);
+     lock(batch, shard, ctx);
+     *hash_out = fhash;
+     *shard_out = shard;
+     *shardidx_out = shardidx;
+     *cache_inout = cache;
+     return batch != 0;
+ }
+
+ // Acquire a lock on the shard for key and execute the provided operation.
+ #define ACQUIRE_FOR_KEY_AND_EXECUTE(rettype, key, keylen, op) ({ \
+     int shardidx; \
+     uint32_t hash; \
+     struct shard *shard; \
+     bool usebatch = acquire_for_key((key), (keylen), &hash, &shard, &shardidx, \
+         &cache); \
+     struct pgctx *ctx = &cache->ctx; \
+     (void)shardidx, (void)hash, (void)ctx; \
+     rettype status = op; \
+     if (!usebatch) { \
+         atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \
+     } \
+     status; \
+ })
+
+ // Acquire a lock on the shard at index and execute the provided operation.
+ #define ACQUIRE_FOR_SCAN_AND_EXECUTE(rettype, shardidx, op) ({ \
+     struct shard *shard; \
+     bool usebatch = acquire_for_scan((shardidx), &shard, &cache); \
+     struct pgctx *ctx = &cache->ctx; \
+     (void)ctx; \
+     rettype status = op; \
+     if (!usebatch) { \
+         atomic_store_explicit(&shard->lock, 0, __ATOMIC_RELEASE); \
+     } \
+     status; \
+ })
+
+ static int loadop(const void *key, size_t keylen,
+     struct pogocache_load_opts *opts, struct shard *shard, int shardidx,
+     uint32_t hash, struct pgctx *ctx)
+ {
+     opts = opts ? opts : &defloadopts;
+     int64_t now = opts->time > 0 ? opts->time : getnow();
+     // Get the entry bucket index for the entry with key.
+     int bidx = map_get_bucket(&shard->map, key, keylen, hash);
+     if (bidx == -1) {
+         return POGOCACHE_NOTFOUND;
+     }
+     // Extract the bucket, entry, and values.
+     struct bucket *bkt = &shard->map.buckets[bidx];
+     struct entry *entry = get_entry(bkt);
+     const char *val;
+     size_t vallen;
+     int64_t expires;
+     uint32_t flags;
+     uint64_t cas;
+     entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas, ctx);
+     int reason = entry_alive(entry, now, shard->cleartime);
+     if (reason) {
+         // Entry is no longer alive. Evict the entry and clear the bucket.
+         if (ctx->evicted) {
+             ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,
+                 expires, flags, cas, ctx->udata);
+         }
+         shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);
+         entry_free(entry, ctx);
+         delbkt(&shard->map, bidx);
+         return POGOCACHE_NOTFOUND;
+     }
+     if (!opts->notouch) {
+         entry_settime(entry, now);
+     }
+     if (opts->entry) {
+         struct pogocache_update *update = 0;
+         opts->entry(shardidx, now, key, keylen, val, vallen, expires, flags,
+             cas, &update, opts->udata);
+         if (update) {
+             // User wants to update the entry.
+             shard->cas++;
+             cas = shard->cas;
+             struct entry *entry2 = entry_new(key, keylen, update->value,
+                 update->valuelen, update->expires, update->flags, cas, ctx);
+             if (!entry2) {
+                 return POGOCACHE_NOMEM;
+             }
+             entry_settime(entry2, now);
+             set_entry(bkt, entry2);
+             entry_free(entry, ctx);
+         }
+     }
+     return POGOCACHE_FOUND;
+ }
+
+ /// Loads an entry from the cache.
+ /// Use the pogocache_load_opts.entry callback to access the value of the entry.
+ /// It's possible to update the value using the 'update' param in the callback.
+ /// See 'pogocache_load_opts' for all options.
+ /// @returns POGOCACHE_FOUND when the entry was found.
+ /// @returns POGOCACHE_NOMEM when the entry cannot be updated due to no memory.
+ /// @returns POGOCACHE_NOTFOUND when the entry was not found.
+ int pogocache_load(struct pogocache *cache, const void *key, size_t keylen,
+     struct pogocache_load_opts *opts)
+ {
+     return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,
+         loadop(key, keylen, opts, shard, shardidx, hash, ctx)
+     );
+ }
+
+ static int deleteop(const void *key, size_t keylen,
+     struct pogocache_delete_opts *opts, struct shard *shard, int shardidx,
+     uint32_t hash, struct pgctx *ctx)
+ {
+     opts = opts ? opts : &defdeleteopts;
+     int64_t now = opts->time > 0 ? opts->time : getnow();
+     struct entry *entry = map_delete(&shard->map, key, keylen, hash, ctx);
+     if (!entry) {
+         // Entry does not exist
+         return POGOCACHE_NOTFOUND;
+     }
+     const char *val;
+     size_t vallen;
+     int64_t expires;
+     uint32_t flags;
+     uint64_t cas;
+     int reason = entry_alive(entry, now, shard->cleartime);
+     if (reason) {
+         // Entry is no longer alive. It was already deleted from the map but
+         // we still need to notify the user.
+         if (ctx->evicted) {
+             entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,
+                 ctx);
+             ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,
+                 expires, flags, cas, ctx->udata);
+         }
+         shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);
+         tryshrink(&shard->map, false, ctx);
+         entry_free(entry, ctx);
+         return POGOCACHE_NOTFOUND;
+     }
+     if (opts->entry) {
+         entry_extract(entry, 0, 0, 0, &val, &vallen, &expires, &flags, &cas,
+             ctx);
+         if (!opts->entry(shardidx, now, key, keylen, val, vallen,
+             expires, flags, cas, opts->udata))
+         {
+             // User canceled the delete. Put it back into the map.
+             // This insert will not cause an allocation error because the
+             // previous delete operation left us with at least one available
+             // bucket.
+             struct entry *old;
+             bool ok = map_insert(&shard->map, entry, hash, &old, ctx);
+             assert(ok); (void)ok;
+             assert(!old);
+             return POGOCACHE_CANCELED;
+         }
+     }
+     // Entry was successfully deleted.
+     tryshrink(&shard->map, false, ctx);
+     entry_free(entry, ctx);
+     return POGOCACHE_DELETED;
+ }
+
+ /// Deletes an entry from the cache.
+ /// See 'pogocache_delete_opts' for all options.
+ /// @returns POGOCACHE_DELETED when the entry was successfully deleted.
+ /// @returns POGOCACHE_NOTFOUND when the entry was not found.
+ /// @returns POGOCACHE_CANCELED when opts.entry callback returned false.
+ int pogocache_delete(struct pogocache *cache, const void *key, size_t keylen,
+     struct pogocache_delete_opts *opts)
+ {
+     return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,
+         deleteop(key, keylen, opts, shard, shardidx, hash, ctx)
+     );
+ }
+
+ static int storeop(const void *key, size_t keylen, const void *val,
+     size_t vallen, struct pogocache_store_opts *opts, struct shard *shard,
+     int shardidx, uint32_t hash, struct pgctx *ctx)
+ {
+     int count = shard->map.count;
+     opts = opts ? opts : &defstoreopts;
+     int64_t now = opts->time > 0 ? opts->time : getnow();
+     int64_t expires = 0;
+     if (opts->expires > 0) {
+         expires = opts->expires;
+     } else if (opts->ttl > 0) {
+         expires = int64_add_clamp(now, opts->ttl);
+     }
+     uint64_t cas = opts->cas; // use a standalone variable, may mutate
+     if (opts->keepttl) {
+         // User wants to keep the existing ttl. Get the existing entry from the
+         // map first and take its expiration.
+         int i;
+         struct entry *old = map_get_entry(&shard->map, key, keylen, hash, &i);
+         if (old) {
+             int reason = entry_alive(old, now, shard->cleartime);
+             if (reason == 0) {
+                 expires = entry_expires(old);
+             }
+         }
+     }
+     if (ctx->usecas) {
+         if (cas == 0) {
+             shard->cas++;
+             cas = shard->cas;
+         } else if (cas > shard->cas) {
+             shard->cas = cas;
+         }
+     }
+     struct entry *entry = entry_new(key, keylen, val, vallen, expires,
+         opts->flags, cas, ctx);
+     if (!entry) {
+         goto nomem;
+     }
+     entry_settime(entry, now);
+     if (opts->lowmem && ctx->noevict) {
+         goto nomem;
+     }
+     // Insert new entry into map
+     struct entry *old;
+     if (!map_insert(&shard->map, entry, hash, &old, ctx)) {
+         goto nomem;
+     }
+     if (old) {
+         int reason = entry_alive(old, now, shard->cleartime);
+         if (reason) {
+             // There's an old entry, but it's no longer alive.
+             // Treat this like an eviction and notify the user.
+             if (ctx->evicted) {
+                 const char *oval;
+                 size_t ovallen;
+                 int64_t oexpires = 0;
+                 uint32_t oflags = 0;
+                 uint64_t ocas = 0;
+                 entry_extract(old, 0, 0, 0,
+                     &oval, &ovallen, &oexpires, &oflags, &ocas, ctx);
+                 ctx->evicted(shardidx, reason, now, key, keylen, oval, ovallen,
+                     oexpires, oflags, ocas, ctx->udata);
+             }
+             shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);
+             entry_free(old, ctx);
+             old = 0;
+         }
+     }
+     int put_back_status = 0;
+     if (old) {
+         if (opts->casop) {
+             // User is requesting the cas operation.
+             if (ctx->usecas) {
+                 uint64_t old_cas = entry_cas(old);
+                 if (cas != old_cas) {
+                     // CAS test failed.
+                     // printf(". cas failed: expected %" PRIu64 ", "
+                     //     "got %" PRIu64 "\n", cas, old_cas);
+                     put_back_status = POGOCACHE_FOUND;
+                 }
+             } else {
+                 put_back_status = POGOCACHE_FOUND;
+             }
+         } else if (opts->nx) {
+             put_back_status = POGOCACHE_FOUND;
+         }
+         if (put_back_status) {
+         put_back:;
+             // The entry needs to be put back into the map and the operation
+             // must return early.
+             // This insert operation must not fail since the entry 'e' and
+             // 'old' both exist and will always be bucket swapped. There will
+             // never be a new allocation.
+             struct entry *e = 0;
+             bool ok = map_insert(&shard->map, old, hash, &e, ctx);
+             assert(ok); (void)ok;
+             assert(e == entry);
+             entry_free(entry, ctx);
+             return put_back_status;
+         }
+     } else if (opts->xx || opts->casop) {
+         // The new entry must not be inserted.
+         // Delete it and return early.
+         struct entry *e = map_delete(&shard->map, key, keylen, hash, ctx);
+         assert(e == entry); (void)e;
+         entry_free(entry, ctx);
+         return POGOCACHE_NOTFOUND;
+     }
+     if (old && opts->entry) {
+         // User is requesting to verify the old entry before allowing it to be
+         // replaced by the new entry.
+         const char *val;
+         size_t vallen;
+         int64_t oexpires = 0;
+         uint32_t oflags = 0;
+         uint64_t ocas = 0;
+         entry_extract(old, 0, 0, 0, &val, &vallen, &oexpires, &oflags, &ocas,
+             ctx);
+         if (!opts->entry(shardidx, now, key, keylen, val, vallen, oexpires,
+             oflags, ocas, opts->udata))
+         {
+             // User wants to keep the old entry.
+             put_back_status = POGOCACHE_CANCELED;
+             goto put_back;
+         }
+     }
+     // The new entry was inserted.
+     if (old) {
+         entry_free(old, ctx);
+         return POGOCACHE_REPLACED;
+     } else {
+         if (opts->lowmem && shard->map.count > count) {
+             // The map grew by one bucket, yet the user indicates that there is
+             // a low memory event. Evict one entry.
+             auto_evict_entry(shard, shardidx, hash, now, ctx);
+         }
+         return POGOCACHE_INSERTED;
+     }
+ nomem:
+     entry_free(entry, ctx);
+     return POGOCACHE_NOMEM;
+ }
+
+ /// Insert or replace an entry in the cache.
+ /// If an entry with the same key already exists in the cache, then the
+ /// opts.entry callback can be used to check the existing value first,
+ /// allowing the operation to be canceled.
+ /// See 'pogocache_store_opts' for all options.
+ /// @returns POGOCACHE_INSERTED when the entry was inserted.
+ /// @returns POGOCACHE_REPLACED when the entry replaced an existing one.
+ /// @returns POGOCACHE_FOUND when the entry already exists. (cas/nx)
+ /// @returns POGOCACHE_CANCELED when the operation was canceled.
+ /// @returns POGOCACHE_NOMEM when there is no system memory available.
+ int pogocache_store(struct pogocache *cache, const void *key, size_t keylen,
+     const void *val, size_t vallen, struct pogocache_store_opts *opts)
+ {
+     return ACQUIRE_FOR_KEY_AND_EXECUTE(int, key, keylen,
+         storeop(key, keylen, val, vallen, opts, shard, shardidx, hash, ctx)
+     );
+ }
+
+
+ static struct pogocache *rootcache(struct pogocache *cache) {
+     return cache->isbatch ? cache->batch.cache : cache;
+ }
+
+ /// Returns the number of shards in the cache.
+ int pogocache_nshards(struct pogocache *cache) {
+     cache = rootcache(cache);
+     return cache->ctx.nshards;
+ }
+
+ static int iterop(struct shard *shard, int shardidx, int64_t now,
+     struct pogocache_iter_opts *opts, struct pgctx *ctx)
+ {
+     char buf[128];
+     int status = POGOCACHE_FINISHED;
+     for (int i = 0; i < shard->map.nbuckets; i++) {
+         struct bucket *bkt = &shard->map.buckets[i];
+         if (get_dib(bkt) == 0) {
+             continue;
+         }
+         struct entry *entry = get_entry(bkt);
+         const char *key, *val;
+         size_t keylen, vallen;
+         int64_t expires;
+         uint32_t flags;
+         uint64_t cas;
+         entry_extract(entry, &key, &keylen, buf, &val, &vallen,
+             &expires, &flags, &cas, ctx);
+         int reason = entry_alive(entry, now, shard->cleartime);
+         if (reason) {
+ #ifdef EVICTONITER
+             if (ctx->evicted) {
+                 ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,
+                     expires, flags, cas, ctx->udata);
+             }
+             shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);
+             // Delete entry at bucket.
+             delbkt(&shard->map, i);
+             entry_free(entry, ctx);
+             i--;
+ #endif
+         } else {
+             // Entry is alive, check with user for next action.
+             int action = POGOCACHE_ITER_CONTINUE;
+             if (opts->entry) {
+                 action = opts->entry(shardidx, now, key, keylen, val,
+                     vallen, expires, flags, cas, opts->udata);
+             }
+             if (action != POGOCACHE_ITER_CONTINUE) {
+                 if (action&POGOCACHE_ITER_DELETE) {
+                     // Delete entry at bucket
+                     delbkt(&shard->map, i);
+                     entry_free(entry, ctx);
+                     i--;
+                 }
+                 if (action&POGOCACHE_ITER_STOP) {
+                     status = POGOCACHE_CANCELED;
+                     break;
+                 }
+             }
+         }
+     }
+     tryshrink(&shard->map, true, ctx);
+     return status;
+ }
+
+ /// Iterate over entries in the cache.
+ /// There's an option to allow for isolating the operation to a single shard.
+ /// The pogocache_iter_opts.entry callback can be used to perform actions such
+ /// as: deleting entries and stopping iteration early.
+ /// See 'pogocache_iter_opts' for all options.
+ /// @return POGOCACHE_FINISHED if iteration completed
+ /// @return POGOCACHE_CANCELED if iteration stopped early
+ int pogocache_iter(struct pogocache *cache, struct pogocache_iter_opts *opts) {
+     int nshards = pogocache_nshards(cache);
+     opts = opts ? opts : &defiteropts;
+     int64_t now = opts->time > 0 ? opts->time : getnow();
+     if (opts->oneshard) {
+         if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {
+             return POGOCACHE_FINISHED;
+         }
+         return ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,
+             iterop(shard, opts->oneshardidx, now, opts, &cache->ctx)
+         );
+     }
+     for (int i = 0; i < nshards; i++) {
+         int status = ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,
+             iterop(shard, i, now, opts, &cache->ctx)
+         );
+         if (status != POGOCACHE_FINISHED) {
+             return status;
+         }
+     }
+     return POGOCACHE_FINISHED;
+ }
+
+ static size_t countop(struct shard *shard) {
+     return shard->map.count - shard->clearcount;
+ }
+
+ /// Returns the number of entries in the cache.
+ /// There's an option to allow for isolating the operation to a single shard.
+ size_t pogocache_count(struct pogocache *cache,
+     struct pogocache_count_opts *opts)
+ {
+     int nshards = pogocache_nshards(cache);
+     opts = opts ? opts : &defcountopts;
+     if (opts->oneshard) {
+         if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {
+             return 0;
+         }
+         return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,
+             countop(shard);
+         );
+     }
+     size_t count = 0;
+     for (int i = 0; i < nshards; i++) {
+         count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,
+             countop(shard);
+         );
+     }
+     return count;
+ }
1768
+
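+ // Usage sketch for pogocache_count() (illustrative): passing NULL for
+ // the options counts across all shards; the oneshard option restricts
+ // the count to a single shard.
+ //
+ //   size_t all = pogocache_count(cache, NULL);
+ //   struct pogocache_count_opts copts = {
+ //       .oneshard = true, .oneshardidx = 0,
+ //   };
+ //   size_t shard0 = pogocache_count(cache, &copts);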
1769
+ static uint64_t totalop(struct shard *shard) {
1770
+ return shard->map.total;
1771
+ }
1772
+
1773
+ /// Returns the total number of entries that have ever been stored in the cache.
1774
+ /// For the current number of entries use pogocache_count().
1775
+ /// There's an option to allow for isolating the operation to a single shard.
1776
+ uint64_t pogocache_total(struct pogocache *cache,
1777
+ struct pogocache_total_opts *opts)
1778
+ {
1779
+ int nshards = pogocache_nshards(cache);
1780
+ opts = opts ? opts : &deftotalopts;
1781
+ if (opts->oneshard) {
1782
+ if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {
1783
+ return 0;
1784
+ }
1785
+ return ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, opts->oneshardidx,
1786
+ totalop(shard);
1787
+ );
1788
+ }
1789
+ uint64_t count = 0;
1790
+ for (int i = 0; i < nshards; i++) {
1791
+ count += ACQUIRE_FOR_SCAN_AND_EXECUTE(uint64_t, i,
1792
+ totalop(shard);
1793
+ );
1794
+ }
1795
+ return count;
1796
+ }
1797
+
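+ // Usage sketch (illustrative): comparing the lifetime total against
+ // the live count gives a rough measure of how many entries have been
+ // removed, replaced, or evicted since the cache was created.
+ //
+ //   uint64_t ever = pogocache_total(cache, NULL);
+ //   size_t live = pogocache_count(cache, NULL);
+ //   uint64_t gone = ever - (uint64_t)live;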
1798
+ static size_t sizeop(struct shard *shard, bool entriesonly) {
1799
+ size_t size = 0;
1800
+ if (!entriesonly) {
1801
+ size += sizeof(struct shard);
1802
+ size += sizeof(struct bucket)*shard->map.nbuckets;
1803
+ }
1804
+ size += shard->map.entsize;
1805
+ return size;
1806
+ }
1807
+
1808
+ /// Returns the total memory size of the cache.
1809
+ /// This includes the memory size of all data structures and entries.
1810
+ /// Use the entriesonly option to limit the result to only the entries.
1811
+ /// There's an option to allow for isolating the operation to a single shard.
1812
+ size_t pogocache_size(struct pogocache *cache,
1813
+ struct pogocache_size_opts *opts)
1814
+ {
1815
+ int nshards = pogocache_nshards(cache);
1816
+ opts = opts ? opts : &defsizeopts;
1817
+ if (opts->oneshard) {
1818
+ if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {
1819
+ return 0;
1820
+ }
1821
+ return ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, opts->oneshardidx,
1822
+ sizeop(shard, opts->entriesonly);
1823
+ );
1824
+ }
1825
+ size_t count = 0;
1826
+ for (int i = 0; i < nshards; i++) {
1827
+ count += ACQUIRE_FOR_SCAN_AND_EXECUTE(size_t, i,
1828
+ sizeop(shard, opts->entriesonly);
1829
+ );
1830
+ }
1831
+ return count;
1832
+ }
1833
+
1834
+
1835
+
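+ // Usage sketch for pogocache_size() (illustrative): the default
+ // measures entry data plus the shard and bucket structures, while the
+ // entriesonly option limits the result to entry data alone.
+ //
+ //   size_t full = pogocache_size(cache, NULL);
+ //   struct pogocache_size_opts sopts = { .entriesonly = true };
+ //   size_t entdata = pogocache_size(cache, &sopts);
+ //   size_t overhead = full - entdata; // shard + bucket structures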
1836
+ static int sweepop(struct shard *shard, int shardidx, int64_t now,
1837
+ size_t *swept, size_t *kept, struct pgctx *ctx)
1838
+ {
1839
+ char buf[128];
1840
+ for (int i = 0; i < shard->map.nbuckets; i++) {
1841
+ struct bucket *bkt = &shard->map.buckets[i];
1842
+ if (get_dib(bkt) == 0) {
1843
+ continue;
1844
+ }
1845
+ struct entry *entry = get_entry(bkt);
1846
+ int64_t expires = entry_expires(entry);
1847
+ int64_t etime = entry_time(entry);
1848
+ int reason = entry_alive_exp(expires, etime, now, shard->cleartime);
1849
+ if (reason == 0) {
1850
+ // entry is still alive
1851
+ (*kept)++;
1852
+ continue;
1853
+ }
1854
+ // entry is no longer alive.
1855
+ if (ctx->evicted) {
1856
+ const char *key, *val;
1857
+ size_t keylen, vallen;
1858
+ int64_t expires;
1859
+ uint32_t flags;
1860
+ uint64_t cas;
1861
+ entry_extract(entry, &key, &keylen, buf, &val, &vallen, &expires,
1862
+ &flags, &cas, ctx);
1863
+ // Report eviction to user
1864
+ ctx->evicted(shardidx, reason, now, key, keylen, val, vallen,
1865
+ expires, flags, cas, ctx->udata);
1866
+ }
1867
+ shard->clearcount -= (reason==POGOCACHE_REASON_CLEARED);
1868
+ delbkt(&shard->map, i);
1869
+ entry_free(entry, ctx);
1870
+ (*swept)++;
1871
+ // Deleting the entry may shift entries to its right one bucket to
1872
+ // the left (backward-shift deletion), so the same bucket index must
1873
+ // be checked again.
1874
+ i--;
1875
+ }
1876
+ tryshrink(&shard->map, true, ctx);
1877
+ return 0;
1878
+ }
1879
+
1880
+ /// Remove expired and lazily cleared entries from the cache.
1881
+ /// There's an option to allow for isolating the operation to a single shard.
1882
+ /// The final 'swept' and 'kept' counts are written to the respective
1883
+ /// output arguments; pass NULL for either to ignore it. This function
1884
+ /// has no return value.
1885
+ void pogocache_sweep(struct pogocache *cache, size_t *swept, size_t *kept,
1886
+ struct pogocache_sweep_opts *opts)
1887
+ {
1888
+ int nshards = pogocache_nshards(cache);
1889
+ opts = opts ? opts : &defsweepopts;
1890
+ int64_t now = opts->time > 0 ? opts->time : getnow();
1891
+ size_t sweptc = 0;
1892
+ size_t keptc = 0;
1893
+ if (opts->oneshard) {
1894
+ if (opts->oneshardidx >= 0 && opts->oneshardidx < nshards) {
1895
+ ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,
1896
+ sweepop(shard, opts->oneshardidx, now, &sweptc, &keptc,
1897
+ &cache->ctx);
1898
+ );
1899
+ }
1900
+ } else {
1901
+ for (int i = 0; i < nshards; i++) {
1902
+ size_t sweptc2 = 0;
1903
+ size_t keptc2 = 0;
1904
+ ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,
1905
+ sweepop(shard, i, now, &sweptc2, &keptc2, &cache->ctx);
1906
+ );
1907
+ sweptc += sweptc2;
1908
+ keptc += keptc2;
1909
+ }
1910
+ }
1911
+ if (swept) {
1912
+ *swept = sweptc;
1913
+ }
1914
+ if (kept) {
1915
+ *kept = keptc;
1916
+ }
1917
+ }
1918
+
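+ // Usage sketch for pogocache_sweep() (illustrative); either output
+ // pointer may be NULL when that count is not needed:
+ //
+ //   size_t swept, kept;
+ //   pogocache_sweep(cache, &swept, &kept, NULL);
+ //   printf("swept %zu dead entries, kept %zu\n", swept, kept);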
1919
+ static int clearop(struct shard *shard, int shardidx, int64_t now,
1920
+ struct pgctx *ctx)
1921
+ {
1922
+ (void)shardidx, (void)ctx;
1923
+ shard->cleartime = now; // lazily invalidates all current entries
1924
+ shard->clearcount += (shard->map.count-shard->clearcount); // = map.count
1925
+ return 0;
1926
+ }
1927
+
1928
+ /// Clear the cache.
1929
+ /// There's an option to allow for isolating the operation to a single shard.
1930
+ void pogocache_clear(struct pogocache *cache, struct pogocache_clear_opts *opts)
1931
+ {
1932
+ int nshards = pogocache_nshards(cache);
1933
+ opts = opts ? opts : &defclearopts;
1934
+ int64_t now = opts->time > 0 ? opts->time : getnow();
1935
+ if (opts->oneshard) {
1936
+ if (opts->oneshardidx < 0 || opts->oneshardidx >= nshards) {
1937
+ return;
1938
+ }
1939
+ ACQUIRE_FOR_SCAN_AND_EXECUTE(int, opts->oneshardidx,
1940
+ clearop(shard, opts->oneshardidx, now, &cache->ctx);
1941
+ );
1942
+ return;
1943
+ }
1944
+ for (int i = 0; i < nshards; i++) {
1945
+ ACQUIRE_FOR_SCAN_AND_EXECUTE(int, i,
1946
+ clearop(shard, i, now, &cache->ctx);
1947
+ );
1948
+ }
1949
+ }
1950
+
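+ // Usage sketch for pogocache_clear() (illustrative). Per clearop
+ // above, clearing only records a timestamp per shard; the cleared
+ // entries are reclaimed lazily by later operations or by a sweep.
+ //
+ //   pogocache_clear(cache, NULL); // clear the whole cache
+ //   struct pogocache_clear_opts clopts = {
+ //       .oneshard = true, .oneshardidx = 3,
+ //   };
+ //   pogocache_clear(cache, &clopts); // clear only shard 3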
1951
+ static int sweeppollop(struct shard *shard, int shardidx, int64_t now,
1952
+ int pollsize, double *percent)
1953
+ {
1954
+ // start at random bucket
1955
+ int count = 0;
1956
+ int dead = 0;
1957
+ int bidx = mix13(now+shardidx)%shard->map.nbuckets;
1958
+ for (int i = 0; i < shard->map.nbuckets && count < pollsize; i++) {
1959
+ struct bucket *bkt = &shard->map.buckets[(bidx+i)%shard->map.nbuckets];
1960
+ if (get_dib(bkt) == 0) {
1961
+ continue;
1962
+ }
1963
+ struct entry *entry = get_entry(bkt);
1964
+ count++;
1965
+ dead += (entry_alive(entry, now, shard->cleartime) != 0); // nonzero=dead
1966
+ }
1967
+ if (count == 0) {
1968
+ *percent = 0;
1969
+ return 0;
1970
+ }
1971
+ *percent = (double)dead/(double)count; // a fraction in [0,1], not 0-100
1972
+ return 0;
1973
+ }
1974
+
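+ /// Estimate the fraction of dead (expired or cleared) entries by
+ /// sampling up to 'pollsize' entries (default 20) from one randomly
+ /// chosen shard.
+ /// See 'pogocache_sweep_poll_opts' for all options.
+ /// @return a value in the range [0.0, 1.0]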
1975
+ double pogocache_sweep_poll(struct pogocache *cache,
1976
+ struct pogocache_sweep_poll_opts *opts)
1977
+ {
1978
+ int nshards = pogocache_nshards(cache);
1979
+ opts = opts ? opts : &defsweeppollopts;
1980
+ int64_t now = opts->time > 0 ? opts->time : getnow();
1981
+ int pollsize = opts->pollsize == 0 ? 20 : opts->pollsize;
1982
+
1983
+ // choose a random shard
1984
+ int shardidx = mix13(now)%nshards;
1985
+ double percent = 0;
1986
+ ACQUIRE_FOR_SCAN_AND_EXECUTE(int, shardidx,
1987
+ sweeppollop(shard, shardidx, now, pollsize, &percent);
1988
+ );
1989
+ return percent;
1990
+ }
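+
+ // Usage sketch (illustrative): a maintenance loop can poll cheaply and
+ // only pay for a full sweep when the sampled dead fraction crosses a
+ // threshold. The 0.25 threshold and pollsize of 50 are arbitrary
+ // example values, not defaults of this library.
+ //
+ //   struct pogocache_sweep_poll_opts popts = { .pollsize = 50 };
+ //   if (pogocache_sweep_poll(cache, &popts) > 0.25) {
+ //       pogocache_sweep(cache, NULL, NULL, NULL);
+ //   }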