inmemory_kv 0.1.1 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -13
- data/ext/inmemory_kv.c +150 -50
- data/lib/inmemory_kv/version.rb +1 -1
- metadata +13 -12
checksums.yaml
CHANGED
@@ -1,15 +1,7 @@
 ---
-
-  metadata.gz:
-
-  data.tar.gz: !binary |-
-    Mjk4MWUzN2FhMmE1N2QyOGNjMWFmM2NmMWU1YjBhODcxNmRhNjM0Mw==
+SHA1:
+  metadata.gz: 4f83c12e65316b730f53c6d223859355b5fa8e00
+  data.tar.gz: 78c378060063ec7c7ce46721d775bfbdfcfcc741
 SHA512:
-  metadata.gz:
-
-    Njk2OTU4YWZkYmVkZmExNDYzOTJhZWE3YmRiNmQxZWUwYmE1ODIzYjMxOWFl
-    MWQ0ODI0YTk0YTQxY2RmNDEzYzFkOWIyYzk2YmEyNTI5ZThhYWM=
-  data.tar.gz: !binary |-
-    YjdlNDY4M2VjZGE2Njc5MzY2MmFmNjA3YThhYmM1YmRiOGI0NWM0MDgzMjU3
-    MDdhYjQzZTgwYTZhNjM0ZjY3NmU2ZTFkYTdkMTM5ZWZjZGU1Nzk1ZjU4ZTNi
-    MGUzYTY3ZGJiMTk2ZjAzMDVlNzE2MzZlZjE3M2QwZTJkM2YwZWI=
+  metadata.gz: a64d4273444283c58f16eb5cb17536073d708bf6e9e908f28647f74268d4e606d61244a4932c1066096637a9df5d272690cc67acec877dedb8156b58424a710c
+  data.tar.gz: 59f871c45e85e7fc198a4ebb8bd764f7676754bc9d384b1ca6ec9ed915976821d79e51f71bd6e476b9ea054ab15c5643e6f092a5fda5db76503f0b2fd125efda
data/ext/inmemory_kv.c
CHANGED
@@ -2,6 +2,7 @@
 #include <ruby/intern.h>
 #include <assert.h>
 #include <malloc.h>
+#include <stddef.h>
 
 #include <stdio.h>
 #ifdef HAV_STDLIB_H
@@ -10,19 +11,109 @@
 #include <string.h>
 
 typedef unsigned int u32;
+typedef unsigned char u8;
 
 typedef struct hash_item {
     u32 pos;
-    u32 rc;
-    u32
-
-    u32
-
+    u32 rc : 31;
+    u32 big : 1;
+#ifndef HAVE_MALLOC_USABLE_SIZE
+    u32 item_size;
+#endif
+    union {
+        struct {
+            u8 key_size;
+            u8 val_size;
+            char key[0];
+        } small;
+        struct {
+            u32 key_size;
+            u32 val_size;
+            char key[0];
+        } big;
+    } kind;
 } hash_item;
 
+#ifdef HAVE_MALLOC_USABLE_SIZE
+static inline size_t
+item_size(hash_item* item) {
+    return malloc_usable_size(item);
+}
+#else
+static inline size_t
+item_size(hash_item* item) {
+    return item->item_size;
+}
+#endif
+
+static inline int
+item_need_big(u32 key_size, u32 val_size) {
+    return key_size > 255 || val_size > 255;
+}
+
+static inline u32
+item_key_size(hash_item* item) {
+    return item->big ? item->kind.big.key_size : item->kind.small.key_size;
+}
+
+static inline u32
+item_val_size(hash_item* item) {
+    return item->big ? item->kind.big.val_size : item->kind.small.val_size;
+}
+
+static inline void
+item_set_sizes(hash_item* item, u32 key_size, u32 val_size) {
+    if (item_need_big(key_size, val_size)) {
+        item->big = 1;
+        item->kind.big.key_size = key_size;
+        item->kind.big.val_size = val_size;
+    } else {
+        item->big = 0;
+        item->kind.small.key_size = key_size;
+        item->kind.small.val_size = val_size;
+    }
+}
+
+static inline void
+item_set_val_size(hash_item* item, u32 val_size) {
+    assert( (val_size > 255) == (item->big == 1));
+    if (item->big) {
+        item->kind.big.val_size = val_size;
+    } else {
+        item->kind.small.val_size = val_size;
+    }
+}
+
+static inline char*
+item_key(hash_item* item) {
+    return item->big ? item->kind.big.key : item->kind.small.key;
+}
+
 static inline char*
 item_val(hash_item* item) {
-    return item
+    return item_key(item) + item_key_size(item);
+}
+
+static inline u32
+item_need_size(u32 key_size, u32 val_size) {
+    if (item_need_big(key_size, val_size)) {
+        return offsetof(hash_item, kind.big.key) + key_size + val_size;
+    } else {
+        return offsetof(hash_item, kind.small.key) + key_size + val_size;
+    }
+}
+
+static inline int
+item_compatible(hash_item* item, u32 val_size) {
+    u32 key_size, need_size, have_size;
+    key_size = item_key_size(item);
+    if (item->big != item_need_big(key_size, val_size))
+        return 0;
+    need_size = item_need_size(key_size, val_size);
+    have_size = item_size(item);
+    if (need_size > have_size || need_size < have_size/2)
+        return 0;
+    return 1;
 }
 
 typedef struct hash_entry {
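Note on the layout introduced above: an item now carries either a one-byte or a four-byte size header, selected by the `big` bit, and the bytes needed for an item are computed with `offsetof` against the flexible `key[0]` member of the chosen variant, so short keys and values pay only two bytes of size overhead. Below is a minimal standalone sketch of that packing idea, assuming gcc/clang (it reuses the gem's `char key[0]` zero-length-array extension; the `demo_*` names and the `main` driver are illustrative only, not part of the gem):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned int u32;
typedef unsigned char u8;

/* Illustrative copy of the two-header idea: a 1-byte size header for short
   keys/values, a 4-byte header otherwise, selected by the `big` bit. */
typedef struct demo_item {
    u32 big : 1;
    union {
        struct { u8  key_size; u8  val_size; char key[0]; } small;
        struct { u32 key_size; u32 val_size; char key[0]; } big;
    } kind;
} demo_item;

/* Bytes to allocate for one item: header of the chosen variant plus payload. */
static size_t demo_need_size(u32 key_size, u32 val_size) {
    if (key_size > 255 || val_size > 255)
        return offsetof(demo_item, kind.big.key) + key_size + val_size;
    return offsetof(demo_item, kind.small.key) + key_size + val_size;
}

int main(void) {
    const char *key = "user:42", *val = "hello";
    u32 ks = (u32)strlen(key), vs = (u32)strlen(val);

    demo_item *it = calloc(1, demo_need_size(ks, vs));
    it->big = 0;                              /* both sizes fit in one byte */
    it->kind.small.key_size = (u8)ks;
    it->kind.small.val_size = (u8)vs;
    memcpy(it->kind.small.key, key, ks);      /* key ...                    */
    memcpy(it->kind.small.key + ks, val, vs); /* ... immediately followed by value */

    printf("small header: %zu bytes, big header: %zu bytes\n",
           offsetof(demo_item, kind.small.key),
           offsetof(demo_item, kind.big.key));
    free(it);
    return 0;
}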
@@ -152,7 +243,7 @@ hash_unchain(hash_table* tab, u32 pos) {
 
 static void
 hash_up(hash_table* tab, u32 pos) {
-    assert(tab->entries[pos].
+    assert(tab->entries[pos].item != NULL);
     if (tab->last == pos+1) return;
     hash_unchain(tab, pos);
     hash_enchain(tab, pos);
@@ -160,7 +251,7 @@ hash_up(hash_table* tab, u32 pos) {
 
 static void
 hash_down(hash_table* tab, u32 pos) {
-    assert(tab->entries[pos].
+    assert(tab->entries[pos].item != NULL);
     if (tab->first == pos+1) return;
     hash_unchain(tab, pos);
    hash_enchain_first(tab, pos);
@@ -187,7 +278,7 @@ hash_insert(hash_table* tab, u32 hash) {
     free(tab->buckets);
     tab->buckets = calloc(new_nbuckets, sizeof(u32));
     for (i=0; i<tab->alloced; i++) {
-        if (tab->entries[i].
+        if (tab->entries[i].item == NULL)
             continue;
         buc = tab->entries[i].hash % new_nbuckets;
         npos = tab->buckets[buc];
@@ -232,6 +323,7 @@ hash_delete(hash_table* tab, u32 pos) {
     hash_unchain(tab, i);
     tab->empty = i+1;
     tab->entries[i].hash = 0;
+    tab->entries[i].item = NULL;
     tab->size--;
 }
 
@@ -260,8 +352,7 @@ static void kv_copy_to(inmemory_kv *from, inmemory_kv *to);
 #ifdef HAV_RB_MEMHASH
 static inline u32
 kv_hash(const char* key, u32 key_size) {
-
-    return hash ? hash : ~(u32)0;
+    return rb_memhash(key, key_size);
 }
 #else
 static inline u32
@@ -274,8 +365,7 @@ kv_hash(const char* key, u32 key_size) {
         a2 = (a2 ^ k) * 9;
     }
     a1 ^= key_size; a1 *= 5; a2 *= 9;
-    a1
-    return a1 ? a1 : ~(u32)0;
+    return a1 ^ a2;
 }
 #endif
 
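One detail worth flagging in the two kv_hash hunks above: the old code never returned 0 (a zero hash was remapped to ~(u32)0), while the new code returns rb_memhash or the fallback mix a1 ^ a2 directly. That appears to go hand in hand with the bookkeeping change elsewhere in this diff, where an empty slot is now recognised by entries[i].item == NULL rather than by its hash field, so a zero sentinel seems no longer needed. A small sketch contrasting the two styles; raw_hash is a stand-in mixer, not the gem's function:

#include <stdio.h>

typedef unsigned int u32;

/* Stand-in for rb_memhash()/the gem's fallback mix (FNV-1a style). */
static u32 raw_hash(const char* key, u32 key_size) {
    u32 h = 2166136261u;
    for (u32 i = 0; i < key_size; i++)
        h = (h ^ (unsigned char)key[i]) * 16777619u;
    return h;
}

/* 0.1.1 style: a zero result was remapped, apparently because 0 doubled
   as the empty-slot marker in the table. */
static u32 hash_with_sentinel(const char* key, u32 key_size) {
    u32 h = raw_hash(key, key_size);
    return h ? h : ~(u32)0;
}

/* 0.1.2 style: emptiness is tracked via item == NULL, so the raw hash
   value can be used unchanged. */
static u32 hash_direct(const char* key, u32 key_size) {
    return raw_hash(key, key_size);
}

int main(void) {
    const char* k = "example";
    printf("sentinel: %u\ndirect:   %u\n",
           hash_with_sentinel(k, 7), hash_direct(k, 7));
    return 0;
}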
@@ -287,8 +377,8 @@ kv_insert(inmemory_kv *kv, const char* key, u32 key_size, const char* val, u32 v
     pos = hash_hash_first(&kv->tab, hash);
     while (pos != end) {
         item = kv->tab.entries[pos].item;
-        if (item
-            memcmp(key, item
+        if (item_key_size(item) == key_size &&
+            memcmp(key, item_key(item), key_size) == 0) {
             break;
         }
         pos = hash_hash_next(&kv->tab, hash, pos);
@@ -298,8 +388,8 @@ kv_insert(inmemory_kv *kv, const char* key, u32 key_size, const char* val, u32 v
         item = NULL;
     } else {
         hash_up(&kv->tab, pos);
-        if (val_size
-            kv->total_size -=
+        if (!item_compatible(item, val_size) || item->rc > 0) {
+            kv->total_size -= item_size(item);
             if (item->rc > 0)
                 item->rc--;
             else
@@ -308,25 +398,25 @@ kv_insert(inmemory_kv *kv, const char* key, u32 key_size, const char* val, u32 v
         }
     }
     if (item == NULL) {
+        u32 new_size = item_need_size(key_size, val_size);
 #ifdef HAVE_MALLOC_USABLE_SIZE
-        item = malloc(
+        item = malloc(new_size);
         assert(item);
+        new_size = malloc_usable_size(item);
         item->rc = 0;
-        item->val_size_max = malloc_usable_size(item) - sizeof(*item) - key_size;
 #else
-
-
-        item = malloc(sizeof(*item) + key_size + val_size_max);
+        new_size = (new_size + 7) & 7;
+        item = malloc(new_size);
         assert(item);
         item->rc = 0;
-        item->
+        item->item_size = new_size;
 #endif
-        kv->total_size +=
-        item
+        kv->total_size += new_size;
+        item_set_sizes(item, key_size, val_size);
         item->pos = pos;
-        memcpy(item
+        memcpy(item_key(item), key, key_size);
     }
-    item
+    item_set_val_size(item, val_size);
     memcpy(item_val(item), val, val_size);
     kv->tab.entries[pos].item = item;
     return item;
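As the hunk above shows, when a key is overwritten the new code first asks item_compatible whether the existing allocation can be reused: the small/big header class must match, and the block must be at least as large as needed but not more than twice as large; a shared item (rc > 0) is never reused in place. Only when that check fails does it allocate item_need_size(key_size, val_size) bytes afresh. A tiny sketch of just the size half of that heuristic, with the same thresholds as the diff (the helper name and the main demo are made up):

#include <stdio.h>

typedef unsigned int u32;

/* Same test as item_compatible() above, expressed on plain numbers:
   reuse an existing block only if it is big enough and not wastefully
   oversized (more than twice the needed size). */
static int block_reusable(u32 have_size, u32 need_size) {
    if (need_size > have_size) return 0;     /* too small               */
    if (need_size < have_size / 2) return 0; /* would waste over half   */
    return 1;
}

int main(void) {
    /* a 64-byte block can hold a 40-byte item, but not an 80- or 20-byte one */
    printf("%d %d %d\n",
           block_reusable(64, 40),   /* 1: fits, not oversized          */
           block_reusable(64, 80),   /* 0: too small                    */
           block_reusable(64, 20));  /* 0: 64 > 2*20, better to realloc */
    return 0;
}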
@@ -340,8 +430,8 @@ kv_fetch(inmemory_kv *kv, const char* key, u32 key_size) {
     pos = hash_hash_first(&kv->tab, hash);
     while (pos != end) {
         item = kv->tab.entries[pos].item;
-        if (item
-            memcmp(key, item
+        if (item_key_size(item) == key_size &&
+            memcmp(key, item_key(item), key_size) == 0) {
             break;
         }
         pos = hash_hash_next(&kv->tab, hash, pos);
@@ -362,7 +452,7 @@ kv_down(inmemory_kv *kv, hash_item* item) {
 static void
 kv_delete(inmemory_kv *kv, hash_item* item) {
     hash_delete(&kv->tab, item->pos);
-    kv->total_size -=
+    kv->total_size -= item_size(item);
     if (item->rc > 0) {
         item->rc--;
     } else {
@@ -392,7 +482,7 @@ static void
 kv_destroy(inmemory_kv *kv) {
     u32 i;
     for (i=0; i<kv->tab.alloced; i++) {
-        if (kv->tab.entries[i].
+        if (kv->tab.entries[i].item != NULL) {
             hash_item* item = kv->tab.entries[i].item;
             if (item->rc > 0) {
                 item->rc--;
@@ -417,7 +507,7 @@ kv_copy_to(inmemory_kv *from, inmemory_kv *to) {
     memcpy(to->tab.buckets, from->tab.buckets,
             sizeof(u32)*from->tab.nbuckets);
     for (i=0; i<to->tab.alloced; i++) {
-        if (to->tab.entries[i].
+        if (to->tab.entries[i].item != NULL) {
             to->tab.entries[i].item->rc++;
         }
     }
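The kv_copy_to and kv_delete/kv_destroy hunks together implement copy-on-write style sharing: copying a store only bumps each item's rc, and an item's memory is released only when the table that drops it finds rc == 0; otherwise the count is decremented and the remaining owner keeps the block. A self-contained sketch of that ownership rule, assuming the same 'rc counts the extra owners' convention as the diff (names are illustrative):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int u32;

typedef struct shared_item {
    u32 rc;          /* number of extra owners; 0 means a single owner */
    char payload[8];
} shared_item;

/* A copy does not duplicate the bytes; it just gains a reference. */
static shared_item* share(shared_item* it) {
    it->rc++;
    return it;
}

/* Dropping an owner frees only when nobody else still points at the block. */
static void release(shared_item* it) {
    if (it->rc > 0)
        it->rc--;
    else
        free(it);
}

int main(void) {
    shared_item* it = calloc(1, sizeof(*it));
    shared_item* copy = share(it);   /* like kv_copy_to: rc goes 0 -> 1   */
    assert(copy == it && it->rc == 1);
    release(it);                     /* one owner gone, block survives    */
    release(copy);                   /* last owner gone, block is freed   */
    puts("ok");
    return 0;
}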
@@ -455,6 +545,16 @@ rb_kv_alloc(VALUE klass) {
     return TypedData_Wrap_Struct(klass, &InMemoryKV_data_type, kv);
 }
 
+static inline VALUE
+item_key_str(hash_item* item) {
+    return rb_str_new(item_key(item), item_key_size(item));
+}
+
+static inline VALUE
+item_val_str(hash_item* item) {
+    return rb_str_new(item_val(item), item_val_size(item));
+}
+
 static VALUE
 rb_kv_get(VALUE self, VALUE vkey) {
     inmemory_kv* kv;
@@ -468,7 +568,7 @@ rb_kv_get(VALUE self, VALUE vkey) {
     size = RSTRING_LEN(vkey);
     item = kv_fetch(kv, key, size);
     if (item == NULL) return Qnil;
-    return
+    return item_val_str(item);
 }
 
 static VALUE
@@ -485,7 +585,7 @@ rb_kv_up(VALUE self, VALUE vkey) {
     item = kv_fetch(kv, key, size);
     if (item == NULL) return Qnil;
     kv_up(kv, item);
-    return
+    return item_val_str(item);
 }
 
 static VALUE
@@ -502,7 +602,7 @@ rb_kv_down(VALUE self, VALUE vkey) {
     item = kv_fetch(kv, key, size);
     if (item == NULL) return Qnil;
     kv_down(kv, item);
-    return
+    return item_val_str(item);
 }
 
 static VALUE
@@ -551,7 +651,7 @@ rb_kv_del(VALUE self, VALUE vkey) {
     size = RSTRING_LEN(vkey);
     item = kv_fetch(kv, key, size);
     if (item == NULL) return Qnil;
-    res =
+    res = item_val_str(item);
     kv_delete(kv, item);
     return res;
 }
@@ -565,8 +665,8 @@ rb_kv_first(VALUE self) {
     GetKV(self, kv);
     item = kv_first(kv);
     if (item == NULL) return Qnil;
-    key =
-    val =
+    key = item_key_str(item);
+    val = item_val_str(item);
     return rb_assoc_new(key, val);
 }
 
@@ -579,8 +679,8 @@ rb_kv_shift(VALUE self) {
     GetKV(self, kv);
     item = kv_first(kv);
     if (item == NULL) return Qnil;
-    key =
-    val =
+    key = item_key_str(item);
+    val = item_val_str(item);
     kv_delete(kv, item);
     return rb_assoc_new(key, val);
 }
@@ -637,21 +737,21 @@ rb_kv_total_size(VALUE self) {
 static void
 keys_i(hash_item* item, void* arg) {
     VALUE ary = (VALUE)arg;
-    rb_ary_push(ary,
+    rb_ary_push(ary, item_key_str(item));
 }
 
 static void
 vals_i(hash_item* item, void* arg) {
     VALUE ary = (VALUE)arg;
-    rb_ary_push(ary,
+    rb_ary_push(ary, item_val_str(item));
 }
 
 static void
 pairs_i(hash_item* item, void* arg) {
     VALUE ary = (VALUE)arg;
     VALUE key, val;
-    key =
-    val =
+    key = item_key_str(item);
+    val = item_val_str(item);
     rb_ary_push(ary, rb_assoc_new(key, val));
 }
 
@@ -687,19 +787,19 @@ rb_kv_entries(VALUE self) {
 
 static void
 key_i(hash_item* item, void* _ __attribute__((unused))) {
-    rb_yield(
+    rb_yield(item_key_str(item));
 }
 
 static void
 val_i(hash_item* item, void* _ __attribute__((unused))) {
-    rb_yield(
+    rb_yield(item_val_str(item));
 }
 
 static void
 pair_i(hash_item* item, void* _ __attribute__((unused))) {
     VALUE key, val;
-    key =
-    val =
+    key = item_key_str(item);
+    val = item_val_str(item);
     rb_yield(rb_assoc_new(key, val));
 }
 
@@ -737,14 +837,14 @@ static void
 inspect_i(hash_item* item, void* arg) {
     struct inspect_arg* a = arg;
     VALUE ins;
-    rb_str_cat(a->tmp, item
+    rb_str_cat(a->tmp, item_key(item), item_key_size(item));
     ins = rb_inspect(a->tmp);
     rb_str_cat(a->str, " ", 1);
     rb_str_cat(a->str, RSTRING_PTR(ins), RSTRING_LEN(ins));
     rb_str_cat(a->str, "=>", 2);
     rb_str_resize(ins, 0);
     rb_str_resize(a->tmp, 0);
-    rb_str_buf_cat(a->tmp, item_val(item), item
+    rb_str_buf_cat(a->tmp, item_val(item), item_val_size(item));
     ins = rb_inspect(a->tmp);
     rb_str_append(a->str, ins);
     rb_str_resize(ins, 0);
data/lib/inmemory_kv/version.rb
CHANGED
metadata
CHANGED
@@ -1,55 +1,55 @@
 --- !ruby/object:Gem::Specification
 name: inmemory_kv
 version: !ruby/object:Gem::Version
-  version: 0.1.
+  version: 0.1.2
 platform: ruby
 authors:
 - Sokolov Yura aka funny_falcon
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2015-
+date: 2015-07-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - ~>
+    - - "~>"
       - !ruby/object:Gem::Version
         version: '1.7'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ~>
+    - - "~>"
       - !ruby/object:Gem::Version
         version: '1.7'
 - !ruby/object:Gem::Dependency
   name: rake
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - ~>
+    - - "~>"
       - !ruby/object:Gem::Version
         version: '10.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ~>
+    - - "~>"
       - !ruby/object:Gem::Version
         version: '10.0'
 - !ruby/object:Gem::Dependency
   name: minitest
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ">="
       - !ruby/object:Gem::Version
         version: '0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - -
+    - - ">="
       - !ruby/object:Gem::Version
         version: '0'
 description: Simple in memory string/string hash
@@ -60,7 +60,7 @@ extensions:
 - ext/extconf.rb
 extra_rdoc_files: []
 files:
-- .gitignore
+- ".gitignore"
 - Gemfile
 - LICENSE.txt
 - README.md
@@ -82,19 +82,20 @@ require_paths:
 - ext
 required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
-  - -
+  - - ">="
     - !ruby/object:Gem::Version
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - -
+  - - ">="
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.4.
+rubygems_version: 2.4.4
 signing_key:
 specification_version: 4
 summary: Simple in memory string/string hash
 test_files:
 - test/test_str2str.rb
+has_rdoc: