ox 2.14.3 → 2.14.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +33 -1
- data/README.md +1 -1
- data/ext/ox/builder.c +8 -8
- data/ext/ox/cache.c +320 -131
- data/ext/ox/cache.h +15 -13
- data/ext/ox/dump.c +2 -2
- data/ext/ox/extconf.rb +5 -2
- data/ext/ox/gen_load.c +8 -76
- data/ext/ox/hash_load.c +0 -4
- data/ext/ox/intern.c +158 -0
- data/ext/ox/intern.h +25 -0
- data/ext/ox/obj_load.c +12 -85
- data/ext/ox/ox.c +1018 -931
- data/ext/ox/ox.h +188 -210
- data/ext/ox/oxcache.c +160 -0
- data/ext/ox/oxcache.h +19 -0
- data/ext/ox/parse.c +72 -31
- data/ext/ox/sax.c +1093 -1279
- data/ext/ox/sax.h +45 -31
- data/ext/ox/sax_as.c +3 -5
- data/ext/ox/sax_buf.c +7 -16
- data/lib/ox/version.rb +1 -1
- metadata +11 -5
- data/ext/ox/sax_has.h +0 -53
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
---
SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d34e2f569a6f870bc46c1ed8bf2e39e860f63ee8da1ad8aec0897e11904d8444
+  data.tar.gz: 90049302c1ba8e443a28aa1e69115db636ae86517695c6aab278fc8f7dcc8590
SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2f5f3e9ef3f7172a7c25b43b00a92c57858e052e592ec7265077dabf1ab8280888953639232ca06e088e4524287e4f264ac3ddfe017dfde9d5c5b8dc5bd847a8
+  data.tar.gz: 35ebe783817b228b82eaacfcbdc7805de7327ad77fcf34f605baf7fab53103b79f5bd893c13b9a1a8f74f2e529b241ae805a21f487993db229e03b817f78aabf
data/CHANGELOG.md
CHANGED
@@ -2,7 +2,39 @@

All changes to the Ox gem are documented here. Releases follow semantic versioning.

-## [2.14.3] - 2021-03-12
+## [2.14.7] - 2022-02-03
+
+### Fixed
+
+- All classes and symbols are now registered to avoid issues with GC compaction movement.
+- Parsing of any size processing instruction is now allowed. There is no 1024 limit.
+- Fixed the `\r` replacement with `\n` according to https://www.w3.org/TR/2008/REC-xml-20081126/#sec-line-ends.
+
+### Changed
+
+- Symbol and string caching changed but should have no impact on use
+  other than being slightly faster and handles large numbers of cached
+  items more efficiently.
+
+## [2.14.6] - 2021-11-03
+
+### Fixed
+
+- Closing tags in builder are now escapped correctly thanks to ezekg.
+
+## [2.14.5] - 2021-06-04
+
+### Fixed
+
+- Fixed RDoc for for Ox::Builder.
+
+## [2.14.4] - 2021-03-19
+
+### Fixed
+
+- Really fixed code issue around HAVE_RB_ENC_ASSOCIATE.
+
+## [2.14.3] - 2021-03-12

### Fixed

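The 2.14.7 entry above cites the XML line-end rule from https://www.w3.org/TR/2008/REC-xml-20081126/#sec-line-ends: a `\r\n` pair and a lone `\r` are both reported to the application as a single `\n`. The sketch below only illustrates that rule; it is not the parse.c change that shipped in the gem.

```c
/* Minimal illustration of the XML 1.0 line-end rule referenced in the 2.14.7
 * changelog entry: "\r\n" and a bare "\r" both become "\n". This is a sketch
 * written for this diff summary, not the gem's actual parser code. */
#include <stdio.h>

static void normalize_line_ends(char *buf) {
    char *src = buf;
    char *dst = buf;

    while ('\0' != *src) {
        if ('\r' == *src) {
            *dst++ = '\n';
            src++;
            if ('\n' == *src) {
                src++; /* "\r\n" collapses to a single "\n". */
            }
        } else {
            *dst++ = *src++;
        }
    }
    *dst = '\0';
}

int main(void) {
    char xml[] = "<top>\r\n  <mid/>\r</top>\r\n";

    normalize_line_ends(xml);
    fputs(xml, stdout); /* every line break is now a single "\n" */
    return 0;
}
```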
data/README.md
CHANGED
@@ -1,7 +1,7 @@
# Ox gem
A fast XML parser and Object marshaller as a Ruby gem.

-[](https://github.com/ohler55/ox/actions/workflows/CI.yml)

## Installation
gem install ox
data/ext/ox/builder.c
CHANGED
@@ -9,9 +9,7 @@
#include <string.h>

#include "ruby.h"
-#if HAVE_RB_ENC_ASSOCIATE
#include "ruby/encoding.h"
-#endif
#include "ox.h"
#include "buf.h"
#include "err.h"
@@ -286,7 +284,7 @@ pop(Builder b) {
        append_indent(b);
    }
    buf_append_string(&b->buf, "</", 2);
-
+    append_string(b, e->name, e->len, xml_element_chars, false);
    buf_append(&b->buf, '>');
    b->col += e->len + 3;
    b->pos += e->len + 3;
@@ -335,9 +333,7 @@ to_s(Builder b) {
    rstr = rb_str_new(b->buf.head, buf_len(&b->buf));

    if ('\0' != *b->encoding) {
-#if HAVE_RB_ENC_ASSOCIATE
        rb_enc_associate(rstr, rb_enc_find(b->encoding));
-#endif
    }
    return rstr;
}
@@ -898,8 +894,7 @@ builder_pos(VALUE self) {
 *
 * Closes the current element.
 */
-static VALUE
-builder_pop(VALUE self) {
+static VALUE builder_pop(VALUE self) {
    pop((Builder)DATA_PTR(self));

    return Qnil;
@@ -921,7 +916,12 @@ builder_close(VALUE self) {
 *
 * An XML builder.
 */
-void
+void
+ox_init_builder(VALUE ox) {
+#if 0
+    // Just for rdoc.
+    ox = rb_define_module("Ox");
+#endif
    builder_class = rb_define_class_under(ox, "Builder", rb_cObject);
    rb_define_module_function(builder_class, "new", builder_new, -1);
    rb_define_module_function(builder_class, "file", builder_file, -1);
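The builder.c hunks above drop the `HAVE_RB_ENC_ASSOCIATE` guards, so `ruby/encoding.h` is always included and `rb_enc_associate` is called unconditionally in `to_s`. A minimal sketch of that pattern in isolation; `make_encoded_str` is a hypothetical helper written for this summary, not a function in the gem.

```c
/* Sketch of the now-unconditional encoding association seen in to_s() above:
 * build a Ruby string from a C buffer and tag it with a named encoding.
 * make_encoded_str() is a hypothetical name used only for illustration. */
#include "ruby.h"
#include "ruby/encoding.h"

static VALUE make_encoded_str(const char *buf, long len, const char *enc_name) {
    VALUE rstr = rb_str_new(buf, len);

    if (NULL != enc_name && '\0' != *enc_name) {
        /* No HAVE_RB_ENC_ASSOCIATE check is needed any more. */
        rb_enc_associate(rstr, rb_enc_find(enc_name));
    }
    return rstr;
}
```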
data/ext/ox/cache.c
CHANGED
@@ -1,160 +1,349 @@
- * All rights reserved.
- */
+// Copyright (c) 2011, 2021 Peter Ohler. All rights reserved.
+// Licensed under the MIT License. See LICENSE file in the project root for license details.

+#if HAVE_PTHREAD_MUTEX_INIT
+#include <pthread.h>
+#endif
#include <stdlib.h>
-#include <errno.h>
-#include <stdio.h>
-#include <string.h>
-#include <strings.h>
-#include <stdarg.h>
-#include <stdint.h>

#include "cache.h"

-   of the key. */
-    char *key;
-    VALUE value;
-    struct _cache *slots[16];
-};
+// The stdlib calloc, realloc, and free are used instead of the Ruby ALLOC,
+// ALLOC_N, REALLOC, and xfree since the later could trigger a GC which will
+// either corrupt memory or if the mark function locks will deadlock.

+#define REHASH_LIMIT 4
+#define MIN_SHIFT 8
+#define REUSE_MAX 8192

+#if HAVE_PTHREAD_MUTEX_INIT
+#define CACHE_LOCK(c) pthread_mutex_lock(&((c)->mutex))
+#define CACHE_UNLOCK(c) pthread_mutex_unlock(&((c)->mutex))
+#else
+#define CACHE_LOCK(c) rb_mutex_lock((c)->mutex)
+#define CACHE_UNLOCK(c) rb_mutex_unlock((c)->mutex)
+#endif

+// almost the Murmur hash algorithm
+#define M 0x5bd1e995

+typedef struct _slot {
+    struct _slot * next;
+    VALUE val;
+    uint64_t hash;
+    volatile uint32_t use_cnt;
+    uint8_t klen;
+    char key[CACHE_MAX_KEY];
+} * Slot;
+
+typedef struct _cache {
+    volatile Slot * slots;
+    volatile size_t cnt;
+    VALUE (*form)(const char *str, size_t len);
+    uint64_t size;
+    uint64_t mask;
+    VALUE (*intern)(struct _cache *c, const char *key, size_t len, const char **keyp);
+    volatile Slot reuse;
+    size_t rcnt;
+#if HAVE_PTHREAD_MUTEX_INIT
+    pthread_mutex_t mutex;
+#else
+    VALUE mutex;
+#endif
+    uint8_t xrate;
+    bool mark;
+} * Cache;
+
+void cache_set_form(Cache c, VALUE (*form)(const char *str, size_t len)) {
+    c->form = form;
}

+static uint64_t hash_calc(const uint8_t *key, size_t len) {
+    const uint8_t *end = key + len;
+    const uint8_t *endless = key + (len & 0xFFFFFFFC);
+    uint64_t h = (uint64_t)len;
+    uint64_t k;
+
+    while (key < endless) {
+        k = (uint64_t)*key++;
+        k |= (uint64_t)*key++ << 8;
+        k |= (uint64_t)*key++ << 16;
+        k |= (uint64_t)*key++ << 24;
+
+        k *= M;
+        k ^= k >> 24;
+        h *= M;
+        h ^= k * M;
+    }
+    if (1 < end - key) {
+        uint16_t k16 = (uint16_t)*key++;
+
+        k16 |= (uint16_t)*key++ << 8;
+        h ^= k16 << 8;
+    }
+    if (key < end) {
+        h ^= *key;
+    }
+    h *= M;
+    h ^= h >> 13;
+    h *= M;
+    h ^= h >> 15;
+
+    return h;
}

+static void rehash(Cache c) {
+    uint64_t osize;
+    Slot * end;
+    Slot * sp;
+
+    osize = c->size;
+    c->size = osize * 4;
+    c->mask = c->size - 1;
+    c->slots = realloc((void *)c->slots, sizeof(Slot) * c->size);
+    memset((Slot *)c->slots + osize, 0, sizeof(Slot) * osize * 3);
+    end = (Slot *)c->slots + osize;
+    for (sp = (Slot *)c->slots; sp < end; sp++) {
+        Slot s = *sp;
+        Slot next = NULL;
+
+        *sp = NULL;
+        for (; NULL != s; s = next) {
+            uint64_t h = s->hash & c->mask;
+            Slot * bucket = (Slot *)c->slots + h;
+
+            next = s->next;
+            s->next = *bucket;
+            *bucket = s;
+        }
+    }
+}
+
+static VALUE lockless_intern(Cache c, const char *key, size_t len, const char **keyp) {
+    uint64_t h = hash_calc((const uint8_t *)key, len);
+    Slot * bucket = (Slot *)c->slots + (h & c->mask);
+    Slot b;
+    volatile VALUE rkey;
+
+    while (REUSE_MAX < c->rcnt) {
+        if (NULL != (b = c->reuse)) {
+            c->reuse = b->next;
+            free(b);
+            c->rcnt--;
+        } else {
+            // An accounting error occured somewhere so correct it.
+            c->rcnt = 0;
+        }
+    }
+    for (b = *bucket; NULL != b; b = b->next) {
+        if ((uint8_t)len == b->klen && 0 == strncmp(b->key, key, len)) {
+            b->use_cnt += 16;
+            if (NULL != keyp) {
+                *keyp = b->key;
+            }
+            return b->val;
+        }
+    }
+    rkey = c->form(key, len);
+    if (NULL == (b = c->reuse)) {
+        b = calloc(1, sizeof(struct _slot));
+    } else {
+        c->reuse = b->next;
+        c->rcnt--;
+    }
+    b->hash = h;
+    memcpy(b->key, key, len);
+    b->klen = (uint8_t)len;
+    b->key[len] = '\0';
+    b->val = rkey;
+    b->use_cnt = 4;
+    b->next = *bucket;
+    *bucket = b;
+    c->cnt++; // Don't worry about wrapping. Worse case is the entry is removed and recreated.
+    if (NULL != keyp) {
+        *keyp = b->key;
+    }
+    if (REHASH_LIMIT < c->cnt / c->size) {
+        rehash(c);
+    }
+    return rkey;
+}
+
+static VALUE locking_intern(Cache c, const char *key, size_t len, const char **keyp) {
+    uint64_t h;
+    Slot * bucket;
+    Slot b;
+    uint64_t old_size;
+    volatile VALUE rkey;
+
+    CACHE_LOCK(c);
+    while (REUSE_MAX < c->rcnt) {
+        if (NULL != (b = c->reuse)) {
+            c->reuse = b->next;
+            free(b);
+            c->rcnt--;
+        } else {
+            // An accounting error occured somewhere so correct it.
+            c->rcnt = 0;
        }
-        if ('\0' == *(k + 1)) { /* exact match */
-            if (0 == cache->key) { /* nothing in this spot so take it */
-                cache->key = form_key(key);
-                break;
-            } else if ((depth == *cache->key || 255 < depth) && 0 == strcmp(key, cache->key + 1)) { /* match */
-                break;
-            } else { /* have to move the current premature key/value deeper */
-                unsigned char *ck = (unsigned char*)(cache->key + depth + 1);
-                Cache orig = *cp;
-
-                cp = (*cp)->slots + (*ck >> 4);
-                ox_cache_new(cp);
-                cp = (*cp)->slots + (*ck & 0x0F);
-                ox_cache_new(cp);
-                (*cp)->key = cache->key;
-                (*cp)->value = cache->value;
-                orig->key = form_key(key);
-                orig->value = Qundef;
-            }
-        } else { /* not exact match but on the path */
-            if (0 != cache->key) { /* there is a key/value here already */
-                if (depth == *cache->key || (255 <= depth && 0 == strncmp(cache->key, key, depth) && '\0' == cache->key[depth])) { /* key belongs here */
-                    continue;
-                } else {
-                    unsigned char *ck = (unsigned char*)(cache->key + depth + 1);
-                    Cache orig = *cp;
-
-                    cp = (*cp)->slots + (*ck >> 4);
-                    ox_cache_new(cp);
-                    cp = (*cp)->slots + (*ck & 0x0F);
-                    ox_cache_new(cp);
-                    (*cp)->key = cache->key;
-                    (*cp)->value = cache->value;
-                    orig->key = 0;
-                    orig->value = Qundef;
-                }
-            }
-        }
+    }
+    h = hash_calc((const uint8_t *)key, len);
+    bucket = (Slot *)c->slots + (h & c->mask);
+    for (b = *bucket; NULL != b; b = b->next) {
+        if ((uint8_t)len == b->klen && 0 == strncmp(b->key, key, len)) {
+            b->use_cnt += 4;
+            if (NULL != keyp) {
+                *keyp = b->key;
+            }
+            CACHE_UNLOCK(c);
+
+            return b->val;
        }
    }
+    old_size = c->size;
+    // The creation of a new value may trigger a GC which be a problem if the
+    // cache is locked so make sure it is unlocked for the key value creation.
+    if (NULL != (b = c->reuse)) {
+        c->reuse = b->next;
+        c->rcnt--;
+    }
+    CACHE_UNLOCK(c);
+    if (NULL == b) {
+        b = calloc(1, sizeof(struct _slot));
+    }
+    rkey = c->form(key, len);
+    b->hash = h;
+    memcpy(b->key, key, len);
+    b->klen = (uint8_t)len;
+    b->key[len] = '\0';
+    b->val = rkey;
+    b->use_cnt = 16;
+
+    // Lock again to add the new entry.
+    CACHE_LOCK(c);
+    if (old_size != c->size) {
+        h = hash_calc((const uint8_t *)key, len);
+        bucket = (Slot *)c->slots + (h & c->mask);
+    }
+    b->next = *bucket;
+    *bucket = b;
+    c->cnt++; // Don't worry about wrapping. Worse case is the entry is removed and recreated.
+    if (NULL != keyp) {
+        *keyp = b->key;
+    }
+    if (REHASH_LIMIT < c->cnt / c->size) {
+        rehash(c);
+    }
+    CACHE_UNLOCK(c);
+
+    return rkey;
+}
+
+Cache cache_create(size_t size,
+                   VALUE (*form)(const char *str, size_t len),
+                   bool mark,
+                   bool locking) {
+    Cache c = calloc(1, sizeof(struct _cache));
+    int shift = 0;
+
+    for (; REHASH_LIMIT < size; size /= 2, shift++) {
    }
+    if (shift < MIN_SHIFT) {
+        shift = MIN_SHIFT;
+    }
+#if HAVE_PTHREAD_MUTEX_INIT
+    pthread_mutex_init(&c->mutex, NULL);
+#else
+    c->mutex = rb_mutex_new();
+#endif
+    c->size = 1 << shift;
+    c->mask = c->size - 1;
+    c->slots = calloc(c->size, sizeof(Slot));
+    c->form = form;
+    c->xrate = 1; // low
+    c->mark = mark;
+    if (locking) {
+        c->intern = locking_intern;
+    } else {
+        c->intern = lockless_intern;
+    }
+    return c;
+}
+
+void cache_set_expunge_rate(Cache c, int rate) {
+    c->xrate = (uint8_t)rate;
}

-void
+void cache_free(Cache c) {
+    uint64_t i;
+
+    for (i = 0; i < c->size; i++) {
+        Slot next;
+        Slot s;
+
+        for (s = c->slots[i]; NULL != s; s = next) {
+            next = s->next;
+            free(s);
+        }
+    }
+    free((void *)c->slots);
+    free(c);
}

-    char indent[256];
-    Cache *cp;
-    unsigned int i;
+void cache_mark(Cache c) {
+    uint64_t i;

+#if !HAVE_PTHREAD_MUTEX_INIT
+    rb_gc_mark(c->mutex);
+#endif
+    if (0 == c->cnt) {
+        return;
    }
-            /*printf("%s%02u:\n", indent, i);*/
-        } else {
-            if (0 == (*cp)->key && Qundef == (*cp)->value) {
-                printf("%s%02u:\n", indent, i);
-            } else {
-                const char *vs;
-                const char *clas;
-
-                if (Qundef == (*cp)->value) {
-                    vs = "undefined";
-                    clas = "";
-                } else {
-                    VALUE rs = rb_funcall2((*cp)->value, rb_intern("to_s"), 0, 0);
+    for (i = 0; i < c->size; i++) {
+        Slot s;
+        Slot prev = NULL;
+        Slot next;

+        for (s = c->slots[i]; NULL != s; s = next) {
+            next = s->next;
+            if (0 == s->use_cnt) {
+                if (NULL == prev) {
+                    c->slots[i] = next;
+                } else {
+                    prev->next = next;
                }
+                c->cnt--;
+                s->next = c->reuse;
+                c->reuse = s;
+                c->rcnt++;
+                continue;
+            }
+            switch (c->xrate) {
+            case 0: break;
+            case 2: s->use_cnt -= 2; break;
+            case 3: s->use_cnt /= 2; break;
+            default: s->use_cnt--; break;
+            }
+            if (c->mark) {
+                rb_gc_mark(s->val);
+            }
+            prev = s;
+        }
+    }
+}
+
+VALUE
+cache_intern(Cache c, const char *key, size_t len, const char **keyp) {
+    if (CACHE_MAX_KEY <= len) {
+        if (NULL != keyp) {
+            volatile VALUE rkey = c->form(key, len);
+
+            if (SYMBOL_P(rkey)) {
+                *keyp = rb_id2name(rb_sym2id(rkey));
            }
+            return rkey;
        }
+        return c->form(key, len);
    }
+    return c->intern(c, key, len, keyp);
}
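The sizing loop in `cache_create()` above is easy to misread, so here is the arithmetic for one concrete call, using only the constants visible in this hunk (`REHASH_LIMIT` = 4, `MIN_SHIFT` = 8); the call itself is hypothetical.

```c
/* Worked example of the table sizing in cache_create() above (assumed call):
 *
 *   cache_create(1024, form, true, false);
 *
 * The for loop halves size while REHASH_LIMIT (4) < size:
 *   1024 -> 512 -> 256 -> 128 -> 64 -> 32 -> 16 -> 8 -> 4, so shift == 8.
 * shift is not below MIN_SHIFT (8), so it stays 8 and the table starts with
 *   c->size = 1 << 8 = 256 buckets and c->mask = 255.
 * rehash() only runs later, once cnt / size exceeds REHASH_LIMIT. */
```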
data/ext/ox/cache.h
CHANGED
@@ -1,19 +1,21 @@
- * All rights reserved.
- */
+// Copyright (c) 2021 Peter Ohler. All rights reserved.
+// Licensed under the MIT License. See LICENSE file in the project root for license details.

-#ifndef
-#define
+#ifndef CACHE_H
+#define CACHE_H

-#include
+#include <ruby.h>
+#include <stdbool.h>

+#define CACHE_MAX_KEY 35

+struct _cache;

-extern
+extern struct _cache *cache_create(size_t size, VALUE (*form)(const char *str, size_t len), bool mark, bool locking);
+extern void cache_free(struct _cache *c);
+extern void cache_mark(struct _cache *c);
+extern void cache_set_form(struct _cache *c, VALUE (*form)(const char *str, size_t len));
+extern VALUE cache_intern(struct _cache *c, const char *key, size_t len, const char **keyp);
+extern void cache_set_expunge_rate(struct _cache *c, int rate);

-#endif /* OX_CACHE_H */
+#endif /* CACHE_H */
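cache.h above is the public face of the new cache: the caller supplies a `form` callback that builds the Ruby value on a miss, funnels lookups through `cache_intern`, and keeps cached values alive by calling `cache_mark` during GC. The block below is a hypothetical usage sketch assuming that API; `form_str`, `str_cache_init`, `intern_utf8`, and `str_cache_gc_mark` are illustrative names, not code from the gem.

```c
/* Hypothetical usage sketch of the cache API declared in cache.h above; the
 * names below are invented for illustration and do not appear in the gem. */
#include <ruby.h>
#include <stdbool.h>

#include "cache.h"

static struct _cache *str_cache = NULL;

/* Called by the cache on a miss to build the Ruby value that gets cached. */
static VALUE form_str(const char *str, size_t len) {
    return rb_utf8_str_new(str, (long)len);
}

void str_cache_init(bool locking) {
    /* mark=true so cache_mark() keeps the cached VALUEs alive;
     * locking=true selects the mutex-protected locking_intern path. */
    str_cache = cache_create(1024, form_str, true, locking);
    cache_set_expunge_rate(str_cache, 1); /* 1 = low, same as the default */
}

VALUE intern_utf8(const char *key, size_t len) {
    /* Keys of CACHE_MAX_KEY bytes or longer bypass the table inside cache_intern(). */
    return cache_intern(str_cache, key, len, NULL);
}

/* Hook this into a GC mark function so cached values are marked and idle
 * slots age out via use_cnt. */
void str_cache_gc_mark(void) {
    cache_mark(str_cache);
}
```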
data/ext/ox/dump.c
CHANGED
@@ -511,7 +511,7 @@ dump_date(Out out, VALUE obj) {
static void
dump_time_xsd(Out out, VALUE obj) {
    struct tm *tm;
-#if
+#if HAVE_RB_TIME_TIMESPEC
    struct timespec ts = rb_time_timespec(obj);
    time_t sec = ts.tv_sec;
    long nsec = ts.tv_nsec;
@@ -528,7 +528,7 @@ dump_time_xsd(Out out, VALUE obj) {
    }
    /* 2010-07-09T10:47:45.895826+09:00 */
    tm = localtime(&sec);
-#if
+#if HAVE_ST_TM_GMTOFF
    if (0 > tm->tm_gmtoff) {
        tzsign = '-';
        tzhour = (int)(tm->tm_gmtoff / -3600);
data/ext/ox/extconf.rb
CHANGED
@@ -33,11 +33,14 @@ CONFIG['warnflags'].slice!(/ -Wdeclaration-after-statement/)
CONFIG['warnflags'].slice!(/ -Wmissing-noreturn/)

have_func('rb_time_timespec')
-have_func('rb_enc_associate')
-have_func('rb_enc_find')
have_func('rb_struct_alloc_noinit')
have_func('rb_obj_encoding')
have_func('rb_ivar_foreach')
+have_func('rb_ext_ractor_safe', 'ruby.h')
+have_func('pthread_mutex_init')
+have_func('rb_enc_interned_str')
+have_func('rb_time_nano_new')
+have_func('index')

have_header('ruby/st.h')
have_header('sys/uio.h')
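Each `have_func` probe above makes mkmf define a matching `HAVE_*` macro (for example, `have_func('pthread_mutex_init')` yields `HAVE_PTHREAD_MUTEX_INIT`), which is what the `CACHE_LOCK`/`CACHE_UNLOCK` selection in cache.c keys off. A small guard in the same style is sketched below; the `DEMO_*` names are invented for this summary and do not appear in the gem.

```c
/* Illustrative guard driven by the have_func('pthread_mutex_init') probe
 * added above: mkmf defines HAVE_PTHREAD_MUTEX_INIT when the function is
 * found, and the source picks a pthread mutex or a Ruby mutex accordingly,
 * mirroring CACHE_LOCK/CACHE_UNLOCK in cache.c. Not code from the gem. */
#include <ruby.h>

#if HAVE_PTHREAD_MUTEX_INIT
#include <pthread.h>
typedef pthread_mutex_t demo_lock_t;
#define DEMO_LOCK_INIT(l) pthread_mutex_init((l), NULL)
#define DEMO_LOCK(l) pthread_mutex_lock(l)
#define DEMO_UNLOCK(l) pthread_mutex_unlock(l)
#else
typedef VALUE demo_lock_t;
#define DEMO_LOCK_INIT(l) (*(l) = rb_mutex_new())
#define DEMO_LOCK(l) rb_mutex_lock(*(l))
#define DEMO_UNLOCK(l) rb_mutex_unlock(*(l))
#endif
```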