ox 2.14.5 → 2.14.9

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: f3288687ada02ee5a8c19fbde18da50329577718b7db9650a3cf544e423d3c20
- data.tar.gz: 711f74754b5e417028c8b6c1956eda6c6483ca7a1ce906bbdb64a60c36169de4
+ metadata.gz: a3cc19bc7d94f71b8e8252f3b22475383d3700b2cfe7f3e97aeaff4c36dc4730
+ data.tar.gz: 0034552f9848f7215d7533066d5a971c59dbfd1526257b479512489fe30e371b
  SHA512:
- metadata.gz: 3ed4c311f6e5100b36db59aa46eaf5870479bee44ac733bd844591b87205da49437f715c504f0e048a73e7c0f82c953e74e5fe08772a723561c7b2c5ec30ef20
- data.tar.gz: 2149589a87c214dbbc5b999c4ae3123a106813d453b1f0e9c9b387d2356668d6b84de964b0e82a47f695810a776cb5e9960490ca6d391a4c77cbb1b6c8a39204
+ metadata.gz: b6ea9d757870451eaf28771481fe703ade50de9dc85d91575545be65ba0a65d7a71a4d193684130725ccad58ad506e1b47fd241aef92950c619be7d49779cbef
+ data.tar.gz: cdd97b3ee73f8755cc7a4296c64d41553b03619c7f37f561ad55b958e769e0dc65e652800da0a03b1a9ecb74a17b563188f01a1b8f86fed2d01395ec2eabd587
data/CHANGELOG.md CHANGED
@@ -2,6 +2,38 @@

  All changes to the Ox gem are documented here. Releases follow semantic versioning.

+ ## [2.14.9] - 2022-02-11
+
+ ### Fixed
+
+ - Fixed the `\r` to `\n` replacement in the SAX parser according to https://www.w3.org/TR/2008/REC-xml-20081126/#sec-line-ends.
+
+ ## [2.14.8] - 2022-02-09
+
+ ### Fixed
+
+ - Renamed internal functions to avoid linking issues where Oj and Ox function names collided.
+
+ ## [2.14.7] - 2022-02-03
+
+ ### Fixed
+
+ - All classes and symbols are now registered to avoid issues with GC compaction movement.
+ - Processing instructions of any size can now be parsed; the previous 1024 limit is gone.
+ - Fixed the `\r` to `\n` replacement according to https://www.w3.org/TR/2008/REC-xml-20081126/#sec-line-ends.
+
+ ### Changed
+
+ - Symbol and string caching changed but should have no impact on use
+ other than being slightly faster and handling large numbers of cached
+ items more efficiently.
+
+ ## [2.14.6] - 2021-11-03
+
+ ### Fixed
+
+ - Closing tags in builder are now escaped correctly thanks to ezekg.
+
  ## [2.14.5] - 2021-06-04

  ### Fixed
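The `\r` handling entries for 2.14.7 and 2.14.9 refer to the XML 1.0 line-end rules linked above: a conforming parser hands text to the application with `\r\n` pairs and lone `\r` characters already translated to `\n`. Below is a minimal sketch of that normalization for illustration only; it is not Ox's code, and the in-place buffer approach and function name are assumptions.

```c
#include <stdio.h>

/*
 * Illustrative sketch only, not Ox's implementation. XML 1.0 section 2.11
 * (the spec linked in the changelog entries above) requires translating the
 * two-character sequence "\r\n" and any lone '\r' into a single '\n' before
 * the text reaches the application.
 */
static void normalize_line_ends(char *buf) {
    char *src = buf;
    char *dst = buf;

    while ('\0' != *src) {
        if ('\r' == *src) {
            *dst++ = '\n';                      /* \r or \r\n collapses to \n */
            src += ('\n' == src[1]) ? 2 : 1;
        } else {
            *dst++ = *src++;
        }
    }
    *dst = '\0';
}

int main(void) {
    char text[] = "line one\r\nline two\rline three\n";

    normalize_line_ends(text);
    /* Prints three lines, each terminated by a plain \n. */
    fputs(text, stdout);
    return 0;
}
```

Whichever line-end convention the input uses, the text handed on contains only `\n`, which is what the SAX parser fix in 2.14.9 restores.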
data/README.md CHANGED
@@ -1,7 +1,7 @@
  # Ox gem
  A fast XML parser and Object marshaller as a Ruby gem.

- [![Build status](https://ci.appveyor.com/api/projects/status/pg2w4wspbrflbi8c?svg=true)](https://ci.appveyor.com/project/ohler55/ox)
+ [![Build Status](https://img.shields.io/github/workflow/status/ohler55/ox/CI?logo=github)](https://github.com/ohler55/ox/actions/workflows/CI.yml)

  ## Installation
  gem install ox
data/ext/ox/builder.c CHANGED
@@ -9,9 +9,7 @@
  #include <string.h>

  #include "ruby.h"
- #if HAVE_RB_ENC_ASSOCIATE
  #include "ruby/encoding.h"
- #endif
  #include "ox.h"
  #include "buf.h"
  #include "err.h"
@@ -286,7 +284,7 @@ pop(Builder b) {
  append_indent(b);
  }
  buf_append_string(&b->buf, "</", 2);
- buf_append_string(&b->buf, e->name, e->len);
+ append_string(b, e->name, e->len, xml_element_chars, false);
  buf_append(&b->buf, '>');
  b->col += e->len + 3;
  b->pos += e->len + 3;
@@ -335,9 +333,7 @@ to_s(Builder b) {
  rstr = rb_str_new(b->buf.head, buf_len(&b->buf));

  if ('\0' != *b->encoding) {
- #if HAVE_RB_ENC_ASSOCIATE
  rb_enc_associate(rstr, rb_enc_find(b->encoding));
- #endif
  }
  return rstr;
  }
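The one-line change in `pop()` routes the closing tag name through `append_string()` with the `xml_element_chars` map instead of copying it verbatim with `buf_append_string()`; that is the 2.14.6 "closing tags are now escaped" fix. The sketch below shows the general table-driven escaping technique only; the `xml_entity()` helper and the entity choices are illustrative assumptions, not Ox's internals.

```c
#include <stdio.h>
#include <string.h>

/* Map a byte to an XML entity, or NULL when it can pass through unchanged.
 * Ox uses a character classification table (xml_element_chars) for the same
 * decision; a switch keeps this sketch self-contained. */
static const char *xml_entity(unsigned char c) {
    switch (c) {
    case '<': return "&lt;";
    case '>': return "&gt;";
    case '&': return "&amp;";
    default: return NULL;
    }
}

static void append_escaped(FILE *out, const char *s, size_t len) {
    size_t i;

    for (i = 0; i < len; i++) {
        const char *ent = xml_entity((unsigned char)s[i]);

        if (NULL == ent) {
            fputc(s[i], out);
        } else {
            fputs(ent, out);
        }
    }
}

int main(void) {
    const char *name = "bad<name";

    /* Before the fix the closing tag name was copied verbatim; escaping it
     * keeps the generated document well formed even for unusual names. */
    fputs("</", stdout);
    append_escaped(stdout, name, strlen(name));
    fputs(">\n", stdout);
    return 0;
}
```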
data/ext/ox/cache.c CHANGED
@@ -1,160 +1,338 @@
- /* cache.c
- * Copyright (c) 2011, Peter Ohler
- * All rights reserved.
- */
+ // Copyright (c) 2011, 2021 Peter Ohler. All rights reserved.
+ // Licensed under the MIT License. See LICENSE file in the project root for license details.

+ #if HAVE_PTHREAD_MUTEX_INIT
+ #include <pthread.h>
+ #endif
  #include <stdlib.h>
- #include <errno.h>
- #include <stdio.h>
- #include <string.h>
- #include <strings.h>
- #include <stdarg.h>
- #include <stdint.h>

  #include "cache.h"

- struct _cache {
- /* The key is a length byte followed by the key as a string. If the key is longer than 254 characters then the
- length is 255. The key can be for a premature value and in that case the length byte is greater than the length
- of the key. */
- char *key;
- VALUE value;
- struct _cache *slots[16];
- };
+ // The stdlib calloc, realloc, and free are used instead of the Ruby ALLOC,
+ // ALLOC_N, REALLOC, and xfree since the latter could trigger a GC, which would
+ // either corrupt memory or, if the mark function locks, deadlock.

- static void slot_print(Cache cache, unsigned int depth);
+ #define REHASH_LIMIT 4
+ #define MIN_SHIFT 8
+ #define REUSE_MAX 8192

- static char* form_key(const char *s) {
- size_t len = strlen(s);
- char *d = ALLOC_N(char, len + 2);
+ #if HAVE_PTHREAD_MUTEX_INIT
+ #define CACHE_LOCK(c) pthread_mutex_lock(&((c)->mutex))
+ #define CACHE_UNLOCK(c) pthread_mutex_unlock(&((c)->mutex))
+ #else
+ #define CACHE_LOCK(c) rb_mutex_lock((c)->mutex)
+ #define CACHE_UNLOCK(c) rb_mutex_unlock((c)->mutex)
+ #endif

- *(uint8_t*)d = (255 <= len) ? 255 : len;
- memcpy(d + 1, s, len + 1);
+ // almost the Murmur hash algorithm
+ #define M 0x5bd1e995

- return d;
+ typedef struct _slot {
+ struct _slot *next;
+ VALUE val;
+ uint64_t hash;
+ volatile uint32_t use_cnt;
+ uint8_t klen;
+ char key[CACHE_MAX_KEY];
+ } * Slot;
+
+ typedef struct _cache {
+ volatile Slot *slots;
+ volatile size_t cnt;
+ VALUE (*form)(const char *str, size_t len);
+ uint64_t size;
+ uint64_t mask;
+ VALUE (*intern)(struct _cache *c, const char *key, size_t len, const char **keyp);
+ volatile Slot reuse;
+ size_t rcnt;
+ #if HAVE_PTHREAD_MUTEX_INIT
+ pthread_mutex_t mutex;
+ #else
+ VALUE mutex;
+ #endif
+ uint8_t xrate;
+ bool mark;
+ } * Cache;
+
+ static uint64_t hash_calc(const uint8_t *key, size_t len) {
+ const uint8_t *end = key + len;
+ const uint8_t *endless = key + (len & 0xFFFFFFFC);
+ uint64_t h = (uint64_t)len;
+ uint64_t k;
+
+ while (key < endless) {
+ k = (uint64_t)*key++;
+ k |= (uint64_t)*key++ << 8;
+ k |= (uint64_t)*key++ << 16;
+ k |= (uint64_t)*key++ << 24;
+
+ k *= M;
+ k ^= k >> 24;
+ h *= M;
+ h ^= k * M;
+ }
+ if (1 < end - key) {
+ uint16_t k16 = (uint16_t)*key++;
+
+ k16 |= (uint16_t)*key++ << 8;
+ h ^= k16 << 8;
+ }
+ if (key < end) {
+ h ^= *key;
+ }
+ h *= M;
+ h ^= h >> 13;
+ h *= M;
+ h ^= h >> 15;
+
+ return h;
  }

- void
- ox_cache_new(Cache *cache) {
- *cache = ALLOC(struct _cache);
- (*cache)->key = 0;
- (*cache)->value = Qundef;
- memset((*cache)->slots, 0, sizeof((*cache)->slots));
+ static void rehash(Cache c) {
+ uint64_t osize;
+ Slot *end;
+ Slot *sp;
+
+ osize = c->size;
+ c->size = osize * 4;
+ c->mask = c->size - 1;
+ c->slots = realloc((void *)c->slots, sizeof(Slot) * c->size);
+ memset((Slot *)c->slots + osize, 0, sizeof(Slot) * osize * 3);
+ end = (Slot *)c->slots + osize;
+ for (sp = (Slot *)c->slots; sp < end; sp++) {
+ Slot s = *sp;
+ Slot next = NULL;
+
+ *sp = NULL;
+ for (; NULL != s; s = next) {
+ uint64_t h = s->hash & c->mask;
+ Slot *bucket = (Slot *)c->slots + h;
+
+ next = s->next;
+ s->next = *bucket;
+ *bucket = s;
+ }
+ }
  }

- VALUE
- ox_cache_get(Cache cache, const char *key, VALUE **slot, const char **keyp) {
- unsigned char *k = (unsigned char*)key;
- Cache *cp;
-
- for (; '\0' != *k; k++) {
- cp = cache->slots + (unsigned int)(*k >> 4); /* upper 4 bits */
- if (0 == *cp) {
- ox_cache_new(cp);
+ static VALUE ox_lockless_intern(Cache c, const char *key, size_t len, const char **keyp) {
+ uint64_t h = hash_calc((const uint8_t *)key, len);
+ Slot *bucket = (Slot *)c->slots + (h & c->mask);
+ Slot b;
+ volatile VALUE rkey;
+
+ while (REUSE_MAX < c->rcnt) {
+ if (NULL != (b = c->reuse)) {
+ c->reuse = b->next;
+ free(b);
+ c->rcnt--;
+ } else {
+ // An accounting error occurred somewhere so correct it.
+ c->rcnt = 0;
+ }
+ }
+ for (b = *bucket; NULL != b; b = b->next) {
+ if ((uint8_t)len == b->klen && 0 == strncmp(b->key, key, len)) {
+ b->use_cnt += 16;
+ if (NULL != keyp) {
+ *keyp = b->key;
+ }
+ return b->val;
  }
- cache = *cp;
- cp = cache->slots + (unsigned int)(*k & 0x0F); /* lower 4 bits */
- if (0 == *cp) { /* nothing on this tree so set key and value as a premature key/value pair */
- ox_cache_new(cp);
- cache = *cp;
- cache->key = form_key(key);
- break;
- } else {
- int depth = (int)(k - (unsigned char*)key + 1);
-
- cache = *cp;
-
- if ('\0' == *(k + 1)) { /* exact match */
- if (0 == cache->key) { /* nothing in this spot so take it */
- cache->key = form_key(key);
- break;
- } else if ((depth == *cache->key || 255 < depth) && 0 == strcmp(key, cache->key + 1)) { /* match */
- break;
- } else { /* have to move the current premature key/value deeper */
- unsigned char *ck = (unsigned char*)(cache->key + depth + 1);
- Cache orig = *cp;
-
- cp = (*cp)->slots + (*ck >> 4);
- ox_cache_new(cp);
- cp = (*cp)->slots + (*ck & 0x0F);
- ox_cache_new(cp);
- (*cp)->key = cache->key;
- (*cp)->value = cache->value;
- orig->key = form_key(key);
- orig->value = Qundef;
- }
- } else { /* not exact match but on the path */
- if (0 != cache->key) { /* there is a key/value here already */
- if (depth == *cache->key || (255 <= depth && 0 == strncmp(cache->key, key, depth) && '\0' == cache->key[depth])) { /* key belongs here */
- continue;
- } else {
- unsigned char *ck = (unsigned char*)(cache->key + depth + 1);
- Cache orig = *cp;
-
- cp = (*cp)->slots + (*ck >> 4);
- ox_cache_new(cp);
- cp = (*cp)->slots + (*ck & 0x0F);
- ox_cache_new(cp);
- (*cp)->key = cache->key;
- (*cp)->value = cache->value;
- orig->key = 0;
- orig->value = Qundef;
- }
- }
- }
+ }
+ rkey = c->form(key, len);
+ if (NULL == (b = c->reuse)) {
+ b = calloc(1, sizeof(struct _slot));
+ } else {
+ c->reuse = b->next;
+ c->rcnt--;
+ }
+ b->hash = h;
+ memcpy(b->key, key, len);
+ b->klen = (uint8_t)len;
+ b->key[len] = '\0';
+ b->val = rkey;
+ b->use_cnt = 4;
+ b->next = *bucket;
+ *bucket = b;
+ c->cnt++; // Don't worry about wrapping. Worst case is the entry is removed and recreated.
+ if (NULL != keyp) {
+ *keyp = b->key;
+ }
+ if (REHASH_LIMIT < c->cnt / c->size) {
+ rehash(c);
+ }
+ return rkey;
+ }
+
+ static VALUE ox_locking_intern(Cache c, const char *key, size_t len, const char **keyp) {
+ uint64_t h;
+ Slot *bucket;
+ Slot b;
+ uint64_t old_size;
+ volatile VALUE rkey;
+
+ CACHE_LOCK(c);
+ while (REUSE_MAX < c->rcnt) {
+ if (NULL != (b = c->reuse)) {
+ c->reuse = b->next;
+ free(b);
+ c->rcnt--;
+ } else {
+ // An accounting error occurred somewhere so correct it.
+ c->rcnt = 0;
+ }
+ }
+ h = hash_calc((const uint8_t *)key, len);
+ bucket = (Slot *)c->slots + (h & c->mask);
+ for (b = *bucket; NULL != b; b = b->next) {
+ if ((uint8_t)len == b->klen && 0 == strncmp(b->key, key, len)) {
+ b->use_cnt += 4;
+ if (NULL != keyp) {
+ *keyp = b->key;
+ }
+ CACHE_UNLOCK(c);
+
+ return b->val;
  }
  }
- *slot = &cache->value;
- if (0 != keyp) {
- if (0 == cache->key) {
- printf("*** Error: failed to set the key for '%s'\n", key);
- *keyp = 0;
- } else {
- *keyp = cache->key + 1;
- }
+ old_size = c->size;
+ // The creation of a new value may trigger a GC, which would be a problem if the
+ // cache is locked, so make sure it is unlocked for the key value creation.
+ if (NULL != (b = c->reuse)) {
+ c->reuse = b->next;
+ c->rcnt--;
+ }
+ CACHE_UNLOCK(c);
+ if (NULL == b) {
+ b = calloc(1, sizeof(struct _slot));
+ }
+ rkey = c->form(key, len);
+ b->hash = h;
+ memcpy(b->key, key, len);
+ b->klen = (uint8_t)len;
+ b->key[len] = '\0';
+ b->val = rkey;
+ b->use_cnt = 16;
+
+ // Lock again to add the new entry.
+ CACHE_LOCK(c);
+ if (old_size != c->size) {
+ h = hash_calc((const uint8_t *)key, len);
+ bucket = (Slot *)c->slots + (h & c->mask);
+ }
+ b->next = *bucket;
+ *bucket = b;
+ c->cnt++; // Don't worry about wrapping. Worst case is the entry is removed and recreated.
+ if (NULL != keyp) {
+ *keyp = b->key;
  }
- return cache->value;
+ if (REHASH_LIMIT < c->cnt / c->size) {
+ rehash(c);
+ }
+ CACHE_UNLOCK(c);
+
+ return rkey;
  }

- void
- ox_cache_print(Cache cache) {
- /*printf("-------------------------------------------\n");*/
- slot_print(cache, 0);
+ Cache ox_cache_create(size_t size, VALUE (*form)(const char *str, size_t len), bool mark, bool locking) {
+ Cache c = calloc(1, sizeof(struct _cache));
+ int shift = 0;
+
+ for (; REHASH_LIMIT < size; size /= 2, shift++) {
+ }
+ if (shift < MIN_SHIFT) {
+ shift = MIN_SHIFT;
+ }
+ #if HAVE_PTHREAD_MUTEX_INIT
+ pthread_mutex_init(&c->mutex, NULL);
+ #else
+ c->mutex = rb_mutex_new();
+ #endif
+ c->size = 1 << shift;
+ c->mask = c->size - 1;
+ c->slots = calloc(c->size, sizeof(Slot));
+ c->form = form;
+ c->xrate = 1; // low
+ c->mark = mark;
+ if (locking) {
+ c->intern = ox_locking_intern;
+ } else {
+ c->intern = ox_lockless_intern;
+ }
+ return c;
  }

- static void
- slot_print(Cache c, unsigned int depth) {
- char indent[256];
- Cache *cp;
- unsigned int i;
+ void ox_cache_free(Cache c) {
+ uint64_t i;

- if (sizeof(indent) - 1 < depth) {
- depth = ((int)sizeof(indent) - 1);
+ for (i = 0; i < c->size; i++) {
+ Slot next;
+ Slot s;
+
+ for (s = c->slots[i]; NULL != s; s = next) {
+ next = s->next;
+ free(s);
+ }
  }
- memset(indent, ' ', depth);
- indent[depth] = '\0';
- for (i = 0, cp = c->slots; i < 16; i++, cp++) {
- if (0 == *cp) {
- /*printf("%s%02u:\n", indent, i);*/
- } else {
- if (0 == (*cp)->key && Qundef == (*cp)->value) {
- printf("%s%02u:\n", indent, i);
- } else {
- const char *vs;
- const char *clas;
-
- if (Qundef == (*cp)->value) {
- vs = "undefined";
- clas = "";
- } else {
- VALUE rs = rb_funcall2((*cp)->value, rb_intern("to_s"), 0, 0);
+ free((void *)c->slots);
+ free(c);
+ }
+
+ void ox_cache_mark(Cache c) {
+ uint64_t i;
+
+ #if !HAVE_PTHREAD_MUTEX_INIT
+ rb_gc_mark(c->mutex);
+ #endif
+ if (0 == c->cnt) {
+ return;
+ }
+ for (i = 0; i < c->size; i++) {
+ Slot s;
+ Slot prev = NULL;
+ Slot next;

- vs = StringValuePtr(rs);
- clas = rb_class2name(rb_obj_class((*cp)->value));
+ for (s = c->slots[i]; NULL != s; s = next) {
+ next = s->next;
+ if (0 == s->use_cnt) {
+ if (NULL == prev) {
+ c->slots[i] = next;
+ } else {
+ prev->next = next;
  }
- printf("%s%02u: %s = %s (%s)\n", indent, i, (*cp)->key, vs, clas);
+ c->cnt--;
+ s->next = c->reuse;
+ c->reuse = s;
+ c->rcnt++;
+ continue;
+ }
+ switch (c->xrate) {
+ case 0: break;
+ case 2: s->use_cnt -= 2; break;
+ case 3: s->use_cnt /= 2; break;
+ default: s->use_cnt--; break;
+ }
+ if (c->mark) {
+ rb_gc_mark(s->val);
+ }
+ prev = s;
+ }
+ }
+ }
+
+ VALUE
+ ox_cache_intern(Cache c, const char *key, size_t len, const char **keyp) {
+ if (CACHE_MAX_KEY <= len) {
+ if (NULL != keyp) {
+ volatile VALUE rkey = c->form(key, len);
+
+ if (SYMBOL_P(rkey)) {
+ *keyp = rb_id2name(rb_sym2id(rkey));
  }
- slot_print(*cp, depth + 2);
+ return rkey;
  }
+ return c->form(key, len);
  }
+ return c->intern(c, key, len, keyp);
  }
data/ext/ox/cache.h CHANGED
@@ -1,19 +1,19 @@
- /* cache.h
- * Copyright (c) 2011, Peter Ohler
- * All rights reserved.
- */
+ // Copyright (c) 2021 Peter Ohler. All rights reserved.
+ // Licensed under the MIT License. See LICENSE file in the project root for license details.

  #ifndef OX_CACHE_H
  #define OX_CACHE_H

- #include "ruby.h"
+ #include <ruby.h>
+ #include <stdbool.h>

- typedef struct _cache *Cache;
+ #define CACHE_MAX_KEY 35

- extern void ox_cache_new(Cache *cache);
+ struct _cache;

- extern VALUE ox_cache_get(Cache cache, const char *key, VALUE **slot, const char **keyp);
-
- extern void ox_cache_print(Cache cache);
+ extern struct _cache *ox_cache_create(size_t size, VALUE (*form)(const char *str, size_t len), bool mark, bool locking);
+ extern void ox_cache_free(struct _cache *c);
+ extern void ox_cache_mark(struct _cache *c);
+ extern VALUE ox_cache_intern(struct _cache *c, const char *key, size_t len, const char **keyp);

  #endif /* OX_CACHE_H */
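The rewritten cache API above is deliberately small: an opaque `struct _cache`, a constructor that takes a `form` callback for building the Ruby object on a miss, an intern call, and mark/free hooks for the GC. A hypothetical usage sketch follows; the `form_sym` callback, the cache size, and the flag choices are assumptions for illustration, not Ox's actual call sites.

```c
/*
 * Hypothetical wiring of the cache API declared above; not Ox's actual call
 * sites. Compile as part of the extension so ruby.h and cache.h are available.
 */
#include <ruby.h>
#include <ruby/encoding.h>

#include "cache.h"

/* Build the Ruby object for a key that missed the cache. */
static VALUE form_sym(const char *str, size_t len) {
    return ID2SYM(rb_intern3(str, (long)len, rb_utf8_encoding()));
}

static struct _cache *sym_cache = NULL;

void example_cache_init(void) {
    /* mark = true so cached VALUEs survive GC; locking = false selects the
     * lockless intern path shown in cache.c. */
    sym_cache = ox_cache_create(1024, form_sym, true, false);
}

/* Called from the extension's GC mark function so entries are not collected
 * and unused slots (use_cnt of 0) are recycled. */
void example_cache_gc_mark(void) {
    ox_cache_mark(sym_cache);
}

VALUE example_intern_name(const char *name, size_t len) {
    /* Keys at or above CACHE_MAX_KEY bypass the cache inside ox_cache_intern. */
    return ox_cache_intern(sym_cache, name, len, NULL);
}
```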
data/ext/ox/dump.c CHANGED
@@ -511,7 +511,7 @@ dump_date(Out out, VALUE obj) {
  static void
  dump_time_xsd(Out out, VALUE obj) {
  struct tm *tm;
- #if HAS_RB_TIME_TIMESPEC
+ #if HAVE_RB_TIME_TIMESPEC
  struct timespec ts = rb_time_timespec(obj);
  time_t sec = ts.tv_sec;
  long nsec = ts.tv_nsec;
@@ -528,7 +528,7 @@ dump_time_xsd(Out out, VALUE obj) {
  }
  /* 2010-07-09T10:47:45.895826+09:00 */
  tm = localtime(&sec);
- #if HAS_TM_GMTOFF
+ #if HAVE_ST_TM_GMTOFF
  if (0 > tm->tm_gmtoff) {
  tzsign = '-';
  tzhour = (int)(tm->tm_gmtoff / -3600);
data/ext/ox/extconf.rb CHANGED
@@ -33,11 +33,14 @@ CONFIG['warnflags'].slice!(/ -Wdeclaration-after-statement/)
  CONFIG['warnflags'].slice!(/ -Wmissing-noreturn/)

  have_func('rb_time_timespec')
- have_func('rb_enc_associate')
- have_func('rb_enc_find')
  have_func('rb_struct_alloc_noinit')
  have_func('rb_obj_encoding')
  have_func('rb_ivar_foreach')
+ have_func('rb_ext_ractor_safe', 'ruby.h')
+ have_func('pthread_mutex_init')
+ have_func('rb_enc_interned_str')
+ have_func('rb_time_nano_new')
+ have_func('index')

  have_header('ruby/st.h')
  have_header('sys/uio.h')
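The new `have_func` checks follow the usual mkmf convention: when a function is found, a `HAVE_<NAME>` macro is made available to the C sources, which is also why the dump.c hunk above uses the `HAVE_`-prefixed names rather than the older `HAS_` ones. As an illustration only (not code from Ox), a guard for the newly checked `rb_enc_interned_str` might look like this:

```c
/*
 * Illustrative guard only, not code from Ox. mkmf's
 * have_func('rb_enc_interned_str') makes HAVE_RB_ENC_INTERNED_STR available
 * when the running Ruby provides the function, so the extension can prefer
 * interned (frozen, deduplicated) strings and fall back on older Rubies.
 */
#include <ruby.h>
#include <ruby/encoding.h>

static VALUE str_for_key(const char *str, size_t len) {
#if HAVE_RB_ENC_INTERNED_STR
    return rb_enc_interned_str(str, (long)len, rb_utf8_encoding());
#else
    return rb_utf8_str_new(str, (long)len);
#endif
}
```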