ruby-minigraph 0.0.20.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
Files changed (89)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +62 -0
  4. data/ext/Rakefile +56 -0
  5. data/ext/cmappy/cmappy.c +7 -0
  6. data/ext/cmappy/cmappy.h +8 -0
  7. data/ext/minigraph/LICENSE.txt +23 -0
  8. data/ext/minigraph/Makefile +66 -0
  9. data/ext/minigraph/NEWS.md +317 -0
  10. data/ext/minigraph/README.md +207 -0
  11. data/ext/minigraph/algo.c +194 -0
  12. data/ext/minigraph/algo.h +33 -0
  13. data/ext/minigraph/asm-call.c +147 -0
  14. data/ext/minigraph/bseq.c +133 -0
  15. data/ext/minigraph/bseq.h +76 -0
  16. data/ext/minigraph/cal_cov.c +139 -0
  17. data/ext/minigraph/doc/example1.png +0 -0
  18. data/ext/minigraph/doc/example2.png +0 -0
  19. data/ext/minigraph/doc/examples.graffle +0 -0
  20. data/ext/minigraph/format.c +241 -0
  21. data/ext/minigraph/galign.c +140 -0
  22. data/ext/minigraph/gchain1.c +532 -0
  23. data/ext/minigraph/gcmisc.c +223 -0
  24. data/ext/minigraph/gfa-aug.c +260 -0
  25. data/ext/minigraph/gfa-base.c +526 -0
  26. data/ext/minigraph/gfa-bbl.c +372 -0
  27. data/ext/minigraph/gfa-ed.c +617 -0
  28. data/ext/minigraph/gfa-io.c +395 -0
  29. data/ext/minigraph/gfa-priv.h +154 -0
  30. data/ext/minigraph/gfa.h +166 -0
  31. data/ext/minigraph/ggen.c +182 -0
  32. data/ext/minigraph/ggen.h +21 -0
  33. data/ext/minigraph/ggsimple.c +570 -0
  34. data/ext/minigraph/gmap.c +211 -0
  35. data/ext/minigraph/index.c +230 -0
  36. data/ext/minigraph/kalloc.c +224 -0
  37. data/ext/minigraph/kalloc.h +82 -0
  38. data/ext/minigraph/kavl.h +414 -0
  39. data/ext/minigraph/kdq.h +134 -0
  40. data/ext/minigraph/ketopt.h +116 -0
  41. data/ext/minigraph/khashl.h +348 -0
  42. data/ext/minigraph/krmq.h +474 -0
  43. data/ext/minigraph/kseq.h +256 -0
  44. data/ext/minigraph/ksort.h +164 -0
  45. data/ext/minigraph/kstring.h +165 -0
  46. data/ext/minigraph/kthread.c +159 -0
  47. data/ext/minigraph/kthread.h +15 -0
  48. data/ext/minigraph/kvec-km.h +105 -0
  49. data/ext/minigraph/kvec.h +110 -0
  50. data/ext/minigraph/lchain.c +441 -0
  51. data/ext/minigraph/main.c +301 -0
  52. data/ext/minigraph/map-algo.c +500 -0
  53. data/ext/minigraph/mgpriv.h +128 -0
  54. data/ext/minigraph/minigraph.1 +359 -0
  55. data/ext/minigraph/minigraph.h +176 -0
  56. data/ext/minigraph/miniwfa.c +834 -0
  57. data/ext/minigraph/miniwfa.h +95 -0
  58. data/ext/minigraph/misc/mgutils.js +1451 -0
  59. data/ext/minigraph/misc.c +12 -0
  60. data/ext/minigraph/options.c +134 -0
  61. data/ext/minigraph/shortk.c +251 -0
  62. data/ext/minigraph/sketch.c +109 -0
  63. data/ext/minigraph/sys.c +147 -0
  64. data/ext/minigraph/sys.h +20 -0
  65. data/ext/minigraph/test/MT-chimp.fa +277 -0
  66. data/ext/minigraph/test/MT-human.fa +239 -0
  67. data/ext/minigraph/test/MT-orangA.fa +276 -0
  68. data/ext/minigraph/test/MT.gfa +19 -0
  69. data/ext/minigraph/tex/Makefile +13 -0
  70. data/ext/minigraph/tex/minigraph.bib +676 -0
  71. data/ext/minigraph/tex/minigraph.tex +986 -0
  72. data/ext/minigraph/tex/plots/CHM13-f1-90.bb.anno.gp +42 -0
  73. data/ext/minigraph/tex/plots/CHM13-f1-90.bb.anno.tbl +13 -0
  74. data/ext/minigraph/tex/plots/CHM13-f1-90.bb.mini-inter-none.win.gp +269 -0
  75. data/ext/minigraph/tex/plots/CHM13-f1-90.bb.mini-inter-none.win.sh +7 -0
  76. data/ext/minigraph/tex/plots/CHM13v1.cen.bed +23 -0
  77. data/ext/minigraph/tex/plots/CHM13v1.size +23 -0
  78. data/ext/minigraph/tex/plots/anno2tbl.js +40 -0
  79. data/ext/minigraph/tex/plots/bedutils.js +367 -0
  80. data/ext/minigraph/tex/plots/chr-plot.js +130 -0
  81. data/ext/minigraph/tex/plots/gen-anno.mak +24 -0
  82. data/ext/minigraph.patch +21 -0
  83. data/lib/minigraph/ffi/constants.rb +230 -0
  84. data/lib/minigraph/ffi/functions.rb +70 -0
  85. data/lib/minigraph/ffi/mappy.rb +8 -0
  86. data/lib/minigraph/ffi.rb +27 -0
  87. data/lib/minigraph/version.rb +5 -0
  88. data/lib/minigraph.rb +72 -0
  89. metadata +159 -0

data/ext/minigraph/kalloc.h
@@ -0,0 +1,82 @@
+ #ifndef _KALLOC_H_
+ #define _KALLOC_H_
+
+ #include <stddef.h> /* for size_t */
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ typedef struct {
+     size_t capacity, available, n_blocks, n_cores, largest;
+ } km_stat_t;
+
+ void *kmalloc(void *km, size_t size);
+ void *krealloc(void *km, void *ptr, size_t size);
+ void *krelocate(void *km, void *ap, size_t n_bytes);
+ void *kcalloc(void *km, size_t count, size_t size);
+ void kfree(void *km, void *ptr);
+
+ void *km_init(void);
+ void *km_init2(void *km_par, size_t min_core_size);
+ void km_destroy(void *km);
+ void km_stat(const void *_km, km_stat_t *s);
+ void km_stat_print(const void *km);
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #define Kmalloc(km, type, cnt) ((type*)kmalloc((km), (cnt) * sizeof(type)))
+ #define Kcalloc(km, type, cnt) ((type*)kcalloc((km), (cnt), sizeof(type)))
+ #define Krealloc(km, type, ptr, cnt) ((type*)krealloc((km), (ptr), (cnt) * sizeof(type)))
+
+ #define KMALLOC(km, ptr, len) ((ptr) = (__typeof__(ptr))kmalloc((km), (len) * sizeof(*(ptr))))
+ #define KCALLOC(km, ptr, len) ((ptr) = (__typeof__(ptr))kcalloc((km), (len), sizeof(*(ptr))))
+ #define KREALLOC(km, ptr, len) ((ptr) = (__typeof__(ptr))krealloc((km), (ptr), (len) * sizeof(*(ptr))))
+
+ #define KEXPAND(km, a, m) do { \
+     (m) = (m) >= 4? (m) + ((m)>>1) : 16; \
+     KREALLOC((km), (a), (m)); \
+ } while (0)
+
+ #ifndef klib_unused
+ #if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+ #define klib_unused __attribute__ ((__unused__))
+ #else
+ #define klib_unused
+ #endif
+ #endif /* klib_unused */
+
+ #define KALLOC_POOL_INIT2(SCOPE, name, kmptype_t) \
+     typedef struct { \
+         size_t cnt, n, max; \
+         kmptype_t **buf; \
+         void *km; \
+     } kmp_##name##_t; \
+     SCOPE kmp_##name##_t *kmp_init_##name(void *km) { \
+         kmp_##name##_t *mp; \
+         KCALLOC(km, mp, 1); \
+         mp->km = km; \
+         return mp; \
+     } \
+     SCOPE void kmp_destroy_##name(kmp_##name##_t *mp) { \
+         size_t k; \
+         for (k = 0; k < mp->n; ++k) kfree(mp->km, mp->buf[k]); \
+         kfree(mp->km, mp->buf); kfree(mp->km, mp); \
+     } \
+     SCOPE kmptype_t *kmp_alloc_##name(kmp_##name##_t *mp) { \
+         ++mp->cnt; \
+         if (mp->n == 0) return (kmptype_t*)kcalloc(mp->km, 1, sizeof(kmptype_t)); \
+         return mp->buf[--mp->n]; \
+     } \
+     SCOPE void kmp_free_##name(kmp_##name##_t *mp, kmptype_t *p) { \
+         --mp->cnt; \
+         if (mp->n == mp->max) KEXPAND(mp->km, mp->buf, mp->max); \
+         mp->buf[mp->n++] = p; \
+     }
+
+ #define KALLOC_POOL_INIT(name, kmptype_t) \
+     KALLOC_POOL_INIT2(static inline klib_unused, name, kmptype_t)
+
+ #endif
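
kalloc.h is minigraph's arena allocator interface: each function mirrors its libc counterpart but threads an opaque arena handle `km` obtained from km_init(), and km_destroy() releases every block in the arena at once (in kalloc.c a NULL `km` falls back to plain libc allocation). A minimal usage sketch, assuming the program is compiled together with the package's data/ext/minigraph/kalloc.c; the `main` below is illustrative, not part of the gem:

    #include <stdio.h>
    #include "kalloc.h"

    int main(void) {
        void *km = km_init();               /* create an arena */
        int *a = 0;
        size_t i, m = 0;                    /* m: capacity tracked for KEXPAND */
        for (i = 0; i < 100; ++i) {
            if (i == m) KEXPAND(km, a, m);  /* grow ~1.5x, 16 slots minimum */
            a[i] = (int)i;
        }
        printf("last: %d\n", a[99]);
        km_stat_print(km);                  /* dump arena statistics */
        km_destroy(km);                     /* frees a[] and everything else */
        return 0;
    }

KALLOC_POOL_INIT builds on the same primitives to generate a recycling pool of fixed-size objects drawn from one arena.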

data/ext/minigraph/kavl.h
@@ -0,0 +1,414 @@
+ /* The MIT License
+
+    Copyright (c) 2018 by Attractive Chaos <attractor@live.co.uk>
+
+    Permission is hereby granted, free of charge, to any person obtaining
+    a copy of this software and associated documentation files (the
+    "Software"), to deal in the Software without restriction, including
+    without limitation the rights to use, copy, modify, merge, publish,
+    distribute, sublicense, and/or sell copies of the Software, and to
+    permit persons to whom the Software is furnished to do so, subject to
+    the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+    BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+    ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+    CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE.
+ */
+
+ /* An example:
+
+ #include <stdio.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include "kavl.h"
+
+ struct my_node {
+     char key;
+     KAVL_HEAD(struct my_node) head;
+ };
+ #define my_cmp(p, q) (((q)->key < (p)->key) - ((p)->key < (q)->key))
+ KAVL_INIT(my, struct my_node, head, my_cmp)
+
+ int main(void) {
+     const char *str = "MNOLKQOPHIA"; // from wiki, except a duplicate
+     struct my_node *root = 0;
+     int i, l = strlen(str);
+     for (i = 0; i < l; ++i) {        // insert in the input order
+         struct my_node *q, *p = malloc(sizeof(*p));
+         p->key = str[i];
+         q = kavl_insert(my, &root, p, 0);
+         if (p != q) free(p);         // if already present, free
+     }
+     kavl_itr_t(my) itr;
+     kavl_itr_first(my, root, &itr);  // place at first
+     do {                             // traverse
+         const struct my_node *p = kavl_at(&itr);
+         putchar(p->key);
+         free((void*)p);              // free node
+     } while (kavl_itr_next(my, &itr));
+     putchar('\n');
+     return 0;
+ }
+ */
+
+ #ifndef KAVL_H
+ #define KAVL_H
+
+ #ifdef __STRICT_ANSI__
+ #define inline __inline__
+ #endif
+
+ #define KAVL_MAX_DEPTH 64
+
+ #define kavl_size(head, p) ((p)? (p)->head.size : 0)
+ #define kavl_size_child(head, q, i) ((q)->head.p[(i)]? (q)->head.p[(i)]->head.size : 0)
+
+ #define KAVL_HEAD(__type) \
+     struct { \
+         __type *p[2]; \
+         signed char balance; /* balance factor */ \
+         unsigned size; /* #elements in subtree */ \
+     }
+
+ #define __KAVL_FIND(suf, __scope, __type, __head, __cmp) \
+     __scope __type *kavl_find_##suf(const __type *root, const __type *x, unsigned *cnt_) { \
+         const __type *p = root; \
+         unsigned cnt = 0; \
+         while (p != 0) { \
+             int cmp; \
+             cmp = __cmp(x, p); \
+             if (cmp >= 0) cnt += kavl_size_child(__head, p, 0) + 1; \
+             if (cmp < 0) p = p->__head.p[0]; \
+             else if (cmp > 0) p = p->__head.p[1]; \
+             else break; \
+         } \
+         if (cnt_) *cnt_ = cnt; \
+         return (__type*)p; \
+     } \
+     __scope __type *kavl_interval_##suf(const __type *root, const __type *x, __type **lower, __type **upper) { \
+         const __type *p = root, *l = 0, *u = 0; \
+         while (p != 0) { \
+             int cmp; \
+             cmp = __cmp(x, p); \
+             if (cmp < 0) u = p, p = p->__head.p[0]; \
+             else if (cmp > 0) l = p, p = p->__head.p[1]; \
+             else { l = u = p; break; } \
+         } \
+         if (lower) *lower = (__type*)l; \
+         if (upper) *upper = (__type*)u; \
+         return (__type*)p; \
+     }
+
+ #define __KAVL_ROTATE(suf, __type, __head) \
+     /* one rotation: (a,(b,c)q)p => ((a,b)p,c)q */ \
+     static inline __type *kavl_rotate1_##suf(__type *p, int dir) { /* dir=0 to left; dir=1 to right */ \
+         int opp = 1 - dir; /* opposite direction */ \
+         __type *q = p->__head.p[opp]; \
+         unsigned size_p = p->__head.size; \
+         p->__head.size -= q->__head.size - kavl_size_child(__head, q, dir); \
+         q->__head.size = size_p; \
+         p->__head.p[opp] = q->__head.p[dir]; \
+         q->__head.p[dir] = p; \
+         return q; \
+     } \
+     /* two consecutive rotations: (a,((b,c)r,d)q)p => ((a,b)p,(c,d)q)r */ \
+     static inline __type *kavl_rotate2_##suf(__type *p, int dir) { \
+         int b1, opp = 1 - dir; \
+         __type *q = p->__head.p[opp], *r = q->__head.p[dir]; \
+         unsigned size_x_dir = kavl_size_child(__head, r, dir); \
+         r->__head.size = p->__head.size; \
+         p->__head.size -= q->__head.size - size_x_dir; \
+         q->__head.size -= size_x_dir + 1; \
+         p->__head.p[opp] = r->__head.p[dir]; \
+         r->__head.p[dir] = p; \
+         q->__head.p[dir] = r->__head.p[opp]; \
+         r->__head.p[opp] = q; \
+         b1 = dir == 0? +1 : -1; \
+         if (r->__head.balance == b1) q->__head.balance = 0, p->__head.balance = -b1; \
+         else if (r->__head.balance == 0) q->__head.balance = p->__head.balance = 0; \
+         else q->__head.balance = b1, p->__head.balance = 0; \
+         r->__head.balance = 0; \
+         return r; \
+     }
+
+ #define __KAVL_INSERT(suf, __scope, __type, __head, __cmp) \
+     __scope __type *kavl_insert_##suf(__type **root_, __type *x, unsigned *cnt_) { \
+         unsigned char stack[KAVL_MAX_DEPTH]; \
+         __type *path[KAVL_MAX_DEPTH]; \
+         __type *bp, *bq; \
+         __type *p, *q, *r = 0; /* _r_ is potentially the new root */ \
+         int i, which = 0, top, b1, path_len; \
+         unsigned cnt = 0; \
+         bp = *root_, bq = 0; \
+         /* find the insertion location */ \
+         for (p = bp, q = bq, top = path_len = 0; p; q = p, p = p->__head.p[which]) { \
+             int cmp; \
+             cmp = __cmp(x, p); \
+             if (cmp >= 0) cnt += kavl_size_child(__head, p, 0) + 1; \
+             if (cmp == 0) { \
+                 if (cnt_) *cnt_ = cnt; \
+                 return p; \
+             } \
+             if (p->__head.balance != 0) \
+                 bq = q, bp = p, top = 0; \
+             stack[top++] = which = (cmp > 0); \
+             path[path_len++] = p; \
+         } \
+         if (cnt_) *cnt_ = cnt; \
+         x->__head.balance = 0, x->__head.size = 1, x->__head.p[0] = x->__head.p[1] = 0; \
+         if (q == 0) *root_ = x; \
+         else q->__head.p[which] = x; \
+         if (bp == 0) return x; \
+         for (i = 0; i < path_len; ++i) ++path[i]->__head.size; \
+         for (p = bp, top = 0; p != x; p = p->__head.p[stack[top]], ++top) /* update balance factors */ \
+             if (stack[top] == 0) --p->__head.balance; \
+             else ++p->__head.balance; \
+         if (bp->__head.balance > -2 && bp->__head.balance < 2) return x; /* no re-balance needed */ \
+         /* re-balance */ \
+         which = (bp->__head.balance < 0); \
+         b1 = which == 0? +1 : -1; \
+         q = bp->__head.p[1 - which]; \
+         if (q->__head.balance == b1) { \
+             r = kavl_rotate1_##suf(bp, which); \
+             q->__head.balance = bp->__head.balance = 0; \
+         } else r = kavl_rotate2_##suf(bp, which); \
+         if (bq == 0) *root_ = r; \
+         else bq->__head.p[bp != bq->__head.p[0]] = r; \
+         return x; \
+     }
+
+ #define __KAVL_ERASE(suf, __scope, __type, __head, __cmp) \
+     __scope __type *kavl_erase_##suf(__type **root_, const __type *x, unsigned *cnt_) { \
+         __type *p, *path[KAVL_MAX_DEPTH], fake; \
+         unsigned char dir[KAVL_MAX_DEPTH]; \
+         int i, d = 0, cmp; \
+         unsigned cnt = 0; \
+         fake.__head.p[0] = *root_, fake.__head.p[1] = 0; \
+         if (cnt_) *cnt_ = 0; \
+         if (x) { \
+             for (cmp = -1, p = &fake; cmp; cmp = __cmp(x, p)) { \
+                 int which = (cmp > 0); \
+                 if (cmp > 0) cnt += kavl_size_child(__head, p, 0) + 1; \
+                 dir[d] = which; \
+                 path[d++] = p; \
+                 p = p->__head.p[which]; \
+                 if (p == 0) { \
+                     if (cnt_) *cnt_ = 0; \
+                     return 0; \
+                 } \
+             } \
+             cnt += kavl_size_child(__head, p, 0) + 1; /* because p==x is not counted */ \
+         } else { \
+             for (p = &fake, cnt = 1; p; p = p->__head.p[0]) \
+                 dir[d] = 0, path[d++] = p; \
+             p = path[--d]; \
+         } \
+         if (cnt_) *cnt_ = cnt; \
+         for (i = 1; i < d; ++i) --path[i]->__head.size; \
+         if (p->__head.p[1] == 0) { /* ((1,.)2,3)4 => (1,3)4; p=2 */ \
+             path[d-1]->__head.p[dir[d-1]] = p->__head.p[0]; \
+         } else { \
+             __type *q = p->__head.p[1]; \
+             if (q->__head.p[0] == 0) { /* ((1,2)3,4)5 => ((1)2,4)5; p=3 */ \
+                 q->__head.p[0] = p->__head.p[0]; \
+                 q->__head.balance = p->__head.balance; \
+                 path[d-1]->__head.p[dir[d-1]] = q; \
+                 path[d] = q, dir[d++] = 1; \
+                 q->__head.size = p->__head.size - 1; \
+             } else { /* ((1,((.,2)3,4)5)6,7)8 => ((1,(2,4)5)3,7)8; p=6 */ \
+                 __type *r; \
+                 int e = d++; /* backup _d_ */\
+                 for (;;) { \
+                     dir[d] = 0; \
+                     path[d++] = q; \
+                     r = q->__head.p[0]; \
+                     if (r->__head.p[0] == 0) break; \
+                     q = r; \
+                 } \
+                 r->__head.p[0] = p->__head.p[0]; \
+                 q->__head.p[0] = r->__head.p[1]; \
+                 r->__head.p[1] = p->__head.p[1]; \
+                 r->__head.balance = p->__head.balance; \
+                 path[e-1]->__head.p[dir[e-1]] = r; \
+                 path[e] = r, dir[e] = 1; \
+                 for (i = e + 1; i < d; ++i) --path[i]->__head.size; \
+                 r->__head.size = p->__head.size - 1; \
+             } \
+         } \
+         while (--d > 0) { \
+             __type *q = path[d]; \
+             int which, other, b1 = 1, b2 = 2; \
+             which = dir[d], other = 1 - which; \
+             if (which) b1 = -b1, b2 = -b2; \
+             q->__head.balance += b1; \
+             if (q->__head.balance == b1) break; \
+             else if (q->__head.balance == b2) { \
+                 __type *r = q->__head.p[other]; \
+                 if (r->__head.balance == -b1) { \
+                     path[d-1]->__head.p[dir[d-1]] = kavl_rotate2_##suf(q, which); \
+                 } else { \
+                     path[d-1]->__head.p[dir[d-1]] = kavl_rotate1_##suf(q, which); \
+                     if (r->__head.balance == 0) { \
+                         r->__head.balance = -b1; \
+                         q->__head.balance = b1; \
+                         break; \
+                     } else r->__head.balance = q->__head.balance = 0; \
+                 } \
+             } \
+         } \
+         *root_ = fake.__head.p[0]; \
+         return p; \
+     }
+
+ #define kavl_free(__type, __head, __root, __free) do { \
+     __type *_p, *_q; \
+     for (_p = __root; _p; _p = _q) { \
+         if (_p->__head.p[0] == 0) { \
+             _q = _p->__head.p[1]; \
+             __free(_p); \
+         } else { \
+             _q = _p->__head.p[0]; \
+             _p->__head.p[0] = _q->__head.p[1]; \
+             _q->__head.p[1] = _p; \
+         } \
+     } \
+ } while (0)
+
+ #define __KAVL_ITR(suf, __scope, __type, __head, __cmp) \
+     struct kavl_itr_##suf { \
+         const __type *stack[KAVL_MAX_DEPTH], **top; \
+     }; \
+     __scope void kavl_itr_first_##suf(const __type *root, struct kavl_itr_##suf *itr) { \
+         const __type *p; \
+         for (itr->top = itr->stack - 1, p = root; p; p = p->__head.p[0]) \
+             *++itr->top = p; \
+     } \
+     __scope int kavl_itr_find_##suf(const __type *root, const __type *x, struct kavl_itr_##suf *itr) { \
+         const __type *p = root; \
+         itr->top = itr->stack - 1; \
+         while (p != 0) { \
+             int cmp; \
+             *++itr->top = p; \
+             cmp = __cmp(x, p); \
+             if (cmp < 0) p = p->__head.p[0]; \
+             else if (cmp > 0) p = p->__head.p[1]; \
+             else break; \
+         } \
+         return p? 1 : 0; \
+     } \
+     __scope int kavl_itr_next_bidir_##suf(struct kavl_itr_##suf *itr, int dir) { \
+         const __type *p; \
+         if (itr->top < itr->stack) return 0; \
+         dir = !!dir; \
+         p = (*itr->top)->__head.p[dir]; \
+         if (p) { /* go down */ \
+             for (; p; p = p->__head.p[!dir]) \
+                 *++itr->top = p; \
+             return 1; \
+         } else { /* go up */ \
+             do { \
+                 p = *itr->top--; \
+             } while (itr->top >= itr->stack && p == (*itr->top)->__head.p[dir]); \
+             return itr->top < itr->stack? 0 : 1; \
+         } \
+     } \
+
+ /**
+  * Insert a node to the tree
+  *
+  * @param suf    name suffix used in KAVL_INIT()
+  * @param proot  pointer to the root of the tree (in/out: root may change)
+  * @param x      node to insert (in)
+  * @param cnt    number of nodes smaller than or equal to _x_; can be NULL (out)
+  *
+  * @return _x_ if not present in the tree, or the node equal to x.
+  */
+ #define kavl_insert(suf, proot, x, cnt) kavl_insert_##suf(proot, x, cnt)
+
+ /**
+  * Find a node in the tree
+  *
+  * @param suf    name suffix used in KAVL_INIT()
+  * @param root   root of the tree
+  * @param x      node value to find (in)
+  * @param cnt    number of nodes smaller than or equal to _x_; can be NULL (out)
+  *
+  * @return node equal to _x_ if present, or NULL if absent
+  */
+ #define kavl_find(suf, root, x, cnt) kavl_find_##suf(root, x, cnt)
+ #define kavl_interval(suf, root, x, lower, upper) kavl_interval_##suf(root, x, lower, upper)
+
+ /**
+  * Delete a node from the tree
+  *
+  * @param suf    name suffix used in KAVL_INIT()
+  * @param proot  pointer to the root of the tree (in/out: root may change)
+  * @param x      node value to delete; if NULL, delete the first node (in)
+  *
+  * @return node removed from the tree if present, or NULL if absent
+  */
+ #define kavl_erase(suf, proot, x, cnt) kavl_erase_##suf(proot, x, cnt)
+ #define kavl_erase_first(suf, proot) kavl_erase_##suf(proot, 0, 0)
+
+ #define kavl_itr_t(suf) struct kavl_itr_##suf
+
+ /**
+  * Place the iterator at the smallest object
+  *
+  * @param suf   name suffix used in KAVL_INIT()
+  * @param root  root of the tree
+  * @param itr   iterator
+  */
+ #define kavl_itr_first(suf, root, itr) kavl_itr_first_##suf(root, itr)
+
+ /**
+  * Place the iterator at the object equal to or greater than the query
+  *
+  * @param suf   name suffix used in KAVL_INIT()
+  * @param root  root of the tree
+  * @param x     query (in)
+  * @param itr   iterator (out)
+  *
+  * @return 1 if find; 0 otherwise. kavl_at(itr) is NULL if and only if query is
+  *         larger than all objects in the tree
+  */
+ #define kavl_itr_find(suf, root, x, itr) kavl_itr_find_##suf(root, x, itr)
+
+ /**
+  * Move to the next object in order
+  *
+  * @param itr  iterator (modified)
+  *
+  * @return 1 if there is a next object; 0 otherwise
+  */
+ #define kavl_itr_next(suf, itr) kavl_itr_next_bidir_##suf(itr, 1)
+ #define kavl_itr_prev(suf, itr) kavl_itr_next_bidir_##suf(itr, 0)
+
+ /**
+  * Return the pointer at the iterator
+  *
+  * @param itr  iterator
+  *
+  * @return pointer if present; NULL otherwise
+  */
+ #define kavl_at(itr) ((itr)->top < (itr)->stack? 0 : *(itr)->top)
+
+ #define KAVL_INIT2(suf, __scope, __type, __head, __cmp) \
+     __KAVL_FIND(suf, __scope, __type, __head, __cmp) \
+     __KAVL_ROTATE(suf, __type, __head) \
+     __KAVL_INSERT(suf, __scope, __type, __head, __cmp) \
+     __KAVL_ERASE(suf, __scope, __type, __head, __cmp) \
+     __KAVL_ITR(suf, __scope, __type, __head, __cmp)
+
+ #define KAVL_INIT(suf, __type, __head, __cmp) \
+     KAVL_INIT2(suf,, __type, __head, __cmp)
+
+ #endif
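
The example embedded at the top of kavl.h shows plain insert-and-traverse usage. What it does not show is the `cnt` out-parameter: because every KAVL_HEAD keeps a subtree `size`, kavl_find also reports how many stored nodes compare less than or equal to the query, which turns the tree into an order-statistic structure. A small sketch of that use, under the same single-header setup (the `rank` suffix, node type, and key values are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include "kavl.h"

    struct node {
        int key;
        KAVL_HEAD(struct node) head;
    };
    #define node_cmp(a, b) (((a)->key > (b)->key) - ((a)->key < (b)->key))
    KAVL_INIT(rank, struct node, head, node_cmp)

    int main(void) {
        static const int keys[] = { 50, 20, 80, 10, 30 };
        struct node *root = 0, query;
        unsigned cnt;
        int i;
        for (i = 0; i < 5; ++i) {            /* build the tree */
            struct node *p = (struct node*)malloc(sizeof(*p));
            p->key = keys[i];
            kavl_insert(rank, &root, p, 0);
        }
        query.key = 30;                       /* search by value, not pointer */
        if (kavl_find(rank, root, &query, &cnt))
            printf("keys <= 30: %u\n", cnt);  /* prints 3: {10, 20, 30} */
        kavl_free(struct node, head, root, free);
        return 0;
    }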

data/ext/minigraph/kdq.h
@@ -0,0 +1,134 @@
+ #ifndef __AC_KDQ_H
+ #define __AC_KDQ_H
+
+ #include <stdlib.h>
+ #include <string.h>
+ #include <stdint.h>
+ #include "kalloc.h"
+
+ #define __KDQ_TYPE(type) \
+     typedef struct { \
+         uint64_t front:58, bits:6, count, mask; \
+         type *a; \
+         void *km; \
+     } kdq_##type##_t;
+
+ #define kdq_t(type) kdq_##type##_t
+ #define kdq_size(q) ((q)->count)
+ #define kdq_first(q) ((q)->a[(q)->front])
+ #define kdq_last(q) ((q)->a[((q)->front + (q)->count - 1) & (q)->mask])
+ #define kdq_at(q, i) ((q)->a[((q)->front + (i)) & (q)->mask])
+
+ #define __KDQ_IMPL(type, SCOPE) \
+     SCOPE kdq_##type##_t *kdq_init2_##type(void *km, int32_t bits) \
+     { \
+         kdq_##type##_t *q; \
+         q = (kdq_##type##_t*)kcalloc(km, 1, sizeof(kdq_##type##_t)); \
+         q->bits = bits, q->mask = (1ULL<<q->bits) - 1; \
+         q->a = (type*)kmalloc(km, (1<<q->bits) * sizeof(type)); \
+         q->km = km; \
+         return q; \
+     } \
+     SCOPE kdq_##type##_t *kdq_init_##type(void *km) { return kdq_init2_##type(km, 2); } \
+     SCOPE void kdq_destroy_##type(kdq_##type##_t *q) \
+     { \
+         if (q == 0) return; \
+         kfree(q->km, q->a); kfree(q->km, q); \
+     } \
+     SCOPE int kdq_resize_##type(kdq_##type##_t *q, int new_bits) \
+     { \
+         size_t new_size = 1ULL<<new_bits, old_size = 1ULL<<q->bits; \
+         if (new_size < q->count) { /* not big enough */ \
+             int i; \
+             for (i = 0; i < 64; ++i) \
+                 if (1ULL<<i > q->count) break; \
+             new_bits = i, new_size = 1ULL<<new_bits; \
+         } \
+         if (new_bits == q->bits) return q->bits; /* unchanged */ \
+         if (new_bits > q->bits) q->a = (type*)krealloc(q->km, q->a, (1ULL<<new_bits) * sizeof(type)); \
+         if (q->front + q->count <= old_size) { /* unwrapped */ \
+             if (q->front + q->count > new_size) /* only happens for shrinking */ \
+                 memmove(q->a, q->a + new_size, (q->front + q->count - new_size) * sizeof(type)); \
+         } else { /* wrapped */ \
+             memmove(q->a + (new_size - (old_size - q->front)), q->a + q->front, (old_size - q->front) * sizeof(type)); \
+             q->front = new_size - (old_size - q->front); \
+         } \
+         q->bits = new_bits, q->mask = (1ULL<<q->bits) - 1; \
+         if (new_bits < q->bits) q->a = (type*)krealloc(q->km, q->a, (1ULL<<new_bits) * sizeof(type)); \
+         return q->bits; \
+     } \
+     SCOPE type *kdq_pushp_##type(kdq_##type##_t *q) \
+     { \
+         if (q->count == 1ULL<<q->bits) kdq_resize_##type(q, q->bits + 1); \
+         return &q->a[((q->count++) + q->front) & (q)->mask]; \
+     } \
+     SCOPE void kdq_push_##type(kdq_##type##_t *q, type v) \
+     { \
+         if (q->count == 1ULL<<q->bits) kdq_resize_##type(q, q->bits + 1); \
+         q->a[((q->count++) + q->front) & (q)->mask] = v; \
+     } \
+     SCOPE type *kdq_unshiftp_##type(kdq_##type##_t *q) \
+     { \
+         if (q->count == 1ULL<<q->bits) kdq_resize_##type(q, q->bits + 1); \
+         ++q->count; \
+         q->front = q->front? q->front - 1 : (1ULL<<q->bits) - 1; \
+         return &q->a[q->front]; \
+     } \
+     SCOPE void kdq_unshift_##type(kdq_##type##_t *q, type v) \
+     { \
+         type *p; \
+         p = kdq_unshiftp_##type(q); \
+         *p = v; \
+     } \
+     SCOPE type *kdq_pop_##type(kdq_##type##_t *q) \
+     { \
+         return q->count? &q->a[((--q->count) + q->front) & q->mask] : 0; \
+     } \
+     SCOPE type *kdq_shift_##type(kdq_##type##_t *q) \
+     { \
+         type *d = 0; \
+         if (q->count == 0) return 0; \
+         d = &q->a[q->front++]; \
+         q->front &= q->mask; \
+         --q->count; \
+         return d; \
+     }
+
+ #define KDQ_INIT2(type, SCOPE) \
+     __KDQ_TYPE(type) \
+     __KDQ_IMPL(type, SCOPE)
+
+ #ifndef klib_unused
+ #if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+ #define klib_unused __attribute__ ((__unused__))
+ #else
+ #define klib_unused
+ #endif
+ #endif /* klib_unused */
+
+ #define KDQ_INIT(type) KDQ_INIT2(type, static inline klib_unused)
+
+ #define KDQ_DECLARE(type) \
+     __KDQ_TYPE(type) \
+     kdq_##type##_t *kdq_init_##type(); \
+     void kdq_destroy_##type(kdq_##type##_t *q); \
+     int kdq_resize_##type(kdq_##type##_t *q, int new_bits); \
+     type *kdq_pushp_##type(kdq_##type##_t *q); \
+     void kdq_push_##type(kdq_##type##_t *q, type v); \
+     type *kdq_unshiftp_##type(kdq_##type##_t *q); \
+     void kdq_unshift_##type(kdq_##type##_t *q, type v); \
+     type *kdq_pop_##type(kdq_##type##_t *q); \
+     type *kdq_shift_##type(kdq_##type##_t *q);
+
+ #define kdq_init2(type, km, bits) kdq_init2_##type(km, bits)
+ #define kdq_init(type, km) kdq_init_##type(km)
+ #define kdq_destroy(type, q) kdq_destroy_##type(q)
+ #define kdq_resize(type, q, new_bits) kdq_resize_##type(q, new_bits)
+ #define kdq_pushp(type, q) kdq_pushp_##type(q)
+ #define kdq_push(type, q, v) kdq_push_##type(q, v)
+ #define kdq_pop(type, q) kdq_pop_##type(q)
+ #define kdq_unshiftp(type, q) kdq_unshiftp_##type(q)
+ #define kdq_unshift(type, q, v) kdq_unshift_##type(q, v)
+ #define kdq_shift(type, q) kdq_shift_##type(q)
+
+ #endif
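
kdq.h implements a double-ended queue on a circular buffer whose capacity is always a power of two (1ULL<<bits), so wrap-around indexing reduces to masking with `mask`. Unlike stock klib, this copy allocates through kalloc.h and takes a `km` arena at init time (a NULL arena falls back to libc allocation in kalloc.c). A minimal FIFO sketch, again assuming kalloc.c is linked in; the program itself is illustrative:

    #include <stdio.h>
    #include "kdq.h"

    KDQ_INIT(int)                             /* instantiate kdq_int_t and its functions */

    int main(void) {
        kdq_t(int) *q = kdq_init(int, 0);     /* 0: no arena, use libc */
        int i, *p;
        for (i = 0; i < 10; ++i)
            kdq_push(int, q, i);              /* append at the back; resizes as needed */
        kdq_unshift(int, q, -1);              /* prepend at the front */
        while ((p = kdq_shift(int, q)) != 0)  /* pop from the front */
            printf("%d ", *p);                /* prints: -1 0 1 ... 9 */
        putchar('\n');
        kdq_destroy(int, q);
        return 0;
    }

Because pushes may krealloc the backing array, pointers returned by kdq_pushp and kdq_shift are only valid until the next resizing operation.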