minimap2 0.2.22.0 → 0.2.24.1

Files changed (101)
  1. checksums.yaml +4 -4
  2. data/README.md +60 -76
  3. data/ext/Rakefile +55 -0
  4. data/ext/cmappy/cmappy.c +129 -0
  5. data/ext/cmappy/cmappy.h +44 -0
  6. data/ext/minimap2/FAQ.md +46 -0
  7. data/ext/minimap2/LICENSE.txt +24 -0
  8. data/ext/minimap2/MANIFEST.in +10 -0
  9. data/ext/minimap2/Makefile +132 -0
  10. data/ext/minimap2/Makefile.simde +97 -0
  11. data/ext/minimap2/NEWS.md +821 -0
  12. data/ext/minimap2/README.md +403 -0
  13. data/ext/minimap2/align.c +1020 -0
  14. data/ext/minimap2/bseq.c +169 -0
  15. data/ext/minimap2/bseq.h +64 -0
  16. data/ext/minimap2/code_of_conduct.md +30 -0
  17. data/ext/minimap2/cookbook.md +243 -0
  18. data/ext/minimap2/esterr.c +64 -0
  19. data/ext/minimap2/example.c +63 -0
  20. data/ext/minimap2/format.c +559 -0
  21. data/ext/minimap2/hit.c +466 -0
  22. data/ext/minimap2/index.c +775 -0
  23. data/ext/minimap2/kalloc.c +205 -0
  24. data/ext/minimap2/kalloc.h +76 -0
  25. data/ext/minimap2/kdq.h +132 -0
  26. data/ext/minimap2/ketopt.h +120 -0
  27. data/ext/minimap2/khash.h +615 -0
  28. data/ext/minimap2/krmq.h +474 -0
  29. data/ext/minimap2/kseq.h +256 -0
  30. data/ext/minimap2/ksort.h +153 -0
  31. data/ext/minimap2/ksw2.h +184 -0
  32. data/ext/minimap2/ksw2_dispatch.c +96 -0
  33. data/ext/minimap2/ksw2_extd2_sse.c +402 -0
  34. data/ext/minimap2/ksw2_exts2_sse.c +416 -0
  35. data/ext/minimap2/ksw2_extz2_sse.c +313 -0
  36. data/ext/minimap2/ksw2_ll_sse.c +152 -0
  37. data/ext/minimap2/kthread.c +159 -0
  38. data/ext/minimap2/kthread.h +15 -0
  39. data/ext/minimap2/kvec.h +105 -0
  40. data/ext/minimap2/lchain.c +369 -0
  41. data/ext/minimap2/main.c +459 -0
  42. data/ext/minimap2/map.c +714 -0
  43. data/ext/minimap2/minimap.h +410 -0
  44. data/ext/minimap2/minimap2.1 +725 -0
  45. data/ext/minimap2/misc/README.md +179 -0
  46. data/ext/minimap2/misc/mmphase.js +335 -0
  47. data/ext/minimap2/misc/paftools.js +3149 -0
  48. data/ext/minimap2/misc.c +162 -0
  49. data/ext/minimap2/mmpriv.h +132 -0
  50. data/ext/minimap2/options.c +234 -0
  51. data/ext/minimap2/pe.c +177 -0
  52. data/ext/minimap2/python/README.rst +196 -0
  53. data/ext/minimap2/python/cmappy.h +152 -0
  54. data/ext/minimap2/python/cmappy.pxd +153 -0
  55. data/ext/minimap2/python/mappy.pyx +273 -0
  56. data/ext/minimap2/python/minimap2.py +39 -0
  57. data/ext/minimap2/sdust.c +213 -0
  58. data/ext/minimap2/sdust.h +25 -0
  59. data/ext/minimap2/seed.c +131 -0
  60. data/ext/minimap2/setup.py +55 -0
  61. data/ext/minimap2/sketch.c +143 -0
  62. data/ext/minimap2/splitidx.c +84 -0
  63. data/ext/minimap2/sse2neon/emmintrin.h +1689 -0
  64. data/ext/minimap2/test/MT-human.fa +278 -0
  65. data/ext/minimap2/test/MT-orang.fa +276 -0
  66. data/ext/minimap2/test/q-inv.fa +4 -0
  67. data/ext/minimap2/test/q2.fa +2 -0
  68. data/ext/minimap2/test/t-inv.fa +127 -0
  69. data/ext/minimap2/test/t2.fa +2 -0
  70. data/ext/minimap2/tex/Makefile +21 -0
  71. data/ext/minimap2/tex/bioinfo.cls +930 -0
  72. data/ext/minimap2/tex/blasr-mc.eval +17 -0
  73. data/ext/minimap2/tex/bowtie2-s3.sam.eval +28 -0
  74. data/ext/minimap2/tex/bwa-s3.sam.eval +52 -0
  75. data/ext/minimap2/tex/bwa.eval +55 -0
  76. data/ext/minimap2/tex/eval2roc.pl +33 -0
  77. data/ext/minimap2/tex/graphmap.eval +4 -0
  78. data/ext/minimap2/tex/hs38-simu.sh +10 -0
  79. data/ext/minimap2/tex/minialign.eval +49 -0
  80. data/ext/minimap2/tex/minimap2.bib +460 -0
  81. data/ext/minimap2/tex/minimap2.tex +724 -0
  82. data/ext/minimap2/tex/mm2-s3.sam.eval +62 -0
  83. data/ext/minimap2/tex/mm2-update.tex +240 -0
  84. data/ext/minimap2/tex/mm2.approx.eval +12 -0
  85. data/ext/minimap2/tex/mm2.eval +13 -0
  86. data/ext/minimap2/tex/natbib.bst +1288 -0
  87. data/ext/minimap2/tex/natbib.sty +803 -0
  88. data/ext/minimap2/tex/ngmlr.eval +38 -0
  89. data/ext/minimap2/tex/roc.gp +60 -0
  90. data/ext/minimap2/tex/snap-s3.sam.eval +62 -0
  91. data/ext/minimap2.patch +19 -0
  92. data/lib/minimap2/aligner.rb +4 -4
  93. data/lib/minimap2/alignment.rb +11 -11
  94. data/lib/minimap2/ffi/constants.rb +20 -16
  95. data/lib/minimap2/ffi/functions.rb +5 -0
  96. data/lib/minimap2/ffi.rb +4 -5
  97. data/lib/minimap2/version.rb +2 -2
  98. data/lib/minimap2.rb +51 -15
  99. metadata +97 -79
  100. data/lib/minimap2/ffi_helper.rb +0 -53
  101. data/vendor/libminimap2.so +0 -0
data/ext/minimap2/ksw2_extd2_sse.c
@@ -0,0 +1,402 @@
+ #include <string.h>
+ #include <stdio.h>
+ #include <assert.h>
+ #include "ksw2.h"
+
+ #ifdef __SSE2__
+ #ifdef USE_SIMDE
+ #include <simde/x86/sse2.h>
+ #else
+ #include <emmintrin.h>
+ #endif
+
+ #ifdef KSW_SSE2_ONLY
+ #undef __SSE4_1__
+ #endif
+
+ #ifdef __SSE4_1__
+ #ifdef USE_SIMDE
+ #include <simde/x86/sse4.1.h>
+ #else
+ #include <smmintrin.h>
+ #endif
+ #endif
+
+ #ifdef KSW_CPU_DISPATCH
+ #ifdef __SSE4_1__
+ void ksw_extd2_sse41(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+                      int8_t q, int8_t e, int8_t q2, int8_t e2, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+ #else
+ void ksw_extd2_sse2(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+                     int8_t q, int8_t e, int8_t q2, int8_t e2, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+ #endif
+ #else
+ void ksw_extd2_sse(void *km, int qlen, const uint8_t *query, int tlen, const uint8_t *target, int8_t m, const int8_t *mat,
+                    int8_t q, int8_t e, int8_t q2, int8_t e2, int w, int zdrop, int end_bonus, int flag, ksw_extz_t *ez)
+ #endif // ~KSW_CPU_DISPATCH
+ {
+ #define __dp_code_block1 \
+     z = _mm_load_si128(&s[t]); \
+     xt1 = _mm_load_si128(&x[t]); /* xt1 <- x[r-1][t..t+15] */ \
+     tmp = _mm_srli_si128(xt1, 15); /* tmp <- x[r-1][t+15] */ \
+     xt1 = _mm_or_si128(_mm_slli_si128(xt1, 1), x1_); /* xt1 <- x[r-1][t-1..t+14] */ \
+     x1_ = tmp; \
+     vt1 = _mm_load_si128(&v[t]); /* vt1 <- v[r-1][t..t+15] */ \
+     tmp = _mm_srli_si128(vt1, 15); /* tmp <- v[r-1][t+15] */ \
+     vt1 = _mm_or_si128(_mm_slli_si128(vt1, 1), v1_); /* vt1 <- v[r-1][t-1..t+14] */ \
+     v1_ = tmp; \
+     a = _mm_add_epi8(xt1, vt1); /* a <- x[r-1][t-1..t+14] + v[r-1][t-1..t+14] */ \
+     ut = _mm_load_si128(&u[t]); /* ut <- u[t..t+15] */ \
+     b = _mm_add_epi8(_mm_load_si128(&y[t]), ut); /* b <- y[r-1][t..t+15] + u[r-1][t..t+15] */ \
+     x2t1= _mm_load_si128(&x2[t]); \
+     tmp = _mm_srli_si128(x2t1, 15); \
+     x2t1= _mm_or_si128(_mm_slli_si128(x2t1, 1), x21_); \
+     x21_= tmp; \
+     a2= _mm_add_epi8(x2t1, vt1); \
+     b2= _mm_add_epi8(_mm_load_si128(&y2[t]), ut);
+
+ #define __dp_code_block2 \
+     _mm_store_si128(&u[t], _mm_sub_epi8(z, vt1)); /* u[r][t..t+15] <- z - v[r-1][t-1..t+14] */ \
+     _mm_store_si128(&v[t], _mm_sub_epi8(z, ut)); /* v[r][t..t+15] <- z - u[r-1][t..t+15] */ \
+     tmp = _mm_sub_epi8(z, q_); \
+     a = _mm_sub_epi8(a, tmp); \
+     b = _mm_sub_epi8(b, tmp); \
+     tmp = _mm_sub_epi8(z, q2_); \
+     a2= _mm_sub_epi8(a2, tmp); \
+     b2= _mm_sub_epi8(b2, tmp);
+
+     int r, t, qe = q + e, n_col_, *off = 0, *off_end = 0, tlen_, qlen_, last_st, last_en, wl, wr, max_sc, min_sc, long_thres, long_diff;
+     int with_cigar = !(flag&KSW_EZ_SCORE_ONLY), approx_max = !!(flag&KSW_EZ_APPROX_MAX);
+     int32_t *H = 0, H0 = 0, last_H0_t = 0;
+     uint8_t *qr, *sf, *mem, *mem2 = 0;
+     __m128i q_, q2_, qe_, qe2_, zero_, sc_mch_, sc_mis_, m1_, sc_N_;
+     __m128i *u, *v, *x, *y, *x2, *y2, *s, *p = 0;
+
+     ksw_reset_extz(ez);
+     if (m <= 1 || qlen <= 0 || tlen <= 0) return;
+
+     if (q2 + e2 < q + e) t = q, q = q2, q2 = t, t = e, e = e2, e2 = t; // make sure q+e is no larger than q2+e2
+
+     zero_ = _mm_set1_epi8(0);
+     q_ = _mm_set1_epi8(q);
+     q2_ = _mm_set1_epi8(q2);
+     qe_ = _mm_set1_epi8(q + e);
+     qe2_ = _mm_set1_epi8(q2 + e2);
+     sc_mch_ = _mm_set1_epi8(mat[0]);
+     sc_mis_ = _mm_set1_epi8(mat[1]);
+     sc_N_ = mat[m*m-1] == 0? _mm_set1_epi8(-e2) : _mm_set1_epi8(mat[m*m-1]);
+     m1_ = _mm_set1_epi8(m - 1); // wildcard
+
+     if (w < 0) w = tlen > qlen? tlen : qlen;
+     wl = wr = w;
+     tlen_ = (tlen + 15) / 16;
+     n_col_ = qlen < tlen? qlen : tlen;
+     n_col_ = ((n_col_ < w + 1? n_col_ : w + 1) + 15) / 16 + 1;
+     qlen_ = (qlen + 15) / 16;
+     for (t = 1, max_sc = mat[0], min_sc = mat[1]; t < m * m; ++t) {
+         max_sc = max_sc > mat[t]? max_sc : mat[t];
+         min_sc = min_sc < mat[t]? min_sc : mat[t];
+     }
+     if (-min_sc > 2 * (q + e)) return; // otherwise, we won't see any mismatches
+
+     long_thres = e != e2? (q2 - q) / (e - e2) - 1 : 0;
+     if (q2 + e2 + long_thres * e2 > q + e + long_thres * e)
+         ++long_thres;
+     long_diff = long_thres * (e - e2) - (q2 - q) - e2;
+
+     mem = (uint8_t*)kcalloc(km, tlen_ * 8 + qlen_ + 1, 16);
+     u = (__m128i*)(((size_t)mem + 15) >> 4 << 4); // 16-byte aligned
+     v = u + tlen_, x = v + tlen_, y = x + tlen_, x2 = y + tlen_, y2 = x2 + tlen_;
+     s = y2 + tlen_, sf = (uint8_t*)(s + tlen_), qr = sf + tlen_ * 16;
+     memset(u, -q - e, tlen_ * 16);
+     memset(v, -q - e, tlen_ * 16);
+     memset(x, -q - e, tlen_ * 16);
+     memset(y, -q - e, tlen_ * 16);
+     memset(x2, -q2 - e2, tlen_ * 16);
+     memset(y2, -q2 - e2, tlen_ * 16);
+     if (!approx_max) {
+         H = (int32_t*)kmalloc(km, tlen_ * 16 * 4);
+         for (t = 0; t < tlen_ * 16; ++t) H[t] = KSW_NEG_INF;
+     }
+     if (with_cigar) {
+         mem2 = (uint8_t*)kmalloc(km, ((size_t)(qlen + tlen - 1) * n_col_ + 1) * 16);
+         p = (__m128i*)(((size_t)mem2 + 15) >> 4 << 4);
+         off = (int*)kmalloc(km, (qlen + tlen - 1) * sizeof(int) * 2);
+         off_end = off + qlen + tlen - 1;
+     }
+
+     for (t = 0; t < qlen; ++t) qr[t] = query[qlen - 1 - t];
+     memcpy(sf, target, tlen);
+
+     for (r = 0, last_st = last_en = -1; r < qlen + tlen - 1; ++r) {
+         int st = 0, en = tlen - 1, st0, en0, st_, en_;
+         int8_t x1, x21, v1;
+         uint8_t *qrr = qr + (qlen - 1 - r);
+         int8_t *u8 = (int8_t*)u, *v8 = (int8_t*)v, *x8 = (int8_t*)x, *x28 = (int8_t*)x2;
+         __m128i x1_, x21_, v1_;
+         // find the boundaries
+         if (st < r - qlen + 1) st = r - qlen + 1;
+         if (en > r) en = r;
+         if (st < (r-wr+1)>>1) st = (r-wr+1)>>1; // take the ceil
+         if (en > (r+wl)>>1) en = (r+wl)>>1; // take the floor
+         if (st > en) {
+             ez->zdropped = 1;
+             break;
+         }
+         st0 = st, en0 = en;
+         st = st / 16 * 16, en = (en + 16) / 16 * 16 - 1;
+         // set boundary conditions
+         if (st > 0) {
+             if (st - 1 >= last_st && st - 1 <= last_en) {
+                 x1 = x8[st - 1], x21 = x28[st - 1], v1 = v8[st - 1]; // (r-1,s-1) calculated in the last round
+             } else {
+                 x1 = -q - e, x21 = -q2 - e2;
+                 v1 = -q - e;
+             }
+         } else {
+             x1 = -q - e, x21 = -q2 - e2;
+             v1 = r == 0? -q - e : r < long_thres? -e : r == long_thres? long_diff : -e2;
+         }
+         if (en >= r) {
+             ((int8_t*)y)[r] = -q - e, ((int8_t*)y2)[r] = -q2 - e2;
+             u8[r] = r == 0? -q - e : r < long_thres? -e : r == long_thres? long_diff : -e2;
+         }
+         // loop fission: set scores first
+         if (!(flag & KSW_EZ_GENERIC_SC)) {
+             for (t = st0; t <= en0; t += 16) {
+                 __m128i sq, st, tmp, mask;
+                 sq = _mm_loadu_si128((__m128i*)&sf[t]);
+                 st = _mm_loadu_si128((__m128i*)&qrr[t]);
+                 mask = _mm_or_si128(_mm_cmpeq_epi8(sq, m1_), _mm_cmpeq_epi8(st, m1_));
+                 tmp = _mm_cmpeq_epi8(sq, st);
+ #ifdef __SSE4_1__
+                 tmp = _mm_blendv_epi8(sc_mis_, sc_mch_, tmp);
+                 tmp = _mm_blendv_epi8(tmp, sc_N_, mask);
+ #else
+                 tmp = _mm_or_si128(_mm_andnot_si128(tmp, sc_mis_), _mm_and_si128(tmp, sc_mch_));
+                 tmp = _mm_or_si128(_mm_andnot_si128(mask, tmp), _mm_and_si128(mask, sc_N_));
+ #endif
+                 _mm_storeu_si128((__m128i*)((int8_t*)s + t), tmp);
+             }
+         } else {
+             for (t = st0; t <= en0; ++t)
+                 ((uint8_t*)s)[t] = mat[sf[t] * m + qrr[t]];
+         }
+         // core loop
+         x1_ = _mm_cvtsi32_si128((uint8_t)x1);
+         x21_ = _mm_cvtsi32_si128((uint8_t)x21);
+         v1_ = _mm_cvtsi32_si128((uint8_t)v1);
+         st_ = st / 16, en_ = en / 16;
+         assert(en_ - st_ + 1 <= n_col_);
+         if (!with_cigar) { // score only
+             for (t = st_; t <= en_; ++t) {
+                 __m128i z, a, b, a2, b2, xt1, x2t1, vt1, ut, tmp;
+                 __dp_code_block1;
+ #ifdef __SSE4_1__
+                 z = _mm_max_epi8(z, a);
+                 z = _mm_max_epi8(z, b);
+                 z = _mm_max_epi8(z, a2);
+                 z = _mm_max_epi8(z, b2);
+                 z = _mm_min_epi8(z, sc_mch_);
+                 __dp_code_block2; // save u[] and v[]; update a, b, a2 and b2
+                 _mm_store_si128(&x[t], _mm_sub_epi8(_mm_max_epi8(a, zero_), qe_));
+                 _mm_store_si128(&y[t], _mm_sub_epi8(_mm_max_epi8(b, zero_), qe_));
+                 _mm_store_si128(&x2[t], _mm_sub_epi8(_mm_max_epi8(a2, zero_), qe2_));
+                 _mm_store_si128(&y2[t], _mm_sub_epi8(_mm_max_epi8(b2, zero_), qe2_));
+ #else
+                 tmp = _mm_cmpgt_epi8(a, z);
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a));
+                 tmp = _mm_cmpgt_epi8(b, z);
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b));
+                 tmp = _mm_cmpgt_epi8(a2, z);
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a2));
+                 tmp = _mm_cmpgt_epi8(b2, z);
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b2));
+                 tmp = _mm_cmplt_epi8(sc_mch_, z);
+                 z = _mm_or_si128(_mm_and_si128(tmp, sc_mch_), _mm_andnot_si128(tmp, z));
+                 __dp_code_block2;
+                 tmp = _mm_cmpgt_epi8(a, zero_);
+                 _mm_store_si128(&x[t], _mm_sub_epi8(_mm_and_si128(tmp, a), qe_));
+                 tmp = _mm_cmpgt_epi8(b, zero_);
+                 _mm_store_si128(&y[t], _mm_sub_epi8(_mm_and_si128(tmp, b), qe_));
+                 tmp = _mm_cmpgt_epi8(a2, zero_);
+                 _mm_store_si128(&x2[t], _mm_sub_epi8(_mm_and_si128(tmp, a2), qe2_));
+                 tmp = _mm_cmpgt_epi8(b2, zero_);
+                 _mm_store_si128(&y2[t], _mm_sub_epi8(_mm_and_si128(tmp, b2), qe2_));
+ #endif
+             }
+         } else if (!(flag&KSW_EZ_RIGHT)) { // gap left-alignment
+             __m128i *pr = p + (size_t)r * n_col_ - st_;
+             off[r] = st, off_end[r] = en;
+             for (t = st_; t <= en_; ++t) {
+                 __m128i d, z, a, b, a2, b2, xt1, x2t1, vt1, ut, tmp;
+                 __dp_code_block1;
+ #ifdef __SSE4_1__
+                 d = _mm_and_si128(_mm_cmpgt_epi8(a, z), _mm_set1_epi8(1)); // d = a > z? 1 : 0
+                 z = _mm_max_epi8(z, a);
+                 d = _mm_blendv_epi8(d, _mm_set1_epi8(2), _mm_cmpgt_epi8(b, z)); // d = b > z? 2 : d
+                 z = _mm_max_epi8(z, b);
+                 d = _mm_blendv_epi8(d, _mm_set1_epi8(3), _mm_cmpgt_epi8(a2, z)); // d = a2 > z? 3 : d
+                 z = _mm_max_epi8(z, a2);
+                 d = _mm_blendv_epi8(d, _mm_set1_epi8(4), _mm_cmpgt_epi8(b2, z)); // d = b2 > z? 4 : d
+                 z = _mm_max_epi8(z, b2);
+                 z = _mm_min_epi8(z, sc_mch_);
+ #else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+                 tmp = _mm_cmpgt_epi8(a, z);
+                 d = _mm_and_si128(tmp, _mm_set1_epi8(1));
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a));
+                 tmp = _mm_cmpgt_epi8(b, z);
+                 d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(2)));
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b));
+                 tmp = _mm_cmpgt_epi8(a2, z);
+                 d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(3)));
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, a2));
+                 tmp = _mm_cmpgt_epi8(b2, z);
+                 d = _mm_or_si128(_mm_andnot_si128(tmp, d), _mm_and_si128(tmp, _mm_set1_epi8(4)));
+                 z = _mm_or_si128(_mm_andnot_si128(tmp, z), _mm_and_si128(tmp, b2));
+                 tmp = _mm_cmplt_epi8(sc_mch_, z);
+                 z = _mm_or_si128(_mm_and_si128(tmp, sc_mch_), _mm_andnot_si128(tmp, z));
+ #endif
+                 __dp_code_block2;
+                 tmp = _mm_cmpgt_epi8(a, zero_);
+                 _mm_store_si128(&x[t], _mm_sub_epi8(_mm_and_si128(tmp, a), qe_));
+                 d = _mm_or_si128(d, _mm_and_si128(tmp, _mm_set1_epi8(0x08))); // d |= a > 0? 1<<3 : 0
+                 tmp = _mm_cmpgt_epi8(b, zero_);
+                 _mm_store_si128(&y[t], _mm_sub_epi8(_mm_and_si128(tmp, b), qe_));
+                 d = _mm_or_si128(d, _mm_and_si128(tmp, _mm_set1_epi8(0x10))); // d |= b > 0? 1<<4 : 0
+                 tmp = _mm_cmpgt_epi8(a2, zero_);
+                 _mm_store_si128(&x2[t], _mm_sub_epi8(_mm_and_si128(tmp, a2), qe2_));
+                 d = _mm_or_si128(d, _mm_and_si128(tmp, _mm_set1_epi8(0x20))); // d |= a2 > 0? 1<<5 : 0
+                 tmp = _mm_cmpgt_epi8(b2, zero_);
+                 _mm_store_si128(&y2[t], _mm_sub_epi8(_mm_and_si128(tmp, b2), qe2_));
+                 d = _mm_or_si128(d, _mm_and_si128(tmp, _mm_set1_epi8(0x40))); // d |= b2 > 0? 1<<6 : 0
+                 _mm_store_si128(&pr[t], d);
+             }
+         } else { // gap right-alignment
+             __m128i *pr = p + (size_t)r * n_col_ - st_;
+             off[r] = st, off_end[r] = en;
+             for (t = st_; t <= en_; ++t) {
+                 __m128i d, z, a, b, a2, b2, xt1, x2t1, vt1, ut, tmp;
+                 __dp_code_block1;
+ #ifdef __SSE4_1__
+                 d = _mm_andnot_si128(_mm_cmpgt_epi8(z, a), _mm_set1_epi8(1)); // d = z > a? 0 : 1
+                 z = _mm_max_epi8(z, a);
+                 d = _mm_blendv_epi8(_mm_set1_epi8(2), d, _mm_cmpgt_epi8(z, b)); // d = z > b? d : 2
+                 z = _mm_max_epi8(z, b);
+                 d = _mm_blendv_epi8(_mm_set1_epi8(3), d, _mm_cmpgt_epi8(z, a2)); // d = z > a2? d : 3
+                 z = _mm_max_epi8(z, a2);
+                 d = _mm_blendv_epi8(_mm_set1_epi8(4), d, _mm_cmpgt_epi8(z, b2)); // d = z > b2? d : 4
+                 z = _mm_max_epi8(z, b2);
+                 z = _mm_min_epi8(z, sc_mch_);
+ #else // we need to emulate SSE4.1 intrinsics _mm_max_epi8() and _mm_blendv_epi8()
+                 tmp = _mm_cmpgt_epi8(z, a);
+                 d = _mm_andnot_si128(tmp, _mm_set1_epi8(1));
+                 z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, a));
+                 tmp = _mm_cmpgt_epi8(z, b);
+                 d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(2)));
+                 z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, b));
+                 tmp = _mm_cmpgt_epi8(z, a2);
+                 d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(3)));
+                 z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, a2));
+                 tmp = _mm_cmpgt_epi8(z, b2);
+                 d = _mm_or_si128(_mm_and_si128(tmp, d), _mm_andnot_si128(tmp, _mm_set1_epi8(4)));
+                 z = _mm_or_si128(_mm_and_si128(tmp, z), _mm_andnot_si128(tmp, b2));
+                 tmp = _mm_cmplt_epi8(sc_mch_, z);
+                 z = _mm_or_si128(_mm_and_si128(tmp, sc_mch_), _mm_andnot_si128(tmp, z));
+ #endif
+                 __dp_code_block2;
+                 tmp = _mm_cmpgt_epi8(zero_, a);
+                 _mm_store_si128(&x[t], _mm_sub_epi8(_mm_andnot_si128(tmp, a), qe_));
+                 d = _mm_or_si128(d, _mm_andnot_si128(tmp, _mm_set1_epi8(0x08))); // d |= a >= 0? 1<<3 : 0
+                 tmp = _mm_cmpgt_epi8(zero_, b);
+                 _mm_store_si128(&y[t], _mm_sub_epi8(_mm_andnot_si128(tmp, b), qe_));
+                 d = _mm_or_si128(d, _mm_andnot_si128(tmp, _mm_set1_epi8(0x10))); // d |= b >= 0? 1<<4 : 0
+                 tmp = _mm_cmpgt_epi8(zero_, a2);
+                 _mm_store_si128(&x2[t], _mm_sub_epi8(_mm_andnot_si128(tmp, a2), qe2_));
+                 d = _mm_or_si128(d, _mm_andnot_si128(tmp, _mm_set1_epi8(0x20))); // d |= a2 >= 0? 1<<5 : 0
+                 tmp = _mm_cmpgt_epi8(zero_, b2);
+                 _mm_store_si128(&y2[t], _mm_sub_epi8(_mm_andnot_si128(tmp, b2), qe2_));
+                 d = _mm_or_si128(d, _mm_andnot_si128(tmp, _mm_set1_epi8(0x40))); // d |= b2 >= 0? 1<<6 : 0
+                 _mm_store_si128(&pr[t], d);
+             }
+         }
+         if (!approx_max) { // find the exact max with a 32-bit score array
+             int32_t max_H, max_t;
+             // compute H[], max_H and max_t
+             if (r > 0) {
+                 int32_t HH[4], tt[4], en1 = st0 + (en0 - st0) / 4 * 4, i;
+                 __m128i max_H_, max_t_;
+                 max_H = H[en0] = en0 > 0? H[en0-1] + u8[en0] : H[en0] + v8[en0]; // special casing the last element
+                 max_t = en0;
+                 max_H_ = _mm_set1_epi32(max_H);
+                 max_t_ = _mm_set1_epi32(max_t);
+                 for (t = st0; t < en1; t += 4) { // this implements: H[t]+=v8[t]-qe; if(H[t]>max_H) max_H=H[t],max_t=t;
+                     __m128i H1, tmp, t_;
+                     H1 = _mm_loadu_si128((__m128i*)&H[t]);
+                     t_ = _mm_setr_epi32(v8[t], v8[t+1], v8[t+2], v8[t+3]);
+                     H1 = _mm_add_epi32(H1, t_);
+                     _mm_storeu_si128((__m128i*)&H[t], H1);
+                     t_ = _mm_set1_epi32(t);
+                     tmp = _mm_cmpgt_epi32(H1, max_H_);
+ #ifdef __SSE4_1__
+                     max_H_ = _mm_blendv_epi8(max_H_, H1, tmp);
+                     max_t_ = _mm_blendv_epi8(max_t_, t_, tmp);
+ #else
+                     max_H_ = _mm_or_si128(_mm_and_si128(tmp, H1), _mm_andnot_si128(tmp, max_H_));
+                     max_t_ = _mm_or_si128(_mm_and_si128(tmp, t_), _mm_andnot_si128(tmp, max_t_));
+ #endif
+                 }
+                 _mm_storeu_si128((__m128i*)HH, max_H_);
+                 _mm_storeu_si128((__m128i*)tt, max_t_);
+                 for (i = 0; i < 4; ++i)
+                     if (max_H < HH[i]) max_H = HH[i], max_t = tt[i] + i;
+                 for (; t < en0; ++t) { // for the rest of values that haven't been computed with SSE
+                     H[t] += (int32_t)v8[t];
+                     if (H[t] > max_H)
+                         max_H = H[t], max_t = t;
+                 }
+             } else H[0] = v8[0] - qe, max_H = H[0], max_t = 0; // special casing r==0
+             // update ez
+             if (en0 == tlen - 1 && H[en0] > ez->mte)
+                 ez->mte = H[en0], ez->mte_q = r - en;
+             if (r - st0 == qlen - 1 && H[st0] > ez->mqe)
+                 ez->mqe = H[st0], ez->mqe_t = st0;
+             if (ksw_apply_zdrop(ez, 1, max_H, r, max_t, zdrop, e2)) break;
+             if (r == qlen + tlen - 2 && en0 == tlen - 1)
+                 ez->score = H[tlen - 1];
+         } else { // find the approximate max; Z-drop might be inaccurate, too
+             if (r > 0) {
+                 if (last_H0_t >= st0 && last_H0_t <= en0 && last_H0_t + 1 >= st0 && last_H0_t + 1 <= en0) {
+                     int32_t d0 = v8[last_H0_t];
+                     int32_t d1 = u8[last_H0_t + 1];
+                     if (d0 > d1) H0 += d0;
+                     else H0 += d1, ++last_H0_t;
+                 } else if (last_H0_t >= st0 && last_H0_t <= en0) {
+                     H0 += v8[last_H0_t];
+                 } else {
+                     ++last_H0_t, H0 += u8[last_H0_t];
+                 }
+             } else H0 = v8[0] - qe, last_H0_t = 0;
+             if ((flag & KSW_EZ_APPROX_DROP) && ksw_apply_zdrop(ez, 1, H0, r, last_H0_t, zdrop, e2)) break;
+             if (r == qlen + tlen - 2 && en0 == tlen - 1)
+                 ez->score = H0;
+         }
+         last_st = st, last_en = en;
+         //for (t = st0; t <= en0; ++t) printf("(%d,%d)\t(%d,%d,%d,%d)\t%d\n", r, t, ((int8_t*)u)[t], ((int8_t*)v)[t], ((int8_t*)x)[t], ((int8_t*)y)[t], H[t]); // for debugging
+     }
+     kfree(km, mem);
+     if (!approx_max) kfree(km, H);
+     if (with_cigar) { // backtrack
+         int rev_cigar = !!(flag & KSW_EZ_REV_CIGAR);
+         if (!ez->zdropped && !(flag&KSW_EZ_EXTZ_ONLY)) {
+             ksw_backtrack(km, 1, rev_cigar, 0, (uint8_t*)p, off, off_end, n_col_*16, tlen-1, qlen-1, &ez->m_cigar, &ez->n_cigar, &ez->cigar);
+         } else if (!ez->zdropped && (flag&KSW_EZ_EXTZ_ONLY) && ez->mqe + end_bonus > (int)ez->max) {
+             ez->reach_end = 1;
+             ksw_backtrack(km, 1, rev_cigar, 0, (uint8_t*)p, off, off_end, n_col_*16, ez->mqe_t, qlen-1, &ez->m_cigar, &ez->n_cigar, &ez->cigar);
+         } else if (ez->max_t >= 0 && ez->max_q >= 0) {
+             ksw_backtrack(km, 1, rev_cigar, 0, (uint8_t*)p, off, off_end, n_col_*16, ez->max_t, ez->max_q, &ez->m_cigar, &ez->n_cigar, &ez->cigar);
+         }
+         kfree(km, mem2); kfree(km, off);
+     }
+ }
+ #endif // __SSE2__
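
The function above is the vendored ksw2 kernel for minimap2's two-piece affine gap model: a gap of length k is charged min(q + k*e, q2 + k*e2), and long_thres is the gap length at which the shallower slope e2 takes over. The sketch below shows one way to call it. It is a minimal illustration, not part of the gem: it assumes you compile and link ksw2_extd2_sse.c and kalloc.c without KSW_CPU_DISPATCH (so the symbol is ksw_extd2_sse), encode bases as 0..3 with 4 for an ambiguous base, and pass km = 0 so the kalloc wrappers fall back to malloc/free. The helper encode_seq and the penalty values are illustrative.

    /* Minimal caller sketch for ksw_extd2_sse (assumptions noted above). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>
    #include "ksw2.h"

    /* Illustrative encoder: A/C/G/T -> 0..3, anything else -> 4 (the wildcard row). */
    static void encode_seq(const char *s, uint8_t *out, int len)
    {
        int i;
        for (i = 0; i < len; ++i)
            out[i] = s[i] == 'A'? 0 : s[i] == 'C'? 1 : s[i] == 'G'? 2 : s[i] == 'T'? 3 : 4;
    }

    int main(void)
    {
        const char *qs = "ACGTACGTAGCT", *ts = "ACGTTTACGTAGCT"; /* 2-bp insertion in the target */
        int i, j, qlen = (int)strlen(qs), tlen = (int)strlen(ts), m = 5;
        int8_t mat[25];
        uint8_t q[64], t[64];
        ksw_extz_t ez;

        /* 5x5 matrix: +2 match, -4 mismatch, 0 against the ambiguous base (row/col 4) */
        for (i = 0; i < m; ++i)
            for (j = 0; j < m; ++j)
                mat[i*m + j] = i == m-1 || j == m-1? 0 : i == j? 2 : -4;
        encode_seq(qs, q, qlen);
        encode_seq(ts, t, tlen);
        memset(&ez, 0, sizeof(ez)); /* ez.cigar must start NULL for the backtrack realloc */

        /* Gap of length k costs min(4 + 2k, 24 + k); w = -1 -> full band,
         * zdrop = -1 -> disabled, end_bonus = 0, flag = 0 -> score plus CIGAR. */
        ksw_extd2_sse(0, qlen, q, tlen, t, m, mat, 4, 2, 24, 1, -1, -1, 0, 0, &ez);

        printf("score=%d\tCIGAR=", ez.score);
        for (i = 0; i < ez.n_cigar; ++i)
            printf("%d%c", ez.cigar[i] >> 4, "MID"[ez.cigar[i] & 0xf]);
        putchar('\n');
        free(ez.cigar); /* km == 0, so the CIGAR buffer came from malloc */
        return 0;
    }

Under these illustrative penalties the 2-bp gap is charged through the first component (4 + 2k), while gaps longer than long_thres = 19 switch to the shallower 24 + k slope; that crossover is what lets minimap2 keep long indels affordable without loosening the short-gap penalty.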