normal-grain-merge 0.1.3__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- normal_grain_merge/__init__.py +2 -0
- normal_grain_merge/kernel_kind.py +8 -0
- normal_grain_merge/normal_grain_merge.c +1331 -0
- normal_grain_merge/normal_grain_merge.cp311-win_amd64.pyd +0 -0
- normal_grain_merge/normal_grain_merge.pyi +28 -0
- normal_grain_merge-0.1.3.dist-info/METADATA +109 -0
- normal_grain_merge-0.1.3.dist-info/RECORD +10 -0
- normal_grain_merge-0.1.3.dist-info/WHEEL +5 -0
- normal_grain_merge-0.1.3.dist-info/licenses/LICENSE +21 -0
- normal_grain_merge-0.1.3.dist-info/top_level.txt +1 -0
normal_grain_merge/normal_grain_merge.c
@@ -0,0 +1,1331 @@
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <stdint.h>
#include <string.h>
#include <Python.h>
#include <numpy/arrayobject.h>
#include <smmintrin.h>
#include <tmmintrin.h>
#include <immintrin.h> /* AVX2 + SSE4.2 */

#if defined(__FMA__) || (defined(_MSC_VER) && defined(__AVX2__))
#define NGM_HAS_FMA 1
#endif

/* ----- Runtime CPU feature detection (GCC/Clang + MSVC) ----- */
#if defined(_MSC_VER)
#include <intrin.h>
static int os_supports_avx(void) {
    /* Check OSXSAVE + XCR0[2:1] == 11b so OS saves YMM state */
    int cpuInfo[4];
    __cpuid(cpuInfo, 1);
    int ecx = cpuInfo[2];
    int osxsave = (ecx >> 27) & 1;
    if (!osxsave) return 0;
    unsigned long long xcr0 = _xgetbv(0);
    return ((xcr0 & 0x6) == 0x6); /* XMM (bit1) and YMM (bit2) state enabled */
}

static int cpu_supports_avx2(void) {
    int cpuInfo[4];
    __cpuid(cpuInfo, 1);
    int ecx = cpuInfo[2];
    int avx = (ecx >> 28) & 1;
    int osxsave = (ecx >> 27) & 1;
    if (!(avx && osxsave && os_supports_avx())) return 0;

    /* Leaf 7, subleaf 0: EBX bit 5 = AVX2 */
    int ex[4];
    __cpuidex(ex, 7, 0);
    int ebx = ex[1];
    return (ebx >> 5) & 1;
}

static int cpu_supports_sse42(void) {
    int cpuInfo[4];
    __cpuid(cpuInfo, 1);
    int ecx = cpuInfo[2];
    return (ecx >> 20) & 1; /* SSE4.2 */
}
#else
/* GCC/Clang path */
static int os_supports_avx(void) {
#if defined(__GNUC__) || defined(__clang__)
    /* If we’re here, assume OS supports AVX when the CPU supports it.
       For full rigor you can also call xgetbv via inline asm, but it’s uncommon to lack it. */
    return 1;
#else
    return 0;
#endif
}

static int cpu_supports_avx2(void) {
#if defined(__GNUC__) || defined(__clang__)
    /* Requires -mavx2 at compile, but we only *call* the AVX2 kernel if true. */
    return __builtin_cpu_supports("avx2");
#else
    return 0;
#endif
}

static int cpu_supports_sse42(void) {
#if defined(__GNUC__) || defined(__clang__)
    return __builtin_cpu_supports("sse4.2");
#else
    return 0;
#endif
}
#endif

#define SKIN_WEIGHT 0.3f

typedef enum {
    KERNEL_AUTO = 0,
    KERNEL_SCALAR = 1,
    KERNEL_SSE42 = 2,
    KERNEL_AVX2 = 3
} kernel_kind;

/* ---------- Utility: safe views, shape checks ---------- */

/* Make a new uint8, C-contiguous, aligned view we own. Never DECREF the input obj. */
static inline int get_uint8_c_contig(PyObject *obj, PyArrayObject **out, const char *name) {
    const int flags = NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
    PyArrayObject *arr = (PyArrayObject*)PyArray_FROM_OTF(obj, NPY_UINT8, flags);
    if (!arr) {
        PyErr_Format(PyExc_TypeError, "%s must be a uint8 ndarray", name);
        return 0;
    }
    *out = arr; /* new reference */
    return 1;
}

static inline int ensure_uint8_contig(PyArrayObject **arr, const char *name) {
    PyArrayObject *tmp = (PyArrayObject*)PyArray_FROM_OTF(
        (PyObject*)(*arr), NPY_UINT8, NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS);
    if (!tmp) return 0;
    Py_XDECREF(*arr);
    *arr = tmp;
    return 1;
}

static inline int check_shape_requirements(PyArrayObject *base,
                                           PyArrayObject *texture,
                                           PyArrayObject *skin,
                                           PyArrayObject *im_alpha,
                                           int *texture_has_alpha,
                                           npy_intp *height,
                                           npy_intp *width) {
    if (PyArray_NDIM(base) != 3 || PyArray_DIMS(base)[2] != 3) {
        PyErr_SetString(PyExc_ValueError, "base must have shape (H, W, 3)");
        return 0;
    }
    if (PyArray_NDIM(texture) != 3) {
        PyErr_SetString(PyExc_ValueError, "texture must have shape (H, W, 3) or (H, W, 4)");
        return 0;
    }
    npy_intp tc = PyArray_DIMS(texture)[2];
    if (!(tc == 3 || tc == 4)) {
        PyErr_SetString(PyExc_ValueError, "texture must have 3 or 4 channels");
        return 0;
    }
    *texture_has_alpha = (tc == 4);

    if (PyArray_NDIM(skin) != 3 || PyArray_DIMS(skin)[2] != 3) {
        PyErr_SetString(PyExc_ValueError, "skin must have shape (H, W, 3)");
        return 0;
    }
    if (PyArray_NDIM(im_alpha) != 2) {
        PyErr_SetString(PyExc_ValueError, "im_alpha must have shape (H, W)");
        return 0;
    }

    npy_intp h = PyArray_DIMS(base)[0], w = PyArray_DIMS(base)[1];
    if (PyArray_DIMS(texture)[0] != h || PyArray_DIMS(texture)[1] != w ||
        PyArray_DIMS(skin)[0] != h || PyArray_DIMS(skin)[1] != w ||
        PyArray_DIMS(im_alpha)[0] != h || PyArray_DIMS(im_alpha)[1] != w) {
        PyErr_SetString(PyExc_ValueError, "All inputs must share the same H and W");
        return 0;
    }
    *height = h; *width = w;
    return 1;
}
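
The checks above pin down the input contract: all four arrays share one (H, W); base and skin are packed RGB, texture is RGB or RGBA, and im_alpha is a single-channel mask, all uint8. A minimal sketch of a valid call from Python follows; the import assumes the package's __init__.py re-exports the compiled function, which this diff does not show.

    import numpy as np
    from normal_grain_merge import normal_grain_merge

    H, W = 480, 640
    base     = np.zeros((H, W, 3), np.uint8)   # RGB (an RGBA base is repacked to RGB by the binding)
    texture  = np.zeros((H, W, 4), np.uint8)   # 3 or 4 channels
    skin     = np.zeros((H, W, 3), np.uint8)   # 3 channels only
    im_alpha = np.full((H, W), 255, np.uint8)  # 2-D greyscale mask

    out = normal_grain_merge(base, texture, skin, im_alpha)
    assert out.shape == (H, W, 3) and out.dtype == np.uint8

    # Mismatched heights/widths or channel counts raise ValueError via the checks above,
    # e.g. passing texture[:, :-1] trips "All inputs must share the same H and W".
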

/* ---------- Scalar reference kernel (clear, correct, easy to modify) ---------- */
/* Converts uint8 to float32 in [0,1], applies the blend, and writes the result back to uint8. */
/* The scalar kernels are the reference the SIMD kernels below must reproduce. */

/*
 * Replaces NaN with 0.0f and clamps infinities (+inf -> 255.0f, -inf -> 0.0f).
 */
static inline float nan_to_num(float x) {
    if (isnan(x)) {
        return 0.0f; // replace NaN with 0
    }
    if (isinf(x)) {
        if (x > 0) {
            return 255.0f; // positive infinity -> max uint8
        } else {
            return 0.0f; // negative infinity -> min uint8
        }
    } else {
        return x; // keep finite values as they are
    }
}

/*
 * Scalar kernel for RGB texture input.
 */
static void kernel_scalar_rgb(const uint8_t *base, const uint8_t *texture,
                              const uint8_t *skin, const uint8_t *im_alpha,
                              uint8_t *out, npy_intp pixels) {
    for (npy_intp i = 0; i < pixels; ++i) {
        const uint8_t b_r = base[3*i+0];
        const uint8_t b_g = base[3*i+1];
        const uint8_t b_b = base[3*i+2];

        const uint8_t t_r = texture[3*i+0];
        const uint8_t t_g = texture[3*i+1];
        const uint8_t t_b = texture[3*i+2];

        const uint8_t s_r = skin[3*i+0];
        const uint8_t s_g = skin[3*i+1];
        const uint8_t s_b = skin[3*i+2];

        const uint8_t a_im = im_alpha[i];

        /* float32 intermediates in [0,1] */
        const float fb_r = b_r * (1.0f/255.0f);
        const float fb_g = b_g * (1.0f/255.0f);
        const float fb_b = b_b * (1.0f/255.0f);

        const float ft_r = t_r * (1.0f/255.0f);
        const float ft_g = t_g * (1.0f/255.0f);
        const float ft_b = t_b * (1.0f/255.0f);

        const float fs_r = s_r * (1.0f/255.0f);
        const float fs_g = s_g * (1.0f/255.0f);
        const float fs_b = s_b * (1.0f/255.0f);
        const float fa_im = a_im * (1.0f/255.0f);

        /*
         **********************
         * normal grain merge *
         **********************
         */

        /* inverse_tpa */
        float fit_a = 1.0f - fa_im;
        /* gm_out = np.clip(texture + skin - 0.5, 0.0, 1.0) */
        float fr = ft_r + fs_r - 0.5f;
        float fg = ft_g + fs_g - 0.5f;
        float fb = ft_b + fs_b - 0.5f;
        /* np.clip */
        fr = fr < 0.0f ? 0.0f : (fr > 1.0f ? 1.0f : fr);
        fg = fg < 0.0f ? 0.0f : (fg > 1.0f ? 1.0f : fg);
        fb = fb < 0.0f ? 0.0f : (fb > 1.0f ? 1.0f : fb);
        /* gm_out = gm_out * texture_alpha + texture * inverse_tpa */
        fr = fr * fa_im + ft_r * fit_a;
        fg = fg * fa_im + ft_g * fit_a;
        fb = fb * fa_im + ft_b * fit_a;

        /* gm_out = gm_out * (1 - SKIN_WEIGHT) + (skin * SKIN_WEIGHT) */
        fr = fr * (1.0f - SKIN_WEIGHT) + fs_r * SKIN_WEIGHT;
        fg = fg * (1.0f - SKIN_WEIGHT) + fs_g * SKIN_WEIGHT;
        fb = fb * (1.0f - SKIN_WEIGHT) + fs_b * SKIN_WEIGHT;

        /* np.nan_to_num(gm_out, copy=False) */
        fr = nan_to_num(fr);
        fg = nan_to_num(fg);
        fb = nan_to_num(fb);

        /* Normal merge
         * n_out = gm_out * texture_alpha + base * inverse_tpa
         *
         * In this case, texture_alpha is supplied by im_alpha since texture doesn't have an alpha channel here.
         */
        fr = fr * fa_im + fb_r * fit_a;
        fg = fg * fa_im + fb_g * fit_a;
        fb = fb * fa_im + fb_b * fit_a;

        out[3*i+0] = (uint8_t)(fr * 255.0f);
        out[3*i+1] = (uint8_t)(fg * 255.0f);
        out[3*i+2] = (uint8_t)(fb * 255.0f);
    }
}

static void kernel_scalar_rgba(const uint8_t *base, const uint8_t *texture,
                               const uint8_t *skin, const uint8_t *im_alpha,
                               uint8_t *out, npy_intp pixels) {
    for (npy_intp i = 0; i < pixels; ++i) {
        const uint8_t b_r = base[3*i+0];
        const uint8_t b_g = base[3*i+1];
        const uint8_t b_b = base[3*i+2];

        const uint8_t t_r = texture[4*i+0];
        const uint8_t t_g = texture[4*i+1];
        const uint8_t t_b = texture[4*i+2];
        const uint8_t t_a = texture[4*i+3]; /* present in RGBA branch */

        const uint8_t s_r = skin[3*i+0];
        const uint8_t s_g = skin[3*i+1];
        const uint8_t s_b = skin[3*i+2];

        const uint8_t a_im = im_alpha[i];

        const float fb_r = b_r * (1.0f/255.0f);
        const float fb_g = b_g * (1.0f/255.0f);
        const float fb_b = b_b * (1.0f/255.0f);

        const float ft_r = t_r * (1.0f/255.0f);
        const float ft_g = t_g * (1.0f/255.0f);
        const float ft_b = t_b * (1.0f/255.0f);
        float ft_a = t_a * (1.0f/255.0f);

        const float fs_r = s_r * (1.0f/255.0f);
        const float fs_g = s_g * (1.0f/255.0f);
        const float fs_b = s_b * (1.0f/255.0f);
        const float fa_im = a_im * (1.0f/255.0f);

        /*
         **********************
         * normal grain merge *
         **********************
         */
        /* Merge texture alpha with the external mask */

        /* texture_alpha = texture[..., 3] * im_alpha */
        ft_a = ft_a * fa_im;
        /* inverse_tpa = 1 - texture_alpha */
        float fit_a = 1.0f - ft_a;

        /* gm_out = np.clip(texture + skin - 0.5, 0.0, 1.0) */
        float fr = ft_r + fs_r - 0.5f;
        float fg = ft_g + fs_g - 0.5f;
        float fb = ft_b + fs_b - 0.5f;
        /* np.clip */
        fr = fr < 0.0f ? 0.0f : (fr > 1.0f ? 1.0f : fr);
        fg = fg < 0.0f ? 0.0f : (fg > 1.0f ? 1.0f : fg);
        fb = fb < 0.0f ? 0.0f : (fb > 1.0f ? 1.0f : fb);

        /* gm_out = gm_out * texture_alpha + texture * inverse_tpa */
        fr = fr * ft_a + ft_r * fit_a;
        fg = fg * ft_a + ft_g * fit_a;
        fb = fb * ft_a + ft_b * fit_a;

        /* gm_out = gm_out * (1 - SKIN_WEIGHT) + (skin * SKIN_WEIGHT) */
        fr = fr * (1.0f - SKIN_WEIGHT) + fs_r * SKIN_WEIGHT;
        fg = fg * (1.0f - SKIN_WEIGHT) + fs_g * SKIN_WEIGHT;
        fb = fb * (1.0f - SKIN_WEIGHT) + fs_b * SKIN_WEIGHT;

        /* np.nan_to_num(gm_out, copy=False) */
        fr = nan_to_num(fr);
        fg = nan_to_num(fg);
        fb = nan_to_num(fb);

        /* Normal merge
         * n_out = gm_out * texture_alpha + base * inverse_tpa
         */
        fr = fr * ft_a + fb_r * fit_a;
        fg = fg * ft_a + fb_g * fit_a;
        fb = fb * ft_a + fb_b * fit_a;

        out[3*i+0] = (uint8_t)(fr * 255.0f);
        out[3*i+1] = (uint8_t)(fg * 255.0f);
        out[3*i+2] = (uint8_t)(fb * 255.0f);
    }
}
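
The two scalar kernels above are the reference the SIMD paths below must reproduce. For readers more comfortable with NumPy, the same arithmetic reads as follows; this is an illustrative sketch of the formulas quoted in the comments, not code shipped by the package:

    import numpy as np

    SKIN_WEIGHT = 0.3

    def normal_grain_merge_reference(base, texture, skin, im_alpha):
        # All inputs uint8; work in float32 on the [0, 1] scale, as the C kernels do.
        b = base.astype(np.float32) / 255.0
        t = texture.astype(np.float32) / 255.0
        s = skin.astype(np.float32) / 255.0
        a = im_alpha.astype(np.float32)[..., None] / 255.0

        # texture_alpha: im_alpha alone for RGB textures, texture.A * im_alpha for RGBA.
        if texture.shape[2] == 4:
            ta = t[..., 3:4] * a
            t = t[..., :3]
        else:
            ta = a
        inv_ta = 1.0 - ta

        gm = np.clip(t + s - 0.5, 0.0, 1.0)              # grain merge
        gm = gm * ta + t * inv_ta
        gm = gm * (1.0 - SKIN_WEIGHT) + s * SKIN_WEIGHT  # blend some skin back in
        gm = np.nan_to_num(gm)
        out = gm * ta + b * inv_ta                       # normal merge over base
        return (out * 255.0).astype(np.uint8)            # truncates, like the C cast
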

/* ---------- AVX2 helpers ----------
   Interleaved RGB(A) is awkward for SIMD. We build 8-lane vectors per channel by
   reusing the scalar u8x4 -> f32 helpers instead of relying on gathers.
*/

static inline __m128 bytes4_to_unit_f32(__m128i bytes, __m128 inv255) {
    __m128i v32 = _mm_cvtepu8_epi32(bytes);
    return _mm_mul_ps(_mm_cvtepi32_ps(v32), inv255);
}

/* Forward declarations for SSE4.2 kernels used in AVX2 tail handling. */
static void kernel_sse42_rgb(const uint8_t *base, const uint8_t *texture,
                             const uint8_t *skin, const uint8_t *im_alpha,
                             uint8_t *out, npy_intp pixels);
static void kernel_sse42_rgba(const uint8_t *base, const uint8_t *texture,
                              const uint8_t *skin, const uint8_t *im_alpha,
                              uint8_t *out, npy_intp pixels);

static inline void load4_rgb_to_unit_f32(const uint8_t *p, __m128 inv255,
                                         __m128 *r, __m128 *g, __m128 *b) {
    const __m128i src = _mm_loadu_si128((const __m128i*)p);
    const __m128i mask_r = _mm_setr_epi8(0, 3, 6, 9,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_g = _mm_setr_epi8(1, 4, 7, 10,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_b = _mm_setr_epi8(2, 5, 8, 11,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);

    __m128i rb = _mm_shuffle_epi8(src, mask_r);
    __m128i gb = _mm_shuffle_epi8(src, mask_g);
    __m128i bb = _mm_shuffle_epi8(src, mask_b);

    *r = bytes4_to_unit_f32(rb, inv255);
    *g = bytes4_to_unit_f32(gb, inv255);
    *b = bytes4_to_unit_f32(bb, inv255);
}

static inline void load4_rgba_to_unit_f32(const uint8_t *p, __m128 inv255,
                                          __m128 *r, __m128 *g, __m128 *b, __m128 *a) {
    const __m128i src = _mm_loadu_si128((const __m128i*)p);
    const __m128i mask_r = _mm_setr_epi8(0, 4, 8, 12,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_g = _mm_setr_epi8(1, 5, 9, 13,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_b = _mm_setr_epi8(2, 6, 10, 14,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_a = _mm_setr_epi8(3, 7, 11, 15,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                         (char)0x80, (char)0x80, (char)0x80, (char)0x80);

    __m128i rb = _mm_shuffle_epi8(src, mask_r);
    __m128i gb = _mm_shuffle_epi8(src, mask_g);
    __m128i bb = _mm_shuffle_epi8(src, mask_b);
    __m128i ab = _mm_shuffle_epi8(src, mask_a);

    *r = bytes4_to_unit_f32(rb, inv255);
    *g = bytes4_to_unit_f32(gb, inv255);
    *b = bytes4_to_unit_f32(bb, inv255);
    *a = bytes4_to_unit_f32(ab, inv255);
}

static inline __m256 mul_add_ps256(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
    return _mm256_fmadd_ps(a, b, c);
#else
    return _mm256_add_ps(_mm256_mul_ps(a, b), c);
#endif
}

static inline __m256 fnmadd_ps256(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
    return _mm256_fnmadd_ps(a, b, c);
#else
    return _mm256_sub_ps(c, _mm256_mul_ps(a, b));
#endif
}

/* Convert 8 consecutive u8 to float32 in [0,1] (for grayscale im_alpha). */
static inline __m256 load8_u8_to_unit_f32_avx2(const uint8_t *p, __m256 inv255) {
    __m128i v8 = _mm_loadl_epi64((const __m128i*)p); /* 8 bytes -> XMM */
    __m256i v32 = _mm256_cvtepu8_epi32(v8);          /* widen to 8 x u32 */
    return _mm256_mul_ps(_mm256_cvtepi32_ps(v32), inv255);
}

static inline void load16_u8_to_unit_f32_avx2(const uint8_t *p, __m256 inv255,
                                              __m256 *lo, __m256 *hi) {
    __m128i v16 = _mm_loadu_si128((const __m128i*)p); /* 16 bytes */
    __m256i v32_lo = _mm256_cvtepu8_epi32(v16);
    __m128i v8_hi = _mm_srli_si128(v16, 8);
    __m256i v32_hi = _mm256_cvtepu8_epi32(v8_hi);
    *lo = _mm256_mul_ps(_mm256_cvtepi32_ps(v32_lo), inv255);
    *hi = _mm256_mul_ps(_mm256_cvtepi32_ps(v32_hi), inv255);
}

static inline void load16_u8_to_unit_f32_avx2_from_xmm(__m128i v16, __m256 inv255,
                                                       __m256 *lo, __m256 *hi) {
    __m256i v32_lo = _mm256_cvtepu8_epi32(v16);
    __m128i v8_hi = _mm_srli_si128(v16, 8);
    __m256i v32_hi = _mm256_cvtepu8_epi32(v8_hi);
    *lo = _mm256_mul_ps(_mm256_cvtepi32_ps(v32_lo), inv255);
    *hi = _mm256_mul_ps(_mm256_cvtepi32_ps(v32_hi), inv255);
}

static inline __m256 clamp01_ps(__m256 x) {
    return _mm256_min_ps(_mm256_max_ps(x, _mm256_set1_ps(0.0f)), _mm256_set1_ps(1.0f));
}

/* Replace NaN with 0.0f (Inf is not expected from uint8-origin math). */
static inline __m256 nan_to_num_ps(__m256 x) {
    __m256 cmp = _mm256_cmp_ps(x, x, _CMP_ORD_Q); /* 0 for NaN lanes */
    return _mm256_blendv_ps(_mm256_set1_ps(0.0f), x, cmp);
}

/* Convert 4 float32 RGB vectors in [0,1] to uint8_t RGBRGBRGBRGB without branches. */
static inline __m128i pack_unit_f32_to_u8_rgb4(__m128 fr, __m128 fg, __m128 fb) {
    const __m128 scale = _mm_set1_ps(255.0f);
    const __m128i zero = _mm_setzero_si128();
    const __m128i max255 = _mm_set1_epi32(255);

    __m128i ir = _mm_cvttps_epi32(_mm_mul_ps(fr, scale));
    __m128i ig = _mm_cvttps_epi32(_mm_mul_ps(fg, scale));
    __m128i ib = _mm_cvttps_epi32(_mm_mul_ps(fb, scale));

    ir = _mm_min_epi32(_mm_max_epi32(ir, zero), max255);
    ig = _mm_min_epi32(_mm_max_epi32(ig, zero), max255);
    ib = _mm_min_epi32(_mm_max_epi32(ib, zero), max255);

    __m128i ir16 = _mm_packus_epi32(ir, zero);
    __m128i ig16 = _mm_packus_epi32(ig, zero);
    __m128i ib16 = _mm_packus_epi32(ib, zero);

    __m128i ir8 = _mm_packus_epi16(ir16, zero);
    __m128i ig8 = _mm_packus_epi16(ig16, zero);
    __m128i ib8 = _mm_packus_epi16(ib16, zero);

    const __m128i mask_r = _mm_setr_epi8(
        0, (char)0x80, (char)0x80, 1,
        (char)0x80, (char)0x80, 2, (char)0x80,
        (char)0x80, 3, (char)0x80, (char)0x80,
        (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_g = _mm_setr_epi8(
        (char)0x80, 0, (char)0x80, (char)0x80,
        1, (char)0x80, (char)0x80, 2,
        (char)0x80, (char)0x80, 3, (char)0x80,
        (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    const __m128i mask_b = _mm_setr_epi8(
        (char)0x80, (char)0x80, 0, (char)0x80,
        (char)0x80, 1, (char)0x80, (char)0x80,
        2, (char)0x80, (char)0x80, 3,
        (char)0x80, (char)0x80, (char)0x80, (char)0x80);

    __m128i packed = _mm_or_si128(
        _mm_or_si128(_mm_shuffle_epi8(ir8, mask_r),
                     _mm_shuffle_epi8(ig8, mask_g)),
        _mm_shuffle_epi8(ib8, mask_b));

    return packed;
}

static inline void store_unit_f32_to_u8_rgb4(__m128 fr, __m128 fg, __m128 fb,
                                             uint8_t *out_ptr) {
    __m128i packed = pack_unit_f32_to_u8_rgb4(fr, fg, fb);
    _mm_storel_epi64((__m128i*)out_ptr, packed);
    __m128i tail_vec = _mm_srli_si128(packed, 8);
    uint32_t tail = (uint32_t)_mm_cvtsi128_si32(tail_vec);
    memcpy(out_ptr + 8, &tail, sizeof(tail));
}

static inline void store_unit_f32_to_u8_rgb4_u16(__m128 fr, __m128 fg, __m128 fb,
                                                 uint8_t *out_ptr) {
    __m128i packed = pack_unit_f32_to_u8_rgb4(fr, fg, fb);
    _mm_storeu_si128((__m128i*)out_ptr, packed);
}

/* texture is RGB: texture_alpha = im_alpha broadcast, inverse_tpa = 1 - texture_alpha */
static void kernel_avx2_rgb(const uint8_t *base, const uint8_t *texture,
                            const uint8_t *skin, const uint8_t *im_alpha,
                            uint8_t *out, npy_intp pixels) {
    const __m256 inv255 = _mm256_set1_ps(1.0f/255.0f);
    const __m128 inv255_128 = _mm_set1_ps(1.0f/255.0f);
    const __m256 half = _mm256_set1_ps(0.5f);
    const __m256 one = _mm256_set1_ps(1.0f);
    const __m256 w = _mm256_set1_ps((float)SKIN_WEIGHT);
    const __m256 invw = _mm256_set1_ps(1.0f - (float)SKIN_WEIGHT);

    npy_intp i = 0;
    for (; i + 18 <= pixels; i += 16) {
        if (i + 32 < pixels) {
            _mm_prefetch((const char*)(base + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(texture + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(skin + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(im_alpha + (i + 32)), _MM_HINT_T0);
        }

        const uint8_t *base_blk = base + 3*i;
        const uint8_t *tex_blk = texture + 3*i;
        const uint8_t *skin_blk = skin + 3*i;

        __m128i a16 = _mm_loadu_si128((const __m128i*)(im_alpha + i));
        __m128i a_zero = _mm_cmpeq_epi8(a16, _mm_setzero_si128());
        if (_mm_movemask_epi8(a_zero) == 0xFFFF) {
            memcpy(out + 3*i, base_blk, 48);
            continue;
        }

        __m256 fa_im0, fa_im1;
        load16_u8_to_unit_f32_avx2_from_xmm(a16, inv255, &fa_im0, &fa_im1);
        __m256 fit_a0 = fnmadd_ps256(fa_im0, one, one);
        __m256 fit_a1 = fnmadd_ps256(fa_im1, one, one);

        /* base RGB in [0,1] */
        __m128 fb_r0, fb_g0, fb_b0;
        __m128 fb_r1, fb_g1, fb_b1;
        load4_rgb_to_unit_f32(base_blk, inv255_128, &fb_r0, &fb_g0, &fb_b0);
        load4_rgb_to_unit_f32(base_blk + 12, inv255_128, &fb_r1, &fb_g1, &fb_b1);
        __m256 fb_r = _mm256_set_m128(fb_r1, fb_r0);
        __m256 fb_g = _mm256_set_m128(fb_g1, fb_g0);
        __m256 fb_b = _mm256_set_m128(fb_b1, fb_b0);

        __m128 fb_r2, fb_g2, fb_b2;
        __m128 fb_r3, fb_g3, fb_b3;
        load4_rgb_to_unit_f32(base_blk + 24, inv255_128, &fb_r2, &fb_g2, &fb_b2);
        load4_rgb_to_unit_f32(base_blk + 36, inv255_128, &fb_r3, &fb_g3, &fb_b3);
        __m256 fb_r_2 = _mm256_set_m128(fb_r3, fb_r2);
        __m256 fb_g_2 = _mm256_set_m128(fb_g3, fb_g2);
        __m256 fb_b_2 = _mm256_set_m128(fb_b3, fb_b2);

        /* texture RGB in [0,1] */
        __m128 ft_r0, ft_g0, ft_b0;
        __m128 ft_r1, ft_g1, ft_b1;
        load4_rgb_to_unit_f32(tex_blk, inv255_128, &ft_r0, &ft_g0, &ft_b0);
        load4_rgb_to_unit_f32(tex_blk + 12, inv255_128, &ft_r1, &ft_g1, &ft_b1);
        __m256 ft_r = _mm256_set_m128(ft_r1, ft_r0);
        __m256 ft_g = _mm256_set_m128(ft_g1, ft_g0);
        __m256 ft_b = _mm256_set_m128(ft_b1, ft_b0);

        __m128 ft_r2, ft_g2, ft_b2;
        __m128 ft_r3, ft_g3, ft_b3;
        load4_rgb_to_unit_f32(tex_blk + 24, inv255_128, &ft_r2, &ft_g2, &ft_b2);
        load4_rgb_to_unit_f32(tex_blk + 36, inv255_128, &ft_r3, &ft_g3, &ft_b3);
        __m256 ft_r_2 = _mm256_set_m128(ft_r3, ft_r2);
        __m256 ft_g_2 = _mm256_set_m128(ft_g3, ft_g2);
        __m256 ft_b_2 = _mm256_set_m128(ft_b3, ft_b2);

        /* skin RGB in [0,1] */
        __m128 fs_r0, fs_g0, fs_b0;
        __m128 fs_r1, fs_g1, fs_b1;
        load4_rgb_to_unit_f32(skin_blk, inv255_128, &fs_r0, &fs_g0, &fs_b0);
        load4_rgb_to_unit_f32(skin_blk + 12, inv255_128, &fs_r1, &fs_g1, &fs_b1);
        __m256 fs_r = _mm256_set_m128(fs_r1, fs_r0);
        __m256 fs_g = _mm256_set_m128(fs_g1, fs_g0);
        __m256 fs_b = _mm256_set_m128(fs_b1, fs_b0);

        __m128 fs_r2, fs_g2, fs_b2;
        __m128 fs_r3, fs_g3, fs_b3;
        load4_rgb_to_unit_f32(skin_blk + 24, inv255_128, &fs_r2, &fs_g2, &fs_b2);
        load4_rgb_to_unit_f32(skin_blk + 36, inv255_128, &fs_r3, &fs_g3, &fs_b3);
        __m256 fs_r_2 = _mm256_set_m128(fs_r3, fs_r2);
        __m256 fs_g_2 = _mm256_set_m128(fs_g3, fs_g2);
        __m256 fs_b_2 = _mm256_set_m128(fs_b3, fs_b2);

        /* gm_out = clip(texture + skin - 0.5) */
        __m256 gm_r = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_r, fs_r), half));
        __m256 gm_g = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_g, fs_g), half));
        __m256 gm_b = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_b, fs_b), half));

        __m256 gm_r2 = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_r_2, fs_r_2), half));
        __m256 gm_g2 = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_g_2, fs_g_2), half));
        __m256 gm_b2 = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_b_2, fs_b_2), half));

        /* gm_out = gm_out * texture_alpha + texture * inverse_tpa */
        gm_r = mul_add_ps256(gm_r, fa_im0, _mm256_mul_ps(ft_r, fit_a0));
        gm_g = mul_add_ps256(gm_g, fa_im0, _mm256_mul_ps(ft_g, fit_a0));
        gm_b = mul_add_ps256(gm_b, fa_im0, _mm256_mul_ps(ft_b, fit_a0));

        gm_r2 = mul_add_ps256(gm_r2, fa_im1, _mm256_mul_ps(ft_r_2, fit_a1));
        gm_g2 = mul_add_ps256(gm_g2, fa_im1, _mm256_mul_ps(ft_g_2, fit_a1));
        gm_b2 = mul_add_ps256(gm_b2, fa_im1, _mm256_mul_ps(ft_b_2, fit_a1));

        /* gm_out = gm_out * (1 - w) + skin * w */
        gm_r = mul_add_ps256(gm_r, invw, _mm256_mul_ps(fs_r, w));
        gm_g = mul_add_ps256(gm_g, invw, _mm256_mul_ps(fs_g, w));
        gm_b = mul_add_ps256(gm_b, invw, _mm256_mul_ps(fs_b, w));

        gm_r2 = mul_add_ps256(gm_r2, invw, _mm256_mul_ps(fs_r_2, w));
        gm_g2 = mul_add_ps256(gm_g2, invw, _mm256_mul_ps(fs_g_2, w));
        gm_b2 = mul_add_ps256(gm_b2, invw, _mm256_mul_ps(fs_b_2, w));

        /* nan_to_num */
        gm_r = nan_to_num_ps(gm_r);
        gm_g = nan_to_num_ps(gm_g);
        gm_b = nan_to_num_ps(gm_b);

        gm_r2 = nan_to_num_ps(gm_r2);
        gm_g2 = nan_to_num_ps(gm_g2);
        gm_b2 = nan_to_num_ps(gm_b2);

        /* n_out = gm_out * texture_alpha + base * inverse_tpa */
        __m256 fr = mul_add_ps256(gm_r, fa_im0, _mm256_mul_ps(fb_r, fit_a0));
        __m256 fg = mul_add_ps256(gm_g, fa_im0, _mm256_mul_ps(fb_g, fit_a0));
        __m256 fb = mul_add_ps256(gm_b, fa_im0, _mm256_mul_ps(fb_b, fit_a0));

        __m256 fr2 = mul_add_ps256(gm_r2, fa_im1, _mm256_mul_ps(fb_r_2, fit_a1));
        __m256 fg2 = mul_add_ps256(gm_g2, fa_im1, _mm256_mul_ps(fb_g_2, fit_a1));
        __m256 fb2 = mul_add_ps256(gm_b2, fa_im1, _mm256_mul_ps(fb_b_2, fit_a1));

        __m128 fr_lo = _mm256_castps256_ps128(fr);
        __m128 fg_lo = _mm256_castps256_ps128(fg);
        __m128 fb_lo = _mm256_castps256_ps128(fb);
        store_unit_f32_to_u8_rgb4_u16(fr_lo, fg_lo, fb_lo, out + 3*i);

        __m128 fr_hi = _mm256_extractf128_ps(fr, 1);
        __m128 fg_hi = _mm256_extractf128_ps(fg, 1);
        __m128 fb_hi = _mm256_extractf128_ps(fb, 1);
        store_unit_f32_to_u8_rgb4_u16(fr_hi, fg_hi, fb_hi, out + 3*i + 12);

        __m128 fr2_lo = _mm256_castps256_ps128(fr2);
        __m128 fg2_lo = _mm256_castps256_ps128(fg2);
        __m128 fb2_lo = _mm256_castps256_ps128(fb2);
        store_unit_f32_to_u8_rgb4_u16(fr2_lo, fg2_lo, fb2_lo, out + 3*i + 24);

        __m128 fr2_hi = _mm256_extractf128_ps(fr2, 1);
        __m128 fg2_hi = _mm256_extractf128_ps(fg2, 1);
        __m128 fb2_hi = _mm256_extractf128_ps(fb2, 1);
        store_unit_f32_to_u8_rgb4_u16(fr2_hi, fg2_hi, fb2_hi, out + 3*i + 36);
    }

    for (; i + 10 <= pixels; i += 8) {
        const uint8_t *base_blk = base + 3*i;
        const uint8_t *tex_blk = texture + 3*i;
        const uint8_t *skin_blk = skin + 3*i;

        /* base RGB in [0,1] */
        __m128 fb_r0, fb_g0, fb_b0;
        __m128 fb_r1, fb_g1, fb_b1;
        load4_rgb_to_unit_f32(base_blk, inv255_128, &fb_r0, &fb_g0, &fb_b0);
        load4_rgb_to_unit_f32(base_blk + 12, inv255_128, &fb_r1, &fb_g1, &fb_b1);
        __m256 fb_r = _mm256_set_m128(fb_r1, fb_r0);
        __m256 fb_g = _mm256_set_m128(fb_g1, fb_g0);
        __m256 fb_b = _mm256_set_m128(fb_b1, fb_b0);

        /* texture RGB in [0,1] */
        __m128 ft_r0, ft_g0, ft_b0;
        __m128 ft_r1, ft_g1, ft_b1;
        load4_rgb_to_unit_f32(tex_blk, inv255_128, &ft_r0, &ft_g0, &ft_b0);
        load4_rgb_to_unit_f32(tex_blk + 12, inv255_128, &ft_r1, &ft_g1, &ft_b1);
        __m256 ft_r = _mm256_set_m128(ft_r1, ft_r0);
        __m256 ft_g = _mm256_set_m128(ft_g1, ft_g0);
        __m256 ft_b = _mm256_set_m128(ft_b1, ft_b0);

        /* skin RGB in [0,1] */
        __m128 fs_r0, fs_g0, fs_b0;
        __m128 fs_r1, fs_g1, fs_b1;
        load4_rgb_to_unit_f32(skin_blk, inv255_128, &fs_r0, &fs_g0, &fs_b0);
        load4_rgb_to_unit_f32(skin_blk + 12, inv255_128, &fs_r1, &fs_g1, &fs_b1);
        __m256 fs_r = _mm256_set_m128(fs_r1, fs_r0);
        __m256 fs_g = _mm256_set_m128(fs_g1, fs_g0);
        __m256 fs_b = _mm256_set_m128(fs_b1, fs_b0);

        if (i + 32 < pixels) {
            _mm_prefetch((const char*)(base + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(texture + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(skin + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(im_alpha + (i + 32)), _MM_HINT_T0);
        }

        __m128i a8 = _mm_loadl_epi64((const __m128i*)(im_alpha + i));
        __m128i a_zero = _mm_cmpeq_epi8(a8, _mm_setzero_si128());
        if (_mm_movemask_epi8(a_zero) == 0xFFFF) {
            memcpy(out + 3*i, base_blk, 24);
            continue;
        }

        /* texture_alpha = im_alpha */
        __m256 fa_im = load8_u8_to_unit_f32_avx2(im_alpha + i, inv255);
        __m256 fit_a = fnmadd_ps256(fa_im, one, one);

        /* gm_out = clip(texture + skin - 0.5) */
        __m256 gm_r = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_r, fs_r), half));
        __m256 gm_g = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_g, fs_g), half));
        __m256 gm_b = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_b, fs_b), half));

        /* gm_out = gm_out * texture_alpha + texture * inverse_tpa */
        gm_r = mul_add_ps256(gm_r, fa_im, _mm256_mul_ps(ft_r, fit_a));
        gm_g = mul_add_ps256(gm_g, fa_im, _mm256_mul_ps(ft_g, fit_a));
        gm_b = mul_add_ps256(gm_b, fa_im, _mm256_mul_ps(ft_b, fit_a));

        /* gm_out = gm_out * (1 - w) + skin * w */
        gm_r = mul_add_ps256(gm_r, invw, _mm256_mul_ps(fs_r, w));
        gm_g = mul_add_ps256(gm_g, invw, _mm256_mul_ps(fs_g, w));
        gm_b = mul_add_ps256(gm_b, invw, _mm256_mul_ps(fs_b, w));

        /* nan_to_num */
        gm_r = nan_to_num_ps(gm_r);
        gm_g = nan_to_num_ps(gm_g);
        gm_b = nan_to_num_ps(gm_b);

        /* n_out = gm_out * texture_alpha + base * inverse_tpa */
        __m256 fr = mul_add_ps256(gm_r, fa_im, _mm256_mul_ps(fb_r, fit_a));
        __m256 fg = mul_add_ps256(gm_g, fa_im, _mm256_mul_ps(fb_g, fit_a));
        __m256 fb = mul_add_ps256(gm_b, fa_im, _mm256_mul_ps(fb_b, fit_a));

        __m128 fr_lo = _mm256_castps256_ps128(fr);
        __m128 fg_lo = _mm256_castps256_ps128(fg);
        __m128 fb_lo = _mm256_castps256_ps128(fb);
        store_unit_f32_to_u8_rgb4_u16(fr_lo, fg_lo, fb_lo, out + 3*i);

        __m128 fr_hi = _mm256_extractf128_ps(fr, 1);
        __m128 fg_hi = _mm256_extractf128_ps(fg, 1);
        __m128 fb_hi = _mm256_extractf128_ps(fb, 1);
        store_unit_f32_to_u8_rgb4_u16(fr_hi, fg_hi, fb_hi, out + 3*i + 12);
    }

    if (i < pixels) {
        npy_intp rem = pixels - i;
        if (rem >= 6) {
            kernel_sse42_rgb(base + 3*i, texture + 3*i, skin + 3*i, im_alpha + i,
                             out + 3*i, rem);
        } else {
            kernel_scalar_rgb(base + 3*i, texture + 3*i, skin + 3*i, im_alpha + i,
                              out + 3*i, rem);
        }
    }
}

/* texture is RGBA: texture_alpha = texture.A * im_alpha, inverse_tpa = 1 - texture_alpha */
static void kernel_avx2_rgba(const uint8_t *base, const uint8_t *texture,
                             const uint8_t *skin, const uint8_t *im_alpha,
                             uint8_t *out, npy_intp pixels) {
    const __m256 inv255 = _mm256_set1_ps(1.0f/255.0f);
    const __m128 inv255_128 = _mm_set1_ps(1.0f/255.0f);
    const __m256 half = _mm256_set1_ps(0.5f);
    const __m256 one = _mm256_set1_ps(1.0f);
    const __m256 w = _mm256_set1_ps((float)SKIN_WEIGHT);
    const __m256 invw = _mm256_set1_ps(1.0f - (float)SKIN_WEIGHT);

    npy_intp i = 0;
    /* Two pixels of slack, as in the RGB kernel: the trailing 16-byte base/skin loads
       at offset +36 read 4 bytes past the 16th pixel. */
    for (; i + 18 <= pixels; i += 16) {
        if (i + 32 < pixels) {
            _mm_prefetch((const char*)(base + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(texture + 4*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(skin + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(im_alpha + (i + 32)), _MM_HINT_T0);
        }

        const uint8_t *base_blk = base + 3*i;
        const uint8_t *tex_blk = texture + 4*i;
        const uint8_t *skin_blk = skin + 3*i;

        __m128i a16 = _mm_loadu_si128((const __m128i*)(im_alpha + i));
        __m128i a_zero = _mm_cmpeq_epi8(a16, _mm_setzero_si128());
        if (_mm_movemask_epi8(a_zero) == 0xFFFF) {
            memcpy(out + 3*i, base_blk, 48);
            continue;
        }

        __m128i a_ff = _mm_cmpeq_epi8(a16, _mm_set1_epi8((char)0xFF));
        const int all_ff = (_mm_movemask_epi8(a_ff) == 0xFFFF);
        __m256 fa_im0, fa_im1;
        if (all_ff) {
            fa_im0 = _mm256_set1_ps(1.0f);
            fa_im1 = _mm256_set1_ps(1.0f);
        } else {
            load16_u8_to_unit_f32_avx2_from_xmm(a16, inv255, &fa_im0, &fa_im1);
        }

        __m128 fb_r0, fb_g0, fb_b0;
        __m128 fb_r1, fb_g1, fb_b1;
        load4_rgb_to_unit_f32(base_blk, inv255_128, &fb_r0, &fb_g0, &fb_b0);
        load4_rgb_to_unit_f32(base_blk + 12, inv255_128, &fb_r1, &fb_g1, &fb_b1);
        __m256 fb_r = _mm256_set_m128(fb_r1, fb_r0);
        __m256 fb_g = _mm256_set_m128(fb_g1, fb_g0);
        __m256 fb_b = _mm256_set_m128(fb_b1, fb_b0);

        __m128 ft_r0, ft_g0, ft_b0, ft_a0;
        __m128 ft_r1, ft_g1, ft_b1, ft_a1;
        load4_rgba_to_unit_f32(tex_blk, inv255_128, &ft_r0, &ft_g0, &ft_b0, &ft_a0);
        load4_rgba_to_unit_f32(tex_blk + 16, inv255_128, &ft_r1, &ft_g1, &ft_b1, &ft_a1);
        __m256 ft_r = _mm256_set_m128(ft_r1, ft_r0);
        __m256 ft_g = _mm256_set_m128(ft_g1, ft_g0);
        __m256 ft_b = _mm256_set_m128(ft_b1, ft_b0);
        __m256 ft_a = _mm256_set_m128(ft_a1, ft_a0); /* texture alpha */

        __m128 fs_r0, fs_g0, fs_b0;
        __m128 fs_r1, fs_g1, fs_b1;
        load4_rgb_to_unit_f32(skin_blk, inv255_128, &fs_r0, &fs_g0, &fs_b0);
        load4_rgb_to_unit_f32(skin_blk + 12, inv255_128, &fs_r1, &fs_g1, &fs_b1);
        __m256 fs_r = _mm256_set_m128(fs_r1, fs_r0);
        __m256 fs_g = _mm256_set_m128(fs_g1, fs_g0);
        __m256 fs_b = _mm256_set_m128(fs_b1, fs_b0);

        __m256 fta = all_ff ? ft_a : _mm256_mul_ps(ft_a, fa_im0); /* texture_alpha */
        __m256 fit_a = fnmadd_ps256(fta, one, one);               /* inverse_tpa */

        __m256 gm_r = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_r, fs_r), half));
        __m256 gm_g = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_g, fs_g), half));
        __m256 gm_b = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_b, fs_b), half));

        gm_r = mul_add_ps256(gm_r, fta, _mm256_mul_ps(ft_r, fit_a));
        gm_g = mul_add_ps256(gm_g, fta, _mm256_mul_ps(ft_g, fit_a));
        gm_b = mul_add_ps256(gm_b, fta, _mm256_mul_ps(ft_b, fit_a));

        gm_r = mul_add_ps256(gm_r, invw, _mm256_mul_ps(fs_r, w));
        gm_g = mul_add_ps256(gm_g, invw, _mm256_mul_ps(fs_g, w));
        gm_b = mul_add_ps256(gm_b, invw, _mm256_mul_ps(fs_b, w));

        gm_r = nan_to_num_ps(gm_r);
        gm_g = nan_to_num_ps(gm_g);
        gm_b = nan_to_num_ps(gm_b);

        __m256 fr = mul_add_ps256(gm_r, fta, _mm256_mul_ps(fb_r, fit_a));
        __m256 fg = mul_add_ps256(gm_g, fta, _mm256_mul_ps(fb_g, fit_a));
        __m256 fb = mul_add_ps256(gm_b, fta, _mm256_mul_ps(fb_b, fit_a));

        __m128 fr_lo = _mm256_castps256_ps128(fr);
        __m128 fg_lo = _mm256_castps256_ps128(fg);
        __m128 fb_lo = _mm256_castps256_ps128(fb);
        store_unit_f32_to_u8_rgb4_u16(fr_lo, fg_lo, fb_lo, out + 3*i);

        __m128 fr_hi = _mm256_extractf128_ps(fr, 1);
        __m128 fg_hi = _mm256_extractf128_ps(fg, 1);
        __m128 fb_hi = _mm256_extractf128_ps(fb, 1);
        store_unit_f32_to_u8_rgb4(fr_hi, fg_hi, fb_hi, out + 3*i + 12);

        __m128 fb_r2, fb_g2, fb_b2;
        __m128 fb_r3, fb_g3, fb_b3;
        load4_rgb_to_unit_f32(base_blk + 24, inv255_128, &fb_r2, &fb_g2, &fb_b2);
        load4_rgb_to_unit_f32(base_blk + 36, inv255_128, &fb_r3, &fb_g3, &fb_b3);
        __m256 fb_r_2 = _mm256_set_m128(fb_r3, fb_r2);
        __m256 fb_g_2 = _mm256_set_m128(fb_g3, fb_g2);
        __m256 fb_b_2 = _mm256_set_m128(fb_b3, fb_b2);

        __m128 ft_r2, ft_g2, ft_b2, ft_a2;
        __m128 ft_r3, ft_g3, ft_b3, ft_a3;
        load4_rgba_to_unit_f32(tex_blk + 32, inv255_128, &ft_r2, &ft_g2, &ft_b2, &ft_a2);
        load4_rgba_to_unit_f32(tex_blk + 48, inv255_128, &ft_r3, &ft_g3, &ft_b3, &ft_a3);
        __m256 ft_r_2 = _mm256_set_m128(ft_r3, ft_r2);
        __m256 ft_g_2 = _mm256_set_m128(ft_g3, ft_g2);
        __m256 ft_b_2 = _mm256_set_m128(ft_b3, ft_b2);
        __m256 ft_a_2 = _mm256_set_m128(ft_a3, ft_a2);

        __m128 fs_r2, fs_g2, fs_b2;
        __m128 fs_r3, fs_g3, fs_b3;
        load4_rgb_to_unit_f32(skin_blk + 24, inv255_128, &fs_r2, &fs_g2, &fs_b2);
        load4_rgb_to_unit_f32(skin_blk + 36, inv255_128, &fs_r3, &fs_g3, &fs_b3);
        __m256 fs_r_2 = _mm256_set_m128(fs_r3, fs_r2);
        __m256 fs_g_2 = _mm256_set_m128(fs_g3, fs_g2);
        __m256 fs_b_2 = _mm256_set_m128(fs_b3, fs_b2);

        __m256 fta2 = all_ff ? ft_a_2 : _mm256_mul_ps(ft_a_2, fa_im1);
        __m256 fit_a2 = fnmadd_ps256(fta2, one, one);

        __m256 gm_r2 = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_r_2, fs_r_2), half));
        __m256 gm_g2 = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_g_2, fs_g_2), half));
        __m256 gm_b2 = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_b_2, fs_b_2), half));

        gm_r2 = mul_add_ps256(gm_r2, fta2, _mm256_mul_ps(ft_r_2, fit_a2));
        gm_g2 = mul_add_ps256(gm_g2, fta2, _mm256_mul_ps(ft_g_2, fit_a2));
        gm_b2 = mul_add_ps256(gm_b2, fta2, _mm256_mul_ps(ft_b_2, fit_a2));

        gm_r2 = mul_add_ps256(gm_r2, invw, _mm256_mul_ps(fs_r_2, w));
        gm_g2 = mul_add_ps256(gm_g2, invw, _mm256_mul_ps(fs_g_2, w));
        gm_b2 = mul_add_ps256(gm_b2, invw, _mm256_mul_ps(fs_b_2, w));

        gm_r2 = nan_to_num_ps(gm_r2);
        gm_g2 = nan_to_num_ps(gm_g2);
        gm_b2 = nan_to_num_ps(gm_b2);

        __m256 fr2 = mul_add_ps256(gm_r2, fta2, _mm256_mul_ps(fb_r_2, fit_a2));
        __m256 fg2 = mul_add_ps256(gm_g2, fta2, _mm256_mul_ps(fb_g_2, fit_a2));
        __m256 fb2 = mul_add_ps256(gm_b2, fta2, _mm256_mul_ps(fb_b_2, fit_a2));

        __m128 fr2_lo = _mm256_castps256_ps128(fr2);
        __m128 fg2_lo = _mm256_castps256_ps128(fg2);
        __m128 fb2_lo = _mm256_castps256_ps128(fb2);
        store_unit_f32_to_u8_rgb4_u16(fr2_lo, fg2_lo, fb2_lo, out + 3*i + 24);

        __m128 fr2_hi = _mm256_extractf128_ps(fr2, 1);
        __m128 fg2_hi = _mm256_extractf128_ps(fg2, 1);
        __m128 fb2_hi = _mm256_extractf128_ps(fb2, 1);
        store_unit_f32_to_u8_rgb4(fr2_hi, fg2_hi, fb2_hi, out + 3*i + 36);
    }

    /* Same slack as the 8-pixel RGB loop: the base/skin loads at +12 over-read 4 bytes. */
    for (; i + 10 <= pixels; i += 8) {
        if (i + 32 < pixels) {
            _mm_prefetch((const char*)(base + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(texture + 4*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(skin + 3*(i + 32)), _MM_HINT_T0);
            _mm_prefetch((const char*)(im_alpha + (i + 32)), _MM_HINT_T0);
        }

        const uint8_t *base_blk = base + 3*i;
        const uint8_t *tex_blk = texture + 4*i;
        const uint8_t *skin_blk = skin + 3*i;

        __m128i a8 = _mm_loadl_epi64((const __m128i*)(im_alpha + i));
        __m128i a_zero = _mm_cmpeq_epi8(a8, _mm_setzero_si128());
        if (_mm_movemask_epi8(a_zero) == 0xFFFF) {
            memcpy(out + 3*i, base_blk, 24);
            continue;
        }

        __m128 fb_r0, fb_g0, fb_b0;
        __m128 fb_r1, fb_g1, fb_b1;
        load4_rgb_to_unit_f32(base_blk, inv255_128, &fb_r0, &fb_g0, &fb_b0);
        load4_rgb_to_unit_f32(base_blk + 12, inv255_128, &fb_r1, &fb_g1, &fb_b1);
        __m256 fb_r = _mm256_set_m128(fb_r1, fb_r0);
        __m256 fb_g = _mm256_set_m128(fb_g1, fb_g0);
        __m256 fb_b = _mm256_set_m128(fb_b1, fb_b0);

        __m128 ft_r0, ft_g0, ft_b0, ft_a0;
        __m128 ft_r1, ft_g1, ft_b1, ft_a1;
        load4_rgba_to_unit_f32(tex_blk, inv255_128, &ft_r0, &ft_g0, &ft_b0, &ft_a0);
        load4_rgba_to_unit_f32(tex_blk + 16, inv255_128, &ft_r1, &ft_g1, &ft_b1, &ft_a1);
        __m256 ft_r = _mm256_set_m128(ft_r1, ft_r0);
        __m256 ft_g = _mm256_set_m128(ft_g1, ft_g0);
        __m256 ft_b = _mm256_set_m128(ft_b1, ft_b0);
        __m256 ft_a = _mm256_set_m128(ft_a1, ft_a0);

        __m128 fs_r0, fs_g0, fs_b0;
        __m128 fs_r1, fs_g1, fs_b1;
        load4_rgb_to_unit_f32(skin_blk, inv255_128, &fs_r0, &fs_g0, &fs_b0);
        load4_rgb_to_unit_f32(skin_blk + 12, inv255_128, &fs_r1, &fs_g1, &fs_b1);
        __m256 fs_r = _mm256_set_m128(fs_r1, fs_r0);
        __m256 fs_g = _mm256_set_m128(fs_g1, fs_g0);
        __m256 fs_b = _mm256_set_m128(fs_b1, fs_b0);

        __m256 fa_im = load8_u8_to_unit_f32_avx2(im_alpha + i, inv255);
        __m256 fta = _mm256_mul_ps(ft_a, fa_im);
        __m256 fit_a = fnmadd_ps256(fta, one, one);

        __m256 gm_r = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_r, fs_r), half));
        __m256 gm_g = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_g, fs_g), half));
        __m256 gm_b = clamp01_ps(_mm256_sub_ps(_mm256_add_ps(ft_b, fs_b), half));

        gm_r = mul_add_ps256(gm_r, fta, _mm256_mul_ps(ft_r, fit_a));
        gm_g = mul_add_ps256(gm_g, fta, _mm256_mul_ps(ft_g, fit_a));
        gm_b = mul_add_ps256(gm_b, fta, _mm256_mul_ps(ft_b, fit_a));

        gm_r = mul_add_ps256(gm_r, invw, _mm256_mul_ps(fs_r, w));
        gm_g = mul_add_ps256(gm_g, invw, _mm256_mul_ps(fs_g, w));
        gm_b = mul_add_ps256(gm_b, invw, _mm256_mul_ps(fs_b, w));

        gm_r = nan_to_num_ps(gm_r);
        gm_g = nan_to_num_ps(gm_g);
        gm_b = nan_to_num_ps(gm_b);

        __m256 fr = mul_add_ps256(gm_r, fta, _mm256_mul_ps(fb_r, fit_a));
        __m256 fg = mul_add_ps256(gm_g, fta, _mm256_mul_ps(fb_g, fit_a));
        __m256 fb = mul_add_ps256(gm_b, fta, _mm256_mul_ps(fb_b, fit_a));

        __m128 fr_lo = _mm256_castps256_ps128(fr);
        __m128 fg_lo = _mm256_castps256_ps128(fg);
        __m128 fb_lo = _mm256_castps256_ps128(fb);
        store_unit_f32_to_u8_rgb4_u16(fr_lo, fg_lo, fb_lo, out + 3*i);

        __m128 fr_hi = _mm256_extractf128_ps(fr, 1);
        __m128 fg_hi = _mm256_extractf128_ps(fg, 1);
        __m128 fb_hi = _mm256_extractf128_ps(fb, 1);
        store_unit_f32_to_u8_rgb4(fr_hi, fg_hi, fb_hi, out + 3*i + 12);
    }

    if (i < pixels) {
        npy_intp rem = pixels - i;
        if (rem >= 4) {
            kernel_sse42_rgba(base + 3*i, texture + 4*i, skin + 3*i, im_alpha + i,
                              out + 3*i, rem);
        } else {
            kernel_scalar_rgba(base + 3*i, texture + 4*i, skin + 3*i, im_alpha + i,
                               out + 3*i, rem);
        }
    }
}

/* ---------- SSE4.2 kernels (process 4 pixels via manual loads) ---------- */

static inline __m128 load4_u8_to_unit_f32(const uint8_t *p) {
    /* p[0..3] are consecutive bytes (for im_alpha) */
    __m128i v8 = _mm_cvtsi32_si128(*(const int*)p); /* 4 bytes into xmm */
    __m128i v16 = _mm_cvtepu8_epi16(v8);            /* widen to 8 x u16, we use low 4 */
    __m128i v32 = _mm_cvtepu16_epi32(v16);
    return _mm_mul_ps(_mm_cvtepi32_ps(v32), _mm_set1_ps(1.0f/255.0f));
}

static inline __m128 clamp01_ps128(__m128 x) {
    return _mm_min_ps(_mm_max_ps(x, _mm_set1_ps(0.0f)), _mm_set1_ps(1.0f));
}

static inline __m128 nan_to_num_ps128(__m128 x) {
    __m128 cmp = _mm_cmpord_ps(x, x); /* 0 for NaN lanes */
    return _mm_blendv_ps(_mm_set1_ps(0.0f), x, cmp);
}

static inline __m128 mul_add_ps128(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
    return _mm_fmadd_ps(a, b, c);
#else
    return _mm_add_ps(_mm_mul_ps(a, b), c);
#endif
}

static void kernel_sse42_rgb(const uint8_t *base, const uint8_t *texture,
                             const uint8_t *skin, const uint8_t *im_alpha,
                             uint8_t *out, npy_intp pixels) {
    const __m128 half = _mm_set1_ps(0.5f);
    const __m128 one = _mm_set1_ps(1.0f);
    const __m128 w = _mm_set1_ps((float)SKIN_WEIGHT);
    const __m128 invw = _mm_set1_ps(1.0f - (float)SKIN_WEIGHT);
    const __m128 inv255 = _mm_set1_ps(1.0f/255.0f);

    npy_intp i = 0;
    for (; i + 6 <= pixels; i += 4) {
        __m128i a4 = _mm_cvtsi32_si128(*(const int*)(im_alpha + i));
        __m128i a_zero = _mm_cmpeq_epi8(a4, _mm_setzero_si128());
        if (_mm_movemask_epi8(a_zero) == 0xFFFF) {
            memcpy(out + 3*i, base + 3*i, 12);
            continue;
        }

        __m128 fb_r, fb_g, fb_b;
        __m128 ft_r, ft_g, ft_b;
        __m128 fs_r, fs_g, fs_b;
        load4_rgb_to_unit_f32(base + 3*i, inv255, &fb_r, &fb_g, &fb_b);
        load4_rgb_to_unit_f32(texture + 3*i, inv255, &ft_r, &ft_g, &ft_b);
        load4_rgb_to_unit_f32(skin + 3*i, inv255, &fs_r, &fs_g, &fs_b);

        __m128 fa_im = load4_u8_to_unit_f32(im_alpha + i);
        __m128 fit_a = _mm_sub_ps(one, fa_im);

        __m128 gm_r = clamp01_ps128(_mm_sub_ps(_mm_add_ps(ft_r, fs_r), half));
        __m128 gm_g = clamp01_ps128(_mm_sub_ps(_mm_add_ps(ft_g, fs_g), half));
        __m128 gm_b = clamp01_ps128(_mm_sub_ps(_mm_add_ps(ft_b, fs_b), half));

        gm_r = mul_add_ps128(gm_r, fa_im, _mm_mul_ps(ft_r, fit_a));
        gm_g = mul_add_ps128(gm_g, fa_im, _mm_mul_ps(ft_g, fit_a));
        gm_b = mul_add_ps128(gm_b, fa_im, _mm_mul_ps(ft_b, fit_a));

        gm_r = mul_add_ps128(gm_r, invw, _mm_mul_ps(fs_r, w));
        gm_g = mul_add_ps128(gm_g, invw, _mm_mul_ps(fs_g, w));
        gm_b = mul_add_ps128(gm_b, invw, _mm_mul_ps(fs_b, w));

        gm_r = nan_to_num_ps128(gm_r);
        gm_g = nan_to_num_ps128(gm_g);
        gm_b = nan_to_num_ps128(gm_b);

        __m128 fr = mul_add_ps128(gm_r, fa_im, _mm_mul_ps(fb_r, fit_a));
        __m128 fg = mul_add_ps128(gm_g, fa_im, _mm_mul_ps(fb_g, fit_a));
        __m128 fb = mul_add_ps128(gm_b, fa_im, _mm_mul_ps(fb_b, fit_a));

        store_unit_f32_to_u8_rgb4_u16(fr, fg, fb, out + 3*i);
    }

    if (i < pixels) {
        kernel_scalar_rgb(base + 3*i, texture + 3*i, skin + 3*i, im_alpha + i,
                          out + 3*i, pixels - i);
    }
}

static void kernel_sse42_rgba(const uint8_t *base, const uint8_t *texture,
                              const uint8_t *skin, const uint8_t *im_alpha,
                              uint8_t *out, npy_intp pixels) {
    const __m128 half = _mm_set1_ps(0.5f);
    const __m128 one = _mm_set1_ps(1.0f);
    const __m128 w = _mm_set1_ps((float)SKIN_WEIGHT);
    const __m128 invw = _mm_set1_ps(1.0f - (float)SKIN_WEIGHT);
    const __m128 inv255 = _mm_set1_ps(1.0f/255.0f);

    npy_intp i = 0;
    /* i + 6 (not 4), as in kernel_sse42_rgb: the 16-byte base/skin loads over-read 4 bytes. */
    for (; i + 6 <= pixels; i += 4) {
        __m128i a4 = _mm_cvtsi32_si128(*(const int*)(im_alpha + i));
        __m128i a_zero = _mm_cmpeq_epi8(a4, _mm_setzero_si128());
        if (_mm_movemask_epi8(a_zero) == 0xFFFF) {
            memcpy(out + 3*i, base + 3*i, 12);
            continue;
        }

        __m128 fb_r, fb_g, fb_b;
        __m128 ft_r, ft_g, ft_b, ft_a;
        __m128 fs_r, fs_g, fs_b;
        load4_rgb_to_unit_f32(base + 3*i, inv255, &fb_r, &fb_g, &fb_b);
        load4_rgba_to_unit_f32(texture + 4*i, inv255, &ft_r, &ft_g, &ft_b, &ft_a);
        load4_rgb_to_unit_f32(skin + 3*i, inv255, &fs_r, &fs_g, &fs_b);

        __m128 fa_im = load4_u8_to_unit_f32(im_alpha + i);
        __m128 fta = _mm_mul_ps(ft_a, fa_im); /* texture_alpha */
        __m128 fit_a = _mm_sub_ps(one, fta);

        __m128 gm_r = clamp01_ps128(_mm_sub_ps(_mm_add_ps(ft_r, fs_r), half));
        __m128 gm_g = clamp01_ps128(_mm_sub_ps(_mm_add_ps(ft_g, fs_g), half));
        __m128 gm_b = clamp01_ps128(_mm_sub_ps(_mm_add_ps(ft_b, fs_b), half));

        gm_r = mul_add_ps128(gm_r, fta, _mm_mul_ps(ft_r, fit_a));
        gm_g = mul_add_ps128(gm_g, fta, _mm_mul_ps(ft_g, fit_a));
        gm_b = mul_add_ps128(gm_b, fta, _mm_mul_ps(ft_b, fit_a));

        gm_r = mul_add_ps128(gm_r, invw, _mm_mul_ps(fs_r, w));
        gm_g = mul_add_ps128(gm_g, invw, _mm_mul_ps(fs_g, w));
        gm_b = mul_add_ps128(gm_b, invw, _mm_mul_ps(fs_b, w));

        gm_r = nan_to_num_ps128(gm_r);
        gm_g = nan_to_num_ps128(gm_g);
        gm_b = nan_to_num_ps128(gm_b);

        __m128 fr = mul_add_ps128(gm_r, fta, _mm_mul_ps(fb_r, fit_a));
        __m128 fg = mul_add_ps128(gm_g, fta, _mm_mul_ps(fb_g, fit_a));
        __m128 fb = mul_add_ps128(gm_b, fta, _mm_mul_ps(fb_b, fit_a));

        store_unit_f32_to_u8_rgb4(fr, fg, fb, out + 3*i);
    }

    if (i < pixels) {
        kernel_scalar_rgba(base + 3*i, texture + 4*i, skin + 3*i, im_alpha + i,
                           out + 3*i, pixels - i);
    }
}


/* ---------- Kernel dispatch ---------- */

static kernel_kind pick_kernel(const char *force_name) {
    if (force_name) {
        if (strcmp(force_name, "scalar") == 0) return KERNEL_SCALAR;
        if (strcmp(force_name, "sse42") == 0) return KERNEL_SSE42;
        if (strcmp(force_name, "avx2") == 0) return KERNEL_AVX2;
        if (strcmp(force_name, "auto") == 0) {/* fall through */}
    }
    /* Auto: prefer AVX2, then SSE4.2, else scalar */
    if (cpu_supports_avx2() && os_supports_avx()) return KERNEL_AVX2;
    if (cpu_supports_sse42()) return KERNEL_SSE42;
    return KERNEL_SCALAR;
}
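
Because the kernel argument documented in the binding below can force a specific code path, the SIMD kernels are easy to sanity-check against the scalar reference from Python. A sketch of such a check, assuming the package re-exports the compiled function and the machine actually supports the forced instruction sets:

    import numpy as np
    from normal_grain_merge import normal_grain_merge

    rng = np.random.default_rng(0)
    H, W = 123, 77   # odd sizes exercise the SIMD tail paths
    base     = rng.integers(0, 256, (H, W, 3), dtype=np.uint8)
    texture  = rng.integers(0, 256, (H, W, 4), dtype=np.uint8)
    skin     = rng.integers(0, 256, (H, W, 3), dtype=np.uint8)
    im_alpha = rng.integers(0, 256, (H, W), dtype=np.uint8)

    ref = normal_grain_merge(base, texture, skin, im_alpha, kernel="scalar")
    for name in ("sse42", "avx2"):
        out = normal_grain_merge(base, texture, skin, im_alpha, kernel=name)
        # FMA and float rounding may shift a value across the *255 truncation boundary.
        assert np.max(np.abs(out.astype(np.int16) - ref.astype(np.int16))) <= 1, name
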

/* ---------- Python binding ---------- */

/* Convert base (H,W,3 or H,W,4) -> packed RGB (H,W,3). Returns a NEW ref.
   If base is already (H,W,3), this returns a new C-contig copy of it (to be safe). */
static PyArrayObject* ensure_base_rgb(PyArrayObject *base_in, const char *name) {
    if (PyArray_NDIM(base_in) != 3) {
        PyErr_Format(PyExc_ValueError, "%s must have shape (H, W, 3) or (H, W, 4)", name);
        return NULL;
    }
    npy_intp const *dims_in = PyArray_DIMS(base_in);
    npy_intp H = dims_in[0], W = dims_in[1], C = dims_in[2];
    if (!(C == 3 || C == 4)) {
        PyErr_Format(PyExc_ValueError, "%s must have 3 or 4 channels", name);
        return NULL;
    }

    /* Always produce a fresh C-contiguous uint8 (H,W,3) we own. */
    npy_intp dims_out[3] = {H, W, 3};
    PyArrayObject *base_rgb = (PyArrayObject*)PyArray_SimpleNew(3, dims_out, NPY_UINT8);
    if (!base_rgb) return NULL;

    const uint8_t *src = (const uint8_t*)PyArray_DATA(base_in);
    uint8_t *dst = (uint8_t*)PyArray_DATA(base_rgb);
    const npy_intp pixels = H * W;

    if (C == 3) {
        /* Packed copy */
        memcpy(dst, src, (size_t)(pixels * 3));
        return base_rgb;
    }

    /* C == 4: strip alpha, keep RGB packed */
    for (npy_intp i = 0; i < pixels; ++i) {
        dst[3*i + 0] = src[4*i + 0];
        dst[3*i + 1] = src[4*i + 1];
        dst[3*i + 2] = src[4*i + 2];
    }
    return base_rgb;
}

static PyObject* py_normal_grain_merge(PyObject* self, PyObject* args, PyObject* kwargs) {
    static char *kwlist[] = {"base", "texture", "skin", "im_alpha", "kernel", NULL};

    PyObject *base_obj = NULL, *texture_obj = NULL, *skin_obj = NULL, *im_alpha_obj = NULL;
    const char *kernel_name = "auto";

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OOOO|s", kwlist,
                                     &base_obj, &texture_obj, &skin_obj, &im_alpha_obj,
                                     &kernel_name)) {
        return NULL;
    }

    /* Materialize arrays we own. Do NOT decref the *_obj borrowed refs. */
    /* Borrowed -> owned, uint8, C-contiguous views via get_uint8_c_contig. */
    PyArrayObject *base_u8 = NULL, *texture = NULL, *skin = NULL, *im_alpha = NULL;
    if (!get_uint8_c_contig(base_obj, &base_u8, "base") ||
        !get_uint8_c_contig(texture_obj, &texture, "texture") ||
        !get_uint8_c_contig(skin_obj, &skin, "skin") ||
        !get_uint8_c_contig(im_alpha_obj, &im_alpha, "im_alpha")) {
        Py_XDECREF(base_u8); Py_XDECREF(texture); Py_XDECREF(skin); Py_XDECREF(im_alpha);
        return NULL;
    }

    /* If base is RGBA, pack to RGB; if it’s already RGB, make a packed copy */
    PyArrayObject *base = ensure_base_rgb(base_u8, "base");
    if (!base) {
        Py_DECREF(base_u8); Py_DECREF(texture); Py_DECREF(skin); Py_DECREF(im_alpha);
        return NULL;
    }
    Py_DECREF(base_u8); /* drop the intermediate reference, we own `base` now */

    int texture_has_alpha = 0;
    npy_intp H = 0, W = 0;
    if (!check_shape_requirements(base, texture, skin, im_alpha,
                                  &texture_has_alpha, &H, &W)) {
        Py_DECREF(base); Py_DECREF(texture); Py_DECREF(skin); Py_DECREF(im_alpha);
        return NULL;
    }

    /* Allocate output (H, W, 3) uint8 */
    PyObject *out = PyArray_NewLikeArray(base, NPY_ANYORDER, NULL, 0);
    if (!out) {
        Py_XDECREF(base); Py_XDECREF(texture); Py_XDECREF(skin); Py_XDECREF(im_alpha);
        return NULL;
    }

    const uint8_t *p_base = (const uint8_t*)PyArray_DATA(base);
    const uint8_t *p_texture = (const uint8_t*)PyArray_DATA(texture);
    const uint8_t *p_skin = (const uint8_t*)PyArray_DATA(skin);
    const uint8_t *p_imalpha = (const uint8_t*)PyArray_DATA(im_alpha);
    uint8_t *p_out = (uint8_t*)PyArray_DATA((PyArrayObject*)out);

    const npy_intp pixels = H * W;

    kernel_kind k = pick_kernel(kernel_name);

    /* Release the GIL around the pure C loops. No Python API calls inside kernels. */
    NPY_BEGIN_ALLOW_THREADS

    if (!texture_has_alpha) {
        if (k == KERNEL_AVX2) {
            kernel_avx2_rgb(p_base, p_texture, p_skin, p_imalpha, p_out, pixels);
        } else if (k == KERNEL_SSE42) {
            kernel_sse42_rgb(p_base, p_texture, p_skin, p_imalpha, p_out, pixels);
        } else {
            kernel_scalar_rgb(p_base, p_texture, p_skin, p_imalpha, p_out, pixels);
        }
    } else {
        if (k == KERNEL_AVX2) {
            kernel_avx2_rgba(p_base, p_texture, p_skin, p_imalpha, p_out, pixels);
        } else if (k == KERNEL_SSE42) {
            kernel_sse42_rgba(p_base, p_texture, p_skin, p_imalpha, p_out, pixels);
        } else {
            kernel_scalar_rgba(p_base, p_texture, p_skin, p_imalpha, p_out, pixels);
        }
    }

    NPY_END_ALLOW_THREADS

    /* DECREF only what we own. */
    Py_DECREF(base); Py_DECREF(texture); Py_DECREF(skin); Py_DECREF(im_alpha);
    return out;
}

static PyMethodDef Methods[] = {
    {"normal_grain_merge", (PyCFunction)py_normal_grain_merge, METH_VARARGS | METH_KEYWORDS,
     "normal_grain_merge(base, texture, skin, im_alpha, kernel='auto') -> np.ndarray\n"
     "kernel: 'auto', 'scalar', 'sse42', or 'avx2'"},
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "normal_grain_merge",
    "Normal Grain Merge Module",
    -1,
    Methods
};

PyMODINIT_FUNC PyInit_normal_grain_merge(void) {
    import_array();
    return PyModule_Create(&moduledef);
}
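
End to end, a caller loads the base, texture and skin images plus the greyscale mask, passes them as uint8 arrays, and lets kernel='auto' pick the fastest supported path. A usage sketch; Pillow and the file names here are illustrative assumptions, not dependencies declared by this diff:

    import numpy as np
    from PIL import Image
    from normal_grain_merge import normal_grain_merge  # assumes the package re-exports the C function

    base     = np.asarray(Image.open("base.png").convert("RGB"))
    texture  = np.asarray(Image.open("texture.png").convert("RGBA"))  # RGB also works
    skin     = np.asarray(Image.open("skin.png").convert("RGB"))
    im_alpha = np.asarray(Image.open("mask.png").convert("L"))        # (H, W) uint8

    merged = normal_grain_merge(base, texture, skin, im_alpha, kernel="auto")
    Image.fromarray(merged).save("merged.png")
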