ruby_dsp 0.0.8 → 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3760 @@
1
+ /*
2
+ This file is part of pocketfft.
3
+
4
+ Copyright (C) 2010-2024 Max-Planck-Society
5
+ Copyright (C) 2019-2020 Peter Bell
6
+
7
+ For the odd-sized DCT-IV transforms:
8
+ Copyright (C) 2003, 2007-14 Matteo Frigo
9
+ Copyright (C) 2003, 2007-14 Massachusetts Institute of Technology
10
+
11
+ For the prev_good_size search:
12
+ Copyright (C) 2024 Tan Ping Liang, Peter Bell
13
+
14
+ For the safeguards against integer overflow in good_size search:
15
+ Copyright (C) 2024 Cris Luengo
16
+
17
+ Authors: Martin Reinecke, Peter Bell
18
+
19
+ All rights reserved.
20
+
21
+ Redistribution and use in source and binary forms, with or without modification,
22
+ are permitted provided that the following conditions are met:
23
+
24
+ * Redistributions of source code must retain the above copyright notice, this
25
+ list of conditions and the following disclaimer.
26
+ * Redistributions in binary form must reproduce the above copyright notice, this
27
+ list of conditions and the following disclaimer in the documentation and/or
28
+ other materials provided with the distribution.
29
+ * Neither the name of the copyright holder nor the names of its contributors may
30
+ be used to endorse or promote products derived from this software without
31
+ specific prior written permission.
32
+
33
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
34
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
35
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
36
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
37
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
38
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
40
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
42
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43
+ */
44
+
45
+ #ifndef POCKETFFT_HDRONLY_H
46
+ #define POCKETFFT_HDRONLY_H
47
+
48
+ #ifndef __cplusplus
49
+ #error This file is C++ and requires a C++ compiler.
50
+ #endif
51
+
52
+ #if !(__cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L))
53
+ #error This file requires at least C++11 support.
54
+ #endif
55
+
56
+ #ifndef POCKETFFT_CACHE_SIZE
57
+ #define POCKETFFT_CACHE_SIZE 0
58
+ #endif
59
+
60
+ #include <cmath>
61
+ #include <cstdlib>
62
+ #include <cstddef>
63
+ #include <cstdint>
64
+ #include <exception>
65
+ #include <stdexcept>
66
+ #include <memory>
67
+ #include <vector>
68
+ #include <complex>
69
+ #include <algorithm>
70
+ #include <limits>
71
+ #if POCKETFFT_CACHE_SIZE!=0
72
+ #include <array>
73
+ #include <mutex>
74
+ #endif
75
+
76
+ #ifndef POCKETFFT_NO_MULTITHREADING
77
+ #include <mutex>
78
+ #include <condition_variable>
79
+ #include <thread>
80
+ #include <queue>
81
+ #include <atomic>
82
+ #include <functional>
83
+ #include <new>
84
+
85
+ #ifdef POCKETFFT_PTHREADS
86
+ # include <pthread.h>
87
+ #endif
88
+ #endif
89
+
90
+ #if defined(__GNUC__)
91
+ #define POCKETFFT_NOINLINE __attribute__((noinline))
92
+ #define POCKETFFT_RESTRICT __restrict__
93
+ #elif defined(_MSC_VER)
94
+ #define POCKETFFT_NOINLINE __declspec(noinline)
95
+ #define POCKETFFT_RESTRICT __restrict
96
+ #else
97
+ #define POCKETFFT_NOINLINE
98
+ #define POCKETFFT_RESTRICT
99
+ #endif
100
+
101
+ namespace pocketfft {
102
+
103
+ namespace detail {
104
+ using std::size_t;
105
+ using std::ptrdiff_t;
106
+
107
+ // Always use std:: for <cmath> functions
108
+ template <typename T> T cos(T) = delete;
109
+ template <typename T> T sin(T) = delete;
110
+ template <typename T> T sqrt(T) = delete;
111
+
112
+ using shape_t = std::vector<size_t>;
113
+ using stride_t = std::vector<ptrdiff_t>;
114
+
115
+ constexpr bool FORWARD = true,
116
+ BACKWARD = false;
117
+
118
+ // only enable vector support for gcc>=5.0 and clang>=5.0
119
+ #ifndef POCKETFFT_NO_VECTORS
120
+ #define POCKETFFT_NO_VECTORS
121
+ #if defined(__INTEL_COMPILER)
122
+ // do nothing. This is necessary because this compiler also sets __GNUC__.
123
+ #elif defined(__clang__)
124
+ // AppleClang has their own version numbering
125
+ #ifdef __apple_build_version__
126
+ # if (__clang_major__ > 9) || (__clang_major__ == 9 && __clang_minor__ >= 1)
127
+ # undef POCKETFFT_NO_VECTORS
128
+ # endif
129
+ #elif __clang_major__ >= 5
130
+ # undef POCKETFFT_NO_VECTORS
131
+ #endif
132
+ #elif defined(__GNUC__)
133
+ #if __GNUC__>=5
134
+ #undef POCKETFFT_NO_VECTORS
135
+ #endif
136
+ #endif
137
+ #endif
138
+
139
+ template<typename T> struct VLEN { static constexpr size_t val=1; };
140
+
141
+ #ifndef POCKETFFT_NO_VECTORS
142
+ #if (defined(__AVX512F__))
143
+ template<> struct VLEN<float> { static constexpr size_t val=16; };
144
+ template<> struct VLEN<double> { static constexpr size_t val=8; };
145
+ #elif (defined(__AVX__))
146
+ template<> struct VLEN<float> { static constexpr size_t val=8; };
147
+ template<> struct VLEN<double> { static constexpr size_t val=4; };
148
+ #elif (defined(__SSE2__))
149
+ template<> struct VLEN<float> { static constexpr size_t val=4; };
150
+ template<> struct VLEN<double> { static constexpr size_t val=2; };
151
+ #elif (defined(__VSX__))
152
+ template<> struct VLEN<float> { static constexpr size_t val=4; };
153
+ template<> struct VLEN<double> { static constexpr size_t val=2; };
154
+ #elif (defined(__ARM_NEON__) || defined(__ARM_NEON))
155
+ template<> struct VLEN<float> { static constexpr size_t val=4; };
156
+ template<> struct VLEN<double> { static constexpr size_t val=2; };
157
+ #else
158
+ #define POCKETFFT_NO_VECTORS
159
+ #endif
160
+ #endif
161
+
162
// std::aligned_alloc is a bit cursed ... it doesn't exist on MacOS < 10.15
// and in musl, and other OSes seem to have even more peculiarities.
// Let's unconditionally work around it for now.
#if defined(POCKETFFT_USE_POSIX_MEMALIGN) && (defined(__APPLE__) || defined(__unix__))
// Use posix_memalign on POSIX systems when explicitly enabled.
// The portable aligned_alloc below stores metadata at ptr[-1], which conflicts
// with ASAN's heap redzone and causes intermittent bus errors.

/// Allocate `size` bytes aligned to at least `align` bytes.
/// @throws std::bad_alloc if the allocation fails.
inline void *aligned_alloc(size_t align, size_t size)
  {
  align = std::max(align, sizeof(void*)); // posix_memalign requires align >= sizeof(void*)
  void *ptr = nullptr;
  if (posix_memalign(&ptr, align, size) != 0)
    throw std::bad_alloc();
  return ptr;
  }
/// Release memory obtained from the posix_memalign-based aligned_alloc above.
inline void aligned_dealloc(void *ptr)
  { free(ptr); }
#else
# if 0
//#if (__cplusplus >= 201703L) && (!defined(__MINGW32__)) && (!defined(_MSC_VER)) && (__MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_15)
/// C++17 path (currently disabled): delegate to the library's ::aligned_alloc.
inline void *aligned_alloc(size_t align, size_t size)
  {
  // aligned_alloc() requires that the requested size is a multiple of "align"
  void *ptr = ::aligned_alloc(align,(size+align-1)&(~(align-1)));
  if (!ptr) throw std::bad_alloc();
  return ptr;
  }
inline void aligned_dealloc(void *ptr)
  { free(ptr); }
#else // portable emulation
/// Portable aligned allocation: over-allocate by `align` bytes, round the
/// returned pointer up to the next alignment boundary, and stash the original
/// malloc pointer in the slot immediately before the returned address so
/// aligned_dealloc can recover it.
/// @throws std::bad_alloc if the underlying malloc fails.
inline void *aligned_alloc(size_t align, size_t size)
  {
  align = std::max(align, alignof(max_align_t));
  void *ptr = malloc(size+align);
  if (!ptr) throw std::bad_alloc();
  void *res = reinterpret_cast<void *>
    ((reinterpret_cast<uintptr_t>(ptr) & ~(uintptr_t(align-1))) + uintptr_t(align));
  (reinterpret_cast<void**>(res))[-1] = ptr;  // remember the real allocation
  return res;
  }
/// Free memory from the emulated aligned_alloc: the real malloc pointer is
/// stored at ptr[-1]. Null pointers are ignored.
inline void aligned_dealloc(void *ptr)
  { if (ptr) free((reinterpret_cast<void**>(ptr))[-1]); }
# endif
#endif
206
+
207
/** Minimal RAII array of uninitialized T.
 *  Storage is 64-byte aligned when vector support is enabled, plain malloc
 *  otherwise. Elements are never constructed or destroyed, so T is expected
 *  to be trivially constructible/destructible (as cmplx<T0> is).
 *  Movable but not copied anywhere in this file. */
template<typename T> class arr
  {
  private:
    T *p;       // owned storage (nullptr when sz==0)
    size_t sz;  // number of elements

#if defined(POCKETFFT_NO_VECTORS)
    static T *ralloc(size_t num)
      {
      if (num==0) return nullptr;
      void *res = malloc(num*sizeof(T));
      if (!res) throw std::bad_alloc();
      return reinterpret_cast<T *>(res);
      }
    static void dealloc(T *ptr)
      { free(ptr); }
#else
    static T *ralloc(size_t num)
      {
      if (num==0) return nullptr;
      // aligned_alloc throws std::bad_alloc on failure, no null check needed
      void *ptr = aligned_alloc(64, num*sizeof(T));
      return static_cast<T*>(ptr);
      }
    static void dealloc(T *ptr)
      { aligned_dealloc(ptr); }
#endif

  public:
    arr() : p(0), sz(0) {}
    explicit arr(size_t n) : p(ralloc(n)), sz(n) {}
    arr(arr &&other) noexcept
      : p(other.p), sz(other.sz)
      { other.p=nullptr; other.sz=0; }
    ~arr() { dealloc(p); }

    /// Discard the current contents and allocate storage for n elements.
    /// Contents are NOT preserved and the new elements are uninitialized.
    void resize(size_t n)
      {
      if (n==sz) return;
      dealloc(p);
      // Reset state before reallocating: if ralloc throws, the destructor
      // would otherwise call dealloc on the already-freed pointer (double
      // free / UB). With p nulled, failure leaves a valid empty array.
      p = nullptr;
      sz = 0;
      p = ralloc(n);
      sz = n;
      }

    T &operator[](size_t idx) { return p[idx]; }
    const T &operator[](size_t idx) const { return p[idx]; }

    T *data() { return p; }
    const T *data() const { return p; }

    size_t size() const { return sz; }
  };
258
+
259
/** Lightweight complex value type (real part `r`, imaginary part `i`).
 *  The default constructor intentionally leaves members uninitialized so
 *  large arr<cmplx<T>> buffers can be created without an init pass.
 *  T may itself be a scalar or a SIMD vector type. */
template<typename T> struct cmplx {
  T r, i;
  cmplx() = default;
  cmplx(T r_, T i_) : r(r_), i(i_) {}
  void Set(T r_, T i_) { r=r_; i=i_; }
  void Set(T r_) { r=r_; i=T(0); }  // real value, zero imaginary part
  cmplx &operator+= (const cmplx &other)
    { r+=other.r; i+=other.i; return *this; }
  // scale by a (possibly different-typed) scalar
  template<typename T2>cmplx &operator*= (T2 other)
    { r*=other; i*=other; return *this; }
  template<typename T2>cmplx &operator*= (const cmplx<T2> &other)
    {
    T tmp = r*other.r - i*other.i;  // keep old r alive for the imaginary part
    i = r*other.i + i*other.r;
    r = tmp;
    return *this;
    }
  template<typename T2>cmplx &operator+= (const cmplx<T2> &other)
    { r+=other.r; i+=other.i; return *this; }
  template<typename T2>cmplx &operator-= (const cmplx<T2> &other)
    { r-=other.r; i-=other.i; return *this; }
  // mixed-type operators deduce the result element type from the expression,
  // so e.g. cmplx<float> * double yields cmplx<double>
  template<typename T2> auto operator* (const T2 &other) const
    -> cmplx<decltype(r*other)>
    { return {r*other, i*other}; }
  template<typename T2> auto operator+ (const cmplx<T2> &other) const
    -> cmplx<decltype(r+other.r)>
    { return {r+other.r, i+other.i}; }
  template<typename T2> auto operator- (const cmplx<T2> &other) const
    -> cmplx<decltype(r+other.r)>
    { return {r-other.r, i-other.i}; }
  template<typename T2> auto operator* (const cmplx<T2> &other) const
    -> cmplx<decltype(r+other.r)>
    { return {r*other.r-i*other.i, r*other.i + i*other.r}; }
  /// Direction-aware multiply used by the FFT passes:
  /// fwd==true  -> (*this) * conj(other)
  /// fwd==false -> (*this) * other
  template<bool fwd, typename T2> auto special_mul (const cmplx<T2> &other) const
    -> cmplx<decltype(r+other.r)>
    {
    using Tres = cmplx<decltype(r+other.r)>;
    return fwd ? Tres(r*other.r+i*other.i, i*other.r-r*other.i)
               : Tres(r*other.r-i*other.i, r*other.i+i*other.r);
    }
  };
300
/// Butterfly: a = c+d, b = c-d.
template<typename T> inline void PM(T &a, T &b, T c, T d)
  { a=c+d; b=c-d; }
/// In-place butterfly: (a,b) <- (a+b, a-b).
template<typename T> inline void PMINPLACE(T &a, T &b)
  { T t = a; a+=b; b=t-b; }
/// In-place butterfly, difference first: (a,b) <- (a-b, a+b).
template<typename T> inline void MPINPLACE(T &a, T &b)
  { T t = a; a-=b; b=t+b; }
/// Complex conjugate.
template<typename T> cmplx<T> conj(const cmplx<T> &a)
  { return {a.r, -a.i}; }
/// Free-function form of cmplx::special_mul, writing into `res`:
/// fwd==true -> v1 * conj(v2), fwd==false -> v1 * v2.
template<bool fwd, typename T, typename T2> void special_mul (const cmplx<T> &v1, const cmplx<T2> &v2, cmplx<T> &res)
  {
  res = fwd ? cmplx<T>(v1.r*v2.r+v1.i*v2.i, v1.i*v2.r-v1.r*v2.i)
            : cmplx<T>(v1.r*v2.r-v1.i*v2.i, v1.r*v2.i+v1.i*v2.r);
  }

/// Multiply by i in place: (r,i) <- (-i, r).
template<typename T> void ROT90(cmplx<T> &a)
  { auto tmp_=a.r; a.r=-a.i; a.i=tmp_; }
/// Direction-aware rotation: multiply by -i when fwd, by i otherwise.
template<bool fwd, typename T> void ROTX90(cmplx<T> &a)
  { auto tmp_= fwd ? -a.r : a.r; a.r = fwd ? a.i : -a.i; a.i=tmp_; }
318
+
319
+ //
320
+ // twiddle factor section
321
+ //
322
//
// twiddle factor section
//

/** Twiddle-factor table: operator[](idx) returns exp(2*pi*i*idx/N) as
 *  cmplx<T>, for 0 <= idx < N.
 *  To keep memory use at O(sqrt(N)) instead of O(N), the angle is split as
 *  idx = (idx & mask) + (idx >> shift)*(mask+1); the two partial rotations
 *  are stored in the tables v1 (fine steps) and v2 (coarse steps) and
 *  combined by complex multiplication on lookup. Tables are computed in
 *  `Thigh` (at least double) precision to limit rounding error. */
template<typename T> class sincos_2pibyn
  {
  private:
    // compute in double (or better, if T is wider than double)
    using Thigh = typename std::conditional<(sizeof(T)>sizeof(double)), T, double>::type;
    size_t N, mask, shift;
    arr<cmplx<Thigh>> v1, v2;

    // Return (cos, sin) of 2*pi*x/n, given ang = 2*pi/(8n).
    // The octant/quadrant reduction below keeps the argument passed to
    // std::cos/std::sin small, which improves accuracy near the axes.
    static cmplx<Thigh> calc(size_t x, size_t n, Thigh ang)
      {
      x<<=3;  // work in eighths: x*ang is now the full angle 2*pi*x/n
      if (x<4*n) // first half
        {
        if (x<2*n) // first quadrant
          {
          if (x<n) return cmplx<Thigh>(std::cos(Thigh(x)*ang), std::sin(Thigh(x)*ang));
          // reflect about pi/4: cos/sin swap roles
          return cmplx<Thigh>(std::sin(Thigh(2*n-x)*ang), std::cos(Thigh(2*n-x)*ang));
          }
        else // second quadrant
          {
          x-=2*n;
          if (x<n) return cmplx<Thigh>(-std::sin(Thigh(x)*ang), std::cos(Thigh(x)*ang));
          return cmplx<Thigh>(-std::cos(Thigh(2*n-x)*ang), std::sin(Thigh(2*n-x)*ang));
          }
        }
      else
        {
        x=8*n-x;  // mirror into the upper half; sin changes sign below
        if (x<2*n) // third quadrant
          {
          if (x<n) return cmplx<Thigh>(std::cos(Thigh(x)*ang), -std::sin(Thigh(x)*ang));
          return cmplx<Thigh>(std::sin(Thigh(2*n-x)*ang), -std::cos(Thigh(2*n-x)*ang));
          }
        else // fourth quadrant
          {
          x-=2*n;
          if (x<n) return cmplx<Thigh>(-std::sin(Thigh(x)*ang), -std::cos(Thigh(x)*ang));
          return cmplx<Thigh>(-std::cos(Thigh(2*n-x)*ang), -std::sin(Thigh(2*n-x)*ang));
          }
        }
      }

  public:
    POCKETFFT_NOINLINE explicit sincos_2pibyn(size_t n)
      : N(n)
      {
      constexpr auto pi = 3.141592653589793238462643383279502884197L;
      Thigh ang = Thigh(0.25L*pi/n);  // 2*pi/(8n); calc() scales x by 8
      // only angles up to pi are tabulated; the rest come from symmetry
      size_t nval = (n+2)/2;
      shift = 1;
      // choose shift so that both tables have roughly sqrt(nval) entries
      while((size_t(1)<<shift)*(size_t(1)<<shift) < nval) ++shift;
      mask = (size_t(1)<<shift)-1;
      v1.resize(mask+1);
      v1[0].Set(Thigh(1), Thigh(0));
      for (size_t i=1; i<v1.size(); ++i)
        v1[i]=calc(i,n,ang);
      v2.resize((nval+mask)/(mask+1));
      v2[0].Set(Thigh(1), Thigh(0));
      for (size_t i=1; i<v2.size(); ++i)
        v2[i]=calc(i*(mask+1),n,ang);
      }

    /// exp(2*pi*i*idx/N); indices above N/2 use conjugate symmetry.
    cmplx<T> operator[](size_t idx) const
      {
      if (2*idx<=N)
        {
        auto x1=v1[idx&mask], x2=v2[idx>>shift];
        return cmplx<T>(T(x1.r*x2.r-x1.i*x2.i), T(x1.r*x2.i+x1.i*x2.r));
        }
      idx = N-idx;
      auto x1=v1[idx&mask], x2=v2[idx>>shift];
      return cmplx<T>(T(x1.r*x2.r-x1.i*x2.i), -T(x1.r*x2.i+x1.i*x2.r));
      }
  };
395
+
396
struct util // hack to avoid duplicate symbols
  {
  /// Largest prime factor of n (returns 1 for n<=1).
  static POCKETFFT_NOINLINE size_t largest_prime_factor (size_t n)
    {
    size_t res=1;
    while ((n&1)==0)
      { res=2; n>>=1; }
    for (size_t x=3; x*x<=n; x+=2)
      while ((n%x)==0)
        { res=x; n/=x; }
    if (n>1) res=n;
    return res;
    }

  /// Rough relative cost estimate of an FFT of length n: sum of prime
  /// factors (penalized above 5) times n. Used only to compare candidate
  /// lengths, not as an absolute cost.
  static POCKETFFT_NOINLINE double cost_guess (size_t n)
    {
    constexpr double lfp=1.1; // penalty for non-hardcoded larger factors
    size_t ni=n;
    double result=0.;
    while ((n&1)==0)
      { result+=2; n>>=1; }
    for (size_t x=3; x*x<=n; x+=2)
      while ((n%x)==0)
        {
        result+= (x<=5) ? double(x) : lfp*double(x); // penalize larger prime factors
        n/=x;
        }
    if (n>1) result+=(n<=5) ? double(n) : lfp*double(n);
    return result*double(ni);
    }

  /* inner workings of good_size_cmplx() */
  // Enumerates candidates f11*f117*f1175*2^a*3^b >= n (factors 2,3,5,7,11)
  // and keeps the smallest. The inner loop walks the 2/3 lattice: multiply
  // by 3 while below n, record and halve while above n, stop when odd.
  template<typename UIntT>
  static POCKETFFT_NOINLINE UIntT good_size_cmplx_typed(UIntT n)
    {
    static_assert(std::numeric_limits<UIntT>::is_integer && (!std::numeric_limits<UIntT>::is_signed),
                  "type must be unsigned integer");
    if (n<=12) return n;
    // bestfac starts at 2*n and candidates can reach bestfac*11 before the
    // loop bound check, hence the max/11/2 overflow guard.
    if (n>std::numeric_limits<UIntT>::max()/11/2)
      {
      // The algorithm below doesn't work for this value, the multiplication can overflow.
      if (sizeof(UIntT)<sizeof(std::uint64_t))
        {
        // We can try using this algorithm with 64-bit integers:
        auto res = good_size_cmplx_typed<std::uint64_t>(n);
        if (res<=std::numeric_limits<UIntT>::max())
          return static_cast<UIntT>(res);
        }
      // Otherwise, this size is ridiculously large, people shouldn't be computing FFTs this large.
      throw std::runtime_error("FFT size is too large.");
      }

    UIntT bestfac=2*n;  // 2*n is always a valid composite upper bound
    for (UIntT f11=1; f11<bestfac; f11*=11)
      for (UIntT f117=f11; f117<bestfac; f117*=7)
        for (UIntT f1175=f117; f1175<bestfac; f1175*=5)
          {
          UIntT x=f1175;
          while (x<n) x*=2;
          for (;;)
            {
            if (x<n)
              x*=3;
            else if (x>n)
              {
              if (x<bestfac) bestfac=x;
              if (x&1) break;  // odd: halving would drop below the 2/3 lattice
              x>>=1;
              }
            else
              return n;  // n itself is a good size
            }
          }
    return bestfac;
    }
  /* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n */
  static POCKETFFT_NOINLINE size_t good_size_cmplx(size_t n)
    {
    return good_size_cmplx_typed(n);
    }
  /* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n
     and a multiple of required_factor. */
  static POCKETFFT_NOINLINE size_t good_size_cmplx(size_t n,
                                                   size_t required_factor)
    {
    if (required_factor<1)
      throw std::runtime_error("required factor must not be 0");
    // round n up to a multiple count, find a good size for that, scale back
    return good_size_cmplx((n+required_factor-1)/required_factor) * required_factor;
    }

  /* inner workings of good_size_real() */
  // Same lattice walk as good_size_cmplx_typed, restricted to factors 2,3,5.
  template<typename UIntT>
  static POCKETFFT_NOINLINE UIntT good_size_real_typed(UIntT n)
    {
    static_assert(std::numeric_limits<UIntT>::is_integer && (!std::numeric_limits<UIntT>::is_signed),
                  "type must be unsigned integer");
    if (n<=6) return n;
    if (n>std::numeric_limits<UIntT>::max()/5/2)
      {
      // The algorithm below doesn't work for this value, the multiplication can overflow.
      if (sizeof(UIntT)<sizeof(std::uint64_t))
        {
        // We can try using this algorithm with 64-bit integers:
        std::uint64_t res = good_size_real_typed<std::uint64_t>(n);
        if (res<=std::numeric_limits<UIntT>::max())
          return static_cast<UIntT>(res);
        }
      // Otherwise, this size is ridiculously large, people shouldn't be computing FFTs this large.
      throw std::runtime_error("FFT size is too large.");
      }

    UIntT bestfac=2*n;
    for (UIntT f5=1; f5<bestfac; f5*=5)
      {
      UIntT x = f5;
      while (x<n) x *= 2;
      for (;;)
        {
        if (x<n)
          x*=3;
        else if (x>n)
          {
          if (x<bestfac) bestfac=x;
          if (x&1) break;
          x>>=1;
          }
        else
          return n;
        }
      }
    return bestfac;
    }
  /* returns the smallest composite of 2, 3, 5 which is >= n */
  static POCKETFFT_NOINLINE size_t good_size_real(size_t n)
    {
    return good_size_real_typed(n);
    }
  /* returns the smallest composite of 2, 3, 5 which is >= n
     and a multiple of required_factor. */
  static POCKETFFT_NOINLINE size_t good_size_real(size_t n,
                                                  size_t required_factor)
    {
    if (required_factor<1)
      throw std::runtime_error("required factor must not be 0");
    return good_size_real((n+required_factor-1)/required_factor) * required_factor;
    }

  /* inner workings of prev_good_size_cmplx() */
  // Mirror image of the search above: walk the lattice downward and keep
  // the largest candidate that stays <= n.
  template<typename UIntT>
  static POCKETFFT_NOINLINE UIntT prev_good_size_cmplx_typed(UIntT n)
    {
    static_assert(std::numeric_limits<UIntT>::is_integer && (!std::numeric_limits<UIntT>::is_signed),
                  "type must be unsigned integer");
    if (n<=12) return n;
    if (n>std::numeric_limits<UIntT>::max()/11)
      {
      // The algorithm below doesn't work for this value, the multiplication can overflow.
      if (sizeof(UIntT)<sizeof(std::uint64_t))
        {
        // We can try using this algorithm with 64-bit integers:
        auto res = prev_good_size_cmplx_typed<std::uint64_t>(n);
        if (res<=std::numeric_limits<UIntT>::max())
          return static_cast<UIntT>(res);
        }
      // Otherwise, this size is ridiculously large, people shouldn't be computing FFTs this large.
      throw std::runtime_error("FFT size is too large.");
      }

    UIntT bestfound = 1;
    for (UIntT f11 = 1;f11 <= n; f11 *= 11)
      for (UIntT f117 = f11; f117 <= n; f117 *= 7)
        for (UIntT f1175 = f117; f1175 <= n; f1175 *= 5)
          {
          UIntT x = f1175;
          while (x*2 <= n) x *= 2;
          if (x > bestfound) bestfound = x;
          while (true)
            {
            // trade factors of 2 for factors of 3 while staying <= n
            if (x * 3 <= n) x *= 3;
            else if (x % 2 == 0) x /= 2;
            else break;

            if (x > bestfound) bestfound = x;
            }
          }
    return bestfound;
    }
  /* returns the largest composite of 2, 3, 5, 7 and 11 which is <= n */
  static POCKETFFT_NOINLINE size_t prev_good_size_cmplx(size_t n)
    {
    return prev_good_size_cmplx_typed(n);
    }

  /* inner workings of prev_good_size_real() */
  // As prev_good_size_cmplx_typed, restricted to factors 2, 3, 5.
  template<typename UIntT>
  static POCKETFFT_NOINLINE UIntT prev_good_size_real_typed(UIntT n)
    {
    static_assert(std::numeric_limits<UIntT>::is_integer && (!std::numeric_limits<UIntT>::is_signed),
                  "type must be unsigned integer");
    if (n<=6) return n;
    if (n>std::numeric_limits<UIntT>::max()/5)
      {
      // The algorithm below doesn't work for this value, the multiplication can overflow.
      if (sizeof(UIntT)<sizeof(std::uint64_t))
        {
        // We can try using this algorithm with 64-bit integers:
        auto res = prev_good_size_real_typed<std::uint64_t>(n);
        if (res<=std::numeric_limits<UIntT>::max())
          return static_cast<UIntT>(res);
        }
      // Otherwise, this size is ridiculously large, people shouldn't be computing FFTs this large.
      throw std::runtime_error("FFT size is too large.");
      }

    UIntT bestfound = 1;
    for (UIntT f5 = 1; f5 <= n; f5 *= 5)
      {
      UIntT x = f5;
      while (x*2 <= n) x *= 2;
      if (x > bestfound) bestfound = x;
      while (true)
        {
        if (x * 3 <= n) x *= 3;
        else if (x % 2 == 0) x /= 2;
        else break;

        if (x > bestfound) bestfound = x;
        }
      }
    return bestfound;
    }
  /* returns the largest composite of 2, 3, 5 which is <= n */
  static POCKETFFT_NOINLINE size_t prev_good_size_real(size_t n)
    {
    return prev_good_size_real_typed(n);
    }

  /// Product of all extents (total number of elements in the array).
  static size_t prod(const shape_t &shape)
    {
    size_t res=1;
    for (auto sz: shape)
      res*=sz;
    return res;
    }

  /// Basic validation shared by all transforms: nonempty shape, matching
  /// stride ranks, and identical strides for in-place operation.
  static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
    const stride_t &stride_in, const stride_t &stride_out, bool inplace)
    {
    auto ndim = shape.size();
    if (ndim<1) throw std::runtime_error("ndim must be >= 1");
    if ((stride_in.size()!=ndim) || (stride_out.size()!=ndim))
      throw std::runtime_error("stride dimension mismatch");
    if (inplace && (stride_in!=stride_out))
      throw std::runtime_error("stride mismatch");
    }

  /// Validation for multi-axis transforms: every axis in range, no repeats.
  static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
    const stride_t &stride_in, const stride_t &stride_out, bool inplace,
    const shape_t &axes)
    {
    sanity_check(shape, stride_in, stride_out, inplace);
    auto ndim = shape.size();
    shape_t tmp(ndim,0);  // per-axis use counter to detect duplicates
    for (auto ax : axes)
      {
      if (ax>=ndim) throw std::invalid_argument("bad axis number");
      if (++tmp[ax]>1) throw std::invalid_argument("axis specified repeatedly");
      }
    }

  /// Validation for single-axis transforms.
  static POCKETFFT_NOINLINE void sanity_check(const shape_t &shape,
    const stride_t &stride_in, const stride_t &stride_out, bool inplace,
    size_t axis)
    {
    sanity_check(shape, stride_in, stride_out, inplace);
    if (axis>=shape.size()) throw std::invalid_argument("bad axis number");
    }

#ifdef POCKETFFT_NO_MULTITHREADING
  static size_t thread_count (size_t /*nthreads*/, const shape_t &/*shape*/,
    size_t /*axis*/, size_t /*vlen*/)
    { return 1; }
#else
  /// Heuristic number of worker threads: bounded by the count of
  /// independent 1-D transforms (divided by 4 for short axes, where
  /// per-thread overhead dominates) and by nthreads (0 = hardware limit).
  static size_t thread_count (size_t nthreads, const shape_t &shape,
    size_t axis, size_t vlen)
    {
    if (nthreads==1) return 1;
    size_t size = prod(shape);
    size_t parallel = size / (shape[axis] * vlen);
    if (shape[axis] < 1000)
      parallel /= 4;
    size_t max_threads = nthreads == 0 ?
      std::thread::hardware_concurrency() : nthreads;
    return std::max(size_t(1), std::min(parallel, max_threads));
    }
#endif
  };
693
+
694
+ namespace threading {
695
+
696
+ #ifdef POCKETFFT_NO_MULTITHREADING
697
+
698
/// Single-threaded build: the calling thread is always thread 0 of 1.
constexpr inline size_t thread_id() { return 0; }
constexpr inline size_t num_threads() { return 1; }

/// Single-threaded fallback of thread_map: run f once on the calling thread.
template <typename Func>
void thread_map(size_t /* nthreads */, Func f)
  { f(); }
704
+
705
+ #else
706
+
707
/// Per-thread index within the current thread_map invocation (0 outside one).
/// Returned by reference so thread_map can assign it.
inline size_t &thread_id()
  {
  static thread_local size_t thread_id_=0;
  return thread_id_;
  }
/// Number of threads participating in the current thread_map invocation.
inline size_t &num_threads()
  {
  static thread_local size_t num_threads_=1;
  return num_threads_;
  }
// hardware_concurrency() may report 0 (unknown); clamp to at least 1
static const size_t max_threads = std::max(1u, std::thread::hardware_concurrency());
718
+
719
/** Single-use countdown latch (pre-C++20 substitute for std::latch):
 *  initialized with a count, count_down() decrements it, wait() blocks
 *  until it reaches zero. */
class latch
  {
  std::atomic<size_t> num_left_;
  std::mutex mut_;
  std::condition_variable completed_;
  using lock_t = std::unique_lock<std::mutex>;

  public:
    explicit latch(size_t n): num_left_(n) {}

    void count_down()
      {
      // the decrement happens under the mutex so it cannot interleave with
      // the predicate check inside wait() and lose the final notification
      lock_t lock(mut_);
      if (--num_left_)
        return;
      completed_.notify_all();
      }

    void wait()
      {
      lock_t lock(mut_);
      completed_.wait(lock, [this]{ return is_ready(); });
      }
    bool is_ready() { return num_left_ == 0; }
  };
744
+
745
/** Mutex-protected FIFO queue. A separate atomic element count lets
 *  try_pop() and empty() return early on an empty queue without taking
 *  the lock (the common case for idle pool workers). */
template <typename T> class concurrent_queue
  {
  std::queue<T> q_;
  std::mutex mut_;
  std::atomic<size_t> size_;  // tracks q_.size(); updated only under mut_
  using lock_t = std::lock_guard<std::mutex>;

  public:

    void push(T val)
      {
      lock_t lock(mut_);
      ++size_;
      q_.push(std::move(val));
      }

    /// Pop the front element into val; returns false if nothing was popped.
    bool try_pop(T &val)
      {
      if (size_ == 0) return false;  // lock-free fast path
      lock_t lock(mut_);
      // Queue might have been emptied while we acquired the lock
      if (q_.empty()) return false;

      val = std::move(q_.front());
      --size_;
      q_.pop();
      return true;
      }

    bool empty() const { return size_==0; }
  };
776
+
777
+ // C++ allocator with support for over-aligned types
778
+ template <typename T> struct aligned_allocator
779
+ {
780
+ using value_type = T;
781
+ template <class U>
782
+ explicit aligned_allocator(const aligned_allocator<U>&) {}
783
+ aligned_allocator() = default;
784
+
785
+ T *allocate(size_t n)
786
+ {
787
+ void* mem = aligned_alloc(alignof(T), n*sizeof(T));
788
+ return static_cast<T*>(mem);
789
+ }
790
+
791
+ void deallocate(T *p, size_t /*n*/)
792
+ { aligned_dealloc(p); }
793
+ };
794
+
795
/** Simple work-stealing-free thread pool.
 *  Each worker owns a one-slot mailbox (work + mutex + condvar); submit()
 *  hands a task directly to an idle worker when possible, otherwise the
 *  task goes to a shared overflow queue that busy workers drain. */
class thread_pool
  {
    // A reasonable guess, probably close enough for most hardware
    static constexpr size_t cache_line_size = 64;
    // alignas keeps each worker's hot state on its own cache line
    // (avoids false sharing between workers)
    struct alignas(cache_line_size) worker
      {
      std::thread thread;
      std::condition_variable work_ready;
      std::mutex mut;
      // set while this worker is executing or has been handed work;
      // submit() uses test_and_set on it to claim an idle worker
      std::atomic_flag busy_flag = ATOMIC_FLAG_INIT;
      std::function<void()> work;  // one-slot mailbox, guarded by mut

      /// Worker loop: wait for mailbox work or shutdown, run the task,
      /// then drain the shared overflow queue before going idle.
      void worker_main(
        std::atomic<bool> &shutdown_flag,
        std::atomic<size_t> &unscheduled_tasks,
        concurrent_queue<std::function<void()>> &overflow_work)
        {
        using lock_t_inner = std::unique_lock<std::mutex>;
        bool expect_work = true;
        while (!shutdown_flag || expect_work)
          {
          std::function<void()> local_work;
          if (expect_work || unscheduled_tasks == 0)
            {
            lock_t_inner lock(mut);
            // Wait until there is work to be executed
            work_ready.wait(lock, [&]{ return (work || shutdown_flag); });
            local_work.swap(work);
            expect_work = false;
            }

          bool marked_busy = false;
          if (local_work)
            {
            marked_busy = true;  // busy_flag was set by submit() for us
            local_work();
            }

          if (!overflow_work.empty())
            {
            // claim busy status before draining the overflow queue; if
            // another submit just claimed us, go wait for mailbox work
            if (!marked_busy && busy_flag.test_and_set())
              {
              expect_work = true;
              continue;
              }
            marked_busy = true;

            while (overflow_work.try_pop(local_work))
              {
              --unscheduled_tasks;
              local_work();
              }
            }

          if (marked_busy) busy_flag.clear();
          }
        }
      };

    concurrent_queue<std::function<void()>> overflow_work_;
    std::mutex mut_;  // guards submit/shutdown/create_threads
    std::vector<worker, aligned_allocator<worker>> workers_;
    std::atomic<bool> shutdown_;
    std::atomic<size_t> unscheduled_tasks_;  // tasks in the overflow queue
    using lock_t = std::lock_guard<std::mutex>;

    /// Start one thread per worker slot; on failure, shut down the
    /// already-started threads before rethrowing.
    void create_threads()
      {
      lock_t lock(mut_);
      size_t nthreads=workers_.size();
      for (size_t i=0; i<nthreads; ++i)
        {
        try
          {
          auto *worker_i = &workers_[i];
          worker_i->busy_flag.clear();
          worker_i->work = nullptr;
          worker_i->thread = std::thread([worker_i, this]
            {
            worker_i->worker_main(shutdown_, unscheduled_tasks_, overflow_work_);
            });
          }
        catch (...)
          {
          shutdown_locked();
          throw;
          }
        }
      }

    /// Signal shutdown, wake all workers, and join their threads.
    /// Caller must hold mut_.
    void shutdown_locked()
      {
      shutdown_ = true;
      for (auto &worker_i : workers_)
        worker_i.work_ready.notify_all();

      for (auto &worker_i : workers_)
        if (worker_i.thread.joinable())
          worker_i.thread.join();
      }

  public:
    explicit thread_pool(size_t nthreads):
      workers_(nthreads)
      { create_threads(); }

    thread_pool(): thread_pool(max_threads) {}

    ~thread_pool() { shutdown(); }

    /// Schedule a task: hand it to an idle worker's mailbox if one exists,
    /// otherwise enqueue it on the overflow queue.
    /// @throws std::runtime_error if the pool has been shut down.
    void submit(std::function<void()> work)
      {
      lock_t lock(mut_);
      if (shutdown_)
        throw std::runtime_error("Work item submitted after shutdown");

      ++unscheduled_tasks_;

      // First check for any idle workers and wake those
      for (auto &worker_i : workers_)
        if (!worker_i.busy_flag.test_and_set())
          {
          --unscheduled_tasks_;
          {
            lock_t lock_inner(worker_i.mut);
            worker_i.work = std::move(work);
          }
          worker_i.work_ready.notify_one();
          return;
          }

      // If no workers were idle, push onto the overflow queue for later
      overflow_work_.push(std::move(work));
      }

    void shutdown()
      {
      lock_t lock(mut_);
      shutdown_locked();
      }

    /// Restart after shutdown() — used by the fork handlers in get_pool().
    void restart()
      {
      shutdown_ = false;
      create_threads();
      }
  };
942
+
943
/// Lazily-constructed process-wide pool instance.
/// With POCKETFFT_PTHREADS, fork handlers are installed once: the pool's
/// threads are joined before fork and recreated in both parent and child,
/// since worker threads do not survive fork() in the child process.
inline thread_pool & get_pool()
  {
  static thread_pool pool;
#ifdef POCKETFFT_PTHREADS
  static std::once_flag f;
  std::call_once(f,
    []{
    pthread_atfork(
      +[]{ get_pool().shutdown(); },  // prepare
      +[]{ get_pool().restart(); },   // parent
      +[]{ get_pool().restart(); }    // child
      );
    });
#endif

  return pool;
  }
960
+
961
/** Run f once on each of nthreads pool tasks (nthreads==0 means the
 *  hardware thread count). Inside f, thread_id() / num_threads() identify
 *  the participant. Blocks until all invocations finish; if any of them
 *  throws, the first captured exception is rethrown on the caller. */
template <typename Func>
void thread_map(size_t nthreads, Func f)
  {
  if (nthreads == 0)
    nthreads = max_threads;

  if (nthreads == 1)
    { f(); return; }  // no pool involvement for the serial case

  auto & pool = get_pool();
  latch counter(nthreads);
  std::exception_ptr ex;
  std::mutex ex_mut;  // serializes writes to ex from failing tasks
  for (size_t i=0; i<nthreads; ++i)
    {
    pool.submit(
      [&f, &counter, &ex, &ex_mut, i, nthreads] {
      thread_id() = i;
      num_threads() = nthreads;
      try { f(); }
      catch (...)
        {
        std::lock_guard<std::mutex> lock(ex_mut);
        ex = std::current_exception();
        }
      // counted down even on failure so the caller never deadlocks
      counter.count_down();
      });
    }
  counter.wait();
  if (ex)
    std::rethrow_exception(ex);
  }
994
+
995
+ #endif
996
+
997
+ }
998
+
999
+ //
1000
+ // complex FFTPACK transforms
1001
+ //
1002
+
1003
// Complex-valued FFT plan in the FFTPACK mould: the transform length is
// decomposed into radices (dedicated kernels for 2,3,4,5,7,8,11; a generic
// pass for everything else) and twiddle factors are precomputed per factor.
// T0 is the twiddle/scalar type; exec() may run on a (possibly vectorized)
// element type T built on T0.
template<typename T0> class cfftp
  {
  private:
    // One entry per radix factor: the factor itself plus pointers into `mem`
    // for its twiddles; `tws` is only populated for factors >11 (generic pass).
    struct fctdata
      {
      size_t fct;
      cmplx<T0> *tw, *tws;
      };

    size_t length;              // transform length
    arr<cmplx<T0>> mem;         // backing storage for all twiddle factors
    std::vector<fctdata> fact;  // radix decomposition of `length`

    void add_factor(size_t factor)
      { fact.push_back({factor, nullptr, nullptr}); }

    // Radix-2 pass. In every passN below:
    //   ido = stride between butterflies, l1 = number of butterfly groups,
    //   cc = input, ch = output, wa = twiddles for this factor.
    // The CH/CC/WA lambdas are just multi-dimensional index helpers.
    template<bool fwd, typename T> void pass2 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+2*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)  // no twiddling needed
        for (size_t k=0; k<l1; ++k)
          {
          CH(0,k,0) = CC(0,0,k)+CC(0,1,k);
          CH(0,k,1) = CC(0,0,k)-CC(0,1,k);
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          CH(0,k,0) = CC(0,0,k)+CC(0,1,k);
          CH(0,k,1) = CC(0,0,k)-CC(0,1,k);
          for (size_t i=1; i<ido; ++i)
            {
            CH(i,k,0) = CC(i,0,k)+CC(i,1,k);
            special_mul<fwd>(CC(i,0,k)-CC(i,1,k),WA(0,i),CH(i,k,1));
            }
          }
      }

// Radix-3 butterfly helpers (PM computes sum/difference of its last two args).
#define POCKETFFT_PREP3(idx) \
        T t0 = CC(idx,0,k), t1, t2; \
        PM (t1,t2,CC(idx,1,k),CC(idx,2,k)); \
        CH(idx,k,0)=t0+t1;
#define POCKETFFT_PARTSTEP3a(u1,u2,twr,twi) \
        { \
        T ca=t0+t1*twr; \
        T cb{-t2.i*twi, t2.r*twi}; \
        PM(CH(0,k,u1),CH(0,k,u2),ca,cb) ;\
        }
#define POCKETFFT_PARTSTEP3b(u1,u2,twr,twi) \
        { \
        T ca=t0+t1*twr; \
        T cb{-t2.i*twi, t2.r*twi}; \
        special_mul<fwd>(ca+cb,WA(u1-1,i),CH(i,k,u1)); \
        special_mul<fwd>(ca-cb,WA(u2-1,i),CH(i,k,u2)); \
        }
    // Radix-3 pass; tw1i flips sign with direction (fwd vs. backward).
    template<bool fwd, typename T> void pass3 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      constexpr T0 tw1r=-0.5,
                   tw1i= (fwd ? -1: 1) * T0(0.8660254037844386467637231707529362L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+3*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP3(0)
          POCKETFFT_PARTSTEP3a(1,2,tw1r,tw1i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP3(0)
          POCKETFFT_PARTSTEP3a(1,2,tw1r,tw1i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP3(i)
            POCKETFFT_PARTSTEP3b(1,2,tw1r,tw1i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP3b
#undef POCKETFFT_PARTSTEP3a
#undef POCKETFFT_PREP3

    // Radix-4 pass; ROTX90 applies the +/-i rotation depending on direction.
    template<bool fwd, typename T> void pass4 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+4*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          T t1, t2, t3, t4;
          PM(t2,t1,CC(0,0,k),CC(0,2,k));
          PM(t3,t4,CC(0,1,k),CC(0,3,k));
          ROTX90<fwd>(t4);
          PM(CH(0,k,0),CH(0,k,2),t2,t3);
          PM(CH(0,k,1),CH(0,k,3),t1,t4);
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          T t1, t2, t3, t4;
          PM(t2,t1,CC(0,0,k),CC(0,2,k));
          PM(t3,t4,CC(0,1,k),CC(0,3,k));
          ROTX90<fwd>(t4);
          PM(CH(0,k,0),CH(0,k,2),t2,t3);
          PM(CH(0,k,1),CH(0,k,3),t1,t4);
          }
          for (size_t i=1; i<ido; ++i)
            {
            T t1, t2, t3, t4;
            T cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
            PM(t2,t1,cc0,cc2);
            PM(t3,t4,cc1,cc3);
            ROTX90<fwd>(t4);
            CH(i,k,0) = t2+t3;
            special_mul<fwd>(t1+t4,WA(0,i),CH(i,k,1));
            special_mul<fwd>(t2-t3,WA(1,i),CH(i,k,2));
            special_mul<fwd>(t1-t4,WA(2,i),CH(i,k,3));
            }
          }
      }

// Radix-5 butterfly helpers.
// NB: in PARTSTEP5a/b there is deliberately no operator between the two
// products on the cb.i / cb.r lines — the sign is carried by the macro
// arguments themselves (they are invoked as e.g. "+tw1i" / "-tw1i"), so the
// expression becomes "twai*t4.r + twbi*t3.r" etc. after substitution.
#define POCKETFFT_PREP5(idx) \
        T t0 = CC(idx,0,k), t1, t2, t3, t4; \
        PM (t1,t4,CC(idx,1,k),CC(idx,4,k)); \
        PM (t2,t3,CC(idx,2,k),CC(idx,3,k)); \
        CH(idx,k,0).r=t0.r+t1.r+t2.r; \
        CH(idx,k,0).i=t0.i+t1.i+t2.i;

#define POCKETFFT_PARTSTEP5a(u1,u2,twar,twbr,twai,twbi) \
        { \
        T ca,cb; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        PM(CH(0,k,u1),CH(0,k,u2),ca,cb); \
        }

#define POCKETFFT_PARTSTEP5b(u1,u2,twar,twbr,twai,twbi) \
        { \
        T ca,cb; \
        ca.r=t0.r+twar*t1.r+twbr*t2.r; \
        ca.i=t0.i+twar*t1.i+twbr*t2.i; \
        cb.i=twai*t4.r twbi*t3.r; \
        cb.r=-(twai*t4.i twbi*t3.i); \
        special_mul<fwd>(ca+cb,WA(u1-1,i),CH(i,k,u1)); \
        special_mul<fwd>(ca-cb,WA(u2-1,i),CH(i,k,u2)); \
        }
    // Radix-5 pass; twiddle constants are cos/sin(2*pi*k/5), imaginary parts
    // sign-flipped for the forward direction.
    template<bool fwd, typename T> void pass5 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      constexpr T0 tw1r= T0(0.3090169943749474241022934171828191L),
                   tw1i= (fwd ? -1: 1) * T0(0.9510565162951535721164393333793821L),
                   tw2r= T0(-0.8090169943749474241022934171828191L),
                   tw2i= (fwd ? -1: 1) * T0(0.5877852522924731291687059546390728L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+5*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP5(0)
          POCKETFFT_PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
          POCKETFFT_PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP5(0)
          POCKETFFT_PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
          POCKETFFT_PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP5(i)
            POCKETFFT_PARTSTEP5b(1,4,tw1r,tw2r,+tw1i,+tw2i)
            POCKETFFT_PARTSTEP5b(2,3,tw2r,tw1r,+tw2i,-tw1i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP5b
#undef POCKETFFT_PARTSTEP5a
#undef POCKETFFT_PREP5

// Radix-7 butterfly helpers. Same sign-in-argument convention as radix 5:
// y1/y2/y3 are passed with explicit +/- signs, supplying the missing
// operators on the cb.i / cb.r lines.
#define POCKETFFT_PREP7(idx) \
        T t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7; \
        PM (t2,t7,CC(idx,1,k),CC(idx,6,k)); \
        PM (t3,t6,CC(idx,2,k),CC(idx,5,k)); \
        PM (t4,t5,CC(idx,3,k),CC(idx,4,k)); \
        CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r; \
        CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i;

#define POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,out1,out2) \
        { \
        T ca,cb; \
        ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r; \
        ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i; \
        cb.i=y1*t7.r y2*t6.r y3*t5.r; \
        cb.r=-(y1*t7.i y2*t6.i y3*t5.i); \
        PM(out1,out2,ca,cb); \
        }
#define POCKETFFT_PARTSTEP7a(u1,u2,x1,x2,x3,y1,y2,y3) \
        POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,CH(0,k,u1),CH(0,k,u2))
#define POCKETFFT_PARTSTEP7(u1,u2,x1,x2,x3,y1,y2,y3) \
        { \
        T da,db; \
        POCKETFFT_PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,da,db) \
        special_mul<fwd>(da,WA(u1-1,i),CH(i,k,u1)); \
        special_mul<fwd>(db,WA(u2-1,i),CH(i,k,u2)); \
        }

    // Radix-7 pass.
    template<bool fwd, typename T> void pass7(size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      constexpr T0 tw1r= T0(0.6234898018587335305250048840042398L),
                   tw1i= (fwd ? -1 : 1) * T0(0.7818314824680298087084445266740578L),
                   tw2r= T0(-0.2225209339563144042889025644967948L),
                   tw2i= (fwd ? -1 : 1) * T0(0.9749279121818236070181316829939312L),
                   tw3r= T0(-0.9009688679024191262361023195074451L),
                   tw3i= (fwd ? -1 : 1) * T0(0.433883739117558120475768332848359L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+7*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP7(0)
          POCKETFFT_PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
          POCKETFFT_PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
          POCKETFFT_PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP7(0)
          POCKETFFT_PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
          POCKETFFT_PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
          POCKETFFT_PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP7(i)
            POCKETFFT_PARTSTEP7(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
            POCKETFFT_PARTSTEP7(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
            POCKETFFT_PARTSTEP7(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP7
#undef POCKETFFT_PARTSTEP7a0
#undef POCKETFFT_PARTSTEP7a
#undef POCKETFFT_PREP7

    // In-place rotation by 45 degrees (times 1/sqrt(2)); direction-dependent.
    template <bool fwd, typename T> void ROTX45(T &a) const
      {
      constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
      if (fwd)
        { auto tmp_=a.r; a.r=hsqt2*(a.r+a.i); a.i=hsqt2*(a.i-tmp_); }
      else
        { auto tmp_=a.r; a.r=hsqt2*(a.r-a.i); a.i=hsqt2*(a.i+tmp_); }
      }
    // In-place rotation by 135 degrees (times 1/sqrt(2)); direction-dependent.
    template <bool fwd, typename T> void ROTX135(T &a) const
      {
      constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
      if (fwd)
        { auto tmp_=a.r; a.r=hsqt2*(a.i-a.r); a.i=hsqt2*(-tmp_-a.i); }
      else
        { auto tmp_=a.r; a.r=hsqt2*(-a.r-a.i); a.i=hsqt2*(tmp_-a.i); }
      }

    // Radix-8 pass, built from radix-2/4 style add/sub stages plus the
    // 45/90/135-degree rotations above.
    template<bool fwd, typename T> void pass8 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+8*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          T a0, a1, a2, a3, a4, a5, a6, a7;
          PM(a1,a5,CC(0,1,k),CC(0,5,k));
          PM(a3,a7,CC(0,3,k),CC(0,7,k));
          PMINPLACE(a1,a3);
          ROTX90<fwd>(a3);

          ROTX90<fwd>(a7);
          PMINPLACE(a5,a7);
          ROTX45<fwd>(a5);
          ROTX135<fwd>(a7);

          PM(a0,a4,CC(0,0,k),CC(0,4,k));
          PM(a2,a6,CC(0,2,k),CC(0,6,k));
          PM(CH(0,k,0),CH(0,k,4),a0+a2,a1);
          PM(CH(0,k,2),CH(0,k,6),a0-a2,a3);
          ROTX90<fwd>(a6);
          PM(CH(0,k,1),CH(0,k,5),a4+a6,a5);
          PM(CH(0,k,3),CH(0,k,7),a4-a6,a7);
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          T a0, a1, a2, a3, a4, a5, a6, a7;
          PM(a1,a5,CC(0,1,k),CC(0,5,k));
          PM(a3,a7,CC(0,3,k),CC(0,7,k));
          PMINPLACE(a1,a3);
          ROTX90<fwd>(a3);

          ROTX90<fwd>(a7);
          PMINPLACE(a5,a7);
          ROTX45<fwd>(a5);
          ROTX135<fwd>(a7);

          PM(a0,a4,CC(0,0,k),CC(0,4,k));
          PM(a2,a6,CC(0,2,k),CC(0,6,k));
          PM(CH(0,k,0),CH(0,k,4),a0+a2,a1);
          PM(CH(0,k,2),CH(0,k,6),a0-a2,a3);
          ROTX90<fwd>(a6);
          PM(CH(0,k,1),CH(0,k,5),a4+a6,a5);
          PM(CH(0,k,3),CH(0,k,7),a4-a6,a7);
          }
          for (size_t i=1; i<ido; ++i)
            {
            T a0, a1, a2, a3, a4, a5, a6, a7;
            PM(a1,a5,CC(i,1,k),CC(i,5,k));
            PM(a3,a7,CC(i,3,k),CC(i,7,k));
            ROTX90<fwd>(a7);
            PMINPLACE(a1,a3);
            ROTX90<fwd>(a3);
            PMINPLACE(a5,a7);
            ROTX45<fwd>(a5);
            ROTX135<fwd>(a7);
            PM(a0,a4,CC(i,0,k),CC(i,4,k));
            PM(a2,a6,CC(i,2,k),CC(i,6,k));
            PMINPLACE(a0,a2);
            CH(i,k,0) = a0+a1;
            special_mul<fwd>(a0-a1,WA(3,i),CH(i,k,4));
            special_mul<fwd>(a2+a3,WA(1,i),CH(i,k,2));
            special_mul<fwd>(a2-a3,WA(5,i),CH(i,k,6));
            ROTX90<fwd>(a6);
            PMINPLACE(a4,a6);
            special_mul<fwd>(a4+a5,WA(0,i),CH(i,k,1));
            special_mul<fwd>(a4-a5,WA(4,i),CH(i,k,5));
            special_mul<fwd>(a6+a7,WA(2,i),CH(i,k,3));
            special_mul<fwd>(a6-a7,WA(6,i),CH(i,k,7));
            }
          }
      }


// Radix-11 butterfly helpers. Same sign-in-argument convention as radix 5/7:
// y1..y5 are invoked with explicit +/- signs, supplying the missing
// operators on the cb.i / cb.r lines.
#define POCKETFFT_PREP11(idx) \
        T t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \
        PM (t2,t11,CC(idx,1,k),CC(idx,10,k)); \
        PM (t3,t10,CC(idx,2,k),CC(idx, 9,k)); \
        PM (t4,t9 ,CC(idx,3,k),CC(idx, 8,k)); \
        PM (t5,t8 ,CC(idx,4,k),CC(idx, 7,k)); \
        PM (t6,t7 ,CC(idx,5,k),CC(idx, 6,k)); \
        CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r+t5.r+t6.r; \
        CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i+t5.i+t6.i;

#define POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,out1,out2) \
        { \
        T ca = t1 + t2*x1 + t3*x2 + t4*x3 + t5*x4 +t6*x5, \
          cb; \
        cb.i=y1*t11.r y2*t10.r y3*t9.r y4*t8.r y5*t7.r; \
        cb.r=-(y1*t11.i y2*t10.i y3*t9.i y4*t8.i y5*t7.i ); \
        PM(out1,out2,ca,cb); \
        }
#define POCKETFFT_PARTSTEP11a(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
        POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,CH(0,k,u1),CH(0,k,u2))
#define POCKETFFT_PARTSTEP11(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
        { \
        T da,db; \
        POCKETFFT_PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,da,db) \
        special_mul<fwd>(da,WA(u1-1,i),CH(i,k,u1)); \
        special_mul<fwd>(db,WA(u2-1,i),CH(i,k,u2)); \
        }

    // Radix-11 pass.
    template<bool fwd, typename T> void pass11 (size_t ido, size_t l1,
      const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa) const
      {
      constexpr T0 tw1r= T0(0.8412535328311811688618116489193677L),
                   tw1i= (fwd ? -1 : 1) * T0(0.5406408174555975821076359543186917L),
                   tw2r= T0(0.4154150130018864255292741492296232L),
                   tw2i= (fwd ? -1 : 1) * T0(0.9096319953545183714117153830790285L),
                   tw3r= T0(-0.1423148382732851404437926686163697L),
                   tw3i= (fwd ? -1 : 1) * T0(0.9898214418809327323760920377767188L),
                   tw4r= T0(-0.6548607339452850640569250724662936L),
                   tw4i= (fwd ? -1 : 1) * T0(0.7557495743542582837740358439723444L),
                   tw5r= T0(-0.9594929736144973898903680570663277L),
                   tw5i= (fwd ? -1 : 1) * T0(0.2817325568414296977114179153466169L);

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+11*c)]; };
      auto WA = [wa, ido](size_t x, size_t i)
        { return wa[i-1+x*(ido-1)]; };

      if (ido==1)
        for (size_t k=0; k<l1; ++k)
          {
          POCKETFFT_PREP11(0)
          POCKETFFT_PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
          POCKETFFT_PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
          POCKETFFT_PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
          POCKETFFT_PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
          POCKETFFT_PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
          }
      else
        for (size_t k=0; k<l1; ++k)
          {
          {
          POCKETFFT_PREP11(0)
          POCKETFFT_PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
          POCKETFFT_PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
          POCKETFFT_PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
          POCKETFFT_PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
          POCKETFFT_PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
          }
          for (size_t i=1; i<ido; ++i)
            {
            POCKETFFT_PREP11(i)
            POCKETFFT_PARTSTEP11(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
            POCKETFFT_PARTSTEP11(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
            POCKETFFT_PARTSTEP11(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
            POCKETFFT_PARTSTEP11(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
            POCKETFFT_PARTSTEP11(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
            }
          }
      }

#undef POCKETFFT_PARTSTEP11
#undef POCKETFFT_PARTSTEP11a0
#undef POCKETFFT_PARTSTEP11a
#undef POCKETFFT_PREP11

    // Generic pass for prime factors without a dedicated kernel (ip>11).
    // Works partially in place (cc serves as both input and output buffer);
    // csarr holds the ip-th roots of unity precomputed in comp_twiddle().
    template<bool fwd, typename T> void passg (size_t ido, size_t ip,
      size_t l1, T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
      const cmplx<T0> * POCKETFFT_RESTRICT wa,
      const cmplx<T0> * POCKETFFT_RESTRICT csarr) const
      {
      const size_t cdim=ip;
      size_t ipph = (ip+1)/2;
      size_t idl1 = ido*l1;

      auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
        { return ch[a+ido*(b+l1*c)]; };
      auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
        { return cc[a+ido*(b+cdim*c)]; };
      auto CX = [cc, ido, l1](size_t a, size_t b, size_t c) -> T&
        { return cc[a+ido*(b+l1*c)]; };
      auto CX2 = [cc, idl1](size_t a, size_t b) -> T&
        { return cc[a+idl1*b]; };
      auto CH2 = [ch, idl1](size_t a, size_t b) -> const T&
        { return ch[a+idl1*b]; };

      // Direction-adjusted roots of unity (imaginary part conjugated for fwd).
      arr<cmplx<T0>> wal(ip);
      wal[0] = cmplx<T0>(1., 0.);
      for (size_t i=1; i<ip; ++i)
        wal[i]=cmplx<T0>(csarr[i].r,fwd ? -csarr[i].i : csarr[i].i);

      // Fold symmetric input pairs (j, ip-j) into sums/differences in ch.
      for (size_t k=0; k<l1; ++k)
        for (size_t i=0; i<ido; ++i)
          CH(i,k,0) = CC(i,0,k);
      for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
        for (size_t k=0; k<l1; ++k)
          for (size_t i=0; i<ido; ++i)
            PM(CH(i,k,j),CH(i,k,jc),CC(i,j,k),CC(i,jc,k));
      // DC output: plain sum of all folded terms.
      for (size_t k=0; k<l1; ++k)
        for (size_t i=0; i<ido; ++i)
          {
          T tmp = CH(i,k,0);
          for (size_t j=1; j<ipph; ++j)
            tmp+=CH(i,k,j);
          CX(i,k,0) = tmp;
          }
      // Accumulate the remaining outputs; the twiddle index iwal walks
      // multiples of l modulo ip. Inner loops are partially unrolled by 2.
      for (size_t l=1, lc=ip-1; l<ipph; ++l, --lc)
        {
        // j=0
        for (size_t ik=0; ik<idl1; ++ik)
          {
          CX2(ik,l).r = CH2(ik,0).r+wal[l].r*CH2(ik,1).r+wal[2*l].r*CH2(ik,2).r;
          CX2(ik,l).i = CH2(ik,0).i+wal[l].r*CH2(ik,1).i+wal[2*l].r*CH2(ik,2).i;
          CX2(ik,lc).r=-wal[l].i*CH2(ik,ip-1).i-wal[2*l].i*CH2(ik,ip-2).i;
          CX2(ik,lc).i=wal[l].i*CH2(ik,ip-1).r+wal[2*l].i*CH2(ik,ip-2).r;
          }

        size_t iwal=2*l;
        size_t j=3, jc=ip-3;
        for (; j<ipph-1; j+=2, jc-=2)
          {
          iwal+=l; if (iwal>ip) iwal-=ip;
          cmplx<T0> xwal=wal[iwal];
          iwal+=l; if (iwal>ip) iwal-=ip;
          cmplx<T0> xwal2=wal[iwal];
          for (size_t ik=0; ik<idl1; ++ik)
            {
            CX2(ik,l).r += CH2(ik,j).r*xwal.r+CH2(ik,j+1).r*xwal2.r;
            CX2(ik,l).i += CH2(ik,j).i*xwal.r+CH2(ik,j+1).i*xwal2.r;
            CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i+CH2(ik,jc-1).i*xwal2.i;
            CX2(ik,lc).i += CH2(ik,jc).r*xwal.i+CH2(ik,jc-1).r*xwal2.i;
            }
          }
        for (; j<ipph; ++j, --jc)  // remainder of the unrolled loop
          {
          iwal+=l; if (iwal>ip) iwal-=ip;
          cmplx<T0> xwal=wal[iwal];
          for (size_t ik=0; ik<idl1; ++ik)
            {
            CX2(ik,l).r += CH2(ik,j).r*xwal.r;
            CX2(ik,l).i += CH2(ik,j).i*xwal.r;
            CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i;
            CX2(ik,lc).i += CH2(ik,jc).r*xwal.i;
            }
          }
        }

      // shuffling and twiddling
      if (ido==1)
        for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
          for (size_t ik=0; ik<idl1; ++ik)
            {
            T t1=CX2(ik,j), t2=CX2(ik,jc);
            PM(CX2(ik,j),CX2(ik,jc),t1,t2);
            }
      else
        {
        for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)
          for (size_t k=0; k<l1; ++k)
            {
            T t1=CX(0,k,j), t2=CX(0,k,jc);
            PM(CX(0,k,j),CX(0,k,jc),t1,t2);
            for (size_t i=1; i<ido; ++i)
              {
              T x1, x2;
              PM(x1,x2,CX(i,k,j),CX(i,k,jc));
              size_t idij=(j-1)*(ido-1)+i-1;
              special_mul<fwd>(x1,wa[idij],CX(i,k,j));
              idij=(jc-1)*(ido-1)+i-1;
              special_mul<fwd>(x2,wa[idij],CX(i,k,jc));
              }
            }
        }
      }

    // Run all factor passes over `c`, ping-ponging between `c` and a scratch
    // buffer, then apply the normalization factor `fct`.
    template<bool fwd, typename T> void pass_all(T c[], T0 fct) const
      {
      if (length==1) { c[0]*=fct; return; }
      size_t l1=1;
      arr<T> ch(length);
      T *p1=c, *p2=ch.data();

      for(size_t k1=0; k1<fact.size(); k1++)
        {
        size_t ip=fact[k1].fct;
        size_t l2=ip*l1;
        size_t ido = length/l2;
        if (ip==4)
          pass4<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==8)
          pass8<fwd>(ido, l1, p1, p2, fact[k1].tw);
        else if(ip==2)
          pass2<fwd>(ido, l1, p1, p2, fact[k1].tw);
        else if(ip==3)
          pass3<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==5)
          pass5<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==7)
          pass7<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else if(ip==11)
          pass11<fwd> (ido, l1, p1, p2, fact[k1].tw);
        else
          {
          passg<fwd>(ido, ip, l1, p1, p2, fact[k1].tw, fact[k1].tws);
          // passg leaves its result in p1's buffer, so this extra swap
          // cancels the unconditional swap below (net: no swap).
          std::swap(p1,p2);
          }
        std::swap(p1,p2);
        l1=l2;
        }
      // Result may have ended up in the scratch buffer; copy/scale back to c.
      if (p1!=c)
        {
        if (fct!=1.)
          for (size_t i=0; i<length; ++i)
            c[i] = ch[i]*fct;
        else
          std::copy_n (p1, length, c);
        }
      else
        if (fct!=1.)
          for (size_t i=0; i<length; ++i)
            c[i] *= fct;
      }

  public:
    // Execute the transform in place on c, scaled by fct; fwd selects the
    // forward (true) or backward (false) direction.
    template<typename T> void exec(T c[], T0 fct, bool fwd) const
      { fwd ? pass_all<true>(c, fct) : pass_all<false>(c, fct); }

  private:
    // Decompose `length` into the radix sequence used by pass_all:
    // all factors of 8, then 4, then at most one 2 (moved to the front),
    // then odd primes in ascending order.
    POCKETFFT_NOINLINE void factorize()
      {
      size_t len=length;
      while ((len&7)==0)
        { add_factor(8); len>>=3; }
      while ((len&3)==0)
        { add_factor(4); len>>=2; }
      if ((len&1)==0)
        {
        len>>=1;
        // factor 2 should be at the front of the factor list
        add_factor(2);
        std::swap(fact[0].fct, fact.back().fct);
        }
      for (size_t divisor=3; divisor*divisor<=len; divisor+=2)
        while ((len%divisor)==0)
          {
          add_factor(divisor);
          len/=divisor;
          }
      if (len>1) add_factor(len);  // remaining prime
      }

    // Total number of twiddle entries needed for all factors; generic-pass
    // factors (>11) need an extra ip entries for their root-of-unity table.
    size_t twsize() const
      {
      size_t twsize=0, l1=1;
      for (size_t k=0; k<fact.size(); ++k)
        {
        size_t ip=fact[k].fct, ido= length/(l1*ip);
        twsize+=(ip-1)*(ido-1);
        if (ip>11)
          twsize+=ip;
        l1*=ip;
        }
      return twsize;
      }

    // Fill `mem` with twiddle factors and point each fact[k].tw/.tws into it.
    // Layout must stay consistent with twsize() above.
    void comp_twiddle()
      {
      sincos_2pibyn<T0> twiddle(length);
      size_t l1=1;
      size_t memofs=0;
      for (size_t k=0; k<fact.size(); ++k)
        {
        size_t ip=fact[k].fct, ido=length/(l1*ip);
        fact[k].tw=mem.data()+memofs;
        memofs+=(ip-1)*(ido-1);
        for (size_t j=1; j<ip; ++j)
          for (size_t i=1; i<ido; ++i)
            fact[k].tw[(j-1)*(ido-1)+i-1] = twiddle[j*l1*i];
        if (ip>11)  // extra table for the generic pass
          {
          fact[k].tws=mem.data()+memofs;
          memofs+=ip;
          for (size_t j=0; j<ip; ++j)
            fact[k].tws[j] = twiddle[j*l1*ido];
          }
        l1*=ip;
        }
      }

  public:
    // Build a plan for the given length: factorize, then precompute twiddles.
    // Throws std::runtime_error for length 0; length 1 needs no setup.
    POCKETFFT_NOINLINE explicit cfftp(size_t length_)
      : length(length_)
      {
      if (length==0) throw std::runtime_error("zero-length FFT requested");
      if (length==1) return;
      factorize();
      mem.resize(twsize());
      comp_twiddle();
      }
  };
1724
+
1725
+ //
1726
+ // real-valued FFTPACK transforms
1727
+ //
1728
+
1729
+ template<typename T0> class rfftp
1730
+ {
1731
+ private:
1732
+ struct fctdata
1733
+ {
1734
+ size_t fct;
1735
+ T0 *tw, *tws;
1736
+ };
1737
+
1738
+ size_t length;
1739
+ arr<T0> mem;
1740
+ std::vector<fctdata> fact;
1741
+
1742
+ void add_factor(size_t factor)
1743
+ { fact.push_back({factor, nullptr, nullptr}); }
1744
+
1745
+ /* (a+ib) = conj(c+id) * (e+if) */
1746
+ template<typename T1, typename T2, typename T3> inline void MULPM
1747
+ (T1 &a, T1 &b, T2 c, T2 d, T3 e, T3 f) const
1748
+ { a=c*e+d*f; b=c*f-d*e; }
1749
+
1750
+ template<typename T> void radf2 (size_t ido, size_t l1,
1751
+ const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
1752
+ const T0 * POCKETFFT_RESTRICT wa) const
1753
+ {
1754
+ auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
1755
+ auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
1756
+ { return cc[a+ido*(b+l1*c)]; };
1757
+ auto CH = [ch,ido](size_t a, size_t b, size_t c) -> T&
1758
+ { return ch[a+ido*(b+2*c)]; };
1759
+
1760
+ for (size_t k=0; k<l1; k++)
1761
+ PM (CH(0,0,k),CH(ido-1,1,k),CC(0,k,0),CC(0,k,1));
1762
+ if ((ido&1)==0)
1763
+ for (size_t k=0; k<l1; k++)
1764
+ {
1765
+ CH( 0,1,k) = -CC(ido-1,k,1);
1766
+ CH(ido-1,0,k) = CC(ido-1,k,0);
1767
+ }
1768
+ if (ido<=2) return;
1769
+ for (size_t k=0; k<l1; k++)
1770
+ for (size_t i=2; i<ido; i+=2)
1771
+ {
1772
+ size_t ic=ido-i;
1773
+ T tr2, ti2;
1774
+ MULPM (tr2,ti2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
1775
+ PM (CH(i-1,0,k),CH(ic-1,1,k),CC(i-1,k,0),tr2);
1776
+ PM (CH(i ,0,k),CH(ic ,1,k),ti2,CC(i ,k,0));
1777
+ }
1778
+ }
1779
+
1780
+ // a2=a+b; b2=i*(b-a);
1781
+ #define POCKETFFT_REARRANGE(rx, ix, ry, iy) \
1782
+ {\
1783
+ auto t1=rx+ry, t2=ry-rx, t3=ix+iy, t4=ix-iy; \
1784
+ rx=t1; ix=t3; ry=t4; iy=t2; \
1785
+ }
1786
+
1787
+ template<typename T> void radf3(size_t ido, size_t l1,
1788
+ const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
1789
+ const T0 * POCKETFFT_RESTRICT wa) const
1790
+ {
1791
+ constexpr T0 taur=-0.5, taui=T0(0.8660254037844386467637231707529362L);
1792
+
1793
+ auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
1794
+ auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
1795
+ { return cc[a+ido*(b+l1*c)]; };
1796
+ auto CH = [ch,ido](size_t a, size_t b, size_t c) -> T&
1797
+ { return ch[a+ido*(b+3*c)]; };
1798
+
1799
+ for (size_t k=0; k<l1; k++)
1800
+ {
1801
+ T cr2=CC(0,k,1)+CC(0,k,2);
1802
+ CH(0,0,k) = CC(0,k,0)+cr2;
1803
+ CH(0,2,k) = taui*(CC(0,k,2)-CC(0,k,1));
1804
+ CH(ido-1,1,k) = CC(0,k,0)+taur*cr2;
1805
+ }
1806
+ if (ido==1) return;
1807
+ for (size_t k=0; k<l1; k++)
1808
+ for (size_t i=2; i<ido; i+=2)
1809
+ {
1810
+ size_t ic=ido-i;
1811
+ T di2, di3, dr2, dr3;
1812
+ MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)); // d2=conj(WA0)*CC1
1813
+ MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)); // d3=conj(WA1)*CC2
1814
+ POCKETFFT_REARRANGE(dr2, di2, dr3, di3);
1815
+ CH(i-1,0,k) = CC(i-1,k,0)+dr2; // c add
1816
+ CH(i ,0,k) = CC(i ,k,0)+di2;
1817
+ T tr2 = CC(i-1,k,0)+taur*dr2; // c add
1818
+ T ti2 = CC(i ,k,0)+taur*di2;
1819
+ T tr3 = taui*dr3; // t3 = taui*i*(d3-d2)?
1820
+ T ti3 = taui*di3;
1821
+ PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr3); // PM(i) = t2+t3
1822
+ PM(CH(i ,2,k),CH(ic ,1,k),ti3,ti2); // PM(ic) = conj(t2-t3)
1823
+ }
1824
+ }
1825
+
1826
+ template<typename T> void radf4(size_t ido, size_t l1,
1827
+ const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
1828
+ const T0 * POCKETFFT_RESTRICT wa) const
1829
+ {
1830
+ constexpr T0 hsqt2=T0(0.707106781186547524400844362104849L);
1831
+
1832
+ auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
1833
+ auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
1834
+ { return cc[a+ido*(b+l1*c)]; };
1835
+ auto CH = [ch,ido](size_t a, size_t b, size_t c) -> T&
1836
+ { return ch[a+ido*(b+4*c)]; };
1837
+
1838
+ for (size_t k=0; k<l1; k++)
1839
+ {
1840
+ T tr1,tr2;
1841
+ PM (tr1,CH(0,2,k),CC(0,k,3),CC(0,k,1));
1842
+ PM (tr2,CH(ido-1,1,k),CC(0,k,0),CC(0,k,2));
1843
+ PM (CH(0,0,k),CH(ido-1,3,k),tr2,tr1);
1844
+ }
1845
+ if ((ido&1)==0)
1846
+ for (size_t k=0; k<l1; k++)
1847
+ {
1848
+ T ti1=-hsqt2*(CC(ido-1,k,1)+CC(ido-1,k,3));
1849
+ T tr1= hsqt2*(CC(ido-1,k,1)-CC(ido-1,k,3));
1850
+ PM (CH(ido-1,0,k),CH(ido-1,2,k),CC(ido-1,k,0),tr1);
1851
+ PM (CH( 0,3,k),CH( 0,1,k),ti1,CC(ido-1,k,2));
1852
+ }
1853
+ if (ido<=2) return;
1854
+ for (size_t k=0; k<l1; k++)
1855
+ for (size_t i=2; i<ido; i+=2)
1856
+ {
1857
+ size_t ic=ido-i;
1858
+ T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
1859
+ MULPM(cr2,ci2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
1860
+ MULPM(cr3,ci3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2));
1861
+ MULPM(cr4,ci4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3));
1862
+ PM(tr1,tr4,cr4,cr2);
1863
+ PM(ti1,ti4,ci2,ci4);
1864
+ PM(tr2,tr3,CC(i-1,k,0),cr3);
1865
+ PM(ti2,ti3,CC(i ,k,0),ci3);
1866
+ PM(CH(i-1,0,k),CH(ic-1,3,k),tr2,tr1);
1867
+ PM(CH(i ,0,k),CH(ic ,3,k),ti1,ti2);
1868
+ PM(CH(i-1,2,k),CH(ic-1,1,k),tr3,ti4);
1869
+ PM(CH(i ,2,k),CH(ic ,1,k),tr4,ti3);
1870
+ }
1871
+ }
1872
+
1873
+ template<typename T> void radf5(size_t ido, size_t l1,
1874
+ const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
1875
+ const T0 * POCKETFFT_RESTRICT wa) const
1876
+ {
1877
+ constexpr T0 tr11= T0(0.3090169943749474241022934171828191L),
1878
+ ti11= T0(0.9510565162951535721164393333793821L),
1879
+ tr12= T0(-0.8090169943749474241022934171828191L),
1880
+ ti12= T0(0.5877852522924731291687059546390728L);
1881
+
1882
+ auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
1883
+ auto CC = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
1884
+ { return cc[a+ido*(b+l1*c)]; };
1885
+ auto CH = [ch,ido](size_t a, size_t b, size_t c) -> T&
1886
+ { return ch[a+ido*(b+5*c)]; };
1887
+
1888
+ for (size_t k=0; k<l1; k++)
1889
+ {
1890
+ T cr2, cr3, ci4, ci5;
1891
+ PM (cr2,ci5,CC(0,k,4),CC(0,k,1));
1892
+ PM (cr3,ci4,CC(0,k,3),CC(0,k,2));
1893
+ CH(0,0,k)=CC(0,k,0)+cr2+cr3;
1894
+ CH(ido-1,1,k)=CC(0,k,0)+tr11*cr2+tr12*cr3;
1895
+ CH(0,2,k)=ti11*ci5+ti12*ci4;
1896
+ CH(ido-1,3,k)=CC(0,k,0)+tr12*cr2+tr11*cr3;
1897
+ CH(0,4,k)=ti12*ci5-ti11*ci4;
1898
+ }
1899
+ if (ido==1) return;
1900
+ for (size_t k=0; k<l1;++k)
1901
+ for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
1902
+ {
1903
+ T di2, di3, di4, di5, dr2, dr3, dr4, dr5;
1904
+ MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1));
1905
+ MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2));
1906
+ MULPM (dr4,di4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3));
1907
+ MULPM (dr5,di5,WA(3,i-2),WA(3,i-1),CC(i-1,k,4),CC(i,k,4));
1908
+ POCKETFFT_REARRANGE(dr2, di2, dr5, di5);
1909
+ POCKETFFT_REARRANGE(dr3, di3, dr4, di4);
1910
+ CH(i-1,0,k)=CC(i-1,k,0)+dr2+dr3;
1911
+ CH(i ,0,k)=CC(i ,k,0)+di2+di3;
1912
+ T tr2=CC(i-1,k,0)+tr11*dr2+tr12*dr3;
1913
+ T ti2=CC(i ,k,0)+tr11*di2+tr12*di3;
1914
+ T tr3=CC(i-1,k,0)+tr12*dr2+tr11*dr3;
1915
+ T ti3=CC(i ,k,0)+tr12*di2+tr11*di3;
1916
+ T tr5 = ti11*dr5 + ti12*dr4;
1917
+ T ti5 = ti11*di5 + ti12*di4;
1918
+ T tr4 = ti12*dr5 - ti11*dr4;
1919
+ T ti4 = ti12*di5 - ti11*di4;
1920
+ PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr5);
1921
+ PM(CH(i ,2,k),CH(ic ,1,k),ti5,ti2);
1922
+ PM(CH(i-1,4,k),CH(ic-1,3,k),tr3,tr4);
1923
+ PM(CH(i ,4,k),CH(ic ,3,k),ti4,ti3);
1924
+ }
1925
+ }
1926
+
1927
+ #undef POCKETFFT_REARRANGE
1928
+
1929
// Forward (real-to-halfcomplex) decimation step for a general factor ip.
// Used when ip is not one of the specialised radices 2/3/4/5; after
// factorize(), such an ip is always an odd prime > 5.
//   cc     : input/work buffer (viewed through C1/C2/CC below)
//   ch     : scratch buffer (viewed through CH/CH2)
//   wa     : per-stage twiddle factors (fact[k].tw)
//   csarr  : table of cos/sin(2*pi*m/ip) pairs (fact[k].tws)
// NOTE: unlike the fixed-radix kernels, this routine leaves its result in
// cc, which is why exec() performs an extra pointer swap after calling it.
// The numeric trailing comments (e.g. "// 114") are the statement labels of
// the corresponding loops in the original FFTPACK radfg routine.
template<typename T> void radfg(size_t ido, size_t ip, size_t l1,
  T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
  const T0 * POCKETFFT_RESTRICT wa, const T0 * POCKETFFT_RESTRICT csarr) const
  {
  const size_t cdim=ip;
  size_t ipph=(ip+1)/2;   // number of symmetric column pairs (ip is odd)
  size_t idl1 = ido*l1;

  // Index helpers: the same flat buffers viewed with different layouts.
  auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> T&
    { return cc[a+ido*(b+cdim*c)]; };
  auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> const T&
    { return ch[a+ido*(b+l1*c)]; };
  auto C1 = [cc,ido,l1] (size_t a, size_t b, size_t c) -> T&
    { return cc[a+ido*(b+l1*c)]; };
  auto C2 = [cc,idl1] (size_t a, size_t b) -> T&
    { return cc[a+idl1*b]; };
  auto CH2 = [ch,idl1] (size_t a, size_t b) -> T&
    { return ch[a+idl1*b]; };

  if (ido>1)
    {
    // Apply twiddle factors and combine the mirror-symmetric columns j/jc.
    for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)              // 114
      {
      size_t is=(j-1)*(ido-1),
             is2=(jc-1)*(ido-1);
      for (size_t k=0; k<l1; ++k)                            // 113
        {
        size_t idij=is;
        size_t idij2=is2;
        for (size_t i=1; i<=ido-2; i+=2)                      // 112
          {
          T t1=C1(i,k,j ), t2=C1(i+1,k,j ),
            t3=C1(i,k,jc), t4=C1(i+1,k,jc);
          // complex multiplications by the twiddle factors
          T x1=wa[idij]*t1 + wa[idij+1]*t2,
            x2=wa[idij]*t2 - wa[idij+1]*t1,
            x3=wa[idij2]*t3 + wa[idij2+1]*t4,
            x4=wa[idij2]*t4 - wa[idij2+1]*t3;
          PM(C1(i,k,j),C1(i+1,k,jc),x3,x1);
          PM(C1(i+1,k,j),C1(i,k,jc),x2,x4);
          idij+=2;
          idij2+=2;
          }
        }
      }
    }

  // i==0 column needs no twiddles: just combine the symmetric pair in place.
  for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)                 // 123
    for (size_t k=0; k<l1; ++k)                               // 122
      MPINPLACE(C1(0,k,jc), C1(0,k,j));

  //everything in C
  //memset(ch,0,ip*l1*ido*sizeof(double));

  // Length-ip DFT over the factor dimension, using the csarr cos/sin table.
  // The j loop is manually unrolled by 4, then 2, then 1.
  for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc)                  // 127
    {
    for (size_t ik=0; ik<idl1; ++ik)                          // 124
      {
      CH2(ik,l ) = C2(ik,0)+csarr[2*l]*C2(ik,1)+csarr[4*l]*C2(ik,2);
      CH2(ik,lc) = csarr[2*l+1]*C2(ik,ip-1)+csarr[4*l+1]*C2(ik,ip-2);
      }
    size_t iang = 2*l;
    size_t j=3, jc=ip-3;
    for (; j<ipph-3; j+=4,jc-=4)                              // 126
      {
      // iang tracks (j*l) mod ip so csarr is indexed within one period
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar3=csarr[2*iang], ai3=csarr[2*iang+1];
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar4=csarr[2*iang], ai4=csarr[2*iang+1];
      for (size_t ik=0; ik<idl1; ++ik)                        // 125
        {
        CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1)
                     +ar3*C2(ik,j +2)+ar4*C2(ik,j +3);
        CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1)
                     +ai3*C2(ik,jc-2)+ai4*C2(ik,jc-3);
        }
      }
    for (; j<ipph-1; j+=2,jc-=2)                              // 126
      {
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
      for (size_t ik=0; ik<idl1; ++ik)                        // 125
        {
        CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1);
        CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1);
        }
      }
    for (; j<ipph; ++j,--jc)                                  // 126
      {
      iang+=l; if (iang>=ip) iang-=ip;
      T0 ar=csarr[2*iang], ai=csarr[2*iang+1];
      for (size_t ik=0; ik<idl1; ++ik)                        // 125
        {
        CH2(ik,l ) += ar*C2(ik,j );
        CH2(ik,lc) += ai*C2(ik,jc);
        }
      }
    }
  // DC component: plain sum over all columns.
  for (size_t ik=0; ik<idl1; ++ik)                            // 101
    CH2(ik,0) = C2(ik,0);
  for (size_t j=1; j<ipph; ++j)                               // 129
    for (size_t ik=0; ik<idl1; ++ik)                          // 128
      CH2(ik,0) += C2(ik,j);

  // everything in CH at this point!
  //memset(cc,0,ip*l1*ido*sizeof(double));

  // Final rearrangement into FFTPACK's halfcomplex output layout (in cc).
  for (size_t k=0; k<l1; ++k)                                 // 131
    for (size_t i=0; i<ido; ++i)                              // 130
      CC(i,0,k) = CH(i,k,0);

  for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)                 // 137
    {
    size_t j2=2*j-1;
    for (size_t k=0; k<l1; ++k)                               // 136
      {
      CC(ido-1,j2,k) = CH(0,k,j);
      CC(0,j2+1,k) = CH(0,k,jc);
      }
    }

  if (ido==1) return;

  for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)                 // 140
    {
    size_t j2=2*j-1;
    for(size_t k=0; k<l1; ++k)                                // 139
      for(size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2)      // 138
        {
        CC(i   ,j2+1,k) = CH(i  ,k,j )+CH(i  ,k,jc);
        CC(ic  ,j2  ,k) = CH(i  ,k,j )-CH(i  ,k,jc);
        CC(i+1 ,j2+1,k) = CH(i+1,k,j )+CH(i+1,k,jc);
        CC(ic+1,j2  ,k) = CH(i+1,k,jc)-CH(i+1,k,j );
        }
    }
  }
2070
+
2071
// Inverse (halfcomplex-to-real) radix-2 decimation step.
//   cc : input in FFTPACK halfcomplex layout [ido, 2, l1]
//   ch : output [ido, l1, 2]
//   wa : twiddle factors for this stage
template<typename T> void radb2(size_t ido, size_t l1,
  const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
  const T0 * POCKETFFT_RESTRICT wa) const
  {
  // Index helpers for twiddles and the two buffer layouts.
  auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
  auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
    { return cc[a+ido*(b+2*c)]; };
  auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
    { return ch[a+ido*(b+l1*c)]; };

  // i==0: purely real components, simple sum/difference.
  for (size_t k=0; k<l1; k++)
    PM (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(ido-1,1,k));
  // even ido: the Nyquist-like element is handled separately.
  if ((ido&1)==0)
    for (size_t k=0; k<l1; k++)
      {
      CH(ido-1,k,0) = 2*CC(ido-1,0,k);
      CH(ido-1,k,1) =-2*CC(0    ,1,k);
      }
  if (ido<=2) return;
  // General complex butterflies with twiddle rotation.
  for (size_t k=0; k<l1;++k)
    for (size_t i=2; i<ido; i+=2)
      {
      size_t ic=ido-i;   // index of the conjugate-symmetric partner
      T ti2, tr2;
      PM (CH(i-1,k,0),tr2,CC(i-1,0,k),CC(ic-1,1,k));
      PM (ti2,CH(i  ,k,0),CC(i  ,0,k),CC(ic  ,1,k));
      MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ti2,tr2);
      }
  }
2100
+
2101
// Inverse (halfcomplex-to-real) radix-3 decimation step.
//   taur = cos(2*pi/3) = -1/2, taui = sin(2*pi/3).
template<typename T> void radb3(size_t ido, size_t l1,
  const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
  const T0 * POCKETFFT_RESTRICT wa) const
  {
  constexpr T0 taur=-0.5, taui=T0(0.8660254037844386467637231707529362L);

  // Index helpers for twiddles and the two buffer layouts.
  auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
  auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
    { return cc[a+ido*(b+3*c)]; };
  auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
    { return ch[a+ido*(b+l1*c)]; };

  // i==0: real-only butterflies.
  for (size_t k=0; k<l1; k++)
    {
    T tr2=2*CC(ido-1,1,k);
    T cr2=CC(0,0,k)+taur*tr2;
    CH(0,k,0)=CC(0,0,k)+tr2;
    T ci3=2*taui*CC(0,2,k);
    PM (CH(0,k,2),CH(0,k,1),cr2,ci3);
    }
  if (ido==1) return;
  // General complex butterflies; ic is the conjugate-symmetric partner of i.
  for (size_t k=0; k<l1; k++)
    for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
      {
      T tr2=CC(i-1,2,k)+CC(ic-1,1,k); // t2=CC(I) + conj(CC(ic))
      T ti2=CC(i  ,2,k)-CC(ic  ,1,k);
      T cr2=CC(i-1,0,k)+taur*tr2;     // c2=CC +taur*t2
      T ci2=CC(i  ,0,k)+taur*ti2;
      CH(i-1,k,0)=CC(i-1,0,k)+tr2;         // CH=CC+t2
      CH(i  ,k,0)=CC(i  ,0,k)+ti2;
      T cr3=taui*(CC(i-1,2,k)-CC(ic-1,1,k));// c3=taui*(CC(i)-conj(CC(ic)))
      T ci3=taui*(CC(i  ,2,k)+CC(ic  ,1,k));
      T di2, di3, dr2, dr3;
      PM(dr3,dr2,cr2,ci3); // d2= (cr2-ci3, ci2+cr3) = c2+i*c3
      PM(di2,di3,ci2,cr3); // d3= (cr2+ci3, ci2-cr3) = c2-i*c3
      MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2); // ch = WA*d2
      MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3);
      }
  }
2140
+
2141
// Inverse (halfcomplex-to-real) radix-4 decimation step.
template<typename T> void radb4(size_t ido, size_t l1,
  const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
  const T0 * POCKETFFT_RESTRICT wa) const
  {
  constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);

  // Index helpers for twiddles and the two buffer layouts.
  auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
  auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
    { return cc[a+ido*(b+4*c)]; };
  auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
    { return ch[a+ido*(b+l1*c)]; };

  // i==0: real-only butterflies.
  for (size_t k=0; k<l1; k++)
    {
    T tr1, tr2;
    PM (tr2,tr1,CC(0,0,k),CC(ido-1,3,k));
    T tr3=2*CC(ido-1,1,k);
    T tr4=2*CC(0,2,k);
    PM (CH(0,k,0),CH(0,k,2),tr2,tr3);
    PM (CH(0,k,3),CH(0,k,1),tr1,tr4);
    }
  // even ido: handle the Nyquist-like element.
  if ((ido&1)==0)
    for (size_t k=0; k<l1; k++)
      {
      T tr1,tr2,ti1,ti2;
      PM (ti1,ti2,CC(0    ,3,k),CC(0    ,1,k));
      PM (tr2,tr1,CC(ido-1,0,k),CC(ido-1,2,k));
      CH(ido-1,k,0)=tr2+tr2;
      CH(ido-1,k,1)=sqrt2*(tr1-ti1);
      CH(ido-1,k,2)=ti2+ti2;
      CH(ido-1,k,3)=-sqrt2*(tr1+ti1);
      }
  if (ido<=2) return;
  // General complex butterflies with twiddle rotation.
  for (size_t k=0; k<l1;++k)
    for (size_t i=2; i<ido; i+=2)
      {
      T ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
      size_t ic=ido-i;   // conjugate-symmetric partner index
      PM (tr2,tr1,CC(i-1,0,k),CC(ic-1,3,k));
      PM (ti1,ti2,CC(i  ,0,k),CC(ic  ,3,k));
      PM (tr4,ti3,CC(i  ,2,k),CC(ic  ,1,k));
      PM (tr3,ti4,CC(i-1,2,k),CC(ic-1,1,k));
      PM (CH(i-1,k,0),cr3,tr2,tr3);
      PM (CH(i  ,k,0),ci3,ti2,ti3);
      PM (cr4,cr2,tr1,tr4);
      PM (ci2,ci4,ti1,ti4);
      MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ci2,cr2);
      MULPM (CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),ci3,cr3);
      MULPM (CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),ci4,cr4);
      }
  }
2192
+
2193
// Inverse (halfcomplex-to-real) radix-5 decimation step.
//   tr11/ti11 = cos/sin(2*pi/5), tr12/ti12 = cos/sin(4*pi/5).
template<typename T> void radb5(size_t ido, size_t l1,
  const T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
  const T0 * POCKETFFT_RESTRICT wa) const
  {
  constexpr T0 tr11= T0(0.3090169943749474241022934171828191L),
               ti11= T0(0.9510565162951535721164393333793821L),
               tr12= T0(-0.8090169943749474241022934171828191L),
               ti12= T0(0.5877852522924731291687059546390728L);

  // Index helpers for twiddles and the two buffer layouts.
  auto WA = [wa,ido](size_t x, size_t i) { return wa[i+x*(ido-1)]; };
  auto CC = [cc,ido](size_t a, size_t b, size_t c) -> const T&
    { return cc[a+ido*(b+5*c)]; };
  auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
    { return ch[a+ido*(b+l1*c)]; };

  // i==0: real-only butterflies.
  for (size_t k=0; k<l1; k++)
    {
    T ti5=CC(0,2,k)+CC(0,2,k);
    T ti4=CC(0,4,k)+CC(0,4,k);
    T tr2=CC(ido-1,1,k)+CC(ido-1,1,k);
    T tr3=CC(ido-1,3,k)+CC(ido-1,3,k);
    CH(0,k,0)=CC(0,0,k)+tr2+tr3;
    T cr2=CC(0,0,k)+tr11*tr2+tr12*tr3;
    T cr3=CC(0,0,k)+tr12*tr2+tr11*tr3;
    T ci4, ci5;
    MULPM(ci5,ci4,ti5,ti4,ti11,ti12);
    PM(CH(0,k,4),CH(0,k,1),cr2,ci5);
    PM(CH(0,k,3),CH(0,k,2),cr3,ci4);
    }
  if (ido==1) return;
  // General complex butterflies; ic is the conjugate-symmetric partner of i.
  for (size_t k=0; k<l1;++k)
    for (size_t i=2, ic=ido-2; i<ido; i+=2, ic-=2)
      {
      T tr2, tr3, tr4, tr5, ti2, ti3, ti4, ti5;
      PM(tr2,tr5,CC(i-1,2,k),CC(ic-1,1,k));
      PM(ti5,ti2,CC(i  ,2,k),CC(ic  ,1,k));
      PM(tr3,tr4,CC(i-1,4,k),CC(ic-1,3,k));
      PM(ti4,ti3,CC(i  ,4,k),CC(ic  ,3,k));
      CH(i-1,k,0)=CC(i-1,0,k)+tr2+tr3;
      CH(i  ,k,0)=CC(i  ,0,k)+ti2+ti3;
      T cr2=CC(i-1,0,k)+tr11*tr2+tr12*tr3;
      T ci2=CC(i  ,0,k)+tr11*ti2+tr12*ti3;
      T cr3=CC(i-1,0,k)+tr12*tr2+tr11*tr3;
      T ci3=CC(i  ,0,k)+tr12*ti2+tr11*ti3;
      T ci4, ci5, cr5, cr4;
      MULPM(cr5,cr4,tr5,tr4,ti11,ti12);
      MULPM(ci5,ci4,ti5,ti4,ti11,ti12);
      T dr2, dr3, dr4, dr5, di2, di3, di4, di5;
      PM(dr4,dr3,cr3,ci4);
      PM(di3,di4,ci3,cr4);
      PM(dr5,dr2,cr2,ci5);
      PM(di2,di5,ci2,cr5);
      // rotate each output column by its twiddle factor
      MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2);
      MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3);
      MULPM(CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),di4,dr4);
      MULPM(CH(i,k,4),CH(i-1,k,4),WA(3,i-2),WA(3,i-1),di5,dr5);
      }
  }
2251
+
2252
// Inverse (halfcomplex-to-real) decimation step for a general factor ip
// (mirror of radfg). After factorize(), ip here is always an odd prime > 5.
//   cc     : input in FFTPACK halfcomplex layout
//   ch     : output/work buffer
//   wa     : per-stage twiddle factors (fact[k].tw)
//   csarr  : table of cos/sin(2*pi*m/ip) pairs (fact[k].tws)
// Numeric trailing comments reference the loop labels of the original
// FFTPACK radbg routine.
template<typename T> void radbg(size_t ido, size_t ip, size_t l1,
  T * POCKETFFT_RESTRICT cc, T * POCKETFFT_RESTRICT ch,
  const T0 * POCKETFFT_RESTRICT wa, const T0 * POCKETFFT_RESTRICT csarr) const
  {
  const size_t cdim=ip;
  size_t ipph=(ip+1)/ 2;  // number of symmetric column pairs (ip is odd)
  size_t idl1 = ido*l1;

  // Index helpers: the same flat buffers viewed with different layouts.
  auto CC = [cc,ido,cdim](size_t a, size_t b, size_t c) -> const T&
    { return cc[a+ido*(b+cdim*c)]; };
  auto CH = [ch,ido,l1](size_t a, size_t b, size_t c) -> T&
    { return ch[a+ido*(b+l1*c)]; };
  auto C1 = [cc,ido,l1](size_t a, size_t b, size_t c) -> const T&
    { return cc[a+ido*(b+l1*c)]; };
  auto C2 = [cc,idl1](size_t a, size_t b) -> T&
    { return cc[a+idl1*b]; };
  auto CH2 = [ch,idl1](size_t a, size_t b) -> T&
    { return ch[a+idl1*b]; };

  // Unpack the halfcomplex input into full symmetric columns in CH.
  for (size_t k=0; k<l1; ++k)        // 102
    for (size_t i=0; i<ido; ++i)     // 101
      CH(i,k,0) = CC(i,0,k);
  for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)   // 108
    {
    size_t j2=2*j-1;
    for (size_t k=0; k<l1; ++k)
      {
      CH(0,k,j ) = 2*CC(ido-1,j2,k);
      CH(0,k,jc) = 2*CC(0,j2+1,k);
      }
    }

  if (ido!=1)
    {
    for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)    // 111
      {
      size_t j2=2*j-1;
      for (size_t k=0; k<l1; ++k)
        for (size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 109
          {
          CH(i  ,k,j ) = CC(i  ,j2+1,k)+CC(ic  ,j2,k);
          CH(i  ,k,jc) = CC(i  ,j2+1,k)-CC(ic  ,j2,k);
          CH(i+1,k,j ) = CC(i+1,j2+1,k)-CC(ic+1,j2,k);
          CH(i+1,k,jc) = CC(i+1,j2+1,k)+CC(ic+1,j2,k);
          }
      }
    }
  // Length-ip inverse DFT over the factor dimension using the csarr table;
  // the j loop is manually unrolled by 4, then 2, then 1.
  for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc)
    {
    for (size_t ik=0; ik<idl1; ++ik)
      {
      C2(ik,l ) = CH2(ik,0)+csarr[2*l]*CH2(ik,1)+csarr[4*l]*CH2(ik,2);
      C2(ik,lc) = csarr[2*l+1]*CH2(ik,ip-1)+csarr[4*l+1]*CH2(ik,ip-2);
      }
    size_t iang=2*l;
    size_t j=3,jc=ip-3;
    for(; j<ipph-3; j+=4,jc-=4)
      {
      // iang tracks (j*l) mod ip. NOTE(review): this uses '>' where radfg
      // uses '>='; with prime ip the accumulated angle can never land
      // exactly on ip, so both are equivalent here — but worth confirming
      // against upstream pocketfft before "normalising" it.
      iang+=l; if(iang>ip) iang-=ip;
      T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
      iang+=l; if(iang>ip) iang-=ip;
      T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
      iang+=l; if(iang>ip) iang-=ip;
      T0 ar3=csarr[2*iang], ai3=csarr[2*iang+1];
      iang+=l; if(iang>ip) iang-=ip;
      T0 ar4=csarr[2*iang], ai4=csarr[2*iang+1];
      for (size_t ik=0; ik<idl1; ++ik)
        {
        C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1)
                    +ar3*CH2(ik,j +2)+ar4*CH2(ik,j +3);
        C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1)
                    +ai3*CH2(ik,jc-2)+ai4*CH2(ik,jc-3);
        }
      }
    for(; j<ipph-1; j+=2,jc-=2)
      {
      iang+=l; if(iang>ip) iang-=ip;
      T0 ar1=csarr[2*iang], ai1=csarr[2*iang+1];
      iang+=l; if(iang>ip) iang-=ip;
      T0 ar2=csarr[2*iang], ai2=csarr[2*iang+1];
      for (size_t ik=0; ik<idl1; ++ik)
        {
        C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1);
        C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1);
        }
      }
    for(; j<ipph; ++j,--jc)
      {
      iang+=l; if(iang>ip) iang-=ip;
      T0 war=csarr[2*iang], wai=csarr[2*iang+1];
      for (size_t ik=0; ik<idl1; ++ik)
        {
        C2(ik,l ) += war*CH2(ik,j );
        C2(ik,lc) += wai*CH2(ik,jc);
        }
      }
    }
  // DC component: plain sum over all columns.
  for (size_t j=1; j<ipph; ++j)
    for (size_t ik=0; ik<idl1; ++ik)
      CH2(ik,0) += CH2(ik,j);
  // Recombine each symmetric column pair (i==0 part).
  for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)    // 124
    for (size_t k=0; k<l1; ++k)
      PM(CH(0,k,jc),CH(0,k,j),C1(0,k,j),C1(0,k,jc));

  if (ido==1) return;

  // Recombine symmetric pairs for the remaining (complex) components.
  for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)   // 127
    for (size_t k=0; k<l1; ++k)
      for (size_t i=1; i<=ido-2; i+=2)
        {
        CH(i  ,k,j ) = C1(i  ,k,j)-C1(i+1,k,jc);
        CH(i  ,k,jc) = C1(i  ,k,j)+C1(i+1,k,jc);
        CH(i+1,k,j ) = C1(i+1,k,j)+C1(i  ,k,jc);
        CH(i+1,k,jc) = C1(i+1,k,j)-C1(i  ,k,jc);
        }

  // All in CH

  // Final twiddle-factor rotation, in place in CH.
  for (size_t j=1; j<ip; ++j)
    {
    size_t is = (j-1)*(ido-1);
    for (size_t k=0; k<l1; ++k)
      {
      size_t idij = is;
      for (size_t i=1; i<=ido-2; i+=2)
        {
        T t1=CH(i,k,j), t2=CH(i+1,k,j);
        CH(i  ,k,j) = wa[idij]*t1-wa[idij+1]*t2;
        CH(i+1,k,j) = wa[idij]*t2+wa[idij+1]*t1;
        idij+=2;
        }
      }
    }
  }
2386
+
2387
+ template<typename T> void copy_and_norm(T *c, T *p1, T0 fct) const
2388
+ {
2389
+ if (p1!=c)
2390
+ {
2391
+ if (fct!=1.)
2392
+ for (size_t i=0; i<length; ++i)
2393
+ c[i] = fct*p1[i];
2394
+ else
2395
+ std::copy_n (p1, length, c);
2396
+ }
2397
+ else
2398
+ if (fct!=1.)
2399
+ for (size_t i=0; i<length; ++i)
2400
+ c[i] *= fct;
2401
+ }
2402
+
2403
+ public:
2404
// Execute the full real-valued transform plan on c (in place).
//   c    : data buffer of `length` elements
//   fct  : normalisation factor applied at the end
//   r2hc : true for real-to-halfcomplex (forward), false for the inverse
// The stages ping-pong between c and a scratch buffer; copy_and_norm()
// moves the result back into c if the last stage left it in the scratch.
template<typename T> void exec(T c[], T0 fct, bool r2hc) const
  {
  if (length==1) { c[0]*=fct; return; }  // trivial transform
  size_t nf=fact.size();
  arr<T> ch(length);
  T *p1=c, *p2=ch.data();

  if (r2hc)
    // forward: factors are processed in reverse order
    for(size_t k1=0, l1=length; k1<nf;++k1)
      {
      size_t k=nf-k1-1;
      size_t ip=fact[k].fct;
      size_t ido=length / l1;
      l1 /= ip;
      if(ip==4)
        radf4(ido, l1, p1, p2, fact[k].tw);
      else if(ip==2)
        radf2(ido, l1, p1, p2, fact[k].tw);
      else if(ip==3)
        radf3(ido, l1, p1, p2, fact[k].tw);
      else if(ip==5)
        radf5(ido, l1, p1, p2, fact[k].tw);
      else
        // radfg leaves its output in p1, not p2; the extra swap here plus
        // the unconditional swap below leaves p1/p2 effectively unswapped.
        { radfg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws); std::swap (p1,p2); }
      std::swap (p1,p2);
      }
  else
    // inverse: factors are processed in forward order
    for(size_t k=0, l1=1; k<nf; k++)
      {
      size_t ip = fact[k].fct,
             ido= length/(ip*l1);
      if(ip==4)
        radb4(ido, l1, p1, p2, fact[k].tw);
      else if(ip==2)
        radb2(ido, l1, p1, p2, fact[k].tw);
      else if(ip==3)
        radb3(ido, l1, p1, p2, fact[k].tw);
      else if(ip==5)
        radb5(ido, l1, p1, p2, fact[k].tw);
      else
        radbg(ido, ip, l1, p1, p2, fact[k].tw, fact[k].tws);
      std::swap (p1,p2);
      l1*=ip;
      }

  copy_and_norm(c,p1,fct);
  }
2451
+
2452
+ private:
2453
+ void factorize()
2454
+ {
2455
+ size_t len=length;
2456
+ while ((len%4)==0)
2457
+ { add_factor(4); len>>=2; }
2458
+ if ((len%2)==0)
2459
+ {
2460
+ len>>=1;
2461
+ // factor 2 should be at the front of the factor list
2462
+ add_factor(2);
2463
+ std::swap(fact[0].fct, fact.back().fct);
2464
+ }
2465
+ for (size_t divisor=3; divisor*divisor<=len; divisor+=2)
2466
+ while ((len%divisor)==0)
2467
+ {
2468
+ add_factor(divisor);
2469
+ len/=divisor;
2470
+ }
2471
+ if (len>1) add_factor(len);
2472
+ }
2473
+
2474
+ size_t twsize() const
2475
+ {
2476
+ size_t twsz=0, l1=1;
2477
+ for (size_t k=0; k<fact.size(); ++k)
2478
+ {
2479
+ size_t ip=fact[k].fct, ido=length/(l1*ip);
2480
+ twsz+=(ip-1)*(ido-1);
2481
+ if (ip>5) twsz+=2*ip;
2482
+ l1*=ip;
2483
+ }
2484
+ return twsz;
2485
+ }
2486
+
2487
// Precompute all twiddle factors into `mem` and wire up the per-stage
// pointers fact[k].tw / fact[k].tws. Must be called after factorize() and
// after mem has been resized to twsize() elements.
void comp_twiddle()
  {
  sincos_2pibyn<T0> twid(length);   // table of sin/cos(2*pi*m/length)
  size_t l1=1;
  T0 *ptr=mem.data();               // bump allocator over mem
  for (size_t k=0; k<fact.size(); ++k)
    {
    size_t ip=fact[k].fct, ido=length/(l1*ip);
    if (k<fact.size()-1) // last factor doesn't need twiddles
      {
      fact[k].tw=ptr; ptr+=(ip-1)*(ido-1);
      // layout: (ip-1) rows of (ido-1) interleaved (re,im) values
      for (size_t j=1; j<ip; ++j)
        for (size_t i=1; i<=(ido-1)/2; ++i)
          {
          fact[k].tw[(j-1)*(ido-1)+2*i-2] = twid[j*l1*i].r;
          fact[k].tw[(j-1)*(ido-1)+2*i-1] = twid[j*l1*i].i;
          }
      }
    if (ip>5) // special factors required by *g functions
      {
      // cos/sin(2*pi*m/ip) table, exploiting conjugate symmetry: entries
      // i and ic=2*ip-i hold the same cosine and opposite sines.
      fact[k].tws=ptr; ptr+=2*ip;
      fact[k].tws[0] = 1.;
      fact[k].tws[1] = 0.;
      for (size_t i=2, ic=2*ip-2; i<=ic; i+=2, ic-=2)
        {
        fact[k].tws[i  ] = twid[i/2*(length/ip)].r;
        fact[k].tws[i+1] = twid[i/2*(length/ip)].i;
        fact[k].tws[ic]  = twid[i/2*(length/ip)].r;
        fact[k].tws[ic+1] = -twid[i/2*(length/ip)].i;
        }
      }
    l1*=ip;
    }
  }
2521
+
2522
+ public:
2523
// Build a real-valued FFT plan for the given length: factorize the length
// and precompute all twiddle factors.
// Throws std::runtime_error for length 0; length 1 needs no setup.
POCKETFFT_NOINLINE explicit rfftp(size_t length_)
  : length(length_)
  {
  if (length==0) throw std::runtime_error("zero-length FFT requested");
  if (length==1) return;
  factorize();
  mem.resize(twsize());   // sized before comp_twiddle hands out pointers
  comp_twiddle();
  }
2532
+ };
2533
+
2534
+ //
2535
+ // complex Bluestein transforms
2536
+ //
2537
+
2538
// Bluestein's (chirp-z) algorithm: computes an arbitrary-length FFT by
// re-expressing it as a convolution of length n2 >= 2*n-1, where n2 is
// chosen highly composite so the internal cfftp plan is fast.
template<typename T0> class fftblue
  {
  private:
    size_t n, n2;          // transform length and padded convolution length
    cfftp<T0> plan;        // complex FFT plan of length n2
    arr<cmplx<T0>> mem;    // backing storage for bk and bkf
    cmplx<T0> *bk, *bkf;   // chirp sequence b_k and its (scaled) DFT

    // Core transform: chirp-modulate, convolve via FFT, demodulate.
    // fwd selects the transform direction at compile time.
    template<bool fwd, typename T> void fft(cmplx<T> c[], T0 fct) const
      {
      arr<cmplx<T>> akf(n2);

      /* initialize a_k and FFT it */
      for (size_t m=0; m<n; ++m)
        special_mul<fwd>(c[m],bk[m],akf[m]);
      // zero of the element type (works for vectorised T as well)
      auto zero = akf[0]*T0(0);
      for (size_t m=n; m<n2; ++m)
        akf[m]=zero;

      plan.exec (akf.data(),1.,true);

      /* do the convolution */
      // bkf is conjugate-symmetric, so only the first half is stored;
      // mirror indices share the same bkf entry.
      akf[0] = akf[0].template special_mul<!fwd>(bkf[0]);
      for (size_t m=1; m<(n2+1)/2; ++m)
        {
        akf[m] = akf[m].template special_mul<!fwd>(bkf[m]);
        akf[n2-m] = akf[n2-m].template special_mul<!fwd>(bkf[m]);
        }
      if ((n2&1)==0)
        akf[n2/2] = akf[n2/2].template special_mul<!fwd>(bkf[n2/2]);

      /* inverse FFT */
      plan.exec (akf.data(),1.,false);

      /* multiply by b_k */
      for (size_t m=0; m<n; ++m)
        c[m] = akf[m].template special_mul<fwd>(bk[m])*fct;
      }

  public:
    POCKETFFT_NOINLINE explicit fftblue(size_t length)
      : n(length), n2(util::good_size_cmplx(n*2-1)), plan(n2), mem(n+n2/2+1),
        bk(mem.data()), bkf(mem.data()+n)
      {
      /* initialize b_k */
      sincos_2pibyn<T0> tmp(2*n);
      bk[0].Set(1, 0);

      // coeff accumulates m^2 mod 2n incrementally ((m)^2-(m-1)^2 = 2m-1),
      // avoiding overflow in m*m for large n.
      size_t coeff=0;
      for (size_t m=1; m<n; ++m)
        {
        coeff+=2*m-1;
        if (coeff>=2*n) coeff-=2*n;
        bk[m] = tmp[coeff];
        }

      /* initialize the zero-padded, Fourier transformed b_k. Add normalisation. */
      arr<cmplx<T0>> tbkf(n2);
      T0 xn2 = T0(1)/T0(n2);
      tbkf[0] = bk[0]*xn2;
      for (size_t m=1; m<n; ++m)
        tbkf[m] = tbkf[n2-m] = bk[m]*xn2;
      for (size_t m=n;m<=(n2-n);++m)
        tbkf[m].Set(0.,0.);
      plan.exec(tbkf.data(),1.,true);
      // only the first half is kept; fft() exploits the symmetry
      for (size_t i=0; i<n2/2+1; ++i)
        bkf[i] = tbkf[i];
      }

    // Complex transform of length n; fwd selects direction.
    template<typename T> void exec(cmplx<T> c[], T0 fct, bool fwd) const
      { fwd ? fft<true>(c,fct) : fft<false>(c,fct); }

    // Real-valued transform emulated via the complex transform; input and
    // output use pocketfft's packed halfcomplex layout in c.
    template<typename T> void exec_r(T c[], T0 fct, bool fwd)
      {
      arr<cmplx<T>> tmp(n);
      if (fwd)
        {
        auto zero = T0(0)*c[0];
        for (size_t m=0; m<n; ++m)
          tmp[m].Set(c[m], zero);
        fft<true>(tmp.data(),fct);
        c[0] = tmp[0].r;
        // copy_n over &tmp[1].r walks the interleaved (r,i) members,
        // producing the packed halfcomplex output in one pass — relies on
        // cmplx laying out r and i contiguously.
        std::copy_n (&tmp[1].r, n-1, &c[1]);
        }
      else
        {
        // rebuild the full conjugate-symmetric spectrum from the packed input
        tmp[0].Set(c[0],c[0]*0);
        std::copy_n (c+1, n-1, &tmp[1].r);
        if ((n&1)==0) tmp[n/2].i=T0(0)*c[0];
        for (size_t m=1; 2*m<n; ++m)
          tmp[n-m].Set(tmp[m].r, -tmp[m].i);
        fft<false>(tmp.data(),fct);
        for (size_t m=0; m<n; ++m)
          c[m] = tmp[m].r;
        }
      }
  };
2635
+
2636
+ //
2637
+ // flexible (FFTPACK/Bluestein) complex 1D transform
2638
+ //
2639
+
2640
// Flexible complex 1D transform: picks either the direct mixed-radix
// FFTPACK-style plan (cfftp) or a Bluestein plan (fftblue), whichever the
// cost heuristic predicts to be faster for the given length.
template<typename T0> class pocketfft_c
  {
  private:
    // exactly one of these is non-null after construction
    std::unique_ptr<cfftp<T0>> packplan;
    std::unique_ptr<fftblue<T0>> blueplan;
    size_t len;

  public:
    POCKETFFT_NOINLINE explicit pocketfft_c(size_t length)
      : len(length)
      {
      if (length==0) throw std::runtime_error("zero-length FFT requested");
      // small lengths (or smooth lengths) always use the direct plan
      size_t tmp = (length<50) ? 0 : util::largest_prime_factor(length);
      if (tmp*tmp <= length)
        {
        packplan=std::unique_ptr<cfftp<T0>>(new cfftp<T0>(length));
        return;
        }
      // large prime factor present: compare estimated costs of the direct
      // plan vs. Bluestein (which needs two FFTs of a padded good size)
      double comp1 = util::cost_guess(length);
      double comp2 = 2*util::cost_guess(util::good_size_cmplx(2*length-1));
      comp2*=1.5; /* fudge factor that appears to give good overall performance */
      if (comp2<comp1) // use Bluestein
        blueplan=std::unique_ptr<fftblue<T0>>(new fftblue<T0>(length));
      else
        packplan=std::unique_ptr<cfftp<T0>>(new cfftp<T0>(length));
      }

    // Execute the transform in place; fwd selects direction, fct scales.
    template<typename T> POCKETFFT_NOINLINE void exec(cmplx<T> c[], T0 fct, bool fwd) const
      { packplan ? packplan->exec(c,fct,fwd) : blueplan->exec(c,fct,fwd); }

    size_t length() const { return len; }
  };
2672
+
2673
+ //
2674
+ // flexible (FFTPACK/Bluestein) real-valued 1D transform
2675
+ //
2676
+
2677
// Flexible real-valued 1D transform: direct mixed-radix plan (rfftp) or
// Bluestein (fftblue::exec_r), selected by the same cost heuristic as
// pocketfft_c (the real plan's cost estimate is halved).
template<typename T0> class pocketfft_r
  {
  private:
    // exactly one of these is non-null after construction
    std::unique_ptr<rfftp<T0>> packplan;
    std::unique_ptr<fftblue<T0>> blueplan;
    size_t len;

  public:
    POCKETFFT_NOINLINE explicit pocketfft_r(size_t length)
      : len(length)
      {
      if (length==0) throw std::runtime_error("zero-length FFT requested");
      // small lengths (or smooth lengths) always use the direct plan
      size_t tmp = (length<50) ? 0 : util::largest_prime_factor(length);
      if (tmp*tmp <= length)
        {
        packplan=std::unique_ptr<rfftp<T0>>(new rfftp<T0>(length));
        return;
        }
      // real transform costs roughly half of a complex one of equal length
      double comp1 = 0.5*util::cost_guess(length);
      double comp2 = 2*util::cost_guess(util::good_size_cmplx(2*length-1));
      comp2*=1.5; /* fudge factor that appears to give good overall performance */
      if (comp2<comp1) // use Bluestein
        blueplan=std::unique_ptr<fftblue<T0>>(new fftblue<T0>(length));
      else
        packplan=std::unique_ptr<rfftp<T0>>(new rfftp<T0>(length));
      }

    // Execute in place on packed real/halfcomplex data; fwd selects
    // direction, fct is the normalisation factor.
    template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool fwd) const
      { packplan ? packplan->exec(c,fct,fwd) : blueplan->exec_r(c,fct,fwd); }

    size_t length() const { return len; }
  };
2709
+
2710
+
2711
+ //
2712
+ // sine/cosine transforms
2713
+ //
2714
+
2715
// DCT-I of length N, computed via a real FFT of the even-symmetric
// extension of length 2*(N-1).
template<typename T0> class T_dct1
  {
  private:
    pocketfft_r<T0> fftplan;   // real FFT plan of length 2*(length-1)

  public:
    POCKETFFT_NOINLINE explicit T_dct1(size_t length)
      : fftplan(2*(length-1)) {}

    // In-place DCT-I on c; fct scales the result, ortho applies the
    // orthonormal endpoint weights. type/cosine are unused (DCT-I is its
    // own "sine" counterpart at this interface).
    template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho,
      int /*type*/, bool /*cosine*/) const
      {
      constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
      size_t N=fftplan.length(), n=N/2+1;
      // pre-scale the endpoints for the orthonormal variant
      if (ortho)
        { c[0]*=sqrt2; c[n-1]*=sqrt2; }
      // build the even-symmetric extension of length N
      arr<T> tmp(N);
      tmp[0] = c[0];
      for (size_t i=1; i<n; ++i)
        tmp[i] = tmp[N-i] = c[i];
      fftplan.exec(tmp.data(), fct, true);
      // pick the cosine coefficients out of the halfcomplex result
      c[0] = tmp[0];
      for (size_t i=1; i<n; ++i)
        c[i] = tmp[2*i-1];
      // undo the pre-scaling on the endpoints
      if (ortho)
        { c[0]*=sqrt2*T0(0.5); c[n-1]*=sqrt2*T0(0.5); }
      }

    size_t length() const { return fftplan.length()/2+1; }
  };
2745
+
2746
// DST-I of length N, computed via a real FFT of the odd-symmetric
// extension of length 2*(N+1).
template<typename T0> class T_dst1
  {
  private:
    pocketfft_r<T0> fftplan;   // real FFT plan of length 2*(length+1)

  public:
    POCKETFFT_NOINLINE explicit T_dst1(size_t length)
      : fftplan(2*(length+1)) {}

    // In-place DST-I on c; fct scales the result. ortho/type/cosine are
    // unused: DST-I is already orthogonal up to a constant factor.
    template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct,
      bool /*ortho*/, int /*type*/, bool /*cosine*/) const
      {
      size_t N=fftplan.length(), n=N/2-1;
      // build the odd-symmetric extension: 0, c[0..n-1], 0, -c[n-1..0]
      arr<T> tmp(N);
      tmp[0] = tmp[n+1] = c[0]*0;
      for (size_t i=0; i<n; ++i)
        { tmp[i+1]=c[i]; tmp[N-1-i]=-c[i]; }
      fftplan.exec(tmp.data(), fct, true);
      // the sine coefficients are (minus) the imaginary parts
      for (size_t i=0; i<n; ++i)
        c[i] = -tmp[2*i+2];
      }

    size_t length() const { return fftplan.length()/2-1; }
  };
2770
+
2771
// DCT-II/DST-II (type==2) and DCT-III/DST-III (otherwise) of length N,
// computed via a real FFT of the same length plus a twiddle stage.
// DST variants are obtained from the DCT ones by sign flips and reversal.
template<typename T0> class T_dcst23
  {
  private:
    pocketfft_r<T0> fftplan;
    std::vector<T0> twiddle;   // twiddle[i] = cos(2*pi*(i+1)/(4*N))

  public:
    POCKETFFT_NOINLINE explicit T_dcst23(size_t length)
      : fftplan(length), twiddle(length)
      {
      sincos_2pibyn<T0> tw(4*length);
      for (size_t i=0; i<length; ++i)
        twiddle[i] = tw[i+1].r;
      }

    // In-place transform on c.
    //   fct    : normalisation factor
    //   ortho  : orthonormal endpoint scaling
    //   type   : 2 for DCT/DST-II, anything else for DCT/DST-III
    //   cosine : true for DCT, false for DST
    template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct, bool ortho,
      int type, bool cosine) const
      {
      constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
      size_t N=length();
      size_t NS2 = (N+1)/2;
      if (type==2)
        {
        // DST-II = DCT-II of the sign-alternated input
        if (!cosine)
          for (size_t k=1; k<N; k+=2)
            c[k] = -c[k];
        c[0] *= 2;
        if ((N&1)==0) c[N-1]*=2;
        for (size_t k=1; k<N-1; k+=2)
          MPINPLACE(c[k+1], c[k]);
        fftplan.exec(c, fct, false);   // inverse real FFT
        // post-twiddle to turn FFT output into DCT-II coefficients
        for (size_t k=1, kc=N-1; k<NS2; ++k, --kc)
          {
          T t1 = twiddle[k-1]*c[kc]+twiddle[kc-1]*c[k];
          T t2 = twiddle[k-1]*c[k]-twiddle[kc-1]*c[kc];
          c[k] = T0(0.5)*(t1+t2); c[kc]=T0(0.5)*(t1-t2);
          }
        if ((N&1)==0)
          c[NS2] *= twiddle[NS2-1];
        // DST-II output is the reversed DCT-II output
        if (!cosine)
          for (size_t k=0, kc=N-1; k<kc; ++k, --kc)
            std::swap(c[k], c[kc]);
        if (ortho)
          cosine ? c[0]*=sqrt2*T0(0.5) : c[N-1]*=sqrt2*T0(0.5);
        }
      else
        {
        // type III: mirror image of the type-II data flow
        if (ortho)
          cosine ? c[0]*=sqrt2 : c[N-1]*=sqrt2;
        if (!cosine)
          for (size_t k=0, kc=N-1; k<NS2; ++k, --kc)
            std::swap(c[k], c[kc]);
        // pre-twiddle before the forward real FFT
        for (size_t k=1, kc=N-1; k<NS2; ++k, --kc)
          {
          T t1=c[k]+c[kc], t2=c[k]-c[kc];
          c[k] = twiddle[k-1]*t2+twiddle[kc-1]*t1;
          c[kc]= twiddle[k-1]*t1-twiddle[kc-1]*t2;
          }
        if ((N&1)==0)
          c[NS2] *= 2*twiddle[NS2-1];
        fftplan.exec(c, fct, true);
        for (size_t k=1; k<N-1; k+=2)
          MPINPLACE(c[k], c[k+1]);
        if (!cosine)
          for (size_t k=1; k<N; k+=2)
            c[k] = -c[k];
        }
      }

    size_t length() const { return fftplan.length(); }
  };
2842
+
2843
// DCT-IV/DST-IV of length N. Odd N uses an FFTW-derived real-FFT
// permutation scheme; even N maps the problem onto a complex FFT of
// length N/2. DST-IV is obtained from DCT-IV by reversing the input and
// sign-flipping every other output.
template<typename T0> class T_dcst4
  {
  private:
    size_t N;
    std::unique_ptr<pocketfft_c<T0>> fft;    // used for even N only
    std::unique_ptr<pocketfft_r<T0>> rfft;   // used for odd N only
    arr<cmplx<T0>> C2;                       // even-N pre/post twiddles

  public:
    POCKETFFT_NOINLINE explicit T_dcst4(size_t length)
      : N(length),
        fft((N&1) ? nullptr : new pocketfft_c<T0>(N/2)),
        rfft((N&1)? new pocketfft_r<T0>(N) : nullptr),
        C2((N&1) ? 0 : N/2)
      {
      if ((N&1)==0)
        {
        // C2[i] = conj(exp(2*pi*i_unit*(8i+1)/(16N))), the quarter-sample
        // shift twiddles for the even-N algorithm
        sincos_2pibyn<T0> tw(16*N);
        for (size_t i=0; i<N/2; ++i)
          C2[i] = conj(tw[8*i+1]);
        }
      }

    // In-place transform on c; fct scales, cosine selects DCT (true) or
    // DST (false). ortho/type unused: type IV is self-orthogonal.
    template<typename T> POCKETFFT_NOINLINE void exec(T c[], T0 fct,
      bool /*ortho*/, int /*type*/, bool cosine) const
      {
      size_t n2 = N/2;
      // DST-IV input = reversed DCT-IV input
      if (!cosine)
        for (size_t k=0, kc=N-1; k<n2; ++k, --kc)
          std::swap(c[k], c[kc]);
      if (N&1)
        {
        // The following code is derived from the FFTW3 function apply_re11()
        // and is released under the 3-clause BSD license with friendly
        // permission of Matteo Frigo and Steven G. Johnson.

        // permute the input into y so a plain real FFT yields the DCT-IV
        arr<T> y(N);
        {
        size_t i=0, m=n2;
        for (; m<N; ++i, m+=4)
          y[i] = c[m];
        for (; m<2*N; ++i, m+=4)
          y[i] = -c[2*N-m-1];
        for (; m<3*N; ++i, m+=4)
          y[i] = -c[m-2*N];
        for (; m<4*N; ++i, m+=4)
          y[i] = c[4*N-m-1];
        for (; i<N; ++i, m+=4)
          y[i] = c[m-4*N];
        }
        rfft->exec(y.data(), fct, true);
        {
        // SGN(i) = +/- sqrt(2), alternating with period 4 in i
        auto SGN = [](size_t i)
          {
          constexpr T0 sqrt2=T0(1.414213562373095048801688724209698L);
          return (i&2) ? -sqrt2 : sqrt2;
          };
        c[n2] = y[0]*SGN(n2+1);
        size_t i=0, i1=1, k=1;
        for (; k<n2; ++i, ++i1, k+=2)
          {
          c[i    ] = y[2*k-1]*SGN(i1) + y[2*k  ]*SGN(i);
          c[N -i1] = y[2*k-1]*SGN(N -i) - y[2*k  ]*SGN(N -i1);
          c[n2-i1] = y[2*k+1]*SGN(n2-i) - y[2*k+2]*SGN(n2-i1);
          c[n2+i1] = y[2*k+1]*SGN(n2+i+2) + y[2*k+2]*SGN(n2+i1);
          }
        if (k == n2)
          {
          c[i   ] = y[2*k-1]*SGN(i+1) + y[2*k]*SGN(i);
          c[N-i1] = y[2*k-1]*SGN(i+2) + y[2*k]*SGN(i1);
          }
        }

        // FFTW-derived code ends here
        }
      else
        {
        // even length algorithm from
        // https://www.appletonaudio.com/blog/2013/derivation-of-fast-dct-4-algorithm-based-on-dft/
        // pack (even-indexed, reversed odd-indexed) pairs, pre-twiddle,
        // complex FFT of length N/2, then post-twiddle and unpack
        arr<cmplx<T>> y(n2);
        for(size_t i=0; i<n2; ++i)
          {
          y[i].Set(c[2*i],c[N-1-2*i]);
          y[i] *= C2[i];
          }
        fft->exec(y.data(), fct, true);
        for(size_t i=0, ic=n2-1; i<n2; ++i, --ic)
          {
          c[2*i  ] = 2*(y[i ].r*C2[i ].r-y[i ].i*C2[i ].i);
          c[2*i+1] = -2*(y[ic].i*C2[ic].r+y[ic].r*C2[ic].i);
          }
        }
      // DST-IV output = DCT-IV output with every other sign flipped
      if (!cosine)
        for (size_t k=1; k<N; k+=2)
          c[k] = -c[k];
      }

    size_t length() const { return N; }
  };
2942
+
2943
+
2944
+ //
2945
+ // multi-D infrastructure
2946
+ //
2947
+
2948
// Return a (possibly cached) transform plan of type T for the given length.
// With POCKETFFT_CACHE_SIZE==0, a fresh plan is built every time; otherwise
// a mutex-protected, LRU-evicting cache of nmax plans is kept per plan type.
template<typename T> std::shared_ptr<T> get_plan(size_t length)
  {
#if POCKETFFT_CACHE_SIZE==0
  return std::make_shared<T>(length);
#else
  constexpr size_t nmax=POCKETFFT_CACHE_SIZE;
  // one static cache per instantiated plan type T
  static std::array<std::shared_ptr<T>, nmax> cache;
  static std::array<size_t, nmax> last_access{{0}};
  static size_t access_counter = 0;
  static std::mutex mut;

  // Linear scan for a cached plan of this length; bumps its LRU stamp.
  // Caller must hold `mut`.
  auto find_in_cache = [&]() -> std::shared_ptr<T>
    {
    for (size_t i=0; i<nmax; ++i)
      if (cache[i] && (cache[i]->length()==length))
        {
        // no need to update if this is already the most recent entry
        if (last_access[i]!=access_counter)
          {
          last_access[i] = ++access_counter;
          // Guard against overflow
          if (access_counter == 0)
            last_access.fill(0);
          }
        return cache[i];
        }

    return nullptr;
    };

  {
  std::lock_guard<std::mutex> lock(mut);
  auto p = find_in_cache();
  if (p) return p;
  }
  // Plan construction is expensive, so it happens outside the lock; a
  // second lookup below handles the race where another thread built the
  // same plan in the meantime.
  auto plan = std::make_shared<T>(length);
  {
  std::lock_guard<std::mutex> lock(mut);
  auto p = find_in_cache();
  if (p) return p;

  // evict the least-recently-used slot
  size_t lru = 0;
  for (size_t i=1; i<nmax; ++i)
    if (last_access[i] < last_access[lru])
      lru = i;

  cache[lru] = plan;
  last_access[lru] = ++access_counter;
  }
  return plan;
#endif
  }
3000
+
3001
// Lightweight descriptor of an n-dimensional array: its shape and its
// per-axis strides (strides are in *bytes*, as used by cndarr/ndarr below).
class arr_info
  {
  protected:
    shape_t shp;   // extent of each axis
    stride_t str;  // byte stride of each axis

  public:
    arr_info(shape_t shape_, stride_t stride_)
      : shp(std::move(shape_)), str(std::move(stride_)) {}
    size_t ndim() const { return shp.size(); }
    // total number of elements (product of all extents)
    size_t size() const { return util::prod(shp); }
    const shape_t &shape() const { return shp; }
    size_t shape(size_t i) const { return shp[i]; }
    const stride_t &stride() const { return str; }
    const ptrdiff_t &stride(size_t i) const { return str[i]; }
  };
3017
+
3018
// Read-only view of an n-dimensional array of T. The data pointer is kept
// as char* so that the byte offsets produced by the iterator classes can be
// applied directly.
template<typename T> class cndarr: public arr_info
  {
  protected:
    const char *d;  // base address of the underlying storage

  public:
    cndarr(const void *data_, const shape_t &shape_, const stride_t &stride_)
      : arr_info(shape_, stride_),
        d(reinterpret_cast<const char *>(data_)) {}
    // ofs is a byte offset relative to the base pointer
    const T &operator[](ptrdiff_t ofs) const
      { return *reinterpret_cast<const T *>(d+ofs); }
  };
3030
+
3031
// Mutable view of an n-dimensional array; extends cndarr with write access.
template<typename T> class ndarr: public cndarr<T>
  {
  public:
    ndarr(void *data_, const shape_t &shape_, const stride_t &stride_)
      : cndarr<T>::cndarr(const_cast<const void *>(data_), shape_, stride_)
      {}
    // ofs is a byte offset; const_cast is safe because the constructor
    // received a mutable pointer.
    T &operator[](ptrdiff_t ofs)
      { return *reinterpret_cast<T *>(const_cast<char *>(cndarr<T>::d+ofs)); }
  };
3040
+
3041
// Iterates simultaneously over all 1-D "lines" along axis `idim` of an
// input and an output array. Up to N line offsets can be buffered at a time
// (advance(n)), which is how the vectorized code paths gather several lines
// at once. On construction, the full set of lines is partitioned evenly
// among the calling threads (via threading::thread_id()/num_threads()), so
// each thread's instance only visits its own share.
template<size_t N> class multi_iter
  {
  private:
    shape_t pos;                  // current multi-index (axis idim unused)
    const arr_info &iarr, &oarr;
    // p_ii/p_oi: byte offset of the *next* line; p_i/p_o: buffered offsets
    // of the lines fetched by the latest advance(); str_i/str_o: byte
    // stride along the transform axis.
    ptrdiff_t p_ii, p_i[N], str_i, p_oi, p_o[N], str_o;
    size_t idim, rem;             // transform axis, lines still to visit

    // Step to the next line: odometer-style increment over all axes except
    // idim, keeping the input and output byte offsets in sync.
    void advance_i()
      {
      for (int i_=int(pos.size())-1; i_>=0; --i_)
        {
        auto i = size_t(i_);
        if (i==idim) continue;
        p_ii += iarr.stride(i);
        p_oi += oarr.stride(i);
        if (++pos[i] < iarr.shape(i))
          return;
        pos[i] = 0;
        p_ii -= ptrdiff_t(iarr.shape(i))*iarr.stride(i);
        p_oi -= ptrdiff_t(oarr.shape(i))*oarr.stride(i);
        }
      }

  public:
    multi_iter(const arr_info &iarr_, const arr_info &oarr_, size_t idim_)
      : pos(iarr_.ndim(), 0), iarr(iarr_), oarr(oarr_), p_ii(0),
        str_i(iarr.stride(idim_)), p_oi(0), str_o(oarr.stride(idim_)),
        idim(idim_), rem(iarr.size()/iarr.shape(idim))
      {
      auto nshares = threading::num_threads();
      if (nshares==1) return;
      if (nshares==0) throw std::runtime_error("can't run with zero threads");
      auto myshare = threading::thread_id();
      if (myshare>=nshares) throw std::runtime_error("impossible share requested");
      // Split `rem` lines into nshares near-equal chunks; the first
      // `additional` shares get one extra line. [lo,hi) is this thread's.
      size_t nbase = rem/nshares;
      size_t additional = rem%nshares;
      size_t lo = myshare*nbase + ((myshare<additional) ? myshare : additional);
      size_t hi = lo+nbase+(myshare<additional);
      size_t todo = hi-lo;

      // Fast-forward pos/p_ii/p_oi to line number `lo` by decomposing `lo`
      // in the mixed radix given by the non-transform axes.
      size_t chunk = rem;
      for (size_t i=0; i<pos.size(); ++i)
        {
        if (i==idim) continue;
        chunk /= iarr.shape(i);
        size_t n_advance = lo/chunk;
        pos[i] += n_advance;
        p_ii += ptrdiff_t(n_advance)*iarr.stride(i);
        p_oi += ptrdiff_t(n_advance)*oarr.stride(i);
        lo -= n_advance*chunk;
        }
      rem = todo;
      }
    // Buffer the offsets of the next n lines (n<=N) and move past them.
    void advance(size_t n)
      {
      if (rem<n) throw std::runtime_error("underrun");
      for (size_t i=0; i<n; ++i)
        {
        p_i[i] = p_ii;
        p_o[i] = p_oi;
        advance_i();
        }
      rem -= n;
      }
    // Byte offset of element i on buffered line 0 (input side).
    ptrdiff_t iofs(size_t i) const { return p_i[0] + ptrdiff_t(i)*str_i; }
    // Byte offset of element i on buffered line j (input side).
    ptrdiff_t iofs(size_t j, size_t i) const { return p_i[j] + ptrdiff_t(i)*str_i; }
    ptrdiff_t oofs(size_t i) const { return p_o[0] + ptrdiff_t(i)*str_o; }
    ptrdiff_t oofs(size_t j, size_t i) const { return p_o[j] + ptrdiff_t(i)*str_o; }
    size_t length_in() const { return iarr.shape(idim); }
    size_t length_out() const { return oarr.shape(idim); }
    ptrdiff_t stride_in() const { return str_i; }
    ptrdiff_t stride_out() const { return str_o; }
    size_t remaining() const { return rem; }
  };
3116
+
3117
// Plain element-by-element iterator over an n-D array in row-major index
// order, yielding the byte offset of each element.
class simple_iter
  {
  private:
    shape_t pos;           // current multi-index
    const arr_info &arr;
    ptrdiff_t p;           // byte offset of the current element
    size_t rem;            // elements still to visit

  public:
    explicit simple_iter(const arr_info &arr_)
      : pos(arr_.ndim(), 0), arr(arr_), p(0), rem(arr_.size()) {}
    void advance()
      {
      --rem;
      // odometer increment, last axis fastest
      for (int i_=int(pos.size())-1; i_>=0; --i_)
        {
        auto i = size_t(i_);
        p += arr.stride(i);
        if (++pos[i] < arr.shape(i))
          return;
        pos[i] = 0;
        p -= ptrdiff_t(arr.shape(i))*arr.stride(i);
        }
      }
    ptrdiff_t ofs() const { return p; }
    size_t remaining() const { return rem; }
  };
3144
+
3145
// Iterator used by the genuine-Hartley code: walks the "half" array (the
// last transform axis truncated to n/2+1) and, for every position, also
// yields the byte offset of the index-reversed position (index negated
// modulo the axis length on every transformed axis). This pairs each
// hermitian-symmetric element with its mirror.
class rev_iter
  {
  private:
    shape_t pos;                 // current multi-index within shp
    const arr_info &arr;
    std::vector<char> rev_axis;  // 1 for axes that are index-reversed
    std::vector<char> rev_jump;  // pending wrap correction per reversed axis
    size_t last_axis, last_size;
    shape_t shp;                 // iteration shape (last axis halved)
    ptrdiff_t p, rp;             // forward and reversed byte offsets
    size_t rem;                  // positions still to visit

  public:
    rev_iter(const arr_info &arr_, const shape_t &axes)
      : pos(arr_.ndim(), 0), arr(arr_), rev_axis(arr_.ndim(), 0),
        rev_jump(arr_.ndim(), 1), p(0), rp(0)
      {
      for (auto ax: axes)
        rev_axis[ax]=1;
      last_axis = axes.back();
      last_size = arr.shape(last_axis)/2 + 1;
      shp = arr.shape();
      shp[last_axis] = last_size;   // only iterate the stored half
      rem=1;
      for (auto i: shp)
        rem *= i;
      }
    void advance()
      {
      --rem;
      for (int i_=int(pos.size())-1; i_>=0; --i_)
        {
        auto i = size_t(i_);
        p += arr.stride(i);
        if (!rev_axis[i])
          rp += arr.stride(i);
        else
          {
          // reversed axis runs backwards; the first step away from index 0
          // must wrap from 0 to shape-1 (handled via rev_jump)
          rp -= arr.stride(i);
          if (rev_jump[i])
            {
            rp += ptrdiff_t(arr.shape(i))*arr.stride(i);
            rev_jump[i] = 0;
            }
          }
        if (++pos[i] < shp[i])
          return;
        // axis i wrapped: reset forward offset, and for reversed axes
        // restore the offset and re-arm the wrap correction
        pos[i] = 0;
        p -= ptrdiff_t(shp[i])*arr.stride(i);
        if (rev_axis[i])
          {
          rp -= ptrdiff_t(arr.shape(i)-shp[i])*arr.stride(i);
          rev_jump[i] = 1;
          }
        else
          rp -= ptrdiff_t(shp[i])*arr.stride(i);
        }
      }
    ptrdiff_t ofs() const { return p; }       // offset of current position
    ptrdiff_t rev_ofs() const { return rp; }  // offset of mirrored position
    size_t remaining() const { return rem; }
  };
3207
+
3208
// Maps a scalar floating-point type to the corresponding SIMD vector type
// (GCC/Clang vector extension of VLEN<T>::val lanes). Unspecialized for
// non-vectorizable types.
template<typename T> struct VTYPE {};
template <typename T> using vtype_t = typename VTYPE<T>::type;

#ifndef POCKETFFT_NO_VECTORS
template<> struct VTYPE<float>
  {
  using type = float __attribute__ ((vector_size (VLEN<float>::val*sizeof(float))));
  };
template<> struct VTYPE<double>
  {
  using type = double __attribute__ ((vector_size (VLEN<double>::val*sizeof(double))));
  };
template<> struct VTYPE<long double>
  {
  using type = long double __attribute__ ((vector_size (VLEN<long double>::val*sizeof(long double))));
  };
#endif
3225
+
3226
+ template<typename T> arr<char> alloc_tmp(const shape_t &shape,
3227
+ size_t axsize, size_t elemsize)
3228
+ {
3229
+ auto othersize = util::prod(shape)/axsize;
3230
+ auto tmpsize = axsize*((othersize>=VLEN<T>::val) ? VLEN<T>::val : 1);
3231
+ return arr<char>(tmpsize*elemsize);
3232
+ }
3233
+ template<typename T> arr<char> alloc_tmp(const shape_t &shape,
3234
+ const shape_t &axes, size_t elemsize)
3235
+ {
3236
+ size_t fullsize=util::prod(shape);
3237
+ size_t tmpsize=0;
3238
+ for (size_t i=0; i<axes.size(); ++i)
3239
+ {
3240
+ auto axsize = shape[axes[i]];
3241
+ auto othersize = fullsize/axsize;
3242
+ auto sz = axsize*((othersize>=VLEN<T>::val) ? VLEN<T>::val : 1);
3243
+ if (sz>tmpsize) tmpsize=sz;
3244
+ }
3245
+ return arr<char>(tmpsize*elemsize);
3246
+ }
3247
+
3248
+ template <typename T, size_t vlen> void copy_input(const multi_iter<vlen> &it,
3249
+ const cndarr<cmplx<T>> &src, cmplx<vtype_t<T>> *POCKETFFT_RESTRICT dst)
3250
+ {
3251
+ for (size_t i=0; i<it.length_in(); ++i)
3252
+ for (size_t j=0; j<vlen; ++j)
3253
+ {
3254
+ dst[i].r[j] = src[it.iofs(j,i)].r;
3255
+ dst[i].i[j] = src[it.iofs(j,i)].i;
3256
+ }
3257
+ }
3258
+
3259
+ template <typename T, size_t vlen> void copy_input(const multi_iter<vlen> &it,
3260
+ const cndarr<T> &src, vtype_t<T> *POCKETFFT_RESTRICT dst)
3261
+ {
3262
+ for (size_t i=0; i<it.length_in(); ++i)
3263
+ for (size_t j=0; j<vlen; ++j)
3264
+ dst[i][j] = src[it.iofs(j,i)];
3265
+ }
3266
+
3267
+ template <typename T, size_t vlen> void copy_input(const multi_iter<vlen> &it,
3268
+ const cndarr<T> &src, T *POCKETFFT_RESTRICT dst)
3269
+ {
3270
+ if (dst == &src[it.iofs(0)]) return; // in-place
3271
+ for (size_t i=0; i<it.length_in(); ++i)
3272
+ dst[i] = src[it.iofs(i)];
3273
+ }
3274
+
3275
+ template<typename T, size_t vlen> void copy_output(const multi_iter<vlen> &it,
3276
+ const cmplx<vtype_t<T>> *POCKETFFT_RESTRICT src, ndarr<cmplx<T>> &dst)
3277
+ {
3278
+ for (size_t i=0; i<it.length_out(); ++i)
3279
+ for (size_t j=0; j<vlen; ++j)
3280
+ dst[it.oofs(j,i)].Set(src[i].r[j],src[i].i[j]);
3281
+ }
3282
+
3283
+ template<typename T, size_t vlen> void copy_output(const multi_iter<vlen> &it,
3284
+ const vtype_t<T> *POCKETFFT_RESTRICT src, ndarr<T> &dst)
3285
+ {
3286
+ for (size_t i=0; i<it.length_out(); ++i)
3287
+ for (size_t j=0; j<vlen; ++j)
3288
+ dst[it.oofs(j,i)] = src[i][j];
3289
+ }
3290
+
3291
+ template<typename T, size_t vlen> void copy_output(const multi_iter<vlen> &it,
3292
+ const T *POCKETFFT_RESTRICT src, ndarr<T> &dst)
3293
+ {
3294
+ if (src == &dst[it.oofs(0)]) return; // in-place
3295
+ for (size_t i=0; i<it.length_out(); ++i)
3296
+ dst[it.oofs(i)] = src[i];
3297
+ }
3298
+
3299
// Maps a scalar type to its SIMD counterpart: T -> vtype_t<T> and
// cmplx<T> -> cmplx<vtype_t<T>>.
template <typename T> struct add_vec { using type = vtype_t<T>; };
template <typename T> struct add_vec<cmplx<T>>
  { using type = cmplx<vtype_t<T>>; };
template <typename T> using add_vec_t = typename add_vec<T>::type;
3303
+
3304
// Driver for multi-dimensional transforms that are applied axis by axis.
// For every requested axis a plan is (re)built only when the length
// changes, the work is split across threads, and `exec` is invoked once per
// 1-D line (vectorized in batches of VLEN lanes where possible).
// If allow_inplace is true and the output line is contiguous, the line is
// transformed directly in the output array instead of a scratch buffer.
template<typename Tplan, typename T, typename T0, typename Exec>
POCKETFFT_NOINLINE void general_nd(const cndarr<T> &in, ndarr<T> &out,
  const shape_t &axes, T0 fct, size_t nthreads, const Exec & exec,
  const bool allow_inplace=true)
  {
  std::shared_ptr<Tplan> plan;

  for (size_t iax=0; iax<axes.size(); ++iax)
    {
    size_t len=in.shape(axes[iax]);
    // reuse the previous plan if the axis length is unchanged
    if ((!plan) || (len!=plan->length()))
      plan = get_plan<Tplan>(len);

    threading::thread_map(
      util::thread_count(nthreads, in.shape(), axes[iax], VLEN<T>::val),
      [&] {
        constexpr auto vlen = VLEN<T0>::val;
        auto storage = alloc_tmp<T0>(in.shape(), len, sizeof(T));
        // after the first axis, the partially-transformed data lives in out
        const auto &tin(iax==0? in : out);
        multi_iter<vlen> it(tin, out, axes[iax]);
#ifndef POCKETFFT_NO_VECTORS
        // vectorized path: transform vlen lines per batch
        if (vlen>1)
          while (it.remaining()>=vlen)
            {
            it.advance(vlen);
            auto tdatav = reinterpret_cast<add_vec_t<T> *>(storage.data());
            exec(it, tin, out, tdatav, *plan, fct);
            }
#endif
        // scalar path for the leftover lines
        while (it.remaining()>0)
          {
          it.advance(1);
          auto buf = allow_inplace && it.stride_out() == sizeof(T) ?
            &out[it.oofs(0)] : reinterpret_cast<T *>(storage.data());
          exec(it, tin, out, buf, *plan, fct);
          }
      });  // end of parallel region
    fct = T0(1); // factor has been applied, use 1 for remaining axes
    }
  }
3344
+
3345
// Per-line executor for complex-to-complex transforms: gather a line,
// run the (forward or backward) FFT plan, scatter the result.
struct ExecC2C
  {
  bool forward;

  template <typename T0, typename T, size_t vlen> void operator () (
    const multi_iter<vlen> &it, const cndarr<cmplx<T0>> &in,
    ndarr<cmplx<T0>> &out, T * buf, const pocketfft_c<T0> &plan, T0 fct) const
    {
    copy_input(it, in, buf);
    plan.exec(buf, fct, forward);
    copy_output(it, buf, out);
    }
  };
3358
+
3359
// Convert a halfcomplex FFT result (FFTPACK packing: r0, re1, im1, re2, ...)
// into Hartley output and scatter it to `vlen` output lines at once.
// Hartley element k is re(k)+im(k); the mirror element n-k gets re(k)-im(k).
template <typename T, size_t vlen> void copy_hartley(const multi_iter<vlen> &it,
  const vtype_t<T> *POCKETFFT_RESTRICT src, ndarr<T> &dst)
  {
  // DC component is purely real
  for (size_t j=0; j<vlen; ++j)
    dst[it.oofs(j,0)] = src[0][j];
  // i walks the packed (re,im) pairs; i1 and i2 are the mirrored
  // destination indices k and n-k
  size_t i=1, i1=1, i2=it.length_out()-1;
  for (i=1; i<it.length_out()-1; i+=2, ++i1, --i2)
    for (size_t j=0; j<vlen; ++j)
      {
      dst[it.oofs(j,i1)] = src[i][j]+src[i+1][j];
      dst[it.oofs(j,i2)] = src[i][j]-src[i+1][j];
      }
  // for even lengths the Nyquist component is real and unpaired
  if (i<it.length_out())
    for (size_t j=0; j<vlen; ++j)
      dst[it.oofs(j,i1)] = src[i][j];
  }
3375
+
3376
// Scalar variant of copy_hartley: convert one halfcomplex line (FFTPACK
// packing) into Hartley output. See the vectorized overload for details.
template <typename T, size_t vlen> void copy_hartley(const multi_iter<vlen> &it,
  const T *POCKETFFT_RESTRICT src, ndarr<T> &dst)
  {
  dst[it.oofs(0)] = src[0];  // DC component is purely real
  size_t i=1, i1=1, i2=it.length_out()-1;
  for (i=1; i<it.length_out()-1; i+=2, ++i1, --i2)
    {
    dst[it.oofs(i1)] = src[i]+src[i+1];  // re+im at index k
    dst[it.oofs(i2)] = src[i]-src[i+1];  // re-im at index n-k
    }
  // even lengths: unpaired real Nyquist component
  if (i<it.length_out())
    dst[it.oofs(i1)] = src[i];
  }
3389
+
3390
// Per-line executor for the separable Hartley transform: real FFT followed
// by the halfcomplex-to-Hartley rearrangement.
struct ExecHartley
  {
  template <typename T0, typename T, size_t vlen> void operator () (
    const multi_iter<vlen> &it, const cndarr<T0> &in, ndarr<T0> &out,
    T * buf, const pocketfft_r<T0> &plan, T0 fct) const
    {
    copy_input(it, in, buf);
    plan.exec(buf, fct, true);   // forward real FFT
    copy_hartley(it, buf, out);
    }
  };
3401
+
3402
// Per-line executor for DCT/DST transforms. `type` (1-4), `cosine`
// (DCT vs. DST) and `ortho` (orthonormal scaling) are forwarded to the
// plan's exec method.
struct ExecDcst
  {
  bool ortho;
  int type;
  bool cosine;

  template <typename T0, typename T, typename Tplan, size_t vlen>
  void operator () (const multi_iter<vlen> &it, const cndarr<T0> &in,
    ndarr<T0> &out, T * buf, const Tplan &plan, T0 fct) const
    {
    copy_input(it, in, buf);
    plan.exec(buf, fct, ortho, type, cosine);
    copy_output(it, buf, out);
    }
  };
3417
+
3418
// Real-to-complex transform along one axis. Runs a real FFT per line and
// re-packs the FFTPACK halfcomplex result (r0, re1, im1, ...) into
// len/2+1 complex output values. For the backward direction the imaginary
// parts are negated (conjugated spectrum).
template<typename T> POCKETFFT_NOINLINE void general_r2c(
  const cndarr<T> &in, ndarr<cmplx<T>> &out, size_t axis, bool forward, T fct,
  size_t nthreads)
  {
  auto plan = get_plan<pocketfft_r<T>>(in.shape(axis));
  size_t len=in.shape(axis);
  threading::thread_map(
    util::thread_count(nthreads, in.shape(), axis, VLEN<T>::val),
    [&] {
      constexpr auto vlen = VLEN<T>::val;
      auto storage = alloc_tmp<T>(in.shape(), len, sizeof(T));
      multi_iter<vlen> it(in, out, axis);
#ifndef POCKETFFT_NO_VECTORS
      // vectorized path: vlen lines at a time
      if (vlen>1)
        while (it.remaining()>=vlen)
          {
          it.advance(vlen);
          auto tdatav = reinterpret_cast<vtype_t<T> *>(storage.data());
          copy_input(it, in, tdatav);
          plan->exec(tdatav, fct, true);
          // DC term: real only
          for (size_t j=0; j<vlen; ++j)
            out[it.oofs(j,0)].Set(tdatav[0][j]);
          size_t i=1, ii=1;
          if (forward)
            for (; i<len-1; i+=2, ++ii)
              for (size_t j=0; j<vlen; ++j)
                out[it.oofs(j,ii)].Set(tdatav[i][j], tdatav[i+1][j]);
          else
            // backward: conjugate the packed spectrum
            for (; i<len-1; i+=2, ++ii)
              for (size_t j=0; j<vlen; ++j)
                out[it.oofs(j,ii)].Set(tdatav[i][j], -tdatav[i+1][j]);
          // even lengths: real Nyquist term
          if (i<len)
            for (size_t j=0; j<vlen; ++j)
              out[it.oofs(j,ii)].Set(tdatav[i][j]);
          }
#endif
      // scalar path
      while (it.remaining()>0)
        {
        it.advance(1);
        auto tdata = reinterpret_cast<T *>(storage.data());
        copy_input(it, in, tdata);
        plan->exec(tdata, fct, true);
        out[it.oofs(0)].Set(tdata[0]);   // DC term: real only
        size_t i=1, ii=1;
        if (forward)
          for (; i<len-1; i+=2, ++ii)
            out[it.oofs(ii)].Set(tdata[i], tdata[i+1]);
        else
          // backward: conjugate the packed spectrum
          for (; i<len-1; i+=2, ++ii)
            out[it.oofs(ii)].Set(tdata[i], -tdata[i+1]);
        // even lengths: real Nyquist term
        if (i<len)
          out[it.oofs(ii)].Set(tdata[i]);
        }
    });  // end of parallel region
  }
3473
// Complex-to-real transform along one axis: the inverse of general_r2c.
// The len/2+1 complex input values are unpacked into FFTPACK halfcomplex
// layout (conjugating for the forward direction) and a backward real FFT
// is run per line.
template<typename T> POCKETFFT_NOINLINE void general_c2r(
  const cndarr<cmplx<T>> &in, ndarr<T> &out, size_t axis, bool forward, T fct,
  size_t nthreads)
  {
  auto plan = get_plan<pocketfft_r<T>>(out.shape(axis));
  size_t len=out.shape(axis);
  threading::thread_map(
    util::thread_count(nthreads, in.shape(), axis, VLEN<T>::val),
    [&] {
      constexpr auto vlen = VLEN<T>::val;
      auto storage = alloc_tmp<T>(out.shape(), len, sizeof(T));
      multi_iter<vlen> it(in, out, axis);
#ifndef POCKETFFT_NO_VECTORS
      // vectorized path: vlen lines at a time
      if (vlen>1)
        while (it.remaining()>=vlen)
          {
          it.advance(vlen);
          auto tdatav = reinterpret_cast<vtype_t<T> *>(storage.data());
          // DC term: only the real part is used
          for (size_t j=0; j<vlen; ++j)
            tdatav[0][j]=in[it.iofs(j,0)].r;
          {
          size_t i=1, ii=1;
          if (forward)
            // forward c2r: conjugate while unpacking
            for (; i<len-1; i+=2, ++ii)
              for (size_t j=0; j<vlen; ++j)
                {
                tdatav[i  ][j] = in[it.iofs(j,ii)].r;
                tdatav[i+1][j] = -in[it.iofs(j,ii)].i;
                }
          else
            for (; i<len-1; i+=2, ++ii)
              for (size_t j=0; j<vlen; ++j)
                {
                tdatav[i  ][j] = in[it.iofs(j,ii)].r;
                tdatav[i+1][j] = in[it.iofs(j,ii)].i;
                }
          // even lengths: real Nyquist term
          if (i<len)
            for (size_t j=0; j<vlen; ++j)
              tdatav[i][j] = in[it.iofs(j,ii)].r;
          }
          plan->exec(tdatav, fct, false);
          copy_output(it, tdatav, out);
          }
#endif
      // scalar path
      while (it.remaining()>0)
        {
        it.advance(1);
        auto tdata = reinterpret_cast<T *>(storage.data());
        tdata[0]=in[it.iofs(0)].r;   // DC term: only the real part is used
        {
        size_t i=1, ii=1;
        if (forward)
          // forward c2r: conjugate while unpacking
          for (; i<len-1; i+=2, ++ii)
            {
            tdata[i  ] = in[it.iofs(ii)].r;
            tdata[i+1] = -in[it.iofs(ii)].i;
            }
        else
          for (; i<len-1; i+=2, ++ii)
            {
            tdata[i  ] = in[it.iofs(ii)].r;
            tdata[i+1] = in[it.iofs(ii)].i;
            }
        // even lengths: real Nyquist term
        if (i<len)
          tdata[i] = in[it.iofs(ii)].r;
        }
        plan->exec(tdata, fct, false);
        copy_output(it, tdata, out);
        }
    });  // end of parallel region
  }
3544
+
3545
// Per-line executor for FFTPACK-style real-to-real transforms.
// r2h selects real->halfcomplex (true) vs. halfcomplex->real (false);
// `forward` controls the sign convention, implemented by negating every
// second element (the imaginary parts in FFTPACK packing) before or after
// the transform.
struct ExecR2R
  {
  bool r2h, forward;

  template <typename T0, typename T, size_t vlen> void operator () (
    const multi_iter<vlen> &it, const cndarr<T0> &in, ndarr<T0> &out, T * buf,
    const pocketfft_r<T0> &plan, T0 fct) const
    {
    copy_input(it, in, buf);
    // h2r forward: conjugate the halfcomplex input first
    if ((!r2h) && forward)
      for (size_t i=2; i<it.length_out(); i+=2)
        buf[i] = -buf[i];
    plan.exec(buf, fct, r2h);
    // r2h backward: conjugate the halfcomplex output afterwards
    if (r2h && (!forward))
      for (size_t i=2; i<it.length_out(); i+=2)
        buf[i] = -buf[i];
    copy_output(it, buf, out);
    }
  };
3564
+
3565
+ template<typename T> void c2c(const shape_t &shape, const stride_t &stride_in,
3566
+ const stride_t &stride_out, const shape_t &axes, bool forward,
3567
+ const std::complex<T> *data_in, std::complex<T> *data_out, T fct,
3568
+ size_t nthreads=1)
3569
+ {
3570
+ if (util::prod(shape)==0) return;
3571
+ util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
3572
+ cndarr<cmplx<T>> ain(data_in, shape, stride_in);
3573
+ ndarr<cmplx<T>> aout(data_out, shape, stride_out);
3574
+ general_nd<pocketfft_c<T>>(ain, aout, axes, fct, nthreads, ExecC2C{forward});
3575
+ }
3576
+
3577
// Public entry point: discrete cosine transform of the given type (1-4)
// over the requested axes. `ortho` selects orthonormal scaling; strides
// are in bytes. Types 2 and 3 share the T_dcst23 plan; types 1 and 4 have
// dedicated plans.
template<typename T> void dct(const shape_t &shape,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  int type, const T *data_in, T *data_out, T fct, bool ortho, size_t nthreads=1)
  {
  if ((type<1) || (type>4)) throw std::invalid_argument("invalid DCT type");
  if (util::prod(shape)==0) return;   // nothing to do for empty arrays
  util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
  cndarr<T> ain(data_in, shape, stride_in);
  ndarr<T> aout(data_out, shape, stride_out);
  const ExecDcst exec{ortho, type, true};  // cosine=true
  if (type==1)
    general_nd<T_dct1<T>>(ain, aout, axes, fct, nthreads, exec);
  else if (type==4)
    general_nd<T_dcst4<T>>(ain, aout, axes, fct, nthreads, exec);
  else
    general_nd<T_dcst23<T>>(ain, aout, axes, fct, nthreads, exec);
  }
3594
+
3595
// Public entry point: discrete sine transform of the given type (1-4)
// over the requested axes. Mirrors dct() but with cosine=false; types 2/3
// share the T_dcst23 plan, type 4 shares T_dcst4 with the DCT.
template<typename T> void dst(const shape_t &shape,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  int type, const T *data_in, T *data_out, T fct, bool ortho, size_t nthreads=1)
  {
  if ((type<1) || (type>4)) throw std::invalid_argument("invalid DST type");
  if (util::prod(shape)==0) return;   // nothing to do for empty arrays
  util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
  cndarr<T> ain(data_in, shape, stride_in);
  ndarr<T> aout(data_out, shape, stride_out);
  const ExecDcst exec{ortho, type, false};  // cosine=false
  if (type==1)
    general_nd<T_dst1<T>>(ain, aout, axes, fct, nthreads, exec);
  else if (type==4)
    general_nd<T_dcst4<T>>(ain, aout, axes, fct, nthreads, exec);
  else
    general_nd<T_dcst23<T>>(ain, aout, axes, fct, nthreads, exec);
  }
3612
+
3613
// Public entry point: real-to-complex FFT along a single axis. The output
// shape equals the input shape except that the transformed axis has length
// n/2+1 (hermitian half-spectrum). In-place operation is not supported.
template<typename T> void r2c(const shape_t &shape_in,
  const stride_t &stride_in, const stride_t &stride_out, size_t axis,
  bool forward, const T *data_in, std::complex<T> *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape_in)==0) return;  // nothing to do for empty arrays
  util::sanity_check(shape_in, stride_in, stride_out, false, axis);
  cndarr<T> ain(data_in, shape_in, stride_in);
  shape_t shape_out(shape_in);
  shape_out[axis] = shape_in[axis]/2 + 1;  // half-spectrum length
  ndarr<cmplx<T>> aout(data_out, shape_out, stride_out);
  general_r2c(ain, aout, axis, forward, fct, nthreads);
  }
3626
+
3627
// Public entry point: real-to-complex FFT over several axes. The r2c step
// is done along the last axis (producing the half-spectrum); remaining
// axes are then handled by an in-place c2c on the output, with the scale
// factor already applied.
template<typename T> void r2c(const shape_t &shape_in,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  bool forward, const T *data_in, std::complex<T> *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape_in)==0) return;  // nothing to do for empty arrays
  util::sanity_check(shape_in, stride_in, stride_out, false, axes);
  r2c(shape_in, stride_in, stride_out, axes.back(), forward, data_in, data_out,
    fct, nthreads);
  if (axes.size()==1) return;

  shape_t shape_out(shape_in);
  shape_out[axes.back()] = shape_in[axes.back()]/2 + 1;
  auto newaxes = shape_t{axes.begin(), --axes.end()};  // all but the last axis
  c2c(shape_out, stride_out, stride_out, newaxes, forward, data_out, data_out,
    T(1), nthreads);   // factor already applied by the r2c step
  }
3644
+
3645
// Public entry point: complex-to-real FFT along a single axis. The input is
// the hermitian half-spectrum (transformed axis of length n/2+1); the
// output has the full length n. In-place operation is not supported.
template<typename T> void c2r(const shape_t &shape_out,
  const stride_t &stride_in, const stride_t &stride_out, size_t axis,
  bool forward, const std::complex<T> *data_in, T *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape_out)==0) return;  // nothing to do for empty arrays
  util::sanity_check(shape_out, stride_in, stride_out, false, axis);
  shape_t shape_in(shape_out);
  shape_in[axis] = shape_out[axis]/2 + 1;  // half-spectrum length
  cndarr<cmplx<T>> ain(data_in, shape_in, stride_in);
  ndarr<T> aout(data_out, shape_out, stride_out);
  general_c2r(ain, aout, axis, forward, fct, nthreads);
  }
3658
+
3659
// Public entry point: complex-to-real FFT over several axes. All axes
// except the last are first transformed c2c into a contiguous temporary
// buffer (the input must stay intact), then the last axis is transformed
// c2r into the real output with the scale factor applied.
template<typename T> void c2r(const shape_t &shape_out,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  bool forward, const std::complex<T> *data_in, T *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape_out)==0) return;  // nothing to do for empty arrays
  if (axes.size()==1)
    return c2r(shape_out, stride_in, stride_out, axes[0], forward,
      data_in, data_out, fct, nthreads);
  util::sanity_check(shape_out, stride_in, stride_out, false, axes);
  auto shape_in = shape_out;
  shape_in[axes.back()] = shape_out[axes.back()]/2 + 1;  // half-spectrum
  auto nval = util::prod(shape_in);
  // build C-contiguous byte strides for the temporary buffer
  stride_t stride_inter(shape_in.size());
  stride_inter.back() = sizeof(cmplx<T>);
  for (int i=int(shape_in.size())-2; i>=0; --i)
    stride_inter[size_t(i)] =
      stride_inter[size_t(i+1)]*ptrdiff_t(shape_in[size_t(i+1)]);
  arr<std::complex<T>> tmp(nval);
  auto newaxes = shape_t{axes.begin(), --axes.end()};  // all but last axis
  c2c(shape_in, stride_in, stride_inter, newaxes, forward, data_in, tmp.data(),
    T(1), nthreads);   // unscaled; fct is applied in the final c2r
  c2r(shape_out, stride_inter, stride_out, axes.back(), forward,
    tmp.data(), data_out, fct, nthreads);
  }
3684
+
3685
// Public entry point: FFTPACK-style real transforms (halfcomplex packing in
// the real array itself). real2hermitian selects r->halfcomplex vs.
// halfcomplex->r; `forward` selects the sign convention. In-place operation
// is allowed.
template<typename T> void r2r_fftpack(const shape_t &shape,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  bool real2hermitian, bool forward, const T *data_in, T *data_out, T fct,
  size_t nthreads=1)
  {
  if (util::prod(shape)==0) return;  // nothing to do for empty arrays
  util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
  cndarr<T> ain(data_in, shape, stride_in);
  ndarr<T> aout(data_out, shape, stride_out);
  general_nd<pocketfft_r<T>>(ain, aout, axes, fct, nthreads,
    ExecR2R{real2hermitian, forward});
  }
3697
+
3698
+ template<typename T> void r2r_separable_hartley(const shape_t &shape,
3699
+ const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
3700
+ const T *data_in, T *data_out, T fct, size_t nthreads=1)
3701
+ {
3702
+ if (util::prod(shape)==0) return;
3703
+ util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
3704
+ cndarr<T> ain(data_in, shape, stride_in);
3705
+ ndarr<T> aout(data_out, shape, stride_out);
3706
+ general_nd<pocketfft_r<T>>(ain, aout, axes, fct, nthreads, ExecHartley{},
3707
+ false);
3708
+ }
3709
+
3710
// Public entry point: genuine (non-separable) multi-dimensional Hartley
// transform. Implemented as a full r2c FFT into a temporary half-spectrum,
// followed by combining each spectrum value X(k) with its mirrored partner:
// out(k) = re+im at k, re-im at the index-reversed position (see rev_iter).
template<typename T> void r2r_genuine_hartley(const shape_t &shape,
  const stride_t &stride_in, const stride_t &stride_out, const shape_t &axes,
  const T *data_in, T *data_out, T fct, size_t nthreads=1)
  {
  if (util::prod(shape)==0) return;  // nothing to do for empty arrays
  // 1-D genuine Hartley coincides with the separable transform
  if (axes.size()==1)
    return r2r_separable_hartley(shape, stride_in, stride_out, axes, data_in,
      data_out, fct, nthreads);
  util::sanity_check(shape, stride_in, stride_out, data_in==data_out, axes);
  // temporary half-spectrum: last transform axis truncated to n/2+1
  shape_t tshp(shape);
  tshp[axes.back()] = tshp[axes.back()]/2+1;
  arr<std::complex<T>> tdata(util::prod(tshp));
  // C-contiguous byte strides for the temporary buffer
  stride_t tstride(shape.size());
  tstride.back()=sizeof(std::complex<T>);
  for (size_t i=tstride.size()-1; i>0; --i)
    tstride[i-1]=tstride[i]*ptrdiff_t(tshp[i]);
  r2c(shape, stride_in, tstride, axes, true, data_in, tdata.data(), fct, nthreads);
  cndarr<cmplx<T>> atmp(tdata.data(), tshp, tstride);
  ndarr<T> aout(data_out, shape, stride_out);
  simple_iter iin(atmp);
  rev_iter iout(aout, axes);
  // scatter re+im to each position and re-im to its mirror
  while(iin.remaining()>0)
    {
    auto v = atmp[iin.ofs()];
    aout[iout.ofs()] = v.r+v.i;
    aout[iout.rev_ofs()] = v.r-v.i;
    iin.advance(); iout.advance();
    }
  }
3739
+
3740
+ } // namespace detail
3741
+
3742
+ using detail::FORWARD;
3743
+ using detail::BACKWARD;
3744
+ using detail::shape_t;
3745
+ using detail::stride_t;
3746
+ using detail::c2c;
3747
+ using detail::c2r;
3748
+ using detail::r2c;
3749
+ using detail::r2r_fftpack;
3750
+ using detail::r2r_separable_hartley;
3751
+ using detail::r2r_genuine_hartley;
3752
+ using detail::dct;
3753
+ using detail::dst;
3754
+
3755
+ } // namespace pocketfft
3756
+
3757
+ #undef POCKETFFT_NOINLINE
3758
+ #undef POCKETFFT_RESTRICT
3759
+
3760
+ #endif // POCKETFFT_HDRONLY_H