nmatrix 0.0.6 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed: 67
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/Gemfile +5 -0
  4. data/History.txt +97 -0
  5. data/Manifest.txt +34 -7
  6. data/README.rdoc +13 -13
  7. data/Rakefile +36 -26
  8. data/ext/nmatrix/data/data.cpp +15 -2
  9. data/ext/nmatrix/data/data.h +4 -0
  10. data/ext/nmatrix/data/ruby_object.h +5 -14
  11. data/ext/nmatrix/extconf.rb +3 -2
  12. data/ext/nmatrix/{util/math.cpp → math.cpp} +296 -6
  13. data/ext/nmatrix/math/asum.h +143 -0
  14. data/ext/nmatrix/math/geev.h +82 -0
  15. data/ext/nmatrix/math/gemm.h +267 -0
  16. data/ext/nmatrix/math/gemv.h +208 -0
  17. data/ext/nmatrix/math/ger.h +96 -0
  18. data/ext/nmatrix/math/gesdd.h +80 -0
  19. data/ext/nmatrix/math/gesvd.h +78 -0
  20. data/ext/nmatrix/math/getf2.h +86 -0
  21. data/ext/nmatrix/math/getrf.h +240 -0
  22. data/ext/nmatrix/math/getri.h +107 -0
  23. data/ext/nmatrix/math/getrs.h +125 -0
  24. data/ext/nmatrix/math/idamax.h +86 -0
  25. data/ext/nmatrix/{util → math}/lapack.h +60 -356
  26. data/ext/nmatrix/math/laswp.h +165 -0
  27. data/ext/nmatrix/math/long_dtype.h +52 -0
  28. data/ext/nmatrix/math/math.h +1154 -0
  29. data/ext/nmatrix/math/nrm2.h +181 -0
  30. data/ext/nmatrix/math/potrs.h +125 -0
  31. data/ext/nmatrix/math/rot.h +141 -0
  32. data/ext/nmatrix/math/rotg.h +115 -0
  33. data/ext/nmatrix/math/scal.h +73 -0
  34. data/ext/nmatrix/math/swap.h +73 -0
  35. data/ext/nmatrix/math/trsm.h +383 -0
  36. data/ext/nmatrix/nmatrix.cpp +176 -152
  37. data/ext/nmatrix/nmatrix.h +1 -2
  38. data/ext/nmatrix/ruby_constants.cpp +9 -4
  39. data/ext/nmatrix/ruby_constants.h +1 -0
  40. data/ext/nmatrix/storage/dense.cpp +57 -41
  41. data/ext/nmatrix/storage/list.cpp +52 -50
  42. data/ext/nmatrix/storage/storage.cpp +59 -43
  43. data/ext/nmatrix/storage/yale.cpp +352 -333
  44. data/ext/nmatrix/storage/yale.h +4 -0
  45. data/lib/nmatrix.rb +2 -2
  46. data/lib/nmatrix/blas.rb +4 -4
  47. data/lib/nmatrix/enumerate.rb +241 -0
  48. data/lib/nmatrix/lapack.rb +54 -1
  49. data/lib/nmatrix/math.rb +462 -0
  50. data/lib/nmatrix/nmatrix.rb +210 -486
  51. data/lib/nmatrix/nvector.rb +0 -62
  52. data/lib/nmatrix/rspec.rb +75 -0
  53. data/lib/nmatrix/shortcuts.rb +136 -108
  54. data/lib/nmatrix/version.rb +1 -1
  55. data/spec/blas_spec.rb +20 -12
  56. data/spec/elementwise_spec.rb +22 -13
  57. data/spec/io_spec.rb +1 -0
  58. data/spec/lapack_spec.rb +197 -0
  59. data/spec/nmatrix_spec.rb +39 -38
  60. data/spec/nvector_spec.rb +3 -9
  61. data/spec/rspec_monkeys.rb +29 -0
  62. data/spec/rspec_spec.rb +34 -0
  63. data/spec/shortcuts_spec.rb +14 -16
  64. data/spec/slice_spec.rb +242 -186
  65. data/spec/spec_helper.rb +19 -0
  66. metadata +33 -5
  67. data/ext/nmatrix/util/math.h +0 -2612
@@ -0,0 +1,165 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == laswp.h
25
+ //
26
+ // laswp function in native C++.
27
+ //
28
+ /*
29
+ * Automatically Tuned Linear Algebra Software v3.8.4
30
+ * (C) Copyright 1999 R. Clint Whaley
31
+ *
32
+ * Redistribution and use in source and binary forms, with or without
33
+ * modification, are permitted provided that the following conditions
34
+ * are met:
35
+ * 1. Redistributions of source code must retain the above copyright
36
+ * notice, this list of conditions and the following disclaimer.
37
+ * 2. Redistributions in binary form must reproduce the above copyright
38
+ * notice, this list of conditions, and the following disclaimer in the
39
+ * documentation and/or other materials provided with the distribution.
40
+ * 3. The name of the ATLAS group or the names of its contributers may
41
+ * not be used to endorse or promote products derived from this
42
+ * software without specific written permission.
43
+ *
44
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
45
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
47
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS
48
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
54
+ * POSSIBILITY OF SUCH DAMAGE.
55
+ *
56
+ */
57
+
58
+ #ifndef LASWP_H
59
+ #define LASWP_H
60
+
61
+ namespace nm { namespace math {
62
+
63
+
64
+ /*
65
+ * ATLAS function which performs row interchanges on a general rectangular matrix. Modeled after the LAPACK LASWP function.
66
+ *
67
+ * This version is templated for use by template <> getrf().
68
+ */
69
+ template <typename DType>
70
+ inline void laswp(const int N, DType* A, const int lda, const int K1, const int K2, const int *piv, const int inci) {
71
+ //const int n = K2 - K1; // not sure why this is declared. commented it out because it's unused.
72
+
73
+ int nb = N >> 5;
74
+
75
+ const int mr = N - (nb<<5);
76
+ const int incA = lda << 5;
77
+
78
+ if (K2 < K1) return;
79
+
80
+ int i1, i2;
81
+ if (inci < 0) {
82
+ piv -= (K2-1) * inci;
83
+ i1 = K2 - 1;
84
+ i2 = K1;
85
+ } else {
86
+ piv += K1 * inci;
87
+ i1 = K1;
88
+ i2 = K2-1;
89
+ }
90
+
91
+ if (nb) {
92
+
93
+ do {
94
+ const int* ipiv = piv;
95
+ int i = i1;
96
+ int KeepOn;
97
+
98
+ do {
99
+ int ip = *ipiv; ipiv += inci;
100
+
101
+ if (ip != i) {
102
+ DType *a0 = &(A[i]),
103
+ *a1 = &(A[ip]);
104
+
105
+ for (register int h = 32; h; h--) {
106
+ DType r = *a0;
107
+ *a0 = *a1;
108
+ *a1 = r;
109
+
110
+ a0 += lda;
111
+ a1 += lda;
112
+ }
113
+
114
+ }
115
+ if (inci > 0) KeepOn = (++i <= i2);
116
+ else KeepOn = (--i >= i2);
117
+
118
+ } while (KeepOn);
119
+ A += incA;
120
+ } while (--nb);
121
+ }
122
+
123
+ if (mr) {
124
+ const int* ipiv = piv;
125
+ int i = i1;
126
+ int KeepOn;
127
+
128
+ do {
129
+ int ip = *ipiv; ipiv += inci;
130
+ if (ip != i) {
131
+ DType *a0 = &(A[i]),
132
+ *a1 = &(A[ip]);
133
+
134
+ for (register int h = mr; h; h--) {
135
+ DType r = *a0;
136
+ *a0 = *a1;
137
+ *a1 = r;
138
+
139
+ a0 += lda;
140
+ a1 += lda;
141
+ }
142
+ }
143
+
144
+ if (inci > 0) KeepOn = (++i <= i2);
145
+ else KeepOn = (--i >= i2);
146
+
147
+ } while (KeepOn);
148
+ }
149
+ }
150
+
151
+
152
+ /*
153
+ * Function signature conversion for calling LAPACK's laswp functions as directly as possible.
154
+ *
155
+ * For documentation: http://www.netlib.org/lapack/double/dlaswp.f
156
+ *
157
+ * This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.
158
+ */
159
+ template <typename DType>
160
+ inline void clapack_laswp(const int n, void* a, const int lda, const int k1, const int k2, const int* ipiv, const int incx) {
161
+ laswp<DType>(n, reinterpret_cast<DType*>(a), lda, k1, k2, ipiv, incx);
162
+ }
163
+
164
+ } } // namespace nm::math
165
+ #endif // LASWP_H
@@ -0,0 +1,52 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == long_dtype.h
25
+ //
26
+ // Declarations necessary for the native versions of GEMM and GEMV.
27
+ //
28
+
29
+ #ifndef LONG_DTYPE_H
30
+ #define LONG_DTYPE_H
31
+
32
+ namespace nm { namespace math {
33
+ // These allow an increase in precision for intermediate values of gemm and gemv.
34
+ // See also: http://stackoverflow.com/questions/11873694/how-does-one-increase-precision-in-c-templates-in-a-template-typename-dependen
35
+ template <typename DType> struct LongDType;
36
+ template <> struct LongDType<uint8_t> { typedef int16_t type; };
37
+ template <> struct LongDType<int8_t> { typedef int16_t type; };
38
+ template <> struct LongDType<int16_t> { typedef int32_t type; };
39
+ template <> struct LongDType<int32_t> { typedef int64_t type; };
40
+ template <> struct LongDType<int64_t> { typedef int64_t type; };
41
+ template <> struct LongDType<float> { typedef double type; };
42
+ template <> struct LongDType<double> { typedef double type; };
43
+ template <> struct LongDType<Complex64> { typedef Complex128 type; };
44
+ template <> struct LongDType<Complex128> { typedef Complex128 type; };
45
+ template <> struct LongDType<Rational32> { typedef Rational128 type; };
46
+ template <> struct LongDType<Rational64> { typedef Rational128 type; };
47
+ template <> struct LongDType<Rational128> { typedef Rational128 type; };
48
+ template <> struct LongDType<RubyObject> { typedef RubyObject type; };
49
+
50
+ }} // end of namespace nm::math
51
+
52
+ #endif
@@ -0,0 +1,1154 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == math.h
25
+ //
26
+ // Header file for math functions, interfacing with BLAS, etc.
27
+ //
28
+ // For instructions on adding CBLAS and CLAPACK functions, see the
29
+ // beginning of math.cpp.
30
+ //
31
+ // Some of these functions are from ATLAS. Here is the license for
32
+ // ATLAS:
33
+ //
34
+ /*
35
+ * Automatically Tuned Linear Algebra Software v3.8.4
36
+ * (C) Copyright 1999 R. Clint Whaley
37
+ *
38
+ * Redistribution and use in source and binary forms, with or without
39
+ * modification, are permitted provided that the following conditions
40
+ * are met:
41
+ * 1. Redistributions of source code must retain the above copyright
42
+ * notice, this list of conditions and the following disclaimer.
43
+ * 2. Redistributions in binary form must reproduce the above copyright
44
+ * notice, this list of conditions, and the following disclaimer in the
45
+ * documentation and/or other materials provided with the distribution.
46
+ * 3. The name of the ATLAS group or the names of its contributers may
47
+ * not be used to endorse or promote products derived from this
48
+ * software without specific written permission.
49
+ *
50
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
52
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
53
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ATLAS GROUP OR ITS CONTRIBUTORS
54
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
55
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
56
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
57
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
58
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
59
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
60
+ * POSSIBILITY OF SUCH DAMAGE.
61
+ *
62
+ */
63
+
64
+ #ifndef MATH_H
65
+ #define MATH_H
66
+
67
+ /*
68
+ * Standard Includes
69
+ */
70
+
71
+ extern "C" { // These need to be in an extern "C" block or you'll get all kinds of undefined symbol errors.
72
+ #include <cblas.h>
73
+
74
+ #ifdef HAVE_CLAPACK_H
75
+ #include <clapack.h>
76
+ #endif
77
+ }
78
+
79
+ #include <algorithm> // std::min, std::max
80
+ #include <limits> // std::numeric_limits
81
+
82
+ /*
83
+ * Project Includes
84
+ */
85
+ #include "lapack.h"
86
+
87
+ /*
88
+ * Macros
89
+ */
90
+ #define REAL_RECURSE_LIMIT 4
91
+
92
+ /*
93
+ * Data
94
+ */
95
+
96
+
97
+ extern "C" {
98
+ /*
99
+ * C accessors.
100
+ */
101
+ void nm_math_det_exact(const int M, const void* elements, const int lda, nm::dtype_t dtype, void* result);
102
+ void nm_math_transpose_generic(const size_t M, const size_t N, const void* A, const int lda, void* B, const int ldb, size_t element_size);
103
+ void nm_math_init_blas(void);
104
+
105
+ }
106
+
107
+
108
+ namespace nm {
109
+ namespace math {
110
+
111
+ /*
112
+ * Types
113
+ */
114
+
115
+
116
+ /*
117
+ * Functions
118
+ */
119
+
120
+
121
+ template <typename DType>
122
+ inline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
123
+ const int K, const DType* alpha, const DType* A, const int lda, const DType* beta, DType* C, const int ldc) {
124
+ rb_raise(rb_eNotImpError, "syrk not yet implemented for non-BLAS dtypes");
125
+ }
126
+
127
+ template <typename DType>
128
+ inline void herk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
129
+ const int K, const DType* alpha, const DType* A, const int lda, const DType* beta, DType* C, const int ldc) {
130
+ rb_raise(rb_eNotImpError, "herk not yet implemented for non-BLAS dtypes");
131
+ }
132
+
133
+ template <>
134
+ inline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
135
+ const int K, const float* alpha, const float* A, const int lda, const float* beta, float* C, const int ldc) {
136
+ cblas_ssyrk(Order, Uplo, Trans, N, K, *alpha, A, lda, *beta, C, ldc);
137
+ }
138
+
139
+ template <>
140
+ inline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
141
+ const int K, const double* alpha, const double* A, const int lda, const double* beta, double* C, const int ldc) {
142
+ cblas_dsyrk(Order, Uplo, Trans, N, K, *alpha, A, lda, *beta, C, ldc);
143
+ }
144
+
145
+ template <>
146
+ inline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
147
+ const int K, const Complex64* alpha, const Complex64* A, const int lda, const Complex64* beta, Complex64* C, const int ldc) {
148
+ cblas_csyrk(Order, Uplo, Trans, N, K, alpha, A, lda, beta, C, ldc);
149
+ }
150
+
151
+ template <>
152
+ inline void syrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
153
+ const int K, const Complex128* alpha, const Complex128* A, const int lda, const Complex128* beta, Complex128* C, const int ldc) {
154
+ cblas_zsyrk(Order, Uplo, Trans, N, K, alpha, A, lda, beta, C, ldc);
155
+ }
156
+
157
+
158
+ template <>
159
+ inline void herk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
160
+ const int K, const Complex64* alpha, const Complex64* A, const int lda, const Complex64* beta, Complex64* C, const int ldc) {
161
+ cblas_cherk(Order, Uplo, Trans, N, K, alpha->r, A, lda, beta->r, C, ldc);
162
+ }
163
+
164
+ template <>
165
+ inline void herk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE Trans, const int N,
166
+ const int K, const Complex128* alpha, const Complex128* A, const int lda, const Complex128* beta, Complex128* C, const int ldc) {
167
+ cblas_zherk(Order, Uplo, Trans, N, K, alpha->r, A, lda, beta->r, C, ldc);
168
+ }
169
+
170
+
171
+ template <typename DType>
172
+ inline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,
173
+ const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const DType* alpha,
174
+ const DType* A, const int lda, DType* B, const int ldb) {
175
+ rb_raise(rb_eNotImpError, "trmm not yet implemented for non-BLAS dtypes");
176
+ }
177
+
178
+ template <>
179
+ inline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,
180
+ const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const float* alpha,
181
+ const float* A, const int lda, float* B, const int ldb) {
182
+ cblas_strmm(order, side, uplo, ta, diag, m, n, *alpha, A, lda, B, ldb);
183
+ }
184
+
185
+ template <>
186
+ inline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,
187
+ const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const double* alpha,
188
+ const double* A, const int lda, double* B, const int ldb) {
189
+ cblas_dtrmm(order, side, uplo, ta, diag, m, n, *alpha, A, lda, B, ldb);
190
+ }
191
+
192
+ template <>
193
+ inline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,
194
+ const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const Complex64* alpha,
195
+ const Complex64* A, const int lda, Complex64* B, const int ldb) {
196
+ cblas_ctrmm(order, side, uplo, ta, diag, m, n, alpha, A, lda, B, ldb);
197
+ }
198
+
199
+ template <>
200
+ inline void trmm(const enum CBLAS_ORDER order, const enum CBLAS_SIDE side, const enum CBLAS_UPLO uplo,
201
+ const enum CBLAS_TRANSPOSE ta, const enum CBLAS_DIAG diag, const int m, const int n, const Complex128* alpha,
202
+ const Complex128* A, const int lda, Complex128* B, const int ldb) {
203
+ cblas_ztrmm(order, side, uplo, ta, diag, m, n, alpha, A, lda, B, ldb);
204
+ }
205
+
206
+
207
+
208
+ // Yale: numeric matrix multiply c=a*b
209
+ template <typename DType, typename IType>
210
+ inline void numbmm(const unsigned int n, const unsigned int m, const unsigned int l, const IType* ia, const IType* ja, const DType* a, const bool diaga,
211
+ const IType* ib, const IType* jb, const DType* b, const bool diagb, IType* ic, IType* jc, DType* c, const bool diagc) {
212
+ const unsigned int max_lmn = std::max(std::max(m, n), l);
213
+ IType next[max_lmn];
214
+ DType sums[max_lmn];
215
+
216
+ DType v;
217
+
218
+ IType head, length, temp, ndnz = 0;
219
+ IType minmn = std::min(m,n);
220
+ IType minlm = std::min(l,m);
221
+
222
+ for (IType idx = 0; idx < max_lmn; ++idx) { // initialize scratch arrays
223
+ next[idx] = std::numeric_limits<IType>::max();
224
+ sums[idx] = 0;
225
+ }
226
+
227
+ for (IType i = 0; i < n; ++i) { // walk down the rows
228
+ head = std::numeric_limits<IType>::max()-1; // head gets assigned as whichever column of B's row j we last visited
229
+ length = 0;
230
+
231
+ for (IType jj = ia[i]; jj <= ia[i+1]; ++jj) { // walk through entries in each row
232
+ IType j;
233
+
234
+ if (jj == ia[i+1]) { // if we're in the last entry for this row:
235
+ if (!diaga || i >= minmn) continue;
236
+ j = i; // if it's a new Yale matrix, and last entry, get the diagonal position (j) and entry (ajj)
237
+ v = a[i];
238
+ } else {
239
+ j = ja[jj]; // if it's not the last entry for this row, get the column (j) and entry (ajj)
240
+ v = a[jj];
241
+ }
242
+
243
+ for (IType kk = ib[j]; kk <= ib[j+1]; ++kk) {
244
+
245
+ IType k;
246
+
247
+ if (kk == ib[j+1]) { // Get the column id for that entry
248
+ if (!diagb || j >= minlm) continue;
249
+ k = j;
250
+ sums[k] += v*b[k];
251
+ } else {
252
+ k = jb[kk];
253
+ sums[k] += v*b[kk];
254
+ }
255
+
256
+ if (next[k] == std::numeric_limits<IType>::max()) {
257
+ next[k] = head;
258
+ head = k;
259
+ ++length;
260
+ }
261
+ } // end of kk loop
262
+ } // end of jj loop
263
+
264
+ for (IType jj = 0; jj < length; ++jj) {
265
+ if (sums[head] != 0) {
266
+ if (diagc && head == i) {
267
+ c[head] = sums[head];
268
+ } else {
269
+ jc[n+1+ndnz] = head;
270
+ c[n+1+ndnz] = sums[head];
271
+ ++ndnz;
272
+ }
273
+ }
274
+
275
+ temp = head;
276
+ head = next[head];
277
+
278
+ next[temp] = std::numeric_limits<IType>::max();
279
+ sums[temp] = 0;
280
+ }
281
+
282
+ ic[i+1] = n+1+ndnz;
283
+ }
284
+ } /* numbmm_ */
285
+
286
+
287
+ /*
288
+ template <typename DType, typename IType>
289
+ inline void new_yale_matrix_multiply(const unsigned int m, const IType* ija, const DType* a, const IType* ijb, const DType* b, YALE_STORAGE* c_storage) {
290
+ unsigned int n = c_storage->shape[0],
291
+ l = c_storage->shape[1];
292
+
293
+ // Create a working vector of dimension max(m,l,n) and initial value IType::max():
294
+ std::vector<IType> mask(std::max(std::max(m,l),n), std::numeric_limits<IType>::max());
295
+
296
+ for (IType i = 0; i < n; ++i) { // A.rows.each_index do |i|
297
+
298
+ IType j, k;
299
+ size_t ndnz;
300
+
301
+ for (IType jj = ija[i]; jj <= ija[i+1]; ++jj) { // walk through column pointers for row i of A
302
+ j = (jj == ija[i+1]) ? i : ija[jj]; // Get the current column index (handle diagonals last)
303
+
304
+ if (j >= m) {
305
+ if (j == ija[jj]) rb_raise(rb_eIndexError, "ija array for left-hand matrix contains an out-of-bounds column index %u at position %u", jj, j);
306
+ else break;
307
+ }
308
+
309
+ for (IType kk = ijb[j]; kk <= ijb[j+1]; ++kk) { // walk through column pointers for row j of B
310
+ if (j >= m) continue; // first of all, does B *have* a row j?
311
+ k = (kk == ijb[j+1]) ? j : ijb[kk]; // Get the current column index (handle diagonals last)
312
+
313
+ if (k >= l) {
314
+ if (k == ijb[kk]) rb_raise(rb_eIndexError, "ija array for right-hand matrix contains an out-of-bounds column index %u at position %u", kk, k);
315
+ else break;
316
+ }
317
+
318
+ if (mask[k] == )
319
+ }
320
+
321
+ }
322
+ }
323
+ }
324
+ */
325
+
326
+ // Yale: Symbolic matrix multiply c=a*b
327
+ template <typename IType>
328
+ inline size_t symbmm(const unsigned int n, const unsigned int m, const unsigned int l, const IType* ia, const IType* ja, const bool diaga,
329
+ const IType* ib, const IType* jb, const bool diagb, IType* ic, const bool diagc) {
330
+ unsigned int max_lmn = std::max(std::max(m,n), l);
331
+ IType mask[max_lmn]; // INDEX in the SMMP paper.
332
+ IType j, k; /* Local variables */
333
+ size_t ndnz = n;
334
+
335
+ for (IType idx = 0; idx < max_lmn; ++idx)
336
+ mask[idx] = std::numeric_limits<IType>::max();
337
+
338
+ if (ic) { // Only write to ic if it's supplied; otherwise, we're just counting.
339
+ if (diagc) ic[0] = n+1;
340
+ else ic[0] = 0;
341
+ }
342
+
343
+ IType minmn = std::min(m,n);
344
+ IType minlm = std::min(l,m);
345
+
346
+ for (IType i = 0; i < n; ++i) { // MAIN LOOP: through rows
347
+
348
+ for (IType jj = ia[i]; jj <= ia[i+1]; ++jj) { // merge row lists, walking through columns in each row
349
+
350
+ // j <- column index given by JA[jj], or handle diagonal.
351
+ if (jj == ia[i+1]) { // Don't really do it the last time -- just handle diagonals in a new yale matrix.
352
+ if (!diaga || i >= minmn) continue;
353
+ j = i;
354
+ } else j = ja[jj];
355
+
356
+ for (IType kk = ib[j]; kk <= ib[j+1]; ++kk) { // Now walk through columns K of row J in matrix B.
357
+ if (kk == ib[j+1]) {
358
+ if (!diagb || j >= minlm) continue;
359
+ k = j;
360
+ } else k = jb[kk];
361
+
362
+ if (mask[k] != i) {
363
+ mask[k] = i;
364
+ ++ndnz;
365
+ }
366
+ }
367
+ }
368
+
369
+ if (diagc && mask[i] == std::numeric_limits<IType>::max()) --ndnz;
370
+
371
+ if (ic) ic[i+1] = ndnz;
372
+ }
373
+
374
+ return ndnz;
375
+ } /* symbmm_ */
376
+
377
+
378
+ // In-place quicksort (from Wikipedia) -- called by smmp_sort_columns, below. All functions are inclusive of left, right.
379
+ namespace smmp_sort {
380
+ const size_t THRESHOLD = 4; // switch to insertion sort for 4 elements or fewer
381
+
382
+ template <typename DType, typename IType>
383
+ void print_array(DType* vals, IType* array, IType left, IType right) {
384
+ for (IType i = left; i <= right; ++i) {
385
+ std::cerr << array[i] << ":" << vals[i] << " ";
386
+ }
387
+ std::cerr << std::endl;
388
+ }
389
+
390
+ template <typename DType, typename IType>
391
+ IType partition(DType* vals, IType* array, IType left, IType right, IType pivot) {
392
+ IType pivotJ = array[pivot];
393
+ DType pivotV = vals[pivot];
394
+
395
+ // Swap pivot and right
396
+ array[pivot] = array[right];
397
+ vals[pivot] = vals[right];
398
+ array[right] = pivotJ;
399
+ vals[right] = pivotV;
400
+
401
+ IType store = left;
402
+ for (IType idx = left; idx < right; ++idx) {
403
+ if (array[idx] <= pivotJ) {
404
+ // Swap i and store
405
+ std::swap(array[idx], array[store]);
406
+ std::swap(vals[idx], vals[store]);
407
+ ++store;
408
+ }
409
+ }
410
+
411
+ std::swap(array[store], array[right]);
412
+ std::swap(vals[store], vals[right]);
413
+
414
+ return store;
415
+ }
416
+
417
+ // Recommended to use the median of left, right, and mid for the pivot.
418
+ template <typename IType>
419
+ IType median(IType a, IType b, IType c) {
420
+ if (a < b) {
421
+ if (b < c) return b; // a b c
422
+ if (a < c) return c; // a c b
423
+ return a; // c a b
424
+
425
+ } else { // a > b
426
+ if (a < c) return a; // b a c
427
+ if (b < c) return c; // b c a
428
+ return b; // c b a
429
+ }
430
+ }
431
+
432
+
433
+ // Insertion sort is more efficient than quicksort for small N
434
+ template <typename DType, typename IType>
435
+ void insertion_sort(DType* vals, IType* array, IType left, IType right) {
436
+ for (IType idx = left; idx <= right; ++idx) {
437
+ IType col_to_insert = array[idx];
438
+ DType val_to_insert = vals[idx];
439
+
440
+ IType hole_pos = idx;
441
+ for (; hole_pos > left && col_to_insert < array[hole_pos-1]; --hole_pos) {
442
+ array[hole_pos] = array[hole_pos - 1]; // shift the larger column index up
443
+ vals[hole_pos] = vals[hole_pos - 1]; // value goes along with it
444
+ }
445
+
446
+ array[hole_pos] = col_to_insert;
447
+ vals[hole_pos] = val_to_insert;
448
+ }
449
+ }
450
+
451
+
452
+ template <typename DType, typename IType>
453
+ void quicksort(DType* vals, IType* array, IType left, IType right) {
454
+
455
+ if (left < right) {
456
+ if (right - left < THRESHOLD) {
457
+ insertion_sort(vals, array, left, right);
458
+ } else {
459
+ // choose any pivot such that left < pivot < right
460
+ IType pivot = median(left, right, (IType)(((unsigned long)left + (unsigned long)right) / 2));
461
+ pivot = partition(vals, array, left, right, pivot);
462
+
463
+ // recursively sort elements smaller than the pivot
464
+ quicksort<DType,IType>(vals, array, left, pivot-1);
465
+
466
+ // recursively sort elements at least as big as the pivot
467
+ quicksort<DType,IType>(vals, array, pivot+1, right);
468
+ }
469
+ }
470
+ }
471
+
472
+
473
+ }; // end of namespace smmp_sort
474
+
475
+
476
+ /*
477
+ * For use following symbmm and numbmm. Sorts the matrix entries in each row according to the column index.
478
+ * This utilizes quicksort, which is an in-place unstable sort (since there are no duplicate entries, we don't care
479
+ * about stability).
480
+ *
481
+ * TODO: It might be worthwhile to do a test for free memory, and if available, use an unstable sort that isn't in-place.
482
+ *
483
+ * TODO: It's actually probably possible to write an even faster sort, since symbmm/numbmm are not producing a random
484
+ * ordering. If someone is doing a lot of Yale matrix multiplication, it might benefit them to consider even insertion
485
+ * sort.
486
+ */
487
+ template <typename DType, typename IType>
488
+ inline void smmp_sort_columns(const size_t n, const IType* ia, IType* ja, DType* a) {
489
+ for (size_t i = 0; i < n; ++i) {
490
+ if (ia[i+1] - ia[i] < 2) continue; // no need to sort rows containing only one or two elements.
491
+ else if (ia[i+1] - ia[i] <= smmp_sort::THRESHOLD) {
492
+ smmp_sort::insertion_sort<DType, IType>(a, ja, ia[i], ia[i+1]-1); // faster for small rows
493
+ } else {
494
+ smmp_sort::quicksort<DType, IType>(a, ja, ia[i], ia[i+1]-1); // faster for large rows (and may call insertion_sort as well)
495
+ }
496
+ }
497
+ }
498
+
499
+
500
+
501
+ /*
502
+ * Transposes a generic Yale matrix (old or new). Specify new by setting diaga = true.
503
+ *
504
+ * Based on transp from SMMP (same as symbmm and numbmm).
505
+ *
506
+ * This is not named in the same way as most yale_storage functions because it does not act on a YALE_STORAGE
507
+ * object.
508
+ */
509
+ template <typename DType, typename IType>
510
+ void transpose_yale(const size_t n, const size_t m, const void* ia_, const void* ja_, const void* a_,
511
+ const bool diaga, void* ib_, void* jb_, void* b_, const bool move)
512
+ {
513
+ const IType *ia = reinterpret_cast<const IType*>(ia_),
514
+ *ja = reinterpret_cast<const IType*>(ja_);
515
+ const DType *a = reinterpret_cast<const DType*>(a_);
516
+
517
+ IType *ib = reinterpret_cast<IType*>(ib_),
518
+ *jb = reinterpret_cast<IType*>(jb_);
519
+ DType *b = reinterpret_cast<DType*>(b_);
520
+
521
+
522
+
523
+ size_t index;
524
+
525
+ // Clear B
526
+ for (size_t i = 0; i < m+1; ++i) ib[i] = 0;
527
+
528
+ if (move)
529
+ for (size_t i = 0; i < m+1; ++i) b[i] = 0;
530
+
531
+ if (diaga) ib[0] = m + 1;
532
+ else ib[0] = 0;
533
+
534
+ /* count indices for each column */
535
+
536
+ for (size_t i = 0; i < n; ++i) {
537
+ for (size_t j = ia[i]; j < ia[i+1]; ++j) {
538
+ ++(ib[ja[j]+1]);
539
+ }
540
+ }
541
+
542
+ for (size_t i = 0; i < m; ++i) {
543
+ ib[i+1] = ib[i] + ib[i+1];
544
+ }
545
+
546
+ /* now make jb */
547
+
548
+ for (size_t i = 0; i < n; ++i) {
549
+
550
+ for (size_t j = ia[i]; j < ia[i+1]; ++j) {
551
+ index = ja[j];
552
+ jb[ib[index]] = i;
553
+
554
+ if (move)
555
+ b[ib[index]] = a[j];
556
+
557
+ ++(ib[index]);
558
+ }
559
+ }
560
+
561
+ /* now fixup ib */
562
+
563
+ for (size_t i = m; i >= 1; --i) {
564
+ ib[i] = ib[i-1];
565
+ }
566
+
567
+
568
+ if (diaga) {
569
+ if (move) {
570
+ size_t j = std::min(n,m);
571
+
572
+ for (size_t i = 0; i < j; ++i) {
573
+ b[i] = a[i];
574
+ }
575
+ }
576
+ ib[0] = m + 1;
577
+
578
+ } else {
579
+ ib[0] = 0;
580
+ }
581
+ }
582
+
583
+
584
+
585
+
586
+
587
+
588
+
589
/*
 * From ATLAS 3.8.0:
 *
 * Computes one of two LU factorizations based on the setting of the Order
 * parameter, as follows:
 * ----------------------------------------------------------------------------
 *                       Order == CblasColMajor
 * Column-major factorization of form
 *   A = P * L * U
 * where P is a row-permutation matrix, L is lower triangular with unit
 * diagonal elements (lower trapazoidal if M > N), and U is upper triangular
 * (upper trapazoidal if M < N).
 *
 * ----------------------------------------------------------------------------
 *                       Order == CblasRowMajor
 * Row-major factorization of form
 *   A = P * L * U
 * where P is a column-permutation matrix, L is lower triangular (lower
 * trapazoidal if M > N), and U is upper triangular with unit diagonals (upper
 * trapazoidal if M < N).
 *
 * ============================================================================
 * Let IERR be the return value of the function:
 *   If IERR == 0, successful exit.
 *   If (IERR < 0) the -IERR argument had an illegal value
 *   If (IERR > 0 && Order == CblasColMajor)
 *     U(i-1,i-1) is exactly zero.  The factorization has been completed,
 *     but the factor U is exactly singular, and division by zero will
 *     occur if it is used to solve a system of equations.
 *   If (IERR > 0 && Order == CblasRowMajor)
 *     L(i-1,i-1) is exactly zero.  The factorization has been completed,
 *     but the factor L is exactly singular, and division by zero will
 *     occur if it is used to solve a system of equations.
 *
 * NOTE(review): this text describes ATLAS's getrf (LU factorization), but the
 * function that immediately follows is potrf, whose CLAPACK specializations
 * perform a Cholesky factorization (clapack_?potrf). Only the IERR
 * return-value convention plausibly carries over -- confirm and relocate this
 * comment next to getrf.
 */
623
/*
 * Generic fallback for potrf (Cholesky factorization per clapack_?potrf
 * below). Whichever branch is compiled in, this always raises
 * NotImplementedError: only the CLAPACK specializations that follow
 * (float/double/Complex64/Complex128) do real work. The trailing `return 0`
 * is unreachable but satisfies the int return type.
 */
template <typename DType>
inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, DType* A, const int lda) {
#ifdef HAVE_CLAPACK_H
  rb_raise(rb_eNotImpError, "not yet implemented for non-BLAS dtypes");
#else
  rb_raise(rb_eNotImpError, "only LAPACK version implemented thus far");
#endif
  return 0;
}
632
+
633
#ifdef HAVE_CLAPACK_H
// CLAPACK-backed specializations of potrf, one per BLAS dtype; each forwards
// directly to the corresponding clapack routine and returns its info code.
template <>
inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, float* A, const int lda) {
  return clapack_spotrf(order, uplo, N, A, lda);
}

template <>
inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, double* A, const int lda) {
  return clapack_dpotrf(order, uplo, N, A, lda);
}

// Complex variants: CLAPACK takes void* for complex data, so the typed
// pointer is re-typed here (presumably Complex64/Complex128 are interleaved
// re/im pairs matching CLAPACK's layout -- confirm against their definitions).
template <>
inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex64* A, const int lda) {
  return clapack_cpotrf(order, uplo, N, reinterpret_cast<void*>(A), lda);
}

template <>
inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex128* A, const int lda) {
  return clapack_zpotrf(order, uplo, N, reinterpret_cast<void*>(A), lda);
}
#endif
654
+
655
+
656
+
657
// Copies an upper row-major array from U, zeroing U; U is unit, so diagonal is not copied.
//
// From ATLAS 3.8.0.
template <typename DType>
static inline void trcpzeroU(const int M, const int N, DType* U, const int ldu, DType* C, const int ldc) {
  // Walk the M rows, advancing both base pointers by their leading dimension;
  // in each row, move the strictly-upper entries (j > i) from U into C,
  // leaving zeros behind in U. Diagonal and lower entries are untouched.
  for (int i = 0; i != M; ++i, U += ldu, C += ldc) {
    for (int j = i+1; j < N; ++j) {
      C[j] = U[j];
      U[j] = 0;
    }
  }
}
673
+
674
+
675
+ /*
676
+ * Un-comment the following lines when we figure out how to calculate NB for each of the ATLAS-derived
677
+ * functions. This is probably really complicated.
678
+ *
679
+ * Also needed: ATL_MulByNB, ATL_DivByNB (both defined in the build process for ATLAS), and ATL_mmMU.
680
+ *
681
+ */
682
+
683
+ /*
684
+
685
+ template <bool RowMajor, bool Upper, typename DType>
686
+ static int trtri_4(const enum CBLAS_DIAG Diag, DType* A, const int lda) {
687
+
688
+ if (RowMajor) {
689
+ DType *pA0 = A, *pA1 = A+lda, *pA2 = A+2*lda, *pA3 = A+3*lda;
690
+ DType tmp;
691
+ if (Upper) {
692
+ DType A01 = pA0[1], A02 = pA0[2], A03 = pA0[3],
693
+ A12 = pA1[2], A13 = pA1[3],
694
+ A23 = pA2[3];
695
+
696
+ if (Diag == CblasNonUnit) {
697
+ pA0->inverse();
698
+ (pA1+1)->inverse();
699
+ (pA2+2)->inverse();
700
+ (pA3+3)->inverse();
701
+
702
+ pA0[1] = -A01 * pA1[1] * pA0[0];
703
+ pA1[2] = -A12 * pA2[2] * pA1[1];
704
+ pA2[3] = -A23 * pA3[3] * pA2[2];
705
+
706
+ pA0[2] = -(A01 * pA1[2] + A02 * pA2[2]) * pA0[0];
707
+ pA1[3] = -(A12 * pA2[3] + A13 * pA3[3]) * pA1[1];
708
+
709
+ pA0[3] = -(A01 * pA1[3] + A02 * pA2[3] + A03 * pA3[3]) * pA0[0];
710
+
711
+ } else {
712
+
713
+ pA0[1] = -A01;
714
+ pA1[2] = -A12;
715
+ pA2[3] = -A23;
716
+
717
+ pA0[2] = -(A01 * pA1[2] + A02);
718
+ pA1[3] = -(A12 * pA2[3] + A13);
719
+
720
+ pA0[3] = -(A01 * pA1[3] + A02 * pA2[3] + A03);
721
+ }
722
+
723
+ } else { // Lower
724
+ DType A10 = pA1[0],
725
+ A20 = pA2[0], A21 = pA2[1],
726
+ A30 = PA3[0], A31 = pA3[1], A32 = pA3[2];
727
+ DType *B10 = pA1,
728
+ *B20 = pA2,
729
+ *B30 = pA3,
730
+ *B21 = pA2+1,
731
+ *B31 = pA3+1,
732
+ *B32 = pA3+2;
733
+
734
+
735
+ if (Diag == CblasNonUnit) {
736
+ pA0->inverse();
737
+ (pA1+1)->inverse();
738
+ (pA2+2)->inverse();
739
+ (pA3+3)->inverse();
740
+
741
+ *B10 = -A10 * pA0[0] * pA1[1];
742
+ *B21 = -A21 * pA1[1] * pA2[2];
743
+ *B32 = -A32 * pA2[2] * pA3[3];
744
+ *B20 = -(A20 * pA0[0] + A21 * (*B10)) * pA2[2];
745
+ *B31 = -(A31 * pA1[1] + A32 * (*B21)) * pA3[3];
746
+ *B30 = -(A30 * pA0[0] + A31 * (*B10) + A32 * (*B20)) * pA3;
747
+ } else {
748
+ *B10 = -A10;
749
+ *B21 = -A21;
750
+ *B32 = -A32;
751
+ *B20 = -(A20 + A21 * (*B10));
752
+ *B31 = -(A31 + A32 * (*B21));
753
+ *B30 = -(A30 + A31 * (*B10) + A32 * (*B20));
754
+ }
755
+ }
756
+
757
+ } else {
758
+ rb_raise(rb_eNotImpError, "only row-major implemented at this time");
759
+ }
760
+
761
+ return 0;
762
+
763
+ }
764
+
765
+
766
+ template <bool RowMajor, bool Upper, typename DType>
767
+ static int trtri_3(const enum CBLAS_DIAG Diag, DType* A, const int lda) {
768
+
769
+ if (RowMajor) {
770
+
771
+ DType tmp;
772
+
773
+ if (Upper) {
774
+ DType A01 = pA0[1], A02 = pA0[2], A03 = pA0[3],
775
+ A12 = pA1[2], A13 = pA1[3];
776
+
777
+ DType *B01 = pA0 + 1,
778
+ *B02 = pA0 + 2,
779
+ *B12 = pA1 + 2;
780
+
781
+ if (Diag == CblasNonUnit) {
782
+ pA0->inverse();
783
+ (pA1+1)->inverse();
784
+ (pA2+2)->inverse();
785
+
786
+ *B01 = -A01 * pA1[1] * pA0[0];
787
+ *B12 = -A12 * pA2[2] * pA1[1];
788
+ *B02 = -(A01 * (*B12) + A02 * pA2[2]) * pA0[0];
789
+ } else {
790
+ *B01 = -A01;
791
+ *B12 = -A12;
792
+ *B02 = -(A01 * (*B12) + A02);
793
+ }
794
+
795
+ } else { // Lower
796
+ DType *pA0=A, *pA1=A+lda, *pA2=A+2*lda;
797
+ DType A10=pA1[0],
798
+ A20=pA2[0], A21=pA2[1];
799
+
800
+ DType *B10 = pA1,
801
+ *B20 = pA2;
802
+ *B21 = pA2+1;
803
+
804
+ if (Diag == CblasNonUnit) {
805
+ pA0->inverse();
806
+ (pA1+1)->inverse();
807
+ (pA2+2)->inverse();
808
+ *B10 = -A10 * pA0[0] * pA1[1];
809
+ *B21 = -A21 * pA1[1] * pA2[2];
810
+ *B20 = -(A20 * pA0[0] + A21 * (*B10)) * pA2[2];
811
+ } else {
812
+ *B10 = -A10;
813
+ *B21 = -A21;
814
+ *B20 = -(A20 + A21 * (*B10));
815
+ }
816
+ }
817
+
818
+
819
+ } else {
820
+ rb_raise(rb_eNotImpError, "only row-major implemented at this time");
821
+ }
822
+
823
+ return 0;
824
+
825
+ }
826
+
827
+ template <bool RowMajor, bool Upper, bool Real, typename DType>
828
+ static void trtri(const enum CBLAS_DIAG Diag, const int N, DType* A, const int lda) {
829
+ DType *Age, *Atr;
830
+ DType tmp;
831
+ int Nleft, Nright;
832
+
833
+ int ierr = 0;
834
+
835
+ static const DType ONE = 1;
836
+ static const DType MONE -1;
837
+ static const DType NONE = -1;
838
+
839
+ if (RowMajor) {
840
+
841
+ // FIXME: Use REAL_RECURSE_LIMIT here for float32 and float64 (instead of 1)
842
+ if ((Real && N > REAL_RECURSE_LIMIT) || (N > 1)) {
843
+ Nleft = N >> 1;
844
+ #ifdef NB
845
+ if (Nleft > NB) NLeft = ATL_MulByNB(ATL_DivByNB(Nleft));
846
+ #endif
847
+
848
+ Nright = N - Nleft;
849
+
850
+ if (Upper) {
851
+ Age = A + Nleft;
852
+ Atr = A + (Nleft * (lda+1));
853
+
854
+ nm::math::trsm<DType>(CblasRowMajor, CblasRight, CblasUpper, CblasNoTrans, Diag,
855
+ Nleft, Nright, ONE, Atr, lda, Age, lda);
856
+
857
+ nm::math::trsm<DType>(CblasRowMajor, CblasLeft, CblasUpper, CblasNoTrans, Diag,
858
+ Nleft, Nright, MONE, A, lda, Age, lda);
859
+
860
+ } else { // Lower
861
+ Age = A + ((Nleft*lda));
862
+ Atr = A + (Nleft * (lda+1));
863
+
864
+ nm::math::trsm<DType>(CblasRowMajor, CblasRight, CblasLower, CblasNoTrans, Diag,
865
+ Nright, Nleft, ONE, A, lda, Age, lda);
866
+ nm::math::trsm<DType>(CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, Diag,
867
+ Nright, Nleft, MONE, Atr, lda, Age, lda);
868
+ }
869
+
870
+ ierr = trtri<RowMajor,Upper,Real,DType>(Diag, Nleft, A, lda);
871
+ if (ierr) return ierr;
872
+
873
+ ierr = trtri<RowMajor,Upper,Real,DType>(Diag, Nright, Atr, lda);
874
+ if (ierr) return ierr + Nleft;
875
+
876
+ } else {
877
+ if (Real) {
878
+ if (N == 4) {
879
+ return trtri_4<RowMajor,Upper,Real,DType>(Diag, A, lda);
880
+ } else if (N == 3) {
881
+ return trtri_3<RowMajor,Upper,Real,DType>(Diag, A, lda);
882
+ } else if (N == 2) {
883
+ if (Diag == CblasNonUnit) {
884
+ A->inverse();
885
+ (A+(lda+1))->inverse();
886
+
887
+ if (Upper) {
888
+ *(A+1) *= *A; // TRI_MUL
889
+ *(A+1) *= *(A+lda+1); // TRI_MUL
890
+ } else {
891
+ *(A+lda) *= *A; // TRI_MUL
892
+ *(A+lda) *= *(A+lda+1); // TRI_MUL
893
+ }
894
+ }
895
+
896
+ if (Upper) *(A+1) = -*(A+1); // TRI_NEG
897
+ else *(A+lda) = -*(A+lda); // TRI_NEG
898
+ } else if (Diag == CblasNonUnit) A->inverse();
899
+ } else { // not real
900
+ if (Diag == CblasNonUnit) A->inverse();
901
+ }
902
+ }
903
+
904
+ } else {
905
+ rb_raise(rb_eNotImpError, "only row-major implemented at this time");
906
+ }
907
+
908
+ return ierr;
909
+ }
910
+
911
+
912
+ template <bool RowMajor, bool Real, typename DType>
913
+ int getri(const int N, DType* A, const int lda, const int* ipiv, DType* wrk, const int lwrk) {
914
+
915
+ if (!RowMajor) rb_raise(rb_eNotImpError, "only row-major implemented at this time");
916
+
917
+ int jb, nb, I, ndown, iret;
918
+
919
+ const DType ONE = 1, NONE = -1;
920
+
921
+ int iret = trtri<RowMajor,false,Real,DType>(CblasNonUnit, N, A, lda);
922
+ if (!iret && N > 1) {
923
+ jb = lwrk / N;
924
+ if (jb >= NB) nb = ATL_MulByNB(ATL_DivByNB(jb));
925
+ else if (jb >= ATL_mmMU) nb = (jb/ATL_mmMU)*ATL_mmMU;
926
+ else nb = jb;
927
+ if (!nb) return -6; // need at least 1 row of workspace
928
+
929
+ // only first iteration will have partial block, unroll it
930
+
931
+ jb = N - (N/nb) * nb;
932
+ if (!jb) jb = nb;
933
+ I = N - jb;
934
+ A += lda * I;
935
+ trcpzeroU<DType>(jb, jb, A+I, lda, wrk, jb);
936
+ nm::math::trsm<DType>(CblasRowMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasUnit,
937
+ jb, N, ONE, wrk, jb, A, lda);
938
+
939
+ if (I) {
940
+ do {
941
+ I -= nb;
942
+ A -= nb * lda;
943
+ ndown = N-I;
944
+ trcpzeroU<DType>(nb, ndown, A+I, lda, wrk, ndown);
945
+ nm::math::gemm<DType>(CblasRowMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasUnit,
946
+ nb, N, ONE, wrk, ndown, A, lda);
947
+ } while (I);
948
+ }
949
+
950
+ // Apply row interchanges
951
+
952
+ for (I = N - 2; I >= 0; --I) {
953
+ jb = ipiv[I];
954
+ if (jb != I) nm::math::swap<DType>(N, A+I*lda, 1, A+jb*lda, 1);
955
+ }
956
+ }
957
+
958
+ return iret;
959
+ }
960
+ */
961
+
962
+
963
+
964
/*
 * Recursive blocked LAUUM kernel (structure follows ATLAS): forms the product
 * of a triangular Cholesky factor with its (conjugate) transpose in place.
 * The triangle is split in half, each diagonal block is handled recursively,
 * and the halves are recombined with a rank-k update (herk/syrk) plus a
 * triangular multiply (trmm).
 *
 * NOTE(review): the N == 1 base case squares the scalar without conjugation
 * even when is_complex is true -- confirm whether complex diagonal entries
 * are guaranteed real by the callers.
 */
template <bool is_complex, typename DType>
inline void lauum(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, DType* A, const int lda) {

  int Nleft, Nright;
  const DType ONE = 1;
  DType *G, *U0 = A, *U1;

  if (N > 1) {
    // Split point: half of N, rounded to a blocking-factor multiple when the
    // ATLAS blocking constant NB is available at compile time.
    Nleft = N >> 1;
#ifdef NB
    if (Nleft > NB) Nleft = ATL_MulByNB(ATL_DivByNB(Nleft));
#endif

    Nright = N - Nleft;

    // G points at the off-diagonal block and U1 at the lower-right diagonal
    // block; the offsets depend on both storage order and which triangle.
    // FIXME: There's a simpler way to write this next block, but I'm way too tired to work it out right now.
    if (uplo == CblasUpper) {
      if (order == CblasRowMajor) {
        G = A + Nleft;
        U1 = G + Nleft * lda;
      } else {
        G = A + Nleft * lda;
        U1 = G + Nleft;
      }
    } else {
      if (order == CblasRowMajor) {
        G = A + Nleft * lda;
        U1 = G + Nleft;
      } else {
        G = A + Nleft;
        U1 = G + Nleft * lda;
      }
    }

    // Recurse on the upper-left diagonal block first...
    lauum<is_complex, DType>(order, uplo, Nleft, U0, lda);

    if (is_complex) {

      // ...then fold in the off-diagonal contribution (Hermitian rank-k
      // update + conjugate-transposed triangular multiply for complex data)...
      nm::math::herk<DType>(order, uplo,
                            uplo == CblasLower ? CblasConjTrans : CblasNoTrans,
                            Nleft, Nright, &ONE, G, lda, &ONE, U0, lda);

      nm::math::trmm<DType>(order, CblasLeft, uplo, CblasConjTrans, CblasNonUnit, Nright, Nleft, &ONE, U1, lda, G, lda);
    } else {
      // ...or the symmetric (real) equivalents.
      nm::math::syrk<DType>(order, uplo,
                            uplo == CblasLower ? CblasTrans : CblasNoTrans,
                            Nleft, Nright, &ONE, G, lda, &ONE, U0, lda);

      nm::math::trmm<DType>(order, CblasLeft, uplo, CblasTrans, CblasNonUnit, Nright, Nleft, &ONE, U1, lda, G, lda);
    }
    // Finally recurse on the lower-right diagonal block.
    lauum<is_complex, DType>(order, uplo, Nright, U1, lda);

  } else {
    // Base case: a 1x1 triangle is just the squared scalar.
    *A = *A * *A;
  }
}
1020
+
1021
+
1022
#ifdef HAVE_CLAPACK_H
// CLAPACK-backed versions of lauum, one overload per BLAS dtype.
//
// NOTE(review): these are single-parameter template *overloads*, not
// specializations of the two-parameter template above. A call spelled
// lauum<is_complex, DType>(...) -- the form used by clapack_lauum below --
// supplies two explicit template arguments and can therefore only match the
// generic recursive template; these CLAPACK overloads would be selected only
// by a call spelled lauum<is_complex>(...) with the pointer type deduced.
// Confirm whether they are currently reachable.
template <bool is_complex>
inline void lauum(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, float* A, const int lda) {
  clapack_slauum(order, uplo, N, A, lda);
}

template <bool is_complex>
inline void lauum(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, double* A, const int lda) {
  clapack_dlauum(order, uplo, N, A, lda);
}

template <bool is_complex>
inline void lauum(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex64* A, const int lda) {
  clapack_clauum(order, uplo, N, A, lda);
}

template <bool is_complex>
inline void lauum(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex128* A, const int lda) {
  clapack_zlauum(order, uplo, N, A, lda);
}
#endif
1043
+
1044
+
1045
+ /*
1046
+ * Function signature conversion for calling LAPACK's lauum functions as directly as possible.
1047
+ *
1048
+ * For documentation: http://www.netlib.org/lapack/double/dlauum.f
1049
+ *
1050
+ * This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.
1051
+ */
1052
+ template <bool is_complex, typename DType>
1053
+ inline int clapack_lauum(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {
1054
+ if (n < 0) rb_raise(rb_eArgError, "n cannot be less than zero, is set to %d", n);
1055
+ if (lda < n || lda < 1) rb_raise(rb_eArgError, "lda must be >= max(n,1); lda=%d, n=%d\n", lda, n);
1056
+
1057
+ if (uplo == CblasUpper) lauum<is_complex, DType>(order, uplo, n, reinterpret_cast<DType*>(a), lda);
1058
+ else lauum<is_complex, DType>(order, uplo, n, reinterpret_cast<DType*>(a), lda);
1059
+
1060
+ return 0;
1061
+ }
1062
+
1063
+
1064
+
1065
+
1066
/*
 * Macro for declaring LAPACK specializations of the getrf function.
 *
 * type is the DType; call is the specific function to call; cast_as is what the DType* should be
 * cast to in order to pass it to LAPACK.
 */
#define LAPACK_GETRF(type, call, cast_as) \
template <> \
inline int getrf(const enum CBLAS_ORDER Order, const int M, const int N, type * A, const int lda, int* ipiv) { \
  int info = call(Order, M, N, reinterpret_cast<cast_as *>(A), lda, ipiv); \
  if (!info) return info; \
  else { \
    rb_raise(rb_eArgError, "getrf: problem with argument %d\n", info); \
    return info; \
  } \
}

// NOTE(review): the macro raises ArgumentError for ANY nonzero info, but per
// the CLAPACK convention info > 0 means a singular factor (a data condition),
// not a bad argument -- confirm whether raising is intended for that case.
// The `return info` after rb_raise is unreachable (rb_raise does not return).

/* Specialize for ATLAS types */
// NOTE(review): these expansions are commented out, so no LAPACK-backed getrf
// specializations are declared here at present.
/*LAPACK_GETRF(float, clapack_sgetrf, float)
LAPACK_GETRF(double, clapack_dgetrf, double)
LAPACK_GETRF(Complex64, clapack_cgetrf, void)
LAPACK_GETRF(Complex128, clapack_zgetrf, void)
*/
1089
+
1090
+
1091
+
1092
/*
 * Function signature conversion for calling LAPACK's potrf functions as directly as possible.
 *
 * For documentation: http://www.netlib.org/lapack/double/dpotrf.f
 *
 * This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.
 *
 * Re-types the opaque data pointer to DType* and forwards to the typed potrf
 * (see the template and its CLAPACK specializations above); returns its info
 * code unchanged.
 */
template <typename DType>
inline int clapack_potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {
  return potrf<DType>(order, uplo, n, reinterpret_cast<DType*>(a), lda);
}
1103
+
1104
+
1105
+
1106
/*
 * Generic fallback for potri (inverse from a Cholesky factorization, per
 * clapack_?potri below). Always raises NotImplementedError: only the CLAPACK
 * specializations that follow are functional. The trailing `return 0` is
 * unreachable but satisfies the int return type.
 */
template <typename DType>
inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, DType* a, const int lda) {
  rb_raise(rb_eNotImpError, "potri not yet implemented for non-BLAS dtypes");
  return 0;
}
1111
+
1112
+
1113
#ifdef HAVE_CLAPACK_H
// CLAPACK-backed specializations of potri, one per BLAS dtype; each forwards
// directly to the corresponding clapack routine and returns its info code.
template <>
inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, float* a, const int lda) {
  return clapack_spotri(order, uplo, n, a, lda);
}

template <>
inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, double* a, const int lda) {
  return clapack_dpotri(order, uplo, n, a, lda);
}

// Complex variants: CLAPACK takes void* for complex data, so the typed
// pointer is re-typed here (presumably Complex64/Complex128 are interleaved
// re/im pairs matching CLAPACK's layout -- confirm against their definitions).
template <>
inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, Complex64* a, const int lda) {
  return clapack_cpotri(order, uplo, n, reinterpret_cast<void*>(a), lda);
}

template <>
inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, Complex128* a, const int lda) {
  return clapack_zpotri(order, uplo, n, reinterpret_cast<void*>(a), lda);
}
#endif
1134
+
1135
+
1136
/*
 * Function signature conversion for calling LAPACK's potri functions as directly as possible.
 *
 * For documentation: http://www.netlib.org/lapack/double/dpotri.f
 *
 * This function should normally go in math.cpp, but we need it to be available to nmatrix.cpp.
 *
 * Re-types the opaque data pointer to DType* and forwards to the typed potri
 * (see the template and its CLAPACK specializations above); returns its info
 * code unchanged.
 */
template <typename DType>
inline int clapack_potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {
  return potri<DType>(order, uplo, n, reinterpret_cast<DType*>(a), lda);
}
1147
+
1148
+
1149
+
1150
+
1151
+ }} // end namespace nm::math
1152
+
1153
+
1154
+ #endif // MATH_H