nmatrix-atlas 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +7 -0
  2. data/ext/nmatrix/data/complex.h +364 -0
  3. data/ext/nmatrix/data/data.h +638 -0
  4. data/ext/nmatrix/data/meta.h +64 -0
  5. data/ext/nmatrix/data/ruby_object.h +389 -0
  6. data/ext/nmatrix/math/asum.h +120 -0
  7. data/ext/nmatrix/math/cblas_enums.h +36 -0
  8. data/ext/nmatrix/math/cblas_templates_core.h +507 -0
  9. data/ext/nmatrix/math/gemm.h +241 -0
  10. data/ext/nmatrix/math/gemv.h +178 -0
  11. data/ext/nmatrix/math/getrf.h +255 -0
  12. data/ext/nmatrix/math/getrs.h +121 -0
  13. data/ext/nmatrix/math/imax.h +79 -0
  14. data/ext/nmatrix/math/laswp.h +165 -0
  15. data/ext/nmatrix/math/long_dtype.h +49 -0
  16. data/ext/nmatrix/math/math.h +744 -0
  17. data/ext/nmatrix/math/nrm2.h +160 -0
  18. data/ext/nmatrix/math/rot.h +117 -0
  19. data/ext/nmatrix/math/rotg.h +106 -0
  20. data/ext/nmatrix/math/scal.h +71 -0
  21. data/ext/nmatrix/math/trsm.h +332 -0
  22. data/ext/nmatrix/math/util.h +148 -0
  23. data/ext/nmatrix/nm_memory.h +60 -0
  24. data/ext/nmatrix/nmatrix.h +408 -0
  25. data/ext/nmatrix/ruby_constants.h +106 -0
  26. data/ext/nmatrix/storage/common.h +176 -0
  27. data/ext/nmatrix/storage/dense/dense.h +128 -0
  28. data/ext/nmatrix/storage/list/list.h +137 -0
  29. data/ext/nmatrix/storage/storage.h +98 -0
  30. data/ext/nmatrix/storage/yale/class.h +1139 -0
  31. data/ext/nmatrix/storage/yale/iterators/base.h +142 -0
  32. data/ext/nmatrix/storage/yale/iterators/iterator.h +130 -0
  33. data/ext/nmatrix/storage/yale/iterators/row.h +449 -0
  34. data/ext/nmatrix/storage/yale/iterators/row_stored.h +139 -0
  35. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +168 -0
  36. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +123 -0
  37. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  38. data/ext/nmatrix/storage/yale/yale.h +202 -0
  39. data/ext/nmatrix/types.h +54 -0
  40. data/ext/nmatrix/util/io.h +115 -0
  41. data/ext/nmatrix/util/sl_list.h +143 -0
  42. data/ext/nmatrix/util/util.h +78 -0
  43. data/ext/nmatrix_atlas/extconf.rb +250 -0
  44. data/ext/nmatrix_atlas/math_atlas.cpp +1206 -0
  45. data/ext/nmatrix_atlas/math_atlas/cblas_templates_atlas.h +72 -0
  46. data/ext/nmatrix_atlas/math_atlas/clapack_templates.h +332 -0
  47. data/ext/nmatrix_atlas/math_atlas/geev.h +82 -0
  48. data/ext/nmatrix_atlas/math_atlas/gesdd.h +83 -0
  49. data/ext/nmatrix_atlas/math_atlas/gesvd.h +81 -0
  50. data/ext/nmatrix_atlas/math_atlas/inc.h +47 -0
  51. data/ext/nmatrix_atlas/nmatrix_atlas.cpp +44 -0
  52. data/lib/nmatrix/atlas.rb +213 -0
  53. data/lib/nmatrix/lapack_ext_common.rb +69 -0
  54. data/spec/00_nmatrix_spec.rb +730 -0
  55. data/spec/01_enum_spec.rb +190 -0
  56. data/spec/02_slice_spec.rb +389 -0
  57. data/spec/03_nmatrix_monkeys_spec.rb +78 -0
  58. data/spec/2x2_dense_double.mat +0 -0
  59. data/spec/4x4_sparse.mat +0 -0
  60. data/spec/4x5_dense.mat +0 -0
  61. data/spec/blas_spec.rb +193 -0
  62. data/spec/elementwise_spec.rb +303 -0
  63. data/spec/homogeneous_spec.rb +99 -0
  64. data/spec/io/fortran_format_spec.rb +88 -0
  65. data/spec/io/harwell_boeing_spec.rb +98 -0
  66. data/spec/io/test.rua +9 -0
  67. data/spec/io_spec.rb +149 -0
  68. data/spec/lapack_core_spec.rb +482 -0
  69. data/spec/leakcheck.rb +16 -0
  70. data/spec/math_spec.rb +730 -0
  71. data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
  72. data/spec/nmatrix_yale_spec.rb +286 -0
  73. data/spec/plugins/atlas/atlas_spec.rb +242 -0
  74. data/spec/rspec_monkeys.rb +56 -0
  75. data/spec/rspec_spec.rb +34 -0
  76. data/spec/shortcuts_spec.rb +310 -0
  77. data/spec/slice_set_spec.rb +157 -0
  78. data/spec/spec_helper.rb +140 -0
  79. data/spec/stat_spec.rb +203 -0
  80. data/spec/test.pcd +20 -0
  81. data/spec/utm5940.mtx +83844 -0
  82. metadata +159 -0
@@ -0,0 +1,72 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == cblas_templates_atlas.h
+ //
+ // Define template functions for calling CBLAS functions in the
+ // nm::math::atlas namespace.
+ //
+
+ #ifndef CBLAS_TEMPLATES_ATLAS_H
+ #define CBLAS_TEMPLATES_ATLAS_H
+
+ // Includes so we have access to the internal implementations.
+ #include "math/rotg.h"
+ #include "math/rot.h"
+ #include "math/asum.h"
+ #include "math/nrm2.h"
+ #include "math/imax.h"
+ #include "math/scal.h"
+ #include "math/gemv.h"
+ #include "math/gemm.h"
+ #include "math/trsm.h"
+
+ namespace nm { namespace math { namespace atlas {
+
+ // Add the cblas templates in the correct namespace.
+ #include "math/cblas_templates_core.h"
+
+ // Add complex specializations for rot and rotg. These cblas functions are not
+ // part of the standard CBLAS and so need to be in an nmatrix-atlas header.
+ template <>
+ inline void rotg(Complex64* a, Complex64* b, Complex64* c, Complex64* s) {
+   cblas_crotg(a, b, c, s);
+ }
+
+ template <>
+ inline void rotg(Complex128* a, Complex128* b, Complex128* c, Complex128* s) {
+   cblas_zrotg(a, b, c, s);
+ }
+ template <>
+ inline void rot(const int N, Complex64* X, const int incX, Complex64* Y, const int incY, const float c, const float s) {
+   cblas_csrot(N, X, incX, Y, incY, c, s);
+ }
+
+ template <>
+ inline void rot(const int N, Complex128* X, const int incX, Complex128* Y, const int incY, const double c, const double s) {
+   cblas_zdrot(N, X, incX, Y, incY, c, s);
+ }
+
+ }}} // nm::math::atlas
+
+ #endif
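
The header above relies on ordinary C++ template specialization for dispatch: the generic templates pulled in from math/cblas_templates_core.h handle non-BLAS dtypes, while the explicit specializations route Complex64/Complex128 arguments to the ATLAS routines (cblas_crotg, cblas_zrotg, cblas_csrot, cblas_zdrot). The following is a minimal, self-contained sketch of that dispatch pattern; the names MyComplex and vendor_crotg are hypothetical stand-ins, not NMatrix or CBLAS symbols.

#include <complex>
#include <iostream>

using MyComplex = std::complex<float>;  // stand-in for nm::Complex64

// Stand-in for an external vendor routine (e.g. a CBLAS extension).
inline void vendor_crotg(MyComplex* a, MyComplex* b, MyComplex* c, MyComplex* s) {
  std::cout << "vendor crotg called\n";
}

// Generic template: the portable fallback used for "plain" element types.
template <typename DType>
inline void rotg(DType* a, DType* b, DType* c, DType* s) {
  std::cout << "generic rotg called\n";
}

// Explicit specialization: complex arguments are routed to the vendor routine.
template <>
inline void rotg(MyComplex* a, MyComplex* b, MyComplex* c, MyComplex* s) {
  vendor_crotg(a, b, c, s);
}

int main() {
  float f[4] = {1, 2, 0, 0};
  MyComplex z[4] = {{1, 0}, {2, 0}, {0, 0}, {0, 0}};
  rotg(&f[0], &f[1], &f[2], &f[3]);  // picks the generic template
  rotg(&z[0], &z[1], &z[2], &z[3]);  // picks the specialization
}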
@@ -0,0 +1,332 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == clapack_templates.h
+ //
+ // Collection of functions used to call ATLAS CLAPACK functions
+ // directly.
+ //
+
+ #ifndef CLAPACK_TEMPLATES_H
+ #define CLAPACK_TEMPLATES_H
+
+ // Needed to get access to the internal implementations.
+ #include "math/getrf.h"
+ #include "math/getrs.h"
+
+ namespace nm { namespace math { namespace atlas {
+ // The first group of functions are those for which we have internal
+ // implementations. The internal implementations are defined in the
+ // ext/nmatrix/math directory and are the non-specialized forms of the
+ // template functions nm::math::whatever(). They are called below for
+ // non-BLAS types in the non-specialized form of the template
+ // nm::math::atlas::whatever(). The specialized forms call the appropriate
+ // clapack functions.
+
+ // We also define the clapack_whatever() template functions below,
+ // which just cast their arguments to the appropriate types before
+ // calling the typed templates.
+
+
+ // getrf
+ template <typename DType>
+ inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, DType* a, const int lda, int* ipiv) {
+   return nm::math::getrf<DType>(order, m, n, a, lda, ipiv);
+ }
+
+ // Apparently CLAPACK isn't available on OS X, so we only define these
+ // specializations if it is available.
+ #if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)
+ template <>
+ inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, float* a, const int lda, int* ipiv) {
+   return clapack_sgetrf(order, m, n, a, lda, ipiv);
+ }
+
+ template <>
+ inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, double* a, const int lda, int* ipiv) {
+   return clapack_dgetrf(order, m, n, a, lda, ipiv);
+ }
+
+ template <>
+ inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex64* a, const int lda, int* ipiv) {
+   return clapack_cgetrf(order, m, n, a, lda, ipiv);
+ }
+
+ template <>
+ inline int getrf(const enum CBLAS_ORDER order, const int m, const int n, Complex128* a, const int lda, int* ipiv) {
+   return clapack_zgetrf(order, m, n, a, lda, ipiv);
+ }
+ #endif
+
+ template <typename DType>
+ inline int clapack_getrf(const enum CBLAS_ORDER order, const int m, const int n, void* a, const int lda, int* ipiv) {
+   return getrf<DType>(order, m, n, static_cast<DType*>(a), lda, ipiv);
+ }
+
+ // getrs
+ /*
+  * Solves a system of linear equations A*X = B with a general NxN matrix A using the LU factorization computed by GETRF.
+  *
+  * From ATLAS 3.8.0.
+  */
+ template <typename DType>
+ inline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const DType* A,
+                  const int lda, const int* ipiv, DType* B, const int ldb)
+ {
+   return nm::math::getrs<DType>(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
+ }
+
+ #if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)
+ template <>
+ inline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const float* A,
+                  const int lda, const int* ipiv, float* B, const int ldb)
+ {
+   return clapack_sgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
+ }
+
+ template <>
+ inline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const double* A,
+                  const int lda, const int* ipiv, double* B, const int ldb)
+ {
+   return clapack_dgetrs(Order, Trans, N, NRHS, A, lda, ipiv, B, ldb);
+ }
+
+ template <>
+ inline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const Complex64* A,
+                  const int lda, const int* ipiv, Complex64* B, const int ldb)
+ {
+   return clapack_cgetrs(Order, Trans, N, NRHS, A, lda, ipiv, static_cast<void*>(B), ldb);
+ }
+
+ template <>
+ inline int getrs(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE Trans, const int N, const int NRHS, const Complex128* A,
+                  const int lda, const int* ipiv, Complex128* B, const int ldb)
+ {
+   return clapack_zgetrs(Order, Trans, N, NRHS, A, lda, ipiv, static_cast<void*>(B), ldb);
+ }
+ #endif
+
+ template <typename DType>
+ inline int clapack_getrs(const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans, const int n, const int nrhs,
+                          const void* a, const int lda, const int* ipiv, void* b, const int ldb) {
+   return getrs<DType>(order, trans, n, nrhs, static_cast<const DType*>(a), lda, ipiv, static_cast<DType*>(b), ldb);
+ }
+
+
+ // Functions without internal implementations below:
+
+ // getri
+ template <typename DType>
+ inline int getri(const enum CBLAS_ORDER order, const int n, DType* a, const int lda, const int* ipiv) {
+   rb_raise(rb_eNotImpError, "getri not yet implemented for non-BLAS dtypes");
+   return 0;
+ }
+
+ #if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)
+ template <>
+ inline int getri(const enum CBLAS_ORDER order, const int n, float* a, const int lda, const int* ipiv) {
+   return clapack_sgetri(order, n, a, lda, ipiv);
+ }
+
+ template <>
+ inline int getri(const enum CBLAS_ORDER order, const int n, double* a, const int lda, const int* ipiv) {
+   return clapack_dgetri(order, n, a, lda, ipiv);
+ }
+
+ template <>
+ inline int getri(const enum CBLAS_ORDER order, const int n, Complex64* a, const int lda, const int* ipiv) {
+   return clapack_cgetri(order, n, a, lda, ipiv);
+ }
+
+ template <>
+ inline int getri(const enum CBLAS_ORDER order, const int n, Complex128* a, const int lda, const int* ipiv) {
+   return clapack_zgetri(order, n, a, lda, ipiv);
+ }
+ #endif
+
+ template <typename DType>
+ inline int clapack_getri(const enum CBLAS_ORDER order, const int n, void* a, const int lda, const int* ipiv) {
+   return getri<DType>(order, n, static_cast<DType*>(a), lda, ipiv);
+ }
+
+ // potrf
+ /*
+  * From ATLAS 3.8.0:
+  *
+  * Computes one of two LU factorizations based on the setting of the Order
+  * parameter, as follows:
+  * ----------------------------------------------------------------------------
+  * Order == CblasColMajor
+  *    Column-major factorization of form
+  *      A = P * L * U
+  *    where P is a row-permutation matrix, L is lower triangular with unit
+  *    diagonal elements (lower trapezoidal if M > N), and U is upper triangular
+  *    (upper trapezoidal if M < N).
+  *
+  * ----------------------------------------------------------------------------
+  * Order == CblasRowMajor
+  *    Row-major factorization of form
+  *      A = P * L * U
+  *    where P is a column-permutation matrix, L is lower triangular (lower
+  *    trapezoidal if M > N), and U is upper triangular with unit diagonals (upper
+  *    trapezoidal if M < N).
+  *
+  * ============================================================================
+  * Let IERR be the return value of the function:
+  *   If IERR == 0, successful exit.
+  *   If (IERR < 0) the -IERR argument had an illegal value
+  *   If (IERR > 0 && Order == CblasColMajor)
+  *     U(i-1,i-1) is exactly zero. The factorization has been completed,
+  *     but the factor U is exactly singular, and division by zero will
+  *     occur if it is used to solve a system of equations.
+  *   If (IERR > 0 && Order == CblasRowMajor)
+  *     L(i-1,i-1) is exactly zero. The factorization has been completed,
+  *     but the factor L is exactly singular, and division by zero will
+  *     occur if it is used to solve a system of equations.
+  */
+ template <typename DType>
+ inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, DType* A, const int lda) {
+ #if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H
+   rb_raise(rb_eNotImpError, "not yet implemented for non-BLAS dtypes");
+ #else
+   rb_raise(rb_eNotImpError, "only CLAPACK version implemented thus far");
+ #endif
+   return 0;
+ }
+
+ #if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H
+ template <>
+ inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, float* A, const int lda) {
+   return clapack_spotrf(order, uplo, N, A, lda);
+ }
+
+ template <>
+ inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, double* A, const int lda) {
+   return clapack_dpotrf(order, uplo, N, A, lda);
+ }
+
+ template <>
+ inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex64* A, const int lda) {
+   return clapack_cpotrf(order, uplo, N, A, lda);
+ }
+
+ template <>
+ inline int potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int N, Complex128* A, const int lda) {
+   return clapack_zpotrf(order, uplo, N, A, lda);
+ }
+ #endif
+
+ template <typename DType>
+ inline int clapack_potrf(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {
+   return potrf<DType>(order, uplo, n, static_cast<DType*>(a), lda);
+ }
+
+ // potri
+ template <typename DType>
+ inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, DType* a, const int lda) {
+   rb_raise(rb_eNotImpError, "potri not yet implemented for non-BLAS dtypes");
+   return 0;
+ }
+
+
+ #if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H
+ template <>
+ inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, float* a, const int lda) {
+   return clapack_spotri(order, uplo, n, a, lda);
+ }
+
+ template <>
+ inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, double* a, const int lda) {
+   return clapack_dpotri(order, uplo, n, a, lda);
+ }
+
+ template <>
+ inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, Complex64* a, const int lda) {
+   return clapack_cpotri(order, uplo, n, a, lda);
+ }
+
+ template <>
+ inline int potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, Complex128* a, const int lda) {
+   return clapack_zpotri(order, uplo, n, a, lda);
+ }
+ #endif
+
+ template <typename DType>
+ inline int clapack_potri(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, void* a, const int lda) {
+   return potri<DType>(order, uplo, n, static_cast<DType*>(a), lda);
+ }
+
+ // potrs
+ /*
+  * Solves a system of linear equations A*X = B with a symmetric positive definite matrix A using the Cholesky factorization computed by POTRF.
+  */
+ template <typename DType>
+ inline int potrs(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const DType* A,
+                  const int lda, DType* B, const int ldb)
+ {
+ #if defined HAVE_CLAPACK_H || defined HAVE_ATLAS_CLAPACK_H
+   rb_raise(rb_eNotImpError, "not yet implemented for non-BLAS dtypes");
+ #else
+   rb_raise(rb_eNotImpError, "only CLAPACK version implemented thus far");
+ #endif
+ }
+
+ #if defined (HAVE_CLAPACK_H) || defined (HAVE_ATLAS_CLAPACK_H)
+ template <>
+ inline int potrs<float> (const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const float* A,
+                          const int lda, float* B, const int ldb)
+ {
+   return clapack_spotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);
+ }
+
+ template <>
+ inline int potrs<double>(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const double* A,
+                          const int lda, double* B, const int ldb)
+ {
+   return clapack_dpotrs(Order, Uplo, N, NRHS, A, lda, B, ldb);
+ }
+
+ template <>
+ inline int potrs<Complex64>(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const Complex64* A,
+                             const int lda, Complex64* B, const int ldb)
+ {
+   return clapack_cpotrs(Order, Uplo, N, NRHS, A, lda, static_cast<void *>(B), ldb);
+ }
+
+ template <>
+ inline int potrs<Complex128>(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N, const int NRHS, const Complex128* A,
+                              const int lda, Complex128* B, const int ldb)
+ {
+   return clapack_zpotrs(Order, Uplo, N, NRHS, A, lda, static_cast<void *>(B), ldb);
+ }
+ #endif
+
+ template <typename DType>
+ inline int clapack_potrs(const enum CBLAS_ORDER order, const enum CBLAS_UPLO uplo, const int n, const int nrhs,
+                          const void* a, const int lda, void* b, const int ldb) {
+   return potrs<DType>(order, uplo, n, nrhs, static_cast<const DType*>(a), lda, static_cast<DType*>(b), ldb);
+ }
+
+ }}}
+
+ #endif
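
A note on the clapack_*() wrappers defined above (clapack_getrf, clapack_getrs, clapack_getri, clapack_potrf, clapack_potri, clapack_potrs): they accept void* buffers and cast to DType* before forwarding to the typed templates, so every dtype ends up with the same C-style signature. That uniform signature is what allows a dtype-indexed table of function pointers to drive the dispatch at runtime. The sketch below illustrates the idea with hypothetical names (dtype_t, getrf_typed, getrf_erased); it is not the actual NMatrix dispatch code.

#include <cstdio>

enum dtype_t { FLOAT32 = 0, FLOAT64 = 1 };

// A typed template, standing in for a routine like nm::math::atlas::getrf<DType>.
template <typename DType>
inline int getrf_typed(const int m, const int n, DType* a, const int lda, int* ipiv) {
  std::printf("getrf on elements of size %zu\n", sizeof(DType));
  return 0;
}

// Type-erased wrapper, analogous to clapack_getrf<DType>: cast, then forward.
template <typename DType>
inline int getrf_erased(const int m, const int n, void* a, const int lda, int* ipiv) {
  return getrf_typed<DType>(m, n, static_cast<DType*>(a), lda, ipiv);
}

// Because every instantiation now shares one signature, a runtime dtype can
// index straight into a table of function pointers.
static int (*const getrf_table[])(const int, const int, void*, const int, int*) = {
  getrf_erased<float>,   // FLOAT32
  getrf_erased<double>   // FLOAT64
};

int main() {
  double a[4] = {4.0, 3.0, 6.0, 3.0};
  int ipiv[2] = {0, 0};
  dtype_t dtype = FLOAT64;
  return getrf_table[dtype](2, 2, a, 2, ipiv);
}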
@@ -0,0 +1,82 @@
+ /////////////////////////////////////////////////////////////////////
+ // = NMatrix
+ //
+ // A linear algebra library for scientific computation in Ruby.
+ // NMatrix is part of SciRuby.
+ //
+ // NMatrix was originally inspired by and derived from NArray, by
+ // Masahiro Tanaka: http://narray.rubyforge.org
+ //
+ // == Copyright Information
+ //
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
+ //
+ // Please see LICENSE.txt for additional copyright notices.
+ //
+ // == Contributing
+ //
+ // By contributing source code to SciRuby, you agree to be bound by
+ // our Contributor Agreement:
+ //
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
+ //
+ // == geev.h
+ //
+ // Header file for interface with LAPACK's xGEEV functions.
+ //
+
+ #ifndef GEEV_H
+ #define GEEV_H
+
+ extern "C" {
+   void sgeev_(char* jobvl, char* jobvr, int* n, float* a, int* lda, float* wr, float* wi, float* vl, int* ldvl, float* vr, int* ldvr, float* work, int* lwork, int* info);
+   void dgeev_(char* jobvl, char* jobvr, int* n, double* a, int* lda, double* wr, double* wi, double* vl, int* ldvl, double* vr, int* ldvr, double* work, int* lwork, int* info);
+   void cgeev_(char* jobvl, char* jobvr, int* n, nm::Complex64* a, int* lda, nm::Complex64* w, nm::Complex64* vl, int* ldvl, nm::Complex64* vr, int* ldvr, nm::Complex64* work, int* lwork, float* rwork, int* info);
+   void zgeev_(char* jobvl, char* jobvr, int* n, nm::Complex128* a, int* lda, nm::Complex128* w, nm::Complex128* vl, int* ldvl, nm::Complex128* vr, int* ldvr, nm::Complex128* work, int* lwork, double* rwork, int* info);
+ }
+
+ namespace nm { namespace math { namespace atlas {
+
+ template <typename DType, typename CType> // wr
+ inline int geev(char jobvl, char jobvr, int n, DType* a, int lda, DType* w, DType* wi, DType* vl, int ldvl, DType* vr, int ldvr, DType* work, int lwork, CType* rwork) {
+   rb_raise(rb_eNotImpError, "not yet implemented for non-BLAS dtypes");
+   return -1;
+ }
+
+ template <>
+ inline int geev(char jobvl, char jobvr, int n, float* a, int lda, float* w, float* wi, float* vl, int ldvl, float* vr, int ldvr, float* work, int lwork, float* rwork) {
+   int info;
+   sgeev_(&jobvl, &jobvr, &n, a, &lda, w, wi, vl, &ldvl, vr, &ldvr, work, &lwork, &info);
+   return info;
+ }
+
+ template <>
+ inline int geev(char jobvl, char jobvr, int n, double* a, int lda, double* w, double* wi, double* vl, int ldvl, double* vr, int ldvr, double* work, int lwork, double* rwork) {
+   int info;
+   dgeev_(&jobvl, &jobvr, &n, a, &lda, w, wi, vl, &ldvl, vr, &ldvr, work, &lwork, &info);
+   return info;
+ }
+
+ template <>
+ inline int geev(char jobvl, char jobvr, int n, Complex64* a, int lda, Complex64* w, Complex64* wi, Complex64* vl, int ldvl, Complex64* vr, int ldvr, Complex64* work, int lwork, float* rwork) {
+   int info;
+   cgeev_(&jobvl, &jobvr, &n, a, &lda, w, vl, &ldvl, vr, &ldvr, work, &lwork, rwork, &info);
+   return info;
+ }
+
+ template <>
+ inline int geev(char jobvl, char jobvr, int n, Complex128* a, int lda, Complex128* w, Complex128* wi, Complex128* vl, int ldvl, Complex128* vr, int ldvr, Complex128* work, int lwork, double* rwork) {
+   int info;
+   zgeev_(&jobvl, &jobvr, &n, a, &lda, w, vl, &ldvl, vr, &ldvr, work, &lwork, rwork, &info);
+   return info;
+ }
+
+ template <typename DType, typename CType>
+ inline int lapack_geev(char jobvl, char jobvr, int n, void* a, int lda, void* w, void* wi, void* vl, int ldvl, void* vr, int ldvr, void* work, int lwork, void* rwork) {
+   return geev<DType,CType>(jobvl, jobvr, n, reinterpret_cast<DType*>(a), lda, reinterpret_cast<DType*>(w), reinterpret_cast<DType*>(wi), reinterpret_cast<DType*>(vl), ldvl, reinterpret_cast<DType*>(vr), ldvr, reinterpret_cast<DType*>(work), lwork, reinterpret_cast<CType*>(rwork));
+ }
+
+ }}} // end nm::math::atlas
+
+ #endif // GEEV_H
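
For context on how the Fortran prototypes declared in this header are normally driven: every argument is passed by pointer, and the LAPACK xGEEV routines support a workspace query, where a first call with lwork = -1 reports the optimal workspace size in work[0]. The sketch below shows that convention for dgeev_ only; it assumes linking against a LAPACK library (for example with -llapack) and is not part of nmatrix-atlas.

#include <vector>

extern "C" void dgeev_(char* jobvl, char* jobvr, int* n, double* a, int* lda,
                       double* wr, double* wi, double* vl, int* ldvl,
                       double* vr, int* ldvr, double* work, int* lwork, int* info);

int main() {
  int n = 2, lda = 2, ldvl = 2, ldvr = 2, info = 0;
  char jobvl = 'V', jobvr = 'V';
  double a[4] = {0.0, -2.0, 1.0, -3.0};   // 2x2 matrix in column-major order
  double wr[2], wi[2], vl[4], vr[4];

  // Workspace query: lwork = -1 asks dgeev_ to report the optimal size in wkopt.
  double wkopt = 0.0;
  int lwork = -1;
  dgeev_(&jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr, &wkopt, &lwork, &info);

  // Real call with the recommended workspace.
  lwork = static_cast<int>(wkopt);
  std::vector<double> work(lwork);
  dgeev_(&jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr, work.data(), &lwork, &info);

  return info;  // 0 on success; eigenvalues are returned in wr/wi
}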