pnmatrix 1.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111) hide show
  1. checksums.yaml +7 -0
  2. data/ext/nmatrix/binary_format.txt +53 -0
  3. data/ext/nmatrix/data/complex.h +388 -0
  4. data/ext/nmatrix/data/data.cpp +274 -0
  5. data/ext/nmatrix/data/data.h +651 -0
  6. data/ext/nmatrix/data/meta.h +64 -0
  7. data/ext/nmatrix/data/ruby_object.h +386 -0
  8. data/ext/nmatrix/extconf.rb +70 -0
  9. data/ext/nmatrix/math/asum.h +99 -0
  10. data/ext/nmatrix/math/cblas_enums.h +36 -0
  11. data/ext/nmatrix/math/cblas_templates_core.h +507 -0
  12. data/ext/nmatrix/math/gemm.h +241 -0
  13. data/ext/nmatrix/math/gemv.h +178 -0
  14. data/ext/nmatrix/math/getrf.h +255 -0
  15. data/ext/nmatrix/math/getrs.h +121 -0
  16. data/ext/nmatrix/math/imax.h +82 -0
  17. data/ext/nmatrix/math/laswp.h +165 -0
  18. data/ext/nmatrix/math/long_dtype.h +62 -0
  19. data/ext/nmatrix/math/magnitude.h +54 -0
  20. data/ext/nmatrix/math/math.h +751 -0
  21. data/ext/nmatrix/math/nrm2.h +165 -0
  22. data/ext/nmatrix/math/rot.h +117 -0
  23. data/ext/nmatrix/math/rotg.h +106 -0
  24. data/ext/nmatrix/math/scal.h +71 -0
  25. data/ext/nmatrix/math/trsm.h +336 -0
  26. data/ext/nmatrix/math/util.h +162 -0
  27. data/ext/nmatrix/math.cpp +1368 -0
  28. data/ext/nmatrix/nm_memory.h +60 -0
  29. data/ext/nmatrix/nmatrix.cpp +285 -0
  30. data/ext/nmatrix/nmatrix.h +476 -0
  31. data/ext/nmatrix/ruby_constants.cpp +151 -0
  32. data/ext/nmatrix/ruby_constants.h +106 -0
  33. data/ext/nmatrix/ruby_nmatrix.c +3130 -0
  34. data/ext/nmatrix/storage/common.cpp +77 -0
  35. data/ext/nmatrix/storage/common.h +183 -0
  36. data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
  37. data/ext/nmatrix/storage/dense/dense.h +129 -0
  38. data/ext/nmatrix/storage/list/list.cpp +1628 -0
  39. data/ext/nmatrix/storage/list/list.h +138 -0
  40. data/ext/nmatrix/storage/storage.cpp +730 -0
  41. data/ext/nmatrix/storage/storage.h +99 -0
  42. data/ext/nmatrix/storage/yale/class.h +1139 -0
  43. data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
  44. data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
  45. data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
  46. data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
  47. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
  48. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
  49. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  50. data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
  51. data/ext/nmatrix/storage/yale/yale.h +203 -0
  52. data/ext/nmatrix/types.h +55 -0
  53. data/ext/nmatrix/util/io.cpp +279 -0
  54. data/ext/nmatrix/util/io.h +115 -0
  55. data/ext/nmatrix/util/sl_list.cpp +627 -0
  56. data/ext/nmatrix/util/sl_list.h +144 -0
  57. data/ext/nmatrix/util/util.h +78 -0
  58. data/lib/nmatrix/blas.rb +378 -0
  59. data/lib/nmatrix/cruby/math.rb +744 -0
  60. data/lib/nmatrix/enumerate.rb +253 -0
  61. data/lib/nmatrix/homogeneous.rb +241 -0
  62. data/lib/nmatrix/io/fortran_format.rb +138 -0
  63. data/lib/nmatrix/io/harwell_boeing.rb +221 -0
  64. data/lib/nmatrix/io/market.rb +263 -0
  65. data/lib/nmatrix/io/point_cloud.rb +189 -0
  66. data/lib/nmatrix/jruby/decomposition.rb +24 -0
  67. data/lib/nmatrix/jruby/enumerable.rb +13 -0
  68. data/lib/nmatrix/jruby/error.rb +4 -0
  69. data/lib/nmatrix/jruby/math.rb +501 -0
  70. data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
  71. data/lib/nmatrix/jruby/operators.rb +283 -0
  72. data/lib/nmatrix/jruby/slice.rb +264 -0
  73. data/lib/nmatrix/lapack_core.rb +181 -0
  74. data/lib/nmatrix/lapack_plugin.rb +44 -0
  75. data/lib/nmatrix/math.rb +953 -0
  76. data/lib/nmatrix/mkmf.rb +100 -0
  77. data/lib/nmatrix/monkeys.rb +137 -0
  78. data/lib/nmatrix/nmatrix.rb +1172 -0
  79. data/lib/nmatrix/rspec.rb +75 -0
  80. data/lib/nmatrix/shortcuts.rb +1163 -0
  81. data/lib/nmatrix/version.rb +39 -0
  82. data/lib/nmatrix/yale_functions.rb +118 -0
  83. data/lib/nmatrix.rb +28 -0
  84. data/spec/00_nmatrix_spec.rb +892 -0
  85. data/spec/01_enum_spec.rb +196 -0
  86. data/spec/02_slice_spec.rb +407 -0
  87. data/spec/03_nmatrix_monkeys_spec.rb +80 -0
  88. data/spec/2x2_dense_double.mat +0 -0
  89. data/spec/4x4_sparse.mat +0 -0
  90. data/spec/4x5_dense.mat +0 -0
  91. data/spec/blas_spec.rb +215 -0
  92. data/spec/elementwise_spec.rb +311 -0
  93. data/spec/homogeneous_spec.rb +100 -0
  94. data/spec/io/fortran_format_spec.rb +88 -0
  95. data/spec/io/harwell_boeing_spec.rb +98 -0
  96. data/spec/io/test.rua +9 -0
  97. data/spec/io_spec.rb +159 -0
  98. data/spec/lapack_core_spec.rb +482 -0
  99. data/spec/leakcheck.rb +16 -0
  100. data/spec/math_spec.rb +1363 -0
  101. data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
  102. data/spec/nmatrix_yale_spec.rb +286 -0
  103. data/spec/rspec_monkeys.rb +56 -0
  104. data/spec/rspec_spec.rb +35 -0
  105. data/spec/shortcuts_spec.rb +474 -0
  106. data/spec/slice_set_spec.rb +162 -0
  107. data/spec/spec_helper.rb +172 -0
  108. data/spec/stat_spec.rb +214 -0
  109. data/spec/test.pcd +20 -0
  110. data/spec/utm5940.mtx +83844 -0
  111. metadata +295 -0
@@ -0,0 +1,60 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == nm_memory.h
25
+ //
26
+ // Macros for memory allocation and freeing
27
+
28
+ /**
29
+ * We define these macros, which just call the ruby ones, as this makes
30
+ * debugging memory issues (particularly those involving interaction with
31
+ * the ruby GC) easier, as it's possible to add debugging code temporarily.
32
+ */
33
+ #ifndef __NM_MEMORY_H__
34
+ #define __NM_MEMORY_H__
35
+
36
+ #include <ruby.h>
37
+
38
+ #define NM_ALLOC(type) (ALLOC(type))
39
+
40
+ #define NM_ALLOC_N(type, n) (ALLOC_N(type, n))
41
+
42
+ #define NM_REALLOC_N(var, type, n) (REALLOC_N(var, type, n))
43
+
44
+ #define NM_ALLOCA_N(type, n) (ALLOCA_N(type, n))
45
+
46
+ #define NM_FREE(var) (xfree(var))
47
+
48
+ #define NM_ALLOC_NONRUBY(type) ((type*) malloc(sizeof(type)))
49
+
50
+ //Defines whether to do conservative gc registrations, i.e. those
51
+ //registrations that we're not that sure are necessary.
52
+ //#define NM_GC_CONSERVATIVE
53
+
54
+ #ifdef NM_GC_CONSERVATIVE
55
+ #define NM_CONSERVATIVE(statement) (statement)
56
+ #else
57
+ #define NM_CONSERVATIVE(statement)
58
+ #endif //NM_GC_CONSERVATIVE
59
+
60
+ #endif
@@ -0,0 +1,285 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == nmatrix.cpp
25
+ //
26
+ // Main C++ source file for NMatrix. Contains Init_nmatrix and most Ruby
27
+ // instance and class methods for NMatrix. Also responsible for calling Init
28
+ // methods on related modules.
29
+
30
+ /*
31
+ * Standard Includes
32
+ */
33
+
34
+ #include <ruby.h>
35
+ #include <cfloat>
36
+ #include <algorithm> // std::min
37
+ #include <fstream>
38
+
39
+ /*
40
+ * Project Includes
41
+ */
42
+ #include "nmatrix_config.h"
43
+
44
+ #include "types.h"
45
+ #include "data/data.h"
46
+ #include "math/math.h"
47
+ #include "util/io.h"
48
+ #include "storage/storage.h"
49
+ #include "storage/list/list.h"
50
+ #include "storage/yale/yale.h"
51
+
52
+ #include "nmatrix.h"
53
+
54
+ #include "ruby_constants.h"
55
+
56
+ /*
57
+ * Ruby internals
58
+ */
59
+
60
+
61
+ /*
62
+ * Macros
63
+ */
64
+
65
+
66
+ /*
67
+ * Global Variables
68
+ */
69
+
70
+ namespace nm {
71
+
72
+ /*
73
+ * This function is pulled out separately so it can be called for hermitian matrix writing, which also uses it.
74
+ */
75
+ template <typename DType>
76
+ size_t write_padded_dense_elements_upper(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {
77
+ // Write upper triangular portion. Assume 2D square matrix.
78
+ DType* elements = reinterpret_cast<DType*>(storage->elements);
79
+ size_t length = storage->shape[0];
80
+
81
+ size_t bytes_written = 0;
82
+
83
+ for (size_t i = 0; i < length; ++i) { // which row are we on?
84
+
85
+ f.write( reinterpret_cast<const char*>( &(elements[ i*(length + 1) ]) ),
86
+ (length - i) * sizeof(DType) );
87
+
88
+ bytes_written += (length - i) * sizeof(DType);
89
+ }
90
+ return bytes_written;
91
+ }
92
+
93
+ /*
94
+ * We need to specialize for Hermitian matrices. The next six functions accomplish that specialization, basically
95
+ * by ensuring that non-complex matrices cannot read or write hermitians (which would cause big problems).
96
+ */
97
+ template <typename DType>
98
+ size_t write_padded_dense_elements_herm(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {
99
+ rb_raise(rb_eArgError, "cannot write a non-complex matrix as hermitian");
100
+ }
101
+
102
+ template <>
103
+ size_t write_padded_dense_elements_herm<Complex64>(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {
104
+ return write_padded_dense_elements_upper<Complex64>(f, storage, symm);
105
+ }
106
+
107
+ template <>
108
+ size_t write_padded_dense_elements_herm<Complex128>(std::ofstream& f, DENSE_STORAGE* storage, symm_t symm) {
109
+ return write_padded_dense_elements_upper<Complex128>(f, storage, symm);
110
+ }
111
+
112
+ template <typename DType>
113
+ void read_padded_dense_elements_herm(DType* elements, size_t length) {
114
+ rb_raise(rb_eArgError, "cannot read a non-complex matrix as hermitian");
115
+ }
116
+
117
+ template <>
118
+ void read_padded_dense_elements_herm(Complex64* elements, size_t length) {
119
+ for (size_t i = 0; i < length; ++i) {
120
+ for (size_t j = i+1; j < length; ++j) {
121
+ elements[j * length + i] = elements[i * length + j].conjugate();
122
+ }
123
+ }
124
+ }
125
+
126
+ template <>
127
+ void read_padded_dense_elements_herm(Complex128* elements, size_t length) {
128
+ for (size_t i = 0; i < length; ++i) {
129
+ for (size_t j = i+1; j < length; ++j) {
130
+ elements[j * length + i] = elements[i * length + j].conjugate();
131
+ }
132
+ }
133
+ }
134
+
135
+ /*
136
+ * Read the elements of a dense storage matrix from a binary file, padded to 64-bits.
137
+ *
138
+ * storage should already be allocated. No initialization necessary.
139
+ */
140
+ template <typename DType>
141
+ void read_padded_dense_elements(std::ifstream& f, DENSE_STORAGE* storage, nm::symm_t symm) {
142
+ size_t bytes_read = 0;
143
+
144
+ if (symm == nm::NONSYMM) {
145
+ // Easy. Simply read the whole elements array.
146
+ size_t length = nm_storage_count_max_elements(reinterpret_cast<STORAGE*>(storage));
147
+ f.read(reinterpret_cast<char*>(storage->elements), length * sizeof(DType) );
148
+
149
+ bytes_read += length * sizeof(DType);
150
+ } else if (symm == LOWER) {
151
+
152
+ // Read lower triangular portion and initialize remainder to 0
153
+ DType* elements = reinterpret_cast<DType*>(storage->elements);
154
+ size_t length = storage->shape[0];
155
+
156
+ for (size_t i = 0; i < length; ++i) { // which row?
157
+
158
+ f.read( reinterpret_cast<char*>(&(elements[i * length])), (i + 1) * sizeof(DType) );
159
+
160
+ // need to zero-fill the rest of the row.
161
+ for (size_t j = i+1; j < length; ++j)
162
+ elements[i * length + j] = 0;
163
+
164
+ bytes_read += (i + 1) * sizeof(DType);
165
+ }
166
+ } else {
167
+
168
+ DType* elements = reinterpret_cast<DType*>(storage->elements);
169
+ size_t length = storage->shape[0];
170
+
171
+ for (size_t i = 0; i < length; ++i) { // which row?
172
+ f.read( reinterpret_cast<char*>(&(elements[i * (length + 1)])), (length - i) * sizeof(DType) );
173
+
174
+ bytes_read += (length - i) * sizeof(DType);
175
+ }
176
+
177
+ if (symm == SYMM) {
178
+ for (size_t i = 0; i < length; ++i) {
179
+ for (size_t j = i+1; j < length; ++j) {
180
+ elements[j * length + i] = elements[i * length + j];
181
+ }
182
+ }
183
+ } else if (symm == SKEW) {
184
+ for (size_t i = 0; i < length; ++i) {
185
+ for (size_t j = i+1; j < length; ++j) {
186
+ elements[j * length + i] = -elements[i * length + j];
187
+ }
188
+ }
189
+ } else if (symm == HERM) {
190
+ read_padded_dense_elements_herm<DType>(elements, length);
191
+
192
+ } else if (symm == UPPER) { // zero-fill the rest of the rows
193
+ for (size_t i = 0; i < length; ++i) {
194
+ for(size_t j = i+1; j < length; ++j) {
195
+ elements[j * length + i] = 0;
196
+ }
197
+ }
198
+ }
199
+
200
+ }
201
+
202
+ // Ignore any padding.
203
+ if (bytes_read % 8) f.ignore(bytes_read % 8);
204
+ }
205
+
206
+ template <typename DType>
207
+ void write_padded_yale_elements(std::ofstream& f, YALE_STORAGE* storage, size_t length, nm::symm_t symm) {
208
+ if (symm != nm::NONSYMM) rb_raise(rb_eNotImpError, "Yale matrices can only be read/written in full form");
209
+
210
+ // Keep track of bytes written for each of A and IJA so we know how much padding to use.
211
+ size_t bytes_written = length * sizeof(DType);
212
+
213
+ // Write A array
214
+ f.write(reinterpret_cast<const char*>(storage->a), bytes_written);
215
+
216
+ // Padding
217
+ int64_t zero = 0;
218
+ f.write(reinterpret_cast<const char*>(&zero), bytes_written % 8);
219
+
220
+ bytes_written = length * sizeof(IType);
221
+ f.write(reinterpret_cast<const char*>(storage->ija), bytes_written);
222
+
223
+ // More padding
224
+ f.write(reinterpret_cast<const char*>(&zero), bytes_written % 8);
225
+ }
226
+
227
+
228
+ template <typename DType>
229
+ void read_padded_yale_elements(std::ifstream& f, YALE_STORAGE* storage, size_t length, nm::symm_t symm) {
230
+ if (symm != NONSYMM) rb_raise(rb_eNotImpError, "Yale matrices can only be read/written in full form");
231
+
232
+ size_t bytes_read = length * sizeof(DType);
233
+ f.read(reinterpret_cast<char*>(storage->a), bytes_read);
234
+
235
+ int64_t padding = 0;
236
+ f.read(reinterpret_cast<char*>(&padding), bytes_read % 8);
237
+
238
+ bytes_read = length * sizeof(IType);
239
+ f.read(reinterpret_cast<char*>(storage->ija), bytes_read);
240
+
241
+ f.read(reinterpret_cast<char*>(&padding), bytes_read % 8);
242
+ }
243
+
244
+ /*
245
+ * Write the elements of a dense storage matrix to a binary file, padded to 64-bits.
246
+ */
247
+ template <typename DType>
248
+ void write_padded_dense_elements(std::ofstream& f, DENSE_STORAGE* storage, nm::symm_t symm) {
249
+ size_t bytes_written = 0;
250
+
251
+ if (symm == nm::NONSYMM) {
252
+ // Simply write the whole elements array.
253
+ size_t length = nm_storage_count_max_elements(storage);
254
+ f.write(reinterpret_cast<const char*>(storage->elements), length * sizeof(DType));
255
+
256
+ bytes_written += length * sizeof(DType);
257
+
258
+ } else if (symm == nm::LOWER) {
259
+
260
+ // Write lower triangular portion. Assume 2D square matrix.
261
+ DType* elements = reinterpret_cast<DType*>(storage->elements);
262
+ size_t length = storage->shape[0];
263
+ for (size_t i = 0; i < length; ++i) { // which row?
264
+
265
+ f.write( reinterpret_cast<const char*>( &(elements[i * length]) ),
266
+ (i + 1) * sizeof(DType) );
267
+
268
+ bytes_written += (i + 1) * sizeof(DType);
269
+ }
270
+ } else if (symm == nm::HERM) {
271
+ bytes_written += write_padded_dense_elements_herm<DType>(f, storage, symm);
272
+ } else { // HERM, UPPER, SYMM, SKEW
273
+ bytes_written += write_padded_dense_elements_upper<DType>(f, storage, symm);
274
+ }
275
+
276
+ // Padding
277
+ int64_t zero = 0;
278
+ f.write(reinterpret_cast<const char*>(&zero), bytes_written % 8);
279
+ }
280
+
281
+ } // end of namespace nm
282
+
283
+ extern "C" {
284
+ #include "ruby_nmatrix.c"
285
+ } // end of extern "C"