pnmatrix 1.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/nmatrix/binary_format.txt +53 -0
- data/ext/nmatrix/data/complex.h +388 -0
- data/ext/nmatrix/data/data.cpp +274 -0
- data/ext/nmatrix/data/data.h +651 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +386 -0
- data/ext/nmatrix/extconf.rb +70 -0
- data/ext/nmatrix/math/asum.h +99 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +82 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +62 -0
- data/ext/nmatrix/math/magnitude.h +54 -0
- data/ext/nmatrix/math/math.h +751 -0
- data/ext/nmatrix/math/nrm2.h +165 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +336 -0
- data/ext/nmatrix/math/util.h +162 -0
- data/ext/nmatrix/math.cpp +1368 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.cpp +285 -0
- data/ext/nmatrix/nmatrix.h +476 -0
- data/ext/nmatrix/ruby_constants.cpp +151 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/ruby_nmatrix.c +3130 -0
- data/ext/nmatrix/storage/common.cpp +77 -0
- data/ext/nmatrix/storage/common.h +183 -0
- data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
- data/ext/nmatrix/storage/dense/dense.h +129 -0
- data/ext/nmatrix/storage/list/list.cpp +1628 -0
- data/ext/nmatrix/storage/list/list.h +138 -0
- data/ext/nmatrix/storage/storage.cpp +730 -0
- data/ext/nmatrix/storage/storage.h +99 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
- data/ext/nmatrix/storage/yale/yale.h +203 -0
- data/ext/nmatrix/types.h +55 -0
- data/ext/nmatrix/util/io.cpp +279 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.cpp +627 -0
- data/ext/nmatrix/util/sl_list.h +144 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/lib/nmatrix/blas.rb +378 -0
- data/lib/nmatrix/cruby/math.rb +744 -0
- data/lib/nmatrix/enumerate.rb +253 -0
- data/lib/nmatrix/homogeneous.rb +241 -0
- data/lib/nmatrix/io/fortran_format.rb +138 -0
- data/lib/nmatrix/io/harwell_boeing.rb +221 -0
- data/lib/nmatrix/io/market.rb +263 -0
- data/lib/nmatrix/io/point_cloud.rb +189 -0
- data/lib/nmatrix/jruby/decomposition.rb +24 -0
- data/lib/nmatrix/jruby/enumerable.rb +13 -0
- data/lib/nmatrix/jruby/error.rb +4 -0
- data/lib/nmatrix/jruby/math.rb +501 -0
- data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
- data/lib/nmatrix/jruby/operators.rb +283 -0
- data/lib/nmatrix/jruby/slice.rb +264 -0
- data/lib/nmatrix/lapack_core.rb +181 -0
- data/lib/nmatrix/lapack_plugin.rb +44 -0
- data/lib/nmatrix/math.rb +953 -0
- data/lib/nmatrix/mkmf.rb +100 -0
- data/lib/nmatrix/monkeys.rb +137 -0
- data/lib/nmatrix/nmatrix.rb +1172 -0
- data/lib/nmatrix/rspec.rb +75 -0
- data/lib/nmatrix/shortcuts.rb +1163 -0
- data/lib/nmatrix/version.rb +39 -0
- data/lib/nmatrix/yale_functions.rb +118 -0
- data/lib/nmatrix.rb +28 -0
- data/spec/00_nmatrix_spec.rb +892 -0
- data/spec/01_enum_spec.rb +196 -0
- data/spec/02_slice_spec.rb +407 -0
- data/spec/03_nmatrix_monkeys_spec.rb +80 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +215 -0
- data/spec/elementwise_spec.rb +311 -0
- data/spec/homogeneous_spec.rb +100 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +159 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +1363 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +35 -0
- data/spec/shortcuts_spec.rb +474 -0
- data/spec/slice_set_spec.rb +162 -0
- data/spec/spec_helper.rb +172 -0
- data/spec/stat_spec.rb +214 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +295 -0
|
@@ -0,0 +1,730 @@
|
|
|
1
|
+
/////////////////////////////////////////////////////////////////////
|
|
2
|
+
// = NMatrix
|
|
3
|
+
//
|
|
4
|
+
// A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
// NMatrix is part of SciRuby.
|
|
6
|
+
//
|
|
7
|
+
// NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
// Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
//
|
|
10
|
+
// == Copyright Information
|
|
11
|
+
//
|
|
12
|
+
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
//
|
|
15
|
+
// Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
//
|
|
17
|
+
// == Contributing
|
|
18
|
+
//
|
|
19
|
+
// By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
// our Contributor Agreement:
|
|
21
|
+
//
|
|
22
|
+
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
//
|
|
24
|
+
// == storage.cpp
|
|
25
|
+
//
|
|
26
|
+
// Code that is used by or involves more than one storage type.
|
|
27
|
+
|
|
28
|
+
/*
|
|
29
|
+
* Standard Includes
|
|
30
|
+
*/
|
|
31
|
+
|
|
32
|
+
/*
|
|
33
|
+
* Project Includes
|
|
34
|
+
*/
|
|
35
|
+
|
|
36
|
+
#include "data/data.h"
|
|
37
|
+
|
|
38
|
+
#include "storage.h"
|
|
39
|
+
|
|
40
|
+
#include "common.h"
|
|
41
|
+
|
|
42
|
+
/*
|
|
43
|
+
* Macros
|
|
44
|
+
*/
|
|
45
|
+
|
|
46
|
+
/*
|
|
47
|
+
* Global Variables
|
|
48
|
+
*/
|
|
49
|
+
|
|
50
|
+
extern "C" {

// Human-readable names for each storage type (stype), indexed by the
// nm::stype_t enumeration (see nmatrix.h). Exported with C linkage so the
// C-level Ruby binding code can use them in error messages and introspection.
const char* const STYPE_NAMES[nm::NUM_STYPES] = {
  "dense",
  "list",
  "yale"
};

} // end extern "C" block
|
|
59
|
+
|
|
60
|
+
/*
|
|
61
|
+
* Forward Declarations
|
|
62
|
+
*/
|
|
63
|
+
|
|
64
|
+
namespace nm {
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
/*
|
|
68
|
+
* Functions
|
|
69
|
+
*/
|
|
70
|
+
|
|
71
|
+
/////////////////////////
|
|
72
|
+
// Templated Functions //
|
|
73
|
+
/////////////////////////
|
|
74
|
+
|
|
75
|
+
namespace dense_storage {
|
|
76
|
+
|
|
77
|
+
template <typename LDType, typename RDType>
|
|
78
|
+
static void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* default_val,
|
|
79
|
+
size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions);
|
|
80
|
+
|
|
81
|
+
template <typename LDType, typename RDType>
|
|
82
|
+
static void cast_copy_list_default(LDType* lhs, RDType* default_val, size_t& pos,
|
|
83
|
+
const size_t* shape, size_t dim, size_t max_elements, size_t recursions);
|
|
84
|
+
|
|
85
|
+
/*
 * Convert (by creating a copy) from list storage to dense storage.
 *
 * lhs takes dtype l_dtype (LDType); rhs elements are read as RDType.
 * The rhs is GC-registered for the duration of the copy. If rhs is a
 * slice (rhs->src != rhs), a compacted copy is made first so the
 * recursive copy can walk a self-contained list.
 */
template <typename LDType, typename RDType>
DENSE_STORAGE* create_from_list_storage(const LIST_STORAGE* rhs, dtype_t l_dtype) {
  nm_list_storage_register(rhs);
  // allocate and copy shape
  size_t* shape = NM_ALLOC_N(size_t, rhs->dim);
  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));

  DENSE_STORAGE* lhs = nm_dense_storage_create(l_dtype, shape, rhs->dim, NULL, 0);

  // Position in lhs->elements.
  size_t pos = 0;
  size_t max_elements = nm_storage_count_max_elements(rhs);

  //static void dense_storage_cast_copy_list_contents_template(LDType* lhs, const LIST* rhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions)
  // recursively copy the contents
  if (rhs->src == rhs)
    cast_copy_list_contents<LDType,RDType>(reinterpret_cast<LDType*>(lhs->elements),
                                           rhs->rows,
                                           reinterpret_cast<RDType*>(rhs->default_val),
                                           pos, shape, lhs->dim, max_elements, rhs->dim-1);
  else {
    // rhs is a view on another list; materialize the slice first.
    LIST_STORAGE *tmp = nm_list_storage_copy(rhs);
    cast_copy_list_contents<LDType,RDType>(reinterpret_cast<LDType*>(lhs->elements),
                                           tmp->rows,
                                           reinterpret_cast<RDType*>(tmp->default_val),
                                           pos, shape, lhs->dim, max_elements, tmp->dim-1);
    nm_list_storage_delete(tmp);

  }
  nm_list_storage_unregister(rhs);

  return lhs;
}
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
/*
 * Create/allocate dense storage, copying into it the contents of a Yale matrix.
 *
 * Walks each (possibly sliced) row of the Yale matrix: empty rows are filled
 * with the Yale "zero" default value (plus the stored diagonal entry, which
 * Yale keeps separately in a[0..shape[0]-1]); non-empty rows interleave stored
 * entries with zeros by tracking the next stored column index.
 */
template <typename LDType, typename RDType>
DENSE_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype) {

  nm_yale_storage_register(rhs);
  // IJA and A arrays always live on the source (unsliced) storage.
  IType* rhs_ija = reinterpret_cast<YALE_STORAGE*>(rhs->src)->ija;
  RDType* rhs_a = reinterpret_cast<RDType*>(reinterpret_cast<YALE_STORAGE*>(rhs->src)->a);

  // Allocate and set shape.
  size_t* shape = NM_ALLOC_N(size_t, rhs->dim);
  shape[0] = rhs->shape[0];
  shape[1] = rhs->shape[1];

  DENSE_STORAGE* lhs = nm_dense_storage_create(l_dtype, shape, rhs->dim, NULL, 0);
  LDType* lhs_elements = reinterpret_cast<LDType*>(lhs->elements);

  // Position in dense to write to.
  size_t pos = 0;

  // Yale stores its default ("zero") value at a[shape[0]]; cast it once.
  LDType LCAST_ZERO = rhs_a[rhs->src->shape[0]];

  // Walk through rows. For each entry we set in dense, increment pos.
  for (size_t i = 0; i < shape[0]; ++i) {
    IType ri = i + rhs->offset[0];

    if (rhs_ija[ri] == rhs_ija[ri+1]) { // Check boundaries of row: is row empty? (Yes.)

      // Write zeros in each column.
      for (size_t j = 0; j < shape[1]; ++j) { // Move to next dense position.

        // Fill in zeros and copy the diagonal entry for this empty row.
        if (ri == j + rhs->offset[1]) lhs_elements[pos] = static_cast<LDType>(rhs_a[ri]);
        else lhs_elements[pos] = LCAST_ZERO;

        ++pos;
      }

    } else { // Row contains entries: write those in each column, interspersed with zeros.

      // Get the first ija position of the row (as sliced)
      IType ija = nm::yale_storage::binary_search_left_boundary(rhs, rhs_ija[ri], rhs_ija[ri+1]-1, rhs->offset[1]);

      // What column is it?
      IType next_stored_rj = rhs_ija[ija];

      for (size_t j = 0; j < shape[1]; ++j) {
        IType rj = j + rhs->offset[1];

        if (rj == ri) { // at a diagonal in RHS
          lhs_elements[pos] = static_cast<LDType>(rhs_a[ri]);

        } else if (rj == next_stored_rj) { // column ID was found in RHS
          lhs_elements[pos] = static_cast<LDType>(rhs_a[ija]); // Copy from rhs.

          // Get next.
          ++ija;

          // Increment to next column ID (or go off the end).
          if (ija < rhs_ija[ri+1]) next_stored_rj = rhs_ija[ija];
          else next_stored_rj = rhs->src->shape[1]; // sentinel: no further stored columns

        } else { // rj < next_stored_rj

          // Insert zero.
          lhs_elements[pos] = LCAST_ZERO;
        }

        // Move to next dense position.
        ++pos;
      }
    }
  }
  nm_yale_storage_unregister(rhs);

  return lhs;
}
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
/*
 * Copy list contents into dense recursively.
 *
 * lhs:         flat dense element array being filled.
 * rhs:         (sub-)list at the current recursion depth.
 * default_val: value written wherever the list has no node.
 * pos:         running write index into lhs; shared across recursion levels
 *              (incremented per element, decremented once on exit to undo the
 *              loop's final ++pos).
 * recursions:  remaining depth; 0 means nodes hold values, not sub-lists.
 *
 * Gaps in the sparse list (missing keys) are filled with default_val, either
 * one element at a time (leaf level) or a whole sub-block via
 * cast_copy_list_default.
 */
template <typename LDType, typename RDType>
static void cast_copy_list_contents(LDType* lhs, const LIST* rhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions) {

  NODE *curr = rhs->first;
  int last_key = -1; // last list key we consumed; -1 = none yet

  nm_list_storage_register_list(rhs, recursions);

  for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {

    // Gap before the next stored node (or list exhausted): emit defaults.
    if (!curr || (curr->key > (size_t)(last_key+1))) {

      if (recursions == 0) lhs[pos] = static_cast<LDType>(*default_val);
      else cast_copy_list_default<LDType,RDType>(lhs, default_val, pos, shape, dim, max_elements, recursions-1);

      ++last_key;

    } else {

      // Node present at this index: copy its value or recurse into its sub-list.
      if (recursions == 0) lhs[pos] = static_cast<LDType>(*reinterpret_cast<RDType*>(curr->val));
      else cast_copy_list_contents<LDType,RDType>(lhs, (const LIST*)(curr->val),
                                                  default_val, pos, shape, dim, max_elements, recursions-1);

      last_key = curr->key;
      curr = curr->next;
    }
  }

  nm_list_storage_unregister_list(rhs, recursions);

  --pos; // undo the loop's trailing increment so the caller's ++pos lands correctly
}
|
|
241
|
+
|
|
242
|
+
/*
 * Copy a set of default values into dense.
 *
 * Fills one whole dimension's worth of entries (recursively, for nested
 * dimensions) with *default_val. pos is the shared running index into lhs;
 * it is decremented once on exit to cancel the loop's final ++pos, matching
 * the convention used by cast_copy_list_contents.
 */
template <typename LDType,typename RDType>
static void cast_copy_list_default(LDType* lhs, RDType* default_val, size_t& pos, const size_t* shape, size_t dim, size_t max_elements, size_t recursions) {
  for (size_t i = 0; i < shape[dim - 1 - recursions]; ++i, ++pos) {

    if (recursions == 0) lhs[pos] = static_cast<LDType>(*default_val);
    else cast_copy_list_default<LDType,RDType>(lhs, default_val, pos, shape, dim, max_elements, recursions-1);

  }

  --pos; // undo trailing increment (see cast_copy_list_contents)
}
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
} // end of namespace dense_storage
|
|
259
|
+
|
|
260
|
+
namespace list_storage {
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
template <typename LDType, typename RDType>
|
|
264
|
+
static bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero, size_t& pos, size_t* coords, const size_t* shape, size_t dim, size_t recursions);
|
|
265
|
+
|
|
266
|
+
/*
 * Creation of list storage from dense storage.
 *
 * init, if non-NULL, points to an LDType value used as the list's default;
 * otherwise 0 (or Fixnum 0 for RUBYOBJ) is used. Dense entries equal to the
 * default are omitted from the list. A slice source is compacted first via
 * nm_dense_storage_copy. Both storages are GC-registered during the copy.
 */
template <typename LDType, typename RDType>
LIST_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtype, void* init) {
  nm_dense_storage_register(rhs);

  LDType* l_default_val = NM_ALLOC_N(LDType, 1);           // owned by the new list storage
  RDType* r_default_val = NM_ALLOCA_N(RDType, 1); // clean up when finished with this function

  // allocate and copy shape and coords
  size_t *shape = NM_ALLOC_N(size_t, rhs->dim),
         *coords = NM_ALLOC_N(size_t, rhs->dim);

  memcpy(shape, rhs->shape, rhs->dim * sizeof(size_t));
  memset(coords, 0, rhs->dim * sizeof(size_t));

  // set list default_val to 0
  if (init) *l_default_val = *reinterpret_cast<LDType*>(init);
  else {
    if (l_dtype == RUBYOBJ) *l_default_val = INT2FIX(0);
    else *l_default_val = 0;
  }

  // need test default value for comparing to elements in dense matrix
  if (rhs->dtype == l_dtype || rhs->dtype != RUBYOBJ) *r_default_val = static_cast<RDType>(*l_default_val);
  else *r_default_val = nm::rubyobj_from_cval(l_default_val, l_dtype);


  LIST_STORAGE* lhs = nm_list_storage_create(l_dtype, shape, rhs->dim, l_default_val);

  nm_list_storage_register(lhs);

  size_t pos = 0; // flat index into the dense elements array

  if (rhs->src == rhs)
    list_storage::cast_copy_contents_dense<LDType,RDType>(lhs->rows,
                                                          reinterpret_cast<const RDType*>(rhs->elements),
                                                          r_default_val,
                                                          pos, coords, rhs->shape, rhs->dim, rhs->dim - 1);
  else {
    // rhs is a slice; materialize it so the flat-index walk is valid.
    DENSE_STORAGE* tmp = nm_dense_storage_copy(rhs);
    list_storage::cast_copy_contents_dense<LDType,RDType>(lhs->rows,
                                                          reinterpret_cast<const RDType*>(tmp->elements),
                                                          r_default_val,
                                                          pos, coords, rhs->shape, rhs->dim, rhs->dim - 1);

    nm_dense_storage_delete(tmp);
  }

  nm_list_storage_unregister(lhs);
  nm_dense_storage_unregister(rhs);

  return lhs;
}
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
/*
 * Creation of list storage from yale storage.
 *
 * The list's default value is taken from the Yale "zero" slot (a[shape[0]]).
 * For each row with any stored content, a row list is built by walking the
 * row's IJA entries in column order, inserting the (separately stored) Yale
 * diagonal entry at its correct position among them when it is nonzero.
 * Only supports dim-2 matrices; raises otherwise.
 */
template <typename LDType, typename RDType>
LIST_STORAGE* create_from_yale_storage(const YALE_STORAGE* rhs, dtype_t l_dtype) {
  // allocate and copy shape
  nm_yale_storage_register(rhs);

  size_t *shape = NM_ALLOC_N(size_t, rhs->dim);
  shape[0] = rhs->shape[0]; shape[1] = rhs->shape[1];

  RDType* rhs_a = reinterpret_cast<RDType*>(reinterpret_cast<YALE_STORAGE*>(rhs->src)->a);
  RDType R_ZERO = rhs_a[ rhs->src->shape[0] ];

  // copy default value from the zero location in the Yale matrix
  LDType* default_val = NM_ALLOC_N(LDType, 1);
  *default_val = static_cast<LDType>(R_ZERO);

  LIST_STORAGE* lhs = nm_list_storage_create(l_dtype, shape, rhs->dim, default_val);

  if (rhs->dim != 2) rb_raise(nm_eStorageTypeError, "Can only convert matrices of dim 2 from yale.");

  IType* rhs_ija = reinterpret_cast<YALE_STORAGE*>(rhs->src)->ija;

  NODE *last_row_added = NULL;
  // Walk through rows and columns as if RHS were a dense matrix
  for (IType i = 0; i < shape[0]; ++i) {
    IType ri = i + rhs->offset[0]; // row index in the unsliced source

    NODE *last_added = NULL;

    // Get boundaries of beginning and end of row
    IType ija = rhs_ija[ri],
          ija_next = rhs_ija[ri+1];

    // Are we going to need to add a diagonal for this row?
    bool add_diag = false;
    if (rhs_a[ri] != R_ZERO) add_diag = true; // non-zero and located within the bounds of the slice

    if (ija < ija_next || add_diag) {
      // Find the first stored entry at or past the slice's left column.
      ija = nm::yale_storage::binary_search_left_boundary(rhs, ija, ija_next-1, rhs->offset[1]);

      LIST* curr_row = list::create();

      LDType* insert_val;

      while (ija < ija_next) {
        // Find first column in slice
        IType rj = rhs_ija[ija];
        IType j = rj - rhs->offset[1];

        // Is there a nonzero diagonal item between the previously added item and the current one?
        if (rj > ri && add_diag) {
          // Allocate and copy insertion value
          insert_val = NM_ALLOC_N(LDType, 1);
          *insert_val = static_cast<LDType>(rhs_a[ri]);

          // Insert the item in the list at the appropriate location.
          // What is the appropriate key? Well, it's definitely right(i)==right(j), but the
          // rj index has already been advanced past ri. So we should treat ri as the column and
          // subtract offset[1].
          if (last_added) last_added = list::insert_after(last_added, ri - rhs->offset[1], insert_val);
          else last_added = list::insert(curr_row, false, ri - rhs->offset[1], insert_val);

          // don't add again!
          add_diag = false;
        }

        // now allocate and add the current item
        insert_val = NM_ALLOC_N(LDType, 1);
        *insert_val = static_cast<LDType>(rhs_a[ija]);

        if (last_added) last_added = list::insert_after(last_added, j, insert_val);
        else last_added = list::insert(curr_row, false, j, insert_val);

        ++ija; // move to next entry in Yale matrix
      }

      if (add_diag) {

        // still haven't added the diagonal.
        insert_val = NM_ALLOC_N(LDType, 1);
        *insert_val = static_cast<LDType>(rhs_a[ri]);

        // insert the item in the list at the appropriate location
        if (last_added) last_added = list::insert_after(last_added, ri - rhs->offset[1], insert_val);
        else last_added = list::insert(curr_row, false, ri - rhs->offset[1], insert_val);

        // no need to set add_diag to false because it'll be reset automatically in next iteration.
      }

      // Now add the list at the appropriate location
      if (last_row_added) last_row_added = list::insert_after(last_row_added, i, curr_row);
      else last_row_added = list::insert(lhs->rows, false, i, curr_row);
    }

    // end of walk through rows
  }

  nm_yale_storage_unregister(rhs);

  return lhs;
}
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
/* Copy dense into lists recursively
 *
 * lhs:    (sub-)list being built at the current recursion level.
 * rhs:    flat dense element array; pos is the shared running read index.
 * zero:   value treated as "absent" (not inserted into the list).
 * coords: scratch coordinate vector (one slot per dimension).
 *
 * Returns whether any leaf node was inserted at THIS level. NOTE(review):
 * at non-leaf levels `added` is never set from `added_list` (the assignment
 * is commented out below), so the return value reflects only leaf-level
 * insertions — callers appear to rely on the leaf case; confirm before
 * changing.
 *
 * FIXME: This works, but could probably be cleaner (do we really need to pass coords around?)
 */
template <typename LDType, typename RDType>
static bool cast_copy_contents_dense(LIST* lhs, const RDType* rhs, RDType* zero, size_t& pos, size_t* coords, const size_t* shape, size_t dim, size_t recursions) {

  nm_list_storage_register_list(lhs, recursions);

  NODE *prev = NULL;
  LIST *sub_list;
  bool added = false, added_list = false;
  //void* insert_value;

  for (coords[dim-1-recursions] = 0; coords[dim-1-recursions] < shape[dim-1-recursions]; ++coords[dim-1-recursions], ++pos) {

    if (recursions == 0) {
      // create nodes

      if (rhs[pos] != *zero) {
        // is not zero

        // Create a copy of our value that we will insert in the list
        LDType* insert_value = NM_ALLOC_N(LDType, 1);
        *insert_value = static_cast<LDType>(rhs[pos]);

        if (!lhs->first) prev = list::insert(lhs, false, coords[dim-1-recursions], insert_value);
        else prev = list::insert_after(prev, coords[dim-1-recursions], insert_value);

        added = true;
      }
      // no need to do anything if the element is zero

    } else { // create lists
      // create a list as if there's something in the row in question, and then delete it if nothing turns out to be there
      sub_list = list::create();

      added_list = list_storage::cast_copy_contents_dense<LDType,RDType>(sub_list, rhs, zero, pos, coords, shape, dim, recursions-1);

      if (!added_list) list::del(sub_list, recursions-1);
      else if (!lhs->first) prev = list::insert(lhs, false, coords[dim-1-recursions], sub_list);
      else prev = list::insert_after(prev, coords[dim-1-recursions], sub_list);

      // added = (added || added_list);
    }
  }

  nm_list_storage_unregister_list(lhs, recursions);

  coords[dim-1-recursions] = 0; // reset this dimension's coordinate for the caller
  --pos;                        // undo trailing increment; caller's ++pos advances correctly

  return added;
}
|
|
483
|
+
|
|
484
|
+
} // end of namespace list_storage
|
|
485
|
+
|
|
486
|
+
|
|
487
|
+
namespace yale_storage { // FIXME: Move to yale.cpp
|
|
488
|
+
/*
 * Creation of yale storage from dense storage.
 *
 * init, if non-NULL, supplies the value to treat as "zero" (for RUBYOBJ it is
 * read as a VALUE). Two passes over the dense matrix: first count non-diagonal
 * nonzeros (ndnz) to size the Yale arrays exactly, then copy diagonal entries
 * into a[0..n-1] and off-diagonal nonzeros into the IJA/A tail.
 * Only dim-2 matrices are supported; raises otherwise.
 */
template <typename LDType, typename RDType>
YALE_STORAGE* create_from_dense_storage(const DENSE_STORAGE* rhs, dtype_t l_dtype, void* init) {

  if (rhs->dim != 2) rb_raise(nm_eStorageTypeError, "can only convert matrices of dim 2 to yale");

  nm_dense_storage_register(rhs);

  IType pos = 0;  // flat index into rhs->elements (stride/offset aware)
  IType ndnz = 0; // count of non-diagonal nonzero entries

  // We need a zero value. This should nearly always be zero, but sometimes you might want false or nil.
  LDType L_INIT(0);
  if (init) {
    if (l_dtype == RUBYOBJ) L_INIT = *reinterpret_cast<VALUE*>(init);
    else L_INIT = *reinterpret_cast<LDType*>(init);
  }
  RDType R_INIT = static_cast<RDType>(L_INIT);

  RDType* rhs_elements = reinterpret_cast<RDType*>(rhs->elements);

  // First, count the non-diagonal nonzeros
  for (size_t i = rhs->shape[0]; i-- > 0;) {
    for (size_t j = rhs->shape[1]; j-- > 0;) {
      pos = rhs->stride[0]*(i + rhs->offset[0]) + rhs->stride[1]*(j + rhs->offset[1]);
      if (i != j && rhs_elements[pos] != R_INIT) ++ndnz;

      // move forward 1 position in dense matrix elements array
    }
  }

  // Copy shape for yale construction
  size_t* shape = NM_ALLOC_N(size_t, 2);
  shape[0] = rhs->shape[0];
  shape[1] = rhs->shape[1];

  // Yale needs shape[0] diagonal slots + 1 zero slot + ndnz off-diagonal slots.
  size_t request_capacity = shape[0] + ndnz + 1;

  // Create with minimum possible capacity -- just enough to hold all of the entries
  YALE_STORAGE* lhs = nm_yale_storage_create(l_dtype, shape, 2, request_capacity);

  if (lhs->capacity < request_capacity)
    rb_raise(nm_eStorageTypeError, "conversion failed; capacity of %ld requested, max allowable is %ld", (unsigned long)request_capacity, (unsigned long)(lhs->capacity));

  LDType* lhs_a = reinterpret_cast<LDType*>(lhs->a);
  IType* lhs_ija = lhs->ija;

  // Set the zero position in the yale matrix
  lhs_a[shape[0]] = L_INIT;

  // Start just after the zero position.
  IType ija = shape[0]+1;
  pos = 0;

  // Copy contents
  for (IType i = 0; i < rhs->shape[0]; ++i) {
    // indicate the beginning of a row in the IJA array
    lhs_ija[i] = ija;

    for (IType j = 0; j < rhs->shape[1]; ++j) {
      pos = rhs->stride[0] * (i + rhs->offset[0]) + rhs->stride[1] * (j + rhs->offset[1]); // calc position with offsets

      if (i == j) { // copy to diagonal
        lhs_a[i] = static_cast<LDType>(rhs_elements[pos]);
      } else if (rhs_elements[pos] != R_INIT) { // copy nonzero to LU
        lhs_ija[ija] = j; // write column index
        lhs_a[ija] = static_cast<LDType>(rhs_elements[pos]);

        ++ija;
      }
    }
  }

  lhs_ija[shape[0]] = ija; // indicate the end of the last row
  lhs->ndnz = ndnz;

  nm_dense_storage_unregister(rhs);

  return lhs;
}
|
|
570
|
+
|
|
571
|
+
/*
 * Creation of yale storage from list storage.
 *
 * Yale storage can only represent a default value of zero (or nil/false/0 for
 * Ruby objects), so the list's default value is validated first and the
 * conversion raises if it is anything else. Diagonal entries go into
 * a[0..n-1]; off-diagonal entries are appended to IJA/A, and row boundaries
 * for all subsequent rows are back-filled after each append.
 * Only dim-2 matrices are supported; raises otherwise.
 */
template <typename LDType, typename RDType>
YALE_STORAGE* create_from_list_storage(const LIST_STORAGE* rhs, nm::dtype_t l_dtype) {
  if (rhs->dim != 2) rb_raise(nm_eStorageTypeError, "can only convert matrices of dim 2 to yale");

  if (rhs->dtype == RUBYOBJ) {
    VALUE init_val = *reinterpret_cast<VALUE*>(rhs->default_val);
    // Default must be == to one of 0, nil, false (via Ruby's != operator).
    if (rb_funcall(init_val, rb_intern("!="), 1, Qnil) == Qtrue && rb_funcall(init_val, rb_intern("!="), 1, Qfalse) == Qtrue && rb_funcall(init_val, rb_intern("!="), 1, INT2FIX(0)) == Qtrue)
      rb_raise(nm_eStorageTypeError, "list matrix of Ruby objects must have default value equal to 0, nil, or false to convert to yale");
  } else if (strncmp(reinterpret_cast<const char*>(rhs->default_val), "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", DTYPE_SIZES[rhs->dtype]))
    // Non-Ruby dtypes: default must be all-zero bytes (largest dtype is 16 bytes).
    rb_raise(nm_eStorageTypeError, "list matrix of non-Ruby objects must have default value of 0 to convert to yale");

  nm_list_storage_register(rhs);

  size_t ndnz = nm_list_storage_count_nd_elements(rhs);
  // Copy shape for yale construction
  size_t* shape = NM_ALLOC_N(size_t, 2);
  shape[0] = rhs->shape[0];
  shape[1] = rhs->shape[1];

  size_t request_capacity = shape[0] + ndnz + 1;
  YALE_STORAGE* lhs = nm_yale_storage_create(l_dtype, shape, 2, request_capacity);

  if (lhs->capacity < request_capacity)
    rb_raise(nm_eStorageTypeError, "conversion failed; capacity of %ld requested, max allowable is %ld", (unsigned long)request_capacity, (unsigned long)(lhs->capacity));

  // Initialize the A and IJA arrays
  init<LDType>(lhs, rhs->default_val);

  IType* lhs_ija = lhs->ija;
  LDType* lhs_a = reinterpret_cast<LDType*>(lhs->a);

  IType ija = lhs->shape[0]+1; // first off-diagonal slot, just past the zero position

  // Copy contents
  for (NODE* i_curr = rhs->rows->first; i_curr; i_curr = i_curr->next) {

    // Shrink reference
    int i = i_curr->key - rhs->offset[0];
    if (i < 0 || i >= (int)rhs->shape[0]) continue; // row outside the slice

    for (NODE* j_curr = ((LIST*)(i_curr->val))->first; j_curr; j_curr = j_curr->next) {

      // Shrink reference
      int j = j_curr->key - rhs->offset[1];
      if (j < 0 || j >= (int)rhs->shape[1]) continue; // column outside the slice

      LDType cast_jcurr_val = *reinterpret_cast<RDType*>(j_curr->val);
      if (i_curr->key - rhs->offset[0] == j_curr->key - rhs->offset[1])
        lhs_a[i_curr->key - rhs->offset[0]] = cast_jcurr_val; // set diagonal
      else {
        lhs_ija[ija] = j_curr->key - rhs->offset[1]; // set column value

        lhs_a[ija] = cast_jcurr_val; // set cell value

        ++ija;
        // indicate the beginning of a row in the IJA array
        // (back-fill every later row's start pointer to the current position)
        for (size_t i = i_curr->key - rhs->offset[0] + 1; i < rhs->shape[0] + rhs->offset[0]; ++i) {
          lhs_ija[i] = ija;
        }

      }
    }

  }

  lhs_ija[rhs->shape[0]] = ija; // indicate the end of the last row
  lhs->ndnz = ndnz;

  nm_list_storage_unregister(rhs);

  return lhs;
}
|
|
646
|
+
|
|
647
|
+
} // end of namespace yale_storage
|
|
648
|
+
} // end of namespace nm
|
|
649
|
+
|
|
650
|
+
extern "C" {
|
|
651
|
+
|
|
652
|
+
/*
|
|
653
|
+
* The following functions represent stype casts -- conversions from one
|
|
654
|
+
* stype to another. Each of these is the C accessor for a templated C++
|
|
655
|
+
* function.
|
|
656
|
+
*/
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
/*
 * C accessor: cast dense storage to Yale storage, converting the dtype to
 * l_dtype. init optionally supplies the value to treat as zero.
 * Raises nm_eDataTypeError when no conversion exists for the dtype pair.
 */
STORAGE* nm_yale_storage_from_dense(const STORAGE* right, nm::dtype_t l_dtype, void* init) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::create_from_dense_storage, YALE_STORAGE*, const DENSE_STORAGE* rhs, nm::dtype_t l_dtype, void*);

  if (ttable[l_dtype][right->dtype] == NULL) {
    rb_raise(nm_eDataTypeError, "casting between these dtypes is undefined");
    return NULL; // not reached; rb_raise does not return
  }

  const DENSE_STORAGE* casted_right = reinterpret_cast<const DENSE_STORAGE*>(right);
  return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype, init));
}
|
|
669
|
+
|
|
670
|
+
/*
 * C accessor: cast list storage to Yale storage, converting the dtype to
 * l_dtype. The dummy parameter exists only to match the shared cast signature.
 * Raises nm_eDataTypeError when no conversion exists for the dtype pair.
 */
STORAGE* nm_yale_storage_from_list(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::yale_storage::create_from_list_storage, YALE_STORAGE*, const LIST_STORAGE* rhs, nm::dtype_t l_dtype);

  if (ttable[l_dtype][right->dtype] == NULL) {
    rb_raise(nm_eDataTypeError, "casting between these dtypes is undefined");
    return NULL; // not reached; rb_raise does not return
  }

  const LIST_STORAGE* casted_right = reinterpret_cast<const LIST_STORAGE*>(right);
  return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype));
}
|
|
680
|
+
|
|
681
|
+
/*
 * C accessor: cast list storage to dense storage, converting the dtype to
 * l_dtype. The dummy parameter exists only to match the shared cast signature.
 * Raises nm_eDataTypeError when no conversion exists for the dtype pair.
 */
STORAGE* nm_dense_storage_from_list(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::create_from_list_storage, DENSE_STORAGE*, const LIST_STORAGE* rhs, nm::dtype_t l_dtype);

  if (ttable[l_dtype][right->dtype] == NULL) {
    rb_raise(nm_eDataTypeError, "casting between these dtypes is undefined");
    return NULL; // not reached; rb_raise does not return
  }

  const LIST_STORAGE* casted_right = reinterpret_cast<const LIST_STORAGE*>(right);
  return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype));
}
|
|
691
|
+
|
|
692
|
+
/*
 * C accessor: cast Yale storage to dense storage, converting the dtype to
 * l_dtype. The dummy parameter exists only to match the shared cast signature.
 * Raises nm_eDataTypeError when no conversion exists for the dtype pair.
 */
STORAGE* nm_dense_storage_from_yale(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::dense_storage::create_from_yale_storage, DENSE_STORAGE*, const YALE_STORAGE* rhs, nm::dtype_t l_dtype);

  if (ttable[l_dtype][right->dtype] == NULL) {
    rb_raise(nm_eDataTypeError, "casting between these dtypes is undefined");
    return NULL; // not reached; rb_raise does not return
  }

  const YALE_STORAGE* casted_right = reinterpret_cast<const YALE_STORAGE*>(right);
  return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype));
}
|
|
704
|
+
|
|
705
|
+
/*
 * C accessor: cast dense storage to list storage, converting the dtype to
 * l_dtype. init optionally supplies the list's default value.
 * Raises nm_eDataTypeError when no conversion exists for the dtype pair.
 */
STORAGE* nm_list_storage_from_dense(const STORAGE* right, nm::dtype_t l_dtype, void* init) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::create_from_dense_storage, LIST_STORAGE*, const DENSE_STORAGE*, nm::dtype_t, void*);

  if (ttable[l_dtype][right->dtype] == NULL) {
    rb_raise(nm_eDataTypeError, "casting between these dtypes is undefined");
    return NULL; // not reached; rb_raise does not return
  }

  const DENSE_STORAGE* casted_right = reinterpret_cast<const DENSE_STORAGE*>(right);
  return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype, init));
}
|
|
715
|
+
|
|
716
|
+
/*
 * C accessor: cast Yale storage to list storage, converting the dtype to
 * l_dtype. The dummy parameter exists only to match the shared cast signature.
 * Raises nm_eDataTypeError when no conversion exists for the dtype pair.
 */
STORAGE* nm_list_storage_from_yale(const STORAGE* right, nm::dtype_t l_dtype, void* dummy) {
  NAMED_LR_DTYPE_TEMPLATE_TABLE(ttable, nm::list_storage::create_from_yale_storage, LIST_STORAGE*, const YALE_STORAGE* rhs, nm::dtype_t l_dtype);

  if (ttable[l_dtype][right->dtype] == NULL) {
    rb_raise(nm_eDataTypeError, "casting between these dtypes is undefined");
    return NULL; // not reached; rb_raise does not return
  }

  const YALE_STORAGE* casted_right = reinterpret_cast<const YALE_STORAGE*>(right);
  return reinterpret_cast<STORAGE*>(ttable[l_dtype][right->dtype](casted_right, l_dtype));
}
|
|
728
|
+
|
|
729
|
+
} // end of extern "C"
|
|
730
|
+
|