pnmatrix 1.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/nmatrix/binary_format.txt +53 -0
- data/ext/nmatrix/data/complex.h +388 -0
- data/ext/nmatrix/data/data.cpp +274 -0
- data/ext/nmatrix/data/data.h +651 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +386 -0
- data/ext/nmatrix/extconf.rb +70 -0
- data/ext/nmatrix/math/asum.h +99 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +82 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +62 -0
- data/ext/nmatrix/math/magnitude.h +54 -0
- data/ext/nmatrix/math/math.h +751 -0
- data/ext/nmatrix/math/nrm2.h +165 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +336 -0
- data/ext/nmatrix/math/util.h +162 -0
- data/ext/nmatrix/math.cpp +1368 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.cpp +285 -0
- data/ext/nmatrix/nmatrix.h +476 -0
- data/ext/nmatrix/ruby_constants.cpp +151 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/ruby_nmatrix.c +3130 -0
- data/ext/nmatrix/storage/common.cpp +77 -0
- data/ext/nmatrix/storage/common.h +183 -0
- data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
- data/ext/nmatrix/storage/dense/dense.h +129 -0
- data/ext/nmatrix/storage/list/list.cpp +1628 -0
- data/ext/nmatrix/storage/list/list.h +138 -0
- data/ext/nmatrix/storage/storage.cpp +730 -0
- data/ext/nmatrix/storage/storage.h +99 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
- data/ext/nmatrix/storage/yale/yale.h +203 -0
- data/ext/nmatrix/types.h +55 -0
- data/ext/nmatrix/util/io.cpp +279 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.cpp +627 -0
- data/ext/nmatrix/util/sl_list.h +144 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/lib/nmatrix/blas.rb +378 -0
- data/lib/nmatrix/cruby/math.rb +744 -0
- data/lib/nmatrix/enumerate.rb +253 -0
- data/lib/nmatrix/homogeneous.rb +241 -0
- data/lib/nmatrix/io/fortran_format.rb +138 -0
- data/lib/nmatrix/io/harwell_boeing.rb +221 -0
- data/lib/nmatrix/io/market.rb +263 -0
- data/lib/nmatrix/io/point_cloud.rb +189 -0
- data/lib/nmatrix/jruby/decomposition.rb +24 -0
- data/lib/nmatrix/jruby/enumerable.rb +13 -0
- data/lib/nmatrix/jruby/error.rb +4 -0
- data/lib/nmatrix/jruby/math.rb +501 -0
- data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
- data/lib/nmatrix/jruby/operators.rb +283 -0
- data/lib/nmatrix/jruby/slice.rb +264 -0
- data/lib/nmatrix/lapack_core.rb +181 -0
- data/lib/nmatrix/lapack_plugin.rb +44 -0
- data/lib/nmatrix/math.rb +953 -0
- data/lib/nmatrix/mkmf.rb +100 -0
- data/lib/nmatrix/monkeys.rb +137 -0
- data/lib/nmatrix/nmatrix.rb +1172 -0
- data/lib/nmatrix/rspec.rb +75 -0
- data/lib/nmatrix/shortcuts.rb +1163 -0
- data/lib/nmatrix/version.rb +39 -0
- data/lib/nmatrix/yale_functions.rb +118 -0
- data/lib/nmatrix.rb +28 -0
- data/spec/00_nmatrix_spec.rb +892 -0
- data/spec/01_enum_spec.rb +196 -0
- data/spec/02_slice_spec.rb +407 -0
- data/spec/03_nmatrix_monkeys_spec.rb +80 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +215 -0
- data/spec/elementwise_spec.rb +311 -0
- data/spec/homogeneous_spec.rb +100 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +159 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +1363 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +35 -0
- data/spec/shortcuts_spec.rb +474 -0
- data/spec/slice_set_spec.rb +162 -0
- data/spec/spec_helper.rb +172 -0
- data/spec/stat_spec.rb +214 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +295 -0
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
/////////////////////////////////////////////////////////////////////
|
|
2
|
+
// = NMatrix
|
|
3
|
+
//
|
|
4
|
+
// A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
// NMatrix is part of SciRuby.
|
|
6
|
+
//
|
|
7
|
+
// NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
// Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
//
|
|
10
|
+
// == Copyright Information
|
|
11
|
+
//
|
|
12
|
+
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
//
|
|
15
|
+
// Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
//
|
|
17
|
+
// == Contributing
|
|
18
|
+
//
|
|
19
|
+
// By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
// our Contributor Agreement:
|
|
21
|
+
//
|
|
22
|
+
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
//
|
|
24
|
+
// == row_stored_nd.h
|
|
25
|
+
//
|
|
26
|
+
// Yale storage row-by-row nondiagonal-storage iterator
|
|
27
|
+
//
|
|
28
|
+
|
|
29
|
+
#ifndef YALE_ITERATORS_ROW_STORED_ND_H
|
|
30
|
+
# define YALE_ITERATORS_ROW_STORED_ND_H
|
|
31
|
+
|
|
32
|
+
#include <ruby.h>
|
|
33
|
+
#include <type_traits>
|
|
34
|
+
#include <typeinfo>
|
|
35
|
+
#include <stdexcept>
|
|
36
|
+
|
|
37
|
+
namespace nm { namespace yale_storage {
|
|
38
|
+
|
|
39
|
+
/*
|
|
40
|
+
* Constants
|
|
41
|
+
*/
|
|
42
|
+
// Factor by which Yale storage capacity is grown when it must be enlarged.
const float GROWTH_CONSTANT = 1.5;
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
/*
|
|
46
|
+
* Forward declarations
|
|
47
|
+
*/
|
|
48
|
+
template <typename D, typename RefType, typename YaleRef> class row_iterator_T;
|
|
49
|
+
|
|
50
|
+
/*
|
|
51
|
+
* Iterator for visiting each stored element in a row, including diagonals.
|
|
52
|
+
*/
|
|
53
|
+
/*
 * Iterator over the stored non-diagonal entries of a single row of a
 * Yale-storage matrix. It tracks a position p_ into the storage's IJA/A
 * arrays and delegates row bounds and element access to the owning row
 * iterator r.
 *
 * Template parameters:
 *   D       — matrix dtype.
 *   RefType — D or const D; its const-ness selects the const variants below.
 *   YaleRef — (possibly const) nm::YaleStorage<D>.
 *   RowRef  — (possibly const) row_iterator_T over the same storage.
 */
template <typename D,
          typename RefType,
          typename YaleRef = typename std::conditional<
            std::is_const<RefType>::value,
            const nm::YaleStorage<D>,
            nm::YaleStorage<D>
          >::type,
          typename RowRef = typename std::conditional<
            std::is_const<RefType>::value,
            const row_iterator_T<D,RefType,YaleRef>,
            row_iterator_T<D,RefType,YaleRef>
          >::type>
class row_stored_nd_iterator_T {
protected:
  RowRef& r;   // owning row iterator; provides p_last, ija(), offset(), a(), i()
  size_t  p_;  // current position within the storage's IJA/A arrays

public:

  // Begin iterating row `row` at storage position `pp`.
  row_stored_nd_iterator_T(RowRef& row, size_t pp)
  : r(row),
    p_(pp) // do we start at the diagonal?
  {
  }

  // DO NOT IMPLEMENT THESE FUNCTIONS. They prevent C++ virtual slicing
  //template <typename T> row_stored_nd_iterator_T(T const& rhs);
  //template <typename T> row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(T const& rhs);

  // Next two functions are to ensure we can still cast between nd iterators.
  row_stored_nd_iterator_T(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs)
  : r(rhs.r), p_(rhs.p_)
  { }

  // Assignment is only permitted between iterators bound to the SAME row;
  // the row reference itself cannot be reseated.
  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs) {
    if (&r != &(rhs.r))
      throw std::logic_error("can't assign iterator from another row iterator");
    p_ = rhs.p_;
    return *this;
  }

  // Raw storage position (index into IJA/A).
  virtual size_t p() const { return p_; }

  // True once p_ has moved past the row's last stored non-diagonal entry.
  virtual bool end() const {
    return p_ > r.p_last;
  }

  // Pre-increment; throws rather than walking past the end of the row.
  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
    if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
    ++p_;
    return *this;
  }

  // NOTE: non-standard postfix increment — it is const, so *this is NOT
  // advanced; it returns a copy advanced by one instead. (The local `r`
  // shadows the member `r` here.)
  row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
    row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
    return ++r;
  }

  // Column index of the current entry (adjusted for the slice offset).
  virtual size_t j() const {
    if (end()) throw std::out_of_range("cannot dereference (get j()) for an end pointer");
    return r.ija(p_) - r.offset(1);
  }

  // Need to declare all row_stored_nd_iterator_T friends of each other.
  template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_nd_iterator_T;


  // Equal when both sit on the same row and the same column, or both are at
  // the end of that row.
  virtual bool operator==(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
    if (r.i() != rhs.r.i()) return false;
    if (end()) return rhs.end();
    else if (rhs.end()) return false;
    return j() == rhs.j();
  }

  // There is something wrong with this function.
  // (Kept as-is: it mirrors operator== negated; the original authors flagged
  // it but the visible logic is the exact complement of operator==.)
  virtual bool operator!=(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
    if (r.i() != rhs.r.i()) return true;
    if (end()) return !rhs.end();
    else if (rhs.end()) return true;
    return j() != rhs.j();
  }

  // Ordering: first by row; within the same row, by column, with an end
  // iterator sorting after every non-end position.
  template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
  bool operator<(const row_stored_nd_iterator_T<E,ERefType>& rhs) const {
    if (r < rhs.r) return true;
    if (r > rhs.r) return false;

    // r == rhs.r
    if (end()) return false;
    if (rhs.end()) return true;
    return j() < rhs.j();
  }

  // De-reference the iterator: the stored value at the current position.
  RefType& operator*() {
    return r.a(p_);
  }

  // Const overload; note it still returns RefType& (const-ness is carried by
  // RefType itself, not by this qualifier).
  RefType& operator*() const {
    return r.a(p_);
  }

  // Ruby VALUE de-reference
  VALUE operator~() const {
    return nm_rb_dereference<D>(**this);
  }

  // Current column index as a Ruby integer.
  inline virtual VALUE rb_j() const { return LONG2NUM(j()); }

};
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
} } // end of namespace nm::yale_storage
|
|
168
|
+
|
|
169
|
+
#endif // YALE_ITERATORS_ROW_STORED_ND_H
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
/////////////////////////////////////////////////////////////////////
|
|
2
|
+
// = NMatrix
|
|
3
|
+
//
|
|
4
|
+
// A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
// NMatrix is part of SciRuby.
|
|
6
|
+
//
|
|
7
|
+
// NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
// Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
//
|
|
10
|
+
// == Copyright Information
|
|
11
|
+
//
|
|
12
|
+
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
//
|
|
15
|
+
// Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
//
|
|
17
|
+
// == Contributing
|
|
18
|
+
//
|
|
19
|
+
// By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
// our Contributor Agreement:
|
|
21
|
+
//
|
|
22
|
+
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
//
|
|
24
|
+
// == stored_diagonal_iterator.h
|
|
25
|
+
//
|
|
26
|
+
// Yale storage diagonal-storage iterator
|
|
27
|
+
//
|
|
28
|
+
|
|
29
|
+
#ifndef YALE_ITERATORS_STORED_DIAGONAL_H
|
|
30
|
+
# define YALE_ITERATORS_STORED_DIAGONAL_H
|
|
31
|
+
|
|
32
|
+
#include <ruby.h>
|
|
33
|
+
#include <type_traits>
|
|
34
|
+
#include <typeinfo>
|
|
35
|
+
|
|
36
|
+
namespace nm { namespace yale_storage {
|
|
37
|
+
|
|
38
|
+
/*
|
|
39
|
+
* Iterate across the stored diagonal.
|
|
40
|
+
*/
|
|
41
|
+
template <typename D,
|
|
42
|
+
typename RefType,
|
|
43
|
+
typename YaleRef = typename std::conditional<
|
|
44
|
+
std::is_const<RefType>::value,
|
|
45
|
+
const nm::YaleStorage<D>,
|
|
46
|
+
nm::YaleStorage<D>
|
|
47
|
+
>::type>
|
|
48
|
+
class stored_diagonal_iterator_T : public basic_iterator_T<D,RefType,YaleRef> {
|
|
49
|
+
using basic_iterator_T<D,RefType,YaleRef>::p_;
|
|
50
|
+
using basic_iterator_T<D,RefType,YaleRef>::y;
|
|
51
|
+
using basic_iterator_T<D,RefType,YaleRef>::offset;
|
|
52
|
+
using basic_iterator_T<D,RefType,YaleRef>::shape;
|
|
53
|
+
public:
|
|
54
|
+
stored_diagonal_iterator_T(YaleRef& obj, size_t d = 0)
|
|
55
|
+
: basic_iterator_T<D,RefType,YaleRef>(obj, // y
|
|
56
|
+
std::max(obj.offset(0), obj.offset(1)) + d - obj.offset(0), // i_
|
|
57
|
+
std::max(obj.offset(0), obj.offset(1)) + d) // p_
|
|
58
|
+
{
|
|
59
|
+
// std::cerr << "sdbegin: d=" << d << ", p_=" << p_ << ", i()=" << i() << ", j()=" << j() << std::endl;
|
|
60
|
+
// p_ can range from max(y.offset(0), y.offset(1)) to min(y.real_shape(0), y.real_shape(1))
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
size_t d() const {
|
|
65
|
+
return p_ - std::max(offset(0), offset(1));
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
stored_diagonal_iterator_T<D,RefType,YaleRef>& operator++() {
|
|
69
|
+
if (i() < shape(0)) ++p_;
|
|
70
|
+
return *this;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
stored_diagonal_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
|
|
74
|
+
stored_diagonal_iterator_T<D,RefType,YaleRef> iter(*this);
|
|
75
|
+
return ++iter;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
// Indicates if we're at the end of the iteration.
|
|
79
|
+
bool end() const {
|
|
80
|
+
return p_ >= std::min( shape(0) + offset(0), shape(1) + offset(1) );
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
// i() and j() are how we know if we're past-the-end. i will be shape(0) and j will be 0.
|
|
84
|
+
size_t i() const {
|
|
85
|
+
return p_ - offset(0);
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
size_t j() const {
|
|
89
|
+
return p_ - offset(1);
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
94
|
+
bool operator!=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() != rhs.d(); }
|
|
95
|
+
|
|
96
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
97
|
+
bool operator==(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return !(*this != rhs); }
|
|
98
|
+
|
|
99
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
100
|
+
bool operator<(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() < rhs.d(); }
|
|
101
|
+
|
|
102
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
103
|
+
bool operator<=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
|
|
104
|
+
return d() <= rhs.d();
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
108
|
+
bool operator>(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
|
|
109
|
+
return d() > rhs.d();
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
|
|
113
|
+
bool operator>=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
|
|
114
|
+
return d() >= rhs.d();
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
RefType& operator*() { return y.a(p_); }
|
|
118
|
+
RefType& operator*() const { return y.a(p_); }
|
|
119
|
+
|
|
120
|
+
};
|
|
121
|
+
|
|
122
|
+
} } // end of namespace nm::yale_storage
|
|
123
|
+
|
|
124
|
+
#endif // YALE_ITERATORS_STORED_DIAGONAL_H
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
/////////////////////////////////////////////////////////////////////
|
|
2
|
+
// = NMatrix
|
|
3
|
+
//
|
|
4
|
+
// A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
// NMatrix is part of SciRuby.
|
|
6
|
+
//
|
|
7
|
+
// NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
// Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
//
|
|
10
|
+
// == Copyright Information
|
|
11
|
+
//
|
|
12
|
+
// SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
// NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
//
|
|
15
|
+
// Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
//
|
|
17
|
+
// == Contributing
|
|
18
|
+
//
|
|
19
|
+
// By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
// our Contributor Agreement:
|
|
21
|
+
//
|
|
22
|
+
// * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
//
|
|
24
|
+
// == transpose.h
|
|
25
|
+
//
|
|
26
|
+
// Functions for Yale math: transposing
|
|
27
|
+
//
|
|
28
|
+
|
|
29
|
+
#ifndef YALE_MATH_TRANSPOSE_H
|
|
30
|
+
# define YALE_MATH_TRANSPOSE_H
|
|
31
|
+
|
|
32
|
+
namespace nm { namespace yale_storage {
|
|
33
|
+
|
|
34
|
+
/*
|
|
35
|
+
* Transposes a generic Yale matrix (old or new). Specify new by setting RDiag = true.
|
|
36
|
+
*
|
|
37
|
+
* Based on transp from SMMP (same as symbmm and numbmm).
|
|
38
|
+
*
|
|
39
|
+
* This is not named in the same way as most yale_storage functions because it does not act on a YALE_STORAGE
|
|
40
|
+
* object.
|
|
41
|
+
*/
|
|
42
|
+
|
|
43
|
+
/*
 * Transposes a generic Yale matrix (old or new). Specify new by setting
 * DiagA = true.
 *
 * Based on transp from SMMP (same as symbmm and numbmm).
 *
 * Template parameters:
 *   AD, BD — dtypes of the source (A) and destination (B) value arrays.
 *   DiagA  — true for "new" Yale format, where the first m+1 slots of ib/b
 *            are the row-pointer/diagonal region.
 *   Move   — true to copy values as well as structure; false transposes
 *            structure only (a, b, a_default, b_default then go unused in
 *            the value paths).
 *
 * Parameters:
 *   n, m        — rows and columns of A (so B is m x n).
 *   ia, ja, a   — A's row pointers, column indices, and values.
 *   a_default   — A's default (unstored) value.
 *   ib, jb, b   — output arrays for B; ib and (when Move) b must hold at
 *                 least m+1 entries, jb enough for all stored entries.
 *   b_default   — B's default value, written to the first m+1 slots of b.
 *
 * This is not named in the same way as most yale_storage functions because
 * it does not act on a YALE_STORAGE object.
 */
template <typename AD, typename BD, bool DiagA, bool Move>
void transpose_yale(const size_t n, const size_t m,
                    const size_t* ia, const size_t* ja, const AD* a, const AD& a_default,
                    size_t* ib, size_t* jb, BD* b, const BD& b_default) {

  // Clear B's row-pointer region (and, when moving values, the matching
  // region of b).
  for (size_t i = 0; i < m+1; ++i) ib[i] = 0;

  if (Move)
    for (size_t i = 0; i < m+1; ++i) b[i] = b_default;

  // In new Yale format the non-diagonal entries start immediately after the
  // m+1 row-pointer/diagonal slots.
  if (DiagA) ib[0] = m + 1;
  else       ib[0] = 0;

  /* count indices for each column */
  // Each column of A becomes a row of B; tally counts into ib[column+1].
  for (size_t i = 0; i < n; ++i) {
    for (size_t j = ia[i]; j < ia[i+1]; ++j) {
      ++(ib[ja[j]+1]);
    }
  }

  // Prefix-sum the counts so ib[i] becomes the insertion point for B's row i.
  for (size_t i = 0; i < m; ++i) {
    ib[i+1] = ib[i] + ib[i+1];
  }

  /* now make jb */
  // Scatter A's entries into jb (and b), advancing each destination row's
  // insertion point as we go.
  for (size_t i = 0; i < n; ++i) {
    for (size_t j = ia[i]; j < ia[i+1]; ++j) {
      const size_t index = ja[j]; // column in A == row in B

      jb[ib[index]] = i;

      // NOTE(review): stored entries equal to a_default are skipped, so b
      // slots beyond the first m+1 keep their prior contents in that case —
      // presumably callers never store defaults, or pre-clear b; verify.
      if (Move && a[j] != a_default)
        b[ib[index]] = a[j];

      ++(ib[index]);
    }
  }

  /* now fixup ib */
  // The scatter advanced every ib[i] to the start of row i+1; shift the
  // array right by one so ib[i] again points at the start of row i.
  for (size_t i = m; i >= 1; --i) {
    ib[i] = ib[i-1];
  }


  if (DiagA) {
    if (Move) {
      // Copy the stored diagonal directly: in new Yale format it occupies
      // the first min(n, m) slots of both a and b.
      const size_t diag_len = std::min(n, m);

      for (size_t i = 0; i < diag_len; ++i) {
        b[i] = a[i];
      }
    }
    ib[0] = m + 1;

  } else {
    ib[0] = 0;
  }
}
|
|
107
|
+
|
|
108
|
+
} } // end of namespace nm::yale_storage
|
|
109
|
+
|
|
110
|
+
#endif
|