nmatrix 0.0.8 → 0.0.9

Sign up to get free protection for your applications and to get access to all the features.
Files changed (68):
  1. checksums.yaml +4 -4
  2. data/.gitignore +3 -8
  3. data/.rspec +1 -1
  4. data/.travis.yml +12 -0
  5. data/CONTRIBUTING.md +27 -12
  6. data/Gemfile +1 -0
  7. data/History.txt +38 -0
  8. data/Manifest.txt +15 -15
  9. data/README.rdoc +7 -6
  10. data/Rakefile +40 -5
  11. data/ext/nmatrix/data/data.cpp +2 -37
  12. data/ext/nmatrix/data/data.h +19 -121
  13. data/ext/nmatrix/data/meta.h +70 -0
  14. data/ext/nmatrix/extconf.rb +40 -12
  15. data/ext/nmatrix/math/math.h +13 -103
  16. data/ext/nmatrix/nmatrix.cpp +10 -2018
  17. data/ext/nmatrix/nmatrix.h +16 -13
  18. data/ext/nmatrix/ruby_constants.cpp +12 -1
  19. data/ext/nmatrix/ruby_constants.h +7 -1
  20. data/ext/nmatrix/ruby_nmatrix.c +2169 -0
  21. data/ext/nmatrix/storage/dense.cpp +123 -14
  22. data/ext/nmatrix/storage/dense.h +10 -4
  23. data/ext/nmatrix/storage/list.cpp +265 -48
  24. data/ext/nmatrix/storage/list.h +6 -9
  25. data/ext/nmatrix/storage/storage.cpp +44 -54
  26. data/ext/nmatrix/storage/storage.h +2 -2
  27. data/ext/nmatrix/storage/yale/class.h +1070 -0
  28. data/ext/nmatrix/storage/yale/iterators/base.h +142 -0
  29. data/ext/nmatrix/storage/yale/iterators/iterator.h +130 -0
  30. data/ext/nmatrix/storage/yale/iterators/row.h +449 -0
  31. data/ext/nmatrix/storage/yale/iterators/row_stored.h +139 -0
  32. data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +167 -0
  33. data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +123 -0
  34. data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
  35. data/ext/nmatrix/storage/yale/yale.cpp +1785 -0
  36. data/ext/nmatrix/storage/{yale.h → yale/yale.h} +23 -55
  37. data/ext/nmatrix/types.h +2 -0
  38. data/ext/nmatrix/util/io.cpp +27 -45
  39. data/ext/nmatrix/util/io.h +0 -2
  40. data/ext/nmatrix/util/sl_list.cpp +169 -28
  41. data/ext/nmatrix/util/sl_list.h +9 -3
  42. data/lib/nmatrix/blas.rb +20 -20
  43. data/lib/nmatrix/enumerate.rb +1 -1
  44. data/lib/nmatrix/io/mat5_reader.rb +8 -14
  45. data/lib/nmatrix/lapack.rb +3 -3
  46. data/lib/nmatrix/math.rb +3 -3
  47. data/lib/nmatrix/nmatrix.rb +19 -5
  48. data/lib/nmatrix/nvector.rb +2 -0
  49. data/lib/nmatrix/shortcuts.rb +90 -125
  50. data/lib/nmatrix/version.rb +1 -1
  51. data/nmatrix.gemspec +7 -8
  52. data/spec/{nmatrix_spec.rb → 00_nmatrix_spec.rb} +45 -208
  53. data/spec/01_enum_spec.rb +184 -0
  54. data/spec/{slice_spec.rb → 02_slice_spec.rb} +55 -39
  55. data/spec/blas_spec.rb +22 -54
  56. data/spec/elementwise_spec.rb +9 -8
  57. data/spec/io_spec.rb +6 -4
  58. data/spec/lapack_spec.rb +26 -26
  59. data/spec/math_spec.rb +9 -5
  60. data/spec/nmatrix_yale_spec.rb +29 -61
  61. data/spec/shortcuts_spec.rb +34 -22
  62. data/spec/slice_set_spec.rb +157 -0
  63. data/spec/spec_helper.rb +42 -2
  64. data/spec/stat_spec.rb +192 -0
  65. metadata +52 -55
  66. data/ext/nmatrix/storage/yale.cpp +0 -2284
  67. data/spec/nmatrix_list_spec.rb +0 -113
  68. data/spec/nvector_spec.rb +0 -112
@@ -0,0 +1,139 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == row_stored.h
25
+ //
26
+ // Iterator for traversing a single stored row of a matrix (needed
27
+ // for row.h). FIXME: This is not as efficient as it could be; it uses
28
+ // two binary searches to find the beginning and end of each slice.
29
+ // The end search shouldn't be necessary, but I couldn't make it
30
+ // work without it, and eventually decided my dissertation should
31
+ // be a priority.
32
+ //
33
+
34
+ #ifndef YALE_ITERATORS_ROW_STORED_H
35
+ # define YALE_ITERATORS_ROW_STORED_H
36
+
37
+ #include <stdexcept>
38
+
39
+ namespace nm { namespace yale_storage {
40
+
41
+
42
+ /*
43
+ * Iterator for visiting each stored element in a row, including diagonals.
44
+ */
45
+ template <typename D,
46
+ typename RefType,
47
+ typename YaleRef = typename std::conditional<
48
+ std::is_const<RefType>::value,
49
+ const nm::YaleStorage<D>,
50
+ nm::YaleStorage<D>
51
+ >::type,
52
+ typename RowRef = typename std::conditional<
53
+ std::is_const<RefType>::value,
54
+ const row_iterator_T<D,RefType,YaleRef>,
55
+ row_iterator_T<D,RefType,YaleRef>
56
+ >::type>
57
+ class row_stored_iterator_T : public row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> {
58
+ protected:
59
+ using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::r;
60
+ using row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>::p_;
61
+ bool d_visited, d;
62
+
63
+ public:
64
+
65
+ // end_ is necessary for the logic when a row is empty other than the diagonal. If we just
66
+ // relied on pp == last_p+1, it'd look like these empty rows were actually end() iterators.
67
+ // So we have to actually mark end_ by telling it to ignore that diagonal visitation.
68
+ row_stored_iterator_T(RowRef& row, size_t pp, bool end_ = false)
69
+ : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row, pp),
70
+ d_visited(!row.has_diag()), // if the row has no diagonal, just marked it as visited.
71
+ d(r.is_diag_first() && !end_) // do we start at the diagonal?
72
+ {
73
+ }
74
+
75
+ /* Diagonal constructor. Puts us on the diagonal (unless end is true) */
76
+ /*row_stored_iterator_T(RowRef& row, bool end_, size_t j)
77
+ : row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>(row.ndfind(j)),
78
+ d_visited(false),
79
+ d(!end_ && j + row.offset(1) == row.real_i())
80
+ { }*/
81
+
82
+ virtual bool diag() const {
83
+ return d;
84
+ }
85
+
86
+ virtual bool end() const {
87
+ return !d && p_ > r.p_last;
88
+ }
89
+
90
+ row_stored_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
91
+ if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
92
+ if (d) {
93
+ d_visited = true;
94
+ d = false;
95
+ } else {
96
+ ++p_;
97
+ // Are we at a diagonal?
98
+ // If we hit the end or reach a point where j > diag_j, and still
99
+ // haven't visited the diagonal, we should do so before continuing.
100
+ if (!d_visited && (end() || j() > r.diag_j())) {
101
+ d = true;
102
+ }
103
+ }
104
+
105
+ return *this;
106
+ }
107
+
108
+ row_stored_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
109
+ row_stored_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
110
+ return ++r;
111
+ }
112
+
113
+ size_t j() const {
114
+ if (end()) throw std::out_of_range("cannot dereference an end pointer");
115
+ return (d ? r.p_diag() : r.ija(p_)) - r.offset(1);
116
+ }
117
+
118
+ // Need to declare all row_stored_iterator_T friends of each other.
119
+ template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_iterator_T;
120
+
121
+ // De-reference the iterator
122
+ RefType& operator*() {
123
+ return d ? r.a(r.p_diag()) : r.a(p_);
124
+ }
125
+
126
+ RefType& operator*() const {
127
+ return d ? r.a(r.p_diag()) : r.a(p_);
128
+ }
129
+
130
+ // Ruby VALUE de-reference
131
+ VALUE operator~() const {
132
+ return nm_rb_dereference<D>(**this);
133
+ }
134
+
135
+ };
136
+
137
+ }} // end of namespace nm::yale_storage
138
+
139
+ #endif // YALE_ITERATORS_ROW_STORED_H
@@ -0,0 +1,167 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == row_stored_nd.h
25
+ //
26
+ // Yale storage row-by-row nondiagonal-storage iterator
27
+ //
28
+
29
+ #ifndef YALE_ITERATORS_ROW_STORED_ND_H
30
+ # define YALE_ITERATORS_ROW_STORED_ND_H
31
+
32
+ #include <type_traits>
33
+ #include <typeinfo>
34
+ #include <stdexcept>
35
+
36
+ namespace nm { namespace yale_storage {
37
+
38
+ /*
39
+ * Constants
40
+ */
41
+ const float GROWTH_CONSTANT = 1.5;
42
+
43
+
44
+ /*
45
+ * Forward declarations
46
+ */
47
+ template <typename D, typename RefType, typename YaleRef> class row_iterator_T;
48
+
49
+ /*
50
+ * Iterator for visiting each stored element in a row, including diagonals.
51
+ */
52
+ template <typename D,
53
+ typename RefType,
54
+ typename YaleRef = typename std::conditional<
55
+ std::is_const<RefType>::value,
56
+ const nm::YaleStorage<D>,
57
+ nm::YaleStorage<D>
58
+ >::type,
59
+ typename RowRef = typename std::conditional<
60
+ std::is_const<RefType>::value,
61
+ const row_iterator_T<D,RefType,YaleRef>,
62
+ row_iterator_T<D,RefType,YaleRef>
63
+ >::type>
64
+ class row_stored_nd_iterator_T {
65
+ protected:
66
+ RowRef& r;
67
+ size_t p_;
68
+
69
+ public:
70
+
71
+ row_stored_nd_iterator_T(RowRef& row, size_t pp)
72
+ : r(row),
73
+ p_(pp) // do we start at the diagonal?
74
+ {
75
+ }
76
+
77
+ // DO NOT IMPLEMENT THESE FUNCTIONS. They prevent C++ virtual slicing
78
+ //template <typename T> row_stored_nd_iterator_T(T const& rhs);
79
+ //template <typename T> row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(T const& rhs);
80
+
81
+ // Next two functions are to ensure we can still cast between nd iterators.
82
+ row_stored_nd_iterator_T(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs)
83
+ : r(rhs.r), p_(rhs.p_)
84
+ { }
85
+
86
+ row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& operator=(row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> const& rhs) {
87
+ if (&r != &(rhs.r))
88
+ throw std::logic_error("can't assign iterator from another row iterator");
89
+ p_ = rhs.p_;
90
+ }
91
+
92
+ virtual size_t p() const { return p_; }
93
+
94
+ virtual bool end() const {
95
+ return p_ > r.p_last;
96
+ }
97
+
98
+ row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef>& operator++() {
99
+ if (end()) throw std::out_of_range("cannot increment row stored iterator past end of stored row");
100
+ ++p_;
101
+
102
+ return *this;
103
+ }
104
+
105
+ row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> operator++(int dummy) const {
106
+ row_stored_nd_iterator_T<D,RefType,YaleRef,RowRef> r(*this);
107
+ return ++r;
108
+ }
109
+
110
+ virtual size_t j() const {
111
+ if (end()) throw std::out_of_range("cannot dereference (get j()) for an end pointer");
112
+ return r.ija(p_) - r.offset(1);
113
+ }
114
+
115
+ // Need to declare all row_stored_nd_iterator_T friends of each other.
116
+ template <typename E, typename ERefType, typename EYaleRef, typename ERowRef> friend class row_stored_nd_iterator_T;
117
+
118
+
119
+ virtual bool operator==(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
120
+ if (r.i() != rhs.r.i()) return false;
121
+ if (end()) return rhs.end();
122
+ else if (rhs.end()) return false;
123
+ return j() == rhs.j();
124
+ }
125
+
126
+ // There is something wrong with this function.
127
+ virtual bool operator!=(const row_stored_nd_iterator_T<D,RefType>& rhs) const {
128
+ if (r.i() != rhs.r.i()) return true;
129
+ if (end()) return !rhs.end();
130
+ else if (rhs.end()) return true;
131
+ return j() != rhs.j();
132
+ }
133
+
134
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
135
+ bool operator<(const row_stored_nd_iterator_T<E,ERefType>& rhs) const {
136
+ if (r < rhs.r) return true;
137
+ if (r > rhs.r) return false;
138
+
139
+ // r == rhs.r
140
+ if (end()) return false;
141
+ if (rhs.end()) return true;
142
+ return j() < rhs.j();
143
+ }
144
+
145
+ // De-reference the iterator
146
+ RefType& operator*() {
147
+ return r.a(p_);
148
+ }
149
+
150
+ RefType& operator*() const {
151
+ return r.a(p_);
152
+ }
153
+
154
+ // Ruby VALUE de-reference
155
+ VALUE operator~() const {
156
+ return nm_rb_dereference<D>(**this);
157
+ }
158
+
159
+ inline virtual VALUE rb_j() const { return LONG2NUM(j()); }
160
+
161
+ };
162
+
163
+
164
+
165
+ } } // end of namespace nm::yale_storage
166
+
167
+ #endif // YALE_ITERATORS_ROW_STORED_ND_H
@@ -0,0 +1,123 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == stored_diagonal_iterator.h
25
+ //
26
+ // Yale storage diagonal-storage iterator
27
+ //
28
+
29
+ #ifndef YALE_ITERATORS_STORED_DIAGONAL_H
30
+ # define YALE_ITERATORS_STORED_DIAGONAL_H
31
+
32
+ #include <type_traits>
33
+ #include <typeinfo>
34
+
35
+ namespace nm { namespace yale_storage {
36
+
37
+ /*
38
+ * Iterate across the stored diagonal.
39
+ */
40
+ template <typename D,
41
+ typename RefType,
42
+ typename YaleRef = typename std::conditional<
43
+ std::is_const<RefType>::value,
44
+ const nm::YaleStorage<D>,
45
+ nm::YaleStorage<D>
46
+ >::type>
47
+ class stored_diagonal_iterator_T : public basic_iterator_T<D,RefType,YaleRef> {
48
+ using basic_iterator_T<D,RefType,YaleRef>::p_;
49
+ using basic_iterator_T<D,RefType,YaleRef>::y;
50
+ using basic_iterator_T<D,RefType,YaleRef>::offset;
51
+ using basic_iterator_T<D,RefType,YaleRef>::shape;
52
+ public:
53
+ stored_diagonal_iterator_T(YaleRef& obj, size_t d = 0)
54
+ : basic_iterator_T<D,RefType,YaleRef>(obj, // y
55
+ std::max(obj.offset(0), obj.offset(1)) + d - obj.offset(0), // i_
56
+ std::max(obj.offset(0), obj.offset(1)) + d) // p_
57
+ {
58
+ // std::cerr << "sdbegin: d=" << d << ", p_=" << p_ << ", i()=" << i() << ", j()=" << j() << std::endl;
59
+ // p_ can range from max(y.offset(0), y.offset(1)) to min(y.real_shape(0), y.real_shape(1))
60
+ }
61
+
62
+
63
+ size_t d() const {
64
+ return p_ - std::max(offset(0), offset(1));
65
+ }
66
+
67
+ stored_diagonal_iterator_T<D,RefType,YaleRef>& operator++() {
68
+ if (i() < shape(0)) ++p_;
69
+ return *this;
70
+ }
71
+
72
+ stored_diagonal_iterator_T<D,RefType,YaleRef> operator++(int dummy) const {
73
+ stored_diagonal_iterator_T<D,RefType,YaleRef> iter(*this);
74
+ return ++iter;
75
+ }
76
+
77
+ // Indicates if we're at the end of the iteration.
78
+ bool end() const {
79
+ return p_ >= std::min( shape(0) + offset(0), shape(1) + offset(1) );
80
+ }
81
+
82
+ // i() and j() are how we know if we're past-the-end. i will be shape(0) and j will be 0.
83
+ size_t i() const {
84
+ return p_ - offset(0);
85
+ }
86
+
87
+ size_t j() const {
88
+ return p_ - offset(1);
89
+ }
90
+
91
+
92
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
93
+ bool operator!=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() != rhs.d(); }
94
+
95
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
96
+ bool operator==(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return !(*this != rhs); }
97
+
98
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
99
+ bool operator<(const stored_diagonal_iterator_T<E,ERefType>& rhs) const { return d() < rhs.d(); }
100
+
101
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
102
+ bool operator<=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
103
+ return d() <= rhs.d();
104
+ }
105
+
106
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
107
+ bool operator>(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
108
+ return d() > rhs.d();
109
+ }
110
+
111
+ template <typename E, typename ERefType = typename std::conditional<std::is_const<RefType>::value, const E, E>::type>
112
+ bool operator>=(const stored_diagonal_iterator_T<E,ERefType>& rhs) const {
113
+ return d() >= rhs.d();
114
+ }
115
+
116
+ RefType& operator*() { return y.a(p_); }
117
+ RefType& operator*() const { return y.a(p_); }
118
+
119
+ };
120
+
121
+ } } // end of namespace nm::yale_storage
122
+
123
+ #endif // YALE_ITERATORS_STORED_DIAGONAL_H
@@ -0,0 +1,110 @@
1
+ /////////////////////////////////////////////////////////////////////
2
+ // = NMatrix
3
+ //
4
+ // A linear algebra library for scientific computation in Ruby.
5
+ // NMatrix is part of SciRuby.
6
+ //
7
+ // NMatrix was originally inspired by and derived from NArray, by
8
+ // Masahiro Tanaka: http://narray.rubyforge.org
9
+ //
10
+ // == Copyright Information
11
+ //
12
+ // SciRuby is Copyright (c) 2010 - 2013, Ruby Science Foundation
13
+ // NMatrix is Copyright (c) 2013, Ruby Science Foundation
14
+ //
15
+ // Please see LICENSE.txt for additional copyright notices.
16
+ //
17
+ // == Contributing
18
+ //
19
+ // By contributing source code to SciRuby, you agree to be bound by
20
+ // our Contributor Agreement:
21
+ //
22
+ // * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
23
+ //
24
+ // == transpose.h
25
+ //
26
+ // Functions for Yale math: transposing
27
+ //
28
+
29
+ #ifndef YALE_MATH_TRANSPOSE_H
30
+ # define YALE_MATH_TRANSPOSE_H
31
+
32
+ namespace nm { namespace yale_storage {
33
+
34
/*
 * Transposes a generic Yale matrix (old or new). Specify new by setting
 * DiagA = true.
 *
 * Based on transp from SMMP (same as symbmm and numbmm).
 *
 * This is not named in the same way as most yale_storage functions because
 * it does not act on a YALE_STORAGE object.
 *
 * Inputs:  n x m matrix in (ia, ja, a); a_default is the unstored value.
 * Outputs: m x n transpose written into (ib, jb, b); values are copied only
 *          when Move is true.
 */

template <typename AD, typename BD, bool DiagA, bool Move>
void transpose_yale(const size_t n, const size_t m,
                    const size_t* ia, const size_t* ja, const AD* a, const AD& a_default,
                    size_t* ib, size_t* jb, BD* b, const BD& b_default) {

  // Zero out the result's row-pointer workspace.
  for (size_t q = 0; q <= m; ++q) ib[q] = 0;

  if (Move)
    for (size_t q = 0; q <= m; ++q) b[q] = b_default;

  // New Yale stores the diagonal first, so non-diagonal data begins at m+1.
  ib[0] = DiagA ? m + 1 : 0;

  /* count indices for each column */

  for (size_t row = 0; row < n; ++row) {
    for (size_t q = ia[row]; q < ia[row+1]; ++q) {
      ++ib[ja[q] + 1];
    }
  }

  // Prefix-sum the counts into provisional row pointers.
  for (size_t q = 0; q < m; ++q) {
    ib[q+1] += ib[q];
  }

  /* now make jb */

  // Scatter column indices (and values, when moving) into place, bumping
  // each destination row's pointer as its slots fill.
  for (size_t row = 0; row < n; ++row) {
    for (size_t q = ia[row]; q < ia[row+1]; ++q) {
      const size_t col = ja[q];
      jb[ib[col]] = row;

      if (Move && a[q] != a_default)
        b[ib[col]] = a[q];

      ++ib[col];
    }
  }

  /* now fixup ib */

  // The scatter advanced every pointer by one row's worth of entries;
  // shift them back down by one slot.
  for (size_t q = m; q >= 1; --q) {
    ib[q] = ib[q-1];
  }


  if (DiagA) {
    if (Move) {
      // The diagonal is stored separately in new Yale; copy it over directly.
      const size_t dlen = std::min(n, m);

      for (size_t q = 0; q < dlen; ++q) {
        b[q] = a[q];
      }
    }
    ib[0] = m + 1;

  } else {
    ib[0] = 0;
  }
}
107
+
108
+ } } // end of namespace nm::yale_storage
109
+
110
+ #endif