gps_pvt 0.1.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +84 -0
- data/Gemfile +10 -0
- data/README.md +86 -0
- data/Rakefile +86 -0
- data/bin/console +15 -0
- data/bin/setup +8 -0
- data/ext/gps_pvt/Coordinate/Coordinate_wrap.cxx +6613 -0
- data/ext/gps_pvt/GPS/GPS_wrap.cxx +16019 -0
- data/ext/gps_pvt/SylphideMath/SylphideMath_wrap.cxx +21050 -0
- data/ext/gps_pvt/extconf.rb +70 -0
- data/ext/ninja-scan-light/tool/navigation/EGM.h +2971 -0
- data/ext/ninja-scan-light/tool/navigation/GPS.h +2432 -0
- data/ext/ninja-scan-light/tool/navigation/GPS_Solver.h +479 -0
- data/ext/ninja-scan-light/tool/navigation/GPS_Solver_Base.h +1081 -0
- data/ext/ninja-scan-light/tool/navigation/GPS_Solver_MultiFrequency.h +199 -0
- data/ext/ninja-scan-light/tool/navigation/GPS_Solver_RAIM.h +210 -0
- data/ext/ninja-scan-light/tool/navigation/MagneticField.h +928 -0
- data/ext/ninja-scan-light/tool/navigation/NTCM.h +211 -0
- data/ext/ninja-scan-light/tool/navigation/RINEX.h +1781 -0
- data/ext/ninja-scan-light/tool/navigation/WGS84.h +186 -0
- data/ext/ninja-scan-light/tool/navigation/coordinate.h +406 -0
- data/ext/ninja-scan-light/tool/param/bit_array.h +145 -0
- data/ext/ninja-scan-light/tool/param/complex.h +558 -0
- data/ext/ninja-scan-light/tool/param/matrix.h +4049 -0
- data/ext/ninja-scan-light/tool/param/matrix_fixed.h +665 -0
- data/ext/ninja-scan-light/tool/param/matrix_special.h +562 -0
- data/ext/ninja-scan-light/tool/param/quaternion.h +765 -0
- data/ext/ninja-scan-light/tool/param/vector3.h +651 -0
- data/ext/ninja-scan-light/tool/swig/Coordinate.i +177 -0
- data/ext/ninja-scan-light/tool/swig/GPS.i +1102 -0
- data/ext/ninja-scan-light/tool/swig/SylphideMath.i +1234 -0
- data/ext/ninja-scan-light/tool/swig/extconf.rb +5 -0
- data/ext/ninja-scan-light/tool/swig/makefile +53 -0
- data/ext/ninja-scan-light/tool/swig/spec/GPS_spec.rb +417 -0
- data/ext/ninja-scan-light/tool/swig/spec/SylphideMath_spec.rb +489 -0
- data/gps_pvt.gemspec +57 -0
- data/lib/gps_pvt/receiver.rb +375 -0
- data/lib/gps_pvt/ubx.rb +148 -0
- data/lib/gps_pvt/version.rb +5 -0
- data/lib/gps_pvt.rb +9 -0
- data/sig/gps_pvt.rbs +4 -0
- metadata +117 -0
@@ -0,0 +1,665 @@
|
|
1
|
+
/*
|
2
|
+
* Copyright (c) 2019, M.Naruoka (fenrir)
|
3
|
+
* All rights reserved.
|
4
|
+
*
|
5
|
+
* Redistribution and use in source and binary forms, with or without modification,
|
6
|
+
* are permitted provided that the following conditions are met:
|
7
|
+
*
|
8
|
+
* - Redistributions of source code must retain the above copyright notice,
|
9
|
+
* this list of conditions and the following disclaimer.
|
10
|
+
* - Redistributions in binary form must reproduce the above copyright notice,
|
11
|
+
* this list of conditions and the following disclaimer in the documentation
|
12
|
+
* and/or other materials provided with the distribution.
|
13
|
+
* - Neither the name of the naruoka.org nor the names of its contributors
|
14
|
+
* may be used to endorse or promote products derived from this software
|
15
|
+
* without specific prior written permission.
|
16
|
+
*
|
17
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
18
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
19
|
+
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
20
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
|
21
|
+
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
22
|
+
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
23
|
+
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
24
|
+
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
25
|
+
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
26
|
+
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
27
|
+
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
28
|
+
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
29
|
+
*
|
30
|
+
*/
|
31
|
+
|
32
|
+
#ifndef __MATRIX_FIXED_H__
|
33
|
+
#define __MATRIX_FIXED_H__
|
34
|
+
|
35
|
+
/** @file
|
36
|
+
* @brief extension of Portable matrix library to add fixed size matrix
|
37
|
+
*
|
38
|
+
* This is hand-made fixed size matrix library whose features are
|
39
|
+
* 1) to use template for generic primitive type
|
40
|
+
* including not only double for general purpose,
|
41
|
+
* but also int used with fixed float for embedded environment.
|
42
|
+
* 2) Predetermined the buffer size, which implies running without heap memory.
|
43
|
+
* 3) to use views for transpose and partial matrices
|
44
|
+
* to reduce copies.
|
45
|
+
* 4) to use expression template technique
|
46
|
+
* for matrix multiplying, adding, and subtracting
|
47
|
+
* to eliminate temporary objects.
|
48
|
+
*
|
49
|
+
* Be careful, being different from the original (flexible) matrix,
|
50
|
+
* this fixed size matrix always utilizes deep copy.
|
51
|
+
*/
|
52
|
+
|
53
|
+
#include "param/matrix.h"
|
54
|
+
|
55
|
+
#if (__cplusplus < 201103L) && !defined(noexcept)
|
56
|
+
#define noexcept throw()
|
57
|
+
#endif
|
58
|
+
#if defined(DEBUG) && !defined(throws_when_debug)
|
59
|
+
#define throws_when_debug
|
60
|
+
#else
|
61
|
+
#define throws_when_debug noexcept
|
62
|
+
#endif
|
63
|
+
|
64
|
+
/**
 * @brief Two-dimensional array with a compile-time fixed buffer size.
 *
 * Array2D_Fixed keeps its elements in an externally supplied nR x nC
 * C array (via the buffer constructor, or via the nested buf_t used by
 * Matrix_Fixed); it never allocates heap memory itself.
 * The logical size (rows()/columns()) may be smaller than the buffer,
 * but never larger (enforced by check_size()).
 * Copy construction is shallow (the buffer pointer is shared), while
 * operator= performs a deep element-by-element copy; this asymmetry is
 * deliberate and documented on the members below.
 *
 * @param T type of each element
 * @param nR buffer row count (compile-time)
 * @param nC buffer column count (compile-time, defaults to nR)
 */
template <class T, int nR, int nC = nR>
class Array2D_Fixed : public Array2D<T, Array2D_Fixed<T, nR, nC> > {
  public:
    typedef Array2D_Fixed<T, nR, nC> self_t;
    typedef Array2D<T, self_t> super_t;
    // Shallow copy() below cannot serve as a standalone clone.
    static const bool clonable = false;

    /// Maps this array type to the same-shape array of another element type.
    template <class T2>
    struct cast_t {
      typedef Array2D_Fixed<T2, nR, nC> res_t;
    };

    using super_t::rows;
    using super_t::columns;

    template <class T2, class Array2D_Type2, class ViewType2>
    friend class Matrix; // for protected copy(), which can only generate shallow copy

  protected:
    T (* const values)[nR][nC]; ///< array for values

    /**
     * Verify that the logical size fits inside the fixed buffer.
     * @throw std::runtime_error when rows()/columns() exceed nR/nC
     */
    void check_size() const {
      if((nR < rows()) || (nC < columns())){
        throw std::runtime_error("larger rows or columns");
      }
    }

    /**
     * Deep-copy elements from another (possibly differently typed) array.
     * Only the source's rows() x columns() region is written.
     * @throw std::runtime_error when this array has no backing buffer
     */
    template <class T2>
    void fill_values(const Array2D_Frozen<T2> &array){
      if(!values){
        throw std::runtime_error("No buffer");
      }
      const unsigned int i_end(array.rows()), j_end(array.columns());
      for(unsigned int i(0); i < i_end; ++i){
        for(unsigned int j(0); j < j_end; ++j){
          (*values)[i][j] = array(i, j);
        }
      }
    }

  public:
    /**
     * Constructor without a backing buffer (values == NULL).
     * Usable as a size-only placeholder; element access requires a buffer.
     */
    Array2D_Fixed(const unsigned int &rows = 0, const unsigned int &columns = 0)
        : super_t(rows, columns), values(NULL) {
      check_size();
    }

    /**
     * Constructor binding an external nR x nC buffer.
     * The buffer must outlive this object (not enforced here).
     */
    Array2D_Fixed(
        T (&buf)[nR][nC], const unsigned int &rows = 0, const unsigned int &columns = 0)
        : super_t(rows, columns), values(&buf) {
      check_size();
    }

    /**
     * Copy constructor, which performs shallow copy.
     *
     * @param array another one
     */
    Array2D_Fixed(const self_t &src) noexcept
        : super_t(src.rows(), src.columns()), values(src.values){
    }

    /**
     * Destructor
     * (The buffer is externally owned, so nothing is released.)
     */
    ~Array2D_Fixed(){}

    /**
     * Assigners, which performs deep copy
     * This is different from Array2D_Dense::operator=(const self_t &) doing shallow copy
     */
    self_t &operator=(const self_t &rhs) noexcept {
      if(this != &rhs){
        super_t::m_rows = rhs.m_rows;
        super_t::m_columns = rhs.m_columns;
        // A source without a buffer only transfers its size.
        if(rhs.values){fill_values(rhs);}
      }
      return *this;
    }

    /**
     * Assigners for different type, which performs deep copy
     * This is same as Array2D_Dense::operator=(const self_t &)
     */
    template <class T2>
    self_t &operator=(const Array2D_Frozen<T2> &array){
      super_t::m_rows = array.rows();
      super_t::m_columns = array.columns();
      check_size();
      fill_values(array);
      return *this;
    }

  protected:
    // Unchecked element read except under DEBUG (see throws_when_debug macro).
    inline const T &get(
        const unsigned int &row,
        const unsigned int &column) const throws_when_debug {
#if defined(DEBUG)
      super_t::check_index(row, column);
#endif
      return (*values)[row][column];
    }

  public:
    /**
     * Accessor for element
     *
     * @param row Row index
     * @param column Column Index
     * @return (T) Element
     * @throw std::out_of_range When the indices are out of range
     */
    T operator()(
        const unsigned int &row,
        const unsigned int &column) const throws_when_debug {
      return get(row, column);
    }
    // Mutable accessor; const_cast is safe because *this is actually non-const.
    T &operator()(
        const unsigned int &row,
        const unsigned int &column) throws_when_debug {
      return const_cast<T &>(
          const_cast<const self_t *>(this)->get(row, column));
    }

  protected:
    /**
     * Element-wise copy/clear helpers, selected at compile time.
     * The generic version performs per-element assignment, which is
     * correct for any T2 with value semantics.
     */
    template <class T2, bool do_memory_op = std::numeric_limits<T2>::is_specialized>
    struct setup_t {
      static void copy(T2 *dest, const T2 *src, const unsigned int &length){
        for(unsigned int i(0); i < length; ++i){
          dest[i] = src[i];
        }
      }
      static void clear(T2 *target){
        for(unsigned int i(0); i < nR * nC; ++i){
          target[i] = T2();
        }
      }
    };
    /**
     * Optimized version using raw memory operations when
     * std::numeric_limits<T2>::is_specialized (i.e. arithmetic-like types).
     * NOTE(review): memcpy/memset assume T2 is trivially copyable and that
     * all-zero bytes represent T2(0) — true for the arithmetic types this
     * trait selects; confirm before specializing numeric_limits for a UDT.
     */
    template <class T2>
    struct setup_t<T2, true> {
      static void copy(T2 *dest, const T2 *src, const unsigned int &length){
        std::memcpy(dest, src, sizeof(T2) * length);
      }
      static void clear(T2 *target){
        std::memset(target, 0, sizeof(T2) * nR * nC);
      }
    };

  public:
    /**
     * Zero-fill the whole nR x nC buffer.
     * NOTE(review): no NULL check here; calling clear() on a buffer-less
     * instance (default-constructed) would be undefined — verify callers.
     */
    void clear() noexcept {
      setup_t<T>::clear((T *)values);
    }

  protected:
    // Accessible only by friend Matrix; see "clonable = false" above.
    self_t copy(const bool &is_deep = false) const {
      return self_t(*this); ///< is_deep flag will be ignored, and return shallow copy
    }

  public:
    /**
     * Owning storage block intended to be inherited by Matrix_Fixed,
     * so that the buffer is constructed before the array view over it.
     */
    struct buf_t {
      T buf[nR][nC]; ///< fixed size buffer
      buf_t() noexcept {} // leaves buf uninitialized on purpose
      /**
       * Initialize from a row-major serialized sequence;
       * zero-fills when serialized is NULL.
       */
      buf_t(
          const unsigned int &rows, const unsigned int &columns,
          const T *serialized) noexcept {
        if(serialized){
          for(unsigned int i(0), idx(0); i < rows; ++i, idx += columns){
            setup_t<T>::copy(buf[i], &serialized[idx], columns);
          }
        }else{
          setup_t<T>::clear((T *)&buf);
        }
      }
    };
};
|
238
|
+
|
239
|
+
/**
 * @brief Matrix with a compile-time fixed buffer, no heap allocation.
 *
 * Matrix_Fixed privately inherits Array2D_Fixed::buf_t so that the raw
 * nR x nC buffer is constructed *before* the Matrix base, allowing every
 * constructor to hand `buf_t::buf` to the storage in its mem-initializer.
 * Unlike the flexible Matrix, all copies are deep (see Array2D_Fixed).
 *
 * @param T element type
 * @param nR buffer row count
 * @param nC buffer column count (defaults to nR)
 */
template <class T, int nR, int nC = nR>
class Matrix_Fixed
    : protected Array2D_Fixed<T, nR, nC>::buf_t,
    public Matrix<T, Array2D_Fixed<T, nR, nC> > {
  public:
    typedef typename Array2D_Fixed<T, nR, nC>::buf_t buf_t;
    typedef Matrix<T, Array2D_Fixed<T, nR, nC> > super_t;

#if defined(__GNUC__) && (__GNUC__ < 5)
    typedef typename super_t::storage_t storage_t;
#elif defined(_MSC_VER)
    typedef typename super_t::storage_t storage_t; // To fix VC2010(C2514) in constructor
    using super_t::operator(); // To fix VC2010(C2106) of X(r, c) = something
#else
    using typename super_t::storage_t;
#endif

    typedef Matrix_Fixed<T, nR, nC> self_t;

  protected:
    /**
     * Construct from an existing storage.
     * The idiom `storage_t(buf_t::buf) = storage` builds a storage view
     * over our own buffer and deep-assigns the source into it
     * (Array2D_Fixed::operator= copies elements), so the base Matrix
     * ends up backed by this object's buffer.
     */
    Matrix_Fixed(const storage_t &storage) noexcept
        : buf_t(), super_t(storage_t(buf_t::buf) = storage) {}

    template <class T2, class Array2D_Type2, class ViewType2>
    friend class Matrix;

  public:
    /**
     * Constructor without customization.
     * The elements will be cleared with T(0).
     *
     */
    Matrix_Fixed() noexcept
        : buf_t(), super_t(storage_t(buf_t::buf)){}

    /**
     * Constructor with specified row and column numbers, and values.
     * The size will be compared with the predefined numbers.
     * If the size is larger than buffer, then error will be thrown.
     * The elements will be cleared with T(0) if serialized is NULL (default).
     *
     * @param rows Row number
     * @param columns Column number
     * @param serialized Initial values of elements
     */
    Matrix_Fixed(
        const unsigned int &rows, const unsigned int &columns,
        const T *serialized = NULL)
        : buf_t(rows, columns, serialized),
        super_t(storage_t(buf_t::buf, rows, columns)) {}

    /**
     * Copy constructor generating deep copy.
     */
    Matrix_Fixed(const self_t &matrix) noexcept
        : buf_t(), super_t(storage_t(buf_t::buf) = matrix.storage) {}

    /**
     * Converting constructor evaluating a frozen (possibly lazy
     * expression-template) matrix into this fixed buffer.
     */
    template <class T2, class Array2D_Type2, class ViewType2>
    Matrix_Fixed(const Matrix_Frozen<T2, Array2D_Type2, ViewType2> &matrix)
        : buf_t(), super_t(storage_t(buf_t::buf, matrix.rows(), matrix.columns())){
      super_t::replace(matrix);
    }
    /**
     * Destructor
     */
    virtual ~Matrix_Fixed(){}

    /**
     * Assigner performing deep copy by Array2D_Fixed::operator=(const Array2D_Fixed &)
     */
    self_t &operator=(const self_t &another){
      super_t::operator=(another); // frozen_t::operator=(const frozen_t &) is exactly called.
      return *this;
    }

    /**
     * Assigner performing deep copy by Array2D_Fixed::operator=(const Array2D_Frozen &)
     */
    template <class T2, class Array2D_Type2>
    self_t &operator=(const Matrix<T2, Array2D_Type2> &matrix){
      super_t::operator=(matrix);
      /* frozen_t::operator=(const frozen_t &) or frozen_t::operator=(const another_frozen_t &)
       * is conditionally called.
       */
      return *this;
    }

    /**
     * Holder storing a deep copy of a fixed matrix; used by
     * Matrix_Fixed_Wrapped (below) to keep an operand alive.
     */
    struct wrapped_t {
      self_t mat;
      wrapped_t(const self_t &mat_) noexcept
          : mat(mat_) {}
    };
};
|
332
|
+
|
333
|
+
/**
 * MatrixBuilder for Matrix_Fixed: delegate to the builder of the
 * underlying flexible Matrix over Array2D_Fixed storage.
 */
template <class T, int nR, int nC>
struct MatrixBuilder<Matrix_Fixed<T, nR, nC> >
    : public MatrixBuilder<Matrix<T, Array2D_Fixed<T, nR, nC> > > {};
// MatrixBuilder<typename Matrix_Fixed<T, nR, nC>::super_t> invokes "invalid use of incomplete type" error
|
337
|
+
|
338
|
+
/**
 * MatrixBuilder_Dependency for any matrix-like template over
 * Array2D_Fixed storage: derives the fixed-size assignable type.
 * When the view transposes, the buffer dimensions are swapped so the
 * resulting Matrix_Fixed can hold the viewed shape.
 */
template <
    template <class, class, class> class MatrixT,
    class T, class T2, int nR, int nC, class ViewType>
struct MatrixBuilder_Dependency<MatrixT<T, Array2D_Fixed<T2, nR, nC>, ViewType> > {

  static const int row_buffer = (MatrixViewProperty<ViewType>::transposed ? nC : nR);
  static const int column_buffer = (MatrixViewProperty<ViewType>::transposed ? nR : nC);

  typedef Matrix_Fixed<T, row_buffer, column_buffer> assignable_t;

  /// Same-shape fixed matrix with another element type.
  template <class T3>
  struct cast_t {
    typedef Matrix_Fixed<T3, row_buffer, column_buffer> assignable_t;
  };

  /// Fixed matrix resized by affine rules: dim * multiply + add.
  template <int nR_add = 0, int nC_add = 0, int nR_multiply = 1, int nC_multiply = 1>
  struct resize_t {
    typedef Matrix_Fixed<T,
        row_buffer * nR_multiply + nR_add,
        column_buffer * nC_multiply + nC_add> assignable_t;
  };
};
|
360
|
+
|
361
|
+
/**
 * MatrixBuilder_Dependency for the lazy product of two fixed-storage
 * frozen matrices: the result's buffer shape is (rows of LHS) x
 * (columns of RHS), with each side swapped when its view transposes.
 * Implemented by forwarding to the plain Array2D_Fixed dependency above.
 */
template <
    class T, class T_op,
    class T2, class T3, class T4, class T5,
    class ViewType,
    int nR_L, int nC_L, class ViewType_L,
    int nR_R, int nC_R, class ViewType_R>
struct MatrixBuilder_Dependency<
    Matrix_Frozen<
      T,
      Array2D_Operator<T_op, Array2D_Operator_Multiply_by_Matrix<
        Matrix_Frozen<T2, Array2D_Fixed<T3, nR_L, nC_L>, ViewType_L>,
        Matrix_Frozen<T4, Array2D_Fixed<T5, nR_R, nC_R>, ViewType_R> > >,
      ViewType> >
    : public MatrixBuilder_Dependency<
      Matrix_Frozen<
        T,
        Array2D_Fixed<
          T,
          (MatrixViewProperty<ViewType_L>::transposed ? nC_L : nR_L),
          (MatrixViewProperty<ViewType_R>::transposed ? nR_R : nC_R)>,
        ViewType> > {};
|
382
|
+
|
383
|
+
// For optimization of local temporary matrix
/**
 * Generic MatrixBuilder over any matrix-like template; adds a resize_t
 * partial specialization so that a resize to an absolute size
 * (both multipliers zero) yields a stack-allocated Matrix_Fixed
 * instead of a heap-backed matrix.
 */
template <
    template <class, class, class> class MatrixT,
    class T, class Array2D_Type, class ViewType>
struct MatrixBuilder<MatrixT<T, Array2D_Type, ViewType> >
    : public MatrixBuilderBase<MatrixT<T, Array2D_Type, ViewType> > {

  template <int nR_add = 0, int nC_add = 0, int nR_multiply = 1, int nC_multiply = 1>
  struct resize_t
      : public MatrixBuilderBase<MatrixT<T, Array2D_Type, ViewType> >
        ::template resize_t<nR_add, nC_add, nR_multiply, nC_multiply> {};

  // Multipliers of zero mean "exactly nR_add x nC_add": use a fixed matrix.
  template <int nR_add, int nC_add>
  struct resize_t<nR_add, nC_add, 0, 0> {
    typedef Matrix_Fixed<T, nR_add, nC_add> assignable_t;
  };
};
|
400
|
+
|
401
|
+
/**
 * Operand holder for binary expression nodes over fixed matrices.
 * Each `*_buffered` flag selects storage by value (a deep copy that
 * keeps a temporary operand alive) versus storage by const reference
 * (cheap, but the referent must outlive the expression).
 * Only the three used combinations are defined; <false, false> is
 * intentionally left undeclared.
 */
template <
    class LHS_T, class RHS_T,
    bool lhs_buffered = false, bool rhs_buffered = false>
struct Matrix_Fixed_BinaryOperator_buffer;

/// Buffer the left operand by value; keep the right by reference.
template <class LHS_T, class RHS_T>
struct Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, true, false> {
  LHS_T lhs;
  const RHS_T &rhs;
  Matrix_Fixed_BinaryOperator_buffer(const LHS_T &lhs_, const RHS_T &rhs_) noexcept
      : lhs(lhs_), rhs(rhs_) {}
};
/// Keep the left operand by reference; buffer the right by value.
template <class LHS_T, class RHS_T>
struct Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, false, true> {
  const LHS_T &lhs;
  RHS_T rhs;
  Matrix_Fixed_BinaryOperator_buffer(const LHS_T &lhs_, const RHS_T &rhs_) noexcept
      : lhs(lhs_), rhs(rhs_) {}
};
/// Buffer both operands by value.
template <class LHS_T, class RHS_T>
struct Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, true, true> {
  LHS_T lhs;
  RHS_T rhs;
  Matrix_Fixed_BinaryOperator_buffer(const LHS_T &lhs_, const RHS_T &rhs_) noexcept
      : lhs(lhs_), rhs(rhs_) {}
};
|
427
|
+
|
428
|
+
/**
 * Expression node for (fixed matrix) * scalar.
 * Inherits the operand buffer first so the operands exist before the
 * Result_FrozenT base is built from them; every constructor rebuilds
 * the frozen storage against its *own* buffered copies (buf_t::lhs /
 * buf_t::rhs) so the node never refers into another instance.
 * (Name "multipled" is historical and kept for API compatibility.)
 */
template <
    class Result_FrozenT, class LHS_T, class RHS_T,
    bool lhs_buffered = false>
struct Matrix_Fixed_multipled_by_Scalar
    : protected Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, lhs_buffered>,
    public Result_FrozenT {
  typedef Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, lhs_buffered> buf_t;
  /// Copy constructor; re-targets the frozen base at this node's buffer.
  Matrix_Fixed_multipled_by_Scalar(const Matrix_Fixed_multipled_by_Scalar &another) noexcept
      : buf_t(another.buf_t::lhs, another.buf_t::rhs),
      Result_FrozenT(typename Result_FrozenT::storage_t(
        buf_t::lhs.rows(), buf_t::lhs.columns(),
        typename Result_FrozenT::storage_t::op_t(buf_t::lhs, buf_t::rhs))) {}
  /// Construct from operands; result shape equals the LHS shape.
  Matrix_Fixed_multipled_by_Scalar(const LHS_T &lhs, const RHS_T &rhs) noexcept
      : buf_t(lhs, rhs),
      Result_FrozenT(typename Result_FrozenT::storage_t(
        lhs.rows(), lhs.columns(),
        typename Result_FrozenT::storage_t::op_t(buf_t::lhs, buf_t::rhs))) {}
  template <class, class, class, bool> friend struct Matrix_Fixed_multipled_by_Scalar;
  /// Converting copy from a node with a different frozen result type.
  template <class Result_FrozenT2>
  Matrix_Fixed_multipled_by_Scalar(
      const Matrix_Fixed_multipled_by_Scalar<
        Result_FrozenT2, LHS_T, RHS_T, lhs_buffered> &another) noexcept
      : buf_t(another), // use default copy constructor
      Result_FrozenT(typename Result_FrozenT::storage_t(
        buf_t::lhs.rows(), buf_t::lhs.columns(),
        typename Result_FrozenT::storage_t::op_t(buf_t::lhs, buf_t::rhs))) {}
};
|
455
|
+
|
456
|
+
/**
 * Scalar-multiplication traits for Matrix_Fixed: the resulting node
 * buffers the matrix operand by value (lhs_buffered = true) so a
 * temporary Matrix_Fixed can safely appear in `M * s` expressions.
 */
template <class T, int nR_L, int nC_L, class RHS_T>
struct Matrix_multiplied_by_Scalar<Matrix_Fixed<T, nR_L, nC_L>, RHS_T> {
  typedef Matrix_Fixed<T, nR_L, nC_L> lhs_t;
  typedef RHS_T rhs_t;
  typedef Array2D_Operator_Multiply_by_Scalar<
      Matrix_Frozen<T, Array2D_Fixed<T, nR_L, nC_L> >, rhs_t> impl_t;
  typedef Matrix_Frozen<T, Array2D_Operator<T, impl_t> > frozen_t;
  typedef Matrix_Fixed_multipled_by_Scalar<frozen_t, lhs_t, rhs_t, true> mat_t;
  /// Build the lazy product node from the two operands.
  static mat_t generate(const lhs_t &mat, const rhs_t &scalar) {
    return mat_t(mat, scalar);
  }
};
|
468
|
+
|
469
|
+
/**
 * Expression node for (matrix) * (matrix) where at least one operand is
 * a fixed matrix that must be buffered. Layout mirrors
 * Matrix_Fixed_multipled_by_Scalar: the operand buffer base is
 * constructed first, then the Result_FrozenT base is rebuilt against
 * the node's own buffered operands. Result shape is
 * lhs.rows() x rhs.columns().
 */
template <
    class Result_FrozenT, class LHS_T, class RHS_T,
    bool lhs_buffered = false, bool rhs_buffered = false>
struct Matrix_Fixed_multipled_by_Matrix
    : protected Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, lhs_buffered, rhs_buffered>,
    public Result_FrozenT {
  typedef Matrix_Fixed_BinaryOperator_buffer<LHS_T, RHS_T, lhs_buffered, rhs_buffered> buf_t;
  /// Copy constructor; re-targets the frozen base at this node's buffer.
  Matrix_Fixed_multipled_by_Matrix(const Matrix_Fixed_multipled_by_Matrix &another) noexcept
      : buf_t(another.buf_t::lhs, another.buf_t::rhs),
      Result_FrozenT(typename Result_FrozenT::storage_t(
        buf_t::lhs.rows(), buf_t::rhs.columns(),
        typename Result_FrozenT::storage_t::op_t(buf_t::lhs, buf_t::rhs))) {}
  /// Construct from the two operands.
  Matrix_Fixed_multipled_by_Matrix(const LHS_T &lhs, const RHS_T &rhs) noexcept
      : buf_t(lhs, rhs),
      Result_FrozenT(typename Result_FrozenT::storage_t(
        lhs.rows(), rhs.columns(),
        typename Result_FrozenT::storage_t::op_t(buf_t::lhs, buf_t::rhs))) {}
  template <class, class, class, bool, bool> friend struct Matrix_Fixed_multipled_by_Matrix;
  /// Converting copy from a node with a different frozen result type.
  template <class Result_FrozenT2>
  Matrix_Fixed_multipled_by_Matrix(
      const Matrix_Fixed_multipled_by_Matrix<
        Result_FrozenT2, LHS_T, RHS_T, lhs_buffered, rhs_buffered> &another) noexcept
      : buf_t(another), // use default copy constructor
      Result_FrozenT(typename Result_FrozenT::storage_t(
        buf_t::lhs.rows(), buf_t::rhs.columns(),
        typename Result_FrozenT::storage_t::op_t(buf_t::lhs, buf_t::rhs))) {}

  /* for optimization of scalar multiplication of (M * S) * M, M * (M * S), (M * S) * (M * S) */
  /**
   * Traits mapping (this product node) * scalar onto a scalar node that
   * buffers this node whenever either of its operands was buffered.
   */
  template <class T>
  struct Multiply_Matrix_by_Scalar {
    typedef Matrix_Fixed_multipled_by_Matrix<
        Result_FrozenT, LHS_T, RHS_T, lhs_buffered, rhs_buffered> lhs_t;
    typedef T rhs_t;
    typedef Array2D_Operator_Multiply_by_Scalar<Result_FrozenT, rhs_t> impl_t;
    typedef Matrix_Frozen<T, Array2D_Operator<T, impl_t> > frozen_t;
    typedef Matrix_Fixed_multipled_by_Scalar<frozen_t, lhs_t, rhs_t, lhs_buffered || rhs_buffered> mat_t;
    static mat_t generate(const lhs_t &mat, const rhs_t &scalar) {
      return mat_t(mat, scalar);
    }
  };
};
|
510
|
+
|
511
|
+
/**
 * Product traits for (frozen matrix) * (fixed matrix):
 * buffer only the right-hand fixed operand (rhs_buffered = true).
 */
template <
    class T_L, class Array2D_Type, class ViewType,
    class T_R, int nR_R, int nC_R>
struct Array2D_Operator_Multiply_by_Matrix<
    Matrix_Frozen<T_L, Array2D_Type, ViewType>,
    Matrix_Fixed<T_R, nR_R, nC_R> > {
  typedef Matrix_Frozen<T_L, Array2D_Type, ViewType> lhs_t;
  typedef Matrix_Fixed<T_R, nR_R, nC_R> rhs_t;
  typedef Array2D_Operator_Multiply_by_Matrix<lhs_t, typename rhs_t::frozen_t> op_t;
  typedef Matrix_Frozen<T_L, Array2D_Operator<T_L, op_t> > frozen_t;
  typedef Matrix_Fixed_multipled_by_Matrix<frozen_t, lhs_t, rhs_t, false, true> mat_t;
  static mat_t generate(const lhs_t &lhs, const rhs_t &rhs) noexcept {
    return mat_t(lhs, rhs);
  }
};
|
526
|
+
|
527
|
+
/**
 * Product traits for (fixed matrix) * (frozen matrix):
 * buffer only the left-hand fixed operand (lhs_buffered = true).
 */
template <
    class T_L, int nR_L, int nC_L,
    class T_R, class Array2D_Type, class ViewType>
struct Array2D_Operator_Multiply_by_Matrix<
    Matrix_Fixed<T_L, nR_L, nC_L>,
    Matrix_Frozen<T_R, Array2D_Type, ViewType> > {
  typedef Matrix_Fixed<T_L, nR_L, nC_L> lhs_t;
  typedef Matrix_Frozen<T_R, Array2D_Type, ViewType> rhs_t;
  typedef Array2D_Operator_Multiply_by_Matrix<typename lhs_t::frozen_t, rhs_t> op_t;
  typedef Matrix_Frozen<T_L, Array2D_Operator<T_L, op_t> > frozen_t;
  typedef Matrix_Fixed_multipled_by_Matrix<frozen_t, lhs_t, rhs_t, true, false> mat_t;
  static mat_t generate(const lhs_t &lhs, const rhs_t &rhs) noexcept {
    return mat_t(lhs, rhs);
  }
};
|
542
|
+
|
543
|
+
/**
 * Product traits for (fixed matrix) * (fixed matrix):
 * buffer both operands (lhs_buffered = rhs_buffered = true).
 */
template <
    class T_L, int nR_L, int nC_L,
    class T_R, int nR_R, int nC_R>
struct Array2D_Operator_Multiply_by_Matrix<
    Matrix_Fixed<T_L, nR_L, nC_L>,
    Matrix_Fixed<T_R, nR_R, nC_R> > {
  typedef Matrix_Fixed<T_L, nR_L, nC_L> lhs_t;
  typedef Matrix_Fixed<T_R, nR_R, nC_R> rhs_t;
  typedef Array2D_Operator_Multiply_by_Matrix<
      typename lhs_t::frozen_t, typename rhs_t::frozen_t> op_t;
  typedef Matrix_Frozen<T_L, Array2D_Operator<T_L, op_t> > frozen_t;
  typedef Matrix_Fixed_multipled_by_Matrix<frozen_t, lhs_t, rhs_t, true, true> mat_t;
  static mat_t generate(const lhs_t &lhs, const rhs_t &rhs) noexcept {
    return mat_t(lhs, rhs);
  }
};
|
559
|
+
|
560
|
+
/**
 * A frozen view (Result_FrozenT) bundled with a deep copy of the fixed
 * matrix it views: wrapped_t (the base holding the copy) is inherited
 * first so `buf_t::mat` exists before the view base is constructed over
 * it. This lets a temporary Matrix_Fixed participate safely in lazy
 * expressions (e.g. special views and operator/ below).
 */
template <
    class T, int nR, int nC,
    class Result_FrozenT = typename Matrix_Fixed<T, nR, nC>::frozen_t>
struct Matrix_Fixed_Wrapped
    : protected Matrix_Fixed<T, nR, nC>::wrapped_t,
    public Result_FrozenT {
  typedef typename Matrix_Fixed<T, nR, nC>::wrapped_t buf_t;
  /// Wrap a fixed matrix (deep copy into buf_t::mat).
  Matrix_Fixed_Wrapped(const Matrix_Fixed<T, nR, nC> &mat) noexcept
      : buf_t(mat),
      Result_FrozenT(buf_t::mat) {}
  /// Copy constructor; the view is rebuilt over this node's own copy.
  Matrix_Fixed_Wrapped(const Matrix_Fixed_Wrapped<T, nR, nC, Result_FrozenT> &another) noexcept
      : buf_t(another.buf_t::mat),
      Result_FrozenT(buf_t::mat) {}
};
|
574
|
+
|
575
|
+
// { /* For matrix_special.h */
/// Primary template declared in matrix_special.h; redeclared here for the
/// partial specializations below.
template <class MatrixT, template <class> class ViewType_Special>
struct MatrixBuilderSpecial;

/**
 * Special-view builder for Matrix_Fixed: the special view of a fixed
 * matrix is the special view of its frozen type, wrapped together with
 * a deep copy of the matrix (Matrix_Fixed_Wrapped) so the view's
 * referent stays alive.
 */
template <
    class T, int nR, int nC,
    template <class> class ViewType_Special>
struct MatrixBuilderSpecial<Matrix_Fixed<T, nR, nC>, ViewType_Special>
    : public MatrixBuilderSpecial<
      typename Matrix_Fixed<T, nR, nC>::super_t, ViewType_Special> {
  typedef Matrix_Fixed<T, nR, nC> fixed_t;
  typedef Matrix_Fixed_Wrapped<T, nR, nC,
      typename MatrixBuilderSpecial<
        typename fixed_t::frozen_t, ViewType_Special>::special_t> special_t;
};
|
590
|
+
|
591
|
+
/**
 * Special-view builder for a buffered product node: apply the special
 * view to the node's frozen result type while keeping the same operand
 * buffering.
 */
template <
    class Result_FrozenT, class LHS_T, class RHS_T,
    bool lhs_buffered, bool rhs_buffered,
    template <class> class ViewType_Special>
struct MatrixBuilderSpecial<
    Matrix_Fixed_multipled_by_Matrix<Result_FrozenT, LHS_T, RHS_T, lhs_buffered, rhs_buffered>,
    ViewType_Special>
    : public MatrixBuilderSpecial<Result_FrozenT, ViewType_Special> {
  typedef Matrix_Fixed_multipled_by_Matrix<
      typename MatrixBuilderSpecial<Result_FrozenT, ViewType_Special>::special_t,
      LHS_T, RHS_T, lhs_buffered, rhs_buffered> special_t;
};
|
603
|
+
|
604
|
+
// { /* for operator/(special(Matrix_Fixed)) */
/**
 * Product traits for (frozen matrix) * (wrapped special view of a fixed
 * matrix): the wrapped operand is buffered by value so its internal deep
 * copy travels with the product node.
 */
template <
    class T_L, class Array2D_Type, class ViewType,
    class T_R, int nR, int nC, class Result_FrozenT>
struct Array2D_Operator_Multiply_by_Matrix<
    Matrix_Frozen<T_L, Array2D_Type, ViewType>,
    Matrix_Fixed_Wrapped<T_R, nR, nC, Result_FrozenT> > {
  typedef Matrix_Frozen<T_L, Array2D_Type, ViewType> lhs_t;
  typedef Matrix_Fixed_Wrapped<T_R, nR, nC, Result_FrozenT> rhs_t;
  typedef Array2D_Operator_Multiply_by_Matrix<lhs_t, Result_FrozenT> op_t;
  typedef Matrix_Frozen<T_L, Array2D_Operator<T_L, op_t> > frozen_t;
  typedef Matrix_Fixed_multipled_by_Matrix<frozen_t, lhs_t, rhs_t, false, true> mat_t;
  static mat_t generate(const lhs_t &lhs, const rhs_t &rhs) noexcept {
    return mat_t(lhs, rhs);
  }
};
|
620
|
+
/**
 * MatrixBuilder for a buffered product node: view replacement is
 * delegated to the node's frozen result type, preserving the operand
 * buffering flags.
 */
template <
    class Result_FrozenT, class LHS_T, class RHS_T,
    bool lhs_buffered, bool rhs_buffered>
struct MatrixBuilder<
    Matrix_Fixed_multipled_by_Matrix<Result_FrozenT, LHS_T, RHS_T, lhs_buffered, rhs_buffered> > {
  template <class ViewType>
  struct view_replace_t {
    typedef Matrix_Fixed_multipled_by_Matrix<
        typename MatrixBuilder<Result_FrozenT>::template view_replace_t<ViewType>::replaced_t,
        LHS_T, RHS_T, lhs_buffered, rhs_buffered> replaced_t;
  };
};
// }
|
633
|
+
|
634
|
+
// for friend operator/(scalar, special(Matrix_Fixed))
/**
 * Scalar-multiplication traits for a wrapped fixed matrix: buffer the
 * wrapped operand by value (its internal deep copy keeps the data
 * alive inside the scalar node).
 */
template <class T, int nR, int nC, class Result_FrozenT, class RHS_T>
struct Matrix_multiplied_by_Scalar<Matrix_Fixed_Wrapped<T, nR, nC, Result_FrozenT>, RHS_T> {
  typedef Matrix_Fixed_Wrapped<T, nR, nC, Result_FrozenT> lhs_t;
  typedef RHS_T rhs_t;
  typedef Array2D_Operator_Multiply_by_Scalar<Result_FrozenT, rhs_t> impl_t;
  typedef Matrix_Frozen<T, Array2D_Operator<T, impl_t> > frozen_t;
  typedef Matrix_Fixed_multipled_by_Scalar<frozen_t, lhs_t, rhs_t, true> mat_t;
  static mat_t generate(const lhs_t &mat, const rhs_t &scalar) {
    return mat_t(mat, scalar);
  }
};
|
646
|
+
|
647
|
+
/**
 * Special-view builder for a buffered scalar node: apply the special
 * view to its frozen result type while keeping the lhs buffered.
 */
template <
    class Result_FrozenT, class LHS_T, class RHS_T,
    template <class> class ViewType_Special>
struct MatrixBuilderSpecial<
    Matrix_Fixed_multipled_by_Scalar<Result_FrozenT, LHS_T, RHS_T, true>,
    ViewType_Special>
    : public MatrixBuilderSpecial<Result_FrozenT, ViewType_Special> {
  typedef Matrix_Fixed_multipled_by_Scalar<
      typename MatrixBuilderSpecial<Result_FrozenT, ViewType_Special>::special_t,
      LHS_T, RHS_T, true> special_t;
};
// }
|
659
|
+
|
660
|
+
#undef throws_when_debug
|
661
|
+
#if (__cplusplus < 201103L) && defined(noexcept)
|
662
|
+
#undef noexcept
|
663
|
+
#endif
|
664
|
+
|
665
|
+
#endif /* __MATRIX_FIXED_H__ */
|