tomoto 0.2.3 → 0.3.0

Files changed (347)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +6 -0
  3. data/README.md +8 -10
  4. data/ext/tomoto/extconf.rb +6 -2
  5. data/ext/tomoto/{ext.cpp → tomoto.cpp} +1 -1
  6. data/lib/tomoto/version.rb +1 -1
  7. data/lib/tomoto.rb +5 -1
  8. data/vendor/EigenRand/EigenRand/Core.h +10 -10
  9. data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
  10. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
  11. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
  12. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
  13. data/vendor/EigenRand/EigenRand/EigenRand +11 -6
  14. data/vendor/EigenRand/EigenRand/Macro.h +13 -7
  15. data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
  16. data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
  17. data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
  18. data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
  19. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
  20. data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
  21. data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
  22. data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
  23. data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
  24. data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
  25. data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
  26. data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
  27. data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
  28. data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
  29. data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
  30. data/vendor/EigenRand/EigenRand/doc.h +24 -12
  31. data/vendor/EigenRand/README.md +57 -4
  32. data/vendor/eigen/COPYING.APACHE +203 -0
  33. data/vendor/eigen/COPYING.BSD +1 -1
  34. data/vendor/eigen/COPYING.MINPACK +51 -52
  35. data/vendor/eigen/Eigen/Cholesky +0 -1
  36. data/vendor/eigen/Eigen/Core +112 -265
  37. data/vendor/eigen/Eigen/Eigenvalues +2 -3
  38. data/vendor/eigen/Eigen/Geometry +5 -8
  39. data/vendor/eigen/Eigen/Householder +0 -1
  40. data/vendor/eigen/Eigen/Jacobi +0 -1
  41. data/vendor/eigen/Eigen/KLUSupport +41 -0
  42. data/vendor/eigen/Eigen/LU +2 -5
  43. data/vendor/eigen/Eigen/OrderingMethods +0 -3
  44. data/vendor/eigen/Eigen/PaStiXSupport +1 -0
  45. data/vendor/eigen/Eigen/PardisoSupport +0 -0
  46. data/vendor/eigen/Eigen/QR +2 -3
  47. data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
  48. data/vendor/eigen/Eigen/SVD +0 -1
  49. data/vendor/eigen/Eigen/Sparse +0 -2
  50. data/vendor/eigen/Eigen/SparseCholesky +0 -8
  51. data/vendor/eigen/Eigen/SparseLU +4 -0
  52. data/vendor/eigen/Eigen/SparseQR +0 -1
  53. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
  54. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
  55. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
  56. data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
  57. data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
  58. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
  59. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
  60. data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
  61. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
  62. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
  63. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
  64. data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
  65. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
  66. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
  67. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
  68. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
  69. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
  70. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
  71. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
  72. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
  73. data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
  74. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
  75. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
  76. data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
  77. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
  78. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
  79. data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
  80. data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
  81. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
  82. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
  83. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
  84. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
  85. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
  86. data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
  87. data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
  88. data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
  89. data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
  90. data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
  91. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
  92. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
  93. data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
  94. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
  95. data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
  96. data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
  97. data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
  98. data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  99. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
  100. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
  101. data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
  102. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
  103. data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
  104. data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
  105. data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
  106. data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
  107. data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
  108. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
  109. data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
  110. data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
  111. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
  112. data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
  113. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
  114. data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
  115. data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
  116. data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
  117. data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
  118. data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
  119. data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
  120. data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
  121. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
  122. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
  123. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
  124. data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
  125. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
  126. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
  127. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
  128. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
  129. data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  130. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
  131. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
  132. data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  133. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
  134. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
  135. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  136. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  137. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  138. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
  139. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
  140. data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  141. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
  142. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  143. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
  145. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
  146. data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
  148. data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  149. data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  153. data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  154. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
  155. data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  156. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
  157. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
  158. data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
  160. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
  161. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
  162. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
  163. data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  164. data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  165. data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  166. data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  167. data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  168. data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  169. data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  170. data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  171. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
  172. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
  173. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
  174. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
  175. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
  176. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
  177. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
  178. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
  179. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
  180. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
  181. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
  182. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
  183. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
  184. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
  185. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
  186. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
  187. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
  188. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
  189. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
  190. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
  191. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
  192. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
  193. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
  194. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
  195. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
  196. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
  197. data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  198. data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
  199. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
  200. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
  201. data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  202. data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
  203. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
  204. data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
  205. data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
  206. data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
  207. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
  208. data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  209. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
  210. data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  211. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
  212. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
  213. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
  214. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
  215. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
  216. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
  217. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
  218. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
  219. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
  220. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
  221. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
  222. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
  223. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
  224. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
  225. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
  226. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
  227. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
  228. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
  229. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
  230. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
  231. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
  232. data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
  233. data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
  234. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
  235. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  236. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
  237. data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
  238. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
  239. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
  240. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
  241. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
  242. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
  243. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
  244. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
  245. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
  246. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
  247. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
  248. data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  249. data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
  250. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
  251. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
  252. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
  253. data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
  254. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
  255. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
  256. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
  257. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
  258. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
  259. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
  260. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
  261. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
  262. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
  263. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
  264. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
  265. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
  266. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
  267. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
  268. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
  269. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
  270. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
  271. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
  272. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
  273. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
  274. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
  275. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
  276. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
  277. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
  278. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
  279. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
  280. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
  281. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
  282. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
  283. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
  284. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
  285. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
  286. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
  287. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
  288. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
  289. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
  290. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
  291. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
  292. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
  293. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
  294. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
  295. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
  296. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
  297. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
  298. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
  299. data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
  300. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
  301. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
  302. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
  303. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
  304. data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  305. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
  306. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
  307. data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
  308. data/vendor/eigen/README.md +2 -0
  309. data/vendor/eigen/bench/btl/README +1 -1
  310. data/vendor/eigen/bench/tensors/README +6 -7
  311. data/vendor/eigen/ci/README.md +56 -0
  312. data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
  313. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
  314. data/vendor/eigen/unsupported/README.txt +1 -1
  315. data/vendor/tomotopy/README.kr.rst +21 -0
  316. data/vendor/tomotopy/README.rst +20 -0
  317. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
  318. data/vendor/tomotopy/src/Labeling/Phraser.hpp +1 -1
  319. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +2 -1
  320. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +2 -1
  321. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +1 -1
  322. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
  323. data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
  324. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +53 -2
  325. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +1 -1
  326. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +1 -0
  327. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +2 -2
  328. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +16 -5
  329. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +1 -0
  330. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +1 -0
  331. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +1 -0
  332. data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
  333. data/vendor/tomotopy/src/TopicModel/PTModel.hpp +31 -1
  334. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +2 -2
  335. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +7 -5
  336. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
  337. data/vendor/tomotopy/src/Utils/exception.h +6 -0
  338. data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
  339. data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
  340. metadata +60 -14
  341. data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
  342. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
  343. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
  344. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
  345. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
  346. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
  347. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h:

@@ -201,7 +201,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType

  ~Ref() {
  if(m_hasCopy) {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
  obj->~TPlainObjectType();
  }
  }
@@ -213,7 +213,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
  {
  if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
  {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
  ::new (obj) TPlainObjectType(expr);
  m_hasCopy = true;
  Base::construct(*obj);
@@ -227,14 +227,14 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
  template<typename Expression>
  void construct(const Expression& expr, internal::false_type)
  {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
  ::new (obj) TPlainObjectType(expr);
  m_hasCopy = true;
  Base::construct(*obj);
  }

  protected:
- char m_object_bytes[sizeof(TPlainObjectType)];
+ typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
  bool m_hasCopy;
  };

@@ -319,7 +319,7 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType

  ~Ref() {
  if(m_hasCopy) {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
  obj->~TPlainObjectType();
  }
  }
@@ -335,14 +335,14 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
  template<typename Expression>
  void construct(const Expression& expr, internal::false_type)
  {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
  ::new (obj) TPlainObjectType(expr);
  m_hasCopy = true;
  Base::construct(*obj);
  }

  protected:
- char m_object_bytes[sizeof(TPlainObjectType)];
+ typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
  bool m_hasCopy;
  };

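The SparseRef.h hunks above swap a raw char buffer for internal::aligned_storage, so the object placement-new'd into the buffer is guaranteed to be suitably aligned. Below is a minimal standalone sketch of the same pattern using the standard library's std::aligned_storage instead of Eigen's internal helper; Payload and Holder are illustrative names, not taken from the diff.

#include <new>
#include <type_traits>

// Hypothetical over-aligned payload, standing in for TPlainObjectType in the diff above.
struct alignas(32) Payload {
  double data[4];
};

class Holder {
 public:
  Holder() : m_hasCopy(false) {}

  ~Holder() {
    if (m_hasCopy) {
      // Destroy the object that was placement-new'd into the buffer.
      reinterpret_cast<Payload*>(&m_storage)->~Payload();
    }
  }

  void store(const Payload& p) {
    // Placement new into aligned storage; a plain char array only guarantees
    // alignof(char), which is not enough for an over-aligned Payload.
    ::new (&m_storage) Payload(p);
    m_hasCopy = true;
  }

 private:
  std::aligned_storage<sizeof(Payload), alignof(Payload)>::type m_storage;
  bool m_hasCopy;
};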
data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h:

@@ -142,6 +142,9 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  return *this = src.twistedBy(pnull);
  }

+ // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)
+
  template<typename SrcMatrixType,unsigned int SrcMode>
  SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
  {
@@ -453,7 +456,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
  Index r = it.row();
  Index c = it.col();
  Index ip = perm ? perm[i] : i;
- if(Mode==(Upper|Lower))
+ if(Mode==int(Upper|Lower))
  count[StorageOrderMatch ? jp : ip]++;
  else if(r==c)
  count[ip]++;
@@ -486,7 +489,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
  StorageIndex jp = perm ? perm[j] : j;
  StorageIndex ip = perm ? perm[i] : i;

- if(Mode==(Upper|Lower))
+ if(Mode==int(Upper|Lower))
  {
  Index k = count[StorageOrderMatch ? jp : ip]++;
  dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;

data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h:

@@ -140,6 +140,14 @@ struct SparseSelfAdjointShape { static std::string debugName() { return "SparseS
  template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
  template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };

+ // return type of SparseCompressedBase::lower_bound;
+ struct LowerBoundIndex {
+ LowerBoundIndex() : value(-1), found(false) {}
+ LowerBoundIndex(Index val, bool ok) : value(val), found(ok) {}
+ Index value;
+ bool found;
+ };
+
  } // end namespace internal

  /** \ingroup SparseCore_Module

data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h:

@@ -281,7 +281,7 @@ class SparseVector
  }

  /** Swaps the values of \c *this and \a other.
- * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only.
+ * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
  * \sa SparseMatrixBase::swap()
  */
  inline void swap(SparseVector& other)

data/vendor/eigen/Eigen/src/SparseCore/SparseView.h:

@@ -90,6 +90,7 @@ struct unary_evaluator<SparseView<ArgType>, IteratorBased>

  class InnerIterator : public EvalIterator
  {
+ protected:
  typedef typename XprType::Scalar Scalar;
  public:

data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h:

@@ -18,6 +18,63 @@ template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename
  template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
  template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

+ template <bool Conjugate,class SparseLUType>
+ class SparseLUTransposeView : public SparseSolverBase<SparseLUTransposeView<Conjugate,SparseLUType> >
+ {
+ protected:
+ typedef SparseSolverBase<SparseLUTransposeView<Conjugate,SparseLUType> > APIBase;
+ using APIBase::m_isInitialized;
+ public:
+ typedef typename SparseLUType::Scalar Scalar;
+ typedef typename SparseLUType::StorageIndex StorageIndex;
+ typedef typename SparseLUType::MatrixType MatrixType;
+ typedef typename SparseLUType::OrderingType OrderingType;
+
+ enum {
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ SparseLUTransposeView() : m_sparseLU(NULL) {}
+ SparseLUTransposeView(const SparseLUTransposeView& view) {
+ this->m_sparseLU = view.m_sparseLU;
+ }
+ void setIsInitialized(const bool isInitialized) {this->m_isInitialized = isInitialized;}
+ void setSparseLU(SparseLUType* sparseLU) {m_sparseLU = sparseLU;}
+ using APIBase::_solve_impl;
+ template<typename Rhs, typename Dest>
+ bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
+ {
+ Dest& X(X_base.derived());
+ eigen_assert(m_sparseLU->info() == Success && "The matrix should be factorized first");
+ EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+ THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+
+
+ // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
+ for(Index j = 0; j < B.cols(); ++j){
+ X.col(j) = m_sparseLU->colsPermutation() * B.const_cast_derived().col(j);
+ }
+ //Forward substitution with transposed or adjoint of U
+ m_sparseLU->matrixU().template solveTransposedInPlace<Conjugate>(X);
+
+ //Backward substitution with transposed or adjoint of L
+ m_sparseLU->matrixL().template solveTransposedInPlace<Conjugate>(X);
+
+ // Permute back the solution
+ for (Index j = 0; j < B.cols(); ++j)
+ X.col(j) = m_sparseLU->rowsPermutation().transpose() * X.col(j);
+ return true;
+ }
+ inline Index rows() const { return m_sparseLU->rows(); }
+ inline Index cols() const { return m_sparseLU->cols(); }
+
+ private:
+ SparseLUType *m_sparseLU;
+ SparseLUTransposeView& operator=(const SparseLUTransposeView&);
+ };
+
+
  /** \ingroup SparseLU_Module
  * \class SparseLU
  *
@@ -26,7 +83,7 @@ template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixURetu
  * This class implements the supernodal LU factorization for general matrices.
  * It uses the main techniques from the sequential SuperLU package
  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real
- * and complex arithmetics with single and double precision, depending on the
+ * and complex arithmetic with single and double precision, depending on the
  * scalar type of your input matrix.
  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates.
  * It benefits directly from the built-in high-performant Eigen BLAS routines.
@@ -43,8 +100,8 @@ template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixURetu
  * Simple example with key steps
  * \code
  * VectorXd x(n), b(n);
- * SparseMatrix<double, ColMajor> A;
- * SparseLU<SparseMatrix<scalar, ColMajor>, COLAMDOrdering<Index> > solver;
+ * SparseMatrix<double> A;
+ * SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > solver;
  * // fill A and b;
  * // Compute the ordering permutation vector from the structural pattern of A
  * solver.analyzePattern(A);
@@ -97,6 +154,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
  };

  public:
+
  SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
  {
  initperfvalues();
@@ -128,6 +186,45 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
  //Factorize
  factorize(matrix);
  }
+
+ /** \returns an expression of the transposed of the factored matrix.
+ *
+ * A typical usage is to solve for the transposed problem A^T x = b:
+ * \code
+ * solver.compute(A);
+ * x = solver.transpose().solve(b);
+ * \endcode
+ *
+ * \sa adjoint(), solve()
+ */
+ const SparseLUTransposeView<false,SparseLU<_MatrixType,_OrderingType> > transpose()
+ {
+ SparseLUTransposeView<false, SparseLU<_MatrixType,_OrderingType> > transposeView;
+ transposeView.setSparseLU(this);
+ transposeView.setIsInitialized(this->m_isInitialized);
+ return transposeView;
+ }
+
+
+ /** \returns an expression of the adjoint of the factored matrix
+ *
+ * A typical usage is to solve for the adjoint problem A' x = b:
+ * \code
+ * solver.compute(A);
+ * x = solver.adjoint().solve(b);
+ * \endcode
+ *
+ * For real scalar types, this function is equivalent to transpose().
+ *
+ * \sa transpose(), solve()
+ */
+ const SparseLUTransposeView<true, SparseLU<_MatrixType,_OrderingType> > adjoint()
+ {
+ SparseLUTransposeView<true, SparseLU<_MatrixType,_OrderingType> > adjointView;
+ adjointView.setSparseLU(this);
+ adjointView.setIsInitialized(this->m_isInitialized);
+ return adjointView;
+ }

  inline Index rows() const { return m_mat.rows(); }
  inline Index cols() const { return m_mat.cols(); }
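The hunk above is the headline SparseLU change in this Eigen update: transpose() and adjoint() views that reuse an existing factorization to solve A^T x = b and A* x = b. Expanding the doc-comment example from the diff into a self-contained sketch; the 3x3 matrix, its values, and the variable names are toy placeholders, while SparseLU, COLAMDOrdering, transpose() and adjoint() are the APIs shown in the hunk:

#include <Eigen/Sparse>

int main() {
  using SpMat = Eigen::SparseMatrix<double>;
  const int n = 3;

  // Toy upper-triangular system; in practice A and b come from the application.
  SpMat A(n, n);
  A.insert(0, 0) = 4.0; A.insert(0, 1) = 1.0;
  A.insert(1, 1) = 3.0; A.insert(1, 2) = 1.0;
  A.insert(2, 2) = 2.0;
  A.makeCompressed();

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > solver;
  solver.compute(A);                                  // analyzePattern + factorize
  Eigen::VectorXd x  = solver.solve(b);               // solves A x = b
  Eigen::VectorXd xt = solver.transpose().solve(b);   // solves A^T x = b (new in this release)
  Eigen::VectorXd xa = solver.adjoint().solve(b);     // same as transpose() for real scalars
  return 0;
}

Both views forward to the new solveTransposedInPlace() methods that appear further down in this diff.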
data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h (continued):

@@ -193,7 +290,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,

  /** \brief Reports whether previous computation was successful.
  *
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
  * \c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance
  * \c InvalidInput if the input matrix is invalid
  *
@@ -355,6 +452,9 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
  return (m_detPermR * m_detPermC) > 0 ? det : -det;
  }

+ Index nnzL() const { return m_nnzL; };
+ Index nnzU() const { return m_nnzU; };
+
  protected:
  // Functions
  void initperfvalues()
@@ -391,7 +491,6 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
  private:
  // Disable copy constructor
  SparseLU (const SparseLU& );
-
  }; // End class SparseLU


@@ -501,7 +600,6 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)

  m_isInitialized = true;

-
  // Apply the column permutation computed in analyzepattern()
  // m_mat = matrix * m_perm_c.inverse();
  m_mat = matrix;
@@ -585,7 +683,6 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
  // (a) a relaxed supernode at the bottom of the etree, or
  // (b) panel_size contiguous columns, <panel_size> defined by the user
  Index jcol;
- IndexVector panel_histo(n);
  Index pivrow; // Pivotal row number in the original row matrix
  Index nseg1; // Number of segments in U-column above panel row jcol
  Index nseg; // Number of segments in each U-column
@@ -704,13 +801,19 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator
  typedef typename MappedSupernodalType::Scalar Scalar;
  explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
  { }
- Index rows() { return m_mapL.rows(); }
- Index cols() { return m_mapL.cols(); }
+ Index rows() const { return m_mapL.rows(); }
+ Index cols() const { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace( MatrixBase<Dest> &X) const
  {
  m_mapL.solveInPlace(X);
  }
+ template<bool Conjugate, typename Dest>
+ void solveTransposedInPlace( MatrixBase<Dest> &X) const
+ {
+ m_mapL.template solveTransposedInPlace<Conjugate>(X);
+ }
+
  const MappedSupernodalType& m_mapL;
  };

@@ -721,8 +824,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
  : m_mapL(mapL),m_mapU(mapU)
  { }
- Index rows() { return m_mapL.rows(); }
- Index cols() { return m_mapL.cols(); }
+ Index rows() const { return m_mapL.rows(); }
+ Index cols() const { return m_mapL.cols(); }

  template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
  {
@@ -745,8 +848,9 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
  }
  else
  {
+ // FIXME: the following lines should use Block expressions and not Map!
  Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
- Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X.coeffRef(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
  U = A.template triangularView<Upper>().solve(U);
  }

@@ -764,6 +868,52 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
  }
  } // End For U-solve
  }
+
+ template<bool Conjugate, typename Dest> void solveTransposedInPlace(MatrixBase<Dest> &X) const
+ {
+ using numext::conj;
+ Index nrhs = X.cols();
+ Index n = X.rows();
+ // Forward solve with U
+ for (Index k = 0; k <= m_mapL.nsuper(); k++)
+ {
+ Index fsupc = m_mapL.supToCol()[k];
+ Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
+ Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
+ Index luptr = m_mapL.colIndexPtr()[fsupc];
+
+ for (Index j = 0; j < nrhs; ++j)
+ {
+ for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
+ {
+ typename MatrixUType::InnerIterator it(m_mapU, jcol);
+ for ( ; it; ++it)
+ {
+ Index irow = it.index();
+ X(jcol, j) -= X(irow, j) * (Conjugate? conj(it.value()): it.value());
+ }
+ }
+ }
+ if (nsupc == 1)
+ {
+ for (Index j = 0; j < nrhs; j++)
+ {
+ X(fsupc, j) /= (Conjugate? conj(m_mapL.valuePtr()[luptr]) : m_mapL.valuePtr()[luptr]);
+ }
+ }
+ else
+ {
+ Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ if(Conjugate)
+ U = A.adjoint().template triangularView<Lower>().solve(U);
+ else
+ U = A.transpose().template triangularView<Lower>().solve(U);
+ }
+ }// End For U-solve
+ }
+
+
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
  };
data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h:

@@ -51,7 +51,7 @@ inline Index LUTempSpace(Index&m, Index& w)


  /**
- * Expand the existing storage to accomodate more fill-ins
+ * Expand the existing storage to accommodate more fill-ins
  * \param vec Valid pointer to the vector to allocate or expand
  * \param[in,out] length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector
  * \param[in] nbElts Current number of elements in the factors

data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h:

@@ -75,12 +75,12 @@ class MappedSuperNodalMatrix
  /**
  * Number of rows
  */
- Index rows() { return m_row; }
+ Index rows() const { return m_row; }

  /**
  * Number of columns
  */
- Index cols() { return m_col; }
+ Index cols() const { return m_col; }

  /**
  * Return the array of nonzero values packed by column
@@ -156,6 +156,9 @@ class MappedSuperNodalMatrix
  class InnerIterator;
  template<typename Dest>
  void solveInPlace( MatrixBase<Dest>&X) const;
+ template<bool Conjugate, typename Dest>
+ void solveTransposedInPlace( MatrixBase<Dest>&X) const;
+



@@ -294,6 +297,77 @@ void MappedSuperNodalMatrix<Scalar,Index_>::solveInPlace( MatrixBase<Dest>&X) co
  }
  }

+ template<typename Scalar, typename Index_>
+ template<bool Conjugate, typename Dest>
+ void MappedSuperNodalMatrix<Scalar,Index_>::solveTransposedInPlace( MatrixBase<Dest>&X) const
+ {
+ using numext::conj;
+ Index n = int(X.rows());
+ Index nrhs = Index(X.cols());
+ const Scalar * Lval = valuePtr(); // Nonzero values
+ Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor> work(n, nrhs); // working vector
+ work.setZero();
+ for (Index k = nsuper(); k >= 0; k--)
+ {
+ Index fsupc = supToCol()[k]; // First column of the current supernode
+ Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column
+ Index nsupr = rowIndexPtr()[fsupc+1] - istart; // Number of rows in the current supernode
+ Index nsupc = supToCol()[k+1] - fsupc; // Number of columns in the current supernode
+ Index nrow = nsupr - nsupc; // Number of rows in the non-diagonal part of the supernode
+ Index irow; //Current index row
+
+ if (nsupc == 1 )
+ {
+ for (Index j = 0; j < nrhs; j++)
+ {
+ InnerIterator it(*this, fsupc);
+ ++it; // Skip the diagonal element
+ for (; it; ++it)
+ {
+ irow = it.row();
+ X(fsupc,j) -= X(irow, j) * (Conjugate?conj(it.value()):it.value());
+ }
+ }
+ }
+ else
+ {
+ // The supernode has more than one column
+ Index luptr = colIndexPtr()[fsupc];
+ Index lda = colIndexPtr()[fsupc+1] - luptr;
+
+ //Begin Gather
+ for (Index j = 0; j < nrhs; j++)
+ {
+ Index iptr = istart + nsupc;
+ for (Index i = 0; i < nrow; i++)
+ {
+ irow = rowIndex()[iptr];
+ work.topRows(nrow)(i,j)= X(irow,j); // Gather operation
+ iptr++;
+ }
+ }
+
+ // Matrix-vector product with transposed submatrix
+ Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ if(Conjugate)
+ U = U - A.adjoint() * work.topRows(nrow);
+ else
+ U = U - A.transpose() * work.topRows(nrow);
+
+ // Triangular solve (of transposed diagonal block)
+ new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ if(Conjugate)
+ U = A.adjoint().template triangularView<UnitUpper>().solve(U);
+ else
+ U = A.transpose().template triangularView<UnitUpper>().solve(U);
+
+ }
+
+ }
+ }
+
+
  } // end namespace internal

  } // end namespace Eigen

data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h:

@@ -151,7 +151,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
  StorageIndex ito = glu.xlsub(fsupc+1);
  glu.xlsub(jcolm1) = ito;
  StorageIndex istop = ito + jptr - jm1ptr;
- xprune(jcolm1) = istop; // intialize xprune(jcol-1)
+ xprune(jcolm1) = istop; // initialize xprune(jcol-1)
  glu.xlsub(jcol) = istop;

  for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
@@ -166,7 +166,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
  // Tidy up the pointers before exit
  glu.xsup(nsuper+1) = jcolp1;
  glu.supno(jcolp1) = nsuper;
- xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning
+ xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
  glu.xlsub(jcolp1) = StorageIndex(nextl);

  return 0;

data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h:

@@ -215,7 +215,7 @@ void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const
  if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
  pstore(C0+i+(I)*PacketSize, c0);

- // agressive vectorization and peeling
+ // aggressive vectorization and peeling
  for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
  {
  EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");

data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h:

@@ -38,7 +38,7 @@ namespace internal {
  * \brief Performs numeric block updates (sup-panel) in topological order.
  *
  * Before entering this routine, the original nonzeros in the panel
- * were already copied i nto the spa[m,w]
+ * were already copied into the spa[m,w]
  *
  * \param m number of rows in the matrix
  * \param w Panel size
data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h:

@@ -41,15 +41,16 @@ namespace internal {
  /**
  * \ingroup SparseQR_Module
  * \class SparseQR
- * \brief Sparse left-looking rank-revealing QR factorization
+ * \brief Sparse left-looking QR factorization with numerical column pivoting
  *
- * This class implements a left-looking rank-revealing QR decomposition
- * of sparse matrices. When a column has a norm less than a given tolerance
+ * This class implements a left-looking QR decomposition of sparse matrices
+ * with numerical column pivoting.
+ * When a column has a norm less than a given tolerance
  * it is implicitly permuted to the end. The QR factorization thus obtained is
  * given by A*P = Q*R where R is upper triangular or trapezoidal.
  *
  * P is the column permutation which is the product of the fill-reducing and the
- * rank-revealing permutations. Use colsPermutation() to get it.
+ * numerical permutations. Use colsPermutation() to get it.
  *
  * Q is the orthogonal matrix represented as products of Householder reflectors.
  * Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint.
@@ -64,6 +65,17 @@ namespace internal {
  *
  * \implsparsesolverconcept
  *
+ * The numerical pivoting strategy and default threshold are the same as in SuiteSparse QR, and
+ * detailed in the following paper:
+ * <i>
+ * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+ * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011.
+ * </i>
+ * Even though it is qualified as "rank-revealing", this strategy might fail for some
+ * rank deficient problems. When this class is used to solve linear or least-square problems
+ * it is thus strongly recommended to check the accuracy of the computed solution. If it
+ * failed, it usually helps to increase the threshold with setPivotThreshold.
+ *
  * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
  * \warning For complex matrices matrixQ().transpose() will actually return the adjoint matrix.
  *
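The reworded SparseQR documentation above recommends checking the accuracy of the computed solution and, if it is poor, raising the pivot threshold via setPivotThreshold(). A hedged sketch of that workflow; the helper name solve_least_squares and the tolerance values are illustrative, not from the diff:

#include <Eigen/Sparse>

// Solve a sparse least-squares problem and inspect the residual,
// as the updated SparseQR documentation above suggests.
Eigen::VectorXd solve_least_squares(const Eigen::SparseMatrix<double>& A,
                                    const Eigen::VectorXd& b) {
  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
  qr.setPivotThreshold(1e-8);   // raise this if the accuracy check below fails
  qr.compute(A);                // A must already be in compressed mode
  Eigen::VectorXd x = qr.solve(b);

  // Accuracy check: for rank-deficient problems the factorization may
  // silently produce a poor solution, so verify the residual explicitly.
  double residual = (A * x - b).norm();
  if (residual > 1e-6 * b.norm()) {
    // Consider increasing the pivot threshold and re-factorizing.
  }
  return x;
}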
data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h (continued):

@@ -331,7 +343,7 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
  m_R.resize(m, n);
  m_Q.resize(m, diagSize);

- // Allocate space for nonzero elements : rough estimation
+ // Allocate space for nonzero elements: rough estimation
  m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
  m_Q.reserve(2*mat.nonZeros());
  m_hcoeffs.resize(diagSize);
@@ -640,7 +652,8 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
  // Compute res = Q * other column by column
  for(Index j = 0; j < res.cols(); j++)
  {
- for (Index k = diagSize-1; k >=0; k--)
+ Index start_k = internal::is_identity<Derived>::value ? numext::mini(j,diagSize-1) : diagSize-1;
+ for (Index k = start_k; k >=0; k--)
  {
  Scalar tau = Scalar(0);
  tau = m_qr.m_Q.col(k).dot(res.col(j));

data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h:

@@ -36,7 +36,7 @@ namespace std \
  deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
  deque(const deque& c) : deque_base(c) {} \
  explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
- deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque(iterator start_, iterator end_) : deque_base(start_, end_) {} \
  deque& operator=(const deque& x) { \
  deque_base::operator=(x); \
  return *this; \
@@ -62,7 +62,7 @@ namespace std {
  : deque_base(first, last, a) {} \
  deque(const deque& c) : deque_base(c) {} \
  explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
- deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque(iterator start_, iterator end_) : deque_base(start_, end_) {} \
  deque& operator=(const deque& x) { \
  deque_base::operator=(x); \
  return *this; \
@@ -98,17 +98,7 @@ namespace std {
  { return deque_base::insert(position,x); }
  void insert(const_iterator position, size_type new_size, const value_type& x)
  { deque_base::insert(position, new_size, x); }
- #elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2)
- // workaround GCC std::deque implementation
- void resize(size_type new_size, const value_type& x)
- {
- if (new_size < deque_base::size())
- deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
- else
- deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
- }
  #else
- // either GCC 4.1 or non-GCC
  // default implementation which should always work.
  void resize(size_type new_size, const value_type& x)
  {

data/vendor/eigen/Eigen/src/StlSupport/StdList.h:

@@ -35,7 +35,7 @@ namespace std \
  list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
  list(const list& c) : list_base(c) {} \
  explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
- list(iterator start, iterator end) : list_base(start, end) {} \
+ list(iterator start_, iterator end_) : list_base(start_, end_) {} \
  list& operator=(const list& x) { \
  list_base::operator=(x); \
  return *this; \
@@ -62,7 +62,7 @@ namespace std
  : list_base(first, last, a) {} \
  list(const list& c) : list_base(c) {} \
  explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
- list(iterator start, iterator end) : list_base(start, end) {} \
+ list(iterator start_, iterator end_) : list_base(start_, end_) {} \
  list& operator=(const list& x) { \
  list_base::operator=(x); \
  return *this; \