tomoto 0.2.2 → 0.3.0

Files changed (369)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/README.md +8 -10
  4. data/ext/tomoto/ct.cpp +11 -11
  5. data/ext/tomoto/dmr.cpp +14 -13
  6. data/ext/tomoto/dt.cpp +14 -14
  7. data/ext/tomoto/extconf.rb +7 -5
  8. data/ext/tomoto/gdmr.cpp +7 -7
  9. data/ext/tomoto/hdp.cpp +9 -9
  10. data/ext/tomoto/hlda.cpp +13 -13
  11. data/ext/tomoto/hpa.cpp +5 -5
  12. data/ext/tomoto/lda.cpp +42 -39
  13. data/ext/tomoto/llda.cpp +6 -6
  14. data/ext/tomoto/mglda.cpp +15 -15
  15. data/ext/tomoto/pa.cpp +6 -6
  16. data/ext/tomoto/plda.cpp +6 -6
  17. data/ext/tomoto/slda.cpp +8 -8
  18. data/ext/tomoto/{ext.cpp → tomoto.cpp} +8 -8
  19. data/ext/tomoto/utils.h +16 -70
  20. data/lib/tomoto/version.rb +1 -1
  21. data/lib/tomoto.rb +5 -1
  22. data/vendor/EigenRand/EigenRand/Core.h +10 -10
  23. data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
  24. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
  25. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
  26. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
  27. data/vendor/EigenRand/EigenRand/EigenRand +11 -6
  28. data/vendor/EigenRand/EigenRand/Macro.h +13 -7
  29. data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
  30. data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
  31. data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
  32. data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
  33. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
  34. data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
  35. data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
  36. data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
  37. data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
  38. data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
  39. data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
  40. data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
  41. data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
  42. data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
  43. data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
  44. data/vendor/EigenRand/EigenRand/doc.h +24 -12
  45. data/vendor/EigenRand/README.md +57 -4
  46. data/vendor/eigen/COPYING.APACHE +203 -0
  47. data/vendor/eigen/COPYING.BSD +1 -1
  48. data/vendor/eigen/COPYING.MINPACK +51 -52
  49. data/vendor/eigen/Eigen/Cholesky +0 -1
  50. data/vendor/eigen/Eigen/Core +112 -265
  51. data/vendor/eigen/Eigen/Eigenvalues +2 -3
  52. data/vendor/eigen/Eigen/Geometry +5 -8
  53. data/vendor/eigen/Eigen/Householder +0 -1
  54. data/vendor/eigen/Eigen/Jacobi +0 -1
  55. data/vendor/eigen/Eigen/KLUSupport +41 -0
  56. data/vendor/eigen/Eigen/LU +2 -5
  57. data/vendor/eigen/Eigen/OrderingMethods +0 -3
  58. data/vendor/eigen/Eigen/PaStiXSupport +1 -0
  59. data/vendor/eigen/Eigen/PardisoSupport +0 -0
  60. data/vendor/eigen/Eigen/QR +2 -3
  61. data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
  62. data/vendor/eigen/Eigen/SVD +0 -1
  63. data/vendor/eigen/Eigen/Sparse +0 -2
  64. data/vendor/eigen/Eigen/SparseCholesky +0 -8
  65. data/vendor/eigen/Eigen/SparseLU +4 -0
  66. data/vendor/eigen/Eigen/SparseQR +0 -1
  67. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
  68. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
  69. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
  70. data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
  71. data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
  72. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
  73. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
  74. data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
  75. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
  76. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
  77. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
  78. data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
  79. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
  80. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
  81. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
  82. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
  83. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
  84. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
  85. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
  86. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
  87. data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
  88. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
  89. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
  90. data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
  91. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
  92. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
  93. data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
  94. data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
  95. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
  96. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
  97. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
  98. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
  99. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
  100. data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
  101. data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
  102. data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
  103. data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
  104. data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
  105. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
  106. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
  107. data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
  108. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
  109. data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
  110. data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
  111. data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
  112. data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  113. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
  114. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
  115. data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
  116. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
  117. data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
  118. data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
  119. data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
  120. data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
  121. data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
  122. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
  123. data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
  124. data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
  125. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
  126. data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
  127. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
  128. data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
  129. data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
  130. data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
  131. data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
  132. data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
  133. data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
  134. data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
  135. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
  136. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
  137. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
  138. data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
  139. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
  140. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
  141. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
  142. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
  143. data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
  145. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
  146. data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
  148. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
  149. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
  153. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
  154. data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  155. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
  156. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  157. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  158. data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
  160. data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  161. data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
  162. data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  163. data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  164. data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  165. data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  166. data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  167. data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  168. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
  169. data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  170. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
  171. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
  172. data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  173. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
  174. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
  175. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
  176. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
  177. data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  178. data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  179. data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  180. data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  181. data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  182. data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  183. data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  184. data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  185. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
  186. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
  187. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
  188. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
  189. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
  190. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
  191. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
  192. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
  193. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
  194. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
  195. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
  196. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
  197. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
  198. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
  199. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
  200. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
  201. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
  202. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
  203. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
  204. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
  205. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
  206. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
  207. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
  208. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
  209. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
  210. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
  211. data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  212. data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
  213. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
  214. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
  215. data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  216. data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
  217. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
  218. data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
  219. data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
  220. data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
  221. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
  222. data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  223. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
  224. data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  225. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
  226. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
  227. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
  228. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
  229. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
  230. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
  231. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
  232. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
  233. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
  234. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
  235. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
  236. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
  237. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
  238. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
  239. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
  240. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
  241. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
  242. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
  243. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
  244. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
  245. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
  246. data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
  247. data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
  248. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
  249. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  250. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
  251. data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
  252. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
  253. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
  254. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
  255. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
  256. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
  257. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
  258. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
  259. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
  260. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
  261. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
  262. data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  263. data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
  264. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
  265. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
  266. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
  267. data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
  268. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
  269. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
  270. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
  271. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
  272. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
  273. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
  274. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
  275. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
  276. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
  277. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
  278. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
  279. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
  280. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
  281. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
  282. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
  283. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
  284. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
  285. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
  286. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
  287. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
  288. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
  289. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
  290. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
  291. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
  292. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
  293. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
  294. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
  295. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
  296. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
  297. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
  298. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
  299. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
  300. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
  301. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
  302. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
  303. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
  304. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
  305. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
  306. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
  307. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
  308. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
  309. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
  310. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
  311. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
  312. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
  313. data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
  314. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
  315. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
  316. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
  317. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
  318. data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  319. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
  320. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
  321. data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
  322. data/vendor/eigen/README.md +2 -0
  323. data/vendor/eigen/bench/btl/README +1 -1
  324. data/vendor/eigen/bench/tensors/README +6 -7
  325. data/vendor/eigen/ci/README.md +56 -0
  326. data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
  327. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
  328. data/vendor/eigen/unsupported/README.txt +1 -1
  329. data/vendor/tomotopy/README.kr.rst +78 -0
  330. data/vendor/tomotopy/README.rst +75 -0
  331. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
  332. data/vendor/tomotopy/src/Labeling/Phraser.hpp +4 -4
  333. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +7 -3
  334. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +7 -3
  335. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +6 -3
  336. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
  337. data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
  338. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +57 -6
  339. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +6 -3
  340. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +3 -2
  341. data/vendor/tomotopy/src/TopicModel/LDA.h +3 -3
  342. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +5 -5
  343. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +50 -19
  344. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +6 -2
  345. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +3 -2
  346. data/vendor/tomotopy/src/TopicModel/PAModel.hpp +1 -1
  347. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +6 -2
  348. data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
  349. data/vendor/tomotopy/src/TopicModel/PTModel.hpp +36 -3
  350. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +6 -3
  351. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +55 -26
  352. data/vendor/tomotopy/src/Utils/AliasMethod.hpp +5 -4
  353. data/vendor/tomotopy/src/Utils/Dictionary.h +2 -2
  354. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
  355. data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +1 -1
  356. data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +1 -1
  357. data/vendor/tomotopy/src/Utils/exception.h +6 -0
  358. data/vendor/tomotopy/src/Utils/math.h +2 -2
  359. data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
  360. data/vendor/tomotopy/src/Utils/serializer.hpp +30 -5
  361. data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
  362. metadata +64 -18
  363. data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
  364. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
  365. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
  366. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
  367. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
  368. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
  369. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22

@@ -17,6 +17,18 @@
  #define EIGEN_SVDBASE_H
 
  namespace Eigen {
+
+ namespace internal {
+ template<typename Derived> struct traits<SVDBase<Derived> >
+ : traits<Derived>
+ {
+ typedef MatrixXpr XprKind;
+ typedef SolverStorage StorageKind;
+ typedef int StorageIndex;
+ enum { Flags = 0 };
+ };
+ }
+
  /** \ingroup SVD_Module
  *
  *
@@ -39,20 +51,26 @@ namespace Eigen {
  * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
  * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
  * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * The status of the computation can be retrived using the \a info() method. Unless \a info() returns \a Success, the results should be not
+ * considered well defined.
  *
- * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, and \a info() will return \a InvalidInput, but the computation is guaranteed to
  * terminate in finite (and reasonable) time.
  * \sa class BDCSVD, class JacobiSVD
  */
- template<typename Derived>
- class SVDBase
+ template<typename Derived> class SVDBase
+ : public SolverBase<SVDBase<Derived> >
  {
+ public:
+
+ template<typename Derived_>
+ friend struct internal::solve_assertion;
 
- public:
  typedef typename internal::traits<Derived>::MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef typename MatrixType::StorageIndex StorageIndex;
+ typedef typename Eigen::internal::traits<SVDBase>::StorageIndex StorageIndex;
  typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
  enum {
  RowsAtCompileTime = MatrixType::RowsAtCompileTime,
@@ -82,7 +100,7 @@ public:
  */
  const MatrixUType& matrixU() const
  {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
  eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
  return m_matrixU;
  }
@@ -98,7 +116,7 @@ public:
  */
  const MatrixVType& matrixV() const
  {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
  eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
  return m_matrixV;
  }
@@ -110,14 +128,14 @@ public:
  */
  const SingularValuesType& singularValues() const
  {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
  return m_singularValues;
  }
 
  /** \returns the number of singular values that are not exactly 0 */
  Index nonzeroSingularValues() const
  {
- eigen_assert(m_isInitialized && "SVD is not initialized.");
+ _check_compute_assertions();
  return m_nonzeroSingularValues;
  }
 
@@ -130,7 +148,7 @@ public:
  inline Index rank() const
  {
  using std::abs;
- eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ _check_compute_assertions();
  if(m_singularValues.size()==0) return 0;
  RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
  Index i = m_nonzeroSingularValues-1;
@@ -183,7 +201,7 @@ public:
  // this temporary is needed to workaround a MSVC issue
  Index diagSize = (std::max<Index>)(1,m_diagSize);
  return m_usePrescribedThreshold ? m_prescribedThreshold
- : diagSize*NumTraits<Scalar>::epsilon();
+ : RealScalar(diagSize)*NumTraits<Scalar>::epsilon();
  }
 
  /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
@@ -194,6 +212,7 @@ public:
  inline Index rows() const { return m_rows; }
  inline Index cols() const { return m_cols; }
 
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
  /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
  *
  * \param b the right-hand-side of the equation to solve.
@@ -205,32 +224,55 @@ public:
  */
  template<typename Rhs>
  inline const Solve<Derived, Rhs>
- solve(const MatrixBase<Rhs>& b) const
+ solve(const MatrixBase<Rhs>& b) const;
+ #endif
+
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful.
+ */
+ EIGEN_DEVICE_FUNC
+ ComputationInfo info() const
  {
  eigen_assert(m_isInitialized && "SVD is not initialized.");
- eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
- return Solve<Derived, Rhs>(derived(), b.derived());
+ return m_info;
  }
-
+
  #ifndef EIGEN_PARSED_BY_DOXYGEN
  template<typename RhsType, typename DstType>
- EIGEN_DEVICE_FUNC
  void _solve_impl(const RhsType &rhs, DstType &dst) const;
+
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
  #endif
 
  protected:
-
+
  static void check_template_parameters()
  {
  EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
  }
-
+
+ void _check_compute_assertions() const {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ }
+
+ template<bool Transpose_, typename Rhs>
+ void _check_solve_assertion(const Rhs& b) const {
+ EIGEN_ONLY_USED_FOR_DEBUG(b);
+ _check_compute_assertions();
+ eigen_assert(computeU() && computeV() && "SVDBase::solve(): Both unitaries U and V are required to be computed (thin unitaries suffice).");
+ eigen_assert((Transpose_?cols():rows())==b.rows() && "SVDBase::solve(): invalid number of rows of the right hand side matrix b");
+ }
+
  // return true if already allocated
  bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
 
  MatrixUType m_matrixU;
  MatrixVType m_matrixV;
  SingularValuesType m_singularValues;
+ ComputationInfo m_info;
  bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
  bool m_computeFullU, m_computeThinU;
  bool m_computeFullV, m_computeThinV;
@@ -243,9 +285,14 @@ protected:
  * Default constructor of SVDBase
  */
  SVDBase()
- : m_isInitialized(false),
+ : m_info(Success),
+ m_isInitialized(false),
  m_isAllocated(false),
  m_usePrescribedThreshold(false),
+ m_computeFullU(false),
+ m_computeThinU(false),
+ m_computeFullV(false),
+ m_computeThinV(false),
  m_computationOptions(0),
  m_rows(-1), m_cols(-1), m_diagSize(0)
  {
@@ -260,17 +307,30 @@ template<typename Derived>
  template<typename RhsType, typename DstType>
  void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
  {
- eigen_assert(rhs.rows() == rows());
-
  // A = U S V^*
  // So A^{-1} = V S^{-1} U^*
 
- Matrix<Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
+ Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
  Index l_rank = rank();
  tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
  tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
  dst = m_matrixV.leftCols(l_rank) * tmp;
  }
+
+ template<typename Derived>
+ template<bool Conjugate, typename RhsType, typename DstType>
+ void SVDBase<Derived>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+ {
+ // A = U S V^*
+ // So A^{-*} = U S^{-1} V^*
+ // And A^{-T} = U_conj S^{-1} V^T
+ Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
+ Index l_rank = rank();
+
+ tmp.noalias() = m_matrixV.leftCols(l_rank).transpose().template conjugateIf<Conjugate>() * rhs;
+ tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
+ dst = m_matrixU.template conjugateIf<!Conjugate>().leftCols(l_rank) * tmp;
+ }
  #endif
 
  template<typename MatrixType>
@@ -288,6 +348,7 @@ bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computat
 
  m_rows = rows;
  m_cols = cols;
+ m_info = Success;
  m_isInitialized = false;
  m_isAllocated = true;
  m_computationOptions = computationOptions;
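The SVDBase hunks above add an info() status query and move the solve() checks into _check_solve_assertion(). A minimal sketch of how the updated decomposition API is typically used; this is illustrative only (BDCSVD, the random matrices, and the printed quantities are assumptions, not part of this diff):

// Illustrative sketch (not from the diff): exercising the updated SVDBase API.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);

  // Thin U and V are all you need for least-squares solving (see the doc comment above).
  Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

  // New in this vendored Eigen: query the computation status before trusting the results.
  if (svd.info() != Eigen::Success) {
    std::cerr << "SVD did not succeed (e.g. inf/nan input)\n";
    return 1;
  }

  Eigen::VectorXd x = svd.solve(b);  // solve() asserts that both U and V were computed
  std::cout << "rank: " << svd.rank() << ", residual: " << (A * x - b).norm() << "\n";
  return 0;
}
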
data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3

@@ -127,7 +127,7 @@ void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
  .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
  // apply householder transform to remaining part of mat on the left
  mat.bottomRightCorner(remainingRows-1, remainingCols)
- .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
+ .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData);
  }
  }
 
@@ -202,7 +202,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
  {
  SubColumnType y_k( Y.col(k).tail(remainingCols) );
 
- // let's use the begining of column k of Y as a temporary vector
+ // let's use the beginning of column k of Y as a temporary vector
  SubColumnType tmp( Y.col(k).head(k) );
  y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
  tmp.noalias() = V_k1.adjoint() * v_k;
@@ -231,7 +231,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
  {
  SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
 
- // let's use the begining of column k of X as a temporary vectors
+ // let's use the beginning of column k of X as a temporary vectors
  // note that tmp0 and tmp1 overlaps
  SubColumnType tmp0 ( X.col(k).head(k) ),
  tmp1 ( X.col(k).head(k+1) );
data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9

@@ -80,11 +80,19 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
 
  /** Default constructor */
  SimplicialCholeskyBase()
- : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+ : m_info(Success),
+ m_factorizationIsOk(false),
+ m_analysisIsOk(false),
+ m_shiftOffset(0),
+ m_shiftScale(1)
  {}
 
  explicit SimplicialCholeskyBase(const MatrixType& matrix)
- : m_info(Success), m_shiftOffset(0), m_shiftScale(1)
+ : m_info(Success),
+ m_factorizationIsOk(false),
+ m_analysisIsOk(false),
+ m_shiftOffset(0),
+ m_shiftScale(1)
  {
  derived().compute(matrix);
  }
@@ -101,7 +109,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
 
  /** \brief Reports whether previous computation was successful.
  *
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
  * \c NumericalIssue if the matrix.appears to be negative.
  */
  ComputationInfo info() const
@@ -210,7 +218,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
  CholMatrixType tmp(size,size);
  ConstCholMatrixPtr pmat;
 
- if(m_P.size()==0 && (UpLo&Upper)==Upper)
+ if(m_P.size() == 0 && (int(UpLo) & int(Upper)) == Upper)
  {
  // If there is no ordering, try to directly use the input matrix without any copy
  internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, tmp);
@@ -279,8 +287,8 @@ template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<Simp
  typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
  typedef TriangularView<const CholMatrixType, Eigen::Lower> MatrixL;
  typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
- static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
- static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
+ static inline MatrixL getL(const CholMatrixType& m) { return MatrixL(m); }
+ static inline MatrixU getU(const CholMatrixType& m) { return MatrixU(m.adjoint()); }
  };
 
  template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
@@ -293,8 +301,8 @@ template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<Simpl
  typedef SparseMatrix<Scalar, ColMajor, StorageIndex> CholMatrixType;
  typedef TriangularView<const CholMatrixType, Eigen::UnitLower> MatrixL;
  typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
- static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
- static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
+ static inline MatrixL getL(const CholMatrixType& m) { return MatrixL(m); }
+ static inline MatrixU getU(const CholMatrixType& m) { return MatrixU(m.adjoint()); }
  };
 
  template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
@@ -608,7 +616,7 @@ public:
  }
 
  if(Base::m_diag.size()>0)
- dest = Base::m_diag.asDiagonal().inverse() * dest;
+ dest = Base::m_diag.real().asDiagonal().inverse() * dest;
 
  if (Base::m_matrix.nonZeros()>0) // otherwise I==I
  {
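The SimplicialCholesky hunks above tighten member initialization and fix the getL/getU trait signatures. For orientation, a minimal sketch of the solver they belong to; the matrix data here is assumed for illustration and is not taken from this diff:

// Illustrative sketch (not from the diff): solving a small SPD system with SimplicialLDLT.
#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;

  // Small symmetric positive definite matrix, assumed for illustration.
  std::vector<Eigen::Triplet<double> > coeffs;
  coeffs.push_back(Eigen::Triplet<double>(0, 0, 4.0));
  coeffs.push_back(Eigen::Triplet<double>(1, 1, 3.0));
  coeffs.push_back(Eigen::Triplet<double>(2, 2, 2.0));
  coeffs.push_back(Eigen::Triplet<double>(0, 1, 1.0));
  coeffs.push_back(Eigen::Triplet<double>(1, 0, 1.0));
  SpMat A(3, 3);
  A.setFromTriplets(coeffs.begin(), coeffs.end());

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  Eigen::SimplicialLDLT<SpMat> ldlt(A);   // analyzePattern + factorize in the constructor
  if (ldlt.info() != Eigen::Success) {    // the info() documented in the hunk above
    std::cerr << "factorization failed\n";
    return 1;
  }
  std::cout << ldlt.solve(b).transpose() << "\n";
  return 0;
}
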
data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37

@@ -2,46 +2,21 @@
  // for linear algebra.
  //
  // Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
  /*
-
- NOTE: thes functions vave been adapted from the LDL library:
+ NOTE: these functions have been adapted from the LDL library:
 
  LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
 
- LDL License:
-
- Your use or distribution of LDL or any modified version of
- LDL implies that you agree to this License.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
- USA
-
- Permission is hereby granted to use or copy this program under the
- terms of the GNU LGPL, provided that the Copyright, this License,
- and the Availability of the original version is retained on all copies.
- User documentation of any code that uses this code or any modified
- version of this code must cite the Copyright, this License, the
- Availability note, and "Used by permission." Permission to modify
- the code and to distribute modified code is granted, provided the
- Copyright, this License, and the Availability note are retained,
- and a notice that the code was modified is included.
+ The author of LDL, Timothy A. Davis., has executed a license with Google LLC
+ to permit distribution of this code and derivative works as part of Eigen under
+ the Mozilla Public License v. 2.0, as stated at the top of this file.
  */
 
- #include "../Core/util/NonMPL2.h"
-
  #ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
  #define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
 
@@ -122,7 +97,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
  for(StorageIndex k = 0; k < size; ++k)
  {
  // compute nonzero pattern of kth row of L, in topological order
- y[k] = 0.0; // Y(0:k) is now all zero
+ y[k] = Scalar(0); // Y(0:k) is now all zero
  StorageIndex top = size; // stack for pattern is empty
  tags[k] = k; // mark node k as visited
  m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
@@ -146,17 +121,17 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
  /* compute numerical values kth row of L (a sparse triangular solve) */
 
  RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
- y[k] = 0.0;
+ y[k] = Scalar(0);
  for(; top < size; ++top)
  {
  Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
  Scalar yi = y[i]; /* get and clear Y(i) */
- y[i] = 0.0;
+ y[i] = Scalar(0);
 
  /* the nonzero entry L(k,i) */
  Scalar l_ki;
  if(DoLDLT)
- l_ki = yi / m_diag[i];
+ l_ki = yi / numext::real(m_diag[i]);
  else
  yi = l_ki = yi / Lx[Lp[i]];
 
data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2

@@ -28,7 +28,7 @@ class AmbiVector
  typedef typename NumTraits<Scalar>::Real RealScalar;
 
  explicit AmbiVector(Index size)
- : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
  {
  resize(size);
  }
@@ -147,7 +147,8 @@ template<typename _Scalar,typename _StorageIndex>
  void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
  {
  m_mode = mode;
- if (m_mode==IsSparse)
+ // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
+ // if (m_mode==IsSparse)
  {
  m_llSize = 0;
  m_llStart = -1;
data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0

@@ -207,6 +207,22 @@ class CompressedStorage
  return m_values[id];
  }
 
+ void moveChunk(Index from, Index to, Index chunkSize)
+ {
+ eigen_internal_assert(to+chunkSize <= m_size);
+ if(to>from && from+chunkSize>to)
+ {
+ // move backward
+ internal::smart_memmove(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_memmove(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ else
+ {
+ internal::smart_copy(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ }
+
  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
  {
  Index k = 0;
data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6

@@ -10,7 +10,7 @@
  #ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
  #define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
 
- namespace Eigen {
+ namespace Eigen {
 
  namespace internal {
 
@@ -25,16 +25,16 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
  Index rows = lhs.innerSize();
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());
-
+
  ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
  ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
  ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
-
+
  std::memset(mask,0,sizeof(bool)*rows);
 
  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);
-
+
  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the respective Y columns
  // of the lhs differs in average of one non zeros, thus the number of non zeros for
@@ -141,7 +141,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
  typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
  typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
  typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
-
+
  // If the result is tall and thin (in the extreme case a column vector)
  // then it is faster to sort the coefficients inplace instead of transposing twice.
  // FIXME, the following heuristic is probably not very good.
@@ -155,7 +155,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
  else
  {
  ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
- // ressort to transpose to sort the entries
+ // resort to transpose to sort the entries
  internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
  RowMajorMatrix resRow(resCol);
  res = resRow.markAsRValue();
data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27

@@ -83,7 +83,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
  // eval without temporary
  dst.resize(src.rows(), src.cols());
  dst.setZero();
- dst.reserve((std::max)(src.rows(),src.cols())*2);
+ dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
  for (Index j=0; j<outerEvaluationSize; ++j)
  {
  dst.startVec(j);
@@ -107,7 +107,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
 
  DstXprType temp(src.rows(), src.cols());
 
- temp.reserve((std::max)(src.rows(),src.cols())*2);
+ temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
  for (Index j=0; j<outerEvaluationSize; ++j)
  {
  temp.startVec(j);
@@ -134,8 +134,8 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
  };
 
  // Generic Sparse to Dense assignment
- template< typename DstXprType, typename SrcXprType, typename Functor>
- struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
+ template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
+ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Weak>
  {
  static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
  {
@@ -153,6 +153,73 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
  }
  };
 
+ // Specialization for dense ?= dense +/- sparse and dense ?= sparse +/- dense
+ template<typename DstXprType, typename Func1, typename Func2>
+ struct assignment_from_dense_op_sparse
+ {
+ template<typename SrcXprType, typename InitialFunc>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
+ #endif
+
+ call_assignment_no_alias(dst, src.lhs(), Func1());
+ call_assignment_no_alias(dst, src.rhs(), Func2());
+ }
+
+ // Specialization for dense1 = sparse + dense2; -> dense1 = dense2; dense1 += sparse;
+ template<typename Lhs, typename Rhs, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+ run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
+ const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
+ #endif
+
+ // Apply the dense matrix first, then the sparse one.
+ call_assignment_no_alias(dst, src.rhs(), Func1());
+ call_assignment_no_alias(dst, src.lhs(), Func2());
+ }
+
+ // Specialization for dense1 = sparse - dense2; -> dense1 = -dense2; dense1 += sparse;
+ template<typename Lhs, typename Rhs, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+ run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_difference_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
+ const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
+ #endif
+
+ // Apply the dense matrix first, then the sparse one.
+ call_assignment_no_alias(dst, -src.rhs(), Func1());
+ call_assignment_no_alias(dst, src.lhs(), add_assign_op<typename DstXprType::Scalar,typename Lhs::Scalar>());
+ }
+ };
+
+ #define EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(ASSIGN_OP,BINOP,ASSIGN_OP2) \
+ template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> \
+ struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<Scalar,Scalar>, const Lhs, const Rhs>, internal::ASSIGN_OP<typename DstXprType::Scalar,Scalar>, \
+ Sparse2Dense, \
+ typename internal::enable_if< internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
+ || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type> \
+ : assignment_from_dense_op_sparse<DstXprType, internal::ASSIGN_OP<typename DstXprType::Scalar,typename Lhs::Scalar>, internal::ASSIGN_OP2<typename DstXprType::Scalar,typename Rhs::Scalar> > \
+ {}
+
+ EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op, scalar_sum_op,add_assign_op);
+ EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_sum_op,add_assign_op);
+ EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_sum_op,sub_assign_op);
+
+ EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op, scalar_difference_op,sub_assign_op);
+ EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_difference_op,sub_assign_op);
+ EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_difference_op,add_assign_op);
+
+
  // Specialization for "dst = dec.solve(rhs)"
  // NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
  template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
@@ -179,35 +246,22 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
  {
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef typename DstXprType::Scalar Scalar;
- typedef Array<StorageIndex,Dynamic,1> ArrayXI;
- typedef Array<Scalar,Dynamic,1> ArrayXS;
- template<int Options>
- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- Index dstRows = src.rows();
- Index dstCols = src.cols();
- if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
- dst.resize(dstRows, dstCols);
 
- Index size = src.diagonal().size();
- dst.makeCompressed();
- dst.resizeNonZeros(size);
- Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
- Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
- Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
- }
+ template<int Options, typename AssignFunc>
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
+ { dst.assignDiagonal(src.diagonal(), func); }
 
  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- dst.diagonal() = src.diagonal();
- }
+ { dst.derived().diagonal() = src.diagonal(); }
 
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() += src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() += src.diagonal(); }
 
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() -= src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() -= src.diagonal(); }
  };
  } // end namespace internal
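The new assignment_from_dense_op_sparse machinery above splits a mixed expression into a dense pass followed by a sparse pass (dense1 = sparse + dense2 becomes dense1 = dense2; dense1 += sparse). A small sketch of the user-level expressions these Sparse2Dense specializations catch; the matrices are assumed for illustration and are not part of this diff:

// Illustrative sketch (not from the diff): mixed dense/sparse assignments to a dense destination.
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> S(3, 3);
  S.insert(0, 0) = 1.0;
  S.insert(2, 1) = 5.0;
  S.makeCompressed();

  Eigen::MatrixXd D = Eigen::MatrixXd::Ones(3, 3);
  Eigen::MatrixXd R(3, 3);

  R = S + D;    // dense1 = sparse + dense2  ->  dense1 = dense2; dense1 += sparse
  R += S - D;   // dense ?= sparse - dense is routed by the same specializations
  std::cout << R << "\n";
  return 0;
}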