umappp 0.1.2

Files changed (395)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +25 -0
  3. data/README.md +110 -0
  4. data/ext/umappp/extconf.rb +25 -0
  5. data/ext/umappp/numo.hpp +867 -0
  6. data/ext/umappp/umappp.cpp +225 -0
  7. data/lib/umappp/version.rb +5 -0
  8. data/lib/umappp.rb +41 -0
  9. data/vendor/Eigen/Cholesky +45 -0
  10. data/vendor/Eigen/CholmodSupport +48 -0
  11. data/vendor/Eigen/Core +384 -0
  12. data/vendor/Eigen/Dense +7 -0
  13. data/vendor/Eigen/Eigen +2 -0
  14. data/vendor/Eigen/Eigenvalues +60 -0
  15. data/vendor/Eigen/Geometry +59 -0
  16. data/vendor/Eigen/Householder +29 -0
  17. data/vendor/Eigen/IterativeLinearSolvers +48 -0
  18. data/vendor/Eigen/Jacobi +32 -0
  19. data/vendor/Eigen/KLUSupport +41 -0
  20. data/vendor/Eigen/LU +47 -0
  21. data/vendor/Eigen/MetisSupport +35 -0
  22. data/vendor/Eigen/OrderingMethods +70 -0
  23. data/vendor/Eigen/PaStiXSupport +49 -0
  24. data/vendor/Eigen/PardisoSupport +35 -0
  25. data/vendor/Eigen/QR +50 -0
  26. data/vendor/Eigen/QtAlignedMalloc +39 -0
  27. data/vendor/Eigen/SPQRSupport +34 -0
  28. data/vendor/Eigen/SVD +50 -0
  29. data/vendor/Eigen/Sparse +34 -0
  30. data/vendor/Eigen/SparseCholesky +37 -0
  31. data/vendor/Eigen/SparseCore +69 -0
  32. data/vendor/Eigen/SparseLU +50 -0
  33. data/vendor/Eigen/SparseQR +36 -0
  34. data/vendor/Eigen/StdDeque +27 -0
  35. data/vendor/Eigen/StdList +26 -0
  36. data/vendor/Eigen/StdVector +27 -0
  37. data/vendor/Eigen/SuperLUSupport +64 -0
  38. data/vendor/Eigen/UmfPackSupport +40 -0
  39. data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
  40. data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
  41. data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  42. data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
  43. data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
  44. data/vendor/Eigen/src/Core/Array.h +417 -0
  45. data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
  46. data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
  47. data/vendor/Eigen/src/Core/Assign.h +90 -0
  48. data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
  49. data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
  50. data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
  51. data/vendor/Eigen/src/Core/Block.h +448 -0
  52. data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
  53. data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
  54. data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
  55. data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
  56. data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
  57. data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
  58. data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
  59. data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  60. data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  61. data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
  62. data/vendor/Eigen/src/Core/DenseBase.h +701 -0
  63. data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
  64. data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
  65. data/vendor/Eigen/src/Core/Diagonal.h +258 -0
  66. data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
  67. data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
  68. data/vendor/Eigen/src/Core/Dot.h +318 -0
  69. data/vendor/Eigen/src/Core/EigenBase.h +160 -0
  70. data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
  71. data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
  72. data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
  73. data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
  74. data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
  75. data/vendor/Eigen/src/Core/IO.h +258 -0
  76. data/vendor/Eigen/src/Core/IndexedView.h +237 -0
  77. data/vendor/Eigen/src/Core/Inverse.h +117 -0
  78. data/vendor/Eigen/src/Core/Map.h +171 -0
  79. data/vendor/Eigen/src/Core/MapBase.h +310 -0
  80. data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
  81. data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
  82. data/vendor/Eigen/src/Core/Matrix.h +565 -0
  83. data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
  84. data/vendor/Eigen/src/Core/NestByValue.h +85 -0
  85. data/vendor/Eigen/src/Core/NoAlias.h +109 -0
  86. data/vendor/Eigen/src/Core/NumTraits.h +335 -0
  87. data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  88. data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
  89. data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
  90. data/vendor/Eigen/src/Core/Product.h +191 -0
  91. data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
  92. data/vendor/Eigen/src/Core/Random.h +218 -0
  93. data/vendor/Eigen/src/Core/Redux.h +515 -0
  94. data/vendor/Eigen/src/Core/Ref.h +381 -0
  95. data/vendor/Eigen/src/Core/Replicate.h +142 -0
  96. data/vendor/Eigen/src/Core/Reshaped.h +454 -0
  97. data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
  98. data/vendor/Eigen/src/Core/Reverse.h +217 -0
  99. data/vendor/Eigen/src/Core/Select.h +164 -0
  100. data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
  101. data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  102. data/vendor/Eigen/src/Core/Solve.h +188 -0
  103. data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
  104. data/vendor/Eigen/src/Core/SolverBase.h +168 -0
  105. data/vendor/Eigen/src/Core/StableNorm.h +251 -0
  106. data/vendor/Eigen/src/Core/StlIterators.h +463 -0
  107. data/vendor/Eigen/src/Core/Stride.h +116 -0
  108. data/vendor/Eigen/src/Core/Swap.h +68 -0
  109. data/vendor/Eigen/src/Core/Transpose.h +464 -0
  110. data/vendor/Eigen/src/Core/Transpositions.h +386 -0
  111. data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
  112. data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
  113. data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
  114. data/vendor/Eigen/src/Core/Visitor.h +381 -0
  115. data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
  116. data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
  117. data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
  118. data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
  119. data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  120. data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
  121. data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
  122. data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  123. data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
  124. data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
  125. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  126. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  127. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  128. data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
  129. data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
  130. data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  131. data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
  132. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  133. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  134. data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
  135. data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
  136. data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  137. data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
  138. data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  139. data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  140. data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  141. data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  142. data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  143. data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  144. data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
  145. data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  146. data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
  147. data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
  148. data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  149. data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
  150. data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
  151. data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
  152. data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
  153. data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  154. data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  155. data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  156. data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  157. data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  158. data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  159. data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  160. data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  161. data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
  162. data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
  163. data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
  164. data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
  165. data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
  166. data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
  167. data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
  168. data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  169. data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
  170. data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
  171. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
  172. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
  173. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  174. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
  175. data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
  176. data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  177. data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
  178. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
  179. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
  180. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
  181. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  182. data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  183. data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
  184. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
  185. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
  186. data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  187. data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  188. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
  189. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
  190. data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
  191. data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
  192. data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  193. data/vendor/Eigen/src/Core/util/Constants.h +563 -0
  194. data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
  195. data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
  196. data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  197. data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
  198. data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
  199. data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
  200. data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
  201. data/vendor/Eigen/src/Core/util/Meta.h +812 -0
  202. data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
  203. data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
  204. data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  205. data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
  206. data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  207. data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
  208. data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  209. data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
  210. data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  211. data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  212. data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  213. data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  214. data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  215. data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  216. data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
  217. data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
  218. data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  219. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
  220. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  221. data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
  222. data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
  223. data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
  224. data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
  225. data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
  226. data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
  227. data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
  228. data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
  229. data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
  230. data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
  231. data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
  232. data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
  233. data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
  234. data/vendor/Eigen/src/Geometry/Translation.h +202 -0
  235. data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
  236. data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  237. data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
  238. data/vendor/Eigen/src/Householder/Householder.h +176 -0
  239. data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
  240. data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  241. data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
  242. data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
  243. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
  244. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
  245. data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
  246. data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
  247. data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
  248. data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
  249. data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  250. data/vendor/Eigen/src/LU/Determinant.h +117 -0
  251. data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
  252. data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
  253. data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
  254. data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  255. data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
  256. data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  257. data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
  258. data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
  259. data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
  260. data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  261. data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
  262. data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
  263. data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  264. data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
  265. data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
  266. data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
  267. data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  268. data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
  269. data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
  270. data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
  271. data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  272. data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
  273. data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  274. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
  275. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
  276. data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
  277. data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
  278. data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  279. data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  280. data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
  281. data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
  282. data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  283. data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
  284. data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
  285. data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
  286. data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
  287. data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  288. data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
  289. data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  290. data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
  291. data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
  292. data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
  293. data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  294. data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
  295. data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
  296. data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
  297. data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
  298. data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  299. data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  300. data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  301. data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  302. data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
  303. data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
  304. data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
  305. data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  306. data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
  307. data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  308. data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  309. data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  310. data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
  311. data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  312. data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  313. data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  314. data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  315. data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  316. data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  317. data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  318. data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  319. data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  320. data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  321. data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  322. data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  323. data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
  324. data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
  325. data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
  326. data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
  327. data/vendor/Eigen/src/StlSupport/details.h +84 -0
  328. data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
  329. data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
  330. data/vendor/Eigen/src/misc/Image.h +82 -0
  331. data/vendor/Eigen/src/misc/Kernel.h +79 -0
  332. data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
  333. data/vendor/Eigen/src/misc/blas.h +440 -0
  334. data/vendor/Eigen/src/misc/lapack.h +152 -0
  335. data/vendor/Eigen/src/misc/lapacke.h +16292 -0
  336. data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
  337. data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
  338. data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
  339. data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
  340. data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  341. data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
  342. data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  343. data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  344. data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
  345. data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
  346. data/vendor/aarand/aarand.hpp +114 -0
  347. data/vendor/annoy/annoylib.h +1495 -0
  348. data/vendor/annoy/kissrandom.h +120 -0
  349. data/vendor/annoy/mman.h +242 -0
  350. data/vendor/hnswlib/bruteforce.h +152 -0
  351. data/vendor/hnswlib/hnswalg.h +1192 -0
  352. data/vendor/hnswlib/hnswlib.h +108 -0
  353. data/vendor/hnswlib/space_ip.h +282 -0
  354. data/vendor/hnswlib/space_l2.h +281 -0
  355. data/vendor/hnswlib/visited_list_pool.h +79 -0
  356. data/vendor/irlba/irlba.hpp +575 -0
  357. data/vendor/irlba/lanczos.hpp +212 -0
  358. data/vendor/irlba/parallel.hpp +474 -0
  359. data/vendor/irlba/utils.hpp +224 -0
  360. data/vendor/irlba/wrappers.hpp +228 -0
  361. data/vendor/kmeans/Base.hpp +75 -0
  362. data/vendor/kmeans/Details.hpp +79 -0
  363. data/vendor/kmeans/HartiganWong.hpp +492 -0
  364. data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
  365. data/vendor/kmeans/InitializeNone.hpp +44 -0
  366. data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
  367. data/vendor/kmeans/InitializeRandom.hpp +91 -0
  368. data/vendor/kmeans/Kmeans.hpp +161 -0
  369. data/vendor/kmeans/Lloyd.hpp +134 -0
  370. data/vendor/kmeans/MiniBatch.hpp +269 -0
  371. data/vendor/kmeans/QuickSearch.hpp +179 -0
  372. data/vendor/kmeans/compute_centroids.hpp +32 -0
  373. data/vendor/kmeans/compute_wcss.hpp +27 -0
  374. data/vendor/kmeans/is_edge_case.hpp +42 -0
  375. data/vendor/kmeans/random.hpp +55 -0
  376. data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
  377. data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
  378. data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
  379. data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
  380. data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
  381. data/vendor/knncolle/knncolle.hpp +34 -0
  382. data/vendor/knncolle/utils/Base.hpp +100 -0
  383. data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
  384. data/vendor/knncolle/utils/distances.hpp +98 -0
  385. data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
  386. data/vendor/powerit/PowerIterations.hpp +157 -0
  387. data/vendor/umappp/NeighborList.hpp +37 -0
  388. data/vendor/umappp/Umap.hpp +662 -0
  389. data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
  390. data/vendor/umappp/find_ab.hpp +157 -0
  391. data/vendor/umappp/neighbor_similarities.hpp +136 -0
  392. data/vendor/umappp/optimize_layout.hpp +285 -0
  393. data/vendor/umappp/spectral_init.hpp +181 -0
  394. data/vendor/umappp/umappp.hpp +13 -0
  395. metadata +465 -0
data/vendor/Eigen/src/SVD/BDCSVD.h
@@ -0,0 +1,1366 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
+ // research report written by Ming Gu and Stanley C.Eisenstat
+ // The code variable names correspond to the names they used in their
+ // report
+ //
+ // Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+ // Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+ // Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+ // Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+ // Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
+ // Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+ //
+ // Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_BDCSVD_H
+ #define EIGEN_BDCSVD_H
+ // #define EIGEN_BDCSVD_DEBUG_VERBOSE
+ // #define EIGEN_BDCSVD_SANITY_CHECKS
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ #undef eigen_internal_assert
+ #define eigen_internal_assert(X) assert(X);
+ #endif
+
+ namespace Eigen {
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
+ #endif
+
+ template<typename _MatrixType> class BDCSVD;
+
+ namespace internal {
+
+ template<typename _MatrixType>
+ struct traits<BDCSVD<_MatrixType> >
+ : traits<_MatrixType>
+ {
+ typedef _MatrixType MatrixType;
+ };
+
+ } // end namespace internal
+
+
+ /** \ingroup SVD_Module
+ *
+ *
+ * \class BDCSVD
+ *
+ * \brief class Bidiagonal Divide and Conquer SVD
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
+ *
+ * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
+ * and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
+ * You can control the switching size with the setSwitchSize() method, default is 16.
+ * For small matrices (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly
+ * recommended and can be several orders of magnitude faster.
+ *
+ * \warning this algorithm is unlikely to provide accurate results when compiled with unsafe math optimizations.
+ * For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
+ * you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
+ * significantly degrade the accuracy.
+ *
+ * \sa class JacobiSVD
+ */
+ template<typename _MatrixType>
+ class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
+ {
+ typedef SVDBase<BDCSVD> Base;
+
+ public:
+ using Base::rows;
+ using Base::cols;
+ using Base::computeU;
+ using Base::computeV;
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename NumTraits<RealScalar>::Literal Literal;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef typename Base::MatrixUType MatrixUType;
+ typedef typename Base::MatrixVType MatrixVType;
+ typedef typename Base::SingularValuesType SingularValuesType;
+
+ typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
+ typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
+ typedef Matrix<RealScalar, Dynamic, 1> VectorType;
+ typedef Array<RealScalar, Dynamic, 1> ArrayXr;
+ typedef Array<Index,1,Dynamic> ArrayXi;
+ typedef Ref<ArrayXr> ArrayRef;
+ typedef Ref<ArrayXi> IndicesRef;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via BDCSVD::compute(const MatrixType&).
+ */
+ BDCSVD() : m_algoswap(16), m_isTranspose(false), m_compU(false), m_compV(false), m_numIters(0)
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa BDCSVD()
+ */
+ BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : m_algoswap(16), m_numIters(0)
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : m_algoswap(16), m_numIters(0)
+ {
+ compute(matrix, computationOptions);
+ }
+
+ ~BDCSVD()
+ {
+ }
+
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ BDCSVD& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ void setSwitchSize(int s)
+ {
+ eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
+ m_algoswap = s;
+ }
+
+ private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+ void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
+ void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
+ void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
+ void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
+ void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
+ void deflation43(Index firstCol, Index shift, Index i, Index size);
+ void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
+ void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
+ template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
+ void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
+ void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
+ static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
+
+ protected:
+ MatrixXr m_naiveU, m_naiveV;
+ MatrixXr m_computed;
+ Index m_nRec;
+ ArrayXr m_workspace;
+ ArrayXi m_workspaceI;
+ int m_algoswap;
+ bool m_isTranspose, m_compU, m_compV;
+
+ using Base::m_singularValues;
+ using Base::m_diagSize;
+ using Base::m_computeFullU;
+ using Base::m_computeFullV;
+ using Base::m_computeThinU;
+ using Base::m_computeThinV;
+ using Base::m_matrixU;
+ using Base::m_matrixV;
+ using Base::m_info;
+ using Base::m_isInitialized;
+ using Base::m_nonzeroSingularValues;
+
+ public:
+ int m_numIters;
+ }; //end class BDCSVD
+
+
+ // Method to allocate and initialize matrix and attributes
+ template<typename MatrixType>
+ void BDCSVD<MatrixType>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
+ {
+ m_isTranspose = (cols > rows);
+
+ if (Base::allocate(rows, cols, computationOptions))
+ return;
+
+ m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
+ m_compU = computeV();
+ m_compV = computeU();
+ if (m_isTranspose)
+ std::swap(m_compU, m_compV);
+
+ if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
+ else m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );
+
+ if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);
+
+ m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
+ m_workspaceI.resize(3*m_diagSize);
+ }// end allocate
+
+ template<typename MatrixType>
+ BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
+ {
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ std::cout << "\n\n\n======================================================================================================================\n\n\n";
+ #endif
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ using std::abs;
+
+ const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
+
+ //**** step -1 - If the problem is too small, directly fall back to JacobiSVD and return
+ if(matrix.cols() < m_algoswap)
+ {
+ // FIXME this line involves temporaries
+ JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
+ m_isInitialized = true;
+ m_info = jsvd.info();
+ if (m_info == Success || m_info == NoConvergence) {
+ if(computeU()) m_matrixU = jsvd.matrixU();
+ if(computeV()) m_matrixV = jsvd.matrixV();
+ m_singularValues = jsvd.singularValues();
+ m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
+ }
+ return *this;
+ }
+
+ //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
+ RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
+ if (!(numext::isfinite)(scale)) {
+ m_isInitialized = true;
+ m_info = InvalidInput;
+ return *this;
+ }
+
+ if(scale==Literal(0)) scale = Literal(1);
+ MatrixX copy;
+ if (m_isTranspose) copy = matrix.adjoint()/scale;
+ else copy = matrix/scale;
+
+ //**** step 1 - Bidiagonalization
+ // FIXME this line involves temporaries
+ internal::UpperBidiagonalization<MatrixX> bid(copy);
+
+ //**** step 2 - Divide & Conquer
+ m_naiveU.setZero();
+ m_naiveV.setZero();
+ // FIXME this line involves a temporary matrix
+ m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
+ m_computed.template bottomRows<1>().setZero();
+ divide(0, m_diagSize - 1, 0, 0, 0);
+ if (m_info != Success && m_info != NoConvergence) {
+ m_isInitialized = true;
+ return *this;
+ }
+
+ //**** step 3 - Copy singular values and vectors
+ for (int i=0; i<m_diagSize; i++)
+ {
+ RealScalar a = abs(m_computed.coeff(i, i));
+ m_singularValues.coeffRef(i) = a * scale;
+ if (a<considerZero)
+ {
+ m_nonzeroSingularValues = i;
+ m_singularValues.tail(m_diagSize - i - 1).setZero();
+ break;
+ }
+ else if (i == m_diagSize - 1)
+ {
+ m_nonzeroSingularValues = i + 1;
+ break;
+ }
+ }
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ // std::cout << "m_naiveU\n" << m_naiveU << "\n\n";
+ // std::cout << "m_naiveV\n" << m_naiveV << "\n\n";
+ #endif
+ if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
+ else copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);
+
+ m_isInitialized = true;
+ return *this;
+ }// end compute
+
+
+ template<typename MatrixType>
+ template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
+ void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
+ {
+ // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
+ if (computeU())
+ {
+ Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
+ m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
+ m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
+ householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
+ }
+ if (computeV())
+ {
+ Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
+ m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
+ m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
+ householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
+ }
+ }
+
+ /** \internal
+ * Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
+ * A = [A1]
+ * [A2]
+ * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.
+ * We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
+ * enough.
+ */
+ template<typename MatrixType>
+ void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
+ {
+ Index n = A.rows();
+ if(n>100)
+ {
+ // If the matrices are large enough, let's exploit the sparse structure of A by
+ // splitting it in half (wrt n1), and packing the non-zero columns.
+ Index n2 = n - n1;
+ Map<MatrixXr> A1(m_workspace.data() , n1, n);
+ Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
+ Map<MatrixXr> B1(m_workspace.data()+ n*n, n, n);
+ Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
+ Index k1=0, k2=0;
+ for(Index j=0; j<n; ++j)
+ {
+ if( (A.col(j).head(n1).array()!=Literal(0)).any() )
+ {
+ A1.col(k1) = A.col(j).head(n1);
+ B1.row(k1) = B.row(j);
+ ++k1;
+ }
+ if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
+ {
+ A2.col(k2) = A.col(j).tail(n2);
+ B2.row(k2) = B.row(j);
+ ++k2;
+ }
+ }
+
+ A.topRows(n1).noalias() = A1.leftCols(k1) * B1.topRows(k1);
+ A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
+ }
+ else
+ {
+ Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
+ tmp.noalias() = A*B;
+ A = tmp;
+ }
+ }
+
+
+ // The divide algorithm is done "in place": we are always working on subsets of the same matrix. The divide method takes as
+ // arguments the position of the submatrix we are currently working on.
+
+ //@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
+ //@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
+ // lastCol + 1 - firstCol is the size of the submatrix.
+ //@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
+ //@param firstColW : Same as firstRowW but for the columns.
+ //@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix
+ // to become the first column (*coeff) and to shift all the other columns to the right. There are more details in the reference paper.
+ template<typename MatrixType>
+ void BDCSVD<MatrixType>::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
+ {
+ // requires rows = cols + 1;
+ using std::pow;
+ using std::sqrt;
+ using std::abs;
+ const Index n = lastCol - firstCol + 1;
+ const Index k = n/2;
+ const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
+ RealScalar alphaK;
+ RealScalar betaK;
+ RealScalar r0;
+ RealScalar lambda, phi, c0, s0;
+ VectorType l, f;
+ // We use the other algorithm which is more efficient for small
+ // matrices.
+ if (n < m_algoswap)
+ {
+ // FIXME this line involves temporaries
+ JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
+ m_info = b.info();
+ if (m_info != Success && m_info != NoConvergence) return;
+ if (m_compU)
+ m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
+ else
+ {
+ m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
+ m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
+ }
+ if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
+ m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
+ return;
+ }
+ // We use the divide and conquer algorithm
+ alphaK = m_computed(firstCol + k, firstCol + k);
+ betaK = m_computed(firstCol + k + 1, firstCol + k);
+ // The divides must be done in that order to get correct results: divide() changes the data inside the submatrices,
+ // and the divide of the right submatrix reads one column of the left submatrix. That's why we need to treat the
+ // right submatrix before the left one.
+ divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
+ if (m_info != Success && m_info != NoConvergence) return;
+ divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
+ if (m_info != Success && m_info != NoConvergence) return;
+
+ if (m_compU)
+ {
+ lambda = m_naiveU(firstCol + k, firstCol + k);
+ phi = m_naiveU(firstCol + k + 1, lastCol + 1);
+ }
+ else
+ {
+ lambda = m_naiveU(1, firstCol + k);
+ phi = m_naiveU(0, lastCol + 1);
+ }
+ r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
+ if (m_compU)
+ {
+ l = m_naiveU.row(firstCol + k).segment(firstCol, k);
+ f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
+ }
+ else
+ {
+ l = m_naiveU.row(1).segment(firstCol, k);
+ f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
+ }
+ if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
+ if (r0<considerZero)
+ {
+ c0 = Literal(1);
+ s0 = Literal(0);
+ }
+ else
+ {
+ c0 = alphaK * lambda / r0;
+ s0 = betaK * phi / r0;
+ }
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(m_naiveU.allFinite());
+ assert(m_naiveV.allFinite());
+ assert(m_computed.allFinite());
+ #endif
+
+ if (m_compU)
+ {
+ MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
+ // we shift q1 at the left with a factor c0
+ m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0;
+ // q2 *= c0
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
+ }
+ else
+ {
+ RealScalar q1 = m_naiveU(0, firstCol + k);
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ m_naiveU(0, i + 1) = m_naiveU(0, i);
+ // we shift q1 at the left with a factor c0
+ m_naiveU(0, firstCol) = (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
+ // q2 *= c0
+ m_naiveU(1, lastCol + 1) *= c0;
+ m_naiveU.row(1).segment(firstCol + 1, k).setZero();
+ m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
+ }
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(m_naiveU.allFinite());
+ assert(m_naiveV.allFinite());
+ assert(m_computed.allFinite());
+ #endif
+
+ m_computed(firstCol + shift, firstCol + shift) = r0;
+ m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
+ m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
+ #endif
+ // Second part: try to deflate singular values in combined matrix
+ deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
+ std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
+ std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
+ std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
+ static int count = 0;
+ std::cout << "# " << ++count << "\n\n";
+ assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
+ // assert(count<681);
+ // assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
+ #endif
+
+ // Third part: compute SVD of combined matrix
+ MatrixXr UofSVD, VofSVD;
+ VectorType singVals;
+ computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(UofSVD.allFinite());
+ assert(VofSVD.allFinite());
+ #endif
+
+ if (m_compU)
+ structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
+ else
+ {
+ Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
+ tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
+ m_naiveU.middleCols(firstCol, n + 1) = tmp;
+ }
+
+ if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(m_naiveU.allFinite());
+ assert(m_naiveV.allFinite());
+ assert(m_computed.allFinite());
+ #endif
+
+ m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
+ m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
+ }// end divide
+
+ // Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
+ // the first column and on the diagonal and has undergone deflation, so diagonal is in increasing
+ // order except for possibly the (0,0) entry. The computed SVD is stored in U, singVals and V, except
+ // that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
+ //
+ // TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
+ // handling of round-off errors, be consistent in ordering
+ // For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
+ template <typename MatrixType>
+ void BDCSVD<MatrixType>::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
+ {
+ const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
+ using std::abs;
+ ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
+ m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
+ ArrayRef diag = m_workspace.head(n);
+ diag(0) = Literal(0);
+
+ // Allocate space for singular values and vectors
+ singVals.resize(n);
+ U.resize(n+1, n+1);
+ if (m_compV) V.resize(n, n);
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ if (col0.hasNaN() || diag.hasNaN())
+ std::cout << "\n\nHAS NAN\n\n";
+ #endif
+
+ // Many singular values might have been deflated, the zero ones have been moved to the end,
+ // but others are interleaved and we must ignore them at this stage.
+ // To this end, let's compute a permutation skipping them:
+ Index actual_n = n;
+ while(actual_n>1 && diag(actual_n-1)==Literal(0)) {--actual_n; eigen_internal_assert(col0(actual_n)==Literal(0)); }
+ Index m = 0; // size of the deflated problem
+ for(Index k=0;k<actual_n;++k)
+ if(abs(col0(k))>considerZero)
+ m_workspaceI(m++) = k;
+ Map<ArrayXi> perm(m_workspaceI.data(),m);
+
+ Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
+ Map<ArrayXr> mus(m_workspace.data()+2*n, n);
+ Map<ArrayXr> zhat(m_workspace.data()+3*n, n);
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ std::cout << "computeSVDofM using:\n";
+ std::cout << " z: " << col0.transpose() << "\n";
+ std::cout << " d: " << diag.transpose() << "\n";
+ #endif
+
+ // Compute singVals, shifts, and mus
+ computeSingVals(col0, diag, perm, singVals, shifts, mus);
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
+ std::cout << " sing-val: " << singVals.transpose() << "\n";
+ std::cout << " mu: " << mus.transpose() << "\n";
+ std::cout << " shift: " << shifts.transpose() << "\n";
+
+ {
+ std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
+ std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
+ assert((((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n) >= 0).all());
+ std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
+ assert((((singVals.array()-diag) / singVals.array()).head(actual_n) >= 0).all());
+ }
+ #endif
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(singVals.allFinite());
+ assert(mus.allFinite());
+ assert(shifts.allFinite());
+ #endif
+
+ // Compute zhat
+ perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ std::cout << " zhat: " << zhat.transpose() << "\n";
+ #endif
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(zhat.allFinite());
+ #endif
+
+ computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
+ std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
+ #endif
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ assert(m_naiveU.allFinite());
+ assert(m_naiveV.allFinite());
+ assert(m_computed.allFinite());
+ assert(U.allFinite());
+ assert(V.allFinite());
+ // assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
+ // assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 100*NumTraits<RealScalar>::epsilon() * n);
+ #endif
+
+ // Because of deflation, the singular values might not be completely sorted.
+ // Fortunately, reordering them is an O(n) problem
+ for(Index i=0; i<actual_n-1; ++i)
+ {
+ if(singVals(i)>singVals(i+1))
+ {
+ using std::swap;
+ swap(singVals(i),singVals(i+1));
+ U.col(i).swap(U.col(i+1));
+ if(m_compV) V.col(i).swap(V.col(i+1));
+ }
+ }
+
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
+ {
+ bool singular_values_sorted = (((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).array() >= 0).all();
+ if(!singular_values_sorted)
+ std::cout << "Singular values are not sorted: " << singVals.segment(1,actual_n).transpose() << "\n";
+ assert(singular_values_sorted);
+ }
+ #endif
+
+ // Reverse order so that singular values are in increasing order
+ // Because of deflation, the zero singular values are already at the end
+ singVals.head(actual_n).reverseInPlace();
+ U.leftCols(actual_n).rowwise().reverseInPlace();
+ if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();
+
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
+ JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
+ std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n";
+ std::cout << " * sing-val: " << singVals.transpose() << "\n";
+ // std::cout << " * err: " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
+ #endif
+ }
+
+ template <typename MatrixType>
+ typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
+ {
+ Index m = perm.size();
+ RealScalar res = Literal(1);
+ for(Index i=0; i<m; ++i)
+ {
+ Index j = perm(i);
+ // The following expression could be rewritten to involve only a single division,
+ // but this would make the expression more sensitive to overflow.
+ res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
+ }
+ return res;
+
+ }
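For reference (this aside is not part of the vendored header): the quantity accumulated by secularEq above is the secular function from the Gu-Eisenstat report. Writing $z$ for col0, $d$ for diag, and evaluating at the candidate singular value $\sigma = \text{shift} + \mu$, the sum is algebraically

$$ f(\sigma) \;=\; 1 \;+\; \sum_{j \in \text{perm}} \frac{z_j^2}{d_j^2 - \sigma^2}, $$

since $(d_j - \text{shift} - \mu)(d_j + \text{shift} + \mu) = d_j^2 - \sigma^2$; the singular values of the deflated block are the roots of $f$. The code keeps the two divisions separate, as its comment notes, to make the evaluation less prone to overflow.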
737
+
738
+ template <typename MatrixType>
739
+ void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
740
+ VectorType& singVals, ArrayRef shifts, ArrayRef mus)
741
+ {
742
+ using std::abs;
743
+ using std::swap;
744
+ using std::sqrt;
745
+
746
+ Index n = col0.size();
747
+ Index actual_n = n;
748
+ // Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
749
+ // because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
750
+ while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
751
+
752
+ for (Index k = 0; k < n; ++k)
753
+ {
754
+ if (col0(k) == Literal(0) || actual_n==1)
755
+ {
756
+ // if col0(k) == 0, then entry is deflated, so singular value is on diagonal
757
+ // if actual_n==1, then the deflated problem is already diagonalized
758
+ singVals(k) = k==0 ? col0(0) : diag(k);
759
+ mus(k) = Literal(0);
760
+ shifts(k) = k==0 ? col0(0) : diag(k);
761
+ continue;
762
+ }
763
+
764
+ // otherwise, use secular equation to find singular value
765
+ RealScalar left = diag(k);
766
+ RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
767
+ if(k==actual_n-1)
768
+ right = (diag(actual_n-1) + col0.matrix().norm());
769
+ else
770
+ {
771
+ // Skip deflated singular values,
772
+ // recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
773
+ // This should be equivalent to using perm[]
774
+ Index l = k+1;
775
+ while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
776
+ right = diag(l);
777
+ }
778
+
779
+       // first decide whether the singular value is closer to the left end or the right end of the interval
780
+ RealScalar mid = left + (right-left) / Literal(2);
781
+ RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
782
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
783
+ std::cout << "right-left = " << right-left << "\n";
784
+ // std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, ArrayXr(diag-left), left)
785
+ // << " " << secularEq(mid-right, col0, diag, perm, ArrayXr(diag-right), right) << "\n";
786
+ std::cout << " = " << secularEq(left+RealScalar(0.000001)*(right-left), col0, diag, perm, diag, 0)
787
+ << " " << secularEq(left+RealScalar(0.1) *(right-left), col0, diag, perm, diag, 0)
788
+ << " " << secularEq(left+RealScalar(0.2) *(right-left), col0, diag, perm, diag, 0)
789
+ << " " << secularEq(left+RealScalar(0.3) *(right-left), col0, diag, perm, diag, 0)
790
+ << " " << secularEq(left+RealScalar(0.4) *(right-left), col0, diag, perm, diag, 0)
791
+ << " " << secularEq(left+RealScalar(0.49) *(right-left), col0, diag, perm, diag, 0)
792
+ << " " << secularEq(left+RealScalar(0.5) *(right-left), col0, diag, perm, diag, 0)
793
+ << " " << secularEq(left+RealScalar(0.51) *(right-left), col0, diag, perm, diag, 0)
794
+ << " " << secularEq(left+RealScalar(0.6) *(right-left), col0, diag, perm, diag, 0)
795
+ << " " << secularEq(left+RealScalar(0.7) *(right-left), col0, diag, perm, diag, 0)
796
+ << " " << secularEq(left+RealScalar(0.8) *(right-left), col0, diag, perm, diag, 0)
797
+ << " " << secularEq(left+RealScalar(0.9) *(right-left), col0, diag, perm, diag, 0)
798
+ << " " << secularEq(left+RealScalar(0.999999)*(right-left), col0, diag, perm, diag, 0) << "\n";
799
+ #endif
800
+ RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
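+       // Why this choice (sketch): with sigma = shift + mu, f(sigma) = 1 + sum_j z_j^2/(d_j^2 - sigma^2)
+       // increases monotonically from -inf to +inf on (left, right), so fMid > 0 means the root lies
+       // in the left half and we anchor the shift at the nearer endpoint `left` (and at `right`
+       // otherwise). For the last interval (k == actual_n-1) only an upper bound is known on the
+       // right, so the shift is always taken at `left` there.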
801
+
802
+ // measure everything relative to shift
803
+ Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
804
+ diagShifted = diag - shift;
805
+
806
+ if(k!=actual_n-1)
807
+ {
808
+ // check that after the shift, f(mid) is still negative:
809
+ RealScalar midShifted = (right - left) / RealScalar(2);
810
+ if(shift==right)
811
+ midShifted = -midShifted;
812
+ RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
813
+ if(fMidShifted>0)
814
+ {
815
+ // fMid was erroneous, fix it:
816
+ shift = fMidShifted > Literal(0) ? left : right;
817
+ diagShifted = diag - shift;
818
+ }
819
+ }
820
+
821
+ // initial guess
822
+ RealScalar muPrev, muCur;
823
+ if (shift == left)
824
+ {
825
+ muPrev = (right - left) * RealScalar(0.1);
826
+ if (k == actual_n-1) muCur = right - left;
827
+ else muCur = (right - left) * RealScalar(0.5);
828
+ }
829
+ else
830
+ {
831
+ muPrev = -(right - left) * RealScalar(0.1);
832
+ muCur = -(right - left) * RealScalar(0.5);
833
+ }
834
+
835
+ RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
836
+ RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);
837
+ if (abs(fPrev) < abs(fCur))
838
+ {
839
+ swap(fPrev, fCur);
840
+ swap(muPrev, muCur);
841
+ }
842
+
843
+ // rational interpolation: fit a function of the form a / mu + b through the two previous
844
+ // iterates and use its zero to compute the next iterate
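+       // Sketch of the update used below: imposing g(muPrev) = fPrev and g(muCur) = fCur on
+       // g(mu) = a/mu + b gives a = (fCur - fPrev) / (1/muCur - 1/muPrev) and b = fCur - a/muCur,
+       // and the next iterate is the zero mu = -a/b. The 1/mu term mimics the nearby pole of the
+       // secular function, which is why this typically converges faster than a plain secant step.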
845
+ bool useBisection = fPrev*fCur>Literal(0);
846
+ while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
847
+ {
848
+ ++m_numIters;
849
+
850
+ // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
851
+ RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
852
+ RealScalar b = fCur - a / muCur;
853
+ // And find mu such that f(mu)==0:
854
+ RealScalar muZero = -a/b;
855
+ RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
856
+
857
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
858
+ assert((numext::isfinite)(fZero));
859
+ #endif
860
+
861
+ muPrev = muCur;
862
+ fPrev = fCur;
863
+ muCur = muZero;
864
+ fCur = fZero;
865
+
866
+ if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
867
+ if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
868
+ if (abs(fCur)>abs(fPrev)) useBisection = true;
869
+ }
870
+
871
+ // fall back on bisection method if rational interpolation did not work
872
+ if (useBisection)
873
+ {
874
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
875
+ std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
876
+ #endif
877
+ RealScalar leftShifted, rightShifted;
878
+ if (shift == left)
879
+ {
880
+ // to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
881
+ // the factor 2 is to be more conservative
882
+ leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
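+           // Why this lower bound (sketch): with mu >= 2*|z(k)|/sqrt(real_max) and diag(k)+shift >= 0,
+           // both |z(k)/mu| and |z(k)/(diag(k)+shift+mu)| are at most sqrt(real_max)/2, so their
+           // product stays below real_max/4 and the secular equation can be evaluated without
+           // overflow; the assert just below checks exactly that product.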
883
+
884
+ // check that we did it right:
885
+ eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
886
+ // I don't understand why the case k==0 would be special there:
887
+ // if (k == 0) rightShifted = right - left; else
888
+ rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
889
+ }
890
+ else
891
+ {
892
+ leftShifted = -(right - left) * RealScalar(0.51);
893
+ if(k+1<n)
894
+ rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
895
+ else
896
+ rightShifted = -(std::numeric_limits<RealScalar>::min)();
897
+ }
898
+
899
+ RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
900
+ eigen_internal_assert(fLeft<Literal(0));
901
+
902
+ #if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_SANITY_CHECKS
903
+ RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
904
+ #endif
905
+
906
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
907
+ if(!(numext::isfinite)(fLeft))
908
+ std::cout << "f(" << leftShifted << ") =" << fLeft << " ; " << left << " " << shift << " " << right << "\n";
909
+ assert((numext::isfinite)(fLeft));
910
+
911
+ if(!(numext::isfinite)(fRight))
912
+ std::cout << "f(" << rightShifted << ") =" << fRight << " ; " << left << " " << shift << " " << right << "\n";
913
+ // assert((numext::isfinite)(fRight));
914
+ #endif
915
+
916
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
917
+ if(!(fLeft * fRight<0))
918
+ {
919
+ std::cout << "f(leftShifted) using leftShifted=" << leftShifted << " ; diagShifted(1:10):" << diagShifted.head(10).transpose() << "\n ; "
920
+ << "left==shift=" << bool(left==shift) << " ; left-shift = " << (left-shift) << "\n";
921
+ std::cout << "k=" << k << ", " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; "
922
+ << "[" << left << " .. " << right << "] -> [" << leftShifted << " " << rightShifted << "], shift=" << shift
923
+ << " , f(right)=" << secularEq(0, col0, diag, perm, diagShifted, shift)
924
+ << " == " << secularEq(right, col0, diag, perm, diag, 0) << " == " << fRight << "\n";
925
+ }
926
+ #endif
927
+ eigen_internal_assert(fLeft * fRight < Literal(0));
928
+
929
+ if(fLeft<Literal(0))
930
+ {
931
+ while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
932
+ {
933
+ RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
934
+ fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
935
+ eigen_internal_assert((numext::isfinite)(fMid));
936
+
937
+ if (fLeft * fMid < Literal(0))
938
+ {
939
+ rightShifted = midShifted;
940
+ }
941
+ else
942
+ {
943
+ leftShifted = midShifted;
944
+ fLeft = fMid;
945
+ }
946
+ }
947
+ muCur = (leftShifted + rightShifted) / Literal(2);
948
+ }
949
+ else
950
+ {
951
+             // We have a problem, as shifting to the left or to the right gives either a positive or a negative value
952
+ // at the middle of [left,right]...
953
+             // Instead of aborting or entering an infinite loop,
954
+ // let's just use the middle as the estimated zero-crossing:
955
+ muCur = (right - left) * RealScalar(0.5);
956
+ if(shift == right)
957
+ muCur = -muCur;
958
+ }
959
+ }
960
+
961
+ singVals[k] = shift + muCur;
962
+ shifts[k] = shift;
963
+ mus[k] = muCur;
964
+
965
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
966
+ if(k+1<n)
967
+ std::cout << "found " << singVals[k] << " == " << shift << " + " << muCur << " from " << diag(k) << " .. " << diag(k+1) << "\n";
968
+ #endif
969
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
970
+ assert(k==0 || singVals[k]>=singVals[k-1]);
971
+ assert(singVals[k]>=diag(k));
972
+ #endif
973
+
974
+ // perturb singular value slightly if it equals diagonal entry to avoid division by zero later
975
+       // (deflation is supposed to prevent this from happening)
976
+       // - this does not seem to be necessary anymore -
977
+ // if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
978
+ // if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
979
+ }
980
+ }
981
+
982
+
983
+   // zhat is a perturbation of col0 for which the singular vectors can be computed stably (see Section 3.1)
984
+ template <typename MatrixType>
985
+ void BDCSVD<MatrixType>::perturbCol0
986
+ (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
987
+ const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
988
+ {
989
+ using std::sqrt;
990
+ Index n = col0.size();
991
+ Index m = perm.size();
992
+ if(m==0)
993
+ {
994
+ zhat.setZero();
995
+ return;
996
+ }
997
+ Index lastIdx = perm(m-1);
998
+     // The offset makes it possible to skip deflated entries while computing zhat
999
+ for (Index k = 0; k < n; ++k)
1000
+ {
1001
+ if (col0(k) == Literal(0)) // deflated
1002
+ zhat(k) = Literal(0);
1003
+ else
1004
+ {
1005
+ // see equation (3.6)
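+         // Loosely, prod is initialized below to sigma_last^2 - d_k^2 (kept in factored form) and the
+         // loop then multiplies in factors of the form (sigma_j^2 - d_k^2) / (d_i^2 - d_k^2) over the
+         // non-deflated entries, so that zhat(k) = sign(col0(k)) * sqrt(prod). Each difference of
+         // squares is written as (sigma + d_k) * (mu + (shift - d_k)) so that nearly equal
+         // quantities are never subtracted directly.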
1006
+ RealScalar dk = diag(k);
1007
+ RealScalar prod = (singVals(lastIdx) + dk) * (mus(lastIdx) + (shifts(lastIdx) - dk));
1008
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1009
+ if(prod<0) {
1010
+ std::cout << "k = " << k << " ; z(k)=" << col0(k) << ", diag(k)=" << dk << "\n";
1011
+ std::cout << "prod = " << "(" << singVals(lastIdx) << " + " << dk << ") * (" << mus(lastIdx) << " + (" << shifts(lastIdx) << " - " << dk << "))" << "\n";
1012
+ std::cout << " = " << singVals(lastIdx) + dk << " * " << mus(lastIdx) + (shifts(lastIdx) - dk) << "\n";
1013
+ }
1014
+ assert(prod>=0);
1015
+ #endif
1016
+
1017
+ for(Index l = 0; l<m; ++l)
1018
+ {
1019
+ Index i = perm(l);
1020
+ if(i!=k)
1021
+ {
1022
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1023
+ if(i>=k && (l==0 || l-1>=m))
1024
+ {
1025
+ std::cout << "Error in perturbCol0\n";
1026
+ std::cout << " " << k << "/" << n << " " << l << "/" << m << " " << i << "/" << n << " ; " << col0(k) << " " << diag(k) << " " << "\n";
1027
+ std::cout << " " <<diag(i) << "\n";
1028
+ Index j = (i<k /*|| l==0*/) ? i : perm(l-1);
1029
+ std::cout << " " << "j=" << j << "\n";
1030
+ }
1031
+ #endif
1032
+ Index j = i<k ? i : perm(l-1);
1033
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1034
+ if(!(dk!=Literal(0) || diag(i)!=Literal(0)))
1035
+ {
1036
+ std::cout << "k=" << k << ", i=" << i << ", l=" << l << ", perm.size()=" << perm.size() << "\n";
1037
+ }
1038
+ assert(dk!=Literal(0) || diag(i)!=Literal(0));
1039
+ #endif
1040
+ prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
1041
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1042
+ assert(prod>=0);
1043
+ #endif
1044
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1045
+ if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
1046
+ std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
1047
+ << ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
1048
+ #endif
1049
+ }
1050
+ }
1051
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1052
+ std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(lastIdx) + dk) << " * " << mus(lastIdx) + shifts(lastIdx) << " - " << dk << "\n";
1053
+ #endif
1054
+ RealScalar tmp = sqrt(prod);
1055
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1056
+ assert((numext::isfinite)(tmp));
1057
+ #endif
1058
+ zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
1059
+ }
1060
+ }
1061
+ }
1062
+
1063
+ // compute singular vectors
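+   // Sketch of the formulas used here (paper-style notation, sigma_k = shifts(k) + mus(k)):
+   //   U(i,k) ~ zhat(i) / (d_i^2 - sigma_k^2)   and   V(i,k) ~ d_i * zhat(i) / (d_i^2 - sigma_k^2),
+   // each column being normalized afterwards. The denominator is evaluated as
+   // ((d_i - shifts(k)) - mus(k)) * (d_i + singVals[k]) to avoid the cancellation that forming
+   // d_i^2 - sigma_k^2 directly would incur when sigma_k is close to d_i.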
1064
+ template <typename MatrixType>
1065
+ void BDCSVD<MatrixType>::computeSingVecs
1066
+ (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
1067
+ const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)
1068
+ {
1069
+ Index n = zhat.size();
1070
+ Index m = perm.size();
1071
+
1072
+ for (Index k = 0; k < n; ++k)
1073
+ {
1074
+ if (zhat(k) == Literal(0))
1075
+ {
1076
+ U.col(k) = VectorType::Unit(n+1, k);
1077
+ if (m_compV) V.col(k) = VectorType::Unit(n, k);
1078
+ }
1079
+ else
1080
+ {
1081
+ U.col(k).setZero();
1082
+ for(Index l=0;l<m;++l)
1083
+ {
1084
+ Index i = perm(l);
1085
+ U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
1086
+ }
1087
+ U(n,k) = Literal(0);
1088
+ U.col(k).normalize();
1089
+
1090
+ if (m_compV)
1091
+ {
1092
+ V.col(k).setZero();
1093
+ for(Index l=1;l<m;++l)
1094
+ {
1095
+ Index i = perm(l);
1096
+ V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
1097
+ }
1098
+ V(0,k) = Literal(-1);
1099
+ V.col(k).normalize();
1100
+ }
1101
+ }
1102
+ }
1103
+ U.col(n) = VectorType::Unit(n+1, n);
1104
+ }
1105
+
1106
+
1107
+   // pages 12-13
1108
+   // i >= 1, di almost zero and zi nonzero.
1109
+   // We apply a rotation on the left of M to zero out zi
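+   // Sketch of the computation below: with c = M(0,0) and s = M(i,0) = z_i, a Givens rotation with
+   // (cosine, sine) = (c/r, -s/r), r = hypot(c, s), combines rows 0 and i of the block so that the
+   // (i,0) entry becomes 0 and the (0,0) entry becomes r; the tiny fill-in involving d_i is dropped
+   // since d_i is almost zero, so d_i is set to exactly 0 and deflates. The same rotation is
+   // accumulated into the corresponding columns of m_naiveU.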
1110
+ template <typename MatrixType>
1111
+ void BDCSVD<MatrixType>::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size)
1112
+ {
1113
+ using std::abs;
1114
+ using std::sqrt;
1115
+ using std::pow;
1116
+ Index start = firstCol + shift;
1117
+ RealScalar c = m_computed(start, start);
1118
+ RealScalar s = m_computed(start+i, start);
1119
+ RealScalar r = numext::hypot(c,s);
1120
+ if (r == Literal(0))
1121
+ {
1122
+ m_computed(start+i, start+i) = Literal(0);
1123
+ return;
1124
+ }
1125
+ m_computed(start,start) = r;
1126
+ m_computed(start+i, start) = Literal(0);
1127
+ m_computed(start+i, start+i) = Literal(0);
1128
+
1129
+ JacobiRotation<RealScalar> J(c/r,-s/r);
1130
+ if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
1131
+ else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);
1132
+ }// end deflation 43
1133
+
1134
+
1135
+ // page 13
1136
+ // i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)
1137
+   // We apply two rotations so that zj = 0;
1138
+ // TODO deflation44 is still broken and not properly tested
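+   // Sketch: when d_i and d_j (nearly) coincide, a rotation in the (i, j) plane with cosine z_i/r and
+   // sine z_j/r, r = sqrt(z_i^2 + z_j^2), moves all of the weight into z_i and zeroes z_j; d_j is
+   // overwritten with d_i so the two diagonal entries agree exactly, and d_j then deflates. The same
+   // rotation is accumulated into m_naiveU and, when V is computed, into m_naiveV.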
1139
+ template <typename MatrixType>
1140
+ void BDCSVD<MatrixType>::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size)
1141
+ {
1142
+ using std::abs;
1143
+ using std::sqrt;
1144
+ using std::conj;
1145
+ using std::pow;
1146
+ RealScalar c = m_computed(firstColm+i, firstColm);
1147
+ RealScalar s = m_computed(firstColm+j, firstColm);
1148
+ RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
1149
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1150
+ std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; "
1151
+ << m_computed(firstColm + i-1, firstColm) << " "
1152
+ << m_computed(firstColm + i, firstColm) << " "
1153
+ << m_computed(firstColm + i+1, firstColm) << " "
1154
+ << m_computed(firstColm + i+2, firstColm) << "\n";
1155
+ std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " "
1156
+ << m_computed(firstColm + i, firstColm+i) << " "
1157
+ << m_computed(firstColm + i+1, firstColm+i+1) << " "
1158
+ << m_computed(firstColm + i+2, firstColm+i+2) << "\n";
1159
+ #endif
1160
+ if (r==Literal(0))
1161
+ {
1162
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
1163
+ return;
1164
+ }
1165
+ c/=r;
1166
+ s/=r;
1167
+ m_computed(firstColm + i, firstColm) = r;
1168
+ m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
1169
+ m_computed(firstColm + j, firstColm) = Literal(0);
1170
+
1171
+ JacobiRotation<RealScalar> J(c,-s);
1172
+ if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
1173
+ else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);
1174
+ if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);
1175
+ }// end deflation 44
1176
+
1177
+
1178
+ // acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
1179
+ template <typename MatrixType>
1180
+ void BDCSVD<MatrixType>::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift)
1181
+ {
1182
+ using std::sqrt;
1183
+ using std::abs;
1184
+ const Index length = lastCol + 1 - firstCol;
1185
+
1186
+ Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
1187
+ Diagonal<MatrixXr> fulldiag(m_computed);
1188
+ VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);
1189
+
1190
+ const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
1191
+ RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
1192
+ RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
1193
+ RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
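+ 
+     // Roughly: epsilon_strict is the threshold below which an entry of col0 is treated as
+     // negligible (condition 4.2), while the coarser epsilon_coarse is applied to the diagonal
+     // entries (conditions 4.1 and 4.3) and to the sortedness assertion in condition 4.4.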
1194
+
1195
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1196
+ assert(m_naiveU.allFinite());
1197
+ assert(m_naiveV.allFinite());
1198
+ assert(m_computed.allFinite());
1199
+ #endif
1200
+
1201
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1202
+ std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n";
1203
+ #endif
1204
+
1205
+ //condition 4.1
1206
+ if (diag(0) < epsilon_coarse)
1207
+ {
1208
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1209
+ std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n";
1210
+ #endif
1211
+ diag(0) = epsilon_coarse;
1212
+ }
1213
+
1214
+ //condition 4.2
1215
+ for (Index i=1;i<length;++i)
1216
+ if (abs(col0(i)) < epsilon_strict)
1217
+ {
1218
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1219
+ std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << " (diag(" << i << ")=" << diag(i) << ")\n";
1220
+ #endif
1221
+ col0(i) = Literal(0);
1222
+ }
1223
+
1224
+ //condition 4.3
1225
+ for (Index i=1;i<length; i++)
1226
+ if (diag(i) < epsilon_coarse)
1227
+ {
1228
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1229
+ std::cout << "deflation 4.3, cancel z(" << i << ")=" << col0(i) << " because diag(" << i << ")=" << diag(i) << " < " << epsilon_coarse << "\n";
1230
+ #endif
1231
+ deflation43(firstCol, shift, i, length);
1232
+ }
1233
+
1234
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1235
+ assert(m_naiveU.allFinite());
1236
+ assert(m_naiveV.allFinite());
1237
+ assert(m_computed.allFinite());
1238
+ #endif
1239
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1240
+ std::cout << "to be sorted: " << diag.transpose() << "\n\n";
1241
+ std::cout << " : " << col0.transpose() << "\n\n";
1242
+ #endif
1243
+ {
1244
+ // Check for total deflation
1245
+ // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting
1246
+ bool total_deflation = (col0.tail(length-1).array()<considerZero).all();
1247
+
1248
+       // Sort the diagonal entries. Since diag(1:k-1) and diag(k:length) are already sorted, we do a sorted merge.
1249
+ // First, compute the respective permutation.
1250
+ Index *permutation = m_workspaceI.data();
1251
+ {
1252
+ permutation[0] = 0;
1253
+ Index p = 1;
1254
+
1255
+         // Move deflated diagonal entries to the end.
1256
+ for(Index i=1; i<length; ++i)
1257
+ if(abs(diag(i))<considerZero)
1258
+ permutation[p++] = i;
1259
+
1260
+ Index i=1, j=k+1;
1261
+ for( ; p < length; ++p)
1262
+ {
1263
+ if (i > k) permutation[p] = j++;
1264
+ else if (j >= length) permutation[p] = i++;
1265
+ else if (diag(i) < diag(j)) permutation[p] = j++;
1266
+ else permutation[p] = i++;
1267
+ }
1268
+ }
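+ 
+       // Sketch: the merge above walks the two sorted runs picking the larger entry first, so
+       // `permutation` lists the diagonal entries in decreasing order, after the near-zero
+       // (deflated) ones collected right behind index 0. The loop below consumes `permutation`
+       // from the back, which leaves diag in non-decreasing order with the deflated entries
+       // pushed to the end, as the EIGEN_BDCSVD_SANITY_CHECKS block further down verifies.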
1269
+
1270
+ // If we have a total deflation, then we have to insert diag(0) at the right place
1271
+ if(total_deflation)
1272
+ {
1273
+ for(Index i=1; i<length; ++i)
1274
+ {
1275
+ Index pi = permutation[i];
1276
+ if(abs(diag(pi))<considerZero || diag(0)<diag(pi))
1277
+ permutation[i-1] = permutation[i];
1278
+ else
1279
+ {
1280
+ permutation[i-1] = 0;
1281
+ break;
1282
+ }
1283
+ }
1284
+ }
1285
+
1286
+ // Current index of each col, and current column of each index
1287
+ Index *realInd = m_workspaceI.data()+length;
1288
+ Index *realCol = m_workspaceI.data()+2*length;
1289
+
1290
+ for(int pos = 0; pos< length; pos++)
1291
+ {
1292
+ realCol[pos] = pos;
1293
+ realInd[pos] = pos;
1294
+ }
1295
+
1296
+ for(Index i = total_deflation?0:1; i < length; i++)
1297
+ {
1298
+ const Index pi = permutation[length - (total_deflation ? i+1 : i)];
1299
+ const Index J = realCol[pi];
1300
+
1301
+ using std::swap;
1302
+ // swap diagonal and first column entries:
1303
+ swap(diag(i), diag(J));
1304
+ if(i!=0 && J!=0) swap(col0(i), col0(J));
1305
+
1306
+ // change columns
1307
+ if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));
1308
+ else m_naiveU.col(firstCol+i).segment(0, 2) .swap(m_naiveU.col(firstCol+J).segment(0, 2));
1309
+ if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));
1310
+
1311
+ //update real pos
1312
+ const Index realI = realInd[i];
1313
+ realCol[realI] = J;
1314
+ realCol[pi] = i;
1315
+ realInd[J] = realI;
1316
+ realInd[i] = pi;
1317
+ }
1318
+ }
1319
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1320
+ std::cout << "sorted: " << diag.transpose().format(bdcsvdfmt) << "\n";
1321
+ std::cout << " : " << col0.transpose() << "\n\n";
1322
+ #endif
1323
+
1324
+ //condition 4.4
1325
+ {
1326
+ Index i = length-1;
1327
+ while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;
1328
+ for(; i>1;--i)
1329
+ if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
1330
+ {
1331
+ #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
1332
+ std::cout << "deflation 4.4 with i = " << i << " because " << diag(i) << " - " << diag(i-1) << " == " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*/*diag(i)*/maxDiag << "\n";
1333
+ #endif
1334
+ eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
1335
+ deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
1336
+ }
1337
+ }
1338
+
1339
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1340
+ for(Index j=2;j<length;++j)
1341
+ assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);
1342
+ #endif
1343
+
1344
+ #ifdef EIGEN_BDCSVD_SANITY_CHECKS
1345
+ assert(m_naiveU.allFinite());
1346
+ assert(m_naiveV.allFinite());
1347
+ assert(m_computed.allFinite());
1348
+ #endif
1349
+ }//end deflation
1350
+
1351
+ /** \svd_module
1352
+ *
1353
+   * \return the singular value decomposition of \c *this computed by the Divide & Conquer algorithm
1354
+ *
1355
+ * \sa class BDCSVD
1356
+ */
1357
+ template<typename Derived>
1358
+ BDCSVD<typename MatrixBase<Derived>::PlainObject>
1359
+ MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
1360
+ {
1361
+ return BDCSVD<PlainObject>(*this, computationOptions);
1362
+ }
1363
+
1364
+ } // end namespace Eigen
1365
+
1366
+ #endif