umappp 0.1.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/LICENSE.txt +25 -0
- data/README.md +110 -0
- data/ext/umappp/extconf.rb +25 -0
- data/ext/umappp/numo.hpp +867 -0
- data/ext/umappp/umappp.cpp +225 -0
- data/lib/umappp/version.rb +5 -0
- data/lib/umappp.rb +41 -0
- data/vendor/Eigen/Cholesky +45 -0
- data/vendor/Eigen/CholmodSupport +48 -0
- data/vendor/Eigen/Core +384 -0
- data/vendor/Eigen/Dense +7 -0
- data/vendor/Eigen/Eigen +2 -0
- data/vendor/Eigen/Eigenvalues +60 -0
- data/vendor/Eigen/Geometry +59 -0
- data/vendor/Eigen/Householder +29 -0
- data/vendor/Eigen/IterativeLinearSolvers +48 -0
- data/vendor/Eigen/Jacobi +32 -0
- data/vendor/Eigen/KLUSupport +41 -0
- data/vendor/Eigen/LU +47 -0
- data/vendor/Eigen/MetisSupport +35 -0
- data/vendor/Eigen/OrderingMethods +70 -0
- data/vendor/Eigen/PaStiXSupport +49 -0
- data/vendor/Eigen/PardisoSupport +35 -0
- data/vendor/Eigen/QR +50 -0
- data/vendor/Eigen/QtAlignedMalloc +39 -0
- data/vendor/Eigen/SPQRSupport +34 -0
- data/vendor/Eigen/SVD +50 -0
- data/vendor/Eigen/Sparse +34 -0
- data/vendor/Eigen/SparseCholesky +37 -0
- data/vendor/Eigen/SparseCore +69 -0
- data/vendor/Eigen/SparseLU +50 -0
- data/vendor/Eigen/SparseQR +36 -0
- data/vendor/Eigen/StdDeque +27 -0
- data/vendor/Eigen/StdList +26 -0
- data/vendor/Eigen/StdVector +27 -0
- data/vendor/Eigen/SuperLUSupport +64 -0
- data/vendor/Eigen/UmfPackSupport +40 -0
- data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
- data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
- data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
- data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
- data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/Eigen/src/Core/Array.h +417 -0
- data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
- data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
- data/vendor/Eigen/src/Core/Assign.h +90 -0
- data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
- data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
- data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
- data/vendor/Eigen/src/Core/Block.h +448 -0
- data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
- data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
- data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
- data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
- data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
- data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
- data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
- data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
- data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
- data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
- data/vendor/Eigen/src/Core/DenseBase.h +701 -0
- data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
- data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
- data/vendor/Eigen/src/Core/Diagonal.h +258 -0
- data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
- data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
- data/vendor/Eigen/src/Core/Dot.h +318 -0
- data/vendor/Eigen/src/Core/EigenBase.h +160 -0
- data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
- data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
- data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
- data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
- data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
- data/vendor/Eigen/src/Core/IO.h +258 -0
- data/vendor/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/Eigen/src/Core/Inverse.h +117 -0
- data/vendor/Eigen/src/Core/Map.h +171 -0
- data/vendor/Eigen/src/Core/MapBase.h +310 -0
- data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
- data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
- data/vendor/Eigen/src/Core/Matrix.h +565 -0
- data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
- data/vendor/Eigen/src/Core/NestByValue.h +85 -0
- data/vendor/Eigen/src/Core/NoAlias.h +109 -0
- data/vendor/Eigen/src/Core/NumTraits.h +335 -0
- data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
- data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
- data/vendor/Eigen/src/Core/Product.h +191 -0
- data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
- data/vendor/Eigen/src/Core/Random.h +218 -0
- data/vendor/Eigen/src/Core/Redux.h +515 -0
- data/vendor/Eigen/src/Core/Ref.h +381 -0
- data/vendor/Eigen/src/Core/Replicate.h +142 -0
- data/vendor/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
- data/vendor/Eigen/src/Core/Reverse.h +217 -0
- data/vendor/Eigen/src/Core/Select.h +164 -0
- data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
- data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
- data/vendor/Eigen/src/Core/Solve.h +188 -0
- data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
- data/vendor/Eigen/src/Core/SolverBase.h +168 -0
- data/vendor/Eigen/src/Core/StableNorm.h +251 -0
- data/vendor/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/Eigen/src/Core/Stride.h +116 -0
- data/vendor/Eigen/src/Core/Swap.h +68 -0
- data/vendor/Eigen/src/Core/Transpose.h +464 -0
- data/vendor/Eigen/src/Core/Transpositions.h +386 -0
- data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
- data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
- data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
- data/vendor/Eigen/src/Core/Visitor.h +381 -0
- data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
- data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
- data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
- data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
- data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
- data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
- data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
- data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
- data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
- data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
- data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
- data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
- data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
- data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
- data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
- data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
- data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
- data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
- data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
- data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
- data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
- data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
- data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
- data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
- data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
- data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
- data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
- data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
- data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
- data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
- data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
- data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
- data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/Eigen/src/Core/util/Constants.h +563 -0
- data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
- data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
- data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
- data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
- data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
- data/vendor/Eigen/src/Core/util/Meta.h +812 -0
- data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
- data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
- data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
- data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
- data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
- data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
- data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
- data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
- data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
- data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
- data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
- data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
- data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
- data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
- data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
- data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
- data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
- data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
- data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
- data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
- data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
- data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
- data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
- data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
- data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
- data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
- data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
- data/vendor/Eigen/src/Geometry/Translation.h +202 -0
- data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
- data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
- data/vendor/Eigen/src/Householder/Householder.h +176 -0
- data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
- data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
- data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/Eigen/src/LU/Determinant.h +117 -0
- data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
- data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
- data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
- data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
- data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
- data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
- data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
- data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
- data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
- data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
- data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
- data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
- data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
- data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
- data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
- data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
- data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
- data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
- data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
- data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
- data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
- data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
- data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
- data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
- data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
- data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
- data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
- data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
- data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
- data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
- data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
- data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
- data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
- data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
- data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
- data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
- data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
- data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
- data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
- data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
- data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
- data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
- data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
- data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
- data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
- data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
- data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
- data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
- data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
- data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
- data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
- data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
- data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
- data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
- data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
- data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
- data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
- data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
- data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
- data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
- data/vendor/Eigen/src/StlSupport/details.h +84 -0
- data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
- data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
- data/vendor/Eigen/src/misc/Image.h +82 -0
- data/vendor/Eigen/src/misc/Kernel.h +79 -0
- data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
- data/vendor/Eigen/src/misc/blas.h +440 -0
- data/vendor/Eigen/src/misc/lapack.h +152 -0
- data/vendor/Eigen/src/misc/lapacke.h +16292 -0
- data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
- data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
- data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
- data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
- data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
- data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
- data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
- data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
- data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/aarand/aarand.hpp +114 -0
- data/vendor/annoy/annoylib.h +1495 -0
- data/vendor/annoy/kissrandom.h +120 -0
- data/vendor/annoy/mman.h +242 -0
- data/vendor/hnswlib/bruteforce.h +152 -0
- data/vendor/hnswlib/hnswalg.h +1192 -0
- data/vendor/hnswlib/hnswlib.h +108 -0
- data/vendor/hnswlib/space_ip.h +282 -0
- data/vendor/hnswlib/space_l2.h +281 -0
- data/vendor/hnswlib/visited_list_pool.h +79 -0
- data/vendor/irlba/irlba.hpp +575 -0
- data/vendor/irlba/lanczos.hpp +212 -0
- data/vendor/irlba/parallel.hpp +474 -0
- data/vendor/irlba/utils.hpp +224 -0
- data/vendor/irlba/wrappers.hpp +228 -0
- data/vendor/kmeans/Base.hpp +75 -0
- data/vendor/kmeans/Details.hpp +79 -0
- data/vendor/kmeans/HartiganWong.hpp +492 -0
- data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
- data/vendor/kmeans/InitializeNone.hpp +44 -0
- data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
- data/vendor/kmeans/InitializeRandom.hpp +91 -0
- data/vendor/kmeans/Kmeans.hpp +161 -0
- data/vendor/kmeans/Lloyd.hpp +134 -0
- data/vendor/kmeans/MiniBatch.hpp +269 -0
- data/vendor/kmeans/QuickSearch.hpp +179 -0
- data/vendor/kmeans/compute_centroids.hpp +32 -0
- data/vendor/kmeans/compute_wcss.hpp +27 -0
- data/vendor/kmeans/is_edge_case.hpp +42 -0
- data/vendor/kmeans/random.hpp +55 -0
- data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
- data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
- data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
- data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
- data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
- data/vendor/knncolle/knncolle.hpp +34 -0
- data/vendor/knncolle/utils/Base.hpp +100 -0
- data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
- data/vendor/knncolle/utils/distances.hpp +98 -0
- data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
- data/vendor/powerit/PowerIterations.hpp +157 -0
- data/vendor/umappp/NeighborList.hpp +37 -0
- data/vendor/umappp/Umap.hpp +662 -0
- data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
- data/vendor/umappp/find_ab.hpp +157 -0
- data/vendor/umappp/neighbor_similarities.hpp +136 -0
- data/vendor/umappp/optimize_layout.hpp +285 -0
- data/vendor/umappp/spectral_init.hpp +181 -0
- data/vendor/umappp/umappp.hpp +13 -0
- metadata +465 -0
@@ -0,0 +1,212 @@
|
|
1
|
+
#ifndef IRLBA_LANCZOS_HPP
|
2
|
+
#define IRLBA_LANCZOS_HPP
|
3
|
+
|
4
|
+
#include "Eigen/Dense"
|
5
|
+
#include "utils.hpp"
|
6
|
+
#include "wrappers.hpp"
|
7
|
+
#include <cmath>
|
8
|
+
#include <limits>
|
9
|
+
|
10
|
+
/**
|
11
|
+
* @file lanczos.hpp
|
12
|
+
*
|
13
|
+
* @brief Perform the Lanczos bidiagonalization iterations.
|
14
|
+
*/
|
15
|
+
|
16
|
+
namespace irlba {
|
17
|
+
|
18
|
+
/**
|
19
|
+
* @brief Perform Lanczos bidiagonalization on an input matrix.
|
20
|
+
*/
|
21
|
+
class LanczosBidiagonalization {
|
22
|
+
public:
|
23
|
+
struct Defaults {
|
24
|
+
/**
|
25
|
+
* See `set_epsilon()` for details.
|
26
|
+
*/
|
27
|
+
static constexpr double epsilon = -1;
|
28
|
+
};
|
29
|
+
public:
|
30
|
+
/**
|
31
|
+
* Set the tolerance to use to define invariant subspaces.
|
32
|
+
* This is used as the lower bound for the L2 norm for the subspace vectors;
|
33
|
+
* below this bound, vectors are treated as all-zero and are instead filled with random draws from a normal distribution.
|
34
|
+
*
|
35
|
+
* @param e A positive number defining the tolerance.
|
36
|
+
* If negative, we instead use the machine epsilon to the power of 0.8 (the same value as the **irlba** R package).
|
37
|
+
*
|
38
|
+
* @return A reference to the `LanczosBidiagonalization` instance.
|
39
|
+
*/
|
40
|
+
LanczosBidiagonalization& set_epsilon(double e = Defaults::epsilon) {
|
41
|
+
epsilon = e;
|
42
|
+
return *this;
|
43
|
+
}
|
44
|
+
|
45
|
+
public:
|
46
|
+
/**
|
47
|
+
* @brief Intermediate data structures to avoid repeated allocations.
|
48
|
+
*/
|
49
|
+
struct Intermediates {
|
50
|
+
/**
|
51
|
+
* @tparam M Matrix class, most typically from the **Eigen** library.
|
52
|
+
*
|
53
|
+
* @param mat Instance of a matrix class `M`.
|
54
|
+
*/
|
55
|
+
template<class M>
|
56
|
+
Intermediates(const M& mat) : F(mat.cols()), W_next(mat.rows()), orthog_tmp(mat.cols()) {}
|
57
|
+
|
58
|
+
/**
|
59
|
+
* Obtain the residual vector, see algorithm 2.1 of Baglama and Reichel (2005).
|
60
|
+
*
|
61
|
+
* @return Vector of residuals of length equal to the number of columns of `mat` in `run()`.
|
62
|
+
*/
|
63
|
+
const Eigen::VectorXd& residuals() const {
|
64
|
+
return F;
|
65
|
+
}
|
66
|
+
|
67
|
+
/**
|
68
|
+
* @cond
|
69
|
+
*/
|
70
|
+
Eigen::VectorXd F;
|
71
|
+
Eigen::VectorXd W_next;
|
72
|
+
Eigen::VectorXd orthog_tmp;
|
73
|
+
/**
|
74
|
+
* @endcond
|
75
|
+
*/
|
76
|
+
};
|
77
|
+
|
78
|
+
template<class M>
|
79
|
+
Intermediates initialize(const M& mat) const {
|
80
|
+
return Intermediates(mat);
|
81
|
+
}
|
82
|
+
|
83
|
+
public:
|
84
|
+
/**
|
85
|
+
* Perform the Lanczos bidiagonalization on an input matrix, optionally with scaling and centering.
|
86
|
+
* This implements Algorithm 2.1 described by Baglama and Reichel (2005).
|
87
|
+
* Support is provided for centering and scaling without modifying `mat`.
|
88
|
+
* Protection against invariant subspaces is also implemented.
|
89
|
+
*
|
90
|
+
* @tparam M Matrix class, most typically from the **Eigen** library.
|
91
|
+
* See the `Irlba` documentation for a detailed description of the expected methods.
|
92
|
+
* @tparam Engine A functor that, when called with no arguments, returns a random integer from a discrete uniform distribution.
|
93
|
+
*
|
94
|
+
* @param mat Input matrix.
|
95
|
+
* @param W Output matrix with number of rows equal to `mat.rows()`.
|
96
|
+
* The size of the working subspace is defined from the number of columns.
|
97
|
+
* The first `start` columns should contain orthonormal column vectors with non-zero L2 norms.
|
98
|
+
* @param V Matrix with number of rows equal to `mat.cols()` and number of columns equal to `W.cols()`.
|
99
|
+
* The first `start + 1` columns should contain orthonormal column vectors with non-zero L2 norms.
|
100
|
+
* @param B Square matrix with number of rows and columns equal to the size of the working subspace.
|
101
|
+
* Number of values is defined by `set_number()`.
|
102
|
+
* @param eng An instance of a random number `Engine`.
|
103
|
+
* @param inter Collection of intermediate data structures generated by calling `initialize()` on `mat`.
|
104
|
+
* @param start The dimension from which to start the bidiagonalization.
|
105
|
+
*
|
106
|
+
* @return
|
107
|
+
* `W` is filled with orthonormal vectors, as is `V`.
|
108
|
+
* `B` is filled with upper diagonal entries.
|
109
|
+
*/
|
110
|
+
template<class M, class Engine>
|
111
|
+
void run(
|
112
|
+
const M& mat,
|
113
|
+
Eigen::MatrixXd& W,
|
114
|
+
Eigen::MatrixXd& V,
|
115
|
+
Eigen::MatrixXd& B,
|
116
|
+
Engine& eng,
|
117
|
+
Intermediates& inter,
|
118
|
+
int start = 0)
|
119
|
+
const {
|
120
|
+
const double eps = (epsilon < 0 ? std::pow(std::numeric_limits<double>::epsilon(), 0.8) : epsilon);
|
121
|
+
|
122
|
+
int work = W.cols();
|
123
|
+
auto& F = inter.F;
|
124
|
+
auto& W_next = inter.W_next;
|
125
|
+
auto& otmp = inter.orthog_tmp;
|
126
|
+
|
127
|
+
F = V.col(start);
|
128
|
+
if constexpr(has_multiply_method<M>::value) {
|
129
|
+
W_next.noalias() = mat * F;
|
130
|
+
} else {
|
131
|
+
mat.multiply(F, W_next);
|
132
|
+
}
|
133
|
+
|
134
|
+
// If start = 0, we assume that it's already normalized, see argument description for 'V'.
|
135
|
+
if (start) {
|
136
|
+
orthogonalize_vector(W, W_next, start, otmp);
|
137
|
+
}
|
138
|
+
|
139
|
+
double S = W_next.norm();
|
140
|
+
if (S < eps) {
|
141
|
+
throw -4;
|
142
|
+
}
|
143
|
+
W_next /= S;
|
144
|
+
W.col(start) = W_next;
|
145
|
+
|
146
|
+
// The Lanczos iterations themselves.
|
147
|
+
for (int j = start; j < work; ++j) {
|
148
|
+
if constexpr(has_adjoint_multiply_method<M>::value) {
|
149
|
+
F.noalias() = mat.adjoint() * W.col(j);
|
150
|
+
} else {
|
151
|
+
mat.adjoint_multiply(W.col(j), F);
|
152
|
+
}
|
153
|
+
|
154
|
+
F -= S * V.col(j); // equivalent to daxpy.
|
155
|
+
orthogonalize_vector(V, F, j + 1, otmp);
|
156
|
+
|
157
|
+
if (j + 1 < work) {
|
158
|
+
double R_F = F.norm();
|
159
|
+
|
160
|
+
if (R_F < eps) {
|
161
|
+
fill_with_random_normals(F, eng);
|
162
|
+
orthogonalize_vector(V, F, j + 1, otmp);
|
163
|
+
R_F = F.norm();
|
164
|
+
F /= R_F;
|
165
|
+
R_F = 0;
|
166
|
+
} else {
|
167
|
+
F /= R_F;
|
168
|
+
}
|
169
|
+
|
170
|
+
V.col(j + 1) = F;
|
171
|
+
|
172
|
+
B(j, j) = S;
|
173
|
+
B(j, j + 1) = R_F;
|
174
|
+
|
175
|
+
if constexpr(has_multiply_method<M>::value) {
|
176
|
+
W_next.noalias() = mat * F;
|
177
|
+
} else {
|
178
|
+
mat.multiply(F, W_next);
|
179
|
+
}
|
180
|
+
|
181
|
+
// Full re-orthogonalization, using the left-most 'j + 1' columns of W.
|
182
|
+
// Recall that W_next will be the 'j + 2'-th column, i.e., W.col(j + 1) in
|
183
|
+
// 0-indexed terms, so we want to orthogonalize to all previous columns.
|
184
|
+
orthogonalize_vector(W, W_next, j + 1, otmp);
|
185
|
+
|
186
|
+
S = W_next.norm();
|
187
|
+
if (S < eps) {
|
188
|
+
fill_with_random_normals(W_next, eng);
|
189
|
+
orthogonalize_vector(W, W_next, j + 1, otmp);
|
190
|
+
S = W_next.norm();
|
191
|
+
W_next /= S;
|
192
|
+
S = 0;
|
193
|
+
} else {
|
194
|
+
W_next /= S;
|
195
|
+
}
|
196
|
+
|
197
|
+
W.col(j + 1) = W_next;
|
198
|
+
} else {
|
199
|
+
B(j, j) = S;
|
200
|
+
}
|
201
|
+
}
|
202
|
+
|
203
|
+
return;
|
204
|
+
}
|
205
|
+
|
206
|
+
private:
|
207
|
+
double epsilon = Defaults::epsilon;
|
208
|
+
};
|
209
|
+
|
210
|
+
}
|
211
|
+
|
212
|
+
#endif
|
@@ -0,0 +1,474 @@
|
|
1
|
+
#ifndef IRLBA_PARALLEL_HPP
|
2
|
+
#define IRLBA_PARALLEL_HPP
|
3
|
+
|
4
|
+
#include "utils.hpp"
|
5
|
+
#include <vector>
|
6
|
+
#include "Eigen/Dense"
|
7
|
+
|
8
|
+
/**
|
9
|
+
* @file parallel.hpp
|
10
|
+
*
|
11
|
+
* @brief Sparse matrix class with parallelized multiplication.
|
12
|
+
*/
|
13
|
+
|
14
|
+
namespace irlba {
|
15
|
+
|
16
|
+
/**
|
17
|
+
* @brief Sparse matrix with customizable parallelization.
|
18
|
+
*
|
19
|
+
* This provides an alternative to `Eigen::SparseMatrix` for parallelized multiplication of compressed sparse matrices.
|
20
|
+
* Unlike Eigen, this implementation is able to parallelize when the multiplication does not align well with the storage layout,
|
21
|
+
* e.g., multiplication of a compressed sparse column matrix by a dense vector on the right hand side.
|
22
|
+
* On construction, it also pre-allocates the rows and/or columns to each thread, aiming to balance the number of non-zero elements that each thread needs to process.
|
23
|
+
* All subsequent multiplications can then use these allocations, which is useful for cases like `Irlba` where the cost of pre-allocation is abrogated by repeated multiplication calls.
|
24
|
+
*
|
25
|
+
* Some cursory testing indicates that the performance of this implementation is comparable to Eigen for OpenMP-based parallelization.
|
26
|
+
* However, the real purpose of this class is to support custom parallelization schemes in cases where OpenMP is not available.
|
27
|
+
* This is achieved by defining `IRLBA_CUSTOM_PARALLEL` macro to the name of a function implementing a custom scheme.
|
28
|
+
* Such a function should accept two arguments - an integer specifying the number of threads, and a lambda that accepts a thread number.
|
29
|
+
* It should then loop over the number of threads and launch one job for each thread via the lambda.
|
30
|
+
* Once all threads are complete, the function should return.
|
31
|
+
*
|
32
|
+
* @tparam column_major Whether the matrix should be in compressed sparse column format.
|
33
|
+
* If `false`, this is assumed to be in row-major format.
|
34
|
+
* @tparam ValueArray Array class containing numeric values for the non-zero values.
|
35
|
+
* Should support a read-only `[]` operator.
|
36
|
+
* @tparam IndexArray Array class containing integer values for the indices of the non-zero values.
|
37
|
+
* Should support a read-only `[]` operator.
|
38
|
+
* @tparam PointerArray Array class containing integer values for the pointers to the row/column boundaries.
|
39
|
+
* Should support a read-only `[]` operator.
|
40
|
+
*/
|
41
|
+
template<
|
42
|
+
bool column_major = true,
|
43
|
+
class ValueArray = std::vector<double>,
|
44
|
+
class IndexArray = std::vector<int>,
|
45
|
+
class PointerArray = std::vector<size_t>
|
46
|
+
>
|
47
|
+
class ParallelSparseMatrix {
|
48
|
+
public:
|
49
|
+
/**
|
50
|
+
* Default constructor.
|
51
|
+
* This object cannot be used for any operations.
|
52
|
+
*/
|
53
|
+
ParallelSparseMatrix() {}
|
54
|
+
|
55
|
+
/**
|
56
|
+
* @param nr Number of rows.
|
57
|
+
* @param nc Number of columns.
|
58
|
+
* @param x Values of non-zero elements.
|
59
|
+
* @param i Indices of non-zero elements.
|
60
|
+
* Each entry corresponds to a value in `x`, so `i` should be an array of length equal to `x`.
|
61
|
+
* If `column_major = true`, `i` should contain row indices; otherwise it should contain column indices.
|
62
|
+
* @param p Pointers to the start of each column (if `column_major = true`) or row (otherwise).
|
63
|
+
* This should be an ordered array of length equal to the number of columns or rows plus 1.
|
64
|
+
* @param nt Number of threads to be used for multiplication.
|
65
|
+
*
|
66
|
+
* `x`, `i` and `p` represent the typical components of a compressed sparse column/row matrix.
|
67
|
+
* Thus, entries in `i` should be sorted within each column/row, where the boundaries between columns/rows are defined by `p`.
|
68
|
+
*/
|
69
|
+
ParallelSparseMatrix(size_t nr, size_t nc, std::vector<double> x, std::vector<int> i, std::vector<size_t> p, int nt) :
|
70
|
+
primary_dim(column_major ? nc : nr),
|
71
|
+
secondary_dim(column_major ? nr : nc),
|
72
|
+
nthreads(nt),
|
73
|
+
values(std::move(x)),
|
74
|
+
indices(std::move(i)),
|
75
|
+
ptrs(std::move(p))
|
76
|
+
{
|
77
|
+
if (nthreads > 1) {
|
78
|
+
fragment_threads();
|
79
|
+
}
|
80
|
+
}
|
81
|
+
|
82
|
+
/**
|
83
|
+
* @return Number of rows in the matrix.
|
84
|
+
*/
|
85
|
+
auto rows() const {
|
86
|
+
if constexpr(column_major) {
|
87
|
+
return secondary_dim;
|
88
|
+
} else {
|
89
|
+
return primary_dim;
|
90
|
+
}
|
91
|
+
}
|
92
|
+
|
93
|
+
/**
|
94
|
+
* @return Number of columns in the matrix.
|
95
|
+
*/
|
96
|
+
auto cols() const {
|
97
|
+
if constexpr(column_major) {
|
98
|
+
return primary_dim;
|
99
|
+
} else {
|
100
|
+
return secondary_dim;
|
101
|
+
}
|
102
|
+
}
|
103
|
+
|
104
|
+
/**
|
105
|
+
* @return Non-zero elements in compressed sparse row/column format.
|
106
|
+
* This is equivalent to `x` in the constructor.
|
107
|
+
*/
|
108
|
+
const ValueArray& get_values() const {
|
109
|
+
return values;
|
110
|
+
}
|
111
|
+
|
112
|
+
/**
|
113
|
+
* @return Indices of non-zero elements, equivalent to `i` in the constructor.
|
114
|
+
* These are row or column indices for compressed sparse row or column format, respectively, depending on `column_major`.
|
115
|
+
*/
|
116
|
+
const IndexArray& get_indices() const {
|
117
|
+
return indices;
|
118
|
+
}
|
119
|
+
|
120
|
+
/**
|
121
|
+
* @return Pointers to the start of each row or column, equivalent to `p` in the constructor.
|
122
|
+
*/
|
123
|
+
const PointerArray& get_pointers() const {
|
124
|
+
return ptrs;
|
125
|
+
}
|
126
|
+
|
127
|
+
private:
|
128
|
+
size_t primary_dim, secondary_dim;
|
129
|
+
int nthreads;
|
130
|
+
ValueArray values;
|
131
|
+
IndexArray indices;
|
132
|
+
PointerArray ptrs;
|
133
|
+
|
134
|
+
typedef typename std::remove_const<typename std::remove_reference<decltype(indices[0])>::type>::type IndexType;
|
135
|
+
|
136
|
+
public:
|
137
|
+
/**
|
138
|
+
* This should only be called if `nt > 1` in the constructor, otherwise it will not be initialized.
|
139
|
+
*
|
140
|
+
* @return Vector of length equal to the number of threads,
|
141
|
+
* specifying the first dimension along the primary extent (e.g., column for `column_major = true`) that each thread works on.
|
142
|
+
*/
|
143
|
+
const std::vector<size_t>& get_primary_starts() const {
|
144
|
+
return primary_starts;
|
145
|
+
}
|
146
|
+
|
147
|
+
/**
|
148
|
+
* This should only be called if `nt > 1` in the constructor, otherwise it will not be initialized.
|
149
|
+
*
|
150
|
+
* @return Vector of length equal to the number of threads,
|
151
|
+
* specifying the one-past-the-last dimension along the primary extent (e.g., column for `column_major = true`) that each thread works on.
|
152
|
+
*/
|
153
|
+
const std::vector<size_t>& get_primary_ends() const {
|
154
|
+
return primary_ends;
|
155
|
+
}
|
156
|
+
|
157
|
+
/**
|
158
|
+
* Type of the elements inside a `PointerArray`.
|
159
|
+
*/
|
160
|
+
typedef typename std::remove_const<typename std::remove_reference<decltype(ptrs[0])>::type>::type PointerType;
|
161
|
+
|
162
|
+
/**
|
163
|
+
* This should only be called if `nt > 1` in the constructor, otherwise it will not be initialized.
|
164
|
+
*
|
165
|
+
* @return Vector of length equal to the number of threads plus one.
|
166
|
+
* Each inner vector is of length equal to the size of the primary extent (e.g., number of columns for `column_major = true`).
|
167
|
+
* For thread `i`, the vectors `i` and `i + 1` define the ranges of non-zero elements assigned to that thread within each primary dimension.
|
168
|
+
* This is guaranteed to contain all and only non-zero elements with indices in a contiguous range of secondary dimensions.
|
169
|
+
*/
|
170
|
+
const std::vector<std::vector<PointerType> >& get_secondary_nonzero_starts() const {
|
171
|
+
return secondary_nonzero_starts;
|
172
|
+
}
|
173
|
+
|
174
|
+
private:
|
175
|
+
std::vector<size_t> primary_starts, primary_ends;
|
176
|
+
std::vector<std::vector<PointerType> > secondary_nonzero_starts;
|
177
|
+
|
178
|
+
void fragment_threads() {
|
179
|
+
auto total_nzeros = ptrs[primary_dim]; // last element - not using back() to avoid an extra requirement on PointerArray.
|
180
|
+
PointerType per_thread = std::ceil(static_cast<double>(total_nzeros) / nthreads);
|
181
|
+
|
182
|
+
// Splitting columns across threads so each thread processes the same number of nonzero elements.
|
183
|
+
primary_starts.resize(nthreads);
|
184
|
+
primary_ends.resize(nthreads);
|
185
|
+
{
|
186
|
+
size_t primary_counter = 0;
|
187
|
+
PointerType sofar = per_thread;
|
188
|
+
for (int t = 0; t < nthreads; ++t) {
|
189
|
+
primary_starts[t] = primary_counter;
|
190
|
+
while (primary_counter < primary_dim && ptrs[primary_counter + 1] <= sofar) {
|
191
|
+
++primary_counter;
|
192
|
+
}
|
193
|
+
primary_ends[t] = primary_counter;
|
194
|
+
sofar += per_thread;
|
195
|
+
}
|
196
|
+
}
|
197
|
+
|
198
|
+
// Splitting rows across threads so each thread processes the same number of nonzero elements.
|
199
|
+
secondary_nonzero_starts.resize(nthreads + 1, std::vector<PointerType>(primary_dim));
|
200
|
+
{
|
201
|
+
std::vector<PointerType> secondary_nonzeros(secondary_dim);
|
202
|
+
for (PointerType i = 0; i < total_nzeros; ++i) { // don't using range for loop to avoid an extra requirement on IndexArray.
|
203
|
+
++(secondary_nonzeros[indices[i]]);
|
204
|
+
}
|
205
|
+
|
206
|
+
std::vector<IndexType> secondary_ends(nthreads);
|
207
|
+
IndexType secondary_counter = 0;
|
208
|
+
PointerType sofar = per_thread;
|
209
|
+
PointerType cum_rows = 0;
|
210
|
+
|
211
|
+
for (int t = 0; t < nthreads; ++t) {
|
212
|
+
while (secondary_counter < secondary_dim && cum_rows <= sofar) {
|
213
|
+
cum_rows += secondary_nonzeros[secondary_counter];
|
214
|
+
++secondary_counter;
|
215
|
+
}
|
216
|
+
secondary_ends[t] = secondary_counter;
|
217
|
+
sofar += per_thread;
|
218
|
+
}
|
219
|
+
|
220
|
+
for (size_t c = 0; c < primary_dim; ++c) {
|
221
|
+
auto primary_start = ptrs[c], primary_end = ptrs[c + 1];
|
222
|
+
secondary_nonzero_starts[0][c] = primary_start;
|
223
|
+
|
224
|
+
auto s = primary_start;
|
225
|
+
for (int thread = 0; thread < nthreads; ++thread) {
|
226
|
+
while (s < primary_end && indices[s] < secondary_ends[thread]) {
|
227
|
+
++s;
|
228
|
+
}
|
229
|
+
secondary_nonzero_starts[thread + 1][c] = s;
|
230
|
+
}
|
231
|
+
}
|
232
|
+
}
|
233
|
+
}
|
234
|
+
|
235
|
+
private:
|
236
|
+
template<class Right>
|
237
|
+
void indirect_multiply(const Right& rhs, Eigen::VectorXd& output) const {
|
238
|
+
if constexpr(has_data_method<Right>::value) {
|
239
|
+
// If it has a .data() method, the data values are already computed
|
240
|
+
// and sitting in memory, so we just use that directly.
|
241
|
+
indirect_multiply_internal(rhs, output);
|
242
|
+
} else {
|
243
|
+
// Otherwise, it is presumably an expression that involves some work
|
244
|
+
// to get the values. We realize it into a VectorXd to ensure that
|
245
|
+
// it is not repeatedly evaluated on each access to 'rhs'.
|
246
|
+
indirect_multiply_internal(Eigen::VectorXd(rhs), output);
|
247
|
+
}
|
248
|
+
}
|
249
|
+
|
250
|
+
template<class Right>
|
251
|
+
void indirect_multiply_internal(const Right& rhs, Eigen::VectorXd& output) const {
|
252
|
+
output.setZero();
|
253
|
+
|
254
|
+
if (nthreads == 1) {
|
255
|
+
for (size_t c = 0; c < primary_dim; ++c) {
|
256
|
+
auto start = ptrs[c];
|
257
|
+
auto end = ptrs[c + 1];
|
258
|
+
auto val = rhs.coeff(c);
|
259
|
+
for (PointerType s = start; s < end; ++s) {
|
260
|
+
output.coeffRef(indices[s]) += values[s] * val;
|
261
|
+
}
|
262
|
+
}
|
263
|
+
return;
|
264
|
+
}
|
265
|
+
|
266
|
+
#ifndef IRLBA_CUSTOM_PARALLEL
|
267
|
+
#pragma omp parallel for num_threads(nthreads)
|
268
|
+
for (int t = 0; t < nthreads; ++t) {
|
269
|
+
#else
|
270
|
+
IRLBA_CUSTOM_PARALLEL(nthreads, [&](int t) -> void {
|
271
|
+
#endif
|
272
|
+
|
273
|
+
auto starts = secondary_nonzero_starts[t];
|
274
|
+
auto ends = secondary_nonzero_starts[t + 1];
|
275
|
+
for (size_t c = 0; c < primary_dim; ++c) {
|
276
|
+
auto start = starts[c];
|
277
|
+
auto end = ends[c];
|
278
|
+
auto val = rhs.coeff(c);
|
279
|
+
for (PointerType s = start; s < end; ++s) {
|
280
|
+
output.coeffRef(indices[s]) += values[s] * val;
|
281
|
+
}
|
282
|
+
}
|
283
|
+
|
284
|
+
#ifndef IRLBA_CUSTOM_PARALLEL
|
285
|
+
}
|
286
|
+
#else
|
287
|
+
});
|
288
|
+
#endif
|
289
|
+
|
290
|
+
return;
|
291
|
+
}
|
292
|
+
|
293
|
+
private:
|
294
|
+
template<class Right>
|
295
|
+
void direct_multiply(const Right& rhs, Eigen::VectorXd& output) const {
|
296
|
+
if constexpr(has_data_method<Right>::value) {
|
297
|
+
// If it has a .data() method, the data values are already computed
|
298
|
+
// and sitting in memory, so we just use that directly.
|
299
|
+
direct_multiply_internal(rhs, output);
|
300
|
+
} else {
|
301
|
+
// Otherwise, it is presumably an expression that involves some work
|
302
|
+
// to get the values. We realize it into a VectorXd to ensure that
|
303
|
+
// it is not repeatedly evaluated on each access to 'rhs'.
|
304
|
+
direct_multiply_internal(Eigen::VectorXd(rhs), output);
|
305
|
+
}
|
306
|
+
}
|
307
|
+
|
308
|
+
template<class Right>
|
309
|
+
void direct_multiply_internal(const Right& rhs, Eigen::VectorXd& output) const {
|
310
|
+
if (nthreads == 1) {
|
311
|
+
for (size_t c = 0; c < primary_dim; ++c) {
|
312
|
+
output.coeffRef(c) = column_dot_product(c, rhs);
|
313
|
+
}
|
314
|
+
return;
|
315
|
+
}
|
316
|
+
|
317
|
+
#ifndef IRLBA_CUSTOM_PARALLEL
|
318
|
+
#pragma omp parallel for num_threads(nthreads)
|
319
|
+
for (int t = 0; t < nthreads; ++t) {
|
320
|
+
#else
|
321
|
+
IRLBA_CUSTOM_PARALLEL(nthreads, [&](int t) -> void {
|
322
|
+
#endif
|
323
|
+
|
324
|
+
auto curstart = primary_starts[t];
|
325
|
+
auto curend = primary_ends[t];
|
326
|
+
for (size_t c = curstart; c < curend; ++c) {
|
327
|
+
output.coeffRef(c) = column_dot_product(c, rhs);
|
328
|
+
}
|
329
|
+
|
330
|
+
#ifndef IRLBA_CUSTOM_PARALLEL
|
331
|
+
}
|
332
|
+
#else
|
333
|
+
});
|
334
|
+
#endif
|
335
|
+
|
336
|
+
return;
|
337
|
+
}
|
338
|
+
|
339
|
+
template<class Right>
|
340
|
+
double column_dot_product(size_t c, const Right& rhs) const {
|
341
|
+
PointerType primary_start = ptrs[c], primary_end = ptrs[c + 1];
|
342
|
+
double dot = 0;
|
343
|
+
for (PointerType s = primary_start; s < primary_end; ++s) {
|
344
|
+
dot += values[s] * rhs.coeff(indices[s]);
|
345
|
+
}
|
346
|
+
return dot;
|
347
|
+
}
|
348
|
+
|
349
|
+
public:
|
350
|
+
/**
|
351
|
+
* @tparam Right An `Eigen::VectorXd` or equivalent expression.
|
352
|
+
*
|
353
|
+
* @param[in] rhs The right-hand side of the matrix product.
|
354
|
+
* This should be a vector or have only one column.
|
355
|
+
* @param[out] out The output vector to store the matrix product.
|
356
|
+
*
|
357
|
+
* @return `out` is filled with the product of this matrix and `rhs`.
|
358
|
+
*/
|
359
|
+
template<class Right>
|
360
|
+
void multiply(const Right& rhs, Eigen::VectorXd& output) const {
|
361
|
+
if constexpr(column_major) {
|
362
|
+
indirect_multiply(rhs, output);
|
363
|
+
} else {
|
364
|
+
direct_multiply(rhs, output);
|
365
|
+
}
|
366
|
+
}
|
367
|
+
|
368
|
+
/**
|
369
|
+
* @tparam Right An `Eigen::VectorXd` or equivalent expression.
|
370
|
+
*
|
371
|
+
* @param[in] rhs The right-hand side of the matrix product.
|
372
|
+
* This should be a vector or have only one column.
|
373
|
+
* @param[out] out The output vector to store the matrix product.
|
374
|
+
*
|
375
|
+
* @return `out` is filled with the product of the transpose of this matrix and `rhs`.
|
376
|
+
*/
|
377
|
+
template<class Right>
|
378
|
+
void adjoint_multiply(const Right& rhs, Eigen::VectorXd& output) const {
|
379
|
+
if constexpr(column_major) {
|
380
|
+
direct_multiply(rhs, output);
|
381
|
+
} else {
|
382
|
+
indirect_multiply(rhs, output);
|
383
|
+
}
|
384
|
+
}
|
385
|
+
|
386
|
+
public:
|
387
|
+
/**
|
388
|
+
* @return A dense copy of the sparse matrix data.
|
389
|
+
*/
|
390
|
+
Eigen::MatrixXd realize() const {
|
391
|
+
Eigen::MatrixXd output(rows(), cols());
|
392
|
+
output.setZero();
|
393
|
+
|
394
|
+
if constexpr(column_major) {
|
395
|
+
for (size_t c = 0; c < cols(); ++c) {
|
396
|
+
size_t col_start = ptrs[c], col_end = ptrs[c + 1];
|
397
|
+
for (size_t s = col_start; s < col_end; ++s) {
|
398
|
+
output.coeffRef(indices[s], c) = values[s];
|
399
|
+
}
|
400
|
+
}
|
401
|
+
} else {
|
402
|
+
for (size_t r = 0; r < rows(); ++r) {
|
403
|
+
size_t row_start = ptrs[r], row_end = ptrs[r + 1];
|
404
|
+
for (size_t s = row_start; s < row_end; ++s) {
|
405
|
+
output.coeffRef(r, indices[s]) = values[s];
|
406
|
+
}
|
407
|
+
}
|
408
|
+
}
|
409
|
+
|
410
|
+
return output;
|
411
|
+
}
|
412
|
+
};
|
413
|
+
|
414
|
+
/**
|
415
|
+
* @brief Restrict the number of available threads for Eigen.
|
416
|
+
*
|
417
|
+
* @details
|
418
|
+
* Creating an instance of this class will call `Eigen::setNbThreads()` to control the number of available OpenMP threads in Eigen operations.
|
419
|
+
* Destruction will then reset the number of available threads to its prior value.
|
420
|
+
*
|
421
|
+
* If OpenMP is available and `IRLBA_CUSTOM_PARALLEL` is defined, Eigen is restricted to just one thread when an instance of this class is created.
|
422
|
+
* This is done to avoid using OpenMP when a custom parallelization scheme has already been specified.
|
423
|
+
*
|
424
|
+
* If OpenMP is not available, this class has no effect.
|
425
|
+
*/
|
426
|
+
class EigenThreadScope {
public:
#ifdef _OPENMP
    /**
     * @cond
     */
    EigenThreadScope(int n) : previous(Eigen::nbThreads()) {
#ifndef IRLBA_CUSTOM_PARALLEL
        Eigen::setNbThreads(n);
#else
        // A custom parallelization scheme is in charge, so Eigen's own
        // OpenMP-based parallelism is disabled to avoid nested threading.
        Eigen::setNbThreads(1);
#endif
    }
    /**
     * @endcond
     */
#else
    /**
     * @param n Number of threads to be used by Eigen.
     */
    EigenThreadScope([[maybe_unused]] int n) {}
#endif

    /**
     * @cond
     */
    EigenThreadScope(const EigenThreadScope&) = delete;
    EigenThreadScope(EigenThreadScope&&) = delete;
    EigenThreadScope& operator=(const EigenThreadScope&) = delete;
    EigenThreadScope& operator=(EigenThreadScope&&) = delete;

    ~EigenThreadScope() {
#ifdef _OPENMP
        // Restore the thread count that was in effect at construction.
        Eigen::setNbThreads(previous);
#endif
    }
    /**
     * @endcond
     */
private:
    // Thread count prior to construction; only meaningful when OpenMP is
    // enabled, but default-initialized so no member is ever left indeterminate.
    int previous = 0;
};
|
468
|
+
|
469
|
+
|
470
|
+
|
471
|
+
|
472
|
+
}
|
473
|
+
|
474
|
+
#endif
|