umappp 0.1.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/LICENSE.txt +25 -0
- data/README.md +110 -0
- data/ext/umappp/extconf.rb +25 -0
- data/ext/umappp/numo.hpp +867 -0
- data/ext/umappp/umappp.cpp +225 -0
- data/lib/umappp/version.rb +5 -0
- data/lib/umappp.rb +41 -0
- data/vendor/Eigen/Cholesky +45 -0
- data/vendor/Eigen/CholmodSupport +48 -0
- data/vendor/Eigen/Core +384 -0
- data/vendor/Eigen/Dense +7 -0
- data/vendor/Eigen/Eigen +2 -0
- data/vendor/Eigen/Eigenvalues +60 -0
- data/vendor/Eigen/Geometry +59 -0
- data/vendor/Eigen/Householder +29 -0
- data/vendor/Eigen/IterativeLinearSolvers +48 -0
- data/vendor/Eigen/Jacobi +32 -0
- data/vendor/Eigen/KLUSupport +41 -0
- data/vendor/Eigen/LU +47 -0
- data/vendor/Eigen/MetisSupport +35 -0
- data/vendor/Eigen/OrderingMethods +70 -0
- data/vendor/Eigen/PaStiXSupport +49 -0
- data/vendor/Eigen/PardisoSupport +35 -0
- data/vendor/Eigen/QR +50 -0
- data/vendor/Eigen/QtAlignedMalloc +39 -0
- data/vendor/Eigen/SPQRSupport +34 -0
- data/vendor/Eigen/SVD +50 -0
- data/vendor/Eigen/Sparse +34 -0
- data/vendor/Eigen/SparseCholesky +37 -0
- data/vendor/Eigen/SparseCore +69 -0
- data/vendor/Eigen/SparseLU +50 -0
- data/vendor/Eigen/SparseQR +36 -0
- data/vendor/Eigen/StdDeque +27 -0
- data/vendor/Eigen/StdList +26 -0
- data/vendor/Eigen/StdVector +27 -0
- data/vendor/Eigen/SuperLUSupport +64 -0
- data/vendor/Eigen/UmfPackSupport +40 -0
- data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
- data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
- data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
- data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
- data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/Eigen/src/Core/Array.h +417 -0
- data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
- data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
- data/vendor/Eigen/src/Core/Assign.h +90 -0
- data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
- data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
- data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
- data/vendor/Eigen/src/Core/Block.h +448 -0
- data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
- data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
- data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
- data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
- data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
- data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
- data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
- data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
- data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
- data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
- data/vendor/Eigen/src/Core/DenseBase.h +701 -0
- data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
- data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
- data/vendor/Eigen/src/Core/Diagonal.h +258 -0
- data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
- data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
- data/vendor/Eigen/src/Core/Dot.h +318 -0
- data/vendor/Eigen/src/Core/EigenBase.h +160 -0
- data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
- data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
- data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
- data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
- data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
- data/vendor/Eigen/src/Core/IO.h +258 -0
- data/vendor/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/Eigen/src/Core/Inverse.h +117 -0
- data/vendor/Eigen/src/Core/Map.h +171 -0
- data/vendor/Eigen/src/Core/MapBase.h +310 -0
- data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
- data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
- data/vendor/Eigen/src/Core/Matrix.h +565 -0
- data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
- data/vendor/Eigen/src/Core/NestByValue.h +85 -0
- data/vendor/Eigen/src/Core/NoAlias.h +109 -0
- data/vendor/Eigen/src/Core/NumTraits.h +335 -0
- data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
- data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
- data/vendor/Eigen/src/Core/Product.h +191 -0
- data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
- data/vendor/Eigen/src/Core/Random.h +218 -0
- data/vendor/Eigen/src/Core/Redux.h +515 -0
- data/vendor/Eigen/src/Core/Ref.h +381 -0
- data/vendor/Eigen/src/Core/Replicate.h +142 -0
- data/vendor/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
- data/vendor/Eigen/src/Core/Reverse.h +217 -0
- data/vendor/Eigen/src/Core/Select.h +164 -0
- data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
- data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
- data/vendor/Eigen/src/Core/Solve.h +188 -0
- data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
- data/vendor/Eigen/src/Core/SolverBase.h +168 -0
- data/vendor/Eigen/src/Core/StableNorm.h +251 -0
- data/vendor/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/Eigen/src/Core/Stride.h +116 -0
- data/vendor/Eigen/src/Core/Swap.h +68 -0
- data/vendor/Eigen/src/Core/Transpose.h +464 -0
- data/vendor/Eigen/src/Core/Transpositions.h +386 -0
- data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
- data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
- data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
- data/vendor/Eigen/src/Core/Visitor.h +381 -0
- data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
- data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
- data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
- data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
- data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
- data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
- data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
- data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
- data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
- data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
- data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
- data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
- data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
- data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
- data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
- data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
- data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
- data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
- data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
- data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
- data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
- data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
- data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
- data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
- data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
- data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
- data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
- data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
- data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
- data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
- data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
- data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
- data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/Eigen/src/Core/util/Constants.h +563 -0
- data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
- data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
- data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
- data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
- data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
- data/vendor/Eigen/src/Core/util/Meta.h +812 -0
- data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
- data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
- data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
- data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
- data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
- data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
- data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
- data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
- data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
- data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
- data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
- data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
- data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
- data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
- data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
- data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
- data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
- data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
- data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
- data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
- data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
- data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
- data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
- data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
- data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
- data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
- data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
- data/vendor/Eigen/src/Geometry/Translation.h +202 -0
- data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
- data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
- data/vendor/Eigen/src/Householder/Householder.h +176 -0
- data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
- data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
- data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/Eigen/src/LU/Determinant.h +117 -0
- data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
- data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
- data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
- data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
- data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
- data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
- data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
- data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
- data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
- data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
- data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
- data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
- data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
- data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
- data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
- data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
- data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
- data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
- data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
- data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
- data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
- data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
- data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
- data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
- data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
- data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
- data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
- data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
- data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
- data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
- data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
- data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
- data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
- data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
- data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
- data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
- data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
- data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
- data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
- data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
- data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
- data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
- data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
- data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
- data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
- data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
- data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
- data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
- data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
- data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
- data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
- data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
- data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
- data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
- data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
- data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
- data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
- data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
- data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
- data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
- data/vendor/Eigen/src/StlSupport/details.h +84 -0
- data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
- data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
- data/vendor/Eigen/src/misc/Image.h +82 -0
- data/vendor/Eigen/src/misc/Kernel.h +79 -0
- data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
- data/vendor/Eigen/src/misc/blas.h +440 -0
- data/vendor/Eigen/src/misc/lapack.h +152 -0
- data/vendor/Eigen/src/misc/lapacke.h +16292 -0
- data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
- data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
- data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
- data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
- data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
- data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
- data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
- data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
- data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/aarand/aarand.hpp +114 -0
- data/vendor/annoy/annoylib.h +1495 -0
- data/vendor/annoy/kissrandom.h +120 -0
- data/vendor/annoy/mman.h +242 -0
- data/vendor/hnswlib/bruteforce.h +152 -0
- data/vendor/hnswlib/hnswalg.h +1192 -0
- data/vendor/hnswlib/hnswlib.h +108 -0
- data/vendor/hnswlib/space_ip.h +282 -0
- data/vendor/hnswlib/space_l2.h +281 -0
- data/vendor/hnswlib/visited_list_pool.h +79 -0
- data/vendor/irlba/irlba.hpp +575 -0
- data/vendor/irlba/lanczos.hpp +212 -0
- data/vendor/irlba/parallel.hpp +474 -0
- data/vendor/irlba/utils.hpp +224 -0
- data/vendor/irlba/wrappers.hpp +228 -0
- data/vendor/kmeans/Base.hpp +75 -0
- data/vendor/kmeans/Details.hpp +79 -0
- data/vendor/kmeans/HartiganWong.hpp +492 -0
- data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
- data/vendor/kmeans/InitializeNone.hpp +44 -0
- data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
- data/vendor/kmeans/InitializeRandom.hpp +91 -0
- data/vendor/kmeans/Kmeans.hpp +161 -0
- data/vendor/kmeans/Lloyd.hpp +134 -0
- data/vendor/kmeans/MiniBatch.hpp +269 -0
- data/vendor/kmeans/QuickSearch.hpp +179 -0
- data/vendor/kmeans/compute_centroids.hpp +32 -0
- data/vendor/kmeans/compute_wcss.hpp +27 -0
- data/vendor/kmeans/is_edge_case.hpp +42 -0
- data/vendor/kmeans/random.hpp +55 -0
- data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
- data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
- data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
- data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
- data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
- data/vendor/knncolle/knncolle.hpp +34 -0
- data/vendor/knncolle/utils/Base.hpp +100 -0
- data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
- data/vendor/knncolle/utils/distances.hpp +98 -0
- data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
- data/vendor/powerit/PowerIterations.hpp +157 -0
- data/vendor/umappp/NeighborList.hpp +37 -0
- data/vendor/umappp/Umap.hpp +662 -0
- data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
- data/vendor/umappp/find_ab.hpp +157 -0
- data/vendor/umappp/neighbor_similarities.hpp +136 -0
- data/vendor/umappp/optimize_layout.hpp +285 -0
- data/vendor/umappp/spectral_init.hpp +181 -0
- data/vendor/umappp/umappp.hpp +13 -0
- metadata +465 -0
@@ -0,0 +1,274 @@
|
|
1
|
+
// This file is part of Eigen, a lightweight C++ template library
|
2
|
+
// for linear algebra.
|
3
|
+
//
|
4
|
+
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
5
|
+
//
|
6
|
+
// This Source Code Form is subject to the terms of the Mozilla
|
7
|
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
8
|
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
9
|
+
|
10
|
+
#ifndef EIGEN_COMPRESSED_STORAGE_H
|
11
|
+
#define EIGEN_COMPRESSED_STORAGE_H
|
12
|
+
|
13
|
+
namespace Eigen {
|
14
|
+
|
15
|
+
namespace internal {
|
16
|
+
|
17
|
+
/** \internal
|
18
|
+
* Stores a sparse set of values as a list of values and a list of indices.
|
19
|
+
*
|
20
|
+
*/
|
21
|
+
template<typename _Scalar,typename _StorageIndex>
|
22
|
+
class CompressedStorage
|
23
|
+
{
|
24
|
+
public:
|
25
|
+
|
26
|
+
typedef _Scalar Scalar;
|
27
|
+
typedef _StorageIndex StorageIndex;
|
28
|
+
|
29
|
+
protected:
|
30
|
+
|
31
|
+
typedef typename NumTraits<Scalar>::Real RealScalar;
|
32
|
+
|
33
|
+
public:
|
34
|
+
|
35
|
+
CompressedStorage()
|
36
|
+
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
|
37
|
+
{}
|
38
|
+
|
39
|
+
explicit CompressedStorage(Index size)
|
40
|
+
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
|
41
|
+
{
|
42
|
+
resize(size);
|
43
|
+
}
|
44
|
+
|
45
|
+
CompressedStorage(const CompressedStorage& other)
|
46
|
+
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
|
47
|
+
{
|
48
|
+
*this = other;
|
49
|
+
}
|
50
|
+
|
51
|
+
CompressedStorage& operator=(const CompressedStorage& other)
|
52
|
+
{
|
53
|
+
resize(other.size());
|
54
|
+
if(other.size()>0)
|
55
|
+
{
|
56
|
+
internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
|
57
|
+
internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
|
58
|
+
}
|
59
|
+
return *this;
|
60
|
+
}
|
61
|
+
|
62
|
+
void swap(CompressedStorage& other)
|
63
|
+
{
|
64
|
+
std::swap(m_values, other.m_values);
|
65
|
+
std::swap(m_indices, other.m_indices);
|
66
|
+
std::swap(m_size, other.m_size);
|
67
|
+
std::swap(m_allocatedSize, other.m_allocatedSize);
|
68
|
+
}
|
69
|
+
|
70
|
+
~CompressedStorage()
|
71
|
+
{
|
72
|
+
delete[] m_values;
|
73
|
+
delete[] m_indices;
|
74
|
+
}
|
75
|
+
|
76
|
+
void reserve(Index size)
|
77
|
+
{
|
78
|
+
Index newAllocatedSize = m_size + size;
|
79
|
+
if (newAllocatedSize > m_allocatedSize)
|
80
|
+
reallocate(newAllocatedSize);
|
81
|
+
}
|
82
|
+
|
83
|
+
void squeeze()
|
84
|
+
{
|
85
|
+
if (m_allocatedSize>m_size)
|
86
|
+
reallocate(m_size);
|
87
|
+
}
|
88
|
+
|
89
|
+
void resize(Index size, double reserveSizeFactor = 0)
|
90
|
+
{
|
91
|
+
if (m_allocatedSize<size)
|
92
|
+
{
|
93
|
+
Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size)));
|
94
|
+
if(realloc_size<size)
|
95
|
+
internal::throw_std_bad_alloc();
|
96
|
+
reallocate(realloc_size);
|
97
|
+
}
|
98
|
+
m_size = size;
|
99
|
+
}
|
100
|
+
|
101
|
+
void append(const Scalar& v, Index i)
|
102
|
+
{
|
103
|
+
Index id = m_size;
|
104
|
+
resize(m_size+1, 1);
|
105
|
+
m_values[id] = v;
|
106
|
+
m_indices[id] = internal::convert_index<StorageIndex>(i);
|
107
|
+
}
|
108
|
+
|
109
|
+
inline Index size() const { return m_size; }
|
110
|
+
inline Index allocatedSize() const { return m_allocatedSize; }
|
111
|
+
inline void clear() { m_size = 0; }
|
112
|
+
|
113
|
+
const Scalar* valuePtr() const { return m_values; }
|
114
|
+
Scalar* valuePtr() { return m_values; }
|
115
|
+
const StorageIndex* indexPtr() const { return m_indices; }
|
116
|
+
StorageIndex* indexPtr() { return m_indices; }
|
117
|
+
|
118
|
+
inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
|
119
|
+
inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
|
120
|
+
|
121
|
+
inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
|
122
|
+
inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
|
123
|
+
|
124
|
+
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
|
125
|
+
inline Index searchLowerIndex(Index key) const
|
126
|
+
{
|
127
|
+
return searchLowerIndex(0, m_size, key);
|
128
|
+
}
|
129
|
+
|
130
|
+
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
|
131
|
+
inline Index searchLowerIndex(Index start, Index end, Index key) const
|
132
|
+
{
|
133
|
+
while(end>start)
|
134
|
+
{
|
135
|
+
Index mid = (end+start)>>1;
|
136
|
+
if (m_indices[mid]<key)
|
137
|
+
start = mid+1;
|
138
|
+
else
|
139
|
+
end = mid;
|
140
|
+
}
|
141
|
+
return start;
|
142
|
+
}
|
143
|
+
|
144
|
+
/** \returns the stored value at index \a key
|
145
|
+
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
|
146
|
+
inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
|
147
|
+
{
|
148
|
+
if (m_size==0)
|
149
|
+
return defaultValue;
|
150
|
+
else if (key==m_indices[m_size-1])
|
151
|
+
return m_values[m_size-1];
|
152
|
+
// ^^ optimization: let's first check if it is the last coefficient
|
153
|
+
// (very common in high level algorithms)
|
154
|
+
const Index id = searchLowerIndex(0,m_size-1,key);
|
155
|
+
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
|
156
|
+
}
|
157
|
+
|
158
|
+
/** Like at(), but the search is performed in the range [start,end) */
|
159
|
+
inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
|
160
|
+
{
|
161
|
+
if (start>=end)
|
162
|
+
return defaultValue;
|
163
|
+
else if (end>start && key==m_indices[end-1])
|
164
|
+
return m_values[end-1];
|
165
|
+
// ^^ optimization: let's first check if it is the last coefficient
|
166
|
+
// (very common in high level algorithms)
|
167
|
+
const Index id = searchLowerIndex(start,end-1,key);
|
168
|
+
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
|
169
|
+
}
|
170
|
+
|
171
|
+
/** \returns a reference to the value at index \a key
|
172
|
+
* If the value does not exist, then the value \a defaultValue is inserted
|
173
|
+
* such that the keys are sorted. */
|
174
|
+
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
|
175
|
+
{
|
176
|
+
Index id = searchLowerIndex(0,m_size,key);
|
177
|
+
if (id>=m_size || m_indices[id]!=key)
|
178
|
+
{
|
179
|
+
if (m_allocatedSize<m_size+1)
|
180
|
+
{
|
181
|
+
m_allocatedSize = 2*(m_size+1);
|
182
|
+
internal::scoped_array<Scalar> newValues(m_allocatedSize);
|
183
|
+
internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
|
184
|
+
|
185
|
+
// copy first chunk
|
186
|
+
internal::smart_copy(m_values, m_values +id, newValues.ptr());
|
187
|
+
internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
|
188
|
+
|
189
|
+
// copy the rest
|
190
|
+
if(m_size>id)
|
191
|
+
{
|
192
|
+
internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
|
193
|
+
internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
|
194
|
+
}
|
195
|
+
std::swap(m_values,newValues.ptr());
|
196
|
+
std::swap(m_indices,newIndices.ptr());
|
197
|
+
}
|
198
|
+
else if(m_size>id)
|
199
|
+
{
|
200
|
+
internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
|
201
|
+
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
|
202
|
+
}
|
203
|
+
m_size++;
|
204
|
+
m_indices[id] = internal::convert_index<StorageIndex>(key);
|
205
|
+
m_values[id] = defaultValue;
|
206
|
+
}
|
207
|
+
return m_values[id];
|
208
|
+
}
|
209
|
+
|
210
|
+
void moveChunk(Index from, Index to, Index chunkSize)
|
211
|
+
{
|
212
|
+
eigen_internal_assert(to+chunkSize <= m_size);
|
213
|
+
if(to>from && from+chunkSize>to)
|
214
|
+
{
|
215
|
+
// move backward
|
216
|
+
internal::smart_memmove(m_values+from, m_values+from+chunkSize, m_values+to);
|
217
|
+
internal::smart_memmove(m_indices+from, m_indices+from+chunkSize, m_indices+to);
|
218
|
+
}
|
219
|
+
else
|
220
|
+
{
|
221
|
+
internal::smart_copy(m_values+from, m_values+from+chunkSize, m_values+to);
|
222
|
+
internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
|
223
|
+
}
|
224
|
+
}
|
225
|
+
|
226
|
+
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
|
227
|
+
{
|
228
|
+
Index k = 0;
|
229
|
+
Index n = size();
|
230
|
+
for (Index i=0; i<n; ++i)
|
231
|
+
{
|
232
|
+
if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
|
233
|
+
{
|
234
|
+
value(k) = value(i);
|
235
|
+
index(k) = index(i);
|
236
|
+
++k;
|
237
|
+
}
|
238
|
+
}
|
239
|
+
resize(k,0);
|
240
|
+
}
|
241
|
+
|
242
|
+
protected:
|
243
|
+
|
244
|
+
inline void reallocate(Index size)
|
245
|
+
{
|
246
|
+
#ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
|
247
|
+
EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
|
248
|
+
#endif
|
249
|
+
eigen_internal_assert(size!=m_allocatedSize);
|
250
|
+
internal::scoped_array<Scalar> newValues(size);
|
251
|
+
internal::scoped_array<StorageIndex> newIndices(size);
|
252
|
+
Index copySize = (std::min)(size, m_size);
|
253
|
+
if (copySize>0) {
|
254
|
+
internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
|
255
|
+
internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
|
256
|
+
}
|
257
|
+
std::swap(m_values,newValues.ptr());
|
258
|
+
std::swap(m_indices,newIndices.ptr());
|
259
|
+
m_allocatedSize = size;
|
260
|
+
}
|
261
|
+
|
262
|
+
protected:
|
263
|
+
Scalar* m_values;
|
264
|
+
StorageIndex* m_indices;
|
265
|
+
Index m_size;
|
266
|
+
Index m_allocatedSize;
|
267
|
+
|
268
|
+
};
|
269
|
+
|
270
|
+
} // end namespace internal
|
271
|
+
|
272
|
+
} // end namespace Eigen
|
273
|
+
|
274
|
+
#endif // EIGEN_COMPRESSED_STORAGE_H
|
@@ -0,0 +1,352 @@
|
|
1
|
+
// This file is part of Eigen, a lightweight C++ template library
|
2
|
+
// for linear algebra.
|
3
|
+
//
|
4
|
+
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
|
5
|
+
//
|
6
|
+
// This Source Code Form is subject to the terms of the Mozilla
|
7
|
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
8
|
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
9
|
+
|
10
|
+
#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
|
11
|
+
#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
|
12
|
+
|
13
|
+
namespace Eigen {
|
14
|
+
|
15
|
+
namespace internal {
|
16
|
+
|
17
|
+
// Sparse * sparse product kernel: the result is computed one outer vector at
// a time using dense working arrays of size `rows`:
//  - mask[i]         : whether inner index i already received a contribution
//                      for the current outer index j
//  - values[i]       : accumulated value of coefficient (i,j)
//  - indices[0..nnz) : the inner indices touched so far for j (unsorted)
// When `sortedInsertion` is true, entries are emitted in increasing inner
// order (either by sorting `indices` or by sweeping the dense accumulator,
// whichever the heuristic below deems cheaper); otherwise they are pushed
// unordered and the caller must restore ordering (e.g. via a transpose copy).
template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
  typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
  typedef typename remove_all<ResultType>::type::Scalar ResScalar;

  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  // Dense accumulator, allocated on the stack when small enough.
  ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
  ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
  ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);

  std::memset(mask,0,sizeof(bool)*rows);

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);

  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the respective Y columns
  // of the lhs differs in average of one non zeros, thus the number of non zeros for
  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
  // per column of the lhs.
  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();

  res.setZero();
  res.reserve(Index(estimated_nnz_prod));
  // we compute each column of the result, one after the other
  for (Index j=0; j<cols; ++j)
  {

    res.startVec(j);
    Index nnz = 0;
    // Accumulate column j of the product: for each nonzero (k,j) of the rhs,
    // scatter y * lhs.col(k) into the dense accumulator.
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      RhsScalar y = rhsIt.value();
      Index k = rhsIt.index();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
      {
        Index i = lhsIt.index();
        LhsScalar x = lhsIt.value();
        if(!mask[i])
        {
          // first contribution to inner index i for this column
          mask[i] = true;
          values[i] = x * y;
          indices[nnz] = i;
          ++nnz;
        }
        else
          values[i] += x * y;
      }
    }
    if(!sortedInsertion)
    {
      // unordered insertion
      for(Index k=0; k<nnz; ++k)
      {
        Index i = indices[k];
        res.insertBackByOuterInnerUnordered(j,i) = values[i];
        mask[i] = false;   // reset the accumulator for the next column
      }
    }
    else
    {
      // alternative ordered insertion code:
      const Index t200 = rows/11; // 11 == (log2(200)*1.39)
      const Index t = (rows*100)/139;

      // FIXME reserve nnz non zeros
      // FIXME implement faster sorting algorithms for very small nnz
      // if the result is sparse enough => use a quick sort
      // otherwise => loop through the entire vector
      // In order to avoid to perform an expensive log2 when the
      // result is clearly very sparse we use a linear bound up to 200.
      if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
      {
        if(nnz>1) std::sort(indices,indices+nnz);
        for(Index k=0; k<nnz; ++k)
        {
          Index i = indices[k];
          res.insertBackByOuterInner(j,i) = values[i];
          mask[i] = false;
        }
      }
      else
      {
        // dense path: sweep all rows; `mask` tells which ones were touched
        for(Index i=0; i<rows; ++i)
        {
          if(mask[i])
          {
            mask[i] = false;
            res.insertBackByOuterInner(j,i) = values[i];
          }
        }
      }
    }
  }
  res.finalize();
}
|
121
|
+
|
122
|
+
|
123
|
+
} // end namespace internal
|
124
|
+
|
125
|
+
namespace internal {
|
126
|
+
|
127
|
+
// Dispatcher keyed on the storage orders of the two operands and of the
// result; each specialization reorders/copies the operands so that the
// core kernel can traverse them efficiently.
template<typename Lhs, typename Rhs, typename ResultType,
  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct conservative_sparse_sparse_product_selector;
|
132
|
+
|
133
|
+
// All col-major: either insert in sorted order directly, or insert unordered
// and restore the ordering through a row-major/col-major round trip.
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
  typedef typename remove_all<Lhs>::type LhsCleaned;
  typedef typename LhsCleaned::Scalar Scalar;

  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
    typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;

    // If the result is tall and thin (in the extreme case a column vector)
    // then it is faster to sort the coefficients inplace instead of transposing twice.
    // FIXME, the following heuristic is probably not very good.
    if(lhs.rows()>rhs.cols())
    {
      ColMajorMatrix resCol(lhs.rows(),rhs.cols());
      // perform sorted insertion
      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
      res = resCol.markAsRValue();   // allow the assignment to steal the temporary's storage
    }
    else
    {
      ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
      // resort to transpose to sort the entries
      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
      RowMajorMatrix resRow(resCol);
      res = resRow.markAsRValue();
    }
  }
};
|
165
|
+
|
166
|
+
template<typename Lhs, typename Rhs, typename ResultType>
|
167
|
+
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
|
168
|
+
{
|
169
|
+
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
170
|
+
{
|
171
|
+
typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRhs;
|
172
|
+
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
|
173
|
+
RowMajorRhs rhsRow = rhs;
|
174
|
+
RowMajorRes resRow(lhs.rows(), rhs.cols());
|
175
|
+
internal::conservative_sparse_sparse_product_impl<RowMajorRhs,Lhs,RowMajorRes>(rhsRow, lhs, resRow);
|
176
|
+
res = resRow;
|
177
|
+
}
|
178
|
+
};
|
179
|
+
|
180
|
+
template<typename Lhs, typename Rhs, typename ResultType>
|
181
|
+
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
|
182
|
+
{
|
183
|
+
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
184
|
+
{
|
185
|
+
typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorLhs;
|
186
|
+
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
|
187
|
+
RowMajorLhs lhsRow = lhs;
|
188
|
+
RowMajorRes resRow(lhs.rows(), rhs.cols());
|
189
|
+
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorLhs,RowMajorRes>(rhs, lhsRow, resRow);
|
190
|
+
res = resRow;
|
191
|
+
}
|
192
|
+
};
|
193
|
+
|
194
|
+
// Both operands row-major, col-major destination: evaluate with the operands
// swapped into a row-major temporary, then let the assignment convert the
// storage order.
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
    RowMajorMatrix resRow(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
    res = resRow;
  }
};
|
205
|
+
|
206
|
+
|
207
|
+
template<typename Lhs, typename Rhs, typename ResultType>
|
208
|
+
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
|
209
|
+
{
|
210
|
+
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
|
211
|
+
|
212
|
+
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
213
|
+
{
|
214
|
+
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
|
215
|
+
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
|
216
|
+
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
|
217
|
+
res = resCol;
|
218
|
+
}
|
219
|
+
};
|
220
|
+
|
221
|
+
// Row-major lhs, col-major rhs, row-major destination: convert the lhs to
// col-major, evaluate into a col-major temporary, then convert on assignment.
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
    ColMajorLhs lhsCol = lhs;
    ColMajorRes resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<ColMajorLhs,Rhs,ColMajorRes>(lhsCol, rhs, resCol);
    res = resCol;
  }
};
|
234
|
+
|
235
|
+
// Col-major lhs, row-major rhs, row-major destination: convert the rhs to
// col-major, evaluate into a col-major temporary, then convert on assignment.
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
    ColMajorRhs rhsCol = rhs;
    ColMajorRes resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorRhs,ColMajorRes>(lhs, rhsCol, resCol);
    res = resCol;
  }
};
|
248
|
+
|
249
|
+
// All row-major: evaluate with swapped operands into a row-major temporary,
// then round-trip through a col-major copy to sort the entries.
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
    RowMajorMatrix resRow(lhs.rows(),rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
    // sort the non zeros:
    ColMajorMatrix resCol(resRow);
    res = resCol;
  }
};
|
263
|
+
|
264
|
+
} // end namespace internal
|
265
|
+
|
266
|
+
|
267
|
+
namespace internal {
|
268
|
+
|
269
|
+
// Accumulates the sparse*sparse product into a DENSE result:
// res(i,j) += lhs(i,k) * rhs(k,j) over all stored entries.
// Note: this only accumulates with += — the caller is responsible for
// initializing `res` (e.g. to zero) beforehand.
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
  typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);

  // For each nonzero (k,j) of the rhs, scatter y * lhs.col(k) into column j.
  for (Index j=0; j<cols; ++j)
  {
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      RhsScalar y = rhsIt.value();
      Index k = rhsIt.index();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
      {
        Index i = lhsIt.index();
        LhsScalar x = lhsIt.value();
        res.coeffRef(i,j) += x * y;
      }
    }
  }
}
|
295
|
+
|
296
|
+
|
297
|
+
} // end namespace internal
|
298
|
+
|
299
|
+
namespace internal {
|
300
|
+
|
301
|
+
// Dispatcher keyed on the storage orders of the two sparse operands; each
// specialization reorders/copies them so the dense-result kernel can be used.
template<typename Lhs, typename Rhs, typename ResultType,
  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct sparse_sparse_to_dense_product_selector;
|
305
|
+
|
306
|
+
// Both operands col-major: call the kernel directly, no conversion needed.
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    internal::sparse_sparse_to_dense_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, res);
  }
};
|
314
|
+
|
315
|
+
template<typename Lhs, typename Rhs, typename ResultType>
|
316
|
+
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor>
|
317
|
+
{
|
318
|
+
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
319
|
+
{
|
320
|
+
typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
|
321
|
+
ColMajorLhs lhsCol(lhs);
|
322
|
+
internal::sparse_sparse_to_dense_product_impl<ColMajorLhs,Rhs,ResultType>(lhsCol, rhs, res);
|
323
|
+
}
|
324
|
+
};
|
325
|
+
|
326
|
+
template<typename Lhs, typename Rhs, typename ResultType>
|
327
|
+
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor>
|
328
|
+
{
|
329
|
+
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
330
|
+
{
|
331
|
+
typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
|
332
|
+
ColMajorRhs rhsCol(rhs);
|
333
|
+
internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorRhs,ResultType>(lhs, rhsCol, res);
|
334
|
+
}
|
335
|
+
};
|
336
|
+
|
337
|
+
// Both operands row-major: evaluate rhs*lhs (operands swapped) into a
// transposed view of the destination instead of converting the operands.
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    Transpose<ResultType> trRes(res);
    internal::sparse_sparse_to_dense_product_impl<Rhs,Lhs,Transpose<ResultType> >(rhs, lhs, trRes);
  }
};
|
346
|
+
|
347
|
+
|
348
|
+
} // end namespace internal
|
349
|
+
|
350
|
+
} // end namespace Eigen
|
351
|
+
|
352
|
+
#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
|
@@ -0,0 +1,67 @@
|
|
1
|
+
// This file is part of Eigen, a lightweight C++ template library
|
2
|
+
// for linear algebra.
|
3
|
+
//
|
4
|
+
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
5
|
+
//
|
6
|
+
// This Source Code Form is subject to the terms of the Mozilla
|
7
|
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
8
|
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
9
|
+
|
10
|
+
#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
|
11
|
+
#define EIGEN_MAPPED_SPARSEMATRIX_H
|
12
|
+
|
13
|
+
namespace Eigen {
|
14
|
+
|
15
|
+
/** \deprecated Use Map<SparseMatrix<> > instead.
  * \class MappedSparseMatrix
  *
  * \brief Sparse matrix view over existing, externally owned compressed-storage arrays
  *
  * \param _Scalar the scalar type, i.e. the type of the coefficients
  * \param _Flags option flags of the mapped matrix (e.g. its storage order)
  * \param _StorageIndex the type of the indices
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  */
|
25
|
+
namespace internal {
|
26
|
+
// MappedSparseMatrix exposes the same compile-time characteristics as the
// equivalent owning SparseMatrix type.
template<typename _Scalar, int _Flags, typename _StorageIndex>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{};
|
29
|
+
} // end namespace internal
|
30
|
+
|
31
|
+
// Deprecated thin wrapper: everything is forwarded to Map<SparseMatrix<...> >,
// which provides the actual implementation.
template<typename _Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix
  : public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{
    typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;

  public:

    typedef typename Base::StorageIndex StorageIndex;
    typedef typename Base::Scalar Scalar;

    /// Wraps existing compressed-sparse arrays (outer index, inner index,
    /// values) without taking ownership; all arguments are forwarded
    /// unchanged to the Map base class. innerNonZeroPtr defaults to null
    /// (presumably meaning a fully compressed matrix — see Map<SparseMatrix>).
    inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
    {}

    /** Empty destructor */
    inline ~MappedSparseMatrix() {}
};
|
49
|
+
|
50
|
+
namespace internal {
|
51
|
+
|
52
|
+
// Evaluator: simply reuses the evaluator of SparseCompressedBase, since the
// mapped matrix shares the same compressed-storage interface.
template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;

  evaluator() : Base() {}
  explicit evaluator(const XprType &mat) : Base(mat) {}
};
|
62
|
+
|
63
|
+
}
|
64
|
+
|
65
|
+
} // end namespace Eigen
|
66
|
+
|
67
|
+
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
|