umappp 0.1.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/LICENSE.txt +25 -0
- data/README.md +110 -0
- data/ext/umappp/extconf.rb +25 -0
- data/ext/umappp/numo.hpp +867 -0
- data/ext/umappp/umappp.cpp +225 -0
- data/lib/umappp/version.rb +5 -0
- data/lib/umappp.rb +41 -0
- data/vendor/Eigen/Cholesky +45 -0
- data/vendor/Eigen/CholmodSupport +48 -0
- data/vendor/Eigen/Core +384 -0
- data/vendor/Eigen/Dense +7 -0
- data/vendor/Eigen/Eigen +2 -0
- data/vendor/Eigen/Eigenvalues +60 -0
- data/vendor/Eigen/Geometry +59 -0
- data/vendor/Eigen/Householder +29 -0
- data/vendor/Eigen/IterativeLinearSolvers +48 -0
- data/vendor/Eigen/Jacobi +32 -0
- data/vendor/Eigen/KLUSupport +41 -0
- data/vendor/Eigen/LU +47 -0
- data/vendor/Eigen/MetisSupport +35 -0
- data/vendor/Eigen/OrderingMethods +70 -0
- data/vendor/Eigen/PaStiXSupport +49 -0
- data/vendor/Eigen/PardisoSupport +35 -0
- data/vendor/Eigen/QR +50 -0
- data/vendor/Eigen/QtAlignedMalloc +39 -0
- data/vendor/Eigen/SPQRSupport +34 -0
- data/vendor/Eigen/SVD +50 -0
- data/vendor/Eigen/Sparse +34 -0
- data/vendor/Eigen/SparseCholesky +37 -0
- data/vendor/Eigen/SparseCore +69 -0
- data/vendor/Eigen/SparseLU +50 -0
- data/vendor/Eigen/SparseQR +36 -0
- data/vendor/Eigen/StdDeque +27 -0
- data/vendor/Eigen/StdList +26 -0
- data/vendor/Eigen/StdVector +27 -0
- data/vendor/Eigen/SuperLUSupport +64 -0
- data/vendor/Eigen/UmfPackSupport +40 -0
- data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
- data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
- data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
- data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
- data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/Eigen/src/Core/Array.h +417 -0
- data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
- data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
- data/vendor/Eigen/src/Core/Assign.h +90 -0
- data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
- data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
- data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
- data/vendor/Eigen/src/Core/Block.h +448 -0
- data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
- data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
- data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
- data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
- data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
- data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
- data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
- data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
- data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
- data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
- data/vendor/Eigen/src/Core/DenseBase.h +701 -0
- data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
- data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
- data/vendor/Eigen/src/Core/Diagonal.h +258 -0
- data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
- data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
- data/vendor/Eigen/src/Core/Dot.h +318 -0
- data/vendor/Eigen/src/Core/EigenBase.h +160 -0
- data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
- data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
- data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
- data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
- data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
- data/vendor/Eigen/src/Core/IO.h +258 -0
- data/vendor/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/Eigen/src/Core/Inverse.h +117 -0
- data/vendor/Eigen/src/Core/Map.h +171 -0
- data/vendor/Eigen/src/Core/MapBase.h +310 -0
- data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
- data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
- data/vendor/Eigen/src/Core/Matrix.h +565 -0
- data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
- data/vendor/Eigen/src/Core/NestByValue.h +85 -0
- data/vendor/Eigen/src/Core/NoAlias.h +109 -0
- data/vendor/Eigen/src/Core/NumTraits.h +335 -0
- data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
- data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
- data/vendor/Eigen/src/Core/Product.h +191 -0
- data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
- data/vendor/Eigen/src/Core/Random.h +218 -0
- data/vendor/Eigen/src/Core/Redux.h +515 -0
- data/vendor/Eigen/src/Core/Ref.h +381 -0
- data/vendor/Eigen/src/Core/Replicate.h +142 -0
- data/vendor/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
- data/vendor/Eigen/src/Core/Reverse.h +217 -0
- data/vendor/Eigen/src/Core/Select.h +164 -0
- data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
- data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
- data/vendor/Eigen/src/Core/Solve.h +188 -0
- data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
- data/vendor/Eigen/src/Core/SolverBase.h +168 -0
- data/vendor/Eigen/src/Core/StableNorm.h +251 -0
- data/vendor/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/Eigen/src/Core/Stride.h +116 -0
- data/vendor/Eigen/src/Core/Swap.h +68 -0
- data/vendor/Eigen/src/Core/Transpose.h +464 -0
- data/vendor/Eigen/src/Core/Transpositions.h +386 -0
- data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
- data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
- data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
- data/vendor/Eigen/src/Core/Visitor.h +381 -0
- data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
- data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
- data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
- data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
- data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
- data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
- data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
- data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
- data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
- data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
- data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
- data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
- data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
- data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
- data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
- data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
- data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
- data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
- data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
- data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
- data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
- data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
- data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
- data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
- data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
- data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
- data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
- data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
- data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
- data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
- data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
- data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
- data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
- data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
- data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
- data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
- data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/Eigen/src/Core/util/Constants.h +563 -0
- data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
- data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
- data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
- data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
- data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
- data/vendor/Eigen/src/Core/util/Meta.h +812 -0
- data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
- data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
- data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
- data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
- data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
- data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
- data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
- data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
- data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
- data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
- data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
- data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
- data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
- data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
- data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
- data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
- data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
- data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
- data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
- data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
- data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
- data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
- data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
- data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
- data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
- data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
- data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
- data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
- data/vendor/Eigen/src/Geometry/Translation.h +202 -0
- data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
- data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
- data/vendor/Eigen/src/Householder/Householder.h +176 -0
- data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
- data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
- data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
- data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/Eigen/src/LU/Determinant.h +117 -0
- data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
- data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
- data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
- data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
- data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
- data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
- data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
- data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
- data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
- data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
- data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
- data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
- data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
- data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
- data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
- data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
- data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
- data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
- data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
- data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
- data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
- data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
- data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
- data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
- data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
- data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
- data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
- data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
- data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
- data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
- data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
- data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
- data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
- data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
- data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
- data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
- data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
- data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
- data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
- data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
- data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
- data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
- data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
- data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
- data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
- data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
- data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
- data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
- data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
- data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
- data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
- data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
- data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
- data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
- data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
- data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
- data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
- data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
- data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
- data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
- data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
- data/vendor/Eigen/src/StlSupport/details.h +84 -0
- data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
- data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
- data/vendor/Eigen/src/misc/Image.h +82 -0
- data/vendor/Eigen/src/misc/Kernel.h +79 -0
- data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
- data/vendor/Eigen/src/misc/blas.h +440 -0
- data/vendor/Eigen/src/misc/lapack.h +152 -0
- data/vendor/Eigen/src/misc/lapacke.h +16292 -0
- data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
- data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
- data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
- data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
- data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
- data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
- data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
- data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
- data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/aarand/aarand.hpp +114 -0
- data/vendor/annoy/annoylib.h +1495 -0
- data/vendor/annoy/kissrandom.h +120 -0
- data/vendor/annoy/mman.h +242 -0
- data/vendor/hnswlib/bruteforce.h +152 -0
- data/vendor/hnswlib/hnswalg.h +1192 -0
- data/vendor/hnswlib/hnswlib.h +108 -0
- data/vendor/hnswlib/space_ip.h +282 -0
- data/vendor/hnswlib/space_l2.h +281 -0
- data/vendor/hnswlib/visited_list_pool.h +79 -0
- data/vendor/irlba/irlba.hpp +575 -0
- data/vendor/irlba/lanczos.hpp +212 -0
- data/vendor/irlba/parallel.hpp +474 -0
- data/vendor/irlba/utils.hpp +224 -0
- data/vendor/irlba/wrappers.hpp +228 -0
- data/vendor/kmeans/Base.hpp +75 -0
- data/vendor/kmeans/Details.hpp +79 -0
- data/vendor/kmeans/HartiganWong.hpp +492 -0
- data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
- data/vendor/kmeans/InitializeNone.hpp +44 -0
- data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
- data/vendor/kmeans/InitializeRandom.hpp +91 -0
- data/vendor/kmeans/Kmeans.hpp +161 -0
- data/vendor/kmeans/Lloyd.hpp +134 -0
- data/vendor/kmeans/MiniBatch.hpp +269 -0
- data/vendor/kmeans/QuickSearch.hpp +179 -0
- data/vendor/kmeans/compute_centroids.hpp +32 -0
- data/vendor/kmeans/compute_wcss.hpp +27 -0
- data/vendor/kmeans/is_edge_case.hpp +42 -0
- data/vendor/kmeans/random.hpp +55 -0
- data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
- data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
- data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
- data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
- data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
- data/vendor/knncolle/knncolle.hpp +34 -0
- data/vendor/knncolle/utils/Base.hpp +100 -0
- data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
- data/vendor/knncolle/utils/distances.hpp +98 -0
- data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
- data/vendor/powerit/PowerIterations.hpp +157 -0
- data/vendor/umappp/NeighborList.hpp +37 -0
- data/vendor/umappp/Umap.hpp +662 -0
- data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
- data/vendor/umappp/find_ab.hpp +157 -0
- data/vendor/umappp/neighbor_similarities.hpp +136 -0
- data/vendor/umappp/optimize_layout.hpp +285 -0
- data/vendor/umappp/spectral_init.hpp +181 -0
- data/vendor/umappp/umappp.hpp +13 -0
- metadata +465 -0
@@ -0,0 +1,518 @@
|
|
1
|
+
// This file is part of Eigen, a lightweight C++ template library
|
2
|
+
// for linear algebra.
|
3
|
+
//
|
4
|
+
// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
|
5
|
+
//
|
6
|
+
// This Source Code Form is subject to the terms of the Mozilla
|
7
|
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
8
|
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
9
|
+
|
10
|
+
#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
|
11
|
+
#define EIGEN_GENERAL_MATRIX_VECTOR_H
|
12
|
+
|
13
|
+
namespace Eigen {
|
14
|
+
|
15
|
+
namespace internal {
|
16
|
+
|
17
|
+
// Tags selecting which packet width (full, half or quarter of the native
// SIMD register width) a GEMV kernel should operate with.
enum GEMVPacketSizeType {
  GEMVPacketFull = 0,
  GEMVPacketHalf,
  GEMVPacketQuarter
};

// Compile-time three-way type selector: maps a GEMVPacketSizeType tag to one
// of three candidate packet types. The primary template is the fallback and
// yields the quarter-width candidate (this covers GEMVPacketQuarter).
template <int N, typename Full, typename Half, typename Quarter>
struct gemv_packet_cond { typedef Quarter type; };

// Full-width packets requested: yield the first candidate.
template <typename Full, typename Half, typename Quarter>
struct gemv_packet_cond<GEMVPacketFull, Full, Half, Quarter> { typedef Full type; };

// Half-width packets requested: yield the second candidate.
template <typename Full, typename Half, typename Quarter>
struct gemv_packet_cond<GEMVPacketHalf, Full, Half, Quarter> { typedef Half type; };
|
31
|
+
|
32
|
+
template<typename LhsScalar, typename RhsScalar, int _PacketSize=GEMVPacketFull>
|
33
|
+
class gemv_traits
|
34
|
+
{
|
35
|
+
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
|
36
|
+
|
37
|
+
#define PACKET_DECL_COND_PREFIX(prefix, name, packet_size) \
|
38
|
+
typedef typename gemv_packet_cond<packet_size, \
|
39
|
+
typename packet_traits<name ## Scalar>::type, \
|
40
|
+
typename packet_traits<name ## Scalar>::half, \
|
41
|
+
typename unpacket_traits<typename packet_traits<name ## Scalar>::half>::half>::type \
|
42
|
+
prefix ## name ## Packet
|
43
|
+
|
44
|
+
PACKET_DECL_COND_PREFIX(_, Lhs, _PacketSize);
|
45
|
+
PACKET_DECL_COND_PREFIX(_, Rhs, _PacketSize);
|
46
|
+
PACKET_DECL_COND_PREFIX(_, Res, _PacketSize);
|
47
|
+
#undef PACKET_DECL_COND_PREFIX
|
48
|
+
|
49
|
+
public:
|
50
|
+
enum {
|
51
|
+
Vectorizable = unpacket_traits<_LhsPacket>::vectorizable &&
|
52
|
+
unpacket_traits<_RhsPacket>::vectorizable &&
|
53
|
+
int(unpacket_traits<_LhsPacket>::size)==int(unpacket_traits<_RhsPacket>::size),
|
54
|
+
LhsPacketSize = Vectorizable ? unpacket_traits<_LhsPacket>::size : 1,
|
55
|
+
RhsPacketSize = Vectorizable ? unpacket_traits<_RhsPacket>::size : 1,
|
56
|
+
ResPacketSize = Vectorizable ? unpacket_traits<_ResPacket>::size : 1
|
57
|
+
};
|
58
|
+
|
59
|
+
typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
|
60
|
+
typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
|
61
|
+
typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
|
62
|
+
};
|
63
|
+
|
64
|
+
|
65
|
+
/* Optimized col-major matrix * vector product:
 * This algorithm processes the matrix per vertical panels,
 * which are then processed horizontally per chunk of 8*PacketSize x 1 vertical segments.
 *
 * Mixing type logic: C += alpha * A * B
 *  |  A  |  B  |alpha| comments
 *  |real |cplx |cplx | no vectorization
 *  |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
 *  |cplx |real |cplx | invalid, the caller has to do tmp = A * B; C += alpha*tmp
 *  |cplx |real |real | optimal case, vectorization possible via real-cplx mul
 *
 * The same reasoning apply for the transposed case.
 */
// Partial specialization for column-major lhs storage. It only declares the
// packet-type aliases (at full, half and quarter SIMD width) consumed by
// run(); the out-of-line definition of run() follows below in this file.
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
{
  // Packet-type bundles at the three supported SIMD widths.
  typedef gemv_traits<LhsScalar,RhsScalar> Traits;
  typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketHalf> HalfTraits;
  typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketQuarter> QuarterTraits;

  // Scalar type of the result (handles mixed real/complex operands).
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // Full-width packet types.
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;

  // Half-width packet types, used for the tail of the row range.
  typedef typename HalfTraits::LhsPacket LhsPacketHalf;
  typedef typename HalfTraits::RhsPacket RhsPacketHalf;
  typedef typename HalfTraits::ResPacket ResPacketHalf;

  // Quarter-width packet types, used for the final tail.
  typedef typename QuarterTraits::LhsPacket LhsPacketQuarter;
  typedef typename QuarterTraits::RhsPacket RhsPacketQuarter;
  typedef typename QuarterTraits::ResPacket ResPacketQuarter;

  // Computes res += alpha * lhs * rhs (per the mixing-type comment above).
  // resIncr is asserted to be 1 in the definition, i.e. res must be a
  // contiguous column.
  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
    Index rows, Index cols,
    const LhsMapper& lhs,
    const RhsMapper& rhs,
    ResScalar* res, Index resIncr,
    RhsScalar alpha);
};
|
106
|
+
|
107
|
+
/* Column-major matrix * vector product kernel:  res += alpha * lhs * rhs.
 *
 * lhs is accessed through a column-major LhsMapper; the result must have unit
 * stride (resIncr is asserted to be 1 below).  Columns are processed in blocks
 * of `block_cols` for cache friendliness; inside a block, rows are consumed in
 * decreasing SIMD granularity: 8/4/3/2/1 full result packets per iteration,
 * then (when the target defines narrower packet types) one half packet and one
 * quarter packet, and finally a scalar loop for any remaining rows.
 */
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
    Index rows, Index cols,
    const LhsMapper& alhs,
    const RhsMapper& rhs,
    ResScalar* res, Index resIncr,
    RhsScalar alpha)
{
  EIGEN_UNUSED_VARIABLE(resIncr);
  // This kernel only supports a densely packed result vector.
  eigen_internal_assert(resIncr==1);

  // The following copy tells the compiler that lhs's attributes are not modified outside this function
  // This helps GCC to generate proper code.
  LhsMapper lhs(alhs);

  // Conjugation helpers for scalar and each packet granularity; they apply the
  // ConjugateLhs/ConjugateRhs flags inside pmul/pmadd.
  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
  conj_helper<LhsPacketHalf,RhsPacketHalf,ConjugateLhs,ConjugateRhs> pcj_half;
  conj_helper<LhsPacketQuarter,RhsPacketQuarter,ConjugateLhs,ConjugateRhs> pcj_quarter;

  const Index lhsStride = lhs.stride();
  // TODO: for padded aligned inputs, we could enable aligned reads
  enum { LhsAlignment = Unaligned,
         ResPacketSize = Traits::ResPacketSize,
         ResPacketSizeHalf = HalfTraits::ResPacketSize,
         ResPacketSizeQuarter = QuarterTraits::ResPacketSize,
         LhsPacketSize = Traits::LhsPacketSize,
         // Half/quarter packets only exist if they are strictly narrower than
         // the level above them; otherwise those tail stages are skipped.
         HasHalf = (int)ResPacketSizeHalf < (int)ResPacketSize,
         HasQuarter = (int)ResPacketSizeQuarter < (int)ResPacketSizeHalf
  };

  // Loop bounds: i < nK guarantees K result packets (of the given width)
  // still fit within `rows`.
  const Index n8 = rows-8*ResPacketSize+1;
  const Index n4 = rows-4*ResPacketSize+1;
  const Index n3 = rows-3*ResPacketSize+1;
  const Index n2 = rows-2*ResPacketSize+1;
  const Index n1 = rows-1*ResPacketSize+1;
  const Index n_half = rows-1*ResPacketSizeHalf+1;
  const Index n_quarter = rows-1*ResPacketSizeQuarter+1;

  // TODO: improve the following heuristic:
  const Index block_cols = cols<128 ? cols : (lhsStride*sizeof(LhsScalar)<32000?16:4);
  ResPacket palpha = pset1<ResPacket>(alpha);
  ResPacketHalf palpha_half = pset1<ResPacketHalf>(alpha);
  ResPacketQuarter palpha_quarter = pset1<ResPacketQuarter>(alpha);

  for(Index j2=0; j2<cols; j2+=block_cols)
  {
    Index jend = numext::mini(j2+block_cols,cols);
    Index i=0;
    // 8 result packets per iteration: accumulate lhs(:,j)*rhs(j) over the
    // column block, then scale by alpha and add into res.
    for(; i<n8; i+=ResPacketSize*8)
    {
      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
                c1 = pset1<ResPacket>(ResScalar(0)),
                c2 = pset1<ResPacket>(ResScalar(0)),
                c3 = pset1<ResPacket>(ResScalar(0)),
                c4 = pset1<ResPacket>(ResScalar(0)),
                c5 = pset1<ResPacket>(ResScalar(0)),
                c6 = pset1<ResPacket>(ResScalar(0)),
                c7 = pset1<ResPacket>(ResScalar(0));

      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));
        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);
        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);
        c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*2,j),b0,c2);
        c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*3,j),b0,c3);
        c4 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*4,j),b0,c4);
        c5 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*5,j),b0,c5);
        c6 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*6,j),b0,c6);
        c7 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*7,j),b0,c7);
      }
      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));
      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));
      pstoreu(res+i+ResPacketSize*2, pmadd(c2,palpha,ploadu<ResPacket>(res+i+ResPacketSize*2)));
      pstoreu(res+i+ResPacketSize*3, pmadd(c3,palpha,ploadu<ResPacket>(res+i+ResPacketSize*3)));
      pstoreu(res+i+ResPacketSize*4, pmadd(c4,palpha,ploadu<ResPacket>(res+i+ResPacketSize*4)));
      pstoreu(res+i+ResPacketSize*5, pmadd(c5,palpha,ploadu<ResPacket>(res+i+ResPacketSize*5)));
      pstoreu(res+i+ResPacketSize*6, pmadd(c6,palpha,ploadu<ResPacket>(res+i+ResPacketSize*6)));
      pstoreu(res+i+ResPacketSize*7, pmadd(c7,palpha,ploadu<ResPacket>(res+i+ResPacketSize*7)));
    }
    // Tail: 4 result packets (at most one iteration, hence `if`).
    if(i<n4)
    {
      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
                c1 = pset1<ResPacket>(ResScalar(0)),
                c2 = pset1<ResPacket>(ResScalar(0)),
                c3 = pset1<ResPacket>(ResScalar(0));

      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));
        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);
        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);
        c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*2,j),b0,c2);
        c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*3,j),b0,c3);
      }
      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));
      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));
      pstoreu(res+i+ResPacketSize*2, pmadd(c2,palpha,ploadu<ResPacket>(res+i+ResPacketSize*2)));
      pstoreu(res+i+ResPacketSize*3, pmadd(c3,palpha,ploadu<ResPacket>(res+i+ResPacketSize*3)));

      i+=ResPacketSize*4;
    }
    // Tail: 3 result packets.
    if(i<n3)
    {
      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
                c1 = pset1<ResPacket>(ResScalar(0)),
                c2 = pset1<ResPacket>(ResScalar(0));

      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));
        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);
        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);
        c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*2,j),b0,c2);
      }
      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));
      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));
      pstoreu(res+i+ResPacketSize*2, pmadd(c2,palpha,ploadu<ResPacket>(res+i+ResPacketSize*2)));

      i+=ResPacketSize*3;
    }
    // Tail: 2 result packets.
    if(i<n2)
    {
      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
                c1 = pset1<ResPacket>(ResScalar(0));

      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));
        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);
        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);
      }
      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));
      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));
      i+=ResPacketSize*2;
    }
    // Tail: 1 full result packet.
    if(i<n1)
    {
      ResPacket c0 = pset1<ResPacket>(ResScalar(0));
      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));
        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);
      }
      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));
      i+=ResPacketSize;
    }
    // Tail: one half-width packet (only when the target has a distinct one).
    if(HasHalf && i<n_half)
    {
      ResPacketHalf c0 = pset1<ResPacketHalf>(ResScalar(0));
      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacketHalf b0 = pset1<RhsPacketHalf>(rhs(j,0));
        c0 = pcj_half.pmadd(lhs.template load<LhsPacketHalf,LhsAlignment>(i+0,j),b0,c0);
      }
      pstoreu(res+i+ResPacketSizeHalf*0, pmadd(c0,palpha_half,ploadu<ResPacketHalf>(res+i+ResPacketSizeHalf*0)));
      i+=ResPacketSizeHalf;
    }
    // Tail: one quarter-width packet.
    if(HasQuarter && i<n_quarter)
    {
      ResPacketQuarter c0 = pset1<ResPacketQuarter>(ResScalar(0));
      for(Index j=j2; j<jend; j+=1)
      {
        RhsPacketQuarter b0 = pset1<RhsPacketQuarter>(rhs(j,0));
        c0 = pcj_quarter.pmadd(lhs.template load<LhsPacketQuarter,LhsAlignment>(i+0,j),b0,c0);
      }
      pstoreu(res+i+ResPacketSizeQuarter*0, pmadd(c0,palpha_quarter,ploadu<ResPacketQuarter>(res+i+ResPacketSizeQuarter*0)));
      i+=ResPacketSizeQuarter;
    }
    // Scalar tail for the last few rows.
    for(;i<rows;++i)
    {
      ResScalar c0(0);
      for(Index j=j2; j<jend; j+=1)
        c0 += cj.pmul(lhs(i,j), rhs(j,0));
      res[i] += alpha*c0;
    }
  }
}
|
286
|
+
|
287
|
+
/* Optimized row-major matrix * vector product:
 * This algorithm processes 4 rows at once, which both reduces the
 * number of load/stores of the result by a factor of 4 and reduces
 * the instruction dependency. Moreover, we know that all bands have the
 * same alignment pattern.
 *
 * Mixing type logic:
 * - alpha is always a complex (or converted to a complex)
 * - no vectorization
 */
|
297
|
+
// Row-major specialization of the GEMV kernel: declares the packet-type
// aliases (full/half/quarter width, via gemv_traits) and the out-of-line
// run() entry point defined below.
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
{
  // Packet traits at the three supported SIMD granularities.
  typedef gemv_traits<LhsScalar,RhsScalar> Traits;
  typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketHalf> HalfTraits;
  typedef gemv_traits<LhsScalar,RhsScalar,GEMVPacketQuarter> QuarterTraits;

  // Result scalar type of LhsScalar * RhsScalar (handles mixed real/complex).
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;

  typedef typename HalfTraits::LhsPacket LhsPacketHalf;
  typedef typename HalfTraits::RhsPacket RhsPacketHalf;
  typedef typename HalfTraits::ResPacket ResPacketHalf;

  typedef typename QuarterTraits::LhsPacket LhsPacketQuarter;
  typedef typename QuarterTraits::RhsPacket RhsPacketQuarter;
  typedef typename QuarterTraits::ResPacket ResPacketQuarter;

  // Computes res += alpha * lhs * rhs (definition follows this struct).
  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
    Index rows, Index cols,
    const LhsMapper& lhs,
    const RhsMapper& rhs,
    ResScalar* res, Index resIncr,
    ResScalar alpha);
};
|
325
|
+
|
326
|
+
/* Row-major matrix * vector product kernel:  res += alpha * lhs * rhs.
 *
 * Each output coefficient is a dot product of one lhs row with rhs, so rows
 * are processed in groups of 8, 4, 2 and finally 1, accumulating in SIMD
 * packets and reducing each accumulator to a scalar with predux().  rhs must
 * have unit stride (asserted below); res may be strided (resIncr).
 */
template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
    Index rows, Index cols,
    const LhsMapper& alhs,
    const RhsMapper& rhs,
    ResScalar* res, Index resIncr,
    ResScalar alpha)
{
  // The following copy tells the compiler that lhs's attributes are not modified outside this function
  // This helps GCC to generate proper code.
  LhsMapper lhs(alhs);

  eigen_internal_assert(rhs.stride()==1);
  // Conjugation helpers for scalar and each packet granularity.
  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
  conj_helper<LhsPacketHalf,RhsPacketHalf,ConjugateLhs,ConjugateRhs> pcj_half;
  conj_helper<LhsPacketQuarter,RhsPacketQuarter,ConjugateLhs,ConjugateRhs> pcj_quarter;

  // TODO: fine tune the following heuristic. The rationale is that if the matrix is very large,
  // processing 8 rows at once might be counter productive wrt cache.
  const Index n8 = lhs.stride()*sizeof(LhsScalar)>32000 ? 0 : rows-7;
  const Index n4 = rows-3;
  const Index n2 = rows-1;

  // TODO: for padded aligned inputs, we could enable aligned reads
  enum { LhsAlignment = Unaligned,
         ResPacketSize = Traits::ResPacketSize,
         ResPacketSizeHalf = HalfTraits::ResPacketSize,
         ResPacketSizeQuarter = QuarterTraits::ResPacketSize,
         LhsPacketSize = Traits::LhsPacketSize,
         LhsPacketSizeHalf = HalfTraits::LhsPacketSize,
         LhsPacketSizeQuarter = QuarterTraits::LhsPacketSize,
         // Narrower packet stages only exist if strictly smaller than the
         // level above them.
         HasHalf = (int)ResPacketSizeHalf < (int)ResPacketSize,
         HasQuarter = (int)ResPacketSizeQuarter < (int)ResPacketSizeHalf
  };

  Index i=0;
  // 8 rows at once: vectorized dot products over full columns, then a scalar
  // remainder loop over the last cols % LhsPacketSize columns.
  for(; i<n8; i+=8)
  {
    ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
              c1 = pset1<ResPacket>(ResScalar(0)),
              c2 = pset1<ResPacket>(ResScalar(0)),
              c3 = pset1<ResPacket>(ResScalar(0)),
              c4 = pset1<ResPacket>(ResScalar(0)),
              c5 = pset1<ResPacket>(ResScalar(0)),
              c6 = pset1<ResPacket>(ResScalar(0)),
              c7 = pset1<ResPacket>(ResScalar(0));

    Index j=0;
    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)
    {
      RhsPacket b0 = rhs.template load<RhsPacket, Unaligned>(j,0);

      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);
      c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+1,j),b0,c1);
      c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+2,j),b0,c2);
      c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+3,j),b0,c3);
      c4 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+4,j),b0,c4);
      c5 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+5,j),b0,c5);
      c6 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+6,j),b0,c6);
      c7 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+7,j),b0,c7);
    }
    // Horizontal reduction of each packet accumulator to a scalar.
    ResScalar cc0 = predux(c0);
    ResScalar cc1 = predux(c1);
    ResScalar cc2 = predux(c2);
    ResScalar cc3 = predux(c3);
    ResScalar cc4 = predux(c4);
    ResScalar cc5 = predux(c5);
    ResScalar cc6 = predux(c6);
    ResScalar cc7 = predux(c7);
    for(; j<cols; ++j)
    {
      RhsScalar b0 = rhs(j,0);

      cc0 += cj.pmul(lhs(i+0,j), b0);
      cc1 += cj.pmul(lhs(i+1,j), b0);
      cc2 += cj.pmul(lhs(i+2,j), b0);
      cc3 += cj.pmul(lhs(i+3,j), b0);
      cc4 += cj.pmul(lhs(i+4,j), b0);
      cc5 += cj.pmul(lhs(i+5,j), b0);
      cc6 += cj.pmul(lhs(i+6,j), b0);
      cc7 += cj.pmul(lhs(i+7,j), b0);
    }
    res[(i+0)*resIncr] += alpha*cc0;
    res[(i+1)*resIncr] += alpha*cc1;
    res[(i+2)*resIncr] += alpha*cc2;
    res[(i+3)*resIncr] += alpha*cc3;
    res[(i+4)*resIncr] += alpha*cc4;
    res[(i+5)*resIncr] += alpha*cc5;
    res[(i+6)*resIncr] += alpha*cc6;
    res[(i+7)*resIncr] += alpha*cc7;
  }
  // 4 rows at once.
  for(; i<n4; i+=4)
  {
    ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
              c1 = pset1<ResPacket>(ResScalar(0)),
              c2 = pset1<ResPacket>(ResScalar(0)),
              c3 = pset1<ResPacket>(ResScalar(0));

    Index j=0;
    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)
    {
      RhsPacket b0 = rhs.template load<RhsPacket, Unaligned>(j,0);

      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);
      c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+1,j),b0,c1);
      c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+2,j),b0,c2);
      c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+3,j),b0,c3);
    }
    ResScalar cc0 = predux(c0);
    ResScalar cc1 = predux(c1);
    ResScalar cc2 = predux(c2);
    ResScalar cc3 = predux(c3);
    for(; j<cols; ++j)
    {
      RhsScalar b0 = rhs(j,0);

      cc0 += cj.pmul(lhs(i+0,j), b0);
      cc1 += cj.pmul(lhs(i+1,j), b0);
      cc2 += cj.pmul(lhs(i+2,j), b0);
      cc3 += cj.pmul(lhs(i+3,j), b0);
    }
    res[(i+0)*resIncr] += alpha*cc0;
    res[(i+1)*resIncr] += alpha*cc1;
    res[(i+2)*resIncr] += alpha*cc2;
    res[(i+3)*resIncr] += alpha*cc3;
  }
  // 2 rows at once.
  for(; i<n2; i+=2)
  {
    ResPacket c0 = pset1<ResPacket>(ResScalar(0)),
              c1 = pset1<ResPacket>(ResScalar(0));

    Index j=0;
    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)
    {
      RhsPacket b0 = rhs.template load<RhsPacket, Unaligned>(j,0);

      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);
      c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+1,j),b0,c1);
    }
    ResScalar cc0 = predux(c0);
    ResScalar cc1 = predux(c1);
    for(; j<cols; ++j)
    {
      RhsScalar b0 = rhs(j,0);

      cc0 += cj.pmul(lhs(i+0,j), b0);
      cc1 += cj.pmul(lhs(i+1,j), b0);
    }
    res[(i+0)*resIncr] += alpha*cc0;
    res[(i+1)*resIncr] += alpha*cc1;
  }
  // Remaining rows one at a time; the column remainder additionally uses
  // half- and quarter-width packets before falling back to scalars.
  for(; i<rows; ++i)
  {
    ResPacket c0 = pset1<ResPacket>(ResScalar(0));
    ResPacketHalf c0_h = pset1<ResPacketHalf>(ResScalar(0));
    ResPacketQuarter c0_q = pset1<ResPacketQuarter>(ResScalar(0));
    Index j=0;
    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)
    {
      RhsPacket b0 = rhs.template load<RhsPacket,Unaligned>(j,0);
      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i,j),b0,c0);
    }
    ResScalar cc0 = predux(c0);
    if (HasHalf) {
      for(; j+LhsPacketSizeHalf<=cols; j+=LhsPacketSizeHalf)
      {
        RhsPacketHalf b0 = rhs.template load<RhsPacketHalf,Unaligned>(j,0);
        c0_h = pcj_half.pmadd(lhs.template load<LhsPacketHalf,LhsAlignment>(i,j),b0,c0_h);
      }
      cc0 += predux(c0_h);
    }
    if (HasQuarter) {
      for(; j+LhsPacketSizeQuarter<=cols; j+=LhsPacketSizeQuarter)
      {
        RhsPacketQuarter b0 = rhs.template load<RhsPacketQuarter,Unaligned>(j,0);
        c0_q = pcj_quarter.pmadd(lhs.template load<LhsPacketQuarter,LhsAlignment>(i,j),b0,c0_q);
      }
      cc0 += predux(c0_q);
    }
    for(; j<cols; ++j)
    {
      cc0 += cj.pmul(lhs(i,j), rhs(j,0));
    }
    res[i*resIncr] += alpha*cc0;
  }
}
|
513
|
+
|
514
|
+
} // end namespace internal
|
515
|
+
|
516
|
+
} // end namespace Eigen
|
517
|
+
|
518
|
+
#endif // EIGEN_GENERAL_MATRIX_VECTOR_H
|
@@ -0,0 +1,136 @@
|
|
1
|
+
/*
|
2
|
+
Copyright (c) 2011, Intel Corporation. All rights reserved.
|
3
|
+
|
4
|
+
Redistribution and use in source and binary forms, with or without modification,
|
5
|
+
are permitted provided that the following conditions are met:
|
6
|
+
|
7
|
+
* Redistributions of source code must retain the above copyright notice, this
|
8
|
+
list of conditions and the following disclaimer.
|
9
|
+
* Redistributions in binary form must reproduce the above copyright notice,
|
10
|
+
this list of conditions and the following disclaimer in the documentation
|
11
|
+
and/or other materials provided with the distribution.
|
12
|
+
* Neither the name of Intel Corporation nor the names of its contributors may
|
13
|
+
be used to endorse or promote products derived from this software without
|
14
|
+
specific prior written permission.
|
15
|
+
|
16
|
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
17
|
+
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
18
|
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
19
|
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
20
|
+
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21
|
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22
|
+
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
23
|
+
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24
|
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25
|
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26
|
+
|
27
|
+
********************************************************************************
|
28
|
+
* Content : Eigen bindings to BLAS F77
|
29
|
+
* General matrix-vector product functionality based on ?GEMV.
|
30
|
+
********************************************************************************
|
31
|
+
*/
|
32
|
+
|
33
|
+
#ifndef EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
|
34
|
+
#define EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
|
35
|
+
|
36
|
+
namespace Eigen {
|
37
|
+
|
38
|
+
namespace internal {
|
39
|
+
|
40
|
+
/**********************************************************************
|
41
|
+
* This file implements general matrix-vector multiplication using BLAS
|
42
|
+
* gemv function via partial specialization of
|
43
|
+
* general_matrix_vector_product::run(..) method for float, double,
|
44
|
+
* std::complex<float> and std::complex<double> types
|
45
|
+
**********************************************************************/
|
46
|
+
|
47
|
+
// gemv specialization
|
48
|
+
|
49
|
+
// Forward declaration of the BLAS-backed gemv kernel; specialized below
// (via EIGEN_BLAS_GEMV_SPECIALIZATION) for float, double,
// std::complex<float> and std::complex<double>.
template<typename Index, typename LhsScalar, int StorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
struct general_matrix_vector_product_gemv;
|
51
|
+
|
52
|
+
// Specializes general_matrix_vector_product<...,Specialized> for `Scalar` so
// that GEMV dispatches to the BLAS-backed kernel above.
// Col-major case: when ConjugateLhs is set, fall back to Eigen's built-in
// kernel (the conjugation cannot be expressed through this gemv call path);
// otherwise forward to general_matrix_vector_product_gemv.
// Row-major case: always forward to the gemv kernel (trans/conj is encoded
// there via the `trans` character).
// NOTE: comments cannot be placed inside the macro body — a `//` comment
// would swallow the backslash line-continuations.
#define EIGEN_BLAS_GEMV_SPECIALIZE(Scalar) \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,Specialized> { \
static void run( \
  Index rows, Index cols, \
  const const_blas_data_mapper<Scalar,Index,ColMajor> &lhs, \
  const const_blas_data_mapper<Scalar,Index,RowMajor> &rhs, \
  Scalar* res, Index resIncr, Scalar alpha) \
{ \
  if (ConjugateLhs) { \
    general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,BuiltIn>::run( \
      rows, cols, lhs, rhs, res, resIncr, alpha); \
  } else { \
    general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
      rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \
  } \
} \
}; \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,RowMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ConjugateRhs,Specialized> { \
static void run( \
  Index rows, Index cols, \
  const const_blas_data_mapper<Scalar,Index,RowMajor> &lhs, \
  const const_blas_data_mapper<Scalar,Index,ColMajor> &rhs, \
  Scalar* res, Index resIncr, Scalar alpha) \
{ \
    general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
      rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \
} \
}; \

// Instantiate the dispatch specializations for the four BLAS scalar types.
EIGEN_BLAS_GEMV_SPECIALIZE(double)
EIGEN_BLAS_GEMV_SPECIALIZE(float)
EIGEN_BLAS_GEMV_SPECIALIZE(dcomplex)
EIGEN_BLAS_GEMV_SPECIALIZE(scomplex)
|
87
|
+
|
88
|
+
// Defines general_matrix_vector_product_gemv<...,EIGTYPE,...>::run as a thin
// wrapper around the BLAS ?gemv routine BLASFUNC, with beta fixed to 1 so the
// call accumulates into res (res += alpha * op(lhs) * rhs).
//  - trans: 'N' for col-major lhs; for row-major, 'C' (conjugate-transpose)
//    when ConjugateLhs else 'T', with m/n swapped to match BLAS conventions.
//  - ConjugateRhs has no direct BLAS flag, so the rhs vector is conjugated
//    into a temporary (x_tmp) with unit stride first.
// NOTE: comments cannot be placed inside the macro body — a `//` comment
// would swallow the backslash line-continuations.
#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \
template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \
{ \
typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\
\
static void run( \
  Index rows, Index cols, \
  const EIGTYPE* lhs, Index lhsStride, \
  const EIGTYPE* rhs, Index rhsIncr, \
  EIGTYPE* res, Index resIncr, EIGTYPE alpha) \
{ \
  BlasIndex m=convert_index<BlasIndex>(rows), n=convert_index<BlasIndex>(cols), \
            lda=convert_index<BlasIndex>(lhsStride), incx=convert_index<BlasIndex>(rhsIncr), incy=convert_index<BlasIndex>(resIncr); \
  const EIGTYPE beta(1); \
  const EIGTYPE *x_ptr; \
  char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 'C' : 'T'; \
  if (LhsStorageOrder==RowMajor) { \
    m = convert_index<BlasIndex>(cols); \
    n = convert_index<BlasIndex>(rows); \
  }\
  GEMVVector x_tmp; \
  if (ConjugateRhs) { \
    Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \
    x_tmp=map_x.conjugate(); \
    x_ptr=x_tmp.data(); \
    incx=1; \
  } else x_ptr=rhs; \
  BLASFUNC(&trans, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \
}\
};

// MKL uses unsuffixed symbol names and its own complex types; the reference
// Fortran BLAS interface uses trailing-underscore symbols.
#ifdef EIGEN_USE_MKL
EIGEN_BLAS_GEMV_SPECIALIZATION(double,   double,       dgemv)
EIGEN_BLAS_GEMV_SPECIALIZATION(float,    float,        sgemv)
EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, zgemv)
EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, MKL_Complex8 , cgemv)
#else
EIGEN_BLAS_GEMV_SPECIALIZATION(double,   double, dgemv_)
EIGEN_BLAS_GEMV_SPECIALIZATION(float,    float,  sgemv_)
EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, zgemv_)
EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float,  cgemv_)
#endif
|
131
|
+
|
132
|
+
} // end namespace internal
|
133
|
+
|
134
|
+
} // end namespace Eigen
|
135
|
+
|
136
|
+
#endif // EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
|