tomoto 0.2.3 → 0.3.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +8 -10
- data/ext/tomoto/extconf.rb +6 -2
- data/ext/tomoto/{ext.cpp → tomoto.cpp} +1 -1
- data/lib/tomoto/version.rb +1 -1
- data/lib/tomoto.rb +5 -1
- data/vendor/EigenRand/EigenRand/Core.h +10 -10
- data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
- data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
- data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
- data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
- data/vendor/EigenRand/EigenRand/EigenRand +11 -6
- data/vendor/EigenRand/EigenRand/Macro.h +13 -7
- data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
- data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
- data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
- data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
- data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
- data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
- data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
- data/vendor/EigenRand/EigenRand/doc.h +24 -12
- data/vendor/EigenRand/README.md +57 -4
- data/vendor/eigen/COPYING.APACHE +203 -0
- data/vendor/eigen/COPYING.BSD +1 -1
- data/vendor/eigen/COPYING.MINPACK +51 -52
- data/vendor/eigen/Eigen/Cholesky +0 -1
- data/vendor/eigen/Eigen/Core +112 -265
- data/vendor/eigen/Eigen/Eigenvalues +2 -3
- data/vendor/eigen/Eigen/Geometry +5 -8
- data/vendor/eigen/Eigen/Householder +0 -1
- data/vendor/eigen/Eigen/Jacobi +0 -1
- data/vendor/eigen/Eigen/KLUSupport +41 -0
- data/vendor/eigen/Eigen/LU +2 -5
- data/vendor/eigen/Eigen/OrderingMethods +0 -3
- data/vendor/eigen/Eigen/PaStiXSupport +1 -0
- data/vendor/eigen/Eigen/PardisoSupport +0 -0
- data/vendor/eigen/Eigen/QR +2 -3
- data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
- data/vendor/eigen/Eigen/SVD +0 -1
- data/vendor/eigen/Eigen/Sparse +0 -2
- data/vendor/eigen/Eigen/SparseCholesky +0 -8
- data/vendor/eigen/Eigen/SparseLU +4 -0
- data/vendor/eigen/Eigen/SparseQR +0 -1
- data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
- data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
- data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
- data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
- data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
- data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
- data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
- data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
- data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
- data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
- data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
- data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
- data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
- data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
- data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
- data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
- data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
- data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
- data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
- data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
- data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
- data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
- data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
- data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
- data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
- data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
- data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
- data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
- data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
- data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
- data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
- data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
- data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
- data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
- data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
- data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
- data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
- data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
- data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
- data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
- data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
- data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
- data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
- data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
- data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
- data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
- data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
- data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
- data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
- data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
- data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
- data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
- data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
- data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
- data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
- data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
- data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
- data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
- data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
- data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
- data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
- data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
- data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
- data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
- data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
- data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
- data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
- data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
- data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
- data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
- data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
- data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
- data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
- data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
- data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
- data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
- data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
- data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
- data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
- data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
- data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
- data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
- data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
- data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
- data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
- data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
- data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
- data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
- data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
- data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
- data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
- data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
- data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
- data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
- data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
- data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
- data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
- data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
- data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
- data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
- data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
- data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
- data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
- data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
- data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
- data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
- data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
- data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
- data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
- data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
- data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
- data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
- data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
- data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
- data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
- data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
- data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
- data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
- data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
- data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
- data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
- data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
- data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
- data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
- data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
- data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
- data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
- data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
- data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
- data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
- data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
- data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
- data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
- data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
- data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
- data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
- data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
- data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
- data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
- data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
- data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
- data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
- data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
- data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
- data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/eigen/README.md +2 -0
- data/vendor/eigen/bench/btl/README +1 -1
- data/vendor/eigen/bench/tensors/README +6 -7
- data/vendor/eigen/ci/README.md +56 -0
- data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
- data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
- data/vendor/eigen/unsupported/README.txt +1 -1
- data/vendor/tomotopy/README.kr.rst +21 -0
- data/vendor/tomotopy/README.rst +20 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
- data/vendor/tomotopy/src/Labeling/Phraser.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/CTModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DTModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +53 -2
- data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +16 -5
- data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
- data/vendor/tomotopy/src/TopicModel/PTModel.hpp +31 -1
- data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +7 -5
- data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
- data/vendor/tomotopy/src/Utils/exception.h +6 -0
- data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
- data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
- metadata +60 -14
- data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
- data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
@@ -164,7 +164,7 @@ public:
       }
       else
       {
-        if(m_matrix.isCompressed())
+        if(m_matrix.isCompressed() && nnz!=block_size)
         {
           // no need to realloc, simply copy the tail at its respective position and insert tmp
           matrix.data().resize(start + nnz + tail_size);
@@ -326,46 +326,6 @@ private:
 
 //----------
 
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major).
-  */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return InnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major). Read-only.
-  */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return ConstInnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major).
-  */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{
-  return Block<Derived,Dynamic,Dynamic,true>(derived(),
-                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
-                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major). Read-only.
-  */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{
-  return Block<const Derived,Dynamic,Dynamic,true>(derived(),
-                                                   IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
-                                                   IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
 /** Generic implementation of sparse Block expression.
   * Real-only.
   */
@@ -486,9 +446,13 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased
     {}
 
     inline Index nonZerosEstimate() const {
-      Index nnz = m_block.nonZeros();
-      if(nnz<0)
-        return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+      const Index nnz = m_block.nonZeros();
+      if(nnz < 0) {
+        // Scale the non-zero estimate for the underlying expression linearly with block size.
+        // Return zero if the underlying block is empty.
+        const Index nested_sz = m_block.nestedExpression().size();
+        return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
+      }
       return nnz;
     }
 
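Worked example of the estimate above (an illustration, not taken from the diff): a 10x10 block of a 100x100 expression whose evaluator estimates 500 nonzeros is now estimated at 500 * (10*10) / (100*100) = 5 nonzeros, while a block of an empty (size-0) nested expression returns 0 instead of dividing by zero.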
@@ -503,22 +467,25 @@ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
 class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
   : public EvalIterator
 {
-  enum { IsRowMajor = unary_evaluator::IsRowMajor };
+  // NOTE MSVC fails to compile if we don't explicitely "import" IsRowMajor from unary_evaluator
+  // because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
+  // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
+  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
   const XprType& m_block;
   Index m_end;
 public:
 
   EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
-    : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
+    : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
       m_block(aEval.m_block),
-      m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
   {
-    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
+    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
       EvalIterator::operator++();
   }
 
-  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
-  inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
+  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
+  inline Index outer() const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
   inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
   inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
 
@@ -528,7 +495,8 @@ public:
 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
 class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
 {
-  enum { IsRowMajor = unary_evaluator::IsRowMajor };
+  // NOTE see above
+  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
   const unary_evaluator& m_eval;
   Index m_outerPos;
   const Index m_innerIndex;
@@ -538,9 +506,9 @@ public:
 
   EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
     : m_eval(aEval),
-      m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
-      m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
-      m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
+      m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
+      m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
+      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
       m_it(m_eval.m_argImpl, m_outerPos)
   {
     EIGEN_UNUSED_VARIABLE(outer);
@@ -551,10 +519,10 @@ public:
     ++(*this);
   }
 
-  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
+  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
   inline Index outer() const { return 0; }
-  inline Index row() const { return IsRowMajor ? 0 : index(); }
-  inline Index col() const { return IsRowMajor ? index() : 0; }
+  inline Index row() const { return XprIsRowMajor ? 0 : index(); }
+  inline Index col() const { return XprIsRowMajor ? index() : 0; }
 
   inline Scalar value() const { return m_it.value(); }
   inline Scalar& valueRef() { return m_it.valueRef(); }
@@ -128,6 +128,28 @@ class SparseCompressedBase
   protected:
     /** Default constructor. Do nothing. */
     SparseCompressedBase() {}
+
+    /** \internal return the index of the coeff at (row,col) or just before if it does not exist.
+      * This is an analogue of std::lower_bound.
+      */
+    internal::LowerBoundIndex lower_bound(Index row, Index col) const
+    {
+      eigen_internal_assert(row>=0 && row<this->rows() && col>=0 && col<this->cols());
+
+      const Index outer = Derived::IsRowMajor ? row : col;
+      const Index inner = Derived::IsRowMajor ? col : row;
+
+      Index start = this->outerIndexPtr()[outer];
+      Index end = this->isCompressed() ? this->outerIndexPtr()[outer+1] : this->outerIndexPtr()[outer] + this->innerNonZeroPtr()[outer];
+      eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
+      internal::LowerBoundIndex p;
+      p.value = std::lower_bound(this->innerIndexPtr()+start, this->innerIndexPtr()+end,inner) - this->innerIndexPtr();
+      p.found = (p.value<end) && (this->innerIndexPtr()[p.value]==inner);
+      return p;
+    }
+
+    friend struct internal::evaluator<SparseCompressedBase<Derived> >;
+
   private:
     template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
 };
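For orientation, here is a standalone sketch (not Eigen code) of the search the new `lower_bound()` helper performs on compressed storage: `outer[]` delimits each row's slice of `inner[]`, and `std::lower_bound` binary-searches the column index inside that slice. The array names and the `csr_lower_bound` function are illustrative only.

```cpp
#include <algorithm>
#include <cstdio>

struct LowerBoundResult { int value; bool found; };

LowerBoundResult csr_lower_bound(const int* outer, const int* inner, int row, int col)
{
  const int start = outer[row];
  const int end   = outer[row + 1];  // compressed mode: next outer offset
  const int p = int(std::lower_bound(inner + start, inner + end, col) - inner);
  LowerBoundResult r = { p, p < end && inner[p] == col };
  return r;
}

int main()
{
  // 3x4 CSR matrix with nonzeros at (0,1), (0,3), (1,2), (2,0), (2,3)
  const int outer[] = { 0, 2, 3, 5 };
  const int inner[] = { 1, 3, 2, 0, 3 };
  LowerBoundResult hit  = csr_lower_bound(outer, inner, 0, 3); // present
  LowerBoundResult miss = csr_lower_bound(outer, inner, 1, 0); // absent: insertion point
  std::printf("hit: value=%d found=%d\n", hit.value, hit.found);
  std::printf("miss: value=%d found=%d\n", miss.value, miss.found);
  return 0;
}
```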
@@ -185,6 +207,14 @@ class SparseCompressedBase<Derived>::InnerIterator
     }
 
     inline InnerIterator& operator++() { m_id++; return *this; }
+    inline InnerIterator& operator+=(Index i) { m_id += i ; return *this; }
+
+    inline InnerIterator operator+(Index i)
+    {
+      InnerIterator result = *this;
+      result += i;
+      return result;
+    }
 
     inline const Scalar& value() const { return m_values[m_id]; }
     inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
@@ -245,6 +275,14 @@ class SparseCompressedBase<Derived>::ReverseInnerIterator
     }
 
     inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+    inline ReverseInnerIterator& operator-=(Index i) { m_id -= i; return *this; }
+
+    inline ReverseInnerIterator operator-(Index i)
+    {
+      ReverseInnerIterator result = *this;
+      result -= i;
+      return result;
+    }
 
     inline const Scalar& value() const { return m_values[m_id-1]; }
     inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
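A minimal usage sketch of the iterator arithmetic added above, assuming the Eigen 3.4 behavior shipped with this vendored copy:

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4, 1);   // one column with three nonzeros
  A.insert(1, 0) = 1.0;
  A.insert(2, 0) = 2.0;
  A.insert(3, 0) = 3.0;
  A.makeCompressed();

  Eigen::SparseMatrix<double>::InnerIterator it(A, 0); // first nonzero of column 0
  it += 2;                                             // skip two nonzeros at once
  std::cout << it.row() << " -> " << it.value() << "\n"; // expected: 3 -> 3
  return 0;
}
```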
@@ -317,17 +355,8 @@ protected:
 
     Index find(Index row, Index col) const
     {
-      eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
-
-      const Index outer = Derived::IsRowMajor ? row : col;
-      const Index inner = Derived::IsRowMajor ? col : row;
-
-      Index start = m_matrix->outerIndexPtr()[outer];
-      Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
-      eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
-      const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
-
-      return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
+      internal::LowerBoundIndex p = m_matrix->lower_bound(row,col);
+      return p.found ? p.value : Dynamic;
     }
 
     const Derived *m_matrix;
@@ -101,7 +101,7 @@ public:
       }
       else
       {
-        m_value = 0; // this is to avoid a compilation warning
+        m_value = Scalar(0); // this is to avoid a compilation warning
        m_id = -1;
      }
      return *this;
@@ -126,7 +126,7 @@ public:
 
 
   enum {
-    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+    CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
     Flags = XprType::Flags
   };
 
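A small sketch of the kind of enum arithmetic these `int(...)` casts are applied to. The constants below are made up, and the stated motivation (keeping the sum in plain `int` rather than in a mix of unnamed enumeration types) is an assumption based on the change, not something stated in the diff:

```cpp
enum { CostA = 10000 };  // e.g. a large sentinel cost
enum { CostB = 3 };
enum { CostC = 1 };

// Casting each term makes the sum ordinary int arithmetic instead of
// mixing three distinct unnamed enumeration types.
enum { Total = int(CostA) + int(CostB) + int(CostC) };
static_assert(Total == 10004, "cost sums behave like plain ints");

int main() { return 0; }
```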
@@ -211,9 +211,8 @@ public:
 
 
   enum {
-    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
-
-    Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit)
+    CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+    Flags = XprType::Flags
   };
 
   explicit binary_evaluator(const XprType& xpr)
@@ -299,9 +298,8 @@ public:
 
 
   enum {
-    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
-
-    Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit)
+    CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+    Flags = XprType::Flags
   };
 
   explicit binary_evaluator(const XprType& xpr)
@@ -459,7 +457,7 @@ public:
 
 
   enum {
-    CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+    CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
     Flags = XprType::Flags
   };
 
@@ -532,9 +530,8 @@ public:
 
 
   enum {
-    CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
-
-    Flags = (XprType::Flags & ~RowMajorBit) | (int(RhsArg::Flags)&RowMajorBit)
+    CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+    Flags = XprType::Flags
   };
 
   explicit sparse_conjunction_evaluator(const XprType& xpr)
@@ -607,9 +604,8 @@ public:
 
 
   enum {
-    CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
-
-    Flags = (XprType::Flags & ~RowMajorBit) | (int(LhsArg::Flags)&RowMajorBit)
+    CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+    Flags = XprType::Flags
   };
 
   explicit sparse_conjunction_evaluator(const XprType& xpr)
@@ -24,7 +24,7 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
     class InnerIterator;
 
     enum {
-      CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
+      CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
      Flags = XprType::Flags
    };
 
@@ -49,6 +49,7 @@ template<typename UnaryOp, typename ArgType>
 class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
   : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
 {
+  protected:
    typedef typename XprType::Scalar Scalar;
    typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
   public:
@@ -78,7 +79,7 @@ struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
     class InnerIterator;
 
     enum {
-      CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+      CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<ViewOp>::Cost),
      Flags = XprType::Flags
    };
 
@@ -99,6 +100,7 @@ template<typename ViewOp, typename ArgType>
 class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
   : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
 {
+  protected:
    typedef typename XprType::Scalar Scalar;
    typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
   public:
@@ -88,10 +88,11 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
   typedef typename internal::remove_all<SparseLhsType>::type Lhs;
   typedef typename internal::remove_all<DenseRhsType>::type Rhs;
   typedef typename internal::remove_all<DenseResType>::type Res;
-  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+  typedef evaluator<Lhs> LhsEval;
+  typedef typename LhsEval::InnerIterator LhsInnerIterator;
   static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
   {
-    evaluator<Lhs> lhsEval(lhs);
+    LhsEval lhsEval(lhs);
     for(Index c=0; c<rhs.cols(); ++c)
     {
       for(Index j=0; j<lhs.outerSize(); ++j)
@@ -111,17 +112,38 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
   typedef typename internal::remove_all<SparseLhsType>::type Lhs;
   typedef typename internal::remove_all<DenseRhsType>::type Rhs;
   typedef typename internal::remove_all<DenseResType>::type Res;
-  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+  typedef evaluator<Lhs> LhsEval;
+  typedef typename LhsEval::InnerIterator LhsInnerIterator;
   static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
-    evaluator<Lhs> lhsEval(lhs);
-    for(Index j=0; j<lhs.outerSize(); ++j)
+    Index n = lhs.rows();
+    LhsEval lhsEval(lhs);
+
+#ifdef EIGEN_HAS_OPENMP
+    Eigen::initParallel();
+    Index threads = Eigen::nbThreads();
+    // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
+    // It basically represents the minimal amount of work to be done to be worth it.
+    if(threads>1 && lhsEval.nonZerosEstimate()*rhs.cols() > 20000)
     {
-      typename Res::RowXpr res_j(res.row(j));
-      for(LhsInnerIterator it(lhsEval,j); it ;++it)
-        res_j += (alpha*it.value()) * rhs.row(it.index());
+      #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
+      for(Index i=0; i<n; ++i)
+        processRow(lhsEval,rhs,res,alpha,i);
+    }
+    else
+#endif
+    {
+      for(Index i=0; i<n; ++i)
+        processRow(lhsEval, rhs, res, alpha, i);
     }
   }
+
+  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, Res& res, const typename Res::Scalar& alpha, Index i)
+  {
+    typename Res::RowXpr res_i(res.row(i));
+    for(LhsInnerIterator it(lhsEval,i); it ;++it)
+      res_i += (alpha*it.value()) * rhs.row(it.index());
+  }
 };
 
 template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
@@ -21,7 +21,7 @@ namespace Eigen {
   * This class implements a more versatile variants of the common \em compressed row/column storage format.
   * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
   * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
-  * space
+  * space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
   * can be done with limited memory reallocation and copies.
   *
   * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
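The workflow described in this comment, written out as a hedged, self-contained sketch (the size and the 3-nonzeros-per-column estimate are arbitrary):

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  const int n = 1000;
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 3)); // room for ~3 nonzeros per column
  for (int j = 0; j < n; ++j)
    A.insert(j, j) = 1.0;                     // cheap while reserved space remains
  A.makeCompressed();                         // back to the standard compressed format
  std::cout << A.nonZeros() << "\n";          // 1000
  return 0;
}
```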
@@ -99,6 +99,8 @@ class SparseMatrix
     typedef SparseCompressedBase<SparseMatrix> Base;
     using Base::convert_index;
     friend class SparseVector<_Scalar,0,_StorageIndex>;
+    template<typename, typename, typename, typename, typename>
+    friend struct internal::Assignment;
   public:
     using Base::isCompressed;
     using Base::nonZeros;
@@ -327,7 +329,8 @@ class SparseMatrix
         m_outerIndex[j] = newOuterIndex[j];
         m_innerNonZeros[j] = innerNNZ;
       }
-      m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+      if(m_outerSize>0)
+        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
 
       m_data.resize(m_outerIndex[m_outerSize]);
     }
@@ -502,8 +505,8 @@ class SparseMatrix
         m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
       }
     }
-
-    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the
+
+    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
     void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       prune(default_prunning_func(reference,epsilon));
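A hedged usage sketch of the prune() overload documented above:

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 1.0;
  A.insert(1, 1) = 1e-16;  // negligible next to the reference magnitude below
  A.insert(2, 2) = 2.0;

  A.prune(1.0);            // keep entries that are not much smaller than 1.0
  std::cout << A.nonZeros() << "\n";  // expected: 2
  return 0;
}
```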
@@ -576,10 +579,12 @@ class SparseMatrix
       else if (innerChange < 0)
       {
         // Inner size decreased: allocate a new m_innerNonZeros
-        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
+        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
         if (!m_innerNonZeros) internal::throw_std_bad_alloc();
-        for(Index i = 0; i < m_outerSize; i++)
+        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
           m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
+          m_innerNonZeros[i] = 0;
       }
 
       // Change the m_innerNonZeros in case of a decrease of inner size
@@ -604,9 +609,9 @@ class SparseMatrix
       m_outerIndex = newOuterIndex;
       if (outerChange > 0)
       {
-        StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
         for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
-          m_outerIndex[i] = last;
+          m_outerIndex[i] = lastIdx;
       }
       m_outerSize += outerChange;
     }
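A hedged sketch of the conservativeResize() path patched above: growing a sparse matrix keeps the stored coefficients and leaves the new outer vectors empty.

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 1.0;
  A.insert(1, 1) = 2.0;

  A.conservativeResize(4, 5);  // existing entries survive the resize
  std::cout << A.rows() << "x" << A.cols()
            << " nnz=" << A.nonZeros() << "\n";  // 4x5 nnz=2
  return 0;
}
```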
@@ -780,6 +785,9 @@ class SparseMatrix
     template<typename OtherDerived>
     inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
     { return Base::operator=(other.derived()); }
+
+    template<typename Lhs, typename Rhs>
+    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
 #endif // EIGEN_PARSED_BY_DOXYGEN
 
     template<typename OtherDerived>
@@ -895,6 +903,113 @@ public:
       m_data.index(p) = convert_index(inner);
       return (m_data.value(p) = Scalar(0));
     }
+protected:
+    struct IndexPosPair {
+      IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+      Index i;
+      Index p;
+    };
+
+    /** \internal assign \a diagXpr to the diagonal of \c *this
+      * There are different strategies:
+      *   1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can work treat *this as a dense vector expression.
+      *   2 - otherwise, for each diagonal coeff,
+      *     2.a - if it already exists, then we update it,
+      *     2.b - otherwise, if *this is uncompressed and that the current inner-vector has empty room for at least 1 element, then we perform an in-place insertion.
+      *     2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
+      *   3 - at the end, if some entries failed to be inserted in-place, then we alloc a new buffer, copy each chunk at the right position, and insert the new elements.
+      *
+      * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+      * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+      *       then it *might* be better to disable case 2.b since they will have to be copied anyway.
+      */
+    template<typename DiagXpr, typename Func>
+    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+    {
+      Index n = diagXpr.size();
+
+      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+      if(overwrite)
+      {
+        if((this->rows()!=n) || (this->cols()!=n))
+          this->resize(n, n);
+      }
+
+      if(m_data.size()==0 || overwrite)
+      {
+        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+        this->makeCompressed();
+        this->resizeNonZeros(n);
+        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+        Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+        values.setZero();
+        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+      }
+      else
+      {
+        bool isComp = isCompressed();
+        internal::evaluator<DiagXpr> diaEval(diagXpr);
+        std::vector<IndexPosPair> newEntries;
+
+        // 1 - try in-place update and record insertion failures
+        for(Index i = 0; i<n; ++i)
+        {
+          internal::LowerBoundIndex lb = this->lower_bound(i,i);
+          Index p = lb.value;
+          if(lb.found)
+          {
+            // the coeff already exists
+            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+          }
+          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+          {
+            // non compressed mode with local room for inserting one element
+            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+            m_innerNonZeros[i]++;
+            m_data.value(p) = Scalar(0);
+            m_data.index(p) = StorageIndex(i);
+            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+          }
+          else
+          {
+            // defer insertion
+            newEntries.push_back(IndexPosPair(i,p));
+          }
+        }
+        // 2 - insert deferred entries
+        Index n_entries = Index(newEntries.size());
+        if(n_entries>0)
+        {
+          Storage newData(m_data.size()+n_entries);
+          Index prev_p = 0;
+          Index prev_i = 0;
+          for(Index k=0; k<n_entries;++k)
+          {
+            Index i = newEntries[k].i;
+            Index p = newEntries[k].p;
+            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+            for(Index j=prev_i;j<i;++j)
+              m_outerIndex[j+1] += k;
+            if(!isComp)
+              m_innerNonZeros[i]++;
+            prev_p = p;
+            prev_i = i;
+            newData.value(p+k) = Scalar(0);
+            newData.index(p+k) = StorageIndex(i);
+            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
+          }
+          {
+            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+            for(Index j=prev_i+1;j<=m_outerSize;++j)
+              m_outerIndex[j] += n_entries;
+          }
+          m_data.swap(newData);
+        }
+      }
+    }
 
   private:
     static void check_template_parameters()
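What assignDiagonal() is used for, as a hedged sketch: in the Eigen 3.4 line vendored here, the diagonal of a SparseMatrix can be assigned and updated through `.diagonal()`. This usage is an assumption based on the code above, not spelled out in the diff:

```cpp
#include <Eigen/SparseCore>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 1) = 5.0;                                // unrelated off-diagonal entry
  A.makeCompressed();

  A.diagonal() = Eigen::VectorXd::Constant(3, 1.0);    // inserts missing diagonal coeffs
  A.diagonal() += Eigen::VectorXd::Constant(3, 0.5);   // in-place update of existing ones
  std::cout << Eigen::MatrixXd(A) << "\n";
  return 0;
}
```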
@@ -973,7 +1088,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
   * \code
     typedef Triplet<double> T;
     std::vector<T> tripletList;
-    triplets.reserve(estimation_of_entries);
+    tripletList.reserve(estimation_of_entries);
     for(...)
     {
       // ...
@@ -986,7 +1101,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
   *
   * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
   * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
-  * be explicitely stored into a std::vector for instance.
+  * be explicitly stored into a std::vector for instance.
   */
 template<typename Scalar, int _Options, typename _StorageIndex>
 template<typename InputIterators>
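The documentation snippet fixed above, expanded into a hedged, compilable form:

```cpp
#include <Eigen/SparseCore>
#include <vector>
#include <iostream>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> tripletList;
  tripletList.reserve(4);                 // estimation_of_entries
  tripletList.push_back(T(0, 0, 1.0));
  tripletList.push_back(T(1, 2, 2.0));
  tripletList.push_back(T(2, 1, 3.0));
  tripletList.push_back(T(2, 1, 0.5));    // duplicates are summed by default

  Eigen::SparseMatrix<double> m(3, 3);
  m.setFromTriplets(tripletList.begin(), tripletList.end());
  std::cout << m.coeff(2, 1) << "\n";     // 3.5
  return 0;
}
```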
@@ -1232,7 +1347,7 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
     }
 
     m_data.index(p) = convert_index(inner);
-    return (m_data.value(p) = 0);
+    return (m_data.value(p) = Scalar(0));
   }
 
   if(m_data.size() != m_data.allocatedSize())
@@ -87,6 +87,11 @@ template<typename Derived> class SparseMatrixBase
       * we are dealing with a column-vector (if there is only one column) or with
       * a row-vector (if there is only one row). */
 
+    NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
+    /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
+      * and 2 for matrices.
+      */
+
     Flags = internal::traits<Derived>::Flags,
     /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
       * constructed from this one. See the \ref flags "list of flags".
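A small sketch of what the new NumDimensions member evaluates to; treating it as accessible from the derived sparse types is an assumption:

```cpp
#include <Eigen/SparseCore>

static_assert(Eigen::SparseMatrix<double>::NumDimensions == 2, "a sparse matrix is rank 2");
static_assert(Eigen::SparseVector<double>::NumDimensions == 1, "a sparse vector is rank 1");

int main() { return 0; }
```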
@@ -350,18 +355,6 @@ template<typename Derived> class SparseMatrixBase
     const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
     const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
 
-    // inner-vector
-    typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
-    typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
-    InnerVectorReturnType innerVector(Index outer);
-    const ConstInnerVectorReturnType innerVector(Index outer) const;
-
-    // set of inner-vectors
-    typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
-    typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
-    InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
-    const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
-
     DenseMatrixType toDense() const
     {
       return DenseMatrixType(derived());
@@ -17,7 +17,7 @@ namespace Eigen {
   * The automatic pruning of the small values can be achieved by calling the pruned() function
   * in which case a totally different product algorithm is employed:
   * \code
-  * C = (A*B).pruned();             // supress numerical zeros (exact)
+  * C = (A*B).pruned();             // suppress numerical zeros (exact)
   * C = (A*B).pruned(ref);
   * C = (A*B).pruned(ref,epsilon);
   * \endcode
@@ -164,6 +164,18 @@ protected:
 
 } // end namespace internal
 
+// sparse matrix = sparse-product (can be sparse*sparse, sparse*perm, etc.)
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename Lhs, typename Rhs>
+SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const Product<Lhs,Rhs,AliasFreeProduct>& src)
+{
+  // std::cout << "in Assignment : " << DstOptions << "\n";
+  SparseMatrix dst(src.rows(),src.cols());
+  internal::generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
+  this->swap(dst);
+  return *this;
+}
+
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSEPRODUCT_H
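A hedged sketch of the assignment path defined above: evaluating a sparse-sparse product directly into a SparseMatrix, optionally pruned as in the SparseProduct.h documentation:

```cpp
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3), B(3, 3), C;
  A.insert(0, 0) = 1.0;  A.insert(1, 2) = 2.0;
  B.insert(0, 1) = 3.0;  B.insert(2, 2) = 4.0;

  C = A * B;                                          // sparse * sparse into a SparseMatrix
  Eigen::SparseMatrix<double> D = (A * B).pruned();   // same product, exact zeros dropped
  std::cout << C.nonZeros() << " " << D.nonZeros() << "\n";  // 2 2
  return 0;
}
```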