umappp 0.1.2

Files changed (395)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +25 -0
  3. data/README.md +110 -0
  4. data/ext/umappp/extconf.rb +25 -0
  5. data/ext/umappp/numo.hpp +867 -0
  6. data/ext/umappp/umappp.cpp +225 -0
  7. data/lib/umappp/version.rb +5 -0
  8. data/lib/umappp.rb +41 -0
  9. data/vendor/Eigen/Cholesky +45 -0
  10. data/vendor/Eigen/CholmodSupport +48 -0
  11. data/vendor/Eigen/Core +384 -0
  12. data/vendor/Eigen/Dense +7 -0
  13. data/vendor/Eigen/Eigen +2 -0
  14. data/vendor/Eigen/Eigenvalues +60 -0
  15. data/vendor/Eigen/Geometry +59 -0
  16. data/vendor/Eigen/Householder +29 -0
  17. data/vendor/Eigen/IterativeLinearSolvers +48 -0
  18. data/vendor/Eigen/Jacobi +32 -0
  19. data/vendor/Eigen/KLUSupport +41 -0
  20. data/vendor/Eigen/LU +47 -0
  21. data/vendor/Eigen/MetisSupport +35 -0
  22. data/vendor/Eigen/OrderingMethods +70 -0
  23. data/vendor/Eigen/PaStiXSupport +49 -0
  24. data/vendor/Eigen/PardisoSupport +35 -0
  25. data/vendor/Eigen/QR +50 -0
  26. data/vendor/Eigen/QtAlignedMalloc +39 -0
  27. data/vendor/Eigen/SPQRSupport +34 -0
  28. data/vendor/Eigen/SVD +50 -0
  29. data/vendor/Eigen/Sparse +34 -0
  30. data/vendor/Eigen/SparseCholesky +37 -0
  31. data/vendor/Eigen/SparseCore +69 -0
  32. data/vendor/Eigen/SparseLU +50 -0
  33. data/vendor/Eigen/SparseQR +36 -0
  34. data/vendor/Eigen/StdDeque +27 -0
  35. data/vendor/Eigen/StdList +26 -0
  36. data/vendor/Eigen/StdVector +27 -0
  37. data/vendor/Eigen/SuperLUSupport +64 -0
  38. data/vendor/Eigen/UmfPackSupport +40 -0
  39. data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
  40. data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
  41. data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  42. data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
  43. data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
  44. data/vendor/Eigen/src/Core/Array.h +417 -0
  45. data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
  46. data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
  47. data/vendor/Eigen/src/Core/Assign.h +90 -0
  48. data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
  49. data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
  50. data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
  51. data/vendor/Eigen/src/Core/Block.h +448 -0
  52. data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
  53. data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
  54. data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
  55. data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
  56. data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
  57. data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
  58. data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
  59. data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  60. data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  61. data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
  62. data/vendor/Eigen/src/Core/DenseBase.h +701 -0
  63. data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
  64. data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
  65. data/vendor/Eigen/src/Core/Diagonal.h +258 -0
  66. data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
  67. data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
  68. data/vendor/Eigen/src/Core/Dot.h +318 -0
  69. data/vendor/Eigen/src/Core/EigenBase.h +160 -0
  70. data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
  71. data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
  72. data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
  73. data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
  74. data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
  75. data/vendor/Eigen/src/Core/IO.h +258 -0
  76. data/vendor/Eigen/src/Core/IndexedView.h +237 -0
  77. data/vendor/Eigen/src/Core/Inverse.h +117 -0
  78. data/vendor/Eigen/src/Core/Map.h +171 -0
  79. data/vendor/Eigen/src/Core/MapBase.h +310 -0
  80. data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
  81. data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
  82. data/vendor/Eigen/src/Core/Matrix.h +565 -0
  83. data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
  84. data/vendor/Eigen/src/Core/NestByValue.h +85 -0
  85. data/vendor/Eigen/src/Core/NoAlias.h +109 -0
  86. data/vendor/Eigen/src/Core/NumTraits.h +335 -0
  87. data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  88. data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
  89. data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
  90. data/vendor/Eigen/src/Core/Product.h +191 -0
  91. data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
  92. data/vendor/Eigen/src/Core/Random.h +218 -0
  93. data/vendor/Eigen/src/Core/Redux.h +515 -0
  94. data/vendor/Eigen/src/Core/Ref.h +381 -0
  95. data/vendor/Eigen/src/Core/Replicate.h +142 -0
  96. data/vendor/Eigen/src/Core/Reshaped.h +454 -0
  97. data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
  98. data/vendor/Eigen/src/Core/Reverse.h +217 -0
  99. data/vendor/Eigen/src/Core/Select.h +164 -0
  100. data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
  101. data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  102. data/vendor/Eigen/src/Core/Solve.h +188 -0
  103. data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
  104. data/vendor/Eigen/src/Core/SolverBase.h +168 -0
  105. data/vendor/Eigen/src/Core/StableNorm.h +251 -0
  106. data/vendor/Eigen/src/Core/StlIterators.h +463 -0
  107. data/vendor/Eigen/src/Core/Stride.h +116 -0
  108. data/vendor/Eigen/src/Core/Swap.h +68 -0
  109. data/vendor/Eigen/src/Core/Transpose.h +464 -0
  110. data/vendor/Eigen/src/Core/Transpositions.h +386 -0
  111. data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
  112. data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
  113. data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
  114. data/vendor/Eigen/src/Core/Visitor.h +381 -0
  115. data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
  116. data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
  117. data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
  118. data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
  119. data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  120. data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
  121. data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
  122. data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  123. data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
  124. data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
  125. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  126. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  127. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  128. data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
  129. data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
  130. data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  131. data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
  132. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  133. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  134. data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
  135. data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
  136. data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  137. data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
  138. data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  139. data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  140. data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  141. data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  142. data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  143. data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  144. data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
  145. data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  146. data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
  147. data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
  148. data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  149. data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
  150. data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
  151. data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
  152. data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
  153. data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  154. data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  155. data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  156. data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  157. data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  158. data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  159. data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  160. data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  161. data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
  162. data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
  163. data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
  164. data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
  165. data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
  166. data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
  167. data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
  168. data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  169. data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
  170. data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
  171. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
  172. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
  173. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  174. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
  175. data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
  176. data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  177. data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
  178. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
  179. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
  180. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
  181. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  182. data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  183. data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
  184. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
  185. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
  186. data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  187. data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  188. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
  189. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
  190. data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
  191. data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
  192. data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  193. data/vendor/Eigen/src/Core/util/Constants.h +563 -0
  194. data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
  195. data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
  196. data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  197. data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
  198. data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
  199. data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
  200. data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
  201. data/vendor/Eigen/src/Core/util/Meta.h +812 -0
  202. data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
  203. data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
  204. data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  205. data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
  206. data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  207. data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
  208. data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  209. data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
  210. data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  211. data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  212. data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  213. data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  214. data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  215. data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  216. data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
  217. data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
  218. data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  219. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
  220. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  221. data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
  222. data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
  223. data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
  224. data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
  225. data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
  226. data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
  227. data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
  228. data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
  229. data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
  230. data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
  231. data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
  232. data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
  233. data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
  234. data/vendor/Eigen/src/Geometry/Translation.h +202 -0
  235. data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
  236. data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  237. data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
  238. data/vendor/Eigen/src/Householder/Householder.h +176 -0
  239. data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
  240. data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  241. data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
  242. data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
  243. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
  244. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
  245. data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
  246. data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
  247. data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
  248. data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
  249. data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  250. data/vendor/Eigen/src/LU/Determinant.h +117 -0
  251. data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
  252. data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
  253. data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
  254. data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  255. data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
  256. data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  257. data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
  258. data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
  259. data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
  260. data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  261. data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
  262. data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
  263. data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  264. data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
  265. data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
  266. data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
  267. data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  268. data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
  269. data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
  270. data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
  271. data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  272. data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
  273. data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  274. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
  275. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
  276. data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
  277. data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
  278. data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  279. data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  280. data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
  281. data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
  282. data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  283. data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
  284. data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
  285. data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
  286. data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
  287. data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  288. data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
  289. data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  290. data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
  291. data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
  292. data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
  293. data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  294. data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
  295. data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
  296. data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
  297. data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
  298. data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  299. data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  300. data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  301. data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  302. data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
  303. data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
  304. data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
  305. data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  306. data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
  307. data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  308. data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  309. data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  310. data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
  311. data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  312. data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  313. data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  314. data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  315. data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  316. data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  317. data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  318. data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  319. data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  320. data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  321. data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  322. data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  323. data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
  324. data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
  325. data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
  326. data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
  327. data/vendor/Eigen/src/StlSupport/details.h +84 -0
  328. data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
  329. data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
  330. data/vendor/Eigen/src/misc/Image.h +82 -0
  331. data/vendor/Eigen/src/misc/Kernel.h +79 -0
  332. data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
  333. data/vendor/Eigen/src/misc/blas.h +440 -0
  334. data/vendor/Eigen/src/misc/lapack.h +152 -0
  335. data/vendor/Eigen/src/misc/lapacke.h +16292 -0
  336. data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
  337. data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
  338. data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
  339. data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
  340. data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  341. data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
  342. data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  343. data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  344. data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
  345. data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
  346. data/vendor/aarand/aarand.hpp +114 -0
  347. data/vendor/annoy/annoylib.h +1495 -0
  348. data/vendor/annoy/kissrandom.h +120 -0
  349. data/vendor/annoy/mman.h +242 -0
  350. data/vendor/hnswlib/bruteforce.h +152 -0
  351. data/vendor/hnswlib/hnswalg.h +1192 -0
  352. data/vendor/hnswlib/hnswlib.h +108 -0
  353. data/vendor/hnswlib/space_ip.h +282 -0
  354. data/vendor/hnswlib/space_l2.h +281 -0
  355. data/vendor/hnswlib/visited_list_pool.h +79 -0
  356. data/vendor/irlba/irlba.hpp +575 -0
  357. data/vendor/irlba/lanczos.hpp +212 -0
  358. data/vendor/irlba/parallel.hpp +474 -0
  359. data/vendor/irlba/utils.hpp +224 -0
  360. data/vendor/irlba/wrappers.hpp +228 -0
  361. data/vendor/kmeans/Base.hpp +75 -0
  362. data/vendor/kmeans/Details.hpp +79 -0
  363. data/vendor/kmeans/HartiganWong.hpp +492 -0
  364. data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
  365. data/vendor/kmeans/InitializeNone.hpp +44 -0
  366. data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
  367. data/vendor/kmeans/InitializeRandom.hpp +91 -0
  368. data/vendor/kmeans/Kmeans.hpp +161 -0
  369. data/vendor/kmeans/Lloyd.hpp +134 -0
  370. data/vendor/kmeans/MiniBatch.hpp +269 -0
  371. data/vendor/kmeans/QuickSearch.hpp +179 -0
  372. data/vendor/kmeans/compute_centroids.hpp +32 -0
  373. data/vendor/kmeans/compute_wcss.hpp +27 -0
  374. data/vendor/kmeans/is_edge_case.hpp +42 -0
  375. data/vendor/kmeans/random.hpp +55 -0
  376. data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
  377. data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
  378. data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
  379. data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
  380. data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
  381. data/vendor/knncolle/knncolle.hpp +34 -0
  382. data/vendor/knncolle/utils/Base.hpp +100 -0
  383. data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
  384. data/vendor/knncolle/utils/distances.hpp +98 -0
  385. data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
  386. data/vendor/powerit/PowerIterations.hpp +157 -0
  387. data/vendor/umappp/NeighborList.hpp +37 -0
  388. data/vendor/umappp/Umap.hpp +662 -0
  389. data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
  390. data/vendor/umappp/find_ab.hpp +157 -0
  391. data/vendor/umappp/neighbor_similarities.hpp +136 -0
  392. data/vendor/umappp/optimize_layout.hpp +285 -0
  393. data/vendor/umappp/spectral_init.hpp +181 -0
  394. data/vendor/umappp/umappp.hpp +13 -0
  395. metadata +465 -0
data/vendor/Eigen/src/SparseCore/SparseMatrix.h
@@ -0,0 +1,1518 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_SPARSEMATRIX_H
+ #define EIGEN_SPARSEMATRIX_H
+
+ namespace Eigen {
+
+ /** \ingroup SparseCore_Module
+ *
+ * \class SparseMatrix
+ *
+ * \brief A versatile sparse matrix representation
+ *
+ * This class implements a more versatile variant of the common \em compressed row/column storage format.
+ * Each column's (resp. row's) non-zeros are stored as pairs of a value and the associated row (resp. column) index.
+ * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
+ * space between the non-zeros of two successive columns (resp. rows) such that insertion of a new non-zero
+ * can be done with limited memory reallocation and copies.
+ *
+ * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
+ * compatible with many libraries.
+ *
+ * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is ColMajor or RowMajor. The default is 0 which means column-major.
+ * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ *
+ * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
+ * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
+ * Code making use of \c SparseMatrix::Index will thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ */
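
For illustration, a minimal sketch of how this storage scheme is typically driven through the public API (assuming only the standard <Eigen/Sparse> header):

#include <Eigen/Sparse>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(5, 5);      // column-major by default
  A.coeffRef(0, 1) = 3.0;                   // inserts the entry if absent
  A.coeffRef(2, 1) += 1.0;
  A.makeCompressed();                       // back to standard compressed storage
  for (int j = 0; j < A.outerSize(); ++j)   // one pass per inner vector
    for (Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
}
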
+
+ namespace internal {
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
+ {
+ typedef _Scalar Scalar;
+ typedef _StorageIndex StorageIndex;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+ };
+
+ template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
+ struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+ {
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
+ typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+
+ typedef _Scalar Scalar;
+ typedef Dense StorageKind;
+ typedef _StorageIndex StorageIndex;
+ typedef MatrixXpr XprKind;
+
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = 1,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = 1,
+ Flags = LvalueBit
+ };
+ };
+
+ template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
+ struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+ : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+ {
+ enum {
+ Flags = 0
+ };
+ };
+
+ } // end namespace internal
+
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ class SparseMatrix
+ : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
+ {
+ typedef SparseCompressedBase<SparseMatrix> Base;
+ using Base::convert_index;
+ friend class SparseVector<_Scalar,0,_StorageIndex>;
+ template<typename, typename, typename, typename, typename>
+ friend struct internal::Assignment;
+ public:
+ using Base::isCompressed;
+ using Base::nonZeros;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+ using Base::operator+=;
+ using Base::operator-=;
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ typedef Diagonal<SparseMatrix> DiagonalReturnType;
+ typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
+ typedef typename Base::InnerIterator InnerIterator;
+ typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
+
+
+ using Base::IsRowMajor;
+ typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
+ enum {
+ Options = _Options
+ };
+
+ typedef typename Base::IndexVector IndexVector;
+ typedef typename Base::ScalarVector ScalarVector;
+ protected:
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ StorageIndex* m_outerIndex;
+ StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
+ Storage m_data;
+
+ public:
+
+ /** \returns the number of rows of the matrix */
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ /** \returns the number of columns of the matrix */
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
+ inline Index innerSize() const { return m_innerSize; }
+ /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
+ inline Index outerSize() const { return m_outerSize; }
+
+ /** \returns a const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
+ /** \returns a non-const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline Scalar* valuePtr() { return m_data.valuePtr(); }
+
+ /** \returns a const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
+ /** \returns a non-const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
+
+ /** \returns a const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
+ /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
+
+ /** \returns a const pointer to the array of the number of non-zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
+ /** \returns a non-const pointer to the array of the number of non-zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
+
+ /** \internal */
+ inline Storage& data() { return m_data; }
+ /** \internal */
+ inline const Storage& data() const { return m_data; }
+
+ /** \returns the value of the matrix at position \a i, \a j
+ * This function returns Scalar(0) if the element is an explicit \em zero */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+ return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
+ }
+
+ /** \returns a non-const reference to the value of the matrix at position \a i, \a j
+ *
+ * If the element does not exist then it is inserted via the insert(Index,Index) function
+ * which itself turns the matrix into a non-compressed form if that was not already the case.
+ *
+ * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
+ * function if the element does not already exist.
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ if(end<=start)
+ return insert(row,col);
+ const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
+ if((p<end) && (m_data.index(p)==inner))
+ return m_data.value(p);
+ else
+ return insert(row,col);
+ }
+
+ /** \returns a reference to a novel non-zero coefficient with coordinates \a row x \a col.
+ * The non-zero coefficient must \b not already exist.
+ *
+ * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
+ * mode while reserving room for 2 x this->innerSize() non-zeros if reserve(Index) has not been called earlier.
+ * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
+ * inserted by increasing outer-indices.
+ *
+ * If that's not the case, then it is strongly recommended to either use a triplet list to assemble the matrix, or to first
+ * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
+ *
+ * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
+ * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+ *
+ */
+ Scalar& insert(Index row, Index col);
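
Following the recommendation above, a sketch of the reserve-then-insert pattern (public API only); inserting in increasing inner-index order within each column keeps every insertion O(1):

#include <Eigen/Sparse>

Eigen::SparseMatrix<double> buildBidiagonal(int n) {
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 3));   // ~3 non-zeros expected per column
  for (int j = 0; j < n; ++j) {
    A.insert(j, j) = 2.0;                       // each (i, j) inserted exactly once
    if (j + 1 < n) A.insert(j + 1, j) = -1.0;   // increasing inner index in column j
  }
  A.makeCompressed();                           // trim the per-column slack
  return A;
}
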
+
+ public:
+
+ /** Removes all non-zeros but keeps the allocated memory.
+ *
+ * This function does not free the currently allocated memory. To release as much memory as possible,
+ * call \code mat.data().squeeze(); \endcode after resizing it.
+ *
+ * \sa resize(Index,Index), data()
+ */
+ inline void setZero()
+ {
+ m_data.clear();
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
+ if(m_innerNonZeros)
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
+ }
+
+ /** Preallocates \a reserveSize non-zeros.
+ *
+ * Precondition: the matrix must be in compressed mode. */
+ inline void reserve(Index reserveSize)
+ {
+ eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
+ m_data.reserve(reserveSize);
+ }
+
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** Preallocates \a reserveSize[\c j] non-zeros for each column (resp. row) \c j.
+ *
+ * This function turns the matrix into non-compressed mode.
+ *
+ * The type \c SizesType must expose the following interface:
+ \code
+ typedef value_type;
+ const value_type& operator[](i) const;
+ \endcode
+ * for \c i in the [0,this->outerSize()[ range.
+ * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
+ */
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes);
+ #else
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
+ #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
+ typename
+ #endif
+ SizesType::value_type())
+ {
+ EIGEN_UNUSED_VARIABLE(enableif);
+ reserveInnerVectors(reserveSizes);
+ }
+ #endif // EIGEN_PARSED_BY_DOXYGEN
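
As noted above, any type exposing value_type and operator[] can serve as SizesType; a small sketch using a std::vector<int> of per-column estimates (the sizes here are illustrative assumptions):

#include <Eigen/Sparse>
#include <vector>

void reservePerColumn(Eigen::SparseMatrix<double>& A) {
  std::vector<int> sizes(A.cols(), 2);  // expect ~2 non-zeros per column...
  sizes[0] = 10;                        // ...but more in column 0
  A.reserve(sizes);                     // switches A to uncompressed mode
}
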
+ protected:
+ template<class SizesType>
+ inline void reserveInnerVectors(const SizesType& reserveSizes)
+ {
+ if(isCompressed())
+ {
+ Index totalReserveSize = 0;
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+
+ // temporarily use m_innerSizes to hold the new starting points.
+ StorageIndex* newOuterIndex = m_innerNonZeros;
+
+ StorageIndex count = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ newOuterIndex[j] = count;
+ count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
+ totalReserveSize += reserveSizes[j];
+ }
+ m_data.reserve(totalReserveSize);
+ StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
+ for(Index j=m_outerSize-1; j>=0; --j)
+ {
+ StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
+ for(Index i=innerNNZ-1; i>=0; --i)
+ {
+ m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+ m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+ }
+ previousOuterIndex = m_outerIndex[j];
+ m_outerIndex[j] = newOuterIndex[j];
+ m_innerNonZeros[j] = innerNNZ;
+ }
+ if(m_outerSize>0)
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+
+ m_data.resize(m_outerIndex[m_outerSize]);
+ }
+ else
+ {
+ StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
+ if (!newOuterIndex) internal::throw_std_bad_alloc();
+
+ StorageIndex count = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ newOuterIndex[j] = count;
+ StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+ StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
+ count += toReserve + m_innerNonZeros[j];
+ }
+ newOuterIndex[m_outerSize] = count;
+
+ m_data.resize(count);
+ for(Index j=m_outerSize-1; j>=0; --j)
+ {
+ Index offset = newOuterIndex[j] - m_outerIndex[j];
+ if(offset>0)
+ {
+ StorageIndex innerNNZ = m_innerNonZeros[j];
+ for(Index i=innerNNZ-1; i>=0; --i)
+ {
+ m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+ m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+ }
+ }
+ }
+
+ std::swap(m_outerIndex, newOuterIndex);
+ std::free(newOuterIndex);
+ }
+
+ }
+ public:
+
+ //--- low level purely coherent filling ---
+
+ /** \internal
+ * \returns a reference to the non-zero coefficient at position \a row, \a col assuming that:
+ * - the non-zero does not already exist
+ * - the new coefficient is the last one according to the storage order
+ *
+ * Before filling a given inner vector you must call the startVec(Index) function.
+ *
+ * After an insertion session, you should call the finalize() function.
+ *
+ * \sa insert, insertBackByOuterInner, startVec */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \internal
+ * \sa insertBack, startVec */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(Scalar(0), inner);
+ return m_data.value(p);
+ }
+
+ /** \internal
+ * \warning use it only if you know what you are doing */
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(Scalar(0), inner);
+ return m_data.value(p);
+ }
+
+ /** \internal
+ * \sa insertBack, insertBackByOuterInner */
+ inline void startVec(Index outer)
+ {
+ eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ /** \internal
+ * Must be called after inserting a set of non-zero entries using the low level compressed API.
+ */
+ inline void finalize()
+ {
+ if(isCompressed())
+ {
+ StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
+ Index i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+ }
+
+ //---
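
The functions above are marked \internal, but their intended calling sequence follows from the assertions: startVec(j) once per outer vector in order, insertBack with strictly increasing inner indices, then finalize(). A rough sketch under those assumptions (not a supported public pattern):

#include <Eigen/Sparse>

Eigen::SparseMatrix<double> lowLevelFill(int n) {
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(2 * n);                      // compressed-mode preallocation
  for (int j = 0; j < n; ++j) {          // outer vectors visited sequentially
    A.startVec(j);
    A.insertBack(j, j) = 1.0;            // inner (row) indices strictly increasing
    if (j + 1 < n) A.insertBack(j + 1, j) = 0.5;
  }
  A.finalize();                          // fix up trailing outer pointers
  return A;
}
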
+
+ template<typename InputIterators>
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end);
+
+ template<typename InputIterators,typename DupFunctor>
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
+
+ void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
+
+ template<typename DupFunctor>
+ void collapseDuplicates(DupFunctor dup_func = DupFunctor());
+
+ //---
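
setFromTriplets() is the usual high-level assembly route; duplicate coordinates are combined with the given functor (summed by default). A short sketch:

#include <Eigen/Sparse>
#include <algorithm>
#include <vector>

Eigen::SparseMatrix<double> fromTriplets() {
  std::vector<Eigen::Triplet<double>> t;
  t.emplace_back(0, 0, 1.0);
  t.emplace_back(0, 0, 2.0);                 // duplicate coordinate
  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(t.begin(), t.end());     // duplicates summed: A(0,0) == 3
  A.setFromTriplets(t.begin(), t.end(),      // or combine them differently:
                    [](const double& a, const double& b) { return std::max(a, b); });
  return A;                                  // now A(0,0) == 2
}
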
+
+ /** \internal
+ * same as insert(Index,Index) except that the indices are given relative to the storage order */
+ Scalar& insertByOuterInner(Index j, Index i)
+ {
+ return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
+ }
+
+ /** Turns the matrix into the \em compressed format.
+ */
+ void makeCompressed()
+ {
+ if(isCompressed())
+ return;
+
+ eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
+
+ Index oldStart = m_outerIndex[1];
+ m_outerIndex[1] = m_innerNonZeros[0];
+ for(Index j=1; j<m_outerSize; ++j)
+ {
+ Index nextOldStart = m_outerIndex[j+1];
+ Index offset = oldStart - m_outerIndex[j];
+ if(offset>0)
+ {
+ for(Index k=0; k<m_innerNonZeros[j]; ++k)
+ {
+ m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
+ m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
+ }
+ }
+ m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
+ oldStart = nextOldStart;
+ }
+ std::free(m_innerNonZeros);
+ m_innerNonZeros = 0;
+ m_data.resize(m_outerIndex[m_outerSize]);
+ m_data.squeeze();
+ }
+
+ /** Turns the matrix into uncompressed mode */
+ void uncompress()
+ {
+ if(m_innerNonZeros != 0)
+ return;
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ for (Index i = 0; i < m_outerSize; i++)
+ {
+ m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ }
+ }
+
+ /** Suppresses all non-zeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
+ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ prune(default_prunning_func(reference,epsilon));
+ }
+
+ /** Turns the matrix into compressed format, and suppresses all non-zeros which do not satisfy the predicate \a keep.
+ * The functor type \a KeepFunc must implement the following function:
+ * \code
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+ * \endcode
+ * \sa prune(Scalar,RealScalar)
+ */
+ template<typename KeepFunc>
+ void prune(const KeepFunc& keep = KeepFunc())
+ {
+ // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
+ makeCompressed();
+
+ StorageIndex k = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ Index previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ Index end = m_outerIndex[j+1];
+ for(Index i=previousStart; i<end; ++i)
+ {
+ if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
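
Both prune() overloads in use, as a brief sketch; the predicate form receives (row, col, value) exactly as the docstring above specifies:

#include <Eigen/Sparse>

void pruneExamples(Eigen::SparseMatrix<double>& A) {
  A.prune(1.0, 1e-12);  // drop entries much smaller than the reference under this tolerance
  A.prune([](const Eigen::Index& row, const Eigen::Index& col, const double&) {
    return row >= col;  // keep only the lower triangle
  });
}
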
+
+ /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
+ *
+ * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed mode
+ * and the storage of the out-of-bounds coefficients is kept and reserved.
+ * Call makeCompressed() to pack the entries and squeeze extra memory.
+ *
+ * \sa reserve(), setZero(), makeCompressed()
+ */
+ void conservativeResize(Index rows, Index cols)
+ {
+ // No change
+ if (this->rows() == rows && this->cols() == cols) return;
+
+ // If one dimension is null, then there is nothing to be preserved
+ if(rows==0 || cols==0) return resize(rows,cols);
+
+ Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
+ Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
+ StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
+
+ // Deal with the inner non-zeros
+ if (m_innerNonZeros)
+ {
+ // Resize m_innerNonZeros
+ StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
+ if (!newInnerNonZeros) internal::throw_std_bad_alloc();
+ m_innerNonZeros = newInnerNonZeros;
+
+ for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
+ m_innerNonZeros[i] = 0;
+ }
+ else if (innerChange < 0)
+ {
+ // Inner size decreased: allocate a new m_innerNonZeros
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
+ if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
+ m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
+ m_innerNonZeros[i] = 0;
+ }
+
+ // Change the m_innerNonZeros in case of a decrease of inner size
+ if (m_innerNonZeros && innerChange < 0)
+ {
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
+ {
+ StorageIndex &n = m_innerNonZeros[i];
+ StorageIndex start = m_outerIndex[i];
+ while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
+ }
+ }
+
+ m_innerSize = newInnerSize;
+
+ // Re-allocate outer index structure if necessary
+ if (outerChange == 0)
+ return;
+
+ StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
+ if (!newOuterIndex) internal::throw_std_bad_alloc();
+ m_outerIndex = newOuterIndex;
+ if (outerChange > 0)
+ {
+ StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
+ m_outerIndex[i] = lastIdx;
+ }
+ m_outerSize += outerChange;
+ }
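
A quick sketch of the resizing semantics described above (growth preserves existing entries; shrinking switches to uncompressed mode and keeps the out-of-bounds storage until repacked):

#include <Eigen/Sparse>

void growInPlace(Eigen::SparseMatrix<double>& A) {
  A.conservativeResize(A.rows() + 2, A.cols() + 2);  // old values untouched
  A.makeCompressed();                                // pack entries, squeeze memory
}
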
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
+ *
+ * This function does not free the currently allocated memory. To release as much memory as possible,
+ * call \code mat.data().squeeze(); \endcode after resizing it.
+ *
+ * \sa reserve(), setZero()
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize || m_outerSize==0)
+ {
+ std::free(m_outerIndex);
+ m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
+ if (!m_outerIndex) internal::throw_std_bad_alloc();
+
+ m_outerSize = outerSize;
+ }
+ if(m_innerNonZeros)
+ {
+ std::free(m_innerNonZeros);
+ m_innerNonZeros = 0;
+ }
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
+ }
+
+ /** \internal
+ * Resize the non-zero vector to \a size */
+ void resizeNonZeros(Index size)
+ {
+ m_data.resize(size);
+ }
+
+ /** \returns a const expression of the diagonal coefficients. */
+ const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
+
+ /** \returns a read-write expression of the diagonal coefficients.
+ * \warning If the diagonal entries are written, then all diagonal
+ * entries \b must already exist, otherwise an assertion will be raised.
+ */
+ DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
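
For the diagonal accessors, a minimal sketch; per the warning above, writing through diagonal() assumes the diagonal entries already exist (guaranteed here by setIdentity()):

#include <Eigen/Dense>
#include <Eigen/Sparse>

void diagonalExample() {
  Eigen::SparseMatrix<double> A(3, 3);
  A.setIdentity();                                   // all diagonal entries exist
  Eigen::VectorXd d = A.diagonal();                  // dense copy of the diagonal
  A.diagonal() = Eigen::VectorXd::Constant(3, 2.0);  // in-place overwrite
}
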
662
+
663
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
664
+ inline SparseMatrix()
665
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
666
+ {
667
+ check_template_parameters();
668
+ resize(0, 0);
669
+ }
670
+
671
+ /** Constructs a \a rows \c x \a cols empty matrix */
672
+ inline SparseMatrix(Index rows, Index cols)
673
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674
+ {
675
+ check_template_parameters();
676
+ resize(rows, cols);
677
+ }
678
+
679
+ /** Constructs a sparse matrix from the sparse expression \a other */
680
+ template<typename OtherDerived>
681
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
682
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
683
+ {
684
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
685
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686
+ check_template_parameters();
687
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688
+ if (needToTranspose)
689
+ *this = other.derived();
690
+ else
691
+ {
692
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694
+ #endif
695
+ internal::call_assignment_no_alias(*this, other.derived());
696
+ }
697
+ }
698
+
699
+ /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
700
+ template<typename OtherDerived, unsigned int UpLo>
701
+ inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
702
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703
+ {
704
+ check_template_parameters();
705
+ Base::operator=(other);
706
+ }
707
+
708
+ /** Copy constructor (it performs a deep copy) */
709
+ inline SparseMatrix(const SparseMatrix& other)
710
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711
+ {
712
+ check_template_parameters();
713
+ *this = other.derived();
714
+ }
715
+
716
+ /** \brief Copy constructor with in-place evaluation */
717
+ template<typename OtherDerived>
718
+ SparseMatrix(const ReturnByValue<OtherDerived>& other)
719
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720
+ {
721
+ check_template_parameters();
722
+ initAssignment(other);
723
+ other.evalTo(*this);
724
+ }
725
+
726
+ /** \brief Copy constructor with in-place evaluation */
727
+ template<typename OtherDerived>
728
+ explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
729
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
730
+ {
731
+ check_template_parameters();
732
+ *this = other.derived();
733
+ }
734
+
735
+ /** Swaps the content of two sparse matrices of the same type.
736
+ * This is a fast operation that simply swaps the underlying pointers and parameters. */
737
+ inline void swap(SparseMatrix& other)
738
+ {
739
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
740
+ std::swap(m_outerIndex, other.m_outerIndex);
741
+ std::swap(m_innerSize, other.m_innerSize);
742
+ std::swap(m_outerSize, other.m_outerSize);
743
+ std::swap(m_innerNonZeros, other.m_innerNonZeros);
744
+ m_data.swap(other.m_data);
745
+ }
746
+
747
+ /** Sets *this to the identity matrix.
748
+ * This function also turns the matrix into compressed mode, and drop any reserved memory. */
749
+ inline void setIdentity()
750
+ {
751
+ eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752
+ this->m_data.resize(rows());
753
+ Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754
+ Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755
+ Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
756
+ std::free(m_innerNonZeros);
757
+ m_innerNonZeros = 0;
758
+ }
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else if(this!=&other)
+ {
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+ initAssignment(other);
+ if(other.isCompressed())
+ {
+ internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
+ m_data = other.m_data;
+ }
+ else
+ {
+ Base::operator=(other);
+ }
+ }
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+ { return Base::operator=(other.derived()); }
+
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ if(m.isCompressed())
+ {
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
+ else
+ {
+ for (Index i=0; i<m.outerSize(); ++i)
+ {
+ Index p = m.m_outerIndex[i];
+ Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index k=p;
+ for (; k<pe; ++k) {
+ s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
+ }
+ for (; k<m.m_outerIndex[i+1]; ++k) {
+ s << "(_,_) ";
+ }
+ }
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Outer pointers:\n";
+ for (Index i=0; i<m.outerSize(); ++i) {
+ s << m.m_outerIndex[i] << " ";
+ }
+ s << " $" << std::endl;
+ if(!m.isCompressed())
+ {
+ s << "Inner non zeros:\n";
+ for (Index i=0; i<m.outerSize(); ++i) {
+ s << m.m_innerNonZeros[i] << " ";
+ }
+ s << " $" << std::endl;
+ }
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ std::free(m_outerIndex);
+ std::free(m_innerNonZeros);
+ }
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ # ifdef EIGEN_SPARSEMATRIX_PLUGIN
+ # include EIGEN_SPARSEMATRIX_PLUGIN
+ # endif
+
+ protected:
+
+ template<typename Other>
+ void initAssignment(const Other& other)
+ {
+ resize(other.rows(), other.cols());
+ if(m_innerNonZeros)
+ {
+ std::free(m_innerNonZeros);
+ m_innerNonZeros = 0;
+ }
+ }
+
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
+
+ /** \internal
+ * A vector object that is equal to 0 everywhere except for value v at position i */
+ class SingletonVector
+ {
+ StorageIndex m_index;
+ StorageIndex m_value;
+ public:
+ typedef StorageIndex value_type;
+ SingletonVector(Index i, Index v)
+ : m_index(convert_index(i)), m_value(convert_index(v))
+ {}
+
+ StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
+ };
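+
+ // [Editor's note] SingletonVector is consumed by reserve(): internally,
+ // reserve(SingletonVector(outer, k)) requests room for k extra elements in
+ // inner-vector `outer` and 0 everywhere else; see insertUncompressed()
+ // below for the one place this is used.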
+
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
+
+ public:
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(!isCompressed());
+ eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
+
+ Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
+ m_data.index(p) = convert_index(inner);
+ return (m_data.value(p) = Scalar(0));
+ }
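+
+ // [Editor's note] insertBackUncompressed() appends at the end of an
+ // inner-vector and assumes room was reserved beforehand; this is the
+ // pattern used by internal::set_from_triplets below (reserve(wi) followed
+ // by one insertBackUncompressed per triplet). It does not sort, so a
+ // sorting step (there: the final transposed copy) must follow.
+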
+ protected:
+ struct IndexPosPair {
+ IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+ Index i;
+ Index p;
+ };
+
+ /** \internal assign \a diagXpr to the diagonal of \c *this
+ * There are different strategies:
+ * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
+ * 2 - otherwise, for each diagonal coeff,
+ * 2.a - if it already exists, then we update it,
+ * 2.b - otherwise, if *this is uncompressed and the current inner-vector has free room for at least 1 element, then we perform an in-place insertion.
+ * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
+ * 3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk at the right position, and insert the new elements.
+ *
+ * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+ * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+ * then it *might* be better to disable case 2.b since they will have to be copied anyway.
+ */
+ template<typename DiagXpr, typename Func>
+ void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+ {
+ Index n = diagXpr.size();
+
+ const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+ if(overwrite)
+ {
+ if((this->rows()!=n) || (this->cols()!=n))
+ this->resize(n, n);
+ }
+
+ if(m_data.size()==0 || overwrite)
+ {
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ this->makeCompressed();
+ this->resizeNonZeros(n);
+ Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+ Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+ Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+ values.setZero();
+ internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+ }
+ else
+ {
+ bool isComp = isCompressed();
+ internal::evaluator<DiagXpr> diaEval(diagXpr);
+ std::vector<IndexPosPair> newEntries;
+
+ // 1 - try in-place update and record insertion failures
+ for(Index i = 0; i<n; ++i)
+ {
+ internal::LowerBoundIndex lb = this->lower_bound(i,i);
+ Index p = lb.value;
+ if(lb.found)
+ {
+ // the coeff already exists
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+ {
+ // non compressed mode with local room for inserting one element
+ m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+ m_innerNonZeros[i]++;
+ m_data.value(p) = Scalar(0);
+ m_data.index(p) = StorageIndex(i);
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else
+ {
+ // defer insertion
+ newEntries.push_back(IndexPosPair(i,p));
+ }
+ }
+ // 2 - insert deferred entries
+ Index n_entries = Index(newEntries.size());
+ if(n_entries>0)
+ {
+ Storage newData(m_data.size()+n_entries);
+ Index prev_p = 0;
+ Index prev_i = 0;
+ for(Index k=0; k<n_entries;++k)
+ {
+ Index i = newEntries[k].i;
+ Index p = newEntries[k].p;
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+ for(Index j=prev_i;j<i;++j)
+ m_outerIndex[j+1] += k;
+ if(!isComp)
+ m_innerNonZeros[i]++;
+ prev_p = p;
+ prev_i = i;
+ newData.value(p+k) = Scalar(0);
+ newData.index(p+k) = StorageIndex(i);
+ assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
+ }
+ {
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+ for(Index j=prev_i+1;j<=m_outerSize;++j)
+ m_outerIndex[j] += n_entries;
+ }
+ m_data.swap(newData);
+ }
+ }
+ }
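+
+ // [Editor's note] assignDiagonal() is the backend behind diagonal
+ // assignment on a sparse matrix. A hedged usage sketch, assuming this
+ // Eigen version's writable diagonal() for SparseMatrix (names are
+ // hypothetical):
+ // \code
+ // Eigen::SparseMatrix<double> A(n,n);
+ // Eigen::VectorXd v(n);
+ // // ... fill A and v ...
+ // A.diagonal() += v; // updates existing diagonal entries, inserts missing ones
+ // \endcode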
+
+ private:
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
+ }
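+
+ // [Editor's note] Consequences of the two static asserts above, for
+ // reference (editorial sketch):
+ // \code
+ // Eigen::SparseMatrix<double, Eigen::ColMajor, int> ok;          // signed index: compiles
+ // // Eigen::SparseMatrix<double, Eigen::ColMajor, unsigned> bad; // rejected: unsigned index
+ // \endcode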
+
+ struct default_prunning_func {
+ default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
+ inline bool operator() (const Index&, const Index&, const Scalar& value) const
+ {
+ return !internal::isMuchSmallerThan(value, reference, epsilon);
+ }
+ Scalar reference;
+ RealScalar epsilon;
+ };
+ };
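+
+ // [Editor's note] default_prunning_func is the keep-predicate behind the
+ // public prune(reference, epsilon) overload: an entry is kept iff it is
+ // not "much smaller" than the reference. Hedged usage sketch (names
+ // hypothetical):
+ // \code
+ // // drop entries negligible relative to 1.0
+ // // A.prune(1.0, 1e-12);
+ // \endcode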
+
+ namespace internal {
+
+ template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+ void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
+ {
+ enum { IsRowMajor = SparseMatrixType::IsRowMajor };
+ typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::StorageIndex StorageIndex;
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
+
+ if(begin!=end)
+ {
+ // pass 1: count the nnz per inner-vector
+ typename SparseMatrixType::IndexVector wi(trMat.outerSize());
+ wi.setZero();
+ for(InputIterator it(begin); it!=end; ++it)
+ {
+ eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
+ wi(IsRowMajor ? it->col() : it->row())++;
+ }
+
+ // pass 2: insert all the elements into trMat
+ trMat.reserve(wi);
+ for(InputIterator it(begin); it!=end; ++it)
+ trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+
+ // pass 3: collapse duplicate entries
+ trMat.collapseDuplicates(dup_func);
+ }
+
+ // pass 4: transposed copy -> implicit sorting
+ mat = trMat;
+ }
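+
+ // [Editor's note] Why the final transposed copy sorts: triplets are first
+ // inserted, unsorted, into trMat, whose major direction is the opposite of
+ // mat's; the copy-assignment `mat = trMat` then runs the two-pass
+ // transposed copy of operator= below, which emits each inner-vector's
+ // entries in increasing index order.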
+
+ }
+
+
+ /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
+ *
+ * A \em triplet is a tuple (i,j,value) defining a non-zero element.
+ * The input list of triplets does not have to be sorted, and can contain duplicated elements.
+ * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
+ * This is an \em O(n) operation, with \em n the number of triplet elements.
+ * The initial contents of \c *this are destroyed.
+ * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
+ * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
+ *
+ * The \a InputIterators value_type must provide the following interface:
+ * \code
+ * Scalar value() const;       // the value
+ * StorageIndex row() const;   // the row index i
+ * StorageIndex col() const;   // the column index j
+ * \endcode
+ * See for instance the Eigen::Triplet template class.
+ *
+ * Here is a typical usage example:
+ * \code
+ typedef Triplet<double> T;
+ std::vector<T> tripletList;
+ tripletList.reserve(estimation_of_entries);
+ for(...)
+ {
+ // ...
+ tripletList.push_back(T(i,j,v_ij));
+ }
+ SparseMatrixType m(rows,cols);
+ m.setFromTriplets(tripletList.begin(), tripletList.end());
+ // m is ready to go!
+ * \endcode
+ *
+ * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
+ * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
+ * be explicitly stored into a std::vector for instance.
+ */
+ template<typename Scalar, int _Options, typename _StorageIndex>
+ template<typename InputIterators>
+ void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+ {
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
+ }
+
+ /** The same as setFromTriplets but when duplicates are encountered, the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
+ * \endcode
+ */
+ template<typename Scalar, int _Options, typename _StorageIndex>
+ template<typename InputIterators,typename DupFunctor>
+ void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
+ {
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
+ }
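+
+ // [Editor's note] Another dup_func sketch: keep the entry with the larger
+ // magnitude instead of summing (editorial, hypothetical example, C++11):
+ // \code
+ // mat.setFromTriplets(triplets.begin(), triplets.end(),
+ //     [] (const double& a, const double& b) { return std::abs(a) < std::abs(b) ? b : a; });
+ // \endcode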
+
+ /** \internal */
+ template<typename Scalar, int _Options, typename _StorageIndex>
+ template<typename DupFunctor>
+ void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
+ {
+ eigen_assert(!isCompressed());
+ // TODO, in practice we should be able to use m_innerNonZeros for that task
+ IndexVector wi(innerSize());
+ wi.fill(-1);
+ StorageIndex count = 0;
+ // within each inner-vector, wi[inner_index] will hold the position of the first element with that inner index in the index/value buffers
+ for(Index j=0; j<outerSize(); ++j)
+ {
+ StorageIndex start = count;
+ Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
+ for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
+ {
+ Index i = m_data.index(k);
+ if(wi(i)>=start)
+ {
+ // we have already met this entry => accumulate it
+ m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
+ }
+ else
+ {
+ m_data.value(count) = m_data.value(k);
+ m_data.index(count) = m_data.index(k);
+ wi(i) = count;
+ ++count;
+ }
+ }
+ m_outerIndex[j] = start;
+ }
+ m_outerIndex[m_outerSize] = count;
+
+ // turn the matrix into compressed form
+ std::free(m_innerNonZeros);
+ m_innerNonZeros = 0;
+ m_data.resize(m_outerIndex[m_outerSize]);
+ }
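+
+ // [Editor's note] Worked micro-example for collapseDuplicates with the
+ // default summing functor: triplets (0,0,1.0), (0,0,2.0), (1,0,5.0) in one
+ // column collapse to the two entries (0,0)=3.0 and (1,0)=5.0, and the
+ // matrix ends up in compressed mode.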
+
+ template<typename Scalar, int _Options, typename _StorageIndex>
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ #endif
+ // two-pass algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+ typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ typedef internal::evaluator<_OtherCopy> OtherCopyEval;
+ OtherCopy otherCopy(other.derived());
+ OtherCopyEval otherCopyEval(otherCopy);
+
+ SparseMatrix dest(other.rows(),other.cols());
+ Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
+
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
+ ++dest.m_outerIndex[it.index()];
+
+ // prefix sum
+ StorageIndex count = 0;
+ IndexVector positions(dest.outerSize());
+ for (Index j=0; j<dest.outerSize(); ++j)
+ {
+ StorageIndex tmp = dest.m_outerIndex[j];
+ dest.m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ dest.m_outerIndex[dest.outerSize()] = count;
+ // alloc
+ dest.m_data.resize(count);
+ // pass 2
+ for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ dest.m_data.index(pos) = j;
+ dest.m_data.value(pos) = it.value();
+ }
+ }
+ this->swap(dest);
+ return *this;
+ }
+ else
+ {
+ if(other.isRValue())
+ {
+ initAssignment(other.derived());
+ }
+ // there is no special optimization
+ return Base::operator=(other.derived());
+ }
+ }
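+
+ // [Editor's note] The "prefix sum" step above is a counting sort over
+ // inner indices. Tiny worked example: per-inner-vector counts [2, 0, 3]
+ // become write offsets [0, 2, 2] (total 5), so pass 2 can scatter each
+ // coefficient directly to its final, sorted position.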
+
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
+ {
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ if(isCompressed())
+ {
+ if(nonZeros()==0)
+ {
+ // reserve space if not already done
+ if(m_data.allocatedSize()==0)
+ m_data.reserve(2*m_innerSize);
+
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
+
+ // pack all inner-vectors to the end of the pre-allocated space
+ // and allocate the entire free-space to the first inner-vector
+ StorageIndex end = convert_index(m_data.allocatedSize());
+ for(Index j=1; j<=m_outerSize; ++j)
+ m_outerIndex[j] = end;
+ }
+ else
+ {
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+ for(Index j=0; j<m_outerSize; ++j)
+ m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
+ }
+ }
+
+ // check whether we can do a fast "push back" insertion
+ Index data_end = m_data.allocatedSize();
+
+ // First case: we are filling a new inner vector which is packed at the end.
+ // We assume that all remaining inner-vectors are also empty and packed to the end.
+ if(m_outerIndex[outer]==data_end)
+ {
+ eigen_internal_assert(m_innerNonZeros[outer]==0);
+
+ // pack previous empty inner-vectors to end of the used-space
+ // and allocate the entire free-space to the current inner-vector.
+ StorageIndex p = convert_index(m_data.size());
+ Index j = outer;
+ while(j>=0 && m_innerNonZeros[j]==0)
+ m_outerIndex[j--] = p;
+
+ // push back the new element
+ ++m_innerNonZeros[outer];
+ m_data.append(Scalar(0), inner);
+
+ // check for reallocation
+ if(data_end != m_data.allocatedSize())
+ {
+ // m_data has been reallocated
+ // -> move remaining inner-vectors back to the end of the free-space
+ // so that the entire free-space is allocated to the current inner-vector.
+ eigen_internal_assert(data_end < m_data.allocatedSize());
+ StorageIndex new_end = convert_index(m_data.allocatedSize());
+ for(Index k=outer+1; k<=m_outerSize; ++k)
+ if(m_outerIndex[k]==data_end)
+ m_outerIndex[k] = new_end;
+ }
+ return m_data.value(p);
+ }
+
+ // Second case: the next inner-vector is packed to the end
+ // and the current inner-vector's end matches the used-space.
+ if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
+ {
+ eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
+
+ // add space for the new element
+ ++m_innerNonZeros[outer];
+ m_data.resize(m_data.size()+1);
+
+ // check for reallocation
+ if(data_end != m_data.allocatedSize())
+ {
+ // m_data has been reallocated
+ // -> move remaining inner-vectors back to the end of the free-space
+ // so that the entire free-space is allocated to the current inner-vector.
+ eigen_internal_assert(data_end < m_data.allocatedSize());
+ StorageIndex new_end = convert_index(m_data.allocatedSize());
+ for(Index k=outer+1; k<=m_outerSize; ++k)
+ if(m_outerIndex[k]==data_end)
+ m_outerIndex[k] = new_end;
+ }
+
+ // and insert it at the right position (sorted insertion)
+ Index startId = m_outerIndex[outer];
+ Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = convert_index(inner);
+ return (m_data.value(p) = Scalar(0));
+ }
+
+ if(m_data.size() != m_data.allocatedSize())
+ {
+ // make sure the matrix is compatible with random un-compressed insertion:
+ m_data.resize(m_data.allocatedSize());
+ this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
+ }
+
+ return insertUncompressed(row,col);
+ }
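+
+ // [Editor's note] Typical client-side pattern for insert(): reserve first,
+ // then insert, then compress. Hedged sketch with hypothetical names/sizes:
+ // \code
+ // Eigen::SparseMatrix<double> A(rows, cols);     // column-major by default
+ // A.reserve(Eigen::VectorXi::Constant(cols, 6)); // ~6 nonzeros per column
+ // A.insert(i, j) = v_ij;  // (i,j) must not exist yet; use coeffRef otherwise
+ // A.makeCompressed();     // back to compressed (CSC) storage
+ // \endcode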
+
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
+ {
+ eigen_assert(!isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const StorageIndex inner = convert_index(IsRowMajor ? col : row);
+
+ Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
+ StorageIndex innerNNZ = m_innerNonZeros[outer];
+ if(innerNNZ>=room)
+ {
+ // this inner vector is full, we need to reallocate the whole buffer :(
+ reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
+ }
+
+ Index startId = m_outerIndex[outer];
+ Index p = startId + m_innerNonZeros[outer];
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+ eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
+
+ m_innerNonZeros[outer]++;
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = Scalar(0));
+ }
+
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
+ {
+ eigen_assert(isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = convert_index(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+ // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ std::size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
+ std::size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ double reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data; to reduce repeated reallocations
+ // we use a smart resize algorithm based on the current filling ratio
+ // in addition, we use double to avoid integer overflows
+ double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
+ reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce multiple minor reallocs when the matrix is almost filled
+ // 2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = Scalar(0));
+ }
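+
+ // [Editor's note] Worked numbers for the realloc heuristic above
+ // (illustrative only): with 100 stored values after filling the first 10
+ // of 100 outer vectors, nnzEstimate = 100*100/10 = 1000, so the raw ratio
+ // is (1000-100)/100 = 9, which is then clamped to the [1.5, 8] bound.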
+
+ namespace internal {
+
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
+ : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
+ {
+ typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
+ typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
+ evaluator() : Base() {}
+ explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
+ };
+
+ }
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_SPARSEMATRIX_H