@smake/eigen 1.1.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (431)
  1. package/README.md +1 -1
  2. package/eigen/Eigen/AccelerateSupport +52 -0
  3. package/eigen/Eigen/Cholesky +18 -20
  4. package/eigen/Eigen/CholmodSupport +28 -28
  5. package/eigen/Eigen/Core +187 -120
  6. package/eigen/Eigen/Eigenvalues +16 -13
  7. package/eigen/Eigen/Geometry +18 -18
  8. package/eigen/Eigen/Householder +9 -7
  9. package/eigen/Eigen/IterativeLinearSolvers +8 -4
  10. package/eigen/Eigen/Jacobi +14 -13
  11. package/eigen/Eigen/KLUSupport +23 -21
  12. package/eigen/Eigen/LU +15 -16
  13. package/eigen/Eigen/MetisSupport +12 -12
  14. package/eigen/Eigen/OrderingMethods +54 -51
  15. package/eigen/Eigen/PaStiXSupport +23 -21
  16. package/eigen/Eigen/PardisoSupport +17 -14
  17. package/eigen/Eigen/QR +18 -20
  18. package/eigen/Eigen/QtAlignedMalloc +5 -12
  19. package/eigen/Eigen/SPQRSupport +21 -14
  20. package/eigen/Eigen/SVD +23 -17
  21. package/eigen/Eigen/Sparse +1 -2
  22. package/eigen/Eigen/SparseCholesky +18 -15
  23. package/eigen/Eigen/SparseCore +18 -17
  24. package/eigen/Eigen/SparseLU +9 -9
  25. package/eigen/Eigen/SparseQR +16 -14
  26. package/eigen/Eigen/StdDeque +5 -2
  27. package/eigen/Eigen/StdList +5 -2
  28. package/eigen/Eigen/StdVector +5 -2
  29. package/eigen/Eigen/SuperLUSupport +30 -24
  30. package/eigen/Eigen/ThreadPool +80 -0
  31. package/eigen/Eigen/UmfPackSupport +19 -17
  32. package/eigen/Eigen/Version +14 -0
  33. package/eigen/Eigen/src/AccelerateSupport/AccelerateSupport.h +423 -0
  34. package/eigen/Eigen/src/AccelerateSupport/InternalHeaderCheck.h +3 -0
  35. package/eigen/Eigen/src/Cholesky/InternalHeaderCheck.h +3 -0
  36. package/eigen/Eigen/src/Cholesky/LDLT.h +366 -405
  37. package/eigen/Eigen/src/Cholesky/LLT.h +323 -367
  38. package/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +81 -56
  39. package/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +585 -529
  40. package/eigen/Eigen/src/CholmodSupport/InternalHeaderCheck.h +3 -0
  41. package/eigen/Eigen/src/Core/ArithmeticSequence.h +143 -317
  42. package/eigen/Eigen/src/Core/Array.h +329 -370
  43. package/eigen/Eigen/src/Core/ArrayBase.h +190 -203
  44. package/eigen/Eigen/src/Core/ArrayWrapper.h +126 -170
  45. package/eigen/Eigen/src/Core/Assign.h +30 -40
  46. package/eigen/Eigen/src/Core/AssignEvaluator.h +651 -604
  47. package/eigen/Eigen/src/Core/Assign_MKL.h +125 -120
  48. package/eigen/Eigen/src/Core/BandMatrix.h +267 -282
  49. package/eigen/Eigen/src/Core/Block.h +371 -390
  50. package/eigen/Eigen/src/Core/CommaInitializer.h +85 -100
  51. package/eigen/Eigen/src/Core/ConditionEstimator.h +51 -53
  52. package/eigen/Eigen/src/Core/CoreEvaluators.h +1214 -937
  53. package/eigen/Eigen/src/Core/CoreIterators.h +72 -63
  54. package/eigen/Eigen/src/Core/CwiseBinaryOp.h +112 -129
  55. package/eigen/Eigen/src/Core/CwiseNullaryOp.h +676 -702
  56. package/eigen/Eigen/src/Core/CwiseTernaryOp.h +77 -103
  57. package/eigen/Eigen/src/Core/CwiseUnaryOp.h +55 -67
  58. package/eigen/Eigen/src/Core/CwiseUnaryView.h +127 -92
  59. package/eigen/Eigen/src/Core/DenseBase.h +630 -658
  60. package/eigen/Eigen/src/Core/DenseCoeffsBase.h +511 -628
  61. package/eigen/Eigen/src/Core/DenseStorage.h +511 -590
  62. package/eigen/Eigen/src/Core/DeviceWrapper.h +153 -0
  63. package/eigen/Eigen/src/Core/Diagonal.h +168 -207
  64. package/eigen/Eigen/src/Core/DiagonalMatrix.h +346 -317
  65. package/eigen/Eigen/src/Core/DiagonalProduct.h +12 -10
  66. package/eigen/Eigen/src/Core/Dot.h +167 -217
  67. package/eigen/Eigen/src/Core/EigenBase.h +74 -85
  68. package/eigen/Eigen/src/Core/Fill.h +138 -0
  69. package/eigen/Eigen/src/Core/FindCoeff.h +464 -0
  70. package/eigen/Eigen/src/Core/ForceAlignedAccess.h +90 -113
  71. package/eigen/Eigen/src/Core/Fuzzy.h +82 -105
  72. package/eigen/Eigen/src/Core/GeneralProduct.h +315 -261
  73. package/eigen/Eigen/src/Core/GenericPacketMath.h +1182 -520
  74. package/eigen/Eigen/src/Core/GlobalFunctions.h +193 -157
  75. package/eigen/Eigen/src/Core/IO.h +131 -156
  76. package/eigen/Eigen/src/Core/IndexedView.h +209 -125
  77. package/eigen/Eigen/src/Core/InnerProduct.h +260 -0
  78. package/eigen/Eigen/src/Core/InternalHeaderCheck.h +3 -0
  79. package/eigen/Eigen/src/Core/Inverse.h +50 -59
  80. package/eigen/Eigen/src/Core/Map.h +123 -141
  81. package/eigen/Eigen/src/Core/MapBase.h +255 -282
  82. package/eigen/Eigen/src/Core/MathFunctions.h +1247 -1201
  83. package/eigen/Eigen/src/Core/MathFunctionsImpl.h +162 -99
  84. package/eigen/Eigen/src/Core/Matrix.h +463 -494
  85. package/eigen/Eigen/src/Core/MatrixBase.h +468 -470
  86. package/eigen/Eigen/src/Core/NestByValue.h +58 -52
  87. package/eigen/Eigen/src/Core/NoAlias.h +79 -86
  88. package/eigen/Eigen/src/Core/NumTraits.h +206 -206
  89. package/eigen/Eigen/src/Core/PartialReduxEvaluator.h +163 -142
  90. package/eigen/Eigen/src/Core/PermutationMatrix.h +461 -511
  91. package/eigen/Eigen/src/Core/PlainObjectBase.h +858 -972
  92. package/eigen/Eigen/src/Core/Product.h +246 -130
  93. package/eigen/Eigen/src/Core/ProductEvaluators.h +779 -671
  94. package/eigen/Eigen/src/Core/Random.h +153 -164
  95. package/eigen/Eigen/src/Core/RandomImpl.h +262 -0
  96. package/eigen/Eigen/src/Core/RealView.h +250 -0
  97. package/eigen/Eigen/src/Core/Redux.h +334 -314
  98. package/eigen/Eigen/src/Core/Ref.h +259 -257
  99. package/eigen/Eigen/src/Core/Replicate.h +92 -104
  100. package/eigen/Eigen/src/Core/Reshaped.h +215 -271
  101. package/eigen/Eigen/src/Core/ReturnByValue.h +47 -55
  102. package/eigen/Eigen/src/Core/Reverse.h +133 -148
  103. package/eigen/Eigen/src/Core/Select.h +68 -140
  104. package/eigen/Eigen/src/Core/SelfAdjointView.h +254 -290
  105. package/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +23 -20
  106. package/eigen/Eigen/src/Core/SkewSymmetricMatrix3.h +382 -0
  107. package/eigen/Eigen/src/Core/Solve.h +88 -102
  108. package/eigen/Eigen/src/Core/SolveTriangular.h +126 -124
  109. package/eigen/Eigen/src/Core/SolverBase.h +132 -133
  110. package/eigen/Eigen/src/Core/StableNorm.h +113 -147
  111. package/eigen/Eigen/src/Core/StlIterators.h +404 -248
  112. package/eigen/Eigen/src/Core/Stride.h +90 -92
  113. package/eigen/Eigen/src/Core/Swap.h +70 -39
  114. package/eigen/Eigen/src/Core/Transpose.h +258 -295
  115. package/eigen/Eigen/src/Core/Transpositions.h +270 -333
  116. package/eigen/Eigen/src/Core/TriangularMatrix.h +642 -743
  117. package/eigen/Eigen/src/Core/VectorBlock.h +59 -72
  118. package/eigen/Eigen/src/Core/VectorwiseOp.h +653 -704
  119. package/eigen/Eigen/src/Core/Visitor.h +464 -308
  120. package/eigen/Eigen/src/Core/arch/AVX/Complex.h +380 -187
  121. package/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +65 -163
  122. package/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +2145 -638
  123. package/eigen/Eigen/src/Core/arch/AVX/Reductions.h +353 -0
  124. package/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +253 -60
  125. package/eigen/Eigen/src/Core/arch/AVX512/Complex.h +278 -228
  126. package/eigen/Eigen/src/Core/arch/AVX512/GemmKernel.h +1245 -0
  127. package/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +48 -269
  128. package/eigen/Eigen/src/Core/arch/AVX512/MathFunctionsFP16.h +75 -0
  129. package/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1597 -754
  130. package/eigen/Eigen/src/Core/arch/AVX512/PacketMathFP16.h +1413 -0
  131. package/eigen/Eigen/src/Core/arch/AVX512/Reductions.h +297 -0
  132. package/eigen/Eigen/src/Core/arch/AVX512/TrsmKernel.h +1167 -0
  133. package/eigen/Eigen/src/Core/arch/AVX512/TrsmUnrolls.inc +1219 -0
  134. package/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +229 -41
  135. package/eigen/Eigen/src/Core/arch/AVX512/TypeCastingFP16.h +130 -0
  136. package/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +420 -184
  137. package/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +40 -49
  138. package/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2962 -2213
  139. package/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +196 -212
  140. package/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +713 -441
  141. package/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMAbfloat16.h +742 -0
  142. package/eigen/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.inc +2818 -0
  143. package/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2380 -1362
  144. package/eigen/Eigen/src/Core/arch/AltiVec/TypeCasting.h +153 -0
  145. package/eigen/Eigen/src/Core/arch/Default/BFloat16.h +390 -224
  146. package/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +78 -67
  147. package/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1784 -799
  148. package/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +167 -50
  149. package/eigen/Eigen/src/Core/arch/Default/Half.h +528 -379
  150. package/eigen/Eigen/src/Core/arch/Default/Settings.h +10 -12
  151. package/eigen/Eigen/src/Core/arch/GPU/Complex.h +244 -0
  152. package/eigen/Eigen/src/Core/arch/GPU/MathFunctions.h +41 -40
  153. package/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +550 -523
  154. package/eigen/Eigen/src/Core/arch/GPU/Tuple.h +268 -0
  155. package/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +27 -30
  156. package/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +8 -8
  157. package/eigen/Eigen/src/Core/arch/HVX/PacketMath.h +1088 -0
  158. package/eigen/Eigen/src/Core/arch/LSX/Complex.h +520 -0
  159. package/eigen/Eigen/src/Core/arch/LSX/GeneralBlockPanelKernel.h +23 -0
  160. package/eigen/Eigen/src/Core/arch/LSX/MathFunctions.h +43 -0
  161. package/eigen/Eigen/src/Core/arch/LSX/PacketMath.h +2866 -0
  162. package/eigen/Eigen/src/Core/arch/LSX/TypeCasting.h +526 -0
  163. package/eigen/Eigen/src/Core/arch/MSA/Complex.h +54 -82
  164. package/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +84 -92
  165. package/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +51 -47
  166. package/eigen/Eigen/src/Core/arch/NEON/Complex.h +454 -306
  167. package/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +175 -115
  168. package/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +23 -30
  169. package/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4366 -2857
  170. package/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +616 -393
  171. package/eigen/Eigen/src/Core/arch/NEON/UnaryFunctors.h +57 -0
  172. package/eigen/Eigen/src/Core/arch/SSE/Complex.h +350 -198
  173. package/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +38 -149
  174. package/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +1791 -912
  175. package/eigen/Eigen/src/Core/arch/SSE/Reductions.h +324 -0
  176. package/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +128 -40
  177. package/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +10 -6
  178. package/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +156 -234
  179. package/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +6 -3
  180. package/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +27 -32
  181. package/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +119 -117
  182. package/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +325 -419
  183. package/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +15 -17
  184. package/eigen/Eigen/src/Core/arch/ZVector/Complex.h +325 -181
  185. package/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +94 -83
  186. package/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +811 -458
  187. package/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +121 -124
  188. package/eigen/Eigen/src/Core/functors/BinaryFunctors.h +576 -370
  189. package/eigen/Eigen/src/Core/functors/NullaryFunctors.h +194 -109
  190. package/eigen/Eigen/src/Core/functors/StlFunctors.h +95 -112
  191. package/eigen/Eigen/src/Core/functors/TernaryFunctors.h +34 -7
  192. package/eigen/Eigen/src/Core/functors/UnaryFunctors.h +1038 -749
  193. package/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1883 -1375
  194. package/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +312 -370
  195. package/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +189 -176
  196. package/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +84 -81
  197. package/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +154 -73
  198. package/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +292 -337
  199. package/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +80 -77
  200. package/eigen/Eigen/src/Core/products/Parallelizer.h +207 -105
  201. package/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +327 -388
  202. package/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +206 -224
  203. package/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +138 -147
  204. package/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +58 -61
  205. package/eigen/Eigen/src/Core/products/SelfadjointProduct.h +71 -71
  206. package/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +48 -47
  207. package/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +294 -369
  208. package/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +246 -238
  209. package/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +244 -247
  210. package/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +212 -192
  211. package/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +328 -277
  212. package/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +108 -109
  213. package/eigen/Eigen/src/Core/products/TriangularSolverVector.h +68 -94
  214. package/eigen/Eigen/src/Core/util/Assert.h +158 -0
  215. package/eigen/Eigen/src/Core/util/BlasUtil.h +342 -303
  216. package/eigen/Eigen/src/Core/util/ConfigureVectorization.h +348 -317
  217. package/eigen/Eigen/src/Core/util/Constants.h +297 -262
  218. package/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +130 -90
  219. package/eigen/Eigen/src/Core/util/EmulateArray.h +270 -0
  220. package/eigen/Eigen/src/Core/util/ForwardDeclarations.h +449 -247
  221. package/eigen/Eigen/src/Core/util/GpuHipCudaDefines.inc +101 -0
  222. package/eigen/Eigen/src/Core/util/GpuHipCudaUndefines.inc +45 -0
  223. package/eigen/Eigen/src/Core/util/IndexedViewHelper.h +417 -116
  224. package/eigen/Eigen/src/Core/util/IntegralConstant.h +211 -204
  225. package/eigen/Eigen/src/Core/util/MKL_support.h +39 -37
  226. package/eigen/Eigen/src/Core/util/Macros.h +655 -773
  227. package/eigen/Eigen/src/Core/util/MaxSizeVector.h +139 -0
  228. package/eigen/Eigen/src/Core/util/Memory.h +970 -748
  229. package/eigen/Eigen/src/Core/util/Meta.h +581 -633
  230. package/eigen/Eigen/src/Core/util/MoreMeta.h +638 -0
  231. package/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +32 -19
  232. package/eigen/Eigen/src/Core/util/ReshapedHelper.h +17 -17
  233. package/eigen/Eigen/src/Core/util/Serializer.h +209 -0
  234. package/eigen/Eigen/src/Core/util/StaticAssert.h +50 -166
  235. package/eigen/Eigen/src/Core/util/SymbolicIndex.h +377 -225
  236. package/eigen/Eigen/src/Core/util/XprHelper.h +784 -547
  237. package/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +246 -277
  238. package/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +299 -319
  239. package/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +52 -48
  240. package/eigen/Eigen/src/Eigenvalues/EigenSolver.h +413 -456
  241. package/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +309 -325
  242. package/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +157 -171
  243. package/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +292 -310
  244. package/eigen/Eigen/src/Eigenvalues/InternalHeaderCheck.h +3 -0
  245. package/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +89 -105
  246. package/eigen/Eigen/src/Eigenvalues/RealQZ.h +537 -607
  247. package/eigen/Eigen/src/Eigenvalues/RealSchur.h +342 -381
  248. package/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +41 -35
  249. package/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +541 -595
  250. package/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +47 -44
  251. package/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +430 -462
  252. package/eigen/Eigen/src/Geometry/AlignedBox.h +226 -227
  253. package/eigen/Eigen/src/Geometry/AngleAxis.h +131 -133
  254. package/eigen/Eigen/src/Geometry/EulerAngles.h +163 -74
  255. package/eigen/Eigen/src/Geometry/Homogeneous.h +285 -333
  256. package/eigen/Eigen/src/Geometry/Hyperplane.h +151 -160
  257. package/eigen/Eigen/src/Geometry/InternalHeaderCheck.h +3 -0
  258. package/eigen/Eigen/src/Geometry/OrthoMethods.h +168 -146
  259. package/eigen/Eigen/src/Geometry/ParametrizedLine.h +127 -127
  260. package/eigen/Eigen/src/Geometry/Quaternion.h +566 -506
  261. package/eigen/Eigen/src/Geometry/Rotation2D.h +107 -105
  262. package/eigen/Eigen/src/Geometry/RotationBase.h +148 -145
  263. package/eigen/Eigen/src/Geometry/Scaling.h +113 -106
  264. package/eigen/Eigen/src/Geometry/Transform.h +858 -936
  265. package/eigen/Eigen/src/Geometry/Translation.h +94 -92
  266. package/eigen/Eigen/src/Geometry/Umeyama.h +79 -84
  267. package/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +90 -104
  268. package/eigen/Eigen/src/Householder/BlockHouseholder.h +51 -46
  269. package/eigen/Eigen/src/Householder/Householder.h +102 -124
  270. package/eigen/Eigen/src/Householder/HouseholderSequence.h +412 -453
  271. package/eigen/Eigen/src/Householder/InternalHeaderCheck.h +3 -0
  272. package/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +149 -162
  273. package/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +124 -119
  274. package/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +92 -104
  275. package/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +251 -243
  276. package/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +224 -228
  277. package/eigen/Eigen/src/IterativeLinearSolvers/InternalHeaderCheck.h +3 -0
  278. package/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +178 -227
  279. package/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +79 -84
  280. package/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +54 -60
  281. package/eigen/Eigen/src/Jacobi/InternalHeaderCheck.h +3 -0
  282. package/eigen/Eigen/src/Jacobi/Jacobi.h +252 -308
  283. package/eigen/Eigen/src/KLUSupport/InternalHeaderCheck.h +3 -0
  284. package/eigen/Eigen/src/KLUSupport/KLUSupport.h +208 -227
  285. package/eigen/Eigen/src/LU/Determinant.h +50 -69
  286. package/eigen/Eigen/src/LU/FullPivLU.h +545 -596
  287. package/eigen/Eigen/src/LU/InternalHeaderCheck.h +3 -0
  288. package/eigen/Eigen/src/LU/InverseImpl.h +206 -285
  289. package/eigen/Eigen/src/LU/PartialPivLU.h +390 -428
  290. package/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +54 -40
  291. package/eigen/Eigen/src/LU/arch/InverseSize4.h +72 -70
  292. package/eigen/Eigen/src/MetisSupport/InternalHeaderCheck.h +3 -0
  293. package/eigen/Eigen/src/MetisSupport/MetisSupport.h +81 -93
  294. package/eigen/Eigen/src/OrderingMethods/Amd.h +243 -265
  295. package/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +831 -1004
  296. package/eigen/Eigen/src/OrderingMethods/InternalHeaderCheck.h +3 -0
  297. package/eigen/Eigen/src/OrderingMethods/Ordering.h +112 -119
  298. package/eigen/Eigen/src/PaStiXSupport/InternalHeaderCheck.h +3 -0
  299. package/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +524 -570
  300. package/eigen/Eigen/src/PardisoSupport/InternalHeaderCheck.h +3 -0
  301. package/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +385 -430
  302. package/eigen/Eigen/src/QR/ColPivHouseholderQR.h +479 -479
  303. package/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +120 -56
  304. package/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +166 -153
  305. package/eigen/Eigen/src/QR/FullPivHouseholderQR.h +495 -475
  306. package/eigen/Eigen/src/QR/HouseholderQR.h +394 -285
  307. package/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +32 -23
  308. package/eigen/Eigen/src/QR/InternalHeaderCheck.h +3 -0
  309. package/eigen/Eigen/src/SPQRSupport/InternalHeaderCheck.h +3 -0
  310. package/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +244 -264
  311. package/eigen/Eigen/src/SVD/BDCSVD.h +817 -713
  312. package/eigen/Eigen/src/SVD/BDCSVD_LAPACKE.h +174 -0
  313. package/eigen/Eigen/src/SVD/InternalHeaderCheck.h +3 -0
  314. package/eigen/Eigen/src/SVD/JacobiSVD.h +577 -543
  315. package/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +85 -49
  316. package/eigen/Eigen/src/SVD/SVDBase.h +242 -182
  317. package/eigen/Eigen/src/SVD/UpperBidiagonalization.h +200 -235
  318. package/eigen/Eigen/src/SparseCholesky/InternalHeaderCheck.h +3 -0
  319. package/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +765 -594
  320. package/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +308 -94
  321. package/eigen/Eigen/src/SparseCore/AmbiVector.h +202 -251
  322. package/eigen/Eigen/src/SparseCore/CompressedStorage.h +184 -252
  323. package/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +134 -178
  324. package/eigen/Eigen/src/SparseCore/InternalHeaderCheck.h +3 -0
  325. package/eigen/Eigen/src/SparseCore/SparseAssign.h +149 -140
  326. package/eigen/Eigen/src/SparseCore/SparseBlock.h +403 -440
  327. package/eigen/Eigen/src/SparseCore/SparseColEtree.h +100 -112
  328. package/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +525 -303
  329. package/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +555 -339
  330. package/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +100 -108
  331. package/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +169 -197
  332. package/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +71 -71
  333. package/eigen/Eigen/src/SparseCore/SparseDot.h +49 -47
  334. package/eigen/Eigen/src/SparseCore/SparseFuzzy.h +13 -11
  335. package/eigen/Eigen/src/SparseCore/SparseMap.h +243 -253
  336. package/eigen/Eigen/src/SparseCore/SparseMatrix.h +1603 -1245
  337. package/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +403 -350
  338. package/eigen/Eigen/src/SparseCore/SparsePermutation.h +186 -115
  339. package/eigen/Eigen/src/SparseCore/SparseProduct.h +94 -97
  340. package/eigen/Eigen/src/SparseCore/SparseRedux.h +22 -24
  341. package/eigen/Eigen/src/SparseCore/SparseRef.h +268 -295
  342. package/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +370 -416
  343. package/eigen/Eigen/src/SparseCore/SparseSolverBase.h +78 -87
  344. package/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +81 -95
  345. package/eigen/Eigen/src/SparseCore/SparseTranspose.h +62 -71
  346. package/eigen/Eigen/src/SparseCore/SparseTriangularView.h +132 -144
  347. package/eigen/Eigen/src/SparseCore/SparseUtil.h +138 -115
  348. package/eigen/Eigen/src/SparseCore/SparseVector.h +426 -372
  349. package/eigen/Eigen/src/SparseCore/SparseView.h +164 -193
  350. package/eigen/Eigen/src/SparseCore/TriangularSolver.h +129 -170
  351. package/eigen/Eigen/src/SparseLU/InternalHeaderCheck.h +3 -0
  352. package/eigen/Eigen/src/SparseLU/SparseLU.h +756 -710
  353. package/eigen/Eigen/src/SparseLU/SparseLUImpl.h +61 -48
  354. package/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +102 -118
  355. package/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +38 -35
  356. package/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +245 -301
  357. package/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +44 -49
  358. package/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +104 -108
  359. package/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +89 -100
  360. package/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +57 -58
  361. package/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +43 -55
  362. package/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +74 -71
  363. package/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +124 -132
  364. package/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +136 -159
  365. package/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +51 -52
  366. package/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +67 -73
  367. package/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +24 -26
  368. package/eigen/Eigen/src/SparseQR/InternalHeaderCheck.h +3 -0
  369. package/eigen/Eigen/src/SparseQR/SparseQR.h +450 -502
  370. package/eigen/Eigen/src/StlSupport/StdDeque.h +28 -93
  371. package/eigen/Eigen/src/StlSupport/StdList.h +28 -84
  372. package/eigen/Eigen/src/StlSupport/StdVector.h +28 -108
  373. package/eigen/Eigen/src/StlSupport/details.h +48 -50
  374. package/eigen/Eigen/src/SuperLUSupport/InternalHeaderCheck.h +3 -0
  375. package/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +634 -730
  376. package/eigen/Eigen/src/ThreadPool/Barrier.h +70 -0
  377. package/eigen/Eigen/src/ThreadPool/CoreThreadPoolDevice.h +336 -0
  378. package/eigen/Eigen/src/ThreadPool/EventCount.h +241 -0
  379. package/eigen/Eigen/src/ThreadPool/ForkJoin.h +140 -0
  380. package/eigen/Eigen/src/ThreadPool/InternalHeaderCheck.h +4 -0
  381. package/eigen/Eigen/src/ThreadPool/NonBlockingThreadPool.h +587 -0
  382. package/eigen/Eigen/src/ThreadPool/RunQueue.h +230 -0
  383. package/eigen/Eigen/src/ThreadPool/ThreadCancel.h +21 -0
  384. package/eigen/Eigen/src/ThreadPool/ThreadEnvironment.h +43 -0
  385. package/eigen/Eigen/src/ThreadPool/ThreadLocal.h +289 -0
  386. package/eigen/Eigen/src/ThreadPool/ThreadPoolInterface.h +50 -0
  387. package/eigen/Eigen/src/ThreadPool/ThreadYield.h +16 -0
  388. package/eigen/Eigen/src/UmfPackSupport/InternalHeaderCheck.h +3 -0
  389. package/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +428 -464
  390. package/eigen/Eigen/src/misc/Image.h +41 -43
  391. package/eigen/Eigen/src/misc/InternalHeaderCheck.h +3 -0
  392. package/eigen/Eigen/src/misc/Kernel.h +39 -41
  393. package/eigen/Eigen/src/misc/RealSvd2x2.h +19 -21
  394. package/eigen/Eigen/src/misc/blas.h +83 -426
  395. package/eigen/Eigen/src/misc/lapacke.h +9972 -16179
  396. package/eigen/Eigen/src/misc/lapacke_helpers.h +163 -0
  397. package/eigen/Eigen/src/misc/lapacke_mangling.h +4 -5
  398. package/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.inc +344 -0
  399. package/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.inc +544 -0
  400. package/eigen/Eigen/src/plugins/{BlockMethods.h → BlockMethods.inc} +434 -506
  401. package/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.inc +116 -0
  402. package/eigen/Eigen/src/plugins/{CommonCwiseUnaryOps.h → CommonCwiseUnaryOps.inc} +58 -68
  403. package/eigen/Eigen/src/plugins/IndexedViewMethods.inc +192 -0
  404. package/eigen/Eigen/src/plugins/InternalHeaderCheck.inc +3 -0
  405. package/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.inc +331 -0
  406. package/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.inc +118 -0
  407. package/eigen/Eigen/src/plugins/ReshapedMethods.inc +133 -0
  408. package/package.json +1 -1
  409. package/eigen/COPYING.APACHE +0 -203
  410. package/eigen/COPYING.BSD +0 -26
  411. package/eigen/COPYING.GPL +0 -674
  412. package/eigen/COPYING.LGPL +0 -502
  413. package/eigen/COPYING.MINPACK +0 -51
  414. package/eigen/COPYING.MPL2 +0 -373
  415. package/eigen/COPYING.README +0 -18
  416. package/eigen/Eigen/src/Core/BooleanRedux.h +0 -162
  417. package/eigen/Eigen/src/Core/arch/CUDA/Complex.h +0 -258
  418. package/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +0 -120
  419. package/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +0 -694
  420. package/eigen/Eigen/src/Core/util/NonMPL2.h +0 -3
  421. package/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +0 -67
  422. package/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +0 -280
  423. package/eigen/Eigen/src/misc/lapack.h +0 -152
  424. package/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +0 -358
  425. package/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +0 -696
  426. package/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +0 -115
  427. package/eigen/Eigen/src/plugins/IndexedViewMethods.h +0 -262
  428. package/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +0 -152
  429. package/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +0 -95
  430. package/eigen/Eigen/src/plugins/ReshapedMethods.h +0 -149
  431. package/eigen/README.md +0 -5
package/eigen/Eigen/src/SparseCore/SparseMatrix.h
@@ -10,44 +10,47 @@
  #ifndef EIGEN_SPARSEMATRIX_H
  #define EIGEN_SPARSEMATRIX_H

- namespace Eigen {
+ // IWYU pragma: private
+ #include "./InternalHeaderCheck.h"
+
+ namespace Eigen {

  /** \ingroup SparseCore_Module
- *
- * \class SparseMatrix
- *
- * \brief A versatible sparse matrix representation
- *
- * This class implements a more versatile variants of the common \em compressed row/column storage format.
- * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
- * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
- * space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
- * can be done with limited memory reallocation and copies.
- *
- * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
- * compatible with many library.
- *
- * More details on this storage sceheme are given in the \ref TutorialSparse "manual pages".
- *
- * \tparam _Scalar the scalar type, i.e. the type of the coefficients
- * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
- * is ColMajor or RowMajor. The default is 0 which means column-major.
- * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
- *
- * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
- * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
- * Codes making use of \c SparseMatrix::Index, might thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
- *
- * This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
- */
+ *
+ * \class SparseMatrix
+ *
+ * \brief A versatible sparse matrix representation
+ *
+ * This class implements a more versatile variants of the common \em compressed row/column storage format.
+ * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
+ * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
+ * space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+ * can be done with limited memory reallocation and copies.
+ *
+ * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
+ * compatible with many library.
+ *
+ * More details on this storage sceheme are given in the \ref TutorialSparse "manual pages".
+ *
+ * \tparam Scalar_ the scalar type, i.e. the type of the coefficients
+ * \tparam Options_ Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is ColMajor or RowMajor. The default is 0 which means column-major.
+ * \tparam StorageIndex_ the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t).
+ * Default is \c int.
+ *
+ * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type
+ * (e.g., int), whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index. Codes making
+ * use of \c SparseMatrix::Index, might thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ */

  namespace internal {
- template<typename _Scalar, int _Options, typename _StorageIndex>
- struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
- {
- typedef _Scalar Scalar;
- typedef _StorageIndex StorageIndex;
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ struct traits<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
+ typedef Scalar_ Scalar;
+ typedef StorageIndex_ StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
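The class comment in the hunk above is unchanged in substance (only reflowed, with the template parameters renamed from `_Scalar`/`_Options`/`_StorageIndex` to `Scalar_`/`Options_`/`StorageIndex_`); it describes the two storage states of SparseMatrix: an uncompressed state with slack between inner vectors, and the standard compressed state reached via makeCompressed(). A minimal usage sketch of that scheme, ordinary Eigen API and not part of this diff:

    #include <Eigen/SparseCore>

    int main() {
      // Scalar_ = double, Options_ = ColMajor (the 0 default), StorageIndex_ = int.
      Eigen::SparseMatrix<double, Eigen::ColMajor, int> mat(4, 4);
      mat.insert(0, 0) = 1.0;  // insertions may leave slack between columns (uncompressed state)
      mat.insert(2, 1) = 3.0;
      mat.makeCompressed();    // squeeze the slack out -> standard compressed (CSC) layout
      return 0;
    }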
@@ -55,21 +58,21 @@ struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
  ColsAtCompileTime = Dynamic,
  MaxRowsAtCompileTime = Dynamic,
  MaxColsAtCompileTime = Dynamic,
- Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
+ Options = Options_,
+ Flags = Options_ | NestByRefBit | LvalueBit | CompressedAccessBit,
  SupportedAccessPatterns = InnerRandomAccessPattern
  };
  };

- template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
- struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
- {
- typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
+ template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
+ struct traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
+ typedef SparseMatrix<Scalar_, Options_, StorageIndex_> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
- typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef std::remove_reference_t<MatrixTypeNested> MatrixTypeNested_;

- typedef _Scalar Scalar;
+ typedef Scalar_ Scalar;
  typedef Dense StorageKind;
- typedef _StorageIndex StorageIndex;
+ typedef StorageIndex_ StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
@@ -81,947 +84,1032 @@ struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex
  };
  };

- template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
- struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
- : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
- {
- enum {
- Flags = 0
- };
+ template <typename Scalar_, int Options_, typename StorageIndex_, int DiagIndex>
+ struct traits<Diagonal<const SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>>
+ : public traits<Diagonal<SparseMatrix<Scalar_, Options_, StorageIndex_>, DiagIndex>> {
+ enum { Flags = 0 };
  };

- } // end namespace internal
-
- template<typename _Scalar, int _Options, typename _StorageIndex>
- class SparseMatrix
- : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
- {
- typedef SparseCompressedBase<SparseMatrix> Base;
- using Base::convert_index;
- friend class SparseVector<_Scalar,0,_StorageIndex>;
- template<typename, typename, typename, typename, typename>
- friend struct internal::Assignment;
- public:
- using Base::isCompressed;
- using Base::nonZeros;
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
- using Base::operator+=;
- using Base::operator-=;
-
- typedef MappedSparseMatrix<Scalar,Flags> Map;
- typedef Diagonal<SparseMatrix> DiagonalReturnType;
- typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
- typedef typename Base::InnerIterator InnerIterator;
- typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
-
-
- using Base::IsRowMajor;
- typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
- enum {
- Options = _Options
- };
-
- typedef typename Base::IndexVector IndexVector;
- typedef typename Base::ScalarVector ScalarVector;
- protected:
- typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
-
- Index m_outerSize;
- Index m_innerSize;
- StorageIndex* m_outerIndex;
- StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
- Storage m_data;
-
- public:
-
- /** \returns the number of rows of the matrix */
- inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- /** \returns the number of columns of the matrix */
- inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
-
- /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */
- inline Index innerSize() const { return m_innerSize; }
- /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */
- inline Index outerSize() const { return m_outerSize; }
-
- /** \returns a const pointer to the array of values.
- * This function is aimed at interoperability with other libraries.
- * \sa innerIndexPtr(), outerIndexPtr() */
- inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
- /** \returns a non-const pointer to the array of values.
- * This function is aimed at interoperability with other libraries.
- * \sa innerIndexPtr(), outerIndexPtr() */
- inline Scalar* valuePtr() { return m_data.valuePtr(); }
-
- /** \returns a const pointer to the array of inner indices.
- * This function is aimed at interoperability with other libraries.
- * \sa valuePtr(), outerIndexPtr() */
- inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
- /** \returns a non-const pointer to the array of inner indices.
- * This function is aimed at interoperability with other libraries.
- * \sa valuePtr(), outerIndexPtr() */
- inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
-
- /** \returns a const pointer to the array of the starting positions of the inner vectors.
- * This function is aimed at interoperability with other libraries.
- * \sa valuePtr(), innerIndexPtr() */
- inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
- /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
- * This function is aimed at interoperability with other libraries.
- * \sa valuePtr(), innerIndexPtr() */
- inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
-
- /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
- * This function is aimed at interoperability with other libraries.
- * \warning it returns the null pointer 0 in compressed mode */
- inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
- /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
- * This function is aimed at interoperability with other libraries.
- * \warning it returns the null pointer 0 in compressed mode */
- inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
-
- /** \internal */
- inline Storage& data() { return m_data; }
- /** \internal */
- inline const Storage& data() const { return m_data; }
-
- /** \returns the value of the matrix at position \a i, \a j
- * This function returns Scalar(0) if the element is an explicit \em zero */
- inline Scalar coeff(Index row, Index col) const
- {
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
- Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
- return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
- }
+ template <typename StorageIndex>
+ struct sparse_reserve_op {
+ EIGEN_DEVICE_FUNC sparse_reserve_op(Index begin, Index end, Index size) {
+ Index range = numext::mini(end - begin, size);
+ m_begin = begin;
+ m_end = begin + range;
+ m_val = StorageIndex(size / range);
+ m_remainder = StorageIndex(size % range);
+ }
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex operator()(IndexType i) const {
+ if ((i >= m_begin) && (i < m_end))
+ return m_val + ((i - m_begin) < m_remainder ? 1 : 0);
+ else
+ return 0;
+ }
+ StorageIndex m_val, m_remainder;
+ Index m_begin, m_end;
+ };
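The new internal::sparse_reserve_op functor added above spreads `size` reservation slots as evenly as possible over the index range [begin, end): every position receives size/range slots and the first size%range positions receive one extra (the functor_traits specialization in the next lines marks it cheap and repeatable). A standalone sketch of the same quotient/remainder arithmetic, with hypothetical names and for illustration only:

    #include <algorithm>
    #include <cstdio>

    // Hypothetical restatement of the split performed by sparse_reserve_op.
    int reservedAt(long i, long begin, long end, long size) {
      long range = std::min(end - begin, size);
      long val = size / range, remainder = size % range;
      if (i >= begin && i < begin + range) return int(val + ((i - begin) < remainder ? 1 : 0));
      return 0;
    }

    int main() {
      // Distributing 12 slots over positions [0, 5): 12/5 = 2 each; the first 12%5 = 2 get +1.
      for (long i = 0; i < 5; ++i) std::printf("%d ", reservedAt(i, 0, 5, 12));  // prints: 3 3 2 2 2
      return 0;
    }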

- /** \returns a non-const reference to the value of the matrix at position \a i, \a j
- *
- * If the element does not exist then it is inserted via the insert(Index,Index) function
- * which itself turns the matrix into a non compressed form if that was not the case.
- *
- * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
- * function if the element does not already exist.
- */
- inline Scalar& coeffRef(Index row, Index col)
- {
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- Index start = m_outerIndex[outer];
- Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
- eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
- if(end<=start)
- return insert(row,col);
- const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
- if((p<end) && (m_data.index(p)==inner))
- return m_data.value(p);
- else
- return insert(row,col);
- }
+ template <typename Scalar>
+ struct functor_traits<sparse_reserve_op<Scalar>> {
+ enum { Cost = 1, PacketAccess = false, IsRepeatable = true };
+ };

- /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
- * The non zero coefficient must \b not already exist.
- *
- * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
- * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
- * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
- * inserted by increasing outer-indices.
- *
- * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
- * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
- *
- * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
- * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
- *
- */
- Scalar& insert(Index row, Index col);
-
- public:
-
- /** Removes all non zeros but keep allocated memory
- *
- * This function does not free the currently allocated memory. To release as much as memory as possible,
- * call \code mat.data().squeeze(); \endcode after resizing it.
- *
- * \sa resize(Index,Index), data()
- */
- inline void setZero()
- {
- m_data.clear();
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
- if(m_innerNonZeros)
- memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
- }
+ } // end namespace internal
+
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ class SparseMatrix : public SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>> {
+ typedef SparseCompressedBase<SparseMatrix> Base;
+ using Base::convert_index;
+ friend class SparseVector<Scalar_, 0, StorageIndex_>;
+ template <typename, typename, typename, typename, typename>
+ friend struct internal::Assignment;
+
+ public:
+ using Base::isCompressed;
+ using Base::nonZeros;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+ using Base::operator+=;
+ using Base::operator-=;
+
+ typedef Eigen::Map<SparseMatrix<Scalar, Options_, StorageIndex>> Map;
+ typedef Diagonal<SparseMatrix> DiagonalReturnType;
+ typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
+ typedef typename Base::InnerIterator InnerIterator;
+ typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
+
+ using Base::IsRowMajor;
+ typedef internal::CompressedStorage<Scalar, StorageIndex> Storage;
+ enum { Options = Options_ };
+
+ typedef typename Base::IndexVector IndexVector;
+ typedef typename Base::ScalarVector ScalarVector;
+
+ protected:
+ typedef SparseMatrix<Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ StorageIndex* m_outerIndex;
+ StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
+ Storage m_data;
+
+ public:
+ /** \returns the number of rows of the matrix */
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ /** \returns the number of columns of the matrix */
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */
+ inline Index innerSize() const { return m_innerSize; }
+ /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */
+ inline Index outerSize() const { return m_outerSize; }
+
+ /** \returns a const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
+ /** \returns a non-const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline Scalar* valuePtr() { return m_data.valuePtr(); }
+
+ /** \returns a const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
+ /** \returns a non-const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
+
+ /** \returns a const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
+ /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
+
+ /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
+ /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
+
+ /** \internal */
+ constexpr Storage& data() { return m_data; }
+ /** \internal */
+ constexpr const Storage& data() const { return m_data; }
+
+ /** \returns the value of the matrix at position \a i, \a j
+ * This function returns Scalar(0) if the element is an explicit \em zero */
+ inline Scalar coeff(Index row, Index col) const {
+ eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer + 1];
+ return m_data.atInRange(m_outerIndex[outer], end, inner);
+ }
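The valuePtr()/innerIndexPtr()/outerIndexPtr() accessors carried over above expose the raw compressed-storage arrays for interoperability with other libraries. A small sketch (ordinary Eigen API, not part of this diff) walking a compressed column-major matrix through those three pointers:

    #include <Eigen/SparseCore>
    #include <cstdio>

    int main() {
      Eigen::SparseMatrix<double> A(3, 3);  // column-major by default
      A.insert(0, 0) = 1.0;
      A.insert(2, 1) = 3.0;
      A.makeCompressed();  // innerNonZeroPtr() is null only in this compressed state
      // Column j's entries live in positions [outer[j], outer[j+1]) of the value/index arrays.
      for (int j = 0; j < A.outerSize(); ++j)
        for (int p = A.outerIndexPtr()[j]; p < A.outerIndexPtr()[j + 1]; ++p)
          std::printf("(%d,%d) = %g\n", A.innerIndexPtr()[p], j, A.valuePtr()[p]);
      return 0;
    }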

- /** Preallocates \a reserveSize non zeros.
- *
- * Precondition: the matrix must be in compressed mode. */
- inline void reserve(Index reserveSize)
- {
- eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
- m_data.reserve(reserveSize);
- }
-
- #ifdef EIGEN_PARSED_BY_DOXYGEN
- /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
- *
- * This function turns the matrix in non-compressed mode.
- *
- * The type \c SizesType must expose the following interface:
- \code
- typedef value_type;
- const value_type& operator[](i) const;
- \endcode
- * for \c i in the [0,this->outerSize()[ range.
- * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
- */
- template<class SizesType>
- inline void reserve(const SizesType& reserveSizes);
- #else
- template<class SizesType>
- inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
- #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
- typename
- #endif
- SizesType::value_type())
- {
- EIGEN_UNUSED_VARIABLE(enableif);
- reserveInnerVectors(reserveSizes);
- }
- #endif // EIGEN_PARSED_BY_DOXYGEN
- protected:
- template<class SizesType>
- inline void reserveInnerVectors(const SizesType& reserveSizes)
- {
- if(isCompressed())
- {
- Index totalReserveSize = 0;
- // turn the matrix into non-compressed mode
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
- if (!m_innerNonZeros) internal::throw_std_bad_alloc();
-
- // temporarily use m_innerSizes to hold the new starting points.
- StorageIndex* newOuterIndex = m_innerNonZeros;
-
- StorageIndex count = 0;
- for(Index j=0; j<m_outerSize; ++j)
- {
- newOuterIndex[j] = count;
- count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
- totalReserveSize += reserveSizes[j];
- }
- m_data.reserve(totalReserveSize);
- StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
- for(Index j=m_outerSize-1; j>=0; --j)
- {
- StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
- for(Index i=innerNNZ-1; i>=0; --i)
- {
- m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
- m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
- }
- previousOuterIndex = m_outerIndex[j];
- m_outerIndex[j] = newOuterIndex[j];
- m_innerNonZeros[j] = innerNNZ;
- }
- if(m_outerSize>0)
- m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
-
- m_data.resize(m_outerIndex[m_outerSize]);
- }
- else
- {
- StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
- if (!newOuterIndex) internal::throw_std_bad_alloc();
-
- StorageIndex count = 0;
- for(Index j=0; j<m_outerSize; ++j)
- {
- newOuterIndex[j] = count;
- StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
- StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
- count += toReserve + m_innerNonZeros[j];
- }
- newOuterIndex[m_outerSize] = count;
-
- m_data.resize(count);
- for(Index j=m_outerSize-1; j>=0; --j)
- {
- Index offset = newOuterIndex[j] - m_outerIndex[j];
- if(offset>0)
- {
- StorageIndex innerNNZ = m_innerNonZeros[j];
- for(Index i=innerNNZ-1; i>=0; --i)
- {
- m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
- m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
- }
- }
+ /** \returns a non-const reference to the value of the matrix at position \a i, \a j.
+ *
+ * If the element does not exist then it is inserted via the insert(Index,Index) function
+ * which itself turns the matrix into a non compressed form if that was not the case.
+ * The output parameter `inserted` is set to true.
+ *
+ * Otherwise, if the element does exist, `inserted` will be set to false.
+ *
+ * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
+ * function if the element does not already exist.
+ */
+ inline Scalar& findOrInsertCoeff(Index row, Index col, bool* inserted) {
+ eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ Index start = m_outerIndex[outer];
+ Index end = isCompressed() ? m_outerIndex[outer + 1] : m_outerIndex[outer] + m_innerNonZeros[outer];
+ eigen_assert(end >= start && "you probably called coeffRef on a non finalized matrix");
+ Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
+ if (dst == end) {
+ Index capacity = m_outerIndex[outer + 1] - end;
+ if (capacity > 0) {
+ // implies uncompressed: push to back of vector
+ m_innerNonZeros[outer]++;
+ m_data.index(end) = StorageIndex(inner);
+ m_data.value(end) = Scalar(0);
+ if (inserted != nullptr) {
+ *inserted = true;
  }
-
- std::swap(m_outerIndex, newOuterIndex);
- std::free(newOuterIndex);
+ return m_data.value(end);
  }
-
  }
- public:
-
- //--- low level purely coherent filling ---
-
- /** \internal
- * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
- * - the nonzero does not already exist
- * - the new coefficient is the last one according to the storage order
- *
- * Before filling a given inner vector you must call the statVec(Index) function.
- *
- * After an insertion session, you should call the finalize() function.
- *
- * \sa insert, insertBackByOuterInner, startVec */
- inline Scalar& insertBack(Index row, Index col)
- {
- return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ if ((dst < end) && (m_data.index(dst) == inner)) {
+ // this coefficient exists, return a reference to it
+ if (inserted != nullptr) {
+ *inserted = false;
+ }
+ return m_data.value(dst);
+ } else {
+ if (inserted != nullptr) {
+ *inserted = true;
+ }
+ // insertion will require reconfiguring the buffer
+ return insertAtByOuterInner(outer, inner, dst);
  }
+ }
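findOrInsertCoeff() is a new public member in this release; as the following hunk lines show, coeffRef() now forwards to it with a null `inserted` pointer. A hedged usage sketch, based only on the signature and doc comment above:

    #include <Eigen/SparseCore>
    #include <cstdio>

    int main() {
      Eigen::SparseMatrix<double> A(10, 10);
      bool inserted = false;
      // Sketch based on the new signature; a reference is returned either way,
      // and `inserted` reports whether a new nonzero was created.
      double& v = A.findOrInsertCoeff(3, 4, &inserted);
      v = 2.5;
      std::printf(inserted ? "created new entry\n" : "updated existing entry\n");
      return 0;
    }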
390
266
 
391
- /** \internal
392
- * \sa insertBack, startVec */
393
- inline Scalar& insertBackByOuterInner(Index outer, Index inner)
394
- {
395
- eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396
- eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397
- Index p = m_outerIndex[outer+1];
398
- ++m_outerIndex[outer+1];
399
- m_data.append(Scalar(0), inner);
400
- return m_data.value(p);
267
+ /** \returns a non-const reference to the value of the matrix at position \a i, \a j
268
+ *
269
+ * If the element does not exist then it is inserted via the insert(Index,Index) function
270
+ * which itself turns the matrix into a non-compressed form if it was not already.
271
+ *
272
+ * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
273
+ * function if the element does not already exist.
274
+ */
275
+ inline Scalar& coeffRef(Index row, Index col) { return findOrInsertCoeff(row, col, nullptr); }
276
+
277
+ /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
278
+ * The non zero coefficient must \b not already exist.
279
+ *
280
+ * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
281
+ * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
282
+ * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to
283
+ * be inserted by increasing outer-indices.
284
+ *
285
+ * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to
286
+ * first call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
287
+ *
288
+ * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
289
+ * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random
290
+ * insertion.
291
+ *
292
+ */
293
+ inline Scalar& insert(Index row, Index col);
294
+
295
+ public:
296
+ /** Removes all non zeros but keeps the allocated memory
297
+ *
298
+ * This function does not free the currently allocated memory. To release as much memory as possible,
299
+ * call \code mat.data().squeeze(); \endcode after resizing it.
300
+ *
301
+ * \sa resize(Index,Index), data()
302
+ */
303
+ inline void setZero() {
304
+ m_data.clear();
305
+ using std::fill_n;
306
+ fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
307
+ if (m_innerNonZeros) {
308
+ fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
401
309
  }
310
+ }
402
311
 
403
- /** \internal
404
- * \warning use it only if you know what you are doing */
405
- inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
406
- {
407
- Index p = m_outerIndex[outer+1];
408
- ++m_outerIndex[outer+1];
409
- m_data.append(Scalar(0), inner);
410
- return m_data.value(p);
411
- }
312
+ /** Preallocates \a reserveSize non zeros.
313
+ *
314
+ * Precondition: the matrix must be in compressed mode. */
315
+ inline void reserve(Index reserveSize) {
316
+ eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
317
+ m_data.reserve(reserveSize);
318
+ }
412
319
 
413
- /** \internal
414
- * \sa insertBack, insertBackByOuterInner */
415
- inline void startVec(Index outer)
416
- {
417
- eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418
- eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419
- m_outerIndex[outer+1] = m_outerIndex[outer];
320
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
321
+ /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
322
+ *
323
+ * This function turns the matrix into non-compressed mode.
324
+ *
325
+ * The type \c SizesType must expose the following interface:
326
+ \code
327
+ typedef value_type;
328
+ const value_type& operator[](i) const;
329
+ \endcode
330
+ * for \c i in the [0,this->outerSize()[ range.
331
+ * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
332
+ */
333
+ template <class SizesType>
334
+ inline void reserve(const SizesType& reserveSizes);
335
+ #else
336
+ template <class SizesType>
337
+ inline void reserve(const SizesType& reserveSizes,
338
+ const typename SizesType::value_type& enableif = typename SizesType::value_type()) {
339
+ EIGEN_UNUSED_VARIABLE(enableif);
340
+ reserveInnerVectors(reserveSizes);
341
+ }
342
+ #endif // EIGEN_PARSED_BY_DOXYGEN
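For reference, the canonical pattern combining the reserve() overloads above with insert(Index,Index) reads as follows; a sketch with illustrative sizes:

    Eigen::SparseMatrix<double> A(1000, 1000);           // column-major by default
    A.reserve(Eigen::VectorXi::Constant(A.cols(), 6));   // ~6 non-zeros per column
    for (Eigen::Index j = 0; j < A.cols(); ++j)
      A.insert(j, j) = 1.0;                              // entries must not already exist
    A.makeCompressed();                                  // pack back into compressed mode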
343
+ protected:
344
+ template <class SizesType>
345
+ inline void reserveInnerVectors(const SizesType& reserveSizes) {
346
+ if (isCompressed()) {
347
+ Index totalReserveSize = 0;
348
+ for (Index j = 0; j < m_outerSize; ++j) totalReserveSize += internal::convert_index<Index>(reserveSizes[j]);
349
+
350
+ // if reserveSizes is empty, don't do anything!
351
+ if (totalReserveSize == 0) return;
352
+
353
+ // turn the matrix into non-compressed mode
354
+ m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
355
+
356
+ // temporarily use m_innerNonZeros to hold the new starting points.
357
+ StorageIndex* newOuterIndex = m_innerNonZeros;
358
+
359
+ Index count = 0;
360
+ for (Index j = 0; j < m_outerSize; ++j) {
361
+ newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
362
+ Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
363
+ count += reserveSize + internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j]);
364
+ }
365
+
366
+ m_data.reserve(totalReserveSize);
367
+ StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
368
+ for (Index j = m_outerSize - 1; j >= 0; --j) {
369
+ StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
370
+ StorageIndex begin = m_outerIndex[j];
371
+ StorageIndex end = begin + innerNNZ;
372
+ StorageIndex target = newOuterIndex[j];
373
+ internal::smart_memmove(innerIndexPtr() + begin, innerIndexPtr() + end, innerIndexPtr() + target);
374
+ internal::smart_memmove(valuePtr() + begin, valuePtr() + end, valuePtr() + target);
375
+ previousOuterIndex = m_outerIndex[j];
376
+ m_outerIndex[j] = newOuterIndex[j];
377
+ m_innerNonZeros[j] = innerNNZ;
378
+ }
379
+ if (m_outerSize > 0)
380
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1] +
381
+ internal::convert_index<StorageIndex>(reserveSizes[m_outerSize - 1]);
382
+
383
+ m_data.resize(m_outerIndex[m_outerSize]);
384
+ } else {
385
+ StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);
386
+
387
+ Index count = 0;
388
+ for (Index j = 0; j < m_outerSize; ++j) {
389
+ newOuterIndex[j] = internal::convert_index<StorageIndex>(count);
390
+ Index alreadyReserved =
391
+ internal::convert_index<Index>(m_outerIndex[j + 1] - m_outerIndex[j] - m_innerNonZeros[j]);
392
+ Index reserveSize = internal::convert_index<Index>(reserveSizes[j]);
393
+ Index toReserve = numext::maxi(reserveSize, alreadyReserved);
394
+ count += toReserve + internal::convert_index<Index>(m_innerNonZeros[j]);
395
+ }
396
+ newOuterIndex[m_outerSize] = internal::convert_index<StorageIndex>(count);
397
+
398
+ m_data.resize(count);
399
+ for (Index j = m_outerSize - 1; j >= 0; --j) {
400
+ StorageIndex innerNNZ = m_innerNonZeros[j];
401
+ StorageIndex begin = m_outerIndex[j];
402
+ StorageIndex target = newOuterIndex[j];
403
+ m_data.moveChunk(begin, target, innerNNZ);
404
+ }
405
+
406
+ std::swap(m_outerIndex, newOuterIndex);
407
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
420
408
  }
409
+ }
421
410
 
422
- /** \internal
423
- * Must be called after inserting a set of non zero entries using the low level compressed API.
424
- */
425
- inline void finalize()
426
- {
427
- if(isCompressed())
428
- {
429
- StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430
- Index i = m_outerSize;
431
- // find the last filled column
432
- while (i>=0 && m_outerIndex[i]==0)
433
- --i;
411
+ public:
412
+ //--- low level purely coherent filling ---
413
+
414
+ /** \internal
415
+ * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
416
+ * - the nonzero does not already exist
417
+ * - the new coefficient is the last one according to the storage order
418
+ *
419
+ * Before filling a given inner vector you must call the startVec(Index) function.
420
+ *
421
+ * After an insertion session, you should call the finalize() function.
422
+ *
423
+ * \sa insert, insertBackByOuterInner, startVec */
424
+ inline Scalar& insertBack(Index row, Index col) {
425
+ return insertBackByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
426
+ }
427
+
428
+ /** \internal
429
+ * \sa insertBack, startVec */
430
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner) {
431
+ eigen_assert(Index(m_outerIndex[outer + 1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
432
+ eigen_assert((m_outerIndex[outer + 1] - m_outerIndex[outer] == 0 || m_data.index(m_data.size() - 1) < inner) &&
433
+ "Invalid ordered insertion (invalid inner index)");
434
+ StorageIndex p = m_outerIndex[outer + 1];
435
+ ++m_outerIndex[outer + 1];
436
+ m_data.append(Scalar(0), inner);
437
+ return m_data.value(p);
438
+ }
439
+
440
+ /** \internal
441
+ * \warning use it only if you know what you are doing */
442
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) {
443
+ StorageIndex p = m_outerIndex[outer + 1];
444
+ ++m_outerIndex[outer + 1];
445
+ m_data.append(Scalar(0), inner);
446
+ return m_data.value(p);
447
+ }
448
+
449
+ /** \internal
450
+ * \sa insertBack, insertBackByOuterInner */
451
+ inline void startVec(Index outer) {
452
+ eigen_assert(m_outerIndex[outer] == Index(m_data.size()) &&
453
+ "You must call startVec for each inner vector sequentially");
454
+ eigen_assert(m_outerIndex[outer + 1] == 0 && "You must call startVec for each inner vector sequentially");
455
+ m_outerIndex[outer + 1] = m_outerIndex[outer];
456
+ }
457
+
458
+ /** \internal
459
+ * Must be called after inserting a set of non zero entries using the low level compressed API.
460
+ */
461
+ inline void finalize() {
462
+ if (isCompressed()) {
463
+ StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
464
+ Index i = m_outerSize;
465
+ // find the last filled column
466
+ while (i >= 0 && m_outerIndex[i] == 0) --i;
467
+ ++i;
468
+ while (i <= m_outerSize) {
469
+ m_outerIndex[i] = size;
434
470
  ++i;
435
- while (i<=m_outerSize)
436
- {
437
- m_outerIndex[i] = size;
438
- ++i;
439
- }
440
471
  }
441
472
  }
473
+ }
474
+
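A sketch of the low-level filling session documented above, assuming entries are produced strictly in storage order (the identity build is illustrative):

    Eigen::SparseMatrix<double> identityViaLowLevelApi(Eigen::Index n) {
      Eigen::SparseMatrix<double> A(n, n);
      A.reserve(n);                          // total number of entries we will push
      for (Eigen::Index j = 0; j < A.outerSize(); ++j) {
        A.startVec(j);                       // once per outer vector, in increasing order
        A.insertBack(j, j) = 1.0;            // inner indices strictly increasing within j
      }
      A.finalize();                          // patch the trailing outer indices
      return A;
    }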
475
+ // remove outer vectors j, j+1 ... j+num-1 and resize the matrix
476
+ void removeOuterVectors(Index j, Index num = 1) {
477
+ eigen_assert(num >= 0 && j >= 0 && j + num <= m_outerSize && "Invalid parameters");
478
+
479
+ const Index newRows = IsRowMajor ? m_outerSize - num : rows();
480
+ const Index newCols = IsRowMajor ? cols() : m_outerSize - num;
481
+
482
+ const Index begin = j + num;
483
+ const Index end = m_outerSize;
484
+ const Index target = j;
485
+
486
+ // if the removed vectors are not empty, uncompress the matrix
487
+ if (m_outerIndex[j + num] > m_outerIndex[j]) uncompress();
488
+
489
+ // shift m_outerIndex and m_innerNonZeros [num] to the left
490
+ internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
491
+ if (!isCompressed())
492
+ internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
493
+
494
+ // if m_outerIndex[0] > 0, shift the data within the first vector while it is easy to do so
495
+ if (m_outerIndex[0] > StorageIndex(0)) {
496
+ uncompress();
497
+ const Index from = internal::convert_index<Index>(m_outerIndex[0]);
498
+ const Index to = Index(0);
499
+ const Index chunkSize = internal::convert_index<Index>(m_innerNonZeros[0]);
500
+ m_data.moveChunk(from, to, chunkSize);
501
+ m_outerIndex[0] = StorageIndex(0);
502
+ }
442
503
 
443
- //---
504
+ // truncate the matrix to the smaller size
505
+ conservativeResize(newRows, newCols);
506
+ }
444
507
 
445
- template<typename InputIterators>
446
- void setFromTriplets(const InputIterators& begin, const InputIterators& end);
508
+ // insert empty outer vectors at indices j, j+1 ... j+num-1 and resize the matrix
509
+ void insertEmptyOuterVectors(Index j, Index num = 1) {
510
+ using std::fill_n;
511
+ eigen_assert(num >= 0 && j >= 0 && j < m_outerSize && "Invalid parameters");
447
512
 
448
- template<typename InputIterators,typename DupFunctor>
449
- void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
513
+ const Index newRows = IsRowMajor ? m_outerSize + num : rows();
514
+ const Index newCols = IsRowMajor ? cols() : m_outerSize + num;
450
515
 
451
- void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
516
+ const Index begin = j;
517
+ const Index end = m_outerSize;
518
+ const Index target = j + num;
452
519
 
453
- template<typename DupFunctor>
454
- void collapseDuplicates(DupFunctor dup_func = DupFunctor());
520
+ // expand the matrix to the larger size
521
+ conservativeResize(newRows, newCols);
455
522
 
456
- //---
457
-
458
- /** \internal
459
- * same as insert(Index,Index) except that the indices are given relative to the storage order */
460
- Scalar& insertByOuterInner(Index j, Index i)
461
- {
462
- return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
523
+ // shift m_outerIndex and m_innerNonZeros [num] to the right
524
+ internal::smart_memmove(m_outerIndex + begin, m_outerIndex + end + 1, m_outerIndex + target);
525
+ // m_outerIndex[begin] == m_outerIndex[target], set all indices in this range to the same value
526
+ fill_n(m_outerIndex + begin, num, m_outerIndex[begin]);
527
+
528
+ if (!isCompressed()) {
529
+ internal::smart_memmove(m_innerNonZeros + begin, m_innerNonZeros + end, m_innerNonZeros + target);
530
+ // set the nonzeros of the newly inserted vectors to 0
531
+ fill_n(m_innerNonZeros + begin, num, StorageIndex(0));
463
532
  }
533
+ }
464
534
 
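removeOuterVectors() and insertEmptyOuterVectors() above are new public API in this release; a sketch of their effect, assuming `A` is an 8x8 column-major matrix from the earlier sketches (so outer vectors are columns; indices illustrative):

    A.insertEmptyOuterVectors(2, 3);  // three empty columns inserted before column 2 -> 8x11
    A.removeOuterVectors(5, 2);       // columns 5 and 6 removed -> 8x9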
465
- /** Turns the matrix into the \em compressed format.
466
- */
467
- void makeCompressed()
468
- {
469
- if(isCompressed())
470
- return;
471
-
472
- eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
473
-
474
- Index oldStart = m_outerIndex[1];
475
- m_outerIndex[1] = m_innerNonZeros[0];
476
- for(Index j=1; j<m_outerSize; ++j)
477
- {
478
- Index nextOldStart = m_outerIndex[j+1];
479
- Index offset = oldStart - m_outerIndex[j];
480
- if(offset>0)
481
- {
482
- for(Index k=0; k<m_innerNonZeros[j]; ++k)
483
- {
484
- m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485
- m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486
- }
487
- }
488
- m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
489
- oldStart = nextOldStart;
535
+ template <typename InputIterators>
536
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end);
537
+
538
+ template <typename InputIterators, typename DupFunctor>
539
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
540
+
541
+ template <typename Derived, typename DupFunctor>
542
+ void collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func = DupFunctor());
543
+
544
+ template <typename InputIterators>
545
+ void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
546
+
547
+ template <typename InputIterators, typename DupFunctor>
548
+ void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
549
+
550
+ template <typename InputIterators>
551
+ void insertFromTriplets(const InputIterators& begin, const InputIterators& end);
552
+
553
+ template <typename InputIterators, typename DupFunctor>
554
+ void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
555
+
556
+ template <typename InputIterators>
557
+ void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
558
+
559
+ template <typename InputIterators, typename DupFunctor>
560
+ void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
561
+
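Alongside the new sorted and insert-style variants declared above, the classic triplet assembly path still reads as follows (values illustrative):

    #include <vector>

    std::vector<Eigen::Triplet<double>> t;
    t.emplace_back(0, 0, 4.0);
    t.emplace_back(1, 0, -1.0);
    t.emplace_back(0, 0, 1.0);              // duplicate (0,0): summed by default
    Eigen::SparseMatrix<double> A(3, 3);
    A.setFromTriplets(t.begin(), t.end());  // A.coeff(0,0) == 5.0
    // The DupFunctor overload chooses how duplicates are combined instead:
    A.setFromTriplets(t.begin(), t.end(), [](double, double b) { return b; });  // keep last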
562
+ //---
563
+
564
+ /** \internal
565
+ * same as insert(Index,Index) except that the indices are given relative to the storage order */
566
+ Scalar& insertByOuterInner(Index j, Index i) {
567
+ eigen_assert(j >= 0 && j < m_outerSize && "invalid outer index");
568
+ eigen_assert(i >= 0 && i < m_innerSize && "invalid inner index");
569
+ Index start = m_outerIndex[j];
570
+ Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
571
+ Index dst = start == end ? end : m_data.searchLowerIndex(start, end, i);
572
+ if (dst == end) {
573
+ Index capacity = m_outerIndex[j + 1] - end;
574
+ if (capacity > 0) {
575
+ // implies uncompressed: push to back of vector
576
+ m_innerNonZeros[j]++;
577
+ m_data.index(end) = StorageIndex(i);
578
+ m_data.value(end) = Scalar(0);
579
+ return m_data.value(end);
490
580
  }
491
- std::free(m_innerNonZeros);
492
- m_innerNonZeros = 0;
493
- m_data.resize(m_outerIndex[m_outerSize]);
494
- m_data.squeeze();
495
581
  }
582
+ eigen_assert((dst == end || m_data.index(dst) != i) &&
583
+ "you cannot insert an element that already exists, you must call coeffRef to this end");
584
+ return insertAtByOuterInner(j, i, dst);
585
+ }
496
586
 
497
- /** Turns the matrix into the uncompressed mode */
498
- void uncompress()
499
- {
500
- if(m_innerNonZeros != 0)
501
- return;
502
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503
- for (Index i = 0; i < m_outerSize; i++)
504
- {
505
- m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
587
+ /** Turns the matrix into the \em compressed format.
588
+ */
589
+ void makeCompressed() {
590
+ if (isCompressed()) return;
591
+
592
+ eigen_internal_assert(m_outerIndex != 0 && m_outerSize > 0);
593
+
594
+ StorageIndex start = m_outerIndex[1];
595
+ m_outerIndex[1] = m_innerNonZeros[0];
596
+ // try to move fewer, larger contiguous chunks
597
+ Index copyStart = start;
598
+ Index copyTarget = m_innerNonZeros[0];
599
+ for (Index j = 1; j < m_outerSize; j++) {
600
+ StorageIndex end = start + m_innerNonZeros[j];
601
+ StorageIndex nextStart = m_outerIndex[j + 1];
602
+ // don't forget to move the last chunk!
603
+ bool breakUpCopy = (end != nextStart) || (j == m_outerSize - 1);
604
+ if (breakUpCopy) {
605
+ Index chunkSize = end - copyStart;
606
+ if (chunkSize > 0) m_data.moveChunk(copyStart, copyTarget, chunkSize);
607
+ copyStart = nextStart;
608
+ copyTarget += chunkSize;
506
609
  }
610
+ start = nextStart;
611
+ m_outerIndex[j + 1] = m_outerIndex[j] + m_innerNonZeros[j];
507
612
  }
613
+ m_data.resize(m_outerIndex[m_outerSize]);
508
614
 
509
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
510
- void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511
- {
512
- prune(default_prunning_func(reference,epsilon));
615
+ // release as much memory as possible
616
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
617
+ m_innerNonZeros = 0;
618
+ m_data.squeeze();
619
+ }
620
+
621
+ /** Turns the matrix into the uncompressed mode */
622
+ void uncompress() {
623
+ if (!isCompressed()) return;
624
+ m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
625
+ if (m_outerIndex[m_outerSize] == 0) {
626
+ using std::fill_n;
627
+ fill_n(m_innerNonZeros, m_outerSize, StorageIndex(0));
628
+ } else {
629
+ for (Index j = 0; j < m_outerSize; j++) m_innerNonZeros[j] = m_outerIndex[j + 1] - m_outerIndex[j];
513
630
  }
514
-
515
- /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
516
- * The functor type \a KeepFunc must implement the following function:
517
- * \code
518
- * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
519
- * \endcode
520
- * \sa prune(Scalar,RealScalar)
521
- */
522
- template<typename KeepFunc>
523
- void prune(const KeepFunc& keep = KeepFunc())
524
- {
525
- // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
526
- makeCompressed();
631
+ }
632
+
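A short round trip between the two storage modes implemented above, continuing with an existing matrix `A`:

    #include <cassert>

    A.uncompress();       // allocates the inner-nonzero counts; values are untouched
    assert(!A.isCompressed());
    A.makeCompressed();   // packs the entries, frees the counts, squeezes extra capacity
    assert(A.isCompressed());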
633
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
634
+ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) {
635
+ prune(default_prunning_func(reference, epsilon));
636
+ }
527
637
 
528
- StorageIndex k = 0;
529
- for(Index j=0; j<m_outerSize; ++j)
530
- {
531
- Index previousStart = m_outerIndex[j];
638
+ /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
639
+ * The functor type \a KeepFunc must implement the following function:
640
+ * \code
641
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
642
+ * \endcode
643
+ * \sa prune(Scalar,RealScalar)
644
+ */
645
+ template <typename KeepFunc>
646
+ void prune(const KeepFunc& keep = KeepFunc()) {
647
+ StorageIndex k = 0;
648
+ for (Index j = 0; j < m_outerSize; ++j) {
649
+ StorageIndex previousStart = m_outerIndex[j];
650
+ if (isCompressed())
532
651
  m_outerIndex[j] = k;
533
- Index end = m_outerIndex[j+1];
534
- for(Index i=previousStart; i<end; ++i)
535
- {
536
- if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537
- {
538
- m_data.value(k) = m_data.value(i);
539
- m_data.index(k) = m_data.index(i);
540
- ++k;
541
- }
542
- }
652
+ else
653
+ k = m_outerIndex[j];
654
+ StorageIndex end = isCompressed() ? m_outerIndex[j + 1] : previousStart + m_innerNonZeros[j];
655
+ for (StorageIndex i = previousStart; i < end; ++i) {
656
+ StorageIndex row = IsRowMajor ? StorageIndex(j) : m_data.index(i);
657
+ StorageIndex col = IsRowMajor ? m_data.index(i) : StorageIndex(j);
658
+ bool keepEntry = keep(row, col, m_data.value(i));
659
+ if (keepEntry) {
660
+ m_data.value(k) = m_data.value(i);
661
+ m_data.index(k) = m_data.index(i);
662
+ ++k;
663
+ } else if (!isCompressed())
664
+ m_innerNonZeros[j]--;
543
665
  }
666
+ }
667
+ if (isCompressed()) {
544
668
  m_outerIndex[m_outerSize] = k;
545
- m_data.resize(k,0);
669
+ m_data.resize(k, 0);
546
670
  }
671
+ }
547
672
 
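The predicate overload above accepts any callable with that signature, including a lambda; a sketch assuming `A` as before (the 1e-12 threshold is illustrative):

    #include <cmath>

    // Keep only the lower triangle, dropping anything of tiny magnitude.
    A.prune([](Eigen::Index row, Eigen::Index col, const double& value) {
      return row >= col && std::abs(value) > 1e-12;
    });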
548
- /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
549
- *
550
- * If the sizes of the matrix are decreased, then the matrix is turned to \b uncompressed-mode
551
- * and the storage of the out of bounds coefficients is kept and reserved.
552
- * Call makeCompressed() to pack the entries and squeeze extra memory.
553
- *
554
- * \sa reserve(), setZero(), makeCompressed()
555
- */
556
- void conservativeResize(Index rows, Index cols)
557
- {
558
- // No change
559
- if (this->rows() == rows && this->cols() == cols) return;
560
-
561
- // If one dimension is null, then there is nothing to be preserved
562
- if(rows==0 || cols==0) return resize(rows,cols);
563
-
564
- Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565
- Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566
- StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567
-
568
- // Deals with inner non zeros
569
- if (m_innerNonZeros)
570
- {
571
- // Resize m_innerNonZeros
572
- StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573
- if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574
- m_innerNonZeros = newInnerNonZeros;
575
-
576
- for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577
- m_innerNonZeros[i] = 0;
578
- }
579
- else if (innerChange < 0)
580
- {
581
- // Inner size decreased: allocate a new m_innerNonZeros
582
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
583
- if (!m_innerNonZeros) internal::throw_std_bad_alloc();
584
- for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585
- m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
586
- for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
587
- m_innerNonZeros[i] = 0;
588
- }
589
-
590
- // Change the m_innerNonZeros in case of a decrease of inner size
591
- if (m_innerNonZeros && innerChange < 0)
592
- {
593
- for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
594
- {
595
- StorageIndex &n = m_innerNonZeros[i];
596
- StorageIndex start = m_outerIndex[i];
597
- while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
598
- }
673
+ /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
674
+ *
675
+ * If the sizes of the matrix are decreased, then the matrix is turned to \b uncompressed-mode
676
+ * and the storage of the out of bounds coefficients is kept and reserved.
677
+ * Call makeCompressed() to pack the entries and squeeze extra memory.
678
+ *
679
+ * \sa reserve(), setZero(), makeCompressed()
680
+ */
681
+ void conservativeResize(Index rows, Index cols) {
682
+ // If one dimension is null, then there is nothing to be preserved
683
+ if (rows == 0 || cols == 0) return resize(rows, cols);
684
+
685
+ Index newOuterSize = IsRowMajor ? rows : cols;
686
+ Index newInnerSize = IsRowMajor ? cols : rows;
687
+
688
+ Index innerChange = newInnerSize - m_innerSize;
689
+ Index outerChange = newOuterSize - m_outerSize;
690
+
691
+ if (outerChange != 0) {
692
+ m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, newOuterSize + 1,
693
+ m_outerSize + 1);
694
+
695
+ if (!isCompressed())
696
+ m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_innerNonZeros,
697
+ newOuterSize, m_outerSize);
698
+
699
+ if (outerChange > 0) {
700
+ StorageIndex lastIdx = m_outerSize == 0 ? StorageIndex(0) : m_outerIndex[m_outerSize];
701
+ using std::fill_n;
702
+ fill_n(m_outerIndex + m_outerSize, outerChange + 1, lastIdx);
703
+
704
+ if (!isCompressed()) fill_n(m_innerNonZeros + m_outerSize, outerChange, StorageIndex(0));
599
705
  }
600
-
601
- m_innerSize = newInnerSize;
602
-
603
- // Re-allocate outer index structure if necessary
604
- if (outerChange == 0)
605
- return;
606
-
607
- StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
608
- if (!newOuterIndex) internal::throw_std_bad_alloc();
609
- m_outerIndex = newOuterIndex;
610
- if (outerChange > 0)
611
- {
612
- StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
613
- for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
614
- m_outerIndex[i] = lastIdx;
615
- }
616
- m_outerSize += outerChange;
617
706
  }
618
-
619
- /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
620
- *
621
- * This function does not free the currently allocated memory. To release as much as memory as possible,
622
- * call \code mat.data().squeeze(); \endcode after resizing it.
623
- *
624
- * \sa reserve(), setZero()
625
- */
626
- void resize(Index rows, Index cols)
627
- {
628
- const Index outerSize = IsRowMajor ? rows : cols;
629
- m_innerSize = IsRowMajor ? cols : rows;
630
- m_data.clear();
631
- if (m_outerSize != outerSize || m_outerSize==0)
632
- {
633
- std::free(m_outerIndex);
634
- m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
635
- if (!m_outerIndex) internal::throw_std_bad_alloc();
636
-
637
- m_outerSize = outerSize;
638
- }
639
- if(m_innerNonZeros)
640
- {
641
- std::free(m_innerNonZeros);
642
- m_innerNonZeros = 0;
707
+ m_outerSize = newOuterSize;
708
+
709
+ if (innerChange < 0) {
710
+ for (Index j = 0; j < m_outerSize; j++) {
711
+ Index start = m_outerIndex[j];
712
+ Index end = isCompressed() ? m_outerIndex[j + 1] : start + m_innerNonZeros[j];
713
+ Index lb = m_data.searchLowerIndex(start, end, newInnerSize);
714
+ if (lb != end) {
715
+ uncompress();
716
+ m_innerNonZeros[j] = StorageIndex(lb - start);
717
+ }
643
718
  }
644
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
645
719
  }
720
+ m_innerSize = newInnerSize;
646
721
 
647
- /** \internal
648
- * Resize the nonzero vector to \a size */
649
- void resizeNonZeros(Index size)
650
- {
651
- m_data.resize(size);
652
- }
722
+ Index newSize = m_outerIndex[m_outerSize];
723
+ eigen_assert(newSize <= m_data.size());
724
+ m_data.resize(newSize);
725
+ }
653
726
 
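A sketch of the resize semantics described above (sizes illustrative):

    Eigen::SparseMatrix<double> M(4, 4);
    M.insert(1, 1) = 2.0;
    M.conservativeResize(6, 3);  // grow rows, shrink cols; the entry at (1,1) survives
    M.conservativeResize(6, 1);  // column 1 is now out of bounds, so its entry is dropped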
654
- /** \returns a const expression of the diagonal coefficients. */
655
- const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
656
-
657
- /** \returns a read-write expression of the diagonal coefficients.
658
- * \warning If the diagonal entries are written, then all diagonal
659
- * entries \b must already exist, otherwise an assertion will be raised.
660
- */
661
- DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
662
-
663
- /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
664
- inline SparseMatrix()
665
- : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
666
- {
667
- check_template_parameters();
668
- resize(0, 0);
727
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
728
+ *
729
+ * This function does not free the currently allocated memory. To release as much memory as possible,
730
+ * call \code mat.data().squeeze(); \endcode after resizing it.
731
+ *
732
+ * \sa reserve(), setZero()
733
+ */
734
+ void resize(Index rows, Index cols) {
735
+ const Index outerSize = IsRowMajor ? rows : cols;
736
+ m_innerSize = IsRowMajor ? cols : rows;
737
+ m_data.clear();
738
+
739
+ if ((m_outerIndex == 0) || (m_outerSize != outerSize)) {
740
+ m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
741
+ m_outerSize + 1);
742
+ m_outerSize = outerSize;
669
743
  }
670
744
 
671
- /** Constructs a \a rows \c x \a cols empty matrix */
672
- inline SparseMatrix(Index rows, Index cols)
673
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674
- {
675
- check_template_parameters();
676
- resize(rows, cols);
677
- }
745
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
746
+ m_innerNonZeros = 0;
678
747
 
679
- /** Constructs a sparse matrix from the sparse expression \a other */
680
- template<typename OtherDerived>
681
- inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
682
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
683
- {
684
- EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
685
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686
- check_template_parameters();
687
- const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688
- if (needToTranspose)
689
- *this = other.derived();
690
- else
691
- {
692
- #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693
- EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694
- #endif
695
- internal::call_assignment_no_alias(*this, other.derived());
696
- }
697
- }
698
-
699
- /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
700
- template<typename OtherDerived, unsigned int UpLo>
701
- inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
702
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703
- {
704
- check_template_parameters();
705
- Base::operator=(other);
706
- }
748
+ using std::fill_n;
749
+ fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
750
+ }
707
751
 
708
- /** Copy constructor (it performs a deep copy) */
709
- inline SparseMatrix(const SparseMatrix& other)
710
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711
- {
712
- check_template_parameters();
713
- *this = other.derived();
714
- }
752
+ /** \internal
753
+ * Resize the nonzero vector to \a size */
754
+ void resizeNonZeros(Index size) { m_data.resize(size); }
715
755
 
716
- /** \brief Copy constructor with in-place evaluation */
717
- template<typename OtherDerived>
718
- SparseMatrix(const ReturnByValue<OtherDerived>& other)
719
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720
- {
721
- check_template_parameters();
722
- initAssignment(other);
723
- other.evalTo(*this);
724
- }
725
-
726
- /** \brief Copy constructor with in-place evaluation */
727
- template<typename OtherDerived>
728
- explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
729
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
730
- {
731
- check_template_parameters();
756
+ /** \returns a const expression of the diagonal coefficients. */
757
+ const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
758
+
759
+ /** \returns a read-write expression of the diagonal coefficients.
760
+ * \warning If the diagonal entries are written, then all diagonal
761
+ * entries \b must already exist, otherwise an assertion will be raised.
762
+ */
763
+ DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
764
+
765
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
766
+ inline SparseMatrix() : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { resize(0, 0); }
767
+
768
+ /** Constructs a \a rows \c x \a cols empty matrix */
769
+ inline SparseMatrix(Index rows, Index cols) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
770
+ resize(rows, cols);
771
+ }
772
+
773
+ /** Constructs a sparse matrix from the sparse expression \a other */
774
+ template <typename OtherDerived>
775
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
776
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
777
+ EIGEN_STATIC_ASSERT(
778
+ (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
779
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
780
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
781
+ if (needToTranspose)
732
782
  *this = other.derived();
783
+ else {
784
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
785
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
786
+ #endif
787
+ internal::call_assignment_no_alias(*this, other.derived());
733
788
  }
789
+ }
734
790
 
735
- /** Swaps the content of two sparse matrices of the same type.
736
- * This is a fast operation that simply swaps the underlying pointers and parameters. */
737
- inline void swap(SparseMatrix& other)
738
- {
739
- //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
740
- std::swap(m_outerIndex, other.m_outerIndex);
741
- std::swap(m_innerSize, other.m_innerSize);
742
- std::swap(m_outerSize, other.m_outerSize);
743
- std::swap(m_innerNonZeros, other.m_innerNonZeros);
744
- m_data.swap(other.m_data);
745
- }
791
+ /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
792
+ template <typename OtherDerived, unsigned int UpLo>
793
+ inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
794
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
795
+ Base::operator=(other);
796
+ }
746
797
 
747
- /** Sets *this to the identity matrix.
748
- * This function also turns the matrix into compressed mode, and drop any reserved memory. */
749
- inline void setIdentity()
750
- {
751
- eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752
- this->m_data.resize(rows());
753
- Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754
- Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755
- Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
756
- std::free(m_innerNonZeros);
757
- m_innerNonZeros = 0;
758
- }
759
- inline SparseMatrix& operator=(const SparseMatrix& other)
760
- {
761
- if (other.isRValue())
762
- {
763
- swap(other.const_cast_derived());
764
- }
765
- else if(this!=&other)
766
- {
767
- #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
768
- EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
769
- #endif
770
- initAssignment(other);
771
- if(other.isCompressed())
772
- {
773
- internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
774
- m_data = other.m_data;
775
- }
776
- else
777
- {
778
- Base::operator=(other);
779
- }
798
+ /** Move constructor */
799
+ inline SparseMatrix(SparseMatrix&& other) : SparseMatrix() { this->swap(other); }
800
+
801
+ template <typename OtherDerived>
802
+ inline SparseMatrix(SparseCompressedBase<OtherDerived>&& other) : SparseMatrix() {
803
+ *this = other.derived().markAsRValue();
804
+ }
805
+
806
+ /** Copy constructor (it performs a deep copy) */
807
+ inline SparseMatrix(const SparseMatrix& other)
808
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
809
+ *this = other.derived();
810
+ }
811
+
812
+ /** \brief Copy constructor with in-place evaluation */
813
+ template <typename OtherDerived>
814
+ SparseMatrix(const ReturnByValue<OtherDerived>& other)
815
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
816
+ initAssignment(other);
817
+ other.evalTo(*this);
818
+ }
819
+
820
+ /** \brief Copy constructor with in-place evaluation */
821
+ template <typename OtherDerived>
822
+ explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
823
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) {
824
+ *this = other.derived();
825
+ }
826
+
827
+ /** Swaps the content of two sparse matrices of the same type.
828
+ * This is a fast operation that simply swaps the underlying pointers and parameters. */
829
+ inline void swap(SparseMatrix& other) {
830
+ // EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
831
+ std::swap(m_outerIndex, other.m_outerIndex);
832
+ std::swap(m_innerSize, other.m_innerSize);
833
+ std::swap(m_outerSize, other.m_outerSize);
834
+ std::swap(m_innerNonZeros, other.m_innerNonZeros);
835
+ m_data.swap(other.m_data);
836
+ }
837
+ /** Free-function swap. */
838
+ friend EIGEN_DEVICE_FUNC void swap(SparseMatrix& a, SparseMatrix& b) { a.swap(b); }
839
+
840
+ /** Sets *this to the identity matrix.
841
+ * This function also turns the matrix into compressed mode, and drops any reserved memory. */
842
+ inline void setIdentity() {
843
+ eigen_assert(m_outerSize == m_innerSize && "ONLY FOR SQUARED MATRICES");
844
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
845
+ m_innerNonZeros = 0;
846
+ m_data.resize(m_outerSize);
847
+ // is it necessary to squeeze?
848
+ m_data.squeeze();
849
+ std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
850
+ std::iota(innerIndexPtr(), innerIndexPtr() + m_outerSize, StorageIndex(0));
851
+ using std::fill_n;
852
+ fill_n(valuePtr(), m_outerSize, Scalar(1));
853
+ }
854
+
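With the rewrite above, setIdentity() builds the compressed identity directly; a usage sketch:

    Eigen::SparseMatrix<double> I(5, 5);
    I.setIdentity();             // compressed mode, exactly one unit entry per column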
855
+ inline SparseMatrix& operator=(const SparseMatrix& other) {
856
+ if (other.isRValue()) {
857
+ swap(other.const_cast_derived());
858
+ } else if (this != &other) {
859
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
860
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
861
+ #endif
862
+ initAssignment(other);
863
+ if (other.isCompressed()) {
864
+ internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
865
+ m_data = other.m_data;
866
+ } else {
867
+ Base::operator=(other);
780
868
  }
781
- return *this;
782
869
  }
870
+ return *this;
871
+ }
872
+
873
+ inline SparseMatrix& operator=(SparseMatrix&& other) {
874
+ this->swap(other);
875
+ return *this;
876
+ }
783
877
 
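The move constructor and move assignment above are new in this release; both reduce to pointer swaps, so the following is O(1) in the matrix size (buildBig() is a hypothetical factory):

    #include <utility>

    Eigen::SparseMatrix<double> buildBig();      // hypothetical factory function
    Eigen::SparseMatrix<double> A = buildBig();  // move-constructed, no deep copy
    Eigen::SparseMatrix<double> B;
    B = std::move(A);                            // swaps buffers; A is left holding B's old empty state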
784
- #ifndef EIGEN_PARSED_BY_DOXYGEN
785
- template<typename OtherDerived>
786
- inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
787
- { return Base::operator=(other.derived()); }
878
+ template <typename OtherDerived>
879
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) {
880
+ return Base::operator=(other.derived());
881
+ }
788
882
 
789
- template<typename Lhs, typename Rhs>
790
- inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
791
- #endif // EIGEN_PARSED_BY_DOXYGEN
883
+ template <typename Lhs, typename Rhs>
884
+ inline SparseMatrix& operator=(const Product<Lhs, Rhs, AliasFreeProduct>& other);
792
885
 
793
- template<typename OtherDerived>
794
- EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
886
+ template <typename OtherDerived>
887
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
795
888
 
796
- friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
797
- {
798
- EIGEN_DBG_SPARSE(
799
- s << "Nonzero entries:\n";
800
- if(m.isCompressed())
801
- {
802
- for (Index i=0; i<m.nonZeros(); ++i)
803
- s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
804
- }
805
- else
806
- {
807
- for (Index i=0; i<m.outerSize(); ++i)
808
- {
889
+ template <typename OtherDerived>
890
+ inline SparseMatrix& operator=(SparseCompressedBase<OtherDerived>&& other) {
891
+ *this = other.derived().markAsRValue();
892
+ return *this;
893
+ }
894
+
895
+ #ifndef EIGEN_NO_IO
896
+ friend std::ostream& operator<<(std::ostream& s, const SparseMatrix& m) {
897
+ EIGEN_DBG_SPARSE(
898
+ s << "Nonzero entries:\n"; if (m.isCompressed()) {
899
+ for (Index i = 0; i < m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
900
+ } else {
901
+ for (Index i = 0; i < m.outerSize(); ++i) {
809
902
  Index p = m.m_outerIndex[i];
810
- Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
811
- Index k=p;
812
- for (; k<pe; ++k) {
903
+ Index pe = m.m_outerIndex[i] + m.m_innerNonZeros[i];
904
+ Index k = p;
905
+ for (; k < pe; ++k) {
813
906
  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
814
907
  }
815
- for (; k<m.m_outerIndex[i+1]; ++k) {
908
+ for (; k < m.m_outerIndex[i + 1]; ++k) {
816
909
  s << "(_,_) ";
817
910
  }
818
911
  }
819
- }
820
- s << std::endl;
821
- s << std::endl;
822
- s << "Outer pointers:\n";
823
- for (Index i=0; i<m.outerSize(); ++i) {
824
- s << m.m_outerIndex[i] << " ";
825
- }
826
- s << " $" << std::endl;
827
- if(!m.isCompressed())
828
- {
912
+ } s << std::endl;
913
+ s << std::endl; s << "Outer pointers:\n";
914
+ for (Index i = 0; i < m.outerSize(); ++i) { s << m.m_outerIndex[i] << " "; } s << " $" << std::endl;
915
+ if (!m.isCompressed()) {
829
916
  s << "Inner non zeros:\n";
830
- for (Index i=0; i<m.outerSize(); ++i) {
917
+ for (Index i = 0; i < m.outerSize(); ++i) {
831
918
  s << m.m_innerNonZeros[i] << " ";
832
919
  }
833
920
  s << " $" << std::endl;
834
- }
835
- s << std::endl;
836
- );
837
- s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
838
- return s;
839
- }
921
+ } s
922
+ << std::endl;);
923
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
924
+ return s;
925
+ }
926
+ #endif
840
927
 
841
- /** Destructor */
842
- inline ~SparseMatrix()
843
- {
844
- std::free(m_outerIndex);
845
- std::free(m_innerNonZeros);
846
- }
928
+ /** Destructor */
929
+ inline ~SparseMatrix() {
930
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
931
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
932
+ }
847
933
 
848
- /** Overloaded for performance */
849
- Scalar sum() const;
850
-
851
- # ifdef EIGEN_SPARSEMATRIX_PLUGIN
852
- # include EIGEN_SPARSEMATRIX_PLUGIN
853
- # endif
934
+ /** Overloaded for performance */
935
+ Scalar sum() const;
854
936
 
855
- protected:
937
+ #ifdef EIGEN_SPARSEMATRIX_PLUGIN
938
+ #include EIGEN_SPARSEMATRIX_PLUGIN
939
+ #endif
856
940
 
857
- template<typename Other>
858
- void initAssignment(const Other& other)
859
- {
860
- resize(other.rows(), other.cols());
861
- if(m_innerNonZeros)
862
- {
863
- std::free(m_innerNonZeros);
864
- m_innerNonZeros = 0;
865
- }
866
- }
941
+ protected:
942
+ template <typename Other>
943
+ void initAssignment(const Other& other) {
944
+ resize(other.rows(), other.cols());
945
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
946
+ m_innerNonZeros = 0;
947
+ }
867
948
 
868
- /** \internal
869
- * \sa insert(Index,Index) */
870
- EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
949
+ /** \internal
950
+ * \sa insert(Index,Index) */
951
+ EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
871
952
 
872
- /** \internal
873
- * A vector object that is equal to 0 everywhere but v at the position i */
874
- class SingletonVector
875
- {
876
- StorageIndex m_index;
877
- StorageIndex m_value;
878
- public:
879
- typedef StorageIndex value_type;
880
- SingletonVector(Index i, Index v)
881
- : m_index(convert_index(i)), m_value(convert_index(v))
882
- {}
883
-
884
- StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
885
- };
886
-
887
- /** \internal
888
- * \sa insert(Index,Index) */
889
- EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
890
-
891
- public:
892
- /** \internal
893
- * \sa insert(Index,Index) */
894
- EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
895
- {
896
- const Index outer = IsRowMajor ? row : col;
897
- const Index inner = IsRowMajor ? col : row;
953
+ /** \internal
954
+ * A vector object that is equal to 0 everywhere but v at the position i */
955
+ class SingletonVector {
956
+ StorageIndex m_index;
957
+ StorageIndex m_value;
898
958
 
899
- eigen_assert(!isCompressed());
900
- eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
959
+ public:
960
+ typedef StorageIndex value_type;
961
+ SingletonVector(Index i, Index v) : m_index(convert_index(i)), m_value(convert_index(v)) {}
901
962
 
902
- Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
903
- m_data.index(p) = convert_index(inner);
904
- return (m_data.value(p) = Scalar(0));
963
+ StorageIndex operator[](Index i) const { return i == m_index ? m_value : 0; }
964
+ };
965
+
966
+ /** \internal
967
+ * \sa insert(Index,Index) */
968
+ EIGEN_DEPRECATED EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
969
+
970
+ public:
971
+ /** \internal
972
+ * \sa insert(Index,Index) */
973
+ EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) {
974
+ const Index outer = IsRowMajor ? row : col;
975
+ const Index inner = IsRowMajor ? col : row;
976
+
977
+ eigen_assert(!isCompressed());
978
+ eigen_assert(m_innerNonZeros[outer] <= (m_outerIndex[outer + 1] - m_outerIndex[outer]));
979
+
980
+ Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
981
+ m_data.index(p) = StorageIndex(inner);
982
+ m_data.value(p) = Scalar(0);
983
+ return m_data.value(p);
984
+ }
985
+
986
+ protected:
987
+ struct IndexPosPair {
988
+ IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
989
+ Index i;
990
+ Index p;
991
+ };
992
+
993
+ /** \internal assign \a diagXpr to the diagonal of \c *this
994
+ * There are different strategies:
995
+ * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
996
+ * 2 - otherwise, for each diagonal coeff,
997
+ *     2.a - if it already exists, then we update it,
998
+ *     2.b - if the correct position is at the end of the vector and there is capacity, push to back,
999
+ *     2.c - otherwise, the insertion requires a data move: record the insertion location and handle it in a second pass.
1000
+ * 3 - at the end, if some entries failed to be updated in-place, alloc a new buffer, copy each chunk to the right
1001
+ * position, and insert the new elements.
+ */
1002
+ template <typename DiagXpr, typename Func>
1003
+ void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc) {
1004
+ constexpr StorageIndex kEmptyIndexVal(-1);
1005
+ typedef typename ScalarVector::AlignedMapType ValueMap;
1006
+
1007
+ Index n = diagXpr.size();
1008
+
1009
+ const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar, Scalar>>::value;
1010
+ if (overwrite) {
1011
+ if ((m_outerSize != n) || (m_innerSize != n)) resize(n, n);
905
1012
  }
906
- protected:
907
- struct IndexPosPair {
908
- IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
909
- Index i;
910
- Index p;
911
- };
912
-
913
- /** \internal assign \a diagXpr to the diagonal of \c *this
914
- * There are different strategies:
915
- * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can work treat *this as a dense vector expression.
916
- * 2 - otherwise, for each diagonal coeff,
917
- * 2.a - if it already exists, then we update it,
918
- * 2.b - otherwise, if *this is uncompressed and that the current inner-vector has empty room for at least 1 element, then we perform an in-place insertion.
919
- * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
920
- * 3 - at the end, if some entries failed to be inserted in-place, then we alloc a new buffer, copy each chunk at the right position, and insert the new elements.
921
- *
922
- * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
923
- * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
924
- * then it *might* be better to disable case 2.b since they will have to be copied anyway.
925
- */
926
- template<typename DiagXpr, typename Func>
927
- void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
928
- {
929
- Index n = diagXpr.size();
930
1013
 
931
- const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
932
- if(overwrite)
933
- {
934
- if((this->rows()!=n) || (this->cols()!=n))
935
- this->resize(n, n);
1014
+ if (m_data.size() == 0 || overwrite) {
1015
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
1016
+ m_innerNonZeros = 0;
1017
+ resizeNonZeros(n);
1018
+ ValueMap valueMap(valuePtr(), n);
1019
+ std::iota(m_outerIndex, m_outerIndex + n + 1, StorageIndex(0));
1020
+ std::iota(innerIndexPtr(), innerIndexPtr() + n, StorageIndex(0));
1021
+ valueMap.setZero();
1022
+ internal::call_assignment_no_alias(valueMap, diagXpr, assignFunc);
1023
+ } else {
1024
+ internal::evaluator<DiagXpr> diaEval(diagXpr);
1025
+
1026
+ ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, n, 0);
1027
+ typename IndexVector::AlignedMapType insertionLocations(tmp, n);
1028
+ insertionLocations.setConstant(kEmptyIndexVal);
1029
+
1030
+ Index deferredInsertions = 0;
1031
+ Index shift = 0;
1032
+
1033
+ for (Index j = 0; j < n; j++) {
1034
+ Index begin = m_outerIndex[j];
1035
+ Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1036
+ Index capacity = m_outerIndex[j + 1] - end;
1037
+ Index dst = m_data.searchLowerIndex(begin, end, j);
1038
+ // the entry exists: update it now
1039
+ if (dst != end && m_data.index(dst) == StorageIndex(j))
1040
+ assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1041
+ // the entry belongs at the back of the vector: push to back
1042
+ else if (dst == end && capacity > 0)
1043
+ assignFunc.assignCoeff(insertBackUncompressed(j, j), diaEval.coeff(j));
1044
+ // the insertion requires a data move, record insertion location and handle in second pass
1045
+ else {
1046
+ insertionLocations.coeffRef(j) = StorageIndex(dst);
1047
+ deferredInsertions++;
1048
+ // if there is no capacity, all vectors to the right of this are shifted
1049
+ if (capacity == 0) shift++;
1050
+ }
936
1051
  }
937
1052
 
938
- if(m_data.size()==0 || overwrite)
939
- {
940
- typedef Array<StorageIndex,Dynamic,1> ArrayXI;
941
- this->makeCompressed();
942
- this->resizeNonZeros(n);
943
- Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
944
- Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
945
- Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
946
- values.setZero();
947
- internal::call_assignment_no_alias(values, diagXpr, assignFunc);
948
- }
949
- else
950
- {
951
- bool isComp = isCompressed();
952
- internal::evaluator<DiagXpr> diaEval(diagXpr);
953
- std::vector<IndexPosPair> newEntries;
954
-
955
- // 1 - try in-place update and record insertion failures
956
- for(Index i = 0; i<n; ++i)
957
- {
958
- internal::LowerBoundIndex lb = this->lower_bound(i,i);
959
- Index p = lb.value;
960
- if(lb.found)
961
- {
962
- // the coeff already exists
963
- assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
964
- }
965
- else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
966
- {
967
- // non compressed mode with local room for inserting one element
968
- m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
969
- m_innerNonZeros[i]++;
970
- m_data.value(p) = Scalar(0);
971
- m_data.index(p) = StorageIndex(i);
972
- assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
973
- }
974
- else
975
- {
976
- // defer insertion
977
- newEntries.push_back(IndexPosPair(i,p));
1053
+ if (deferredInsertions > 0) {
1054
+ m_data.resize(m_data.size() + shift);
1055
+ Index copyEnd = isCompressed() ? m_outerIndex[m_outerSize]
1056
+ : m_outerIndex[m_outerSize - 1] + m_innerNonZeros[m_outerSize - 1];
1057
+ for (Index j = m_outerSize - 1; deferredInsertions > 0; j--) {
1058
+ Index begin = m_outerIndex[j];
1059
+ Index end = isCompressed() ? m_outerIndex[j + 1] : begin + m_innerNonZeros[j];
1060
+ Index capacity = m_outerIndex[j + 1] - end;
1061
+
1062
+ bool doInsertion = insertionLocations(j) >= 0;
1063
+ bool breakUpCopy = doInsertion && (capacity > 0);
1064
+ // break up copy for sorted insertion into inactive nonzeros
1065
+ // optionally, add another criterion, i.e. 'breakUpCopy || (capacity > threshold)'
1066
+ // where `threshold >= 0` to skip inactive nonzeros in each vector
1067
+ // this reduces the total number of copied elements, but requires more moveChunk calls
1068
+ if (breakUpCopy) {
1069
+ Index copyBegin = m_outerIndex[j + 1];
1070
+ Index to = copyBegin + shift;
1071
+ Index chunkSize = copyEnd - copyBegin;
1072
+ m_data.moveChunk(copyBegin, to, chunkSize);
1073
+ copyEnd = end;
978
1074
  }
979
- }
980
- // 2 - insert deferred entries
981
- Index n_entries = Index(newEntries.size());
982
- if(n_entries>0)
983
- {
984
- Storage newData(m_data.size()+n_entries);
985
- Index prev_p = 0;
986
- Index prev_i = 0;
987
- for(Index k=0; k<n_entries;++k)
988
- {
989
- Index i = newEntries[k].i;
990
- Index p = newEntries[k].p;
991
- internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
992
- internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
993
- for(Index j=prev_i;j<i;++j)
994
- m_outerIndex[j+1] += k;
995
- if(!isComp)
996
- m_innerNonZeros[i]++;
997
- prev_p = p;
998
- prev_i = i;
999
- newData.value(p+k) = Scalar(0);
1000
- newData.index(p+k) = StorageIndex(i);
1001
- assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002
- }
1003
- {
1004
- internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005
- internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006
- for(Index j=prev_i+1;j<=m_outerSize;++j)
1007
- m_outerIndex[j] += n_entries;
1075
+
1076
+ m_outerIndex[j + 1] += shift;
1077
+
1078
+ if (doInsertion) {
1079
+ // if there is capacity, shift into the inactive nonzeros
1080
+ if (capacity > 0) shift++;
1081
+ Index copyBegin = insertionLocations(j);
1082
+ Index to = copyBegin + shift;
1083
+ Index chunkSize = copyEnd - copyBegin;
1084
+ m_data.moveChunk(copyBegin, to, chunkSize);
1085
+ Index dst = to - 1;
1086
+ m_data.index(dst) = StorageIndex(j);
1087
+ m_data.value(dst) = Scalar(0);
1088
+ assignFunc.assignCoeff(m_data.value(dst), diaEval.coeff(j));
1089
+ if (!isCompressed()) m_innerNonZeros[j]++;
1090
+ shift--;
1091
+ deferredInsertions--;
1092
+ copyEnd = copyBegin;
1008
1093
  }
1009
- m_data.swap(newData);
1010
1094
  }
1011
1095
  }
1096
+ eigen_assert((shift == 0) && (deferredInsertions == 0));
1012
1097
  }
-
- private:
- static void check_template_parameters()
- {
- EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
- EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
  }

+ /* These functions are used to avoid a redundant binary search operation in functions such as coeffRef() and assume
+ * `dst` is the appropriate sorted insertion point */
+ EIGEN_STRONG_INLINE Scalar& insertAtByOuterInner(Index outer, Index inner, Index dst);
+ Scalar& insertCompressedAtByOuterInner(Index outer, Index inner, Index dst);
+ Scalar& insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst);
+
+ private:
+ EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE)
+ EIGEN_STATIC_ASSERT((Options & (ColMajor | RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
+
  struct default_prunning_func {
  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
- inline bool operator() (const Index&, const Index&, const Scalar& value) const
- {
+ inline bool operator()(const Index&, const Index&, const Scalar& value) const {
  return !internal::isMuchSmallerThan(value, reference, epsilon);
  }
  Scalar reference;
@@ -1031,56 +1119,190 @@ private:

  namespace internal {

- template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
- void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
- {
- enum { IsRowMajor = SparseMatrixType::IsRowMajor };
- typedef typename SparseMatrixType::Scalar Scalar;
- typedef typename SparseMatrixType::StorageIndex StorageIndex;
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
-
- if(begin!=end)
- {
- // pass 1: count the nnz per inner-vector
- typename SparseMatrixType::IndexVector wi(trMat.outerSize());
- wi.setZero();
- for(InputIterator it(begin); it!=end; ++it)
- {
- eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
- wi(IsRowMajor ? it->col() : it->row())++;
+ // Creates a compressed sparse matrix from a range of unsorted triplets
+ // Requires temporary storage to handle duplicate entries
+ template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+ void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
+ DupFunctor dup_func) {
+ constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
+ using StorageIndex = typename SparseMatrixType::StorageIndex;
+ using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
+ using TransposedSparseMatrix =
+ SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;
+
+ if (begin == end) {
+ // Clear out existing data (if any).
+ mat.setZero();
+ return;
+ }
+
+ // There are two strategies to consider for constructing a matrix from unordered triplets:
+ // A) construct the 'mat' in its native storage order and sort in-place (less memory); or,
+ // B) construct the transposed matrix and use an implicit sort upon assignment to `mat` (less time).
+ // This routine uses B) for faster execution time.
+ TransposedSparseMatrix trmat(mat.rows(), mat.cols());
+
+ // scan triplets to determine allocation size before constructing matrix
+ Index nonZeros = 0;
+ for (InputIterator it(begin); it != end; ++it) {
+ eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
+ StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
+ if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
+ trmat.outerIndexPtr()[j + 1]++;
+ nonZeros++;
+ }
+
+ std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
+ eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
+ trmat.resizeNonZeros(nonZeros);
+
+ // construct temporary array to track insertions (outersize) and collapse duplicates (innersize)
+ ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
+ smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
+
+ // push triplets to back of each vector
+ for (InputIterator it(begin); it != end; ++it) {
+ StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
+ StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
+ StorageIndex k = tmp[j];
+ trmat.data().index(k) = i;
+ trmat.data().value(k) = it->value();
+ tmp[j]++;
+ }
+
+ IndexMap wi(tmp, trmat.innerSize());
+ trmat.collapseDuplicates(wi, dup_func);
+ // implicit sorting
+ mat = trmat;
+ }
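
The routine above follows the classic three-step CSR construction: count entries per inner vector, prefix-sum the counts into outer start offsets (std::partial_sum), then scatter each triplet to the next free slot of its vector. A minimal sketch of that pattern on plain arrays (an illustration only, independent of Eigen's internal storage):

  #include <numeric>
  #include <vector>

  struct Entry { int row; int col; double value; };

  // Builds a CSR-like layout: outer[r]..outer[r+1] delimits row r's entries.
  void build_csr(const std::vector<Entry>& entries, int rows,
                 std::vector<int>& outer, std::vector<int>& inner,
                 std::vector<double>& values) {
    outer.assign(rows + 1, 0);
    for (const Entry& e : entries) outer[e.row + 1]++;            // pass 1: count per row
    std::partial_sum(outer.begin(), outer.end(), outer.begin());  // offsets via prefix sum
    inner.resize(entries.size());
    values.resize(entries.size());
    std::vector<int> next(outer.begin(), outer.end() - 1);        // per-row write cursor
    for (const Entry& e : entries) {                              // pass 2: scatter
      int k = next[e.row]++;
      inner[k] = e.col;
      values[k] = e.value;
    }
  }

As in set_from_triplets, the scattered entries within each vector are still unsorted; Eigen obtains the sorting implicitly through the transposed assignment.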
+
+ // Creates a compressed sparse matrix from a sorted range of triplets
+ template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+ void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
+ DupFunctor dup_func) {
+ constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
+ using StorageIndex = typename SparseMatrixType::StorageIndex;
+
+ if (begin == end) return;
+
+ constexpr StorageIndex kEmptyIndexValue(-1);
+ // deallocate inner nonzeros if present and zero outerIndexPtr
+ mat.resize(mat.rows(), mat.cols());
+ // use outer indices to count nonzero entries (excluding duplicate entries)
+ StorageIndex previous_j = kEmptyIndexValue;
+ StorageIndex previous_i = kEmptyIndexValue;
+ // scan triplets to determine allocation size before constructing matrix
+ Index nonZeros = 0;
+ for (InputIterator it(begin); it != end; ++it) {
+ eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
+ StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
+ StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
+ eigen_assert(j > previous_j || (j == previous_j && i >= previous_i));
+ // identify duplicates by examining previous location
+ bool duplicate = (previous_j == j) && (previous_i == i);
+ if (!duplicate) {
+ if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
+ nonZeros++;
+ mat.outerIndexPtr()[j + 1]++;
+ previous_j = j;
+ previous_i = i;
  }
+ }

- // pass 2: insert all the elements into trMat
- trMat.reserve(wi);
- for(InputIterator it(begin); it!=end; ++it)
- trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+ // finalize outer indices and allocate memory
+ std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
+ eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
+ mat.resizeNonZeros(nonZeros);
+
+ previous_i = kEmptyIndexValue;
+ previous_j = kEmptyIndexValue;
+ Index back = 0;
+ for (InputIterator it(begin); it != end; ++it) {
+ StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
+ StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
+ bool duplicate = (previous_j == j) && (previous_i == i);
+ if (duplicate) {
+ mat.data().value(back - 1) = dup_func(mat.data().value(back - 1), it->value());
+ } else {
+ // push triplets to back
+ mat.data().index(back) = i;
+ mat.data().value(back) = it->value();
+ previous_j = j;
+ previous_i = i;
+ back++;
+ }
+ }
+ eigen_assert(back == nonZeros);
+ // matrix is finalized
+ }

- // pass 3:
- trMat.collapseDuplicates(dup_func);
+ // thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default
+ // "arithmetic" evaluator
+ template <typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
+ struct scalar_disjunction_op {
+ using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
+ scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator()(const LhsScalar& a, const RhsScalar& b) const {
+ return m_functor(a, b);
  }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
+ const DupFunctor& m_functor;
+ };
+
+ template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
+ struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};

- // pass 4: transposed copy -> implicit sorting
- mat = trMat;
+ // Creates a compressed sparse matrix from its existing entries and those from an unsorted range of triplets
+ template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+ void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
+ DupFunctor dup_func) {
+ using Scalar = typename SparseMatrixType::Scalar;
+ using SrcXprType =
+ CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
+
+ // set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
+ SparseMatrixType trips(mat.rows(), mat.cols());
+ set_from_triplets(begin, end, trips, dup_func);
+
+ SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
+ // the sparse assignment procedure creates a temporary matrix and swaps the final result
+ assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
  }
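
The "disjunction" merge performed by insert_from_triplets keeps entries present in only one operand and applies dup_func where both operands have an entry. A semantic sketch using ordinary maps (an illustration only; the actual evaluator works directly on compressed storage):

  #include <map>
  #include <utility>

  using Coord = std::pair<int, int>;

  template <class Dup>
  std::map<Coord, double> disjunction_merge(const std::map<Coord, double>& A,
                                            const std::map<Coord, double>& B, Dup dup) {
    std::map<Coord, double> out = A;
    for (const auto& kv : B) {
      auto it = out.find(kv.first);
      if (it == out.end()) out.insert(kv);           // present only in B: keep as-is
      else it->second = dup(it->second, kv.second);  // present in both: combine
    }
    return out;
  }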

+ // Creates a compressed sparse matrix from its existing entries and those from a sorted range of triplets
+ template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+ void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
+ DupFunctor dup_func) {
+ using Scalar = typename SparseMatrixType::Scalar;
+ using SrcXprType =
+ CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
+
+ // TODO: process triplets without making a copy
+ SparseMatrixType trips(mat.rows(), mat.cols());
+ set_from_triplets_sorted(begin, end, trips, dup_func);
+
+ SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
+ // the sparse assignment procedure creates a temporary matrix and swaps the final result
+ assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
  }

+ } // namespace internal

- /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
+ /** Fill the matrix \c *this with the list of \em triplets defined in the half-open range from \a begin to \a end.
  *
  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
- * The input list of triplets does not have to be sorted, and can contains duplicated elements.
+ * The input list of triplets does not have to be sorted, and may contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is a \em O(n) operation, with \em n the number of triplet elements.
- * The initial contents of \c *this is destroyed.
+ * The initial contents of \c *this are destroyed.
  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
  *
  * The \a InputIterators value_type must provide the following interface:
  * \code
  * Scalar value() const; // the value
- * Scalar row() const; // the row index i
- * Scalar col() const; // the column index j
+ * IndexType row() const; // the row index i
+ * IndexType col() const; // the column index j
  * \endcode
  * See for instance the Eigen::Triplet template class.
  *
@@ -1103,111 +1325,237 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector for instance.
  */
- template<typename Scalar, int _Options, typename _StorageIndex>
- template<typename InputIterators>
- void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
- {
- internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
+ const InputIterators& end) {
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
+ begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
  }
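
A complete usage sketch of the public API defined above (duplicates are summed by the default internal::scalar_sum_op):

  #include <Eigen/SparseCore>
  #include <vector>

  int main() {
    std::vector<Eigen::Triplet<double>> triplets;
    triplets.emplace_back(0, 0, 1.0);
    triplets.emplace_back(1, 2, 2.0);
    triplets.emplace_back(1, 2, 3.0);  // duplicate coordinate: summed by default
    Eigen::SparseMatrix<double> m(3, 3);
    m.setFromTriplets(triplets.begin(), triplets.end());
    // m.coeff(1, 2) == 5.0; m is now sorted and compressed
    return 0;
  }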

  /** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.setFromTriplets(triplets.begin(), triplets.end(), [](const Scalar&, const Scalar& b) { return b; });
+ * \endcode
+ */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators, typename DupFunctor>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin,
+ const InputIterators& end, DupFunctor dup_func) {
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
+ begin, end, *this, dup_func);
+ }
+
+ /** The same as setFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary
+ * storage. Two triplets `a` and `b` are appropriately ordered if:
+ * \code
+ * ColMajor: (a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row())
+ * RowMajor: (a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col())
+ * \endcode
+ */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
+ const InputIterators& end) {
+ internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
+ begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
+ }
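
Callers are responsible for the pre-sorting. A minimal sketch, assuming the default (column-major) storage order and the ordering criterion documented above:

  #include <Eigen/SparseCore>
  #include <algorithm>
  #include <vector>

  void fill_sorted(Eigen::SparseMatrix<double>& m,
                   std::vector<Eigen::Triplet<double>>& triplets) {
    std::sort(triplets.begin(), triplets.end(),
              [](const Eigen::Triplet<double>& a, const Eigen::Triplet<double>& b) {
                // ColMajor ordering: by column first, then by row
                return (a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row());
              });
    m.setFromSortedTriplets(triplets.begin(), triplets.end());
  }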
+
+ /** The same as setFromSortedTriplets but when duplicates are met the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.setFromSortedTriplets(triplets.begin(), triplets.end(), [](const Scalar&, const Scalar& b) { return b; });
+ * \endcode
+ */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators, typename DupFunctor>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin,
+ const InputIterators& end,
+ DupFunctor dup_func) {
+ internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
+ begin, end, *this, dup_func);
+ }
+
+ /** Insert a batch of elements into the matrix \c *this with the list of \em triplets defined in the half-open range
+ from \a begin to \a end.
+ *
+ * A \em triplet is a tuple (i,j,value) defining a non-zero element.
+ * The input list of triplets does not have to be sorted, and may contain duplicated elements.
+ * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
+ * This is a \em O(n) operation, with \em n the number of triplet elements.
+ * The initial contents of \c *this are preserved (except for the summation of duplicate elements).
+ * The matrix \c *this must be properly sized beforehand. The sizes are not extracted from the triplet list.
+ *
+ * The \a InputIterators value_type must provide the following interface:
  * \code
- * value = dup_func(OldValue, NewValue)
- * \endcode
- * Here is a C++11 example keeping the latest entry only:
+ * Scalar value() const; // the value
+ * IndexType row() const; // the row index i
+ * IndexType col() const; // the column index j
+ * \endcode
+ * See for instance the Eigen::Triplet template class.
+ *
+ * Here is a typical usage example:
  * \code
- * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
+ SparseMatrixType m(rows,cols); // m contains nonzero entries
+ typedef Triplet<double> T;
+ std::vector<T> tripletList;
+ tripletList.reserve(estimation_of_entries);
+ for(...)
+ {
+ // ...
+ tripletList.push_back(T(i,j,v_ij));
+ }
+
+ m.insertFromTriplets(tripletList.begin(), tripletList.end());
+ // m is ready to go!
  * \endcode
+ *
+ * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
+ * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
+ * be explicitly stored into a std::vector for instance.
  */
- template<typename Scalar, int _Options, typename _StorageIndex>
- template<typename InputIterators,typename DupFunctor>
- void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
- {
- internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
+ const InputIterators& end) {
+ internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
+ begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
+ }
+
+ /** The same as insertFromTriplets but when duplicates are met the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.insertFromTriplets(triplets.begin(), triplets.end(), [](const Scalar&, const Scalar& b) { return b; });
+ * \endcode
+ */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators, typename DupFunctor>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin,
+ const InputIterators& end, DupFunctor dup_func) {
+ internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
+ begin, end, *this, dup_func);
+ }
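
A short usage sketch of the overload above: merging a batch of updates into an existing matrix while keeping the newest value on collisions, instead of the default summation:

  #include <Eigen/SparseCore>
  #include <vector>

  void apply_updates(Eigen::SparseMatrix<double>& mat,
                     const std::vector<Eigen::Triplet<double>>& updates) {
    mat.insertFromTriplets(updates.begin(), updates.end(),
                           [](const double&, const double& b) { return b; });
  }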
+
+ /** The same as insertFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary
+ * storage. Two triplets `a` and `b` are appropriately ordered if:
+ * \code
+ * ColMajor: (a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row())
+ * RowMajor: (a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col())
+ * \endcode
+ */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
+ const InputIterators& end) {
+ internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>>(
+ begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
+ }
+
+ /** The same as insertFromSortedTriplets but when duplicates are met the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.insertFromSortedTriplets(triplets.begin(), triplets.end(), [](const Scalar&, const Scalar& b) { return b; });
+ * \endcode
+ */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename InputIterators, typename DupFunctor>
+ void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin,
+ const InputIterators& end,
+ DupFunctor dup_func) {
+ internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(
+ begin, end, *this, dup_func);
  }

  /** \internal */
- template<typename Scalar, int _Options, typename _StorageIndex>
- template<typename DupFunctor>
- void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
- {
- eigen_assert(!isCompressed());
- // TODO, in practice we should be able to use m_innerNonZeros for that task
- IndexVector wi(innerSize());
- wi.fill(-1);
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ template <typename Derived, typename DupFunctor>
+ void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
+ // removes duplicate entries and compresses the matrix
+ // the excess allocated memory is not released
+ // the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
+ eigen_assert(wi.size() == m_innerSize);
+ constexpr StorageIndex kEmptyIndexValue(-1);
+ wi.setConstant(kEmptyIndexValue);
  StorageIndex count = 0;
+ const bool is_compressed = isCompressed();
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
- for(Index j=0; j<outerSize(); ++j)
- {
- StorageIndex start = count;
- Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
- for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
- {
- Index i = m_data.index(k);
- if(wi(i)>=start)
- {
- // we already meet this entry => accumulate it
+ for (Index j = 0; j < m_outerSize; ++j) {
+ const StorageIndex newBegin = count;
+ const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
+ for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
+ StorageIndex i = m_data.index(k);
+ if (wi(i) >= newBegin) {
+ // entry at k is a duplicate
+ // accumulate it into the primary entry located at wi(i)
  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
- }
- else
- {
+ } else {
+ // k is the primary entry in j with inner index i
+ // shift it to the left and record its location at wi(i)
+ m_data.index(count) = i;
  m_data.value(count) = m_data.value(k);
- m_data.index(count) = m_data.index(k);
  wi(i) = count;
  ++count;
  }
  }
- m_outerIndex[j] = start;
+ m_outerIndex[j] = newBegin;
  }
  m_outerIndex[m_outerSize] = count;
+ m_data.resize(count);

- // turn the matrix into compressed form
- std::free(m_innerNonZeros);
+ // turn the matrix into compressed form (if it is not already)
+ internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  m_innerNonZeros = 0;
- m_data.resize(m_outerIndex[m_outerSize]);
  }
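
The workspace wi maps each inner index to the position of its first occurrence within the current vector, which is how duplicates are detected in a single pass. A standalone sketch of the same scan for one inner vector (wi sized to the inner dimension and pre-filled with -1; an illustration, not the member function above):

  #include <cstddef>
  #include <vector>

  template <class Dup>
  int collapse_one_vector(std::vector<int>& idx, std::vector<double>& val,
                          std::vector<int>& wi, Dup dup) {
    int count = 0;
    for (std::size_t k = 0; k < idx.size(); ++k) {
      int i = idx[k];
      if (wi[i] >= 0) {
        val[wi[i]] = dup(val[wi[i]], val[k]);  // duplicate: fold into first occurrence
      } else {
        idx[count] = i;                        // primary entry: compact to the left
        val[count] = val[k];
        wi[i] = count;
        ++count;
      }
    }
    return count;  // new length of the vector
  }

In the member function above, comparing wi(i) against newBegin (rather than -1) lets the same workspace be reused across all outer vectors without reinitialization.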

- template<typename Scalar, int _Options, typename _StorageIndex>
- template<typename OtherDerived>
- EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
- {
- EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ /** \internal */
+ template <typename Scalar, int Options_, typename StorageIndex_>
+ template <typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix<Scalar, Options_, StorageIndex_>&
+ SparseMatrix<Scalar, Options_, StorageIndex_>::operator=(const SparseMatrixBase<OtherDerived>& other) {
+ EIGEN_STATIC_ASSERT(
+ (internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif

- #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
- EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
- #endif
-
  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
- if (needToTranspose)
- {
- #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
- EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
- #endif
+ if (needToTranspose) {
+ #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ #endif
  // two passes algorithm:
  // 1 - compute the number of coeffs per dest inner vector
  // 2 - do the actual copy/eval
  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
- typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
- typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
- typedef internal::evaluator<_OtherCopy> OtherCopyEval;
+ typedef
+ typename internal::nested_eval<OtherDerived, 2, typename internal::plain_matrix_type<OtherDerived>::type>::type
+ OtherCopy;
+ typedef internal::remove_all_t<OtherCopy> OtherCopy_;
+ typedef internal::evaluator<OtherCopy_> OtherCopyEval;
  OtherCopy otherCopy(other.derived());
  OtherCopyEval otherCopyEval(otherCopy);

- SparseMatrix dest(other.rows(),other.cols());
- Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
+ SparseMatrix dest(other.rows(), other.cols());
+ Eigen::Map<IndexVector>(dest.m_outerIndex, dest.outerSize()).setZero();

  // pass 1
  // FIXME the above copy could be merged with that pass
- for (Index j=0; j<otherCopy.outerSize(); ++j)
- for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
- ++dest.m_outerIndex[it.index()];
+ for (Index j = 0; j < otherCopy.outerSize(); ++j)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()];

  // prefix sum
  StorageIndex count = 0;
  IndexVector positions(dest.outerSize());
- for (Index j=0; j<dest.outerSize(); ++j)
- {
+ for (Index j = 0; j < dest.outerSize(); ++j) {
  StorageIndex tmp = dest.m_outerIndex[j];
  dest.m_outerIndex[j] = count;
  positions[j] = count;
@@ -1217,10 +1565,8 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scal
  // alloc
  dest.m_data.resize(count);
  // pass 2
- for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
- {
- for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
- {
+ for (StorageIndex j = 0; j < otherCopy.outerSize(); ++j) {
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) {
  Index pos = positions[it.index()]++;
  dest.m_data.index(pos) = j;
  dest.m_data.value(pos) = it.value();
@@ -1228,11 +1574,8 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scal
  }
  this->swap(dest);
  return *this;
- }
- else
- {
- if(other.isRValue())
- {
+ } else {
+ if (other.isRValue()) {
  initAssignment(other.derived());
  }
  // there is no special optimization
@@ -1240,279 +1583,294 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scal
  }
  }

- template<typename _Scalar, int _Options, typename _StorageIndex>
- typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
- {
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- if(isCompressed())
- {
- if(nonZeros()==0)
- {
- // reserve space if not already done
- if(m_data.allocatedSize()==0)
- m_data.reserve(2*m_innerSize);
-
- // turn the matrix into non-compressed mode
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
- if(!m_innerNonZeros) internal::throw_std_bad_alloc();
-
- memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
-
- // pack all inner-vectors to the end of the pre-allocated space
- // and allocate the entire free-space to the first inner-vector
- StorageIndex end = convert_index(m_data.allocatedSize());
- for(Index j=1; j<=m_outerSize; ++j)
- m_outerIndex[j] = end;
- }
- else
- {
- // turn the matrix into non-compressed mode
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
- if(!m_innerNonZeros) internal::throw_std_bad_alloc();
- for(Index j=0; j<m_outerSize; ++j)
- m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ inline typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
+ SparseMatrix<Scalar_, Options_, StorageIndex_>::insert(Index row, Index col) {
+ return insertByOuterInner(IsRowMajor ? row : col, IsRowMajor ? col : row);
+ }
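
For single-element modification the documented usage contract still applies: insert() must target a coefficient that does not exist yet, while coeffRef() updates (or creates) one. A brief sketch:

  Eigen::SparseMatrix<double> m(10, 10);
  m.insert(3, 4) = 7.0;     // element (3,4) must not already exist
  m.coeffRef(3, 4) += 1.0;  // coeffRef is the safe way to update an existing element
  m.makeCompressed();       // restore compressed storage after a batch of insertions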
+
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ EIGEN_STRONG_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
+ SparseMatrix<Scalar_, Options_, StorageIndex_>::insertAtByOuterInner(Index outer, Index inner, Index dst) {
+ // random insertion into compressed matrix is very slow
+ uncompress();
+ return insertUncompressedAtByOuterInner(outer, inner, dst);
+ }
+
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
+ SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressed(Index row, Index col) {
+ eigen_assert(!isCompressed());
+ Index outer = IsRowMajor ? row : col;
+ Index inner = IsRowMajor ? col : row;
+ Index start = m_outerIndex[outer];
+ Index end = start + m_innerNonZeros[outer];
+ Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
+ if (dst == end) {
+ Index capacity = m_outerIndex[outer + 1] - end;
+ if (capacity > 0) {
+ // implies uncompressed: push to back of vector
+ m_innerNonZeros[outer]++;
+ m_data.index(end) = StorageIndex(inner);
+ m_data.value(end) = Scalar(0);
+ return m_data.value(end);
  }
  }
-
- // check whether we can do a fast "push back" insertion
- Index data_end = m_data.allocatedSize();
-
- // First case: we are filling a new inner vector which is packed at the end.
- // We assume that all remaining inner-vectors are also empty and packed to the end.
- if(m_outerIndex[outer]==data_end)
- {
- eigen_internal_assert(m_innerNonZeros[outer]==0);
-
- // pack previous empty inner-vectors to end of the used-space
- // and allocate the entire free-space to the current inner-vector.
- StorageIndex p = convert_index(m_data.size());
- Index j = outer;
- while(j>=0 && m_innerNonZeros[j]==0)
- m_outerIndex[j--] = p;
-
- // push back the new element
- ++m_innerNonZeros[outer];
- m_data.append(Scalar(0), inner);
-
- // check for reallocation
- if(data_end != m_data.allocatedSize())
- {
- // m_data has been reallocated
- // -> move remaining inner-vectors back to the end of the free-space
- // so that the entire free-space is allocated to the current inner-vector.
- eigen_internal_assert(data_end < m_data.allocatedSize());
- StorageIndex new_end = convert_index(m_data.allocatedSize());
- for(Index k=outer+1; k<=m_outerSize; ++k)
- if(m_outerIndex[k]==data_end)
- m_outerIndex[k] = new_end;
- }
- return m_data.value(p);
+ eigen_assert((dst == end || m_data.index(dst) != inner) &&
+ "you cannot insert an element that already exists, you must call coeffRef to this end");
+ return insertUncompressedAtByOuterInner(outer, inner, dst);
+ }
+
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ EIGEN_DEPRECATED EIGEN_DONT_INLINE typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
+ SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressed(Index row, Index col) {
+ eigen_assert(isCompressed());
+ Index outer = IsRowMajor ? row : col;
+ Index inner = IsRowMajor ? col : row;
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer + 1];
+ Index dst = start == end ? end : m_data.searchLowerIndex(start, end, inner);
+ eigen_assert((dst == end || m_data.index(dst) != inner) &&
+ "you cannot insert an element that already exists, you must call coeffRef to this end");
+ return insertCompressedAtByOuterInner(outer, inner, dst);
+ }
+
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
+ SparseMatrix<Scalar_, Options_, StorageIndex_>::insertCompressedAtByOuterInner(Index outer, Index inner, Index dst) {
+ eigen_assert(isCompressed());
+ // compressed insertion always requires expanding the buffer
+ // first, check if there is adequate allocated memory
+ if (m_data.allocatedSize() <= m_data.size()) {
+ // if there is no capacity for a single insertion, double the capacity
+ // increase capacity by a minimum of 32
+ Index minReserve = 32;
+ Index reserveSize = numext::maxi(minReserve, m_data.allocatedSize());
+ m_data.reserve(reserveSize);
  }
-
- // Second case: the next inner-vector is packed to the end
- // and the current inner-vector end match the used-space.
- if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
- {
- eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
-
- // add space for the new element
- ++m_innerNonZeros[outer];
- m_data.resize(m_data.size()+1);
-
- // check for reallocation
- if(data_end != m_data.allocatedSize())
- {
- // m_data has been reallocated
- // -> move remaining inner-vectors back to the end of the free-space
- // so that the entire free-space is allocated to the current inner-vector.
- eigen_internal_assert(data_end < m_data.allocatedSize());
- StorageIndex new_end = convert_index(m_data.allocatedSize());
- for(Index k=outer+1; k<=m_outerSize; ++k)
- if(m_outerIndex[k]==data_end)
- m_outerIndex[k] = new_end;
+ m_data.resize(m_data.size() + 1);
+ Index chunkSize = m_outerIndex[m_outerSize] - dst;
+ // shift the existing data to the right if necessary
+ m_data.moveChunk(dst, dst + 1, chunkSize);
+ // update nonzero counts
+ // potentially O(outerSize) bottleneck!
+ for (Index j = outer; j < m_outerSize; j++) m_outerIndex[j + 1]++;
+ // initialize the coefficient
+ m_data.index(dst) = StorageIndex(inner);
+ m_data.value(dst) = Scalar(0);
+ // return a reference to the coefficient
+ return m_data.value(dst);
+ }
+
+ template <typename Scalar_, int Options_, typename StorageIndex_>
+ typename SparseMatrix<Scalar_, Options_, StorageIndex_>::Scalar&
+ SparseMatrix<Scalar_, Options_, StorageIndex_>::insertUncompressedAtByOuterInner(Index outer, Index inner, Index dst) {
+ eigen_assert(!isCompressed());
+ // find a vector with capacity, starting at `outer` and searching to the left and right
+ for (Index leftTarget = outer - 1, rightTarget = outer; (leftTarget >= 0) || (rightTarget < m_outerSize);) {
+ if (rightTarget < m_outerSize) {
+ Index start = m_outerIndex[rightTarget];
+ Index end = start + m_innerNonZeros[rightTarget];
+ Index nextStart = m_outerIndex[rightTarget + 1];
+ Index capacity = nextStart - end;
+ if (capacity > 0) {
+ // move [dst, end) to dst+1 and insert at dst
+ Index chunkSize = end - dst;
+ if (chunkSize > 0) m_data.moveChunk(dst, dst + 1, chunkSize);
+ m_innerNonZeros[outer]++;
+ for (Index j = outer; j < rightTarget; j++) m_outerIndex[j + 1]++;
+ m_data.index(dst) = StorageIndex(inner);
+ m_data.value(dst) = Scalar(0);
+ return m_data.value(dst);
+ }
+ rightTarget++;
  }
-
- // and insert it at the right position (sorted insertion)
- Index startId = m_outerIndex[outer];
- Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
- while ( (p > startId) && (m_data.index(p-1) > inner) )
- {
- m_data.index(p) = m_data.index(p-1);
- m_data.value(p) = m_data.value(p-1);
- --p;
+ if (leftTarget >= 0) {
+ Index start = m_outerIndex[leftTarget];
+ Index end = start + m_innerNonZeros[leftTarget];
+ Index nextStart = m_outerIndex[leftTarget + 1];
+ Index capacity = nextStart - end;
+ if (capacity > 0) {
+ // tricky: dst is a lower bound, so we must insert at dst-1 when shifting left
+ // move [nextStart, dst) to nextStart-1 and insert at dst-1
+ Index chunkSize = dst - nextStart;
+ if (chunkSize > 0) m_data.moveChunk(nextStart, nextStart - 1, chunkSize);
+ m_innerNonZeros[outer]++;
+ for (Index j = leftTarget; j < outer; j++) m_outerIndex[j + 1]--;
+ m_data.index(dst - 1) = StorageIndex(inner);
+ m_data.value(dst - 1) = Scalar(0);
+ return m_data.value(dst - 1);
+ }
+ leftTarget--;
  }
-
- m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = Scalar(0));
  }
-
- if(m_data.size() != m_data.allocatedSize())
- {
- // make sure the matrix is compatible to random un-compressed insertion:
- m_data.resize(m_data.allocatedSize());
- this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
+
+ // no room for interior insertion
+ // nonZeros() == m_data.size()
+ // record the offset, as outerIndexPtr will change
+ Index dst_offset = dst - m_outerIndex[outer];
+ // allocate space for random insertion
+ if (m_data.allocatedSize() == 0) {
+ // fast method to allocate space for one element per vector in empty matrix
+ m_data.resize(m_outerSize);
+ std::iota(m_outerIndex, m_outerIndex + m_outerSize + 1, StorageIndex(0));
+ } else {
+ // check for integer overflow: if maxReserveSize == 0, insertion is not possible
+ Index maxReserveSize = static_cast<Index>(NumTraits<StorageIndex>::highest()) - m_data.allocatedSize();
+ eigen_assert(maxReserveSize > 0);
+ if (m_outerSize <= maxReserveSize) {
+ // allocate space for one additional element per vector
+ reserveInnerVectors(IndexVector::Constant(m_outerSize, 1));
+ } else {
+ // handle the edge case where StorageIndex is insufficient to reserve outerSize additional elements
+ // allocate space for one additional element in the interval [outer,maxReserveSize)
+ typedef internal::sparse_reserve_op<StorageIndex> ReserveSizesOp;
+ typedef CwiseNullaryOp<ReserveSizesOp, IndexVector> ReserveSizesXpr;
+ ReserveSizesXpr reserveSizesXpr(m_outerSize, 1, ReserveSizesOp(outer, m_outerSize, maxReserveSize));
+ reserveInnerVectors(reserveSizesXpr);
+ }
  }
-
- return insertUncompressed(row,col);
+ // insert element at `dst` with new outer indices
+ Index start = m_outerIndex[outer];
+ Index end = start + m_innerNonZeros[outer];
+ Index new_dst = start + dst_offset;
+ Index chunkSize = end - new_dst;
+ if (chunkSize > 0) m_data.moveChunk(new_dst, new_dst + 1, chunkSize);
+ m_innerNonZeros[outer]++;
+ m_data.index(new_dst) = StorageIndex(inner);
+ m_data.value(new_dst) = Scalar(0);
+ return m_data.value(new_dst);
  }
-
1363
- template<typename _Scalar, int _Options, typename _StorageIndex>
1364
- EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1365
- {
1366
- eigen_assert(!isCompressed());
1367
1744
 
1368
- const Index outer = IsRowMajor ? row : col;
1369
- const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1745
+ namespace internal {
1746
+
1747
+ template <typename Scalar_, int Options_, typename StorageIndex_>
1748
+ struct evaluator<SparseMatrix<Scalar_, Options_, StorageIndex_>>
1749
+ : evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> {
1750
+ typedef evaluator<SparseCompressedBase<SparseMatrix<Scalar_, Options_, StorageIndex_>>> Base;
1751
+ typedef SparseMatrix<Scalar_, Options_, StorageIndex_> SparseMatrixType;
1752
+ evaluator() : Base() {}
1753
+ explicit evaluator(const SparseMatrixType& mat) : Base(mat) {}
1754
+ };
1370
1755
 
1371
- Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1372
- StorageIndex innerNNZ = m_innerNonZeros[outer];
1373
- if(innerNNZ>=room)
1374
- {
1375
- // this inner vector is full, we need to reallocate the whole buffer :(
1376
- reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1377
- }
1756
+ } // namespace internal
1757
+
1758
+ // Specialization for SparseMatrix.
1759
+ // Serializes [rows, cols, isCompressed, outerSize, innerBufferSize,
1760
+ // innerNonZeros, outerIndices, innerIndices, values].
1761
+ template <typename Scalar, int Options, typename StorageIndex>
1762
+ class Serializer<SparseMatrix<Scalar, Options, StorageIndex>, void> {
1763
+ public:
1764
+ typedef SparseMatrix<Scalar, Options, StorageIndex> SparseMat;
1765
+
1766
+ struct Header {
1767
+ typename SparseMat::Index rows;
1768
+ typename SparseMat::Index cols;
1769
+ bool compressed;
1770
+ Index outer_size;
1771
+ Index inner_buffer_size;
1772
+ };
1378
1773
 
1379
- Index startId = m_outerIndex[outer];
1380
- Index p = startId + m_innerNonZeros[outer];
1381
- while ( (p > startId) && (m_data.index(p-1) > inner) )
1382
- {
1383
- m_data.index(p) = m_data.index(p-1);
1384
- m_data.value(p) = m_data.value(p-1);
1385
- --p;
1774
+ EIGEN_DEVICE_FUNC size_t size(const SparseMat& value) const {
1775
+ // innerNonZeros.
1776
+ std::size_t num_storage_indices = value.isCompressed() ? 0 : value.outerSize();
1777
+ // Outer indices.
1778
+ num_storage_indices += value.outerSize() + 1;
1779
+ // Inner indices.
1780
+ const StorageIndex inner_buffer_size = value.outerIndexPtr()[value.outerSize()];
1781
+ num_storage_indices += inner_buffer_size;
1782
+ // Values.
1783
+ std::size_t num_values = inner_buffer_size;
1784
+ return sizeof(Header) + sizeof(Scalar) * num_values + sizeof(StorageIndex) * num_storage_indices;
1386
1785
  }
1387
- eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1388
1786
 
1389
- m_innerNonZeros[outer]++;
1787
+ EIGEN_DEVICE_FUNC uint8_t* serialize(uint8_t* dest, uint8_t* end, const SparseMat& value) {
1788
+ if (EIGEN_PREDICT_FALSE(dest == nullptr)) return nullptr;
1789
+ if (EIGEN_PREDICT_FALSE(dest + size(value) > end)) return nullptr;
1790
+
1791
+ const size_t header_bytes = sizeof(Header);
1792
+ Header header = {value.rows(), value.cols(), value.isCompressed(), value.outerSize(),
1793
+ value.outerIndexPtr()[value.outerSize()]};
1794
+ EIGEN_USING_STD(memcpy)
1795
+ memcpy(dest, &header, header_bytes);
1796
+ dest += header_bytes;
1797
+
1798
+ // innerNonZeros.
1799
+ if (!header.compressed) {
1800
+ std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1801
+ memcpy(dest, value.innerNonZeroPtr(), data_bytes);
1802
+ dest += data_bytes;
1803
+ }
1390
1804
 
1391
- m_data.index(p) = inner;
1392
- return (m_data.value(p) = Scalar(0));
1393
- }
1805
+ // Outer indices.
1806
+ std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1807
+ memcpy(dest, value.outerIndexPtr(), data_bytes);
1808
+ dest += data_bytes;
1394
1809
 
1395
- template<typename _Scalar, int _Options, typename _StorageIndex>
1396
- EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1397
- {
1398
- eigen_assert(isCompressed());
1810
+ // Inner indices.
1811
+ data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1812
+ memcpy(dest, value.innerIndexPtr(), data_bytes);
1813
+ dest += data_bytes;
1399
1814
 
1400
- const Index outer = IsRowMajor ? row : col;
1401
- const Index inner = IsRowMajor ? col : row;
1815
+ // Values.
1816
+ data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1817
+ memcpy(dest, value.valuePtr(), data_bytes);
1818
+ dest += data_bytes;
1402
1819
 
1403
- Index previousOuter = outer;
1404
- if (m_outerIndex[outer+1]==0)
1405
- {
1406
- // we start a new inner vector
1407
- while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1408
- {
1409
- m_outerIndex[previousOuter] = convert_index(m_data.size());
1410
- --previousOuter;
1411
- }
1412
- m_outerIndex[outer+1] = m_outerIndex[outer];
1820
+ return dest;
1413
1821
  }
1414
1822
 
1415
- // here we have to handle the tricky case where the outerIndex array
1416
- // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1417
- // the 2nd inner vector...
1418
- bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1419
- && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1420
-
1421
- std::size_t startId = m_outerIndex[outer];
1422
- // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1423
- std::size_t p = m_outerIndex[outer+1];
1424
- ++m_outerIndex[outer+1];
1425
-
1426
- double reallocRatio = 1;
1427
- if (m_data.allocatedSize()<=m_data.size())
1428
- {
1429
- // if there is no preallocated memory, let's reserve a minimum of 32 elements
1430
- if (m_data.size()==0)
1431
- {
1432
- m_data.reserve(32);
1433
- }
1434
- else
1435
- {
1436
- // we need to reallocate the data, to reduce multiple reallocations
1437
- // we use a smart resize algorithm based on the current filling ratio
1438
- // in addition, we use double to avoid integers overflows
1439
- double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1440
- reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1441
- // furthermore we bound the realloc ratio to:
1442
- // 1) reduce multiple minor realloc when the matrix is almost filled
1443
- // 2) avoid to allocate too much memory when the matrix is almost empty
1444
- reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1823
+ EIGEN_DEVICE_FUNC const uint8_t* deserialize(const uint8_t* src, const uint8_t* end, SparseMat& value) const {
1824
+ if (EIGEN_PREDICT_FALSE(src == nullptr)) return nullptr;
1825
+ if (EIGEN_PREDICT_FALSE(src + sizeof(Header) > end)) return nullptr;
1826
+
1827
+ const size_t header_bytes = sizeof(Header);
1828
+ Header header;
1829
+ EIGEN_USING_STD(memcpy)
1830
+ memcpy(&header, src, header_bytes);
1831
+ src += header_bytes;
1832
+
1833
+ value.setZero();
1834
+ value.resize(header.rows, header.cols);
1835
+ if (header.compressed) {
1836
+ value.makeCompressed();
1837
+ } else {
1838
+ value.uncompress();
1445
1839
  }
1446
- }
1447
- m_data.resize(m_data.size()+1,reallocRatio);
1448
1840
 
1449
- if (!isLastVec)
1450
- {
1451
- if (previousOuter==-1)
1452
- {
1453
- // oops wrong guess.
1454
- // let's correct the outer offsets
1455
- for (Index k=0; k<=(outer+1); ++k)
1456
- m_outerIndex[k] = 0;
1457
- Index k=outer+1;
1458
- while(m_outerIndex[k]==0)
1459
- m_outerIndex[k++] = 1;
1460
- while (k<=m_outerSize && m_outerIndex[k]!=0)
1461
- m_outerIndex[k++]++;
1462
- p = 0;
1463
- --k;
1464
- k = m_outerIndex[k]-1;
1465
- while (k>0)
1466
- {
1467
- m_data.index(k) = m_data.index(k-1);
1468
- m_data.value(k) = m_data.value(k-1);
1469
- k--;
1470
- }
1471
- }
1472
- else
1473
- {
1474
- // we are not inserting into the last inner vec
1475
- // update outer indices:
1476
- Index j = outer+2;
1477
- while (j<=m_outerSize && m_outerIndex[j]!=0)
1478
- m_outerIndex[j++]++;
1479
- --j;
1480
- // shift data of last vecs:
1481
- Index k = m_outerIndex[j]-1;
1482
- while (k>=Index(p))
1483
- {
1484
- m_data.index(k) = m_data.index(k-1);
1485
- m_data.value(k) = m_data.value(k-1);
1486
- k--;
1487
- }
1841
+ // Adjust value ptr size.
1842
+ value.data().resize(header.inner_buffer_size);
1843
+
1844
+ // Initialize compressed state and inner non-zeros.
1845
+ if (!header.compressed) {
1846
+ // Inner non-zero counts.
1847
+ std::size_t data_bytes = sizeof(StorageIndex) * header.outer_size;
1848
+ if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1849
+ memcpy(value.innerNonZeroPtr(), src, data_bytes);
1850
+ src += data_bytes;
1488
1851
  }
1489
- }
1490
1852
 
1491
- while ( (p > startId) && (m_data.index(p-1) > inner) )
1492
- {
1493
- m_data.index(p) = m_data.index(p-1);
1494
- m_data.value(p) = m_data.value(p-1);
1495
- --p;
1853
+ // Outer indices.
1854
+ std::size_t data_bytes = sizeof(StorageIndex) * (header.outer_size + 1);
1855
+ if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1856
+ memcpy(value.outerIndexPtr(), src, data_bytes);
1857
+ src += data_bytes;
1858
+
1859
+ // Inner indices.
1860
+ data_bytes = sizeof(StorageIndex) * header.inner_buffer_size;
1861
+ if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1862
+ memcpy(value.innerIndexPtr(), src, data_bytes);
1863
+ src += data_bytes;
1864
+
1865
+ // Values.
1866
+ data_bytes = sizeof(Scalar) * header.inner_buffer_size;
1867
+ if (EIGEN_PREDICT_FALSE(src + data_bytes > end)) return nullptr;
1868
+ memcpy(value.valuePtr(), src, data_bytes);
1869
+ src += data_bytes;
1870
+ return src;
1496
1871
  }
1497
-
1498
- m_data.index(p) = inner;
1499
- return (m_data.value(p) = Scalar(0));
1500
- }
1501
-
1502
- namespace internal {
1503
-
1504
- template<typename _Scalar, int _Options, typename _StorageIndex>
1505
- struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1506
- : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1507
- {
1508
- typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1509
- typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1510
- evaluator() : Base() {}
1511
- explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1512
1872
  };
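
A round-trip sketch against the size()/serialize()/deserialize() interface shown above; buffer allocation and error handling (the nullptr returns) are the caller's responsibility:

  #include <Eigen/SparseCore>
  #include <cstdint>
  #include <vector>

  Eigen::SparseMatrix<double> roundtrip(const Eigen::SparseMatrix<double>& m) {
    Eigen::Serializer<Eigen::SparseMatrix<double>> io;
    std::vector<std::uint8_t> buffer(io.size(m));
    std::uint8_t* end = io.serialize(buffer.data(), buffer.data() + buffer.size(), m);
    Eigen::SparseMatrix<double> out;
    if (end != nullptr) io.deserialize(buffer.data(), end, out);
    return out;
  }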

- }
-
- } // end namespace Eigen
+ } // end namespace Eigen

- #endif // EIGEN_SPARSEMATRIX_H
+ #endif // EIGEN_SPARSEMATRIX_H