sequenzo-0.1.18-cp312-cp312-win_amd64.whl → sequenzo-0.1.20-cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sequenzo might be problematic; see the registry's advisory page for more details.

Files changed (399):
  1. sequenzo/__init__.py +39 -7
  2. sequenzo/big_data/clara/utils/get_weighted_diss.c +154 -154
  3. sequenzo/big_data/clara/utils/get_weighted_diss.cp312-win_amd64.pyd +0 -0
  4. sequenzo/clustering/KMedoids.py +39 -0
  5. sequenzo/clustering/clustering_c_code.cp312-win_amd64.pyd +0 -0
  6. sequenzo/clustering/hierarchical_clustering.py +108 -6
  7. sequenzo/define_sequence_data.py +10 -1
  8. sequenzo/dissimilarity_measures/c_code.cp312-win_amd64.pyd +0 -0
  9. sequenzo/dissimilarity_measures/get_distance_matrix.py +2 -3
  10. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +1 -1
  11. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.c +154 -154
  12. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cp312-win_amd64.pyd +0 -0
  13. sequenzo/dissimilarity_measures/utils/seqconc.c +154 -154
  14. sequenzo/dissimilarity_measures/utils/seqconc.cp312-win_amd64.pyd +0 -0
  15. sequenzo/dissimilarity_measures/utils/seqdss.c +154 -154
  16. sequenzo/dissimilarity_measures/utils/seqdss.cp312-win_amd64.pyd +0 -0
  17. sequenzo/dissimilarity_measures/utils/seqdur.c +154 -154
  18. sequenzo/dissimilarity_measures/utils/seqdur.cp312-win_amd64.pyd +0 -0
  19. sequenzo/dissimilarity_measures/utils/seqlength.c +154 -154
  20. sequenzo/dissimilarity_measures/utils/seqlength.cp312-win_amd64.pyd +0 -0
  21. sequenzo/multidomain/cat.py +0 -53
  22. sequenzo/multidomain/dat.py +11 -3
  23. sequenzo/multidomain/idcd.py +0 -3
  24. sequenzo/multidomain/linked_polyad.py +0 -1
  25. sequenzo/openmp_setup.py +233 -0
  26. sequenzo/visualization/plot_transition_matrix.py +21 -22
  27. {sequenzo-0.1.18.dist-info → sequenzo-0.1.20.dist-info}/METADATA +71 -10
  28. sequenzo-0.1.20.dist-info/RECORD +272 -0
  29. sequenzo/dissimilarity_measures/setup.py +0 -35
  30. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Cholesky/LDLT.h +0 -688
  31. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Cholesky/LLT.h +0 -558
  32. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +0 -99
  33. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +0 -682
  34. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +0 -346
  35. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +0 -462
  36. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +0 -91
  37. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/EigenSolver.h +0 -622
  38. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +0 -418
  39. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +0 -226
  40. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +0 -374
  41. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +0 -158
  42. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/RealQZ.h +0 -657
  43. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/RealSchur.h +0 -558
  44. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +0 -77
  45. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +0 -904
  46. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +0 -87
  47. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +0 -561
  48. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/AlignedBox.h +0 -486
  49. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/AngleAxis.h +0 -247
  50. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/EulerAngles.h +0 -114
  51. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Homogeneous.h +0 -501
  52. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Hyperplane.h +0 -282
  53. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/OrthoMethods.h +0 -235
  54. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/ParametrizedLine.h +0 -232
  55. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Quaternion.h +0 -870
  56. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Rotation2D.h +0 -199
  57. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/RotationBase.h +0 -206
  58. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Scaling.h +0 -188
  59. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Transform.h +0 -1563
  60. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Translation.h +0 -202
  61. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Umeyama.h +0 -166
  62. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +0 -168
  63. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Householder/BlockHouseholder.h +0 -110
  64. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Householder/Householder.h +0 -176
  65. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Householder/HouseholderSequence.h +0 -545
  66. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +0 -226
  67. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +0 -212
  68. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +0 -229
  69. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +0 -394
  70. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +0 -453
  71. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +0 -444
  72. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +0 -198
  73. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +0 -117
  74. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Jacobi/Jacobi.h +0 -483
  75. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/KLUSupport/KLUSupport.h +0 -358
  76. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/Determinant.h +0 -117
  77. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/FullPivLU.h +0 -877
  78. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/InverseImpl.h +0 -432
  79. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/PartialPivLU.h +0 -624
  80. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +0 -83
  81. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/arch/InverseSize4.h +0 -351
  82. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/MetisSupport/MetisSupport.h +0 -137
  83. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/OrderingMethods/Amd.h +0 -435
  84. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +0 -1863
  85. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/OrderingMethods/Ordering.h +0 -153
  86. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +0 -678
  87. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +0 -545
  88. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/ColPivHouseholderQR.h +0 -674
  89. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +0 -97
  90. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +0 -635
  91. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/FullPivHouseholderQR.h +0 -713
  92. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/HouseholderQR.h +0 -434
  93. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +0 -68
  94. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +0 -335
  95. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/BDCSVD.h +0 -1366
  96. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/JacobiSVD.h +0 -812
  97. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +0 -91
  98. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/SVDBase.h +0 -376
  99. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/UpperBidiagonalization.h +0 -414
  100. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +0 -697
  101. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +0 -174
  102. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/AmbiVector.h +0 -378
  103. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/CompressedStorage.h +0 -274
  104. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +0 -352
  105. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +0 -67
  106. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseAssign.h +0 -270
  107. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseBlock.h +0 -571
  108. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseColEtree.h +0 -206
  109. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +0 -370
  110. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +0 -722
  111. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +0 -150
  112. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +0 -342
  113. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +0 -138
  114. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseDot.h +0 -98
  115. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseFuzzy.h +0 -29
  116. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseMap.h +0 -305
  117. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseMatrix.h +0 -1518
  118. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +0 -398
  119. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparsePermutation.h +0 -178
  120. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseProduct.h +0 -181
  121. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseRedux.h +0 -49
  122. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseRef.h +0 -397
  123. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +0 -659
  124. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseSolverBase.h +0 -124
  125. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +0 -198
  126. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseTranspose.h +0 -92
  127. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseTriangularView.h +0 -189
  128. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseUtil.h +0 -186
  129. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseVector.h +0 -478
  130. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseView.h +0 -254
  131. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/TriangularSolver.h +0 -315
  132. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU.h +0 -923
  133. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLUImpl.h +0 -66
  134. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +0 -226
  135. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +0 -110
  136. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +0 -375
  137. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +0 -80
  138. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +0 -181
  139. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +0 -179
  140. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +0 -107
  141. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +0 -280
  142. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +0 -126
  143. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +0 -130
  144. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +0 -223
  145. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +0 -258
  146. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +0 -137
  147. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +0 -136
  148. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +0 -83
  149. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseQR/SparseQR.h +0 -758
  150. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/StdDeque.h +0 -116
  151. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/StdList.h +0 -106
  152. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/StdVector.h +0 -131
  153. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/details.h +0 -84
  154. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +0 -1025
  155. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +0 -642
  156. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/Image.h +0 -82
  157. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/Kernel.h +0 -79
  158. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/RealSvd2x2.h +0 -55
  159. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/blas.h +0 -440
  160. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/lapack.h +0 -152
  161. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/lapacke.h +0 -16292
  162. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/lapacke_mangling.h +0 -17
  163. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +0 -358
  164. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +0 -696
  165. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/BlockMethods.h +0 -1442
  166. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +0 -115
  167. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +0 -177
  168. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/IndexedViewMethods.h +0 -262
  169. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +0 -152
  170. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +0 -95
  171. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/ReshapedMethods.h +0 -149
  172. sequenzo/dissimilarity_measures/src/eigen/bench/BenchSparseUtil.h +0 -149
  173. sequenzo/dissimilarity_measures/src/eigen/bench/BenchTimer.h +0 -199
  174. sequenzo/dissimilarity_measures/src/eigen/bench/BenchUtil.h +0 -92
  175. sequenzo/dissimilarity_measures/src/eigen/bench/basicbenchmark.h +0 -63
  176. sequenzo/dissimilarity_measures/src/eigen/bench/btl/generic_bench/utils/utilities.h +0 -90
  177. sequenzo/dissimilarity_measures/src/eigen/bench/btl/libs/BLAS/blas.h +0 -675
  178. sequenzo/dissimilarity_measures/src/eigen/bench/btl/libs/BLAS/c_interface_base.h +0 -73
  179. sequenzo/dissimilarity_measures/src/eigen/bench/perf_monitoring/gemm_common.h +0 -67
  180. sequenzo/dissimilarity_measures/src/eigen/bench/perf_monitoring/gemv_common.h +0 -69
  181. sequenzo/dissimilarity_measures/src/eigen/bench/spbench/spbenchsolver.h +0 -573
  182. sequenzo/dissimilarity_measures/src/eigen/bench/spbench/spbenchstyle.h +0 -95
  183. sequenzo/dissimilarity_measures/src/eigen/bench/tensors/benchmark.h +0 -49
  184. sequenzo/dissimilarity_measures/src/eigen/bench/tensors/tensor_benchmarks.h +0 -597
  185. sequenzo/dissimilarity_measures/src/eigen/blas/BandTriangularSolver.h +0 -97
  186. sequenzo/dissimilarity_measures/src/eigen/blas/GeneralRank1Update.h +0 -44
  187. sequenzo/dissimilarity_measures/src/eigen/blas/PackedSelfadjointProduct.h +0 -53
  188. sequenzo/dissimilarity_measures/src/eigen/blas/PackedTriangularMatrixVector.h +0 -79
  189. sequenzo/dissimilarity_measures/src/eigen/blas/PackedTriangularSolverVector.h +0 -88
  190. sequenzo/dissimilarity_measures/src/eigen/blas/Rank2Update.h +0 -57
  191. sequenzo/dissimilarity_measures/src/eigen/blas/common.h +0 -175
  192. sequenzo/dissimilarity_measures/src/eigen/blas/f2c/datatypes.h +0 -24
  193. sequenzo/dissimilarity_measures/src/eigen/blas/level1_cplx_impl.h +0 -155
  194. sequenzo/dissimilarity_measures/src/eigen/blas/level1_impl.h +0 -144
  195. sequenzo/dissimilarity_measures/src/eigen/blas/level1_real_impl.h +0 -122
  196. sequenzo/dissimilarity_measures/src/eigen/blas/level2_cplx_impl.h +0 -360
  197. sequenzo/dissimilarity_measures/src/eigen/blas/level2_impl.h +0 -553
  198. sequenzo/dissimilarity_measures/src/eigen/blas/level2_real_impl.h +0 -306
  199. sequenzo/dissimilarity_measures/src/eigen/blas/level3_impl.h +0 -702
  200. sequenzo/dissimilarity_measures/src/eigen/debug/gdb/__init__.py +0 -1
  201. sequenzo/dissimilarity_measures/src/eigen/debug/gdb/printers.py +0 -314
  202. sequenzo/dissimilarity_measures/src/eigen/demos/mandelbrot/mandelbrot.h +0 -71
  203. sequenzo/dissimilarity_measures/src/eigen/demos/mix_eigen_and_c/binary_library.h +0 -71
  204. sequenzo/dissimilarity_measures/src/eigen/demos/opengl/camera.h +0 -118
  205. sequenzo/dissimilarity_measures/src/eigen/demos/opengl/gpuhelper.h +0 -207
  206. sequenzo/dissimilarity_measures/src/eigen/demos/opengl/icosphere.h +0 -30
  207. sequenzo/dissimilarity_measures/src/eigen/demos/opengl/quaternion_demo.h +0 -114
  208. sequenzo/dissimilarity_measures/src/eigen/demos/opengl/trackball.h +0 -42
  209. sequenzo/dissimilarity_measures/src/eigen/lapack/lapack_common.h +0 -29
  210. sequenzo/dissimilarity_measures/src/eigen/scripts/relicense.py +0 -69
  211. sequenzo/dissimilarity_measures/src/eigen/test/AnnoyingScalar.h +0 -165
  212. sequenzo/dissimilarity_measures/src/eigen/test/MovableScalar.h +0 -35
  213. sequenzo/dissimilarity_measures/src/eigen/test/SafeScalar.h +0 -30
  214. sequenzo/dissimilarity_measures/src/eigen/test/bug1213.h +0 -8
  215. sequenzo/dissimilarity_measures/src/eigen/test/evaluator_common.h +0 -0
  216. sequenzo/dissimilarity_measures/src/eigen/test/gpu_common.h +0 -176
  217. sequenzo/dissimilarity_measures/src/eigen/test/main.h +0 -857
  218. sequenzo/dissimilarity_measures/src/eigen/test/packetmath_test_shared.h +0 -275
  219. sequenzo/dissimilarity_measures/src/eigen/test/product.h +0 -259
  220. sequenzo/dissimilarity_measures/src/eigen/test/random_without_cast_overflow.h +0 -152
  221. sequenzo/dissimilarity_measures/src/eigen/test/solverbase.h +0 -36
  222. sequenzo/dissimilarity_measures/src/eigen/test/sparse.h +0 -204
  223. sequenzo/dissimilarity_measures/src/eigen/test/sparse_solver.h +0 -699
  224. sequenzo/dissimilarity_measures/src/eigen/test/split_test_helper.h +0 -5994
  225. sequenzo/dissimilarity_measures/src/eigen/test/svd_common.h +0 -521
  226. sequenzo/dissimilarity_measures/src/eigen/test/svd_fill.h +0 -118
  227. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/Tensor.h +0 -554
  228. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h +0 -329
  229. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h +0 -247
  230. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h +0 -1176
  231. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h +0 -1559
  232. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h +0 -1093
  233. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h +0 -518
  234. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h +0 -377
  235. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +0 -1023
  236. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h +0 -73
  237. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h +0 -6
  238. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h +0 -1413
  239. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h +0 -575
  240. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h +0 -1650
  241. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h +0 -1679
  242. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h +0 -456
  243. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h +0 -1132
  244. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h +0 -544
  245. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h +0 -214
  246. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h +0 -347
  247. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h +0 -137
  248. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h +0 -6
  249. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h +0 -104
  250. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h +0 -389
  251. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h +0 -1048
  252. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h +0 -409
  253. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h +0 -236
  254. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h +0 -490
  255. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h +0 -236
  256. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h +0 -983
  257. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +0 -703
  258. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h +0 -388
  259. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h +0 -669
  260. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h +0 -379
  261. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h +0 -237
  262. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h +0 -191
  263. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h +0 -488
  264. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h +0 -302
  265. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h +0 -33
  266. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h +0 -99
  267. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h +0 -44
  268. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h +0 -79
  269. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h +0 -603
  270. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h +0 -738
  271. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h +0 -247
  272. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h +0 -82
  273. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h +0 -263
  274. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h +0 -216
  275. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h +0 -98
  276. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h +0 -327
  277. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h +0 -311
  278. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h +0 -1102
  279. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h +0 -708
  280. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h +0 -291
  281. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h +0 -322
  282. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h +0 -998
  283. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h +0 -6
  284. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h +0 -966
  285. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h +0 -582
  286. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h +0 -454
  287. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h +0 -465
  288. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h +0 -528
  289. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h +0 -513
  290. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h +0 -471
  291. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h +0 -161
  292. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h +0 -346
  293. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h +0 -303
  294. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h +0 -264
  295. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h +0 -249
  296. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h +0 -629
  297. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h +0 -293
  298. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h +0 -236
  299. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h +0 -338
  300. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h +0 -669
  301. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h +0 -67
  302. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h +0 -249
  303. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h +0 -486
  304. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h +0 -236
  305. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h +0 -23
  306. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h +0 -40
  307. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h +0 -301
  308. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h +0 -48
  309. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h +0 -20
  310. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/CXX11Meta.h +0 -537
  311. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h +0 -88
  312. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/EmulateArray.h +0 -261
  313. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h +0 -158
  314. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h +0 -108
  315. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h +0 -730
  316. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h +0 -220
  317. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h +0 -293
  318. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/BVH/KdBVH.h +0 -223
  319. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h +0 -790
  320. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/EulerAngles/EulerAngles.h +0 -355
  321. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/EulerAngles/EulerSystem.h +0 -305
  322. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h +0 -261
  323. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h +0 -449
  324. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +0 -187
  325. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/DGMRES.h +0 -511
  326. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h +0 -335
  327. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/IDRS.h +0 -436
  328. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h +0 -90
  329. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h +0 -154
  330. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h +0 -267
  331. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h +0 -193
  332. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +0 -305
  333. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h +0 -84
  334. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h +0 -202
  335. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h +0 -160
  336. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h +0 -188
  337. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h +0 -396
  338. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +0 -441
  339. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h +0 -569
  340. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h +0 -373
  341. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h +0 -705
  342. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h +0 -368
  343. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h +0 -117
  344. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h +0 -95
  345. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h +0 -601
  346. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h +0 -657
  347. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h +0 -66
  348. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h +0 -70
  349. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h +0 -107
  350. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h +0 -79
  351. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h +0 -298
  352. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h +0 -91
  353. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h +0 -30
  354. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h +0 -99
  355. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h +0 -49
  356. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h +0 -130
  357. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Polynomials/Companion.h +0 -280
  358. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h +0 -428
  359. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h +0 -143
  360. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h +0 -352
  361. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h +0 -862
  362. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h +0 -212
  363. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h +0 -295
  364. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h +0 -259
  365. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h +0 -89
  366. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h +0 -122
  367. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h +0 -1079
  368. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +0 -404
  369. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h +0 -282
  370. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h +0 -247
  371. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h +0 -349
  372. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsArrayAPI.h +0 -286
  373. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsBFloat16.h +0 -68
  374. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsFunctors.h +0 -357
  375. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsHalf.h +0 -66
  376. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsImpl.h +0 -1959
  377. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsPacketMath.h +0 -118
  378. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/HipVectorCompatibility.h +0 -67
  379. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h +0 -167
  380. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsBFloat16.h +0 -58
  381. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h +0 -330
  382. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h +0 -58
  383. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h +0 -2045
  384. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h +0 -79
  385. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX/BesselFunctions.h +0 -46
  386. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX/SpecialFunctions.h +0 -16
  387. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/BesselFunctions.h +0 -46
  388. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/SpecialFunctions.h +0 -16
  389. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/GPU/SpecialFunctions.h +0 -369
  390. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/NEON/BesselFunctions.h +0 -54
  391. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/NEON/SpecialFunctions.h +0 -34
  392. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Splines/Spline.h +0 -507
  393. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Splines/SplineFitting.h +0 -431
  394. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Splines/SplineFwd.h +0 -93
  395. sequenzo/dissimilarity_measures/src/eigen/unsupported/test/matrix_functions.h +0 -67
  396. sequenzo-0.1.18.dist-info/RECORD +0 -638
  397. {sequenzo-0.1.18.dist-info → sequenzo-0.1.20.dist-info}/WHEEL +0 -0
  398. {sequenzo-0.1.18.dist-info → sequenzo-0.1.20.dist-info}/licenses/LICENSE +0 -0
  399. {sequenzo-0.1.18.dist-info → sequenzo-0.1.20.dist-info}/top_level.txt +0 -0
@@ -1,1518 +0,0 @@
1
- // This file is part of Eigen, a lightweight C++ template library
2
- // for linear algebra.
3
- //
4
- // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5
- //
6
- // This Source Code Form is subject to the terms of the Mozilla
7
- // Public License v. 2.0. If a copy of the MPL was not distributed
8
- // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
-
10
- #ifndef EIGEN_SPARSEMATRIX_H
11
- #define EIGEN_SPARSEMATRIX_H
12
-
13
- namespace Eigen {
14
-
15
- /** \ingroup SparseCore_Module
16
- *
17
- * \class SparseMatrix
18
- *
19
- * \brief A versatible sparse matrix representation
20
- *
21
- * This class implements a more versatile variants of the common \em compressed row/column storage format.
22
- * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
23
- * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
24
- * space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
25
- * can be done with limited memory reallocation and copies.
26
- *
27
- * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
28
- * compatible with many library.
29
- *
30
- * More details on this storage sceheme are given in the \ref TutorialSparse "manual pages".
31
- *
32
- * \tparam _Scalar the scalar type, i.e. the type of the coefficients
33
- * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
34
- * is ColMajor or RowMajor. The default is 0 which means column-major.
35
- * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
36
- *
37
- * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
38
- * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
39
- * Codes making use of \c SparseMatrix::Index, might thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
40
- *
41
- * This class can be extended with the help of the plugin mechanism described on the page
42
- * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
43
- */
44
-
45
- namespace internal {
46
- template<typename _Scalar, int _Options, typename _StorageIndex>
47
- struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48
- {
49
- typedef _Scalar Scalar;
50
- typedef _StorageIndex StorageIndex;
51
- typedef Sparse StorageKind;
52
- typedef MatrixXpr XprKind;
53
- enum {
54
- RowsAtCompileTime = Dynamic,
55
- ColsAtCompileTime = Dynamic,
56
- MaxRowsAtCompileTime = Dynamic,
57
- MaxColsAtCompileTime = Dynamic,
58
- Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59
- SupportedAccessPatterns = InnerRandomAccessPattern
60
- };
61
- };
62
-
63
- template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64
- struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65
- {
66
- typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
67
- typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68
- typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69
-
70
- typedef _Scalar Scalar;
71
- typedef Dense StorageKind;
72
- typedef _StorageIndex StorageIndex;
73
- typedef MatrixXpr XprKind;
74
-
75
- enum {
76
- RowsAtCompileTime = Dynamic,
77
- ColsAtCompileTime = 1,
78
- MaxRowsAtCompileTime = Dynamic,
79
- MaxColsAtCompileTime = 1,
80
- Flags = LvalueBit
81
- };
82
- };
83
-
84
- template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85
- struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86
- : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87
- {
88
- enum {
89
- Flags = 0
90
- };
91
- };
92
-
93
- } // end namespace internal
94
-
95
- template<typename _Scalar, int _Options, typename _StorageIndex>
96
- class SparseMatrix
97
- : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98
- {
99
- typedef SparseCompressedBase<SparseMatrix> Base;
100
- using Base::convert_index;
101
- friend class SparseVector<_Scalar,0,_StorageIndex>;
102
- template<typename, typename, typename, typename, typename>
103
- friend struct internal::Assignment;
104
- public:
105
- using Base::isCompressed;
106
- using Base::nonZeros;
107
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
108
- using Base::operator+=;
109
- using Base::operator-=;
110
-
111
- typedef MappedSparseMatrix<Scalar,Flags> Map;
112
- typedef Diagonal<SparseMatrix> DiagonalReturnType;
113
- typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
114
- typedef typename Base::InnerIterator InnerIterator;
115
- typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
116
-
117
-
118
- using Base::IsRowMajor;
119
- typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
120
- enum {
121
- Options = _Options
122
- };
123
-
124
- typedef typename Base::IndexVector IndexVector;
125
- typedef typename Base::ScalarVector ScalarVector;
126
- protected:
127
- typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
128
-
129
- Index m_outerSize;
130
- Index m_innerSize;
131
- StorageIndex* m_outerIndex;
132
- StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
133
- Storage m_data;
134
-
135
- public:
136
-
137
- /** \returns the number of rows of the matrix */
138
- inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
139
- /** \returns the number of columns of the matrix */
140
- inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
141
-
142
- /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */
143
- inline Index innerSize() const { return m_innerSize; }
144
- /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */
145
- inline Index outerSize() const { return m_outerSize; }
146
-
147
- /** \returns a const pointer to the array of values.
148
- * This function is aimed at interoperability with other libraries.
149
- * \sa innerIndexPtr(), outerIndexPtr() */
150
- inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
151
- /** \returns a non-const pointer to the array of values.
152
- * This function is aimed at interoperability with other libraries.
153
- * \sa innerIndexPtr(), outerIndexPtr() */
154
- inline Scalar* valuePtr() { return m_data.valuePtr(); }
155
-
156
- /** \returns a const pointer to the array of inner indices.
157
- * This function is aimed at interoperability with other libraries.
158
- * \sa valuePtr(), outerIndexPtr() */
159
- inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
160
- /** \returns a non-const pointer to the array of inner indices.
161
- * This function is aimed at interoperability with other libraries.
162
- * \sa valuePtr(), outerIndexPtr() */
163
- inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
164
-
165
- /** \returns a const pointer to the array of the starting positions of the inner vectors.
166
- * This function is aimed at interoperability with other libraries.
167
- * \sa valuePtr(), innerIndexPtr() */
168
- inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
169
- /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
170
- * This function is aimed at interoperability with other libraries.
171
- * \sa valuePtr(), innerIndexPtr() */
172
- inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
173
-
174
- /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
175
- * This function is aimed at interoperability with other libraries.
176
- * \warning it returns the null pointer 0 in compressed mode */
177
- inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
178
- /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
179
- * This function is aimed at interoperability with other libraries.
180
- * \warning it returns the null pointer 0 in compressed mode */
181
- inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
182
-
183
- /** \internal */
184
- inline Storage& data() { return m_data; }
185
- /** \internal */
186
- inline const Storage& data() const { return m_data; }
187
-
188
- /** \returns the value of the matrix at position \a i, \a j
189
- * This function returns Scalar(0) if the element is an explicit \em zero */
190
- inline Scalar coeff(Index row, Index col) const
191
- {
192
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
193
-
194
- const Index outer = IsRowMajor ? row : col;
195
- const Index inner = IsRowMajor ? col : row;
196
- Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
197
- return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
198
- }
199
-
200
- /** \returns a non-const reference to the value of the matrix at position \a i, \a j
201
- *
202
- * If the element does not exist then it is inserted via the insert(Index,Index) function
203
- * which itself turns the matrix into a non compressed form if that was not the case.
204
- *
205
- * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
206
- * function if the element does not already exist.
207
- */
208
- inline Scalar& coeffRef(Index row, Index col)
209
- {
210
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
211
-
212
- const Index outer = IsRowMajor ? row : col;
213
- const Index inner = IsRowMajor ? col : row;
214
-
215
- Index start = m_outerIndex[outer];
216
- Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
217
- eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
218
- if(end<=start)
219
- return insert(row,col);
220
- const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
221
- if((p<end) && (m_data.index(p)==inner))
222
- return m_data.value(p);
223
- else
224
- return insert(row,col);
225
- }
226
-
227
- /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
228
- * The non zero coefficient must \b not already exist.
229
- *
230
- * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
231
- * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
232
- * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
233
- * inserted by increasing outer-indices.
234
- *
235
- * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
236
- * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
237
- *
238
- * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
239
- * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
240
- *
241
- */
242
- Scalar& insert(Index row, Index col);
243
-
244
- public:
245
-
246
- /** Removes all non zeros but keep allocated memory
247
- *
248
- * This function does not free the currently allocated memory. To release as much as memory as possible,
249
- * call \code mat.data().squeeze(); \endcode after resizing it.
250
- *
251
- * \sa resize(Index,Index), data()
252
- */
253
- inline void setZero()
254
- {
255
- m_data.clear();
256
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
257
- if(m_innerNonZeros)
258
- memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
259
- }
260
-
261
- /** Preallocates \a reserveSize non zeros.
262
- *
263
- * Precondition: the matrix must be in compressed mode. */
264
- inline void reserve(Index reserveSize)
265
- {
266
- eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
267
- m_data.reserve(reserveSize);
268
- }
269
-
270
- #ifdef EIGEN_PARSED_BY_DOXYGEN
271
- /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
272
- *
273
- * This function turns the matrix in non-compressed mode.
274
- *
275
- * The type \c SizesType must expose the following interface:
276
- \code
277
- typedef value_type;
278
- const value_type& operator[](i) const;
279
- \endcode
280
- * for \c i in the [0,this->outerSize()[ range.
281
- * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
282
- */
283
- template<class SizesType>
284
- inline void reserve(const SizesType& reserveSizes);
285
- #else
286
- template<class SizesType>
287
- inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
288
- #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
289
- typename
290
- #endif
291
- SizesType::value_type())
292
- {
293
- EIGEN_UNUSED_VARIABLE(enableif);
294
- reserveInnerVectors(reserveSizes);
295
- }
296
- #endif // EIGEN_PARSED_BY_DOXYGEN
297
- protected:
298
- template<class SizesType>
299
- inline void reserveInnerVectors(const SizesType& reserveSizes)
300
- {
301
- if(isCompressed())
302
- {
303
- Index totalReserveSize = 0;
304
- // turn the matrix into non-compressed mode
305
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
306
- if (!m_innerNonZeros) internal::throw_std_bad_alloc();
307
-
308
- // temporarily use m_innerSizes to hold the new starting points.
309
- StorageIndex* newOuterIndex = m_innerNonZeros;
310
-
311
- StorageIndex count = 0;
312
- for(Index j=0; j<m_outerSize; ++j)
313
- {
314
- newOuterIndex[j] = count;
315
- count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
316
- totalReserveSize += reserveSizes[j];
317
- }
318
- m_data.reserve(totalReserveSize);
319
- StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
320
- for(Index j=m_outerSize-1; j>=0; --j)
321
- {
322
- StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
323
- for(Index i=innerNNZ-1; i>=0; --i)
324
- {
325
- m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
326
- m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
327
- }
328
- previousOuterIndex = m_outerIndex[j];
329
- m_outerIndex[j] = newOuterIndex[j];
330
- m_innerNonZeros[j] = innerNNZ;
331
- }
332
- if(m_outerSize>0)
333
- m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
334
-
335
- m_data.resize(m_outerIndex[m_outerSize]);
336
- }
337
- else
338
- {
339
- StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
340
- if (!newOuterIndex) internal::throw_std_bad_alloc();
341
-
342
- StorageIndex count = 0;
343
- for(Index j=0; j<m_outerSize; ++j)
344
- {
345
- newOuterIndex[j] = count;
346
- StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
347
- StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
348
- count += toReserve + m_innerNonZeros[j];
349
- }
350
- newOuterIndex[m_outerSize] = count;
351
-
352
- m_data.resize(count);
353
- for(Index j=m_outerSize-1; j>=0; --j)
354
- {
355
- Index offset = newOuterIndex[j] - m_outerIndex[j];
356
- if(offset>0)
357
- {
358
- StorageIndex innerNNZ = m_innerNonZeros[j];
359
- for(Index i=innerNNZ-1; i>=0; --i)
360
- {
361
- m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
362
- m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
363
- }
364
- }
365
- }
366
-
367
- std::swap(m_outerIndex, newOuterIndex);
368
- std::free(newOuterIndex);
369
- }
370
-
371
- }
372
- public:
373
-
374
- //--- low level purely coherent filling ---
375
-
376
- /** \internal
377
- * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
378
- * - the nonzero does not already exist
379
- * - the new coefficient is the last one according to the storage order
380
- *
381
- * Before filling a given inner vector you must call the statVec(Index) function.
382
- *
383
- * After an insertion session, you should call the finalize() function.
384
- *
385
- * \sa insert, insertBackByOuterInner, startVec */
386
- inline Scalar& insertBack(Index row, Index col)
387
- {
388
- return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
389
- }
390
-
391
- /** \internal
392
- * \sa insertBack, startVec */
393
- inline Scalar& insertBackByOuterInner(Index outer, Index inner)
394
- {
395
- eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396
- eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397
- Index p = m_outerIndex[outer+1];
398
- ++m_outerIndex[outer+1];
399
- m_data.append(Scalar(0), inner);
400
- return m_data.value(p);
401
- }
402
-
403
- /** \internal
404
- * \warning use it only if you know what you are doing */
405
- inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
406
- {
407
- Index p = m_outerIndex[outer+1];
408
- ++m_outerIndex[outer+1];
409
- m_data.append(Scalar(0), inner);
410
- return m_data.value(p);
411
- }
412
-
413
- /** \internal
414
- * \sa insertBack, insertBackByOuterInner */
415
- inline void startVec(Index outer)
416
- {
417
- eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418
- eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419
- m_outerIndex[outer+1] = m_outerIndex[outer];
420
- }
421
-
422
- /** \internal
423
- * Must be called after inserting a set of non zero entries using the low level compressed API.
424
- */
425
- inline void finalize()
426
- {
427
- if(isCompressed())
428
- {
429
- StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430
- Index i = m_outerSize;
431
- // find the last filled column
432
- while (i>=0 && m_outerIndex[i]==0)
433
- --i;
434
- ++i;
435
- while (i<=m_outerSize)
436
- {
437
- m_outerIndex[i] = size;
438
- ++i;
439
- }
440
- }
441
- }
442
-
443
- //---
444
-
445
- template<typename InputIterators>
446
- void setFromTriplets(const InputIterators& begin, const InputIterators& end);
447
-
448
- template<typename InputIterators,typename DupFunctor>
449
- void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
450
-
451
- void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
452
-
453
- template<typename DupFunctor>
454
- void collapseDuplicates(DupFunctor dup_func = DupFunctor());
455
-
456
- //---
457
-
458
- /** \internal
459
- * same as insert(Index,Index) except that the indices are given relative to the storage order */
460
- Scalar& insertByOuterInner(Index j, Index i)
461
- {
462
- return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
463
- }
464
-
465
- /** Turns the matrix into the \em compressed format.
466
- */
467
- void makeCompressed()
468
- {
469
- if(isCompressed())
470
- return;
471
-
472
- eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
473
-
474
- Index oldStart = m_outerIndex[1];
475
- m_outerIndex[1] = m_innerNonZeros[0];
476
- for(Index j=1; j<m_outerSize; ++j)
477
- {
478
- Index nextOldStart = m_outerIndex[j+1];
479
- Index offset = oldStart - m_outerIndex[j];
480
- if(offset>0)
481
- {
482
- for(Index k=0; k<m_innerNonZeros[j]; ++k)
483
- {
484
- m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485
- m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486
- }
487
- }
488
- m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
489
- oldStart = nextOldStart;
490
- }
491
- std::free(m_innerNonZeros);
492
- m_innerNonZeros = 0;
493
- m_data.resize(m_outerIndex[m_outerSize]);
494
- m_data.squeeze();
495
- }
496
-
497
- /** Turns the matrix into the uncompressed mode */
498
- void uncompress()
499
- {
500
- if(m_innerNonZeros != 0)
501
- return;
502
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503
- for (Index i = 0; i < m_outerSize; i++)
504
- {
505
- m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
506
- }
507
- }
508
-
509
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
510
- void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511
- {
512
- prune(default_prunning_func(reference,epsilon));
513
- }
514
-
515
- /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
516
- * The functor type \a KeepFunc must implement the following function:
517
- * \code
518
- * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
519
- * \endcode
520
- * \sa prune(Scalar,RealScalar)
521
- */
522
- template<typename KeepFunc>
523
- void prune(const KeepFunc& keep = KeepFunc())
524
- {
525
- // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
526
- makeCompressed();
527
-
528
- StorageIndex k = 0;
529
- for(Index j=0; j<m_outerSize; ++j)
530
- {
531
- Index previousStart = m_outerIndex[j];
532
- m_outerIndex[j] = k;
533
- Index end = m_outerIndex[j+1];
534
- for(Index i=previousStart; i<end; ++i)
535
- {
536
- if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537
- {
538
- m_data.value(k) = m_data.value(i);
539
- m_data.index(k) = m_data.index(i);
540
- ++k;
541
- }
542
- }
543
- }
544
- m_outerIndex[m_outerSize] = k;
545
- m_data.resize(k,0);
546
- }
547
-
548
- /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
549
- *
550
- * If the sizes of the matrix are decreased, then the matrix is turned to \b uncompressed-mode
551
- * and the storage of the out of bounds coefficients is kept and reserved.
552
- * Call makeCompressed() to pack the entries and squeeze extra memory.
553
- *
554
- * \sa reserve(), setZero(), makeCompressed()
555
- */
556
- void conservativeResize(Index rows, Index cols)
557
- {
558
- // No change
559
- if (this->rows() == rows && this->cols() == cols) return;
560
-
561
- // If one dimension is null, then there is nothing to be preserved
562
- if(rows==0 || cols==0) return resize(rows,cols);
563
-
564
- Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565
- Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566
- StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567
-
568
- // Deals with inner non zeros
569
- if (m_innerNonZeros)
570
- {
571
- // Resize m_innerNonZeros
572
- StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573
- if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574
- m_innerNonZeros = newInnerNonZeros;
575
-
576
- for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577
- m_innerNonZeros[i] = 0;
578
- }
579
- else if (innerChange < 0)
580
- {
581
- // Inner size decreased: allocate a new m_innerNonZeros
582
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
583
- if (!m_innerNonZeros) internal::throw_std_bad_alloc();
584
- for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585
- m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
586
- for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
587
- m_innerNonZeros[i] = 0;
588
- }
589
-
590
- // Change the m_innerNonZeros in case of a decrease of inner size
591
- if (m_innerNonZeros && innerChange < 0)
592
- {
593
- for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
594
- {
595
- StorageIndex &n = m_innerNonZeros[i];
596
- StorageIndex start = m_outerIndex[i];
597
- while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
598
- }
599
- }
600
-
601
- m_innerSize = newInnerSize;
602
-
603
- // Re-allocate outer index structure if necessary
604
- if (outerChange == 0)
605
- return;
606
-
607
- StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
608
- if (!newOuterIndex) internal::throw_std_bad_alloc();
609
- m_outerIndex = newOuterIndex;
610
- if (outerChange > 0)
611
- {
612
- StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
613
- for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
614
- m_outerIndex[i] = lastIdx;
615
- }
616
- m_outerSize += outerChange;
617
- }
618
-
619
- /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
620
- *
621
- * This function does not free the currently allocated memory. To release as much as memory as possible,
622
- * call \code mat.data().squeeze(); \endcode after resizing it.
623
- *
624
- * \sa reserve(), setZero()
625
- */
626
- void resize(Index rows, Index cols)
627
- {
628
- const Index outerSize = IsRowMajor ? rows : cols;
629
- m_innerSize = IsRowMajor ? cols : rows;
630
- m_data.clear();
631
- if (m_outerSize != outerSize || m_outerSize==0)
632
- {
633
- std::free(m_outerIndex);
634
- m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
635
- if (!m_outerIndex) internal::throw_std_bad_alloc();
636
-
637
- m_outerSize = outerSize;
638
- }
639
- if(m_innerNonZeros)
640
- {
641
- std::free(m_innerNonZeros);
642
- m_innerNonZeros = 0;
643
- }
644
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
645
- }
646
-
647
- /** \internal
648
- * Resize the nonzero vector to \a size */
649
- void resizeNonZeros(Index size)
650
- {
651
- m_data.resize(size);
652
- }
653
-
654
- /** \returns a const expression of the diagonal coefficients. */
655
- const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
656
-
657
- /** \returns a read-write expression of the diagonal coefficients.
658
- * \warning If the diagonal entries are written, then all diagonal
659
- * entries \b must already exist, otherwise an assertion will be raised.
660
- */
661
- DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
662
-
663
- /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
664
- inline SparseMatrix()
665
- : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
666
- {
667
- check_template_parameters();
668
- resize(0, 0);
669
- }
670
-
671
- /** Constructs a \a rows \c x \a cols empty matrix */
672
- inline SparseMatrix(Index rows, Index cols)
673
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674
- {
675
- check_template_parameters();
676
- resize(rows, cols);
677
- }
678
-
679
- /** Constructs a sparse matrix from the sparse expression \a other */
680
- template<typename OtherDerived>
681
- inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
682
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
683
- {
684
- EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
685
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686
- check_template_parameters();
687
- const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688
- if (needToTranspose)
689
- *this = other.derived();
690
- else
691
- {
692
- #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693
- EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694
- #endif
695
- internal::call_assignment_no_alias(*this, other.derived());
696
- }
697
- }
698
-
699
- /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
700
- template<typename OtherDerived, unsigned int UpLo>
701
- inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
702
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703
- {
704
- check_template_parameters();
705
- Base::operator=(other);
706
- }
707
-
708
- /** Copy constructor (it performs a deep copy) */
709
- inline SparseMatrix(const SparseMatrix& other)
710
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711
- {
712
- check_template_parameters();
713
- *this = other.derived();
714
- }
715
-
716
- /** \brief Copy constructor with in-place evaluation */
717
- template<typename OtherDerived>
718
- SparseMatrix(const ReturnByValue<OtherDerived>& other)
719
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720
- {
721
- check_template_parameters();
722
- initAssignment(other);
723
- other.evalTo(*this);
724
- }
725
-
726
- /** \brief Copy constructor with in-place evaluation */
727
- template<typename OtherDerived>
728
- explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
729
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
730
- {
731
- check_template_parameters();
732
- *this = other.derived();
733
- }
734
-
735
- /** Swaps the content of two sparse matrices of the same type.
736
- * This is a fast operation that simply swaps the underlying pointers and parameters. */
737
- inline void swap(SparseMatrix& other)
738
- {
739
- //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
740
- std::swap(m_outerIndex, other.m_outerIndex);
741
- std::swap(m_innerSize, other.m_innerSize);
742
- std::swap(m_outerSize, other.m_outerSize);
743
- std::swap(m_innerNonZeros, other.m_innerNonZeros);
744
- m_data.swap(other.m_data);
745
- }
746
-
747
- /** Sets *this to the identity matrix.
748
- * This function also turns the matrix into compressed mode, and drop any reserved memory. */
749
- inline void setIdentity()
750
- {
751
- eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752
- this->m_data.resize(rows());
753
- Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754
- Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755
- Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
756
- std::free(m_innerNonZeros);
757
- m_innerNonZeros = 0;
758
- }
759
- inline SparseMatrix& operator=(const SparseMatrix& other)
760
- {
761
- if (other.isRValue())
762
- {
763
- swap(other.const_cast_derived());
764
- }
765
- else if(this!=&other)
766
- {
767
- #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
768
- EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
769
- #endif
770
- initAssignment(other);
771
- if(other.isCompressed())
772
- {
773
- internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
774
- m_data = other.m_data;
775
- }
776
- else
777
- {
778
- Base::operator=(other);
779
- }
780
- }
781
- return *this;
782
- }
783
-
784
- #ifndef EIGEN_PARSED_BY_DOXYGEN
785
- template<typename OtherDerived>
786
- inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
787
- { return Base::operator=(other.derived()); }
788
-
789
- template<typename Lhs, typename Rhs>
790
- inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
791
- #endif // EIGEN_PARSED_BY_DOXYGEN
792
-
793
- template<typename OtherDerived>
794
- EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
795
-
796
- friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
797
- {
798
- EIGEN_DBG_SPARSE(
799
- s << "Nonzero entries:\n";
800
- if(m.isCompressed())
801
- {
802
- for (Index i=0; i<m.nonZeros(); ++i)
803
- s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
804
- }
805
- else
806
- {
807
- for (Index i=0; i<m.outerSize(); ++i)
808
- {
809
- Index p = m.m_outerIndex[i];
810
- Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
811
- Index k=p;
812
- for (; k<pe; ++k) {
813
- s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
814
- }
815
- for (; k<m.m_outerIndex[i+1]; ++k) {
816
- s << "(_,_) ";
817
- }
818
- }
819
- }
820
- s << std::endl;
821
- s << std::endl;
822
- s << "Outer pointers:\n";
823
- for (Index i=0; i<m.outerSize(); ++i) {
824
- s << m.m_outerIndex[i] << " ";
825
- }
826
- s << " $" << std::endl;
827
- if(!m.isCompressed())
828
- {
829
- s << "Inner non zeros:\n";
830
- for (Index i=0; i<m.outerSize(); ++i) {
831
- s << m.m_innerNonZeros[i] << " ";
832
- }
833
- s << " $" << std::endl;
834
- }
835
- s << std::endl;
836
- );
837
- s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
838
- return s;
839
- }
840
-
841
- /** Destructor */
842
- inline ~SparseMatrix()
843
- {
844
- std::free(m_outerIndex);
845
- std::free(m_innerNonZeros);
846
- }
847
-
848
- /** Overloaded for performance */
849
- Scalar sum() const;
850
-
851
- # ifdef EIGEN_SPARSEMATRIX_PLUGIN
852
- # include EIGEN_SPARSEMATRIX_PLUGIN
853
- # endif
854
-
855
- protected:
856
-
857
- template<typename Other>
858
- void initAssignment(const Other& other)
859
- {
860
- resize(other.rows(), other.cols());
861
- if(m_innerNonZeros)
862
- {
863
- std::free(m_innerNonZeros);
864
- m_innerNonZeros = 0;
865
- }
866
- }
867
-
868
- /** \internal
869
- * \sa insert(Index,Index) */
870
- EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
871
-
872
- /** \internal
873
- * A vector object that is equal to 0 everywhere but v at the position i */
874
- class SingletonVector
875
- {
876
- StorageIndex m_index;
877
- StorageIndex m_value;
878
- public:
879
- typedef StorageIndex value_type;
880
- SingletonVector(Index i, Index v)
881
- : m_index(convert_index(i)), m_value(convert_index(v))
882
- {}
883
-
884
- StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
885
- };
886
-
887
- /** \internal
888
- * \sa insert(Index,Index) */
889
- EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
890
-
891
- public:
892
- /** \internal
893
- * \sa insert(Index,Index) */
894
- EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
895
- {
896
- const Index outer = IsRowMajor ? row : col;
897
- const Index inner = IsRowMajor ? col : row;
898
-
899
- eigen_assert(!isCompressed());
900
- eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
901
-
902
- Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
903
- m_data.index(p) = convert_index(inner);
904
- return (m_data.value(p) = Scalar(0));
905
- }
906
- protected:
907
- struct IndexPosPair {
908
- IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
909
- Index i;
910
- Index p;
911
- };
912
-
913
- /** \internal assign \a diagXpr to the diagonal of \c *this
914
- * There are different strategies:
915
- * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can work treat *this as a dense vector expression.
916
- * 2 - otherwise, for each diagonal coeff,
917
- * 2.a - if it already exists, then we update it,
918
- * 2.b - otherwise, if *this is uncompressed and that the current inner-vector has empty room for at least 1 element, then we perform an in-place insertion.
919
- * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
920
- * 3 - at the end, if some entries failed to be inserted in-place, then we alloc a new buffer, copy each chunk at the right position, and insert the new elements.
921
- *
922
- * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
923
- * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
924
- * then it *might* be better to disable case 2.b since they will have to be copied anyway.
925
- */
926
- template<typename DiagXpr, typename Func>
927
- void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
928
- {
929
- Index n = diagXpr.size();
930
-
931
- const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
932
- if(overwrite)
933
- {
934
- if((this->rows()!=n) || (this->cols()!=n))
935
- this->resize(n, n);
936
- }
937
-
938
- if(m_data.size()==0 || overwrite)
939
- {
940
- typedef Array<StorageIndex,Dynamic,1> ArrayXI;
941
- this->makeCompressed();
942
- this->resizeNonZeros(n);
943
- Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
944
- Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
945
- Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
946
- values.setZero();
947
- internal::call_assignment_no_alias(values, diagXpr, assignFunc);
948
- }
949
- else
950
- {
951
- bool isComp = isCompressed();
952
- internal::evaluator<DiagXpr> diaEval(diagXpr);
953
- std::vector<IndexPosPair> newEntries;
954
-
955
- // 1 - try in-place update and record insertion failures
956
- for(Index i = 0; i<n; ++i)
957
- {
958
- internal::LowerBoundIndex lb = this->lower_bound(i,i);
959
- Index p = lb.value;
960
- if(lb.found)
961
- {
962
- // the coeff already exists
963
- assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
964
- }
965
- else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
966
- {
967
- // non compressed mode with local room for inserting one element
968
- m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
969
- m_innerNonZeros[i]++;
970
- m_data.value(p) = Scalar(0);
971
- m_data.index(p) = StorageIndex(i);
972
- assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
973
- }
974
- else
975
- {
976
- // defer insertion
977
- newEntries.push_back(IndexPosPair(i,p));
978
- }
979
- }
980
- // 2 - insert deferred entries
981
- Index n_entries = Index(newEntries.size());
982
- if(n_entries>0)
983
- {
984
- Storage newData(m_data.size()+n_entries);
985
- Index prev_p = 0;
986
- Index prev_i = 0;
987
- for(Index k=0; k<n_entries;++k)
988
- {
989
- Index i = newEntries[k].i;
990
- Index p = newEntries[k].p;
991
- internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
992
- internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
993
- for(Index j=prev_i;j<i;++j)
994
- m_outerIndex[j+1] += k;
995
- if(!isComp)
996
- m_innerNonZeros[i]++;
997
- prev_p = p;
998
- prev_i = i;
999
- newData.value(p+k) = Scalar(0);
1000
- newData.index(p+k) = StorageIndex(i);
1001
- assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002
- }
1003
- {
1004
- internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005
- internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006
- for(Index j=prev_i+1;j<=m_outerSize;++j)
1007
- m_outerIndex[j] += n_entries;
1008
- }
1009
- m_data.swap(newData);
1010
- }
1011
- }
1012
- }
1013
-
1014
- private:
1015
- static void check_template_parameters()
1016
- {
1017
- EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1018
- EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1019
- }
1020
-
1021
- struct default_prunning_func {
1022
- default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1023
- inline bool operator() (const Index&, const Index&, const Scalar& value) const
1024
- {
1025
- return !internal::isMuchSmallerThan(value, reference, epsilon);
1026
- }
1027
- Scalar reference;
1028
- RealScalar epsilon;
1029
- };
1030
- };
1031
-
1032
- namespace internal {
1033
-
1034
- template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1035
- void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1036
- {
1037
- enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1038
- typedef typename SparseMatrixType::Scalar Scalar;
1039
- typedef typename SparseMatrixType::StorageIndex StorageIndex;
1040
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
1041
-
1042
- if(begin!=end)
1043
- {
1044
- // pass 1: count the nnz per inner-vector
1045
- typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1046
- wi.setZero();
1047
- for(InputIterator it(begin); it!=end; ++it)
1048
- {
1049
- eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1050
- wi(IsRowMajor ? it->col() : it->row())++;
1051
- }
1052
-
1053
- // pass 2: insert all the elements into trMat
1054
- trMat.reserve(wi);
1055
- for(InputIterator it(begin); it!=end; ++it)
1056
- trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1057
-
1058
- // pass 3:
1059
- trMat.collapseDuplicates(dup_func);
1060
- }
1061
-
1062
- // pass 4: transposed copy -> implicit sorting
1063
- mat = trMat;
1064
- }
1065
-
1066
- }
1067
-
1068
-
1069
- /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
1070
- *
1071
- * A \em triplet is a tuple (i,j,value) defining a non-zero element.
1072
- * The input list of triplets does not have to be sorted, and can contains duplicated elements.
1073
- * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
1074
- * This is a \em O(n) operation, with \em n the number of triplet elements.
1075
- * The initial contents of \c *this is destroyed.
1076
- * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
1077
- * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
1078
- *
1079
- * The \a InputIterators value_type must provide the following interface:
1080
- * \code
1081
- * Scalar value() const; // the value
1082
- * Scalar row() const; // the row index i
1083
- * Scalar col() const; // the column index j
1084
- * \endcode
1085
- * See for instance the Eigen::Triplet template class.
1086
- *
1087
- * Here is a typical usage example:
1088
- * \code
1089
- typedef Triplet<double> T;
1090
- std::vector<T> tripletList;
1091
- tripletList.reserve(estimation_of_entries);
1092
- for(...)
1093
- {
1094
- // ...
1095
- tripletList.push_back(T(i,j,v_ij));
1096
- }
1097
- SparseMatrixType m(rows,cols);
1098
- m.setFromTriplets(tripletList.begin(), tripletList.end());
1099
- // m is ready to go!
1100
- * \endcode
1101
- *
1102
- * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
1103
- * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
1104
- * be explicitly stored into a std::vector for instance.
1105
- */
1106
- template<typename Scalar, int _Options, typename _StorageIndex>
1107
- template<typename InputIterators>
1108
- void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1109
- {
1110
- internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1111
- }
1112
-
1113
- /** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
1114
- * \code
1115
- * value = dup_func(OldValue, NewValue)
1116
- * \endcode
1117
- * Here is a C++11 example keeping the latest entry only:
1118
- * \code
1119
- * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
1120
- * \endcode
1121
- */
1122
- template<typename Scalar, int _Options, typename _StorageIndex>
1123
- template<typename InputIterators,typename DupFunctor>
1124
- void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1125
- {
1126
- internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1127
- }
1128
-
1129
- /** \internal */
1130
- template<typename Scalar, int _Options, typename _StorageIndex>
1131
- template<typename DupFunctor>
1132
- void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1133
- {
1134
- eigen_assert(!isCompressed());
1135
- // TODO, in practice we should be able to use m_innerNonZeros for that task
1136
- IndexVector wi(innerSize());
1137
- wi.fill(-1);
1138
- StorageIndex count = 0;
1139
- // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1140
- for(Index j=0; j<outerSize(); ++j)
1141
- {
1142
- StorageIndex start = count;
1143
- Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1144
- for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1145
- {
1146
- Index i = m_data.index(k);
1147
- if(wi(i)>=start)
1148
- {
1149
- // we already meet this entry => accumulate it
1150
- m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1151
- }
1152
- else
1153
- {
1154
- m_data.value(count) = m_data.value(k);
1155
- m_data.index(count) = m_data.index(k);
1156
- wi(i) = count;
1157
- ++count;
1158
- }
1159
- }
1160
- m_outerIndex[j] = start;
1161
- }
1162
- m_outerIndex[m_outerSize] = count;
1163
-
1164
- // turn the matrix into compressed form
1165
- std::free(m_innerNonZeros);
1166
- m_innerNonZeros = 0;
1167
- m_data.resize(m_outerIndex[m_outerSize]);
1168
- }
1169
-
1170
- template<typename Scalar, int _Options, typename _StorageIndex>
1171
- template<typename OtherDerived>
1172
- EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1173
- {
1174
- EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1175
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1176
-
1177
- #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1178
- EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1179
- #endif
1180
-
1181
- const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1182
- if (needToTranspose)
1183
- {
1184
- #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1185
- EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1186
- #endif
1187
- // two passes algorithm:
1188
- // 1 - compute the number of coeffs per dest inner vector
1189
- // 2 - do the actual copy/eval
1190
- // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1191
- typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1192
- typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1193
- typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1194
- OtherCopy otherCopy(other.derived());
1195
- OtherCopyEval otherCopyEval(otherCopy);
1196
-
1197
- SparseMatrix dest(other.rows(),other.cols());
1198
- Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1199
-
1200
- // pass 1
1201
- // FIXME the above copy could be merged with that pass
1202
- for (Index j=0; j<otherCopy.outerSize(); ++j)
1203
- for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1204
- ++dest.m_outerIndex[it.index()];
1205
-
1206
- // prefix sum
1207
- StorageIndex count = 0;
1208
- IndexVector positions(dest.outerSize());
1209
- for (Index j=0; j<dest.outerSize(); ++j)
1210
- {
1211
- StorageIndex tmp = dest.m_outerIndex[j];
1212
- dest.m_outerIndex[j] = count;
1213
- positions[j] = count;
1214
- count += tmp;
1215
- }
1216
- dest.m_outerIndex[dest.outerSize()] = count;
1217
- // alloc
1218
- dest.m_data.resize(count);
1219
- // pass 2
1220
- for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1221
- {
1222
- for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1223
- {
1224
- Index pos = positions[it.index()]++;
1225
- dest.m_data.index(pos) = j;
1226
- dest.m_data.value(pos) = it.value();
1227
- }
1228
- }
1229
- this->swap(dest);
1230
- return *this;
1231
- }
1232
- else
1233
- {
1234
- if(other.isRValue())
1235
- {
1236
- initAssignment(other.derived());
1237
- }
1238
- // there is no special optimization
1239
- return Base::operator=(other.derived());
1240
- }
1241
- }
1242
-
1243
- template<typename _Scalar, int _Options, typename _StorageIndex>
1244
- typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1245
- {
1246
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1247
-
1248
- const Index outer = IsRowMajor ? row : col;
1249
- const Index inner = IsRowMajor ? col : row;
1250
-
1251
- if(isCompressed())
1252
- {
1253
- if(nonZeros()==0)
1254
- {
1255
- // reserve space if not already done
1256
- if(m_data.allocatedSize()==0)
1257
- m_data.reserve(2*m_innerSize);
1258
-
1259
- // turn the matrix into non-compressed mode
1260
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1261
- if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1262
-
1263
- memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1264
-
1265
- // pack all inner-vectors to the end of the pre-allocated space
1266
- // and allocate the entire free-space to the first inner-vector
1267
- StorageIndex end = convert_index(m_data.allocatedSize());
1268
- for(Index j=1; j<=m_outerSize; ++j)
1269
- m_outerIndex[j] = end;
1270
- }
1271
- else
1272
- {
1273
- // turn the matrix into non-compressed mode
1274
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1275
- if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1276
- for(Index j=0; j<m_outerSize; ++j)
1277
- m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1278
- }
1279
- }
1280
-
1281
- // check whether we can do a fast "push back" insertion
1282
- Index data_end = m_data.allocatedSize();
1283
-
1284
- // First case: we are filling a new inner vector which is packed at the end.
1285
- // We assume that all remaining inner-vectors are also empty and packed to the end.
1286
- if(m_outerIndex[outer]==data_end)
1287
- {
1288
- eigen_internal_assert(m_innerNonZeros[outer]==0);
1289
-
1290
- // pack previous empty inner-vectors to end of the used-space
1291
- // and allocate the entire free-space to the current inner-vector.
1292
- StorageIndex p = convert_index(m_data.size());
1293
- Index j = outer;
1294
- while(j>=0 && m_innerNonZeros[j]==0)
1295
- m_outerIndex[j--] = p;
1296
-
1297
- // push back the new element
1298
- ++m_innerNonZeros[outer];
1299
- m_data.append(Scalar(0), inner);
1300
-
1301
- // check for reallocation
1302
- if(data_end != m_data.allocatedSize())
1303
- {
1304
- // m_data has been reallocated
1305
- // -> move remaining inner-vectors back to the end of the free-space
1306
- // so that the entire free-space is allocated to the current inner-vector.
1307
- eigen_internal_assert(data_end < m_data.allocatedSize());
1308
- StorageIndex new_end = convert_index(m_data.allocatedSize());
1309
- for(Index k=outer+1; k<=m_outerSize; ++k)
1310
- if(m_outerIndex[k]==data_end)
1311
- m_outerIndex[k] = new_end;
1312
- }
1313
- return m_data.value(p);
1314
- }
1315
-
1316
- // Second case: the next inner-vector is packed to the end
1317
- // and the current inner-vector end match the used-space.
1318
- if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1319
- {
1320
- eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1321
-
1322
- // add space for the new element
1323
- ++m_innerNonZeros[outer];
1324
- m_data.resize(m_data.size()+1);
1325
-
1326
- // check for reallocation
1327
- if(data_end != m_data.allocatedSize())
1328
- {
1329
- // m_data has been reallocated
1330
- // -> move remaining inner-vectors back to the end of the free-space
1331
- // so that the entire free-space is allocated to the current inner-vector.
1332
- eigen_internal_assert(data_end < m_data.allocatedSize());
1333
- StorageIndex new_end = convert_index(m_data.allocatedSize());
1334
- for(Index k=outer+1; k<=m_outerSize; ++k)
1335
- if(m_outerIndex[k]==data_end)
1336
- m_outerIndex[k] = new_end;
1337
- }
1338
-
1339
- // and insert it at the right position (sorted insertion)
1340
- Index startId = m_outerIndex[outer];
1341
- Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1342
- while ( (p > startId) && (m_data.index(p-1) > inner) )
1343
- {
1344
- m_data.index(p) = m_data.index(p-1);
1345
- m_data.value(p) = m_data.value(p-1);
1346
- --p;
1347
- }
1348
-
1349
- m_data.index(p) = convert_index(inner);
1350
- return (m_data.value(p) = Scalar(0));
1351
- }
1352
-
1353
- if(m_data.size() != m_data.allocatedSize())
1354
- {
1355
- // make sure the matrix is compatible to random un-compressed insertion:
1356
- m_data.resize(m_data.allocatedSize());
1357
- this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1358
- }
1359
-
1360
- return insertUncompressed(row,col);
1361
- }
1362
-
1363
- template<typename _Scalar, int _Options, typename _StorageIndex>
1364
- EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1365
- {
1366
- eigen_assert(!isCompressed());
1367
-
1368
- const Index outer = IsRowMajor ? row : col;
1369
- const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1370
-
1371
- Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1372
- StorageIndex innerNNZ = m_innerNonZeros[outer];
1373
- if(innerNNZ>=room)
1374
- {
1375
- // this inner vector is full, we need to reallocate the whole buffer :(
1376
- reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1377
- }
1378
-
1379
- Index startId = m_outerIndex[outer];
1380
- Index p = startId + m_innerNonZeros[outer];
1381
- while ( (p > startId) && (m_data.index(p-1) > inner) )
1382
- {
1383
- m_data.index(p) = m_data.index(p-1);
1384
- m_data.value(p) = m_data.value(p-1);
1385
- --p;
1386
- }
1387
- eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1388
-
1389
- m_innerNonZeros[outer]++;
1390
-
1391
- m_data.index(p) = inner;
1392
- return (m_data.value(p) = Scalar(0));
1393
- }
1394
-
1395
- template<typename _Scalar, int _Options, typename _StorageIndex>
1396
- EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1397
- {
1398
- eigen_assert(isCompressed());
1399
-
1400
- const Index outer = IsRowMajor ? row : col;
1401
- const Index inner = IsRowMajor ? col : row;
1402
-
1403
- Index previousOuter = outer;
1404
- if (m_outerIndex[outer+1]==0)
1405
- {
1406
- // we start a new inner vector
1407
- while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1408
- {
1409
- m_outerIndex[previousOuter] = convert_index(m_data.size());
1410
- --previousOuter;
1411
- }
1412
- m_outerIndex[outer+1] = m_outerIndex[outer];
1413
- }
1414
-
1415
- // here we have to handle the tricky case where the outerIndex array
1416
- // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1417
- // the 2nd inner vector...
1418
- bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1419
- && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1420
-
1421
- std::size_t startId = m_outerIndex[outer];
1422
- // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1423
- std::size_t p = m_outerIndex[outer+1];
1424
- ++m_outerIndex[outer+1];
1425
-
1426
- double reallocRatio = 1;
1427
- if (m_data.allocatedSize()<=m_data.size())
1428
- {
1429
- // if there is no preallocated memory, let's reserve a minimum of 32 elements
1430
- if (m_data.size()==0)
1431
- {
1432
- m_data.reserve(32);
1433
- }
1434
- else
1435
- {
1436
- // we need to reallocate the data, to reduce multiple reallocations
1437
- // we use a smart resize algorithm based on the current filling ratio
1438
- // in addition, we use double to avoid integers overflows
1439
- double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1440
- reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1441
- // furthermore we bound the realloc ratio to:
1442
- // 1) reduce multiple minor realloc when the matrix is almost filled
1443
- // 2) avoid to allocate too much memory when the matrix is almost empty
1444
- reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1445
- }
1446
- }
1447
- m_data.resize(m_data.size()+1,reallocRatio);
1448
-
1449
- if (!isLastVec)
1450
- {
1451
- if (previousOuter==-1)
1452
- {
1453
- // oops wrong guess.
1454
- // let's correct the outer offsets
1455
- for (Index k=0; k<=(outer+1); ++k)
1456
- m_outerIndex[k] = 0;
1457
- Index k=outer+1;
1458
- while(m_outerIndex[k]==0)
1459
- m_outerIndex[k++] = 1;
1460
- while (k<=m_outerSize && m_outerIndex[k]!=0)
1461
- m_outerIndex[k++]++;
1462
- p = 0;
1463
- --k;
1464
- k = m_outerIndex[k]-1;
1465
- while (k>0)
1466
- {
1467
- m_data.index(k) = m_data.index(k-1);
1468
- m_data.value(k) = m_data.value(k-1);
1469
- k--;
1470
- }
1471
- }
1472
- else
1473
- {
1474
- // we are not inserting into the last inner vec
1475
- // update outer indices:
1476
- Index j = outer+2;
1477
- while (j<=m_outerSize && m_outerIndex[j]!=0)
1478
- m_outerIndex[j++]++;
1479
- --j;
1480
- // shift data of last vecs:
1481
- Index k = m_outerIndex[j]-1;
1482
- while (k>=Index(p))
1483
- {
1484
- m_data.index(k) = m_data.index(k-1);
1485
- m_data.value(k) = m_data.value(k-1);
1486
- k--;
1487
- }
1488
- }
1489
- }
1490
-
1491
- while ( (p > startId) && (m_data.index(p-1) > inner) )
1492
- {
1493
- m_data.index(p) = m_data.index(p-1);
1494
- m_data.value(p) = m_data.value(p-1);
1495
- --p;
1496
- }
1497
-
1498
- m_data.index(p) = inner;
1499
- return (m_data.value(p) = Scalar(0));
1500
- }
1501
-
1502
- namespace internal {
1503
-
1504
- template<typename _Scalar, int _Options, typename _StorageIndex>
1505
- struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1506
- : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1507
- {
1508
- typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1509
- typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1510
- evaluator() : Base() {}
1511
- explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1512
- };
1513
-
1514
- }
1515
-
1516
- } // end namespace Eigen
1517
-
1518
- #endif // EIGEN_SPARSEMATRIX_H