tomoto 0.1.0

Files changed (420)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE.txt +22 -0
  4. data/README.md +123 -0
  5. data/ext/tomoto/ext.cpp +245 -0
  6. data/ext/tomoto/extconf.rb +28 -0
  7. data/lib/tomoto.rb +12 -0
  8. data/lib/tomoto/ct.rb +11 -0
  9. data/lib/tomoto/hdp.rb +11 -0
  10. data/lib/tomoto/lda.rb +67 -0
  11. data/lib/tomoto/version.rb +3 -0
  12. data/vendor/EigenRand/EigenRand/Core.h +1139 -0
  13. data/vendor/EigenRand/EigenRand/Dists/Basic.h +111 -0
  14. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +877 -0
  15. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +108 -0
  16. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +626 -0
  17. data/vendor/EigenRand/EigenRand/EigenRand +19 -0
  18. data/vendor/EigenRand/EigenRand/Macro.h +24 -0
  19. data/vendor/EigenRand/EigenRand/MorePacketMath.h +978 -0
  20. data/vendor/EigenRand/EigenRand/PacketFilter.h +286 -0
  21. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +624 -0
  22. data/vendor/EigenRand/EigenRand/RandUtils.h +413 -0
  23. data/vendor/EigenRand/EigenRand/doc.h +220 -0
  24. data/vendor/EigenRand/LICENSE +21 -0
  25. data/vendor/EigenRand/README.md +288 -0
  26. data/vendor/eigen/COPYING.BSD +26 -0
  27. data/vendor/eigen/COPYING.GPL +674 -0
  28. data/vendor/eigen/COPYING.LGPL +502 -0
  29. data/vendor/eigen/COPYING.MINPACK +52 -0
  30. data/vendor/eigen/COPYING.MPL2 +373 -0
  31. data/vendor/eigen/COPYING.README +18 -0
  32. data/vendor/eigen/Eigen/CMakeLists.txt +19 -0
  33. data/vendor/eigen/Eigen/Cholesky +46 -0
  34. data/vendor/eigen/Eigen/CholmodSupport +48 -0
  35. data/vendor/eigen/Eigen/Core +537 -0
  36. data/vendor/eigen/Eigen/Dense +7 -0
  37. data/vendor/eigen/Eigen/Eigen +2 -0
  38. data/vendor/eigen/Eigen/Eigenvalues +61 -0
  39. data/vendor/eigen/Eigen/Geometry +62 -0
  40. data/vendor/eigen/Eigen/Householder +30 -0
  41. data/vendor/eigen/Eigen/IterativeLinearSolvers +48 -0
  42. data/vendor/eigen/Eigen/Jacobi +33 -0
  43. data/vendor/eigen/Eigen/LU +50 -0
  44. data/vendor/eigen/Eigen/MetisSupport +35 -0
  45. data/vendor/eigen/Eigen/OrderingMethods +73 -0
  46. data/vendor/eigen/Eigen/PaStiXSupport +48 -0
  47. data/vendor/eigen/Eigen/PardisoSupport +35 -0
  48. data/vendor/eigen/Eigen/QR +51 -0
  49. data/vendor/eigen/Eigen/QtAlignedMalloc +40 -0
  50. data/vendor/eigen/Eigen/SPQRSupport +34 -0
  51. data/vendor/eigen/Eigen/SVD +51 -0
  52. data/vendor/eigen/Eigen/Sparse +36 -0
  53. data/vendor/eigen/Eigen/SparseCholesky +45 -0
  54. data/vendor/eigen/Eigen/SparseCore +69 -0
  55. data/vendor/eigen/Eigen/SparseLU +46 -0
  56. data/vendor/eigen/Eigen/SparseQR +37 -0
  57. data/vendor/eigen/Eigen/StdDeque +27 -0
  58. data/vendor/eigen/Eigen/StdList +26 -0
  59. data/vendor/eigen/Eigen/StdVector +27 -0
  60. data/vendor/eigen/Eigen/SuperLUSupport +64 -0
  61. data/vendor/eigen/Eigen/UmfPackSupport +40 -0
  62. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +673 -0
  63. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +542 -0
  64. data/vendor/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  65. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +639 -0
  66. data/vendor/eigen/Eigen/src/Core/Array.h +329 -0
  67. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +226 -0
  68. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +209 -0
  69. data/vendor/eigen/Eigen/src/Core/Assign.h +90 -0
  70. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +935 -0
  71. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +178 -0
  72. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +353 -0
  73. data/vendor/eigen/Eigen/src/Core/Block.h +452 -0
  74. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +164 -0
  75. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +160 -0
  76. data/vendor/eigen/Eigen/src/Core/ConditionEstimator.h +175 -0
  77. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +1688 -0
  78. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +127 -0
  79. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +184 -0
  80. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +866 -0
  81. data/vendor/eigen/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  82. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  83. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +128 -0
  84. data/vendor/eigen/Eigen/src/Core/DenseBase.h +611 -0
  85. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +681 -0
  86. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +570 -0
  87. data/vendor/eigen/Eigen/src/Core/Diagonal.h +260 -0
  88. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +343 -0
  89. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +28 -0
  90. data/vendor/eigen/Eigen/src/Core/Dot.h +318 -0
  91. data/vendor/eigen/Eigen/src/Core/EigenBase.h +159 -0
  92. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +146 -0
  93. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +155 -0
  94. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +455 -0
  95. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +593 -0
  96. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +187 -0
  97. data/vendor/eigen/Eigen/src/Core/IO.h +225 -0
  98. data/vendor/eigen/Eigen/src/Core/Inverse.h +118 -0
  99. data/vendor/eigen/Eigen/src/Core/Map.h +171 -0
  100. data/vendor/eigen/Eigen/src/Core/MapBase.h +303 -0
  101. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +1415 -0
  102. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +101 -0
  103. data/vendor/eigen/Eigen/src/Core/Matrix.h +459 -0
  104. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +529 -0
  105. data/vendor/eigen/Eigen/src/Core/NestByValue.h +110 -0
  106. data/vendor/eigen/Eigen/src/Core/NoAlias.h +108 -0
  107. data/vendor/eigen/Eigen/src/Core/NumTraits.h +248 -0
  108. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +633 -0
  109. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +1035 -0
  110. data/vendor/eigen/Eigen/src/Core/Product.h +186 -0
  111. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +1112 -0
  112. data/vendor/eigen/Eigen/src/Core/Random.h +182 -0
  113. data/vendor/eigen/Eigen/src/Core/Redux.h +505 -0
  114. data/vendor/eigen/Eigen/src/Core/Ref.h +283 -0
  115. data/vendor/eigen/Eigen/src/Core/Replicate.h +142 -0
  116. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +117 -0
  117. data/vendor/eigen/Eigen/src/Core/Reverse.h +211 -0
  118. data/vendor/eigen/Eigen/src/Core/Select.h +162 -0
  119. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +352 -0
  120. data/vendor/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  121. data/vendor/eigen/Eigen/src/Core/Solve.h +188 -0
  122. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +235 -0
  123. data/vendor/eigen/Eigen/src/Core/SolverBase.h +130 -0
  124. data/vendor/eigen/Eigen/src/Core/StableNorm.h +221 -0
  125. data/vendor/eigen/Eigen/src/Core/Stride.h +111 -0
  126. data/vendor/eigen/Eigen/src/Core/Swap.h +67 -0
  127. data/vendor/eigen/Eigen/src/Core/Transpose.h +403 -0
  128. data/vendor/eigen/Eigen/src/Core/Transpositions.h +407 -0
  129. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +983 -0
  130. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +96 -0
  131. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +695 -0
  132. data/vendor/eigen/Eigen/src/Core/Visitor.h +273 -0
  133. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +451 -0
  134. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +439 -0
  135. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +637 -0
  136. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +51 -0
  137. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +391 -0
  138. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1316 -0
  139. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +430 -0
  140. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +322 -0
  141. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +1061 -0
  142. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +103 -0
  143. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +674 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/CUDA/MathFunctions.h +91 -0
  145. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +333 -0
  146. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +1124 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +212 -0
  148. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +29 -0
  149. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +490 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +91 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +760 -0
  153. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +471 -0
  154. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +562 -0
  155. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +895 -0
  156. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +77 -0
  157. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +397 -0
  158. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +137 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +945 -0
  160. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +168 -0
  161. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +475 -0
  162. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +188 -0
  163. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +136 -0
  164. data/vendor/eigen/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  165. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +792 -0
  166. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2156 -0
  167. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +492 -0
  168. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +311 -0
  169. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  170. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +122 -0
  171. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +619 -0
  172. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  173. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +163 -0
  174. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +521 -0
  175. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +287 -0
  176. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +260 -0
  177. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  178. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  179. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +93 -0
  180. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +466 -0
  181. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +315 -0
  182. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  183. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  184. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +335 -0
  185. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +163 -0
  186. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +145 -0
  187. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +398 -0
  188. data/vendor/eigen/Eigen/src/Core/util/Constants.h +547 -0
  189. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +83 -0
  190. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +302 -0
  191. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +130 -0
  192. data/vendor/eigen/Eigen/src/Core/util/Macros.h +1001 -0
  193. data/vendor/eigen/Eigen/src/Core/util/Memory.h +993 -0
  194. data/vendor/eigen/Eigen/src/Core/util/Meta.h +534 -0
  195. data/vendor/eigen/Eigen/src/Core/util/NonMPL2.h +3 -0
  196. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +27 -0
  197. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +218 -0
  198. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +821 -0
  199. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  200. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +459 -0
  201. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  202. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  203. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  204. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  205. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  206. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  207. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +654 -0
  208. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +546 -0
  209. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  210. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +870 -0
  211. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  212. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +556 -0
  213. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +392 -0
  214. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +247 -0
  215. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +114 -0
  216. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +497 -0
  217. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +282 -0
  218. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +234 -0
  219. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +195 -0
  220. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +814 -0
  221. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +199 -0
  222. data/vendor/eigen/Eigen/src/Geometry/RotationBase.h +206 -0
  223. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +170 -0
  224. data/vendor/eigen/Eigen/src/Geometry/Transform.h +1542 -0
  225. data/vendor/eigen/Eigen/src/Geometry/Translation.h +208 -0
  226. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +166 -0
  227. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +161 -0
  228. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +103 -0
  229. data/vendor/eigen/Eigen/src/Householder/Householder.h +172 -0
  230. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +470 -0
  231. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  232. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +228 -0
  233. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +246 -0
  234. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +400 -0
  235. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +462 -0
  236. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +394 -0
  237. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +216 -0
  238. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +115 -0
  239. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +462 -0
  240. data/vendor/eigen/Eigen/src/LU/Determinant.h +101 -0
  241. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +891 -0
  242. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +415 -0
  243. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +611 -0
  244. data/vendor/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  245. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +338 -0
  246. data/vendor/eigen/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  247. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +445 -0
  248. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +1843 -0
  249. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +157 -0
  250. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  251. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +543 -0
  252. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +653 -0
  253. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  254. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +562 -0
  255. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +676 -0
  256. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +409 -0
  257. data/vendor/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  258. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +313 -0
  259. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +1246 -0
  260. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +804 -0
  261. data/vendor/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  262. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +315 -0
  263. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  264. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +689 -0
  265. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +199 -0
  266. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +377 -0
  267. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +258 -0
  268. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  269. data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  270. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +216 -0
  271. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +603 -0
  272. data/vendor/eigen/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  273. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +341 -0
  274. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +726 -0
  275. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +148 -0
  276. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +320 -0
  277. data/vendor/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  278. data/vendor/eigen/Eigen/src/SparseCore/SparseDot.h +98 -0
  279. data/vendor/eigen/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  280. data/vendor/eigen/Eigen/src/SparseCore/SparseMap.h +305 -0
  281. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +1403 -0
  282. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +405 -0
  283. data/vendor/eigen/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  284. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +169 -0
  285. data/vendor/eigen/Eigen/src/SparseCore/SparseRedux.h +49 -0
  286. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +397 -0
  287. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +656 -0
  288. data/vendor/eigen/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  289. data/vendor/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  290. data/vendor/eigen/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  291. data/vendor/eigen/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  292. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +178 -0
  293. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +478 -0
  294. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +253 -0
  295. data/vendor/eigen/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  296. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +773 -0
  297. data/vendor/eigen/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  298. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  299. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  300. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +301 -0
  301. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  302. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  303. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  304. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  305. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  306. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  307. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  308. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  309. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  310. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  311. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  312. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  313. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +745 -0
  314. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +126 -0
  315. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +106 -0
  316. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +131 -0
  317. data/vendor/eigen/Eigen/src/StlSupport/details.h +84 -0
  318. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +1027 -0
  319. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +506 -0
  320. data/vendor/eigen/Eigen/src/misc/Image.h +82 -0
  321. data/vendor/eigen/Eigen/src/misc/Kernel.h +79 -0
  322. data/vendor/eigen/Eigen/src/misc/RealSvd2x2.h +55 -0
  323. data/vendor/eigen/Eigen/src/misc/blas.h +440 -0
  324. data/vendor/eigen/Eigen/src/misc/lapack.h +152 -0
  325. data/vendor/eigen/Eigen/src/misc/lapacke.h +16291 -0
  326. data/vendor/eigen/Eigen/src/misc/lapacke_mangling.h +17 -0
  327. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +332 -0
  328. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +552 -0
  329. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +1058 -0
  330. data/vendor/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  331. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +163 -0
  332. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  333. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +85 -0
  334. data/vendor/eigen/README.md +3 -0
  335. data/vendor/eigen/bench/README.txt +55 -0
  336. data/vendor/eigen/bench/btl/COPYING +340 -0
  337. data/vendor/eigen/bench/btl/README +154 -0
  338. data/vendor/eigen/bench/tensors/README +21 -0
  339. data/vendor/eigen/blas/README.txt +6 -0
  340. data/vendor/eigen/demos/mandelbrot/README +10 -0
  341. data/vendor/eigen/demos/mix_eigen_and_c/README +9 -0
  342. data/vendor/eigen/demos/opengl/README +13 -0
  343. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +1760 -0
  344. data/vendor/eigen/unsupported/README.txt +50 -0
  345. data/vendor/tomotopy/LICENSE +21 -0
  346. data/vendor/tomotopy/README.kr.rst +375 -0
  347. data/vendor/tomotopy/README.rst +382 -0
  348. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +362 -0
  349. data/vendor/tomotopy/src/Labeling/FoRelevance.h +88 -0
  350. data/vendor/tomotopy/src/Labeling/Labeler.h +50 -0
  351. data/vendor/tomotopy/src/TopicModel/CT.h +37 -0
  352. data/vendor/tomotopy/src/TopicModel/CTModel.cpp +13 -0
  353. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +293 -0
  354. data/vendor/tomotopy/src/TopicModel/DMR.h +51 -0
  355. data/vendor/tomotopy/src/TopicModel/DMRModel.cpp +13 -0
  356. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +374 -0
  357. data/vendor/tomotopy/src/TopicModel/DT.h +65 -0
  358. data/vendor/tomotopy/src/TopicModel/DTM.h +22 -0
  359. data/vendor/tomotopy/src/TopicModel/DTModel.cpp +15 -0
  360. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +572 -0
  361. data/vendor/tomotopy/src/TopicModel/GDMR.h +37 -0
  362. data/vendor/tomotopy/src/TopicModel/GDMRModel.cpp +14 -0
  363. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +485 -0
  364. data/vendor/tomotopy/src/TopicModel/HDP.h +74 -0
  365. data/vendor/tomotopy/src/TopicModel/HDPModel.cpp +13 -0
  366. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +592 -0
  367. data/vendor/tomotopy/src/TopicModel/HLDA.h +40 -0
  368. data/vendor/tomotopy/src/TopicModel/HLDAModel.cpp +13 -0
  369. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +681 -0
  370. data/vendor/tomotopy/src/TopicModel/HPA.h +27 -0
  371. data/vendor/tomotopy/src/TopicModel/HPAModel.cpp +21 -0
  372. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +588 -0
  373. data/vendor/tomotopy/src/TopicModel/LDA.h +144 -0
  374. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +442 -0
  375. data/vendor/tomotopy/src/TopicModel/LDAModel.cpp +13 -0
  376. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +1058 -0
  377. data/vendor/tomotopy/src/TopicModel/LLDA.h +45 -0
  378. data/vendor/tomotopy/src/TopicModel/LLDAModel.cpp +13 -0
  379. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +203 -0
  380. data/vendor/tomotopy/src/TopicModel/MGLDA.h +63 -0
  381. data/vendor/tomotopy/src/TopicModel/MGLDAModel.cpp +17 -0
  382. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +558 -0
  383. data/vendor/tomotopy/src/TopicModel/PA.h +43 -0
  384. data/vendor/tomotopy/src/TopicModel/PAModel.cpp +13 -0
  385. data/vendor/tomotopy/src/TopicModel/PAModel.hpp +467 -0
  386. data/vendor/tomotopy/src/TopicModel/PLDA.h +17 -0
  387. data/vendor/tomotopy/src/TopicModel/PLDAModel.cpp +13 -0
  388. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +214 -0
  389. data/vendor/tomotopy/src/TopicModel/SLDA.h +54 -0
  390. data/vendor/tomotopy/src/TopicModel/SLDAModel.cpp +17 -0
  391. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +456 -0
  392. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +692 -0
  393. data/vendor/tomotopy/src/Utils/AliasMethod.hpp +169 -0
  394. data/vendor/tomotopy/src/Utils/Dictionary.h +80 -0
  395. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +181 -0
  396. data/vendor/tomotopy/src/Utils/LBFGS.h +202 -0
  397. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBacktracking.h +120 -0
  398. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBracketing.h +122 -0
  399. data/vendor/tomotopy/src/Utils/LBFGS/Param.h +213 -0
  400. data/vendor/tomotopy/src/Utils/LUT.hpp +82 -0
  401. data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +69 -0
  402. data/vendor/tomotopy/src/Utils/PolyaGamma.hpp +200 -0
  403. data/vendor/tomotopy/src/Utils/PolyaGammaHybrid.hpp +672 -0
  404. data/vendor/tomotopy/src/Utils/ThreadPool.hpp +150 -0
  405. data/vendor/tomotopy/src/Utils/Trie.hpp +220 -0
  406. data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +94 -0
  407. data/vendor/tomotopy/src/Utils/Utils.hpp +337 -0
  408. data/vendor/tomotopy/src/Utils/avx_gamma.h +46 -0
  409. data/vendor/tomotopy/src/Utils/avx_mathfun.h +736 -0
  410. data/vendor/tomotopy/src/Utils/exception.h +28 -0
  411. data/vendor/tomotopy/src/Utils/math.h +281 -0
  412. data/vendor/tomotopy/src/Utils/rtnorm.hpp +2690 -0
  413. data/vendor/tomotopy/src/Utils/sample.hpp +192 -0
  414. data/vendor/tomotopy/src/Utils/serializer.hpp +695 -0
  415. data/vendor/tomotopy/src/Utils/slp.hpp +131 -0
  416. data/vendor/tomotopy/src/Utils/sse_gamma.h +48 -0
  417. data/vendor/tomotopy/src/Utils/sse_mathfun.h +710 -0
  418. data/vendor/tomotopy/src/Utils/text.hpp +49 -0
  419. data/vendor/tomotopy/src/Utils/tvector.hpp +543 -0
  420. metadata +531 -0
data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -0,0 +1,67 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+ #define EIGEN_MAPPED_SPARSEMATRIX_H
+
+ namespace Eigen {
+
+ /** \deprecated Use Map<SparseMatrix<> >
+ * \class MappedSparseMatrix
+ *
+ * \brief Sparse matrix
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+ namespace internal {
+ template<typename _Scalar, int _Flags, typename _StorageIndex>
+ struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
+ {};
+ } // end namespace internal
+
+ template<typename _Scalar, int _Flags, typename _StorageIndex>
+ class MappedSparseMatrix
+ : public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
+ {
+ typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;
+
+ public:
+
+ typedef typename Base::StorageIndex StorageIndex;
+ typedef typename Base::Scalar Scalar;
+
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
+ : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
+ {}
+
+ /** Empty destructor */
+ inline ~MappedSparseMatrix() {}
+ };
+
+ namespace internal {
+
+ template<typename _Scalar, int _Options, typename _StorageIndex>
+ struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
+ : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
+ {
+ typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
+
+ evaluator() : Base() {}
+ explicit evaluator(const XprType &mat) : Base(mat) {}
+ };
+
+ }
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_MAPPED_SPARSEMATRIX_H
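
Note: as its doc comment says, MappedSparseMatrix is only a deprecated shim over Map<SparseMatrix<> >. Below is a minimal sketch of the non-deprecated interface, viewing existing compressed column-major (CSC) arrays in place; the array contents are purely illustrative.

#include <Eigen/Sparse>

int main() {
  // 3x3 matrix with 3 stored values in CSC form.
  double values[]       = {1.0, 2.0, 3.0};   // stored coefficients
  int    innerIndices[] = {0, 2, 1};         // row index of each stored value
  int    outerIndex[]   = {0, 1, 2, 3};      // where each column starts (cols + 1 entries)

  // Same argument order the shim above forwards to its base:
  // rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr (innerNonZeroPtr omitted).
  Eigen::Map<Eigen::SparseMatrix<double> > view(3, 3, 3, outerIndex, innerIndices, values);

  return view.nonZeros() == 3 ? 0 : 1;
}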
data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h
@@ -0,0 +1,216 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_SPARSEASSIGN_H
+ #define EIGEN_SPARSEASSIGN_H
+
+ namespace Eigen {
+
+ template<typename Derived>
+ template<typename OtherDerived>
+ Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
+ {
+ internal::call_assignment_no_alias(derived(), other.derived());
+ return derived();
+ }
+
+ template<typename Derived>
+ template<typename OtherDerived>
+ Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ // TODO use the evaluator mechanism
+ other.evalTo(derived());
+ return derived();
+ }
+
+ template<typename Derived>
+ template<typename OtherDerived>
+ inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine
+ internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
+ ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
+ return derived();
+ }
+
+ template<typename Derived>
+ inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
+ {
+ internal::call_assignment_no_alias(derived(), other.derived());
+ return derived();
+ }
+
+ namespace internal {
+
+ template<>
+ struct storage_kind_to_evaluator_kind<Sparse> {
+ typedef IteratorBased Kind;
+ };
+
+ template<>
+ struct storage_kind_to_shape<Sparse> {
+ typedef SparseShape Shape;
+ };
+
+ struct Sparse2Sparse {};
+ struct Sparse2Dense {};
+
+ template<> struct AssignmentKind<SparseShape, SparseShape> { typedef Sparse2Sparse Kind; };
+ template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
+ template<> struct AssignmentKind<DenseShape, SparseShape> { typedef Sparse2Dense Kind; };
+ template<> struct AssignmentKind<DenseShape, SparseTriangularShape> { typedef Sparse2Dense Kind; };
+
+
+ template<typename DstXprType, typename SrcXprType>
+ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
+ {
+ typedef typename DstXprType::Scalar Scalar;
+ typedef internal::evaluator<DstXprType> DstEvaluatorType;
+ typedef internal::evaluator<SrcXprType> SrcEvaluatorType;
+
+ SrcEvaluatorType srcEvaluator(src);
+
+ const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
+ const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
+ if ((!transpose) && src.isRValue())
+ {
+ // eval without temporary
+ dst.resize(src.rows(), src.cols());
+ dst.setZero();
+ dst.reserve((std::max)(src.rows(),src.cols())*2);
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ {
+ dst.startVec(j);
+ for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+ {
+ Scalar v = it.value();
+ dst.insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ dst.finalize();
+ }
+ else
+ {
+ // eval through a temporary
+ eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };
+
+
+ DstXprType temp(src.rows(), src.cols());
+
+ temp.reserve((std::max)(src.rows(),src.cols())*2);
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+ {
+ Scalar v = it.value();
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ dst = temp.markAsRValue();
+ }
+ }
+
+ // Generic Sparse to Sparse assignment
+ template< typename DstXprType, typename SrcXprType, typename Functor>
+ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
+ {
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ {
+ assign_sparse_to_sparse(dst.derived(), src.derived());
+ }
+ };
+
+ // Generic Sparse to Dense assignment
+ template< typename DstXprType, typename SrcXprType, typename Functor>
+ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
+ {
+ static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
+ {
+ if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
+ dst.setZero();
+
+ internal::evaluator<SrcXprType> srcEval(src);
+ resize_if_allowed(dst, src, func);
+ internal::evaluator<DstXprType> dstEval(dst);
+
+ const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
+ func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
+ }
+ };
+
+ // Specialization for "dst = dec.solve(rhs)"
+ // NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
+ template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
+ struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
+ {
+ typedef Solve<DecType,RhsType> SrcXprType;
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+ {
+ Index dstRows = src.rows();
+ Index dstCols = src.cols();
+ if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+ dst.resize(dstRows, dstCols);
+
+ src.dec()._solve_impl(src.rhs(), dst);
+ }
+ };
+
+ struct Diagonal2Sparse {};
+
+ template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };
+
+ template< typename DstXprType, typename SrcXprType, typename Functor>
+ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
+ {
+ typedef typename DstXprType::StorageIndex StorageIndex;
+ typedef typename DstXprType::Scalar Scalar;
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ typedef Array<Scalar,Dynamic,1> ArrayXS;
+ template<int Options>
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ {
+ Index dstRows = src.rows();
+ Index dstCols = src.cols();
+ if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
+ dst.resize(dstRows, dstCols);
+
+ Index size = src.diagonal().size();
+ dst.makeCompressed();
+ dst.resizeNonZeros(size);
+ Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
+ Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
+ Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
+ }
+
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ {
+ dst.diagonal() = src.diagonal();
+ }
+
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.diagonal() += src.diagonal(); }
+
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.diagonal() -= src.diagonal(); }
+ };
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_SPARSEASSIGN_H
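
Note: the Sparse2Sparse and Sparse2Dense paths above are what back ordinary user-level assignments between sparse and dense objects. A small sketch, assuming the usual Eigen 3.3 public API as vendored here:

#include <Eigen/Dense>
#include <Eigen/Sparse>

int main() {
  Eigen::SparseMatrix<double> a(4, 4);   // column-major by default
  a.insert(0, 0) = 1.0;
  a.insert(2, 3) = 5.0;
  a.makeCompressed();

  // Sparse -> sparse copy across storage orders (the transposition itself is
  // handled by SparseMatrix::operator=, per the assertion in assign_sparse_to_sparse).
  Eigen::SparseMatrix<double, Eigen::RowMajor> b = a;

  // Sparse -> dense: the Sparse2Dense specialization zeroes the destination,
  // then writes only the stored coefficients.
  Eigen::MatrixXd d(4, 4);
  d = a;

  return (b.nonZeros() == 2 && d(2, 3) == 5.0) ? 0 : 1;
}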
data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h
@@ -0,0 +1,603 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_SPARSE_BLOCK_H
11
+ #define EIGEN_SPARSE_BLOCK_H
12
+
13
+ namespace Eigen {
14
+
15
+ // Subset of columns or rows
16
+ template<typename XprType, int BlockRows, int BlockCols>
17
+ class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
18
+ : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
19
+ {
20
+ typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
21
+ typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
22
+ public:
23
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
24
+ protected:
25
+ enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
26
+ typedef SparseMatrixBase<BlockType> Base;
27
+ using Base::convert_index;
28
+ public:
29
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
30
+
31
+ inline BlockImpl(XprType& xpr, Index i)
32
+ : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
33
+ {}
34
+
35
+ inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
36
+ : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
37
+ {}
38
+
39
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
40
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
41
+
42
+ Index nonZeros() const
43
+ {
44
+ typedef internal::evaluator<XprType> EvaluatorType;
45
+ EvaluatorType matEval(m_matrix);
46
+ Index nnz = 0;
47
+ Index end = m_outerStart + m_outerSize.value();
48
+ for(Index j=m_outerStart; j<end; ++j)
49
+ for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
50
+ ++nnz;
51
+ return nnz;
52
+ }
53
+
54
+ inline const Scalar coeff(Index row, Index col) const
55
+ {
56
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
57
+ }
58
+
59
+ inline const Scalar coeff(Index index) const
60
+ {
61
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
62
+ }
63
+
64
+ inline const XprType& nestedExpression() const { return m_matrix; }
65
+ inline XprType& nestedExpression() { return m_matrix; }
66
+ Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
67
+ Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
68
+ Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
69
+ Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
70
+
71
+ protected:
72
+
73
+ typename internal::ref_selector<XprType>::non_const_type m_matrix;
74
+ Index m_outerStart;
75
+ const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
76
+
77
+ protected:
78
+ // Disable assignment with clear error message.
79
+ // Note that simply removing operator= yields compilation errors with ICC+MSVC
80
+ template<typename T>
81
+ BlockImpl& operator=(const T&)
82
+ {
83
+ EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
84
+ return *this;
85
+ }
86
+ };
87
+
88
+
89
+ /***************************************************************************
90
+ * specialization for SparseMatrix
91
+ ***************************************************************************/
92
+
93
+ namespace internal {
94
+
95
+ template<typename SparseMatrixType, int BlockRows, int BlockCols>
96
+ class sparse_matrix_block_impl
97
+ : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
98
+ {
99
+ typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
100
+ typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
101
+ typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
102
+ using Base::convert_index;
103
+ public:
104
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
105
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
106
+ protected:
107
+ typedef typename Base::IndexVector IndexVector;
108
+ enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
109
+ public:
110
+
111
+ inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
112
+ : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
113
+ {}
114
+
115
+ inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
116
+ : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
117
+ {}
118
+
119
+ template<typename OtherDerived>
120
+ inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
121
+ {
122
+ typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
123
+ _NestedMatrixType& matrix = m_matrix;
124
+ // This assignment is slow if this vector set is not empty
125
+ // and/or it is not at the end of the nonzeros of the underlying matrix.
126
+
127
+ // 1 - eval to a temporary to avoid transposition and/or aliasing issues
128
+ Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
129
+ eigen_internal_assert(tmp.outerSize()==m_outerSize.value());
130
+
131
+ // 2 - let's check whether there is enough allocated memory
132
+ Index nnz = tmp.nonZeros();
133
+ Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
134
+ Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
135
+ Index block_size = end - start; // available room in the current block
136
+ Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
137
+
138
+ Index free_size = m_matrix.isCompressed()
139
+ ? Index(matrix.data().allocatedSize()) + block_size
140
+ : block_size;
141
+
142
+ Index tmp_start = tmp.outerIndexPtr()[0];
143
+
144
+ bool update_trailing_pointers = false;
145
+ if(nnz>free_size)
146
+ {
147
+ // realloc manually to reduce copies
148
+ typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
149
+
150
+ internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
151
+ internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
152
+
153
+ internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
154
+ internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
155
+
156
+ internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
157
+ internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
158
+
159
+ newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
160
+
161
+ matrix.data().swap(newdata);
162
+
163
+ update_trailing_pointers = true;
164
+ }
165
+ else
166
+ {
167
+ if(m_matrix.isCompressed())
168
+ {
169
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
170
+ matrix.data().resize(start + nnz + tail_size);
171
+
172
+ internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz);
173
+ internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);
174
+
175
+ update_trailing_pointers = true;
176
+ }
177
+
178
+ internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
179
+ internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
180
+ }
181
+
182
+ // update outer index pointers and innerNonZeros
183
+ if(IsVectorAtCompileTime)
184
+ {
185
+ if(!m_matrix.isCompressed())
186
+ matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
187
+ matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
188
+ }
189
+ else
190
+ {
191
+ StorageIndex p = StorageIndex(start);
192
+ for(Index k=0; k<m_outerSize.value(); ++k)
193
+ {
194
+ StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
195
+ if(!m_matrix.isCompressed())
196
+ matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
197
+ matrix.outerIndexPtr()[m_outerStart+k] = p;
198
+ p += nnz_k;
199
+ }
200
+ }
201
+
202
+ if(update_trailing_pointers)
203
+ {
204
+ StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
205
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
206
+ {
207
+ matrix.outerIndexPtr()[k] += offset;
208
+ }
209
+ }
210
+
211
+ return derived();
212
+ }
213
+
214
+ inline BlockType& operator=(const BlockType& other)
215
+ {
216
+ return operator=<BlockType>(other);
217
+ }
218
+
219
+ inline const Scalar* valuePtr() const
220
+ { return m_matrix.valuePtr(); }
221
+ inline Scalar* valuePtr()
222
+ { return m_matrix.valuePtr(); }
223
+
224
+ inline const StorageIndex* innerIndexPtr() const
225
+ { return m_matrix.innerIndexPtr(); }
226
+ inline StorageIndex* innerIndexPtr()
227
+ { return m_matrix.innerIndexPtr(); }
228
+
229
+ inline const StorageIndex* outerIndexPtr() const
230
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
231
+ inline StorageIndex* outerIndexPtr()
232
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
233
+
234
+ inline const StorageIndex* innerNonZeroPtr() const
235
+ { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
236
+ inline StorageIndex* innerNonZeroPtr()
237
+ { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
238
+
239
+ bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
240
+
241
+ inline Scalar& coeffRef(Index row, Index col)
242
+ {
243
+ return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
244
+ }
245
+
246
+ inline const Scalar coeff(Index row, Index col) const
247
+ {
248
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
249
+ }
250
+
251
+ inline const Scalar coeff(Index index) const
252
+ {
253
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
254
+ }
255
+
256
+ const Scalar& lastCoeff() const
257
+ {
258
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
259
+ eigen_assert(Base::nonZeros()>0);
260
+ if(m_matrix.isCompressed())
261
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
262
+ else
263
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
264
+ }
265
+
266
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
267
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
268
+
269
+ inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
270
+ inline SparseMatrixType& nestedExpression() { return m_matrix; }
271
+ Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
272
+ Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
273
+ Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
274
+ Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
275
+
276
+ protected:
277
+
278
+ typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
279
+ Index m_outerStart;
280
+ const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
281
+
282
+ };
283
+
284
+ } // namespace internal
285
+
286
+ template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
287
+ class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
288
+ : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
289
+ {
290
+ public:
291
+ typedef _StorageIndex StorageIndex;
292
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
293
+ typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
294
+ inline BlockImpl(SparseMatrixType& xpr, Index i)
295
+ : Base(xpr, i)
296
+ {}
297
+
298
+ inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
299
+ : Base(xpr, startRow, startCol, blockRows, blockCols)
300
+ {}
301
+
302
+ using Base::operator=;
303
+ };
304
+
305
+ template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
306
+ class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
307
+ : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
308
+ {
309
+ public:
310
+ typedef _StorageIndex StorageIndex;
311
+ typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
312
+ typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
313
+ inline BlockImpl(SparseMatrixType& xpr, Index i)
314
+ : Base(xpr, i)
315
+ {}
316
+
317
+ inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
318
+ : Base(xpr, startRow, startCol, blockRows, blockCols)
319
+ {}
320
+
321
+ using Base::operator=;
322
+ private:
323
+ template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
324
+ template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
325
+ };
326
+
327
+ //----------
328
+
329
+ /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
330
+ * is col-major (resp. row-major).
331
+ */
332
+ template<typename Derived>
333
+ typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
334
+ { return InnerVectorReturnType(derived(), outer); }
335
+
336
+ /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
337
+ * is col-major (resp. row-major). Read-only.
338
+ */
339
+ template<typename Derived>
340
+ const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
341
+ { return ConstInnerVectorReturnType(derived(), outer); }
342
+
343
+ /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
344
+ * is col-major (resp. row-major).
345
+ */
346
+ template<typename Derived>
347
+ typename SparseMatrixBase<Derived>::InnerVectorsReturnType
348
+ SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
349
+ {
350
+ return Block<Derived,Dynamic,Dynamic,true>(derived(),
351
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
352
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
353
+
354
+ }
355
+
356
+ /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
357
+ * is col-major (resp. row-major). Read-only.
358
+ */
359
+ template<typename Derived>
360
+ const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
361
+ SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
362
+ {
363
+ return Block<const Derived,Dynamic,Dynamic,true>(derived(),
364
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
365
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
366
+
367
+ }
368
+
369
+ /** Generic implementation of sparse Block expression.
370
+ * Real-only.
371
+ */
372
+ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
373
+ class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
374
+ : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
375
+ {
376
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
377
+ typedef SparseMatrixBase<BlockType> Base;
378
+ using Base::convert_index;
379
+ public:
380
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
381
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
382
+
383
+ typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
384
+
385
+ /** Column or Row constructor
386
+ */
387
+ inline BlockImpl(XprType& xpr, Index i)
388
+ : m_matrix(xpr),
389
+ m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
390
+ m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
391
+ m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
392
+ m_blockCols(BlockCols==1 ? 1 : xpr.cols())
393
+ {}
394
+
395
+ /** Dynamic-size constructor
396
+ */
397
+ inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
398
+ : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
399
+ {}
400
+
401
+ inline Index rows() const { return m_blockRows.value(); }
402
+ inline Index cols() const { return m_blockCols.value(); }
403
+
404
+ inline Scalar& coeffRef(Index row, Index col)
405
+ {
406
+ return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
407
+ }
408
+
409
+ inline const Scalar coeff(Index row, Index col) const
410
+ {
411
+ return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
412
+ }
413
+
414
+ inline Scalar& coeffRef(Index index)
415
+ {
416
+ return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
417
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
418
+ }
419
+
420
+ inline const Scalar coeff(Index index) const
421
+ {
422
+ return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
423
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
424
+ }
425
+
426
+ inline const XprType& nestedExpression() const { return m_matrix; }
+ inline XprType& nestedExpression() { return m_matrix; }
+ Index startRow() const { return m_startRow.value(); }
+ Index startCol() const { return m_startCol.value(); }
+ Index blockRows() const { return m_blockRows.value(); }
+ Index blockCols() const { return m_blockCols.value(); }
+
+ protected:
+ // friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
+ friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
+
+ Index nonZeros() const { return Dynamic; }
+
+ typename internal::ref_selector<XprType>::non_const_type m_matrix;
+ const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+ const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+ const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+ const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+
+ protected:
+ // Disable assignment with clear error message.
+ // Note that simply removing operator= yields compilation errors with ICC+MSVC
+ template<typename T>
+ BlockImpl& operator=(const T&)
+ {
+ EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
+ return *this;
+ }
+
+ };
+
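Unlike inner-panel blocks of a plain SparseMatrix, which are writable, this generic implementation backs blocks of arbitrary sparse expressions and arbitrary shapes; such blocks can be read, iterated and copied, but not assigned to, which is what the poisoned operator= above enforces. A brief sketch (values illustrative):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(5, 5);
      A.insert(1, 1) = 4.0;
      A.insert(3, 2) = -1.0;
      A.makeCompressed();

      // A non inner-panel block (it spans neither whole columns nor whole rows),
      // so it is handled by the generic, read-only BlockImpl above.
      std::cout << A.block(1, 1, 3, 2).sum() << "\n";      // 3: 4.0 + (-1.0)
      Eigen::SparseMatrix<double> C = A.block(1, 1, 3, 2); // copying is fine
      // A.block(1, 1, 3, 2) = C;  // would trigger the static assert in operator=
      return 0;
    }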
+ namespace internal {
+
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
+ : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
+ {
+ class InnerVectorInnerIterator;
+ class OuterVectorInnerIterator;
+ public:
+ typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
+ typedef typename XprType::StorageIndex StorageIndex;
+ typedef typename XprType::Scalar Scalar;
+
+ enum {
+ IsRowMajor = XprType::IsRowMajor,
+
+ OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
+ | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
+ // revert to || as soon as not needed anymore.
+ (BlockRows==1 && !ArgType::IsRowMajor),
+
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
+
+ explicit unary_evaluator(const XprType& op)
+ : m_argImpl(op.nestedExpression()), m_block(op)
+ {}
+
+ inline Index nonZerosEstimate() const {
+ Index nnz = m_block.nonZeros();
+ if(nnz<0)
+ return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+ return nnz;
+ }
+
+ protected:
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ evaluator<ArgType> m_argImpl;
+ const XprType &m_block;
+ };
+
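When the block cannot report an exact count (the generic BlockImpl above returns Dynamic from nonZeros()), nonZerosEstimate() scales the nested expression's estimate by the fraction of the matrix the block covers. For example, a 100x1000 row panel of a 1000x1000 matrix holding roughly 5000 stored entries would be estimated at 5000 * (100*1000) / (1000*1000) = 500.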
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
+ : public EvalIterator
+ {
+ enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ const XprType& m_block;
+ Index m_end;
+ public:
+
+ EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
+ : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
+ m_block(aEval.m_block),
+ m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+ {
+ while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
+ EvalIterator::operator++();
+ }
+
+ inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
+ inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
+ inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
+ inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
+
+ inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
+ };
+
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
+ {
+ enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ const unary_evaluator& m_eval;
+ Index m_outerPos;
+ const Index m_innerIndex;
+ Index m_end;
+ EvalIterator m_it;
+ public:
+
+ EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
+ : m_eval(aEval),
+ m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
+ m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
+ m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
+ m_it(m_eval.m_argImpl, m_outerPos)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+
+ while(m_it && m_it.index() < m_innerIndex) ++m_it;
+ if((!m_it) || (m_it.index()!=m_innerIndex))
+ ++(*this);
+ }
+
+ inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
+ inline Index outer() const { return 0; }
+ inline Index row() const { return IsRowMajor ? 0 : index(); }
+ inline Index col() const { return IsRowMajor ? index() : 0; }
+
+ inline Scalar value() const { return m_it.value(); }
+ inline Scalar& valueRef() { return m_it.valueRef(); }
+
+ inline OuterVectorInnerIterator& operator++()
+ {
+ // search next non-zero entry
+ while(++m_outerPos<m_end)
+ {
+ // Restart iterator at the next inner-vector:
+ m_it.~EvalIterator();
+ ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
+ // search for the key m_innerIndex in the current outer-vector
+ while(m_it && m_it.index() < m_innerIndex) ++m_it;
+ if(m_it && m_it.index()==m_innerIndex) break;
+ }
+ return *this;
+ }
+
+ inline operator bool() const { return m_outerPos < m_end; }
+ };
+
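OuterVectorInnerIterator serves the transposed access pattern flagged by OuterVector above, e.g. a single column of a row-major expression: there is no contiguous inner vector to walk, so each outer vector in [start, end) is rescanned for an entry at the fixed inner index. A small sketch of a user-level operation that ends up in this iterator (values illustrative):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double, Eigen::RowMajor> R(4, 5);
      R.insert(0, 2) = 1.0;
      R.insert(3, 2) = 5.0;
      R.insert(1, 4) = 9.0;
      R.makeCompressed();

      // col(2) of a row-major matrix cuts across its inner (row) vectors, so each
      // row is probed for an entry at inner index 2 while summing.
      std::cout << R.col(2).sum() << "\n";   // 6: 1.0 + 5.0
      return 0;
    }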
+ template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
+ struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
+ : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
+ {
+ typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
+ explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
+ };
+
+ template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
+ struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
+ : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
+ {
+ typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
+ typedef evaluator<SparseCompressedBase<XprType> > Base;
+ explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
+ };
+
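These two specializations apply when the nested expression is a plain (possibly const) SparseMatrix and the block is an inner panel: evaluation then goes through the SparseCompressedBase evaluator, i.e. the block's iterators read the matrix's compressed arrays directly instead of filtering the nested expression's iterators the way the generic evaluator above does. A small sketch (values illustrative):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(4, 6);   // column-major by default
      A.insert(0, 1) = 1.0;
      A.insert(2, 2) = 2.0;
      A.insert(3, 4) = 3.0;
      A.makeCompressed();

      // middleCols() of a SparseMatrix is an inner-panel block, so it iterates
      // the compressed arrays directly and can count its nonzeros exactly.
      std::cout << A.middleCols(1, 3).nonZeros() << "\n";   // 2
      return 0;
    }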
+ } // end namespace internal
+
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_SPARSE_BLOCK_H