tomoto 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (420)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE.txt +22 -0
  4. data/README.md +123 -0
  5. data/ext/tomoto/ext.cpp +245 -0
  6. data/ext/tomoto/extconf.rb +28 -0
  7. data/lib/tomoto.rb +12 -0
  8. data/lib/tomoto/ct.rb +11 -0
  9. data/lib/tomoto/hdp.rb +11 -0
  10. data/lib/tomoto/lda.rb +67 -0
  11. data/lib/tomoto/version.rb +3 -0
  12. data/vendor/EigenRand/EigenRand/Core.h +1139 -0
  13. data/vendor/EigenRand/EigenRand/Dists/Basic.h +111 -0
  14. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +877 -0
  15. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +108 -0
  16. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +626 -0
  17. data/vendor/EigenRand/EigenRand/EigenRand +19 -0
  18. data/vendor/EigenRand/EigenRand/Macro.h +24 -0
  19. data/vendor/EigenRand/EigenRand/MorePacketMath.h +978 -0
  20. data/vendor/EigenRand/EigenRand/PacketFilter.h +286 -0
  21. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +624 -0
  22. data/vendor/EigenRand/EigenRand/RandUtils.h +413 -0
  23. data/vendor/EigenRand/EigenRand/doc.h +220 -0
  24. data/vendor/EigenRand/LICENSE +21 -0
  25. data/vendor/EigenRand/README.md +288 -0
  26. data/vendor/eigen/COPYING.BSD +26 -0
  27. data/vendor/eigen/COPYING.GPL +674 -0
  28. data/vendor/eigen/COPYING.LGPL +502 -0
  29. data/vendor/eigen/COPYING.MINPACK +52 -0
  30. data/vendor/eigen/COPYING.MPL2 +373 -0
  31. data/vendor/eigen/COPYING.README +18 -0
  32. data/vendor/eigen/Eigen/CMakeLists.txt +19 -0
  33. data/vendor/eigen/Eigen/Cholesky +46 -0
  34. data/vendor/eigen/Eigen/CholmodSupport +48 -0
  35. data/vendor/eigen/Eigen/Core +537 -0
  36. data/vendor/eigen/Eigen/Dense +7 -0
  37. data/vendor/eigen/Eigen/Eigen +2 -0
  38. data/vendor/eigen/Eigen/Eigenvalues +61 -0
  39. data/vendor/eigen/Eigen/Geometry +62 -0
  40. data/vendor/eigen/Eigen/Householder +30 -0
  41. data/vendor/eigen/Eigen/IterativeLinearSolvers +48 -0
  42. data/vendor/eigen/Eigen/Jacobi +33 -0
  43. data/vendor/eigen/Eigen/LU +50 -0
  44. data/vendor/eigen/Eigen/MetisSupport +35 -0
  45. data/vendor/eigen/Eigen/OrderingMethods +73 -0
  46. data/vendor/eigen/Eigen/PaStiXSupport +48 -0
  47. data/vendor/eigen/Eigen/PardisoSupport +35 -0
  48. data/vendor/eigen/Eigen/QR +51 -0
  49. data/vendor/eigen/Eigen/QtAlignedMalloc +40 -0
  50. data/vendor/eigen/Eigen/SPQRSupport +34 -0
  51. data/vendor/eigen/Eigen/SVD +51 -0
  52. data/vendor/eigen/Eigen/Sparse +36 -0
  53. data/vendor/eigen/Eigen/SparseCholesky +45 -0
  54. data/vendor/eigen/Eigen/SparseCore +69 -0
  55. data/vendor/eigen/Eigen/SparseLU +46 -0
  56. data/vendor/eigen/Eigen/SparseQR +37 -0
  57. data/vendor/eigen/Eigen/StdDeque +27 -0
  58. data/vendor/eigen/Eigen/StdList +26 -0
  59. data/vendor/eigen/Eigen/StdVector +27 -0
  60. data/vendor/eigen/Eigen/SuperLUSupport +64 -0
  61. data/vendor/eigen/Eigen/UmfPackSupport +40 -0
  62. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +673 -0
  63. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +542 -0
  64. data/vendor/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  65. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +639 -0
  66. data/vendor/eigen/Eigen/src/Core/Array.h +329 -0
  67. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +226 -0
  68. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +209 -0
  69. data/vendor/eigen/Eigen/src/Core/Assign.h +90 -0
  70. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +935 -0
  71. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +178 -0
  72. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +353 -0
  73. data/vendor/eigen/Eigen/src/Core/Block.h +452 -0
  74. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +164 -0
  75. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +160 -0
  76. data/vendor/eigen/Eigen/src/Core/ConditionEstimator.h +175 -0
  77. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +1688 -0
  78. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +127 -0
  79. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +184 -0
  80. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +866 -0
  81. data/vendor/eigen/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  82. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  83. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +128 -0
  84. data/vendor/eigen/Eigen/src/Core/DenseBase.h +611 -0
  85. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +681 -0
  86. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +570 -0
  87. data/vendor/eigen/Eigen/src/Core/Diagonal.h +260 -0
  88. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +343 -0
  89. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +28 -0
  90. data/vendor/eigen/Eigen/src/Core/Dot.h +318 -0
  91. data/vendor/eigen/Eigen/src/Core/EigenBase.h +159 -0
  92. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +146 -0
  93. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +155 -0
  94. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +455 -0
  95. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +593 -0
  96. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +187 -0
  97. data/vendor/eigen/Eigen/src/Core/IO.h +225 -0
  98. data/vendor/eigen/Eigen/src/Core/Inverse.h +118 -0
  99. data/vendor/eigen/Eigen/src/Core/Map.h +171 -0
  100. data/vendor/eigen/Eigen/src/Core/MapBase.h +303 -0
  101. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +1415 -0
  102. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +101 -0
  103. data/vendor/eigen/Eigen/src/Core/Matrix.h +459 -0
  104. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +529 -0
  105. data/vendor/eigen/Eigen/src/Core/NestByValue.h +110 -0
  106. data/vendor/eigen/Eigen/src/Core/NoAlias.h +108 -0
  107. data/vendor/eigen/Eigen/src/Core/NumTraits.h +248 -0
  108. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +633 -0
  109. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +1035 -0
  110. data/vendor/eigen/Eigen/src/Core/Product.h +186 -0
  111. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +1112 -0
  112. data/vendor/eigen/Eigen/src/Core/Random.h +182 -0
  113. data/vendor/eigen/Eigen/src/Core/Redux.h +505 -0
  114. data/vendor/eigen/Eigen/src/Core/Ref.h +283 -0
  115. data/vendor/eigen/Eigen/src/Core/Replicate.h +142 -0
  116. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +117 -0
  117. data/vendor/eigen/Eigen/src/Core/Reverse.h +211 -0
  118. data/vendor/eigen/Eigen/src/Core/Select.h +162 -0
  119. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +352 -0
  120. data/vendor/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  121. data/vendor/eigen/Eigen/src/Core/Solve.h +188 -0
  122. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +235 -0
  123. data/vendor/eigen/Eigen/src/Core/SolverBase.h +130 -0
  124. data/vendor/eigen/Eigen/src/Core/StableNorm.h +221 -0
  125. data/vendor/eigen/Eigen/src/Core/Stride.h +111 -0
  126. data/vendor/eigen/Eigen/src/Core/Swap.h +67 -0
  127. data/vendor/eigen/Eigen/src/Core/Transpose.h +403 -0
  128. data/vendor/eigen/Eigen/src/Core/Transpositions.h +407 -0
  129. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +983 -0
  130. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +96 -0
  131. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +695 -0
  132. data/vendor/eigen/Eigen/src/Core/Visitor.h +273 -0
  133. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +451 -0
  134. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +439 -0
  135. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +637 -0
  136. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +51 -0
  137. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +391 -0
  138. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1316 -0
  139. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +430 -0
  140. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +322 -0
  141. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +1061 -0
  142. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +103 -0
  143. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +674 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/CUDA/MathFunctions.h +91 -0
  145. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +333 -0
  146. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +1124 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +212 -0
  148. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +29 -0
  149. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +490 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +91 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +760 -0
  153. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +471 -0
  154. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +562 -0
  155. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +895 -0
  156. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +77 -0
  157. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +397 -0
  158. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +137 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +945 -0
  160. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +168 -0
  161. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +475 -0
  162. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +188 -0
  163. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +136 -0
  164. data/vendor/eigen/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  165. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +792 -0
  166. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2156 -0
  167. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +492 -0
  168. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +311 -0
  169. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  170. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +122 -0
  171. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +619 -0
  172. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  173. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +163 -0
  174. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +521 -0
  175. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +287 -0
  176. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +260 -0
  177. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  178. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  179. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +93 -0
  180. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +466 -0
  181. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +315 -0
  182. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  183. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  184. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +335 -0
  185. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +163 -0
  186. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +145 -0
  187. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +398 -0
  188. data/vendor/eigen/Eigen/src/Core/util/Constants.h +547 -0
  189. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +83 -0
  190. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +302 -0
  191. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +130 -0
  192. data/vendor/eigen/Eigen/src/Core/util/Macros.h +1001 -0
  193. data/vendor/eigen/Eigen/src/Core/util/Memory.h +993 -0
  194. data/vendor/eigen/Eigen/src/Core/util/Meta.h +534 -0
  195. data/vendor/eigen/Eigen/src/Core/util/NonMPL2.h +3 -0
  196. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +27 -0
  197. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +218 -0
  198. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +821 -0
  199. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  200. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +459 -0
  201. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  202. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  203. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  204. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  205. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  206. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  207. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +654 -0
  208. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +546 -0
  209. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  210. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +870 -0
  211. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  212. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +556 -0
  213. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +392 -0
  214. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +247 -0
  215. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +114 -0
  216. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +497 -0
  217. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +282 -0
  218. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +234 -0
  219. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +195 -0
  220. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +814 -0
  221. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +199 -0
  222. data/vendor/eigen/Eigen/src/Geometry/RotationBase.h +206 -0
  223. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +170 -0
  224. data/vendor/eigen/Eigen/src/Geometry/Transform.h +1542 -0
  225. data/vendor/eigen/Eigen/src/Geometry/Translation.h +208 -0
  226. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +166 -0
  227. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +161 -0
  228. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +103 -0
  229. data/vendor/eigen/Eigen/src/Householder/Householder.h +172 -0
  230. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +470 -0
  231. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  232. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +228 -0
  233. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +246 -0
  234. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +400 -0
  235. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +462 -0
  236. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +394 -0
  237. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +216 -0
  238. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +115 -0
  239. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +462 -0
  240. data/vendor/eigen/Eigen/src/LU/Determinant.h +101 -0
  241. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +891 -0
  242. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +415 -0
  243. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +611 -0
  244. data/vendor/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  245. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +338 -0
  246. data/vendor/eigen/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  247. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +445 -0
  248. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +1843 -0
  249. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +157 -0
  250. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  251. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +543 -0
  252. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +653 -0
  253. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  254. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +562 -0
  255. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +676 -0
  256. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +409 -0
  257. data/vendor/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  258. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +313 -0
  259. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +1246 -0
  260. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +804 -0
  261. data/vendor/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  262. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +315 -0
  263. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  264. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +689 -0
  265. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +199 -0
  266. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +377 -0
  267. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +258 -0
  268. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  269. data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  270. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +216 -0
  271. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +603 -0
  272. data/vendor/eigen/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  273. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +341 -0
  274. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +726 -0
  275. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +148 -0
  276. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +320 -0
  277. data/vendor/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  278. data/vendor/eigen/Eigen/src/SparseCore/SparseDot.h +98 -0
  279. data/vendor/eigen/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  280. data/vendor/eigen/Eigen/src/SparseCore/SparseMap.h +305 -0
  281. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +1403 -0
  282. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +405 -0
  283. data/vendor/eigen/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  284. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +169 -0
  285. data/vendor/eigen/Eigen/src/SparseCore/SparseRedux.h +49 -0
  286. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +397 -0
  287. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +656 -0
  288. data/vendor/eigen/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  289. data/vendor/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  290. data/vendor/eigen/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  291. data/vendor/eigen/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  292. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +178 -0
  293. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +478 -0
  294. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +253 -0
  295. data/vendor/eigen/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  296. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +773 -0
  297. data/vendor/eigen/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  298. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  299. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  300. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +301 -0
  301. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  302. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  303. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  304. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  305. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  306. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  307. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  308. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  309. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  310. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  311. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  312. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  313. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +745 -0
  314. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +126 -0
  315. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +106 -0
  316. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +131 -0
  317. data/vendor/eigen/Eigen/src/StlSupport/details.h +84 -0
  318. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +1027 -0
  319. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +506 -0
  320. data/vendor/eigen/Eigen/src/misc/Image.h +82 -0
  321. data/vendor/eigen/Eigen/src/misc/Kernel.h +79 -0
  322. data/vendor/eigen/Eigen/src/misc/RealSvd2x2.h +55 -0
  323. data/vendor/eigen/Eigen/src/misc/blas.h +440 -0
  324. data/vendor/eigen/Eigen/src/misc/lapack.h +152 -0
  325. data/vendor/eigen/Eigen/src/misc/lapacke.h +16291 -0
  326. data/vendor/eigen/Eigen/src/misc/lapacke_mangling.h +17 -0
  327. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +332 -0
  328. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +552 -0
  329. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +1058 -0
  330. data/vendor/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  331. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +163 -0
  332. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  333. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +85 -0
  334. data/vendor/eigen/README.md +3 -0
  335. data/vendor/eigen/bench/README.txt +55 -0
  336. data/vendor/eigen/bench/btl/COPYING +340 -0
  337. data/vendor/eigen/bench/btl/README +154 -0
  338. data/vendor/eigen/bench/tensors/README +21 -0
  339. data/vendor/eigen/blas/README.txt +6 -0
  340. data/vendor/eigen/demos/mandelbrot/README +10 -0
  341. data/vendor/eigen/demos/mix_eigen_and_c/README +9 -0
  342. data/vendor/eigen/demos/opengl/README +13 -0
  343. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +1760 -0
  344. data/vendor/eigen/unsupported/README.txt +50 -0
  345. data/vendor/tomotopy/LICENSE +21 -0
  346. data/vendor/tomotopy/README.kr.rst +375 -0
  347. data/vendor/tomotopy/README.rst +382 -0
  348. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +362 -0
  349. data/vendor/tomotopy/src/Labeling/FoRelevance.h +88 -0
  350. data/vendor/tomotopy/src/Labeling/Labeler.h +50 -0
  351. data/vendor/tomotopy/src/TopicModel/CT.h +37 -0
  352. data/vendor/tomotopy/src/TopicModel/CTModel.cpp +13 -0
  353. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +293 -0
  354. data/vendor/tomotopy/src/TopicModel/DMR.h +51 -0
  355. data/vendor/tomotopy/src/TopicModel/DMRModel.cpp +13 -0
  356. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +374 -0
  357. data/vendor/tomotopy/src/TopicModel/DT.h +65 -0
  358. data/vendor/tomotopy/src/TopicModel/DTM.h +22 -0
  359. data/vendor/tomotopy/src/TopicModel/DTModel.cpp +15 -0
  360. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +572 -0
  361. data/vendor/tomotopy/src/TopicModel/GDMR.h +37 -0
  362. data/vendor/tomotopy/src/TopicModel/GDMRModel.cpp +14 -0
  363. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +485 -0
  364. data/vendor/tomotopy/src/TopicModel/HDP.h +74 -0
  365. data/vendor/tomotopy/src/TopicModel/HDPModel.cpp +13 -0
  366. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +592 -0
  367. data/vendor/tomotopy/src/TopicModel/HLDA.h +40 -0
  368. data/vendor/tomotopy/src/TopicModel/HLDAModel.cpp +13 -0
  369. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +681 -0
  370. data/vendor/tomotopy/src/TopicModel/HPA.h +27 -0
  371. data/vendor/tomotopy/src/TopicModel/HPAModel.cpp +21 -0
  372. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +588 -0
  373. data/vendor/tomotopy/src/TopicModel/LDA.h +144 -0
  374. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +442 -0
  375. data/vendor/tomotopy/src/TopicModel/LDAModel.cpp +13 -0
  376. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +1058 -0
  377. data/vendor/tomotopy/src/TopicModel/LLDA.h +45 -0
  378. data/vendor/tomotopy/src/TopicModel/LLDAModel.cpp +13 -0
  379. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +203 -0
  380. data/vendor/tomotopy/src/TopicModel/MGLDA.h +63 -0
  381. data/vendor/tomotopy/src/TopicModel/MGLDAModel.cpp +17 -0
  382. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +558 -0
  383. data/vendor/tomotopy/src/TopicModel/PA.h +43 -0
  384. data/vendor/tomotopy/src/TopicModel/PAModel.cpp +13 -0
  385. data/vendor/tomotopy/src/TopicModel/PAModel.hpp +467 -0
  386. data/vendor/tomotopy/src/TopicModel/PLDA.h +17 -0
  387. data/vendor/tomotopy/src/TopicModel/PLDAModel.cpp +13 -0
  388. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +214 -0
  389. data/vendor/tomotopy/src/TopicModel/SLDA.h +54 -0
  390. data/vendor/tomotopy/src/TopicModel/SLDAModel.cpp +17 -0
  391. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +456 -0
  392. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +692 -0
  393. data/vendor/tomotopy/src/Utils/AliasMethod.hpp +169 -0
  394. data/vendor/tomotopy/src/Utils/Dictionary.h +80 -0
  395. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +181 -0
  396. data/vendor/tomotopy/src/Utils/LBFGS.h +202 -0
  397. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBacktracking.h +120 -0
  398. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBracketing.h +122 -0
  399. data/vendor/tomotopy/src/Utils/LBFGS/Param.h +213 -0
  400. data/vendor/tomotopy/src/Utils/LUT.hpp +82 -0
  401. data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +69 -0
  402. data/vendor/tomotopy/src/Utils/PolyaGamma.hpp +200 -0
  403. data/vendor/tomotopy/src/Utils/PolyaGammaHybrid.hpp +672 -0
  404. data/vendor/tomotopy/src/Utils/ThreadPool.hpp +150 -0
  405. data/vendor/tomotopy/src/Utils/Trie.hpp +220 -0
  406. data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +94 -0
  407. data/vendor/tomotopy/src/Utils/Utils.hpp +337 -0
  408. data/vendor/tomotopy/src/Utils/avx_gamma.h +46 -0
  409. data/vendor/tomotopy/src/Utils/avx_mathfun.h +736 -0
  410. data/vendor/tomotopy/src/Utils/exception.h +28 -0
  411. data/vendor/tomotopy/src/Utils/math.h +281 -0
  412. data/vendor/tomotopy/src/Utils/rtnorm.hpp +2690 -0
  413. data/vendor/tomotopy/src/Utils/sample.hpp +192 -0
  414. data/vendor/tomotopy/src/Utils/serializer.hpp +695 -0
  415. data/vendor/tomotopy/src/Utils/slp.hpp +131 -0
  416. data/vendor/tomotopy/src/Utils/sse_gamma.h +48 -0
  417. data/vendor/tomotopy/src/Utils/sse_mathfun.h +710 -0
  418. data/vendor/tomotopy/src/Utils/text.hpp +49 -0
  419. data/vendor/tomotopy/src/Utils/tvector.hpp +543 -0
  420. metadata +531 -0
@@ -0,0 +1,160 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_COMMAINITIALIZER_H
12
+ #define EIGEN_COMMAINITIALIZER_H
13
+
14
+ namespace Eigen {
15
+
16
+ /** \class CommaInitializer
17
+ * \ingroup Core_Module
18
+ *
19
+ * \brief Helper class used by the comma initializer operator
20
+ *
21
+ * This class is internally used to implement the comma initializer feature. It is
22
+ * the return type of MatrixBase::operator<<, and most of the time this is the only
23
+ * way it is used.
24
+ *
25
+ * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
26
+ */
27
+ template<typename XprType>
28
+ struct CommaInitializer
29
+ {
30
+ typedef typename XprType::Scalar Scalar;
31
+
32
+ EIGEN_DEVICE_FUNC
33
+ inline CommaInitializer(XprType& xpr, const Scalar& s)
34
+ : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
35
+ {
36
+ m_xpr.coeffRef(0,0) = s;
37
+ }
38
+
39
+ template<typename OtherDerived>
40
+ EIGEN_DEVICE_FUNC
41
+ inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
42
+ : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
43
+ {
44
+ m_xpr.block(0, 0, other.rows(), other.cols()) = other;
45
+ }
46
+
47
+ /* Copy/Move constructor which transfers ownership. This is crucial in
48
+ * absence of return value optimization to avoid assertions during destruction. */
49
+ // FIXME in C++11 mode this could be replaced by a proper RValue constructor
50
+ EIGEN_DEVICE_FUNC
51
+ inline CommaInitializer(const CommaInitializer& o)
52
+ : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
53
+ // Mark original object as finished. In absence of R-value references we need to const_cast:
54
+ const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
55
+ const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
56
+ const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
57
+ }
58
+
59
+ /* inserts a scalar value in the target matrix */
60
+ EIGEN_DEVICE_FUNC
61
+ CommaInitializer& operator,(const Scalar& s)
62
+ {
63
+ if (m_col==m_xpr.cols())
64
+ {
65
+ m_row+=m_currentBlockRows;
66
+ m_col = 0;
67
+ m_currentBlockRows = 1;
68
+ eigen_assert(m_row<m_xpr.rows()
69
+ && "Too many rows passed to comma initializer (operator<<)");
70
+ }
71
+ eigen_assert(m_col<m_xpr.cols()
72
+ && "Too many coefficients passed to comma initializer (operator<<)");
73
+ eigen_assert(m_currentBlockRows==1);
74
+ m_xpr.coeffRef(m_row, m_col++) = s;
75
+ return *this;
76
+ }
77
+
78
+ /* inserts a matrix expression in the target matrix */
79
+ template<typename OtherDerived>
80
+ EIGEN_DEVICE_FUNC
81
+ CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
82
+ {
83
+ if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows))
84
+ {
85
+ m_row+=m_currentBlockRows;
86
+ m_col = 0;
87
+ m_currentBlockRows = other.rows();
88
+ eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
89
+ && "Too many rows passed to comma initializer (operator<<)");
90
+ }
91
+ eigen_assert((m_col + other.cols() <= m_xpr.cols())
92
+ && "Too many coefficients passed to comma initializer (operator<<)");
93
+ eigen_assert(m_currentBlockRows==other.rows());
94
+ m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>
95
+ (m_row, m_col, other.rows(), other.cols()) = other;
96
+ m_col += other.cols();
97
+ return *this;
98
+ }
99
+
100
+ EIGEN_DEVICE_FUNC
101
+ inline ~CommaInitializer()
102
+ #if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS
103
+ EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)
104
+ #endif
105
+ {
106
+ finished();
107
+ }
108
+
109
+ /** \returns the built matrix once all its coefficients have been set.
110
+ * Calling finished is 100% optional. Its purpose is to write expressions
111
+ * like this:
112
+ * \code
113
+ * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
114
+ * \endcode
115
+ */
116
+ EIGEN_DEVICE_FUNC
117
+ inline XprType& finished() {
118
+ eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0)
119
+ && m_col == m_xpr.cols()
120
+ && "Too few coefficients passed to comma initializer (operator<<)");
121
+ return m_xpr;
122
+ }
123
+
124
+ XprType& m_xpr; // target expression
125
+ Index m_row; // current row id
126
+ Index m_col; // current col id
127
+ Index m_currentBlockRows; // current block height
128
+ };
129
+
130
+ /** \anchor MatrixBaseCommaInitRef
131
+ * Convenient operator to set the coefficients of a matrix.
132
+ *
133
+ * The coefficients must be provided in a row major order and exactly match
134
+ * the size of the matrix. Otherwise an assertion is raised.
135
+ *
136
+ * Example: \include MatrixBase_set.cpp
137
+ * Output: \verbinclude MatrixBase_set.out
138
+ *
139
+ * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
140
+ *
141
+ * \sa CommaInitializer::finished(), class CommaInitializer
142
+ */
143
+ template<typename Derived>
144
+ inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
145
+ {
146
+ return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
147
+ }
148
+
149
+ /** \sa operator<<(const Scalar&) */
150
+ template<typename Derived>
151
+ template<typename OtherDerived>
152
+ inline CommaInitializer<Derived>
153
+ DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
154
+ {
155
+ return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
156
+ }
157
+
158
+ } // end namespace Eigen
159
+
160
+ #endif // EIGEN_COMMAINITIALIZER_H
@@ -0,0 +1,175 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_CONDITIONESTIMATOR_H
11
+ #define EIGEN_CONDITIONESTIMATOR_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ template <typename Vector, typename RealVector, bool IsComplex>
18
+ struct rcond_compute_sign {
19
+ static inline Vector run(const Vector& v) {
20
+ const RealVector v_abs = v.cwiseAbs();
21
+ return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))
22
+ .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
23
+ }
24
+ };
25
+
26
+ // Partial specialization to avoid elementwise division for real vectors.
27
+ template <typename Vector>
28
+ struct rcond_compute_sign<Vector, Vector, false> {
29
+ static inline Vector run(const Vector& v) {
30
+ return (v.array() < static_cast<typename Vector::RealScalar>(0))
31
+ .select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
32
+ }
33
+ };
34
+
35
+ /**
36
+ * \returns an estimate of ||inv(matrix)||_1 given a decomposition of
37
+ * \a matrix that implements .solve() and .adjoint().solve() methods.
38
+ *
39
+ * This function implements Algorithms 4.1 and 5.1 from
40
+ * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf
41
+ * which also forms the basis for the condition number estimators in
42
+ * LAPACK. Since at most 10 calls to the solve method of dec are
43
+ * performed, the total cost is O(dims^2), as opposed to O(dims^3)
44
+ * needed to compute the inverse matrix explicitly.
45
+ *
46
+ * The most common usage is in estimating the condition number
47
+ * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
48
+ * computed directly in O(n^2) operations.
49
+ *
50
+ * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and
51
+ * LLT.
52
+ *
53
+ * \sa FullPivLU, PartialPivLU, LDLT, LLT.
54
+ */
55
+ template <typename Decomposition>
56
+ typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec)
57
+ {
58
+ typedef typename Decomposition::MatrixType MatrixType;
59
+ typedef typename Decomposition::Scalar Scalar;
60
+ typedef typename Decomposition::RealScalar RealScalar;
61
+ typedef typename internal::plain_col_type<MatrixType>::type Vector;
62
+ typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
63
+ const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
64
+
65
+ eigen_assert(dec.rows() == dec.cols());
66
+ const Index n = dec.rows();
67
+ if (n == 0)
68
+ return 0;
69
+
70
+ // Disable Index to float conversion warning
71
+ #ifdef __INTEL_COMPILER
72
+ #pragma warning push
73
+ #pragma warning ( disable : 2259 )
74
+ #endif
75
+ Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
76
+ #ifdef __INTEL_COMPILER
77
+ #pragma warning pop
78
+ #endif
79
+
80
+ // lower_bound is a lower bound on
81
+ // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
82
+ // and is the objective maximized by the ("super-") gradient ascent
83
+ // algorithm below.
84
+ RealScalar lower_bound = v.template lpNorm<1>();
85
+ if (n == 1)
86
+ return lower_bound;
87
+
88
+ // Gradient ascent algorithm follows: We know that the optimum is achieved at
89
+ // one of the simplices v = e_i, so in each iteration we follow a
90
+ // super-gradient to move towards the optimal one.
91
+ RealScalar old_lower_bound = lower_bound;
92
+ Vector sign_vector(n);
93
+ Vector old_sign_vector;
94
+ Index v_max_abs_index = -1;
95
+ Index old_v_max_abs_index = v_max_abs_index;
96
+ for (int k = 0; k < 4; ++k)
97
+ {
98
+ sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);
99
+ if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
100
+ // Break if the solution stagnated.
101
+ break;
102
+ }
103
+ // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
104
+ v = dec.adjoint().solve(sign_vector);
105
+ v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
106
+ if (v_max_abs_index == old_v_max_abs_index) {
107
+ // Break if the solution stagnated.
108
+ break;
109
+ }
110
+ // Move to the new simplex e_j, where j = v_max_abs_index.
111
+ v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j.
112
+ lower_bound = v.template lpNorm<1>();
113
+ if (lower_bound <= old_lower_bound) {
114
+ // Break if the gradient step did not increase the lower_bound.
115
+ break;
116
+ }
117
+ if (!is_complex) {
118
+ old_sign_vector = sign_vector;
119
+ }
120
+ old_v_max_abs_index = v_max_abs_index;
121
+ old_lower_bound = lower_bound;
122
+ }
123
+ // The following calculates an independent estimate of ||matrix||_1 by
124
+ // multiplying matrix by a vector with entries of slowly increasing
125
+ // magnitude and alternating sign:
126
+ // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
127
+ // This improvement to Hager's algorithm above is due to Higham. It was
128
+ // added to make the algorithm more robust in certain corner cases where
129
+ // large elements in the matrix might otherwise escape detection due to
130
+ // exact cancellation (especially when op and op_adjoint correspond to a
131
+ // sequence of backsubstitutions and permutations), which could cause
132
+ // Hager's algorithm to vastly underestimate ||matrix||_1.
133
+ Scalar alternating_sign(RealScalar(1));
134
+ for (Index i = 0; i < n; ++i) {
135
+ // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates
136
+ v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
137
+ alternating_sign = -alternating_sign;
138
+ }
139
+ v = dec.solve(v);
140
+ const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
141
+ return numext::maxi(lower_bound, alternate_lower_bound);
142
+ }
143
+
144
+ /** \brief Reciprocal condition number estimator.
145
+ *
146
+ * Computing a decomposition of a dense matrix takes O(n^3) operations, while
147
+ * this method estimates the condition number quickly and reliably in O(n^2)
148
+ * operations.
149
+ *
150
+ * \returns an estimate of the reciprocal condition number
151
+ * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and
152
+ * its decomposition. Supports the following decompositions: FullPivLU,
153
+ * PartialPivLU, LDLT, and LLT.
154
+ *
155
+ * \sa FullPivLU, PartialPivLU, LDLT, LLT.
156
+ */
157
+ template <typename Decomposition>
158
+ typename Decomposition::RealScalar
159
+ rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec)
160
+ {
161
+ typedef typename Decomposition::RealScalar RealScalar;
162
+ eigen_assert(dec.rows() == dec.cols());
163
+ if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
164
+ if (matrix_norm == RealScalar(0)) return RealScalar(0);
165
+ if (dec.rows() == 1) return RealScalar(1);
166
+ const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
167
+ return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0)
168
+ : (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
169
+ }
170
+
171
+ } // namespace internal
172
+
173
+ } // namespace Eigen
174
+
175
+ #endif
@@ -0,0 +1,1688 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
5
+ // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
7
+ //
8
+ // This Source Code Form is subject to the terms of the Mozilla
9
+ // Public License v. 2.0. If a copy of the MPL was not distributed
10
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11
+
12
+
13
+ #ifndef EIGEN_COREEVALUATORS_H
14
+ #define EIGEN_COREEVALUATORS_H
15
+
16
+ namespace Eigen {
17
+
18
+ namespace internal {
19
+
20
+ // This class returns the evaluator kind from the expression storage kind.
21
+ // Default assumes index based accessors
22
+ template<typename StorageKind>
23
+ struct storage_kind_to_evaluator_kind {
24
+ typedef IndexBased Kind;
25
+ };
26
+
27
+ // This class returns the evaluator shape from the expression storage kind.
28
+ // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
29
+ template<typename StorageKind> struct storage_kind_to_shape;
30
+
31
+ template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; };
32
+ template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; };
33
+ template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; };
34
+ template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; };
35
+
36
+ // Evaluators have to be specialized with respect to various criteria such as:
37
+ // - storage/structure/shape
38
+ // - scalar type
39
+ // - etc.
40
+ // Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators.
41
+ // We currently distinguish the following kind of evaluators:
42
+ // - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
43
+ // - binary_evaluator for expression taking two arguments (CwiseBinaryOp)
44
+ // - ternary_evaluator for expression taking three arguments (CwiseTernaryOp)
45
+ // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
46
+ // - mapbase_evaluator for Map, Block, Ref
47
+ // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
48
+
49
+ template< typename T,
50
+ typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind,
51
+ typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind,
52
+ typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind,
53
+ typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
54
+ typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
55
+ typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
56
+
57
+ template< typename T,
58
+ typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
59
+ typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
60
+ typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
61
+ typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
62
+
63
+ template< typename T,
64
+ typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind,
65
+ typename Scalar = typename T::Scalar> struct unary_evaluator;
66
+
67
+ // evaluator_traits<T> contains traits for evaluator<T>
68
+
69
+ template<typename T>
70
+ struct evaluator_traits_base
71
+ {
72
+ // by default, get evaluator kind and shape from storage
73
+ typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
74
+ typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
75
+ };
76
+
77
+ // Default evaluator traits
78
+ template<typename T>
79
+ struct evaluator_traits : public evaluator_traits_base<T>
80
+ {
81
+ };
82
+
83
+ template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
84
+ struct evaluator_assume_aliasing {
85
+ static const bool value = false;
86
+ };
87
+
88
+ // By default, we assume a unary expression:
89
+ template<typename T>
90
+ struct evaluator : public unary_evaluator<T>
91
+ {
92
+ typedef unary_evaluator<T> Base;
93
+ EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
94
+ };
95
+
96
+
97
+ // TODO: Think about const-correctness
98
+ template<typename T>
99
+ struct evaluator<const T>
100
+ : evaluator<T>
101
+ {
102
+ EIGEN_DEVICE_FUNC
103
+ explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
104
+ };
105
+
106
+ // ---------- base class for all evaluators ----------
107
+
108
+ template<typename ExpressionType>
109
+ struct evaluator_base : public noncopyable
110
+ {
111
+ // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
112
+ typedef traits<ExpressionType> ExpressionTraits;
113
+
114
+ enum {
115
+ Alignment = 0
116
+ };
117
+ };
118
+
119
+ // -------------------- Matrix and Array --------------------
120
+ //
121
+ // evaluator<PlainObjectBase> is a common base class for the
122
+ // Matrix and Array evaluators.
123
+ // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
124
+ // so no need for more sophisticated dispatching.
125
+
126
+ template<typename Derived>
127
+ struct evaluator<PlainObjectBase<Derived> >
128
+ : evaluator_base<Derived>
129
+ {
130
+ typedef PlainObjectBase<Derived> PlainObjectType;
131
+ typedef typename PlainObjectType::Scalar Scalar;
132
+ typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
133
+
134
+ enum {
135
+ IsRowMajor = PlainObjectType::IsRowMajor,
136
+ IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
137
+ RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
138
+ ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
139
+
140
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
141
+ Flags = traits<Derived>::EvaluatorFlags,
142
+ Alignment = traits<Derived>::Alignment
143
+ };
144
+
145
+ EIGEN_DEVICE_FUNC evaluator()
146
+ : m_data(0),
147
+ m_outerStride(IsVectorAtCompileTime ? 0
148
+ : int(IsRowMajor) ? ColsAtCompileTime
149
+ : RowsAtCompileTime)
150
+ {
151
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
152
+ }
153
+
154
+ EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
155
+ : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride())
156
+ {
157
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
158
+ }
159
+
160
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
161
+ CoeffReturnType coeff(Index row, Index col) const
162
+ {
163
+ if (IsRowMajor)
164
+ return m_data[row * m_outerStride.value() + col];
165
+ else
166
+ return m_data[row + col * m_outerStride.value()];
167
+ }
168
+
169
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
170
+ CoeffReturnType coeff(Index index) const
171
+ {
172
+ return m_data[index];
173
+ }
174
+
175
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
176
+ Scalar& coeffRef(Index row, Index col)
177
+ {
178
+ if (IsRowMajor)
179
+ return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col];
180
+ else
181
+ return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()];
182
+ }
183
+
184
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
185
+ Scalar& coeffRef(Index index)
186
+ {
187
+ return const_cast<Scalar*>(m_data)[index];
188
+ }
189
+
190
+ template<int LoadMode, typename PacketType>
191
+ EIGEN_STRONG_INLINE
192
+ PacketType packet(Index row, Index col) const
193
+ {
194
+ if (IsRowMajor)
195
+ return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col);
196
+ else
197
+ return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value());
198
+ }
199
+
200
+ template<int LoadMode, typename PacketType>
201
+ EIGEN_STRONG_INLINE
202
+ PacketType packet(Index index) const
203
+ {
204
+ return ploadt<PacketType, LoadMode>(m_data + index);
205
+ }
206
+
207
+ template<int StoreMode,typename PacketType>
208
+ EIGEN_STRONG_INLINE
209
+ void writePacket(Index row, Index col, const PacketType& x)
210
+ {
211
+ if (IsRowMajor)
212
+ return pstoret<Scalar, PacketType, StoreMode>
213
+ (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x);
214
+ else
215
+ return pstoret<Scalar, PacketType, StoreMode>
216
+ (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x);
217
+ }
218
+
219
+ template<int StoreMode, typename PacketType>
220
+ EIGEN_STRONG_INLINE
221
+ void writePacket(Index index, const PacketType& x)
222
+ {
223
+ return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
224
+ }
225
+
226
+ protected:
227
+ const Scalar *m_data;
228
+
229
+ // We do not need to know the outer stride for vectors
230
+ variable_if_dynamic<Index, IsVectorAtCompileTime ? 0
231
+ : int(IsRowMajor) ? ColsAtCompileTime
232
+ : RowsAtCompileTime> m_outerStride;
233
+ };
234
+
235
+ template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
236
+ struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
237
+ : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
238
+ {
239
+ typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
240
+
241
+ EIGEN_DEVICE_FUNC evaluator() {}
242
+
243
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
244
+ : evaluator<PlainObjectBase<XprType> >(m)
245
+ { }
246
+ };
247
+
248
+ template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
249
+ struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
250
+ : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
251
+ {
252
+ typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
253
+
254
+ EIGEN_DEVICE_FUNC evaluator() {}
255
+
256
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
257
+ : evaluator<PlainObjectBase<XprType> >(m)
258
+ { }
259
+ };
260
+
261
+ // -------------------- Transpose --------------------
262
+
263
+ template<typename ArgType>
264
+ struct unary_evaluator<Transpose<ArgType>, IndexBased>
265
+ : evaluator_base<Transpose<ArgType> >
266
+ {
267
+ typedef Transpose<ArgType> XprType;
268
+
269
+ enum {
270
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
271
+ Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
272
+ Alignment = evaluator<ArgType>::Alignment
273
+ };
274
+
275
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
276
+
277
+ typedef typename XprType::Scalar Scalar;
278
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
279
+
280
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
281
+ CoeffReturnType coeff(Index row, Index col) const
282
+ {
283
+ return m_argImpl.coeff(col, row);
284
+ }
285
+
286
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
287
+ CoeffReturnType coeff(Index index) const
288
+ {
289
+ return m_argImpl.coeff(index);
290
+ }
291
+
292
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
293
+ Scalar& coeffRef(Index row, Index col)
294
+ {
295
+ return m_argImpl.coeffRef(col, row);
296
+ }
297
+
298
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
299
+ typename XprType::Scalar& coeffRef(Index index)
300
+ {
301
+ return m_argImpl.coeffRef(index);
302
+ }
303
+
304
+ template<int LoadMode, typename PacketType>
305
+ EIGEN_STRONG_INLINE
306
+ PacketType packet(Index row, Index col) const
307
+ {
308
+ return m_argImpl.template packet<LoadMode,PacketType>(col, row);
309
+ }
310
+
311
+ template<int LoadMode, typename PacketType>
312
+ EIGEN_STRONG_INLINE
313
+ PacketType packet(Index index) const
314
+ {
315
+ return m_argImpl.template packet<LoadMode,PacketType>(index);
316
+ }
317
+
318
+ template<int StoreMode, typename PacketType>
319
+ EIGEN_STRONG_INLINE
320
+ void writePacket(Index row, Index col, const PacketType& x)
321
+ {
322
+ m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
323
+ }
324
+
325
+ template<int StoreMode, typename PacketType>
326
+ EIGEN_STRONG_INLINE
327
+ void writePacket(Index index, const PacketType& x)
328
+ {
329
+ m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
330
+ }
331
+
332
+ protected:
333
+ evaluator<ArgType> m_argImpl;
334
+ };
335
+
336
+ // -------------------- CwiseNullaryOp --------------------
337
+ // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
338
+ // Likewise, there is not need to more sophisticated dispatching here.
339
+
340
+ template<typename Scalar,typename NullaryOp,
341
+ bool has_nullary = has_nullary_operator<NullaryOp>::value,
342
+ bool has_unary = has_unary_operator<NullaryOp>::value,
343
+ bool has_binary = has_binary_operator<NullaryOp>::value>
344
+ struct nullary_wrapper
345
+ {
346
+ template <typename IndexType>
347
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
348
+ template <typename IndexType>
349
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
350
+
351
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
352
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
353
+ };
354
+
355
+ template<typename Scalar,typename NullaryOp>
356
+ struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
357
+ {
358
+ template <typename IndexType>
359
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
360
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
361
+ };
362
+
363
+ template<typename Scalar,typename NullaryOp>
364
+ struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
365
+ {
366
+ template <typename IndexType>
367
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
368
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
369
+ };
370
+
371
+ // We need the following specialization for vector-only functors assigned to a runtime vector,
372
+ // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
373
+ // In this case, i==0 and j is used for the actual iteration.
374
+ template<typename Scalar,typename NullaryOp>
375
+ struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
376
+ {
377
+ template <typename IndexType>
378
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
379
+ eigen_assert(i==0 || j==0);
380
+ return op(i+j);
381
+ }
382
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
383
+ eigen_assert(i==0 || j==0);
384
+ return op.template packetOp<T>(i+j);
385
+ }
386
+
387
+ template <typename IndexType>
388
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
389
+ template <typename T, typename IndexType>
390
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
391
+ };
392
+
393
+ template<typename Scalar,typename NullaryOp>
394
+ struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
395
+
396
+ #if 0 && EIGEN_COMP_MSVC>0
397
+ // Disable this ugly workaround. This is now handled in traits<Ref>::match,
398
+ // but this piece of code might still become handly if some other weird compilation
399
+ // erros pop up again.
400
+
401
+ // MSVC exhibits a weird compilation error when
402
+ // compiling:
403
+ // Eigen::MatrixXf A = MatrixXf::Random(3,3);
404
+ // Ref<const MatrixXf> R = 2.f*A;
405
+ // and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
406
+ // The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
407
+ // and at that time has_*ary_operator<T> returns true regardless of T.
408
+ // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
409
+ // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
410
+ // and packet() are really instantiated as implemented below:
411
+
412
+ // This is a simple wrapper around Index to enforce the re-instantiation of
413
+ // has_*ary_operator when needed.
414
+ template<typename T> struct nullary_wrapper_workaround_msvc {
415
+ nullary_wrapper_workaround_msvc(const T&);
416
+ operator T()const;
417
+ };
418
+
419
+ template<typename Scalar,typename NullaryOp>
420
+ struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
421
+ {
422
+ template <typename IndexType>
423
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
424
+ return nullary_wrapper<Scalar,NullaryOp,
425
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
426
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
427
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
428
+ }
429
+ template <typename IndexType>
430
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
431
+ return nullary_wrapper<Scalar,NullaryOp,
432
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
433
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
434
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
435
+ }
436
+
437
+ template <typename T, typename IndexType>
438
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
439
+ return nullary_wrapper<Scalar,NullaryOp,
440
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
441
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
442
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
443
+ }
444
+ template <typename T, typename IndexType>
445
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
446
+ return nullary_wrapper<Scalar,NullaryOp,
447
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
448
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
449
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
450
+ }
451
+ };
452
+ #endif // MSVC workaround
453
+
454
+ template<typename NullaryOp, typename PlainObjectType>
455
+ struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
456
+ : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
457
+ {
458
+ typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
459
+ typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
460
+
461
+ enum {
462
+ CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,
463
+
464
+ Flags = (evaluator<PlainObjectTypeCleaned>::Flags
465
+ & ( HereditaryBits
466
+ | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
467
+ | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
468
+ | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
469
+ Alignment = AlignedMax
470
+ };
471
+
472
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
473
+ : m_functor(n.functor()), m_wrapper()
474
+ {
475
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
476
+ }
477
+
478
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
479
+
480
+ template <typename IndexType>
481
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
482
+ CoeffReturnType coeff(IndexType row, IndexType col) const
483
+ {
484
+ return m_wrapper(m_functor, row, col);
485
+ }
486
+
487
+ template <typename IndexType>
488
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
489
+ CoeffReturnType coeff(IndexType index) const
490
+ {
491
+ return m_wrapper(m_functor,index);
492
+ }
493
+
494
+ template<int LoadMode, typename PacketType, typename IndexType>
495
+ EIGEN_STRONG_INLINE
496
+ PacketType packet(IndexType row, IndexType col) const
497
+ {
498
+ return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
499
+ }
500
+
501
+ template<int LoadMode, typename PacketType, typename IndexType>
502
+ EIGEN_STRONG_INLINE
503
+ PacketType packet(IndexType index) const
504
+ {
505
+ return m_wrapper.template packetOp<PacketType>(m_functor, index);
506
+ }
507
+
508
+ protected:
509
+ const NullaryOp m_functor;
510
+ const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
511
+ };
512
+
513
+ // -------------------- CwiseUnaryOp --------------------
514
+
515
+ template<typename UnaryOp, typename ArgType>
516
+ struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
517
+ : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
518
+ {
519
+ typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
520
+
521
+ enum {
522
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
523
+
524
+ Flags = evaluator<ArgType>::Flags
525
+ & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
526
+ Alignment = evaluator<ArgType>::Alignment
527
+ };
528
+
529
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
530
+ explicit unary_evaluator(const XprType& op)
531
+ : m_functor(op.functor()),
532
+ m_argImpl(op.nestedExpression())
533
+ {
534
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
535
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
536
+ }
537
+
538
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
539
+
540
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
541
+ CoeffReturnType coeff(Index row, Index col) const
542
+ {
543
+ return m_functor(m_argImpl.coeff(row, col));
544
+ }
545
+
546
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
547
+ CoeffReturnType coeff(Index index) const
548
+ {
549
+ return m_functor(m_argImpl.coeff(index));
550
+ }
551
+
552
+ template<int LoadMode, typename PacketType>
553
+ EIGEN_STRONG_INLINE
554
+ PacketType packet(Index row, Index col) const
555
+ {
556
+ return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(row, col));
557
+ }
558
+
559
+ template<int LoadMode, typename PacketType>
560
+ EIGEN_STRONG_INLINE
561
+ PacketType packet(Index index) const
562
+ {
563
+ return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(index));
564
+ }
565
+
566
+ protected:
567
+ const UnaryOp m_functor;
568
+ evaluator<ArgType> m_argImpl;
569
+ };
570
+
571
+ // -------------------- CwiseTernaryOp --------------------
572
+
573
+ // this is a ternary expression
574
+ template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
575
+ struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
576
+ : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
577
+ {
578
+ typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
579
+ typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
580
+
581
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
582
+ };
583
+
584
+ template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
585
+ struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
586
+ : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
587
+ {
588
+ typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
589
+
590
+ enum {
591
+ CoeffReadCost = evaluator<Arg1>::CoeffReadCost + evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost,
592
+
593
+ Arg1Flags = evaluator<Arg1>::Flags,
594
+ Arg2Flags = evaluator<Arg2>::Flags,
595
+ Arg3Flags = evaluator<Arg3>::Flags,
596
+ SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
597
+ StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
598
+ Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
599
+ HereditaryBits
600
+ | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
601
+ ( (StorageOrdersAgree ? LinearAccessBit : 0)
602
+ | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
603
+ )
604
+ )
605
+ ),
606
+ Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
607
+ Alignment = EIGEN_PLAIN_ENUM_MIN(
608
+ EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
609
+ evaluator<Arg3>::Alignment)
610
+ };
611
+
612
+ EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr)
613
+ : m_functor(xpr.functor()),
614
+ m_arg1Impl(xpr.arg1()),
615
+ m_arg2Impl(xpr.arg2()),
616
+ m_arg3Impl(xpr.arg3())
617
+ {
618
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
619
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
620
+ }
621
+
622
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
623
+
624
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
625
+ CoeffReturnType coeff(Index row, Index col) const
626
+ {
627
+ return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col));
628
+ }
629
+
630
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
631
+ CoeffReturnType coeff(Index index) const
632
+ {
633
+ return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
634
+ }
635
+
636
+ template<int LoadMode, typename PacketType>
637
+ EIGEN_STRONG_INLINE
638
+ PacketType packet(Index row, Index col) const
639
+ {
640
+ return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(row, col),
641
+ m_arg2Impl.template packet<LoadMode,PacketType>(row, col),
642
+ m_arg3Impl.template packet<LoadMode,PacketType>(row, col));
643
+ }
644
+
645
+ template<int LoadMode, typename PacketType>
646
+ EIGEN_STRONG_INLINE
647
+ PacketType packet(Index index) const
648
+ {
649
+ return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(index),
650
+ m_arg2Impl.template packet<LoadMode,PacketType>(index),
651
+ m_arg3Impl.template packet<LoadMode,PacketType>(index));
652
+ }
653
+
654
+ protected:
655
+ const TernaryOp m_functor;
656
+ evaluator<Arg1> m_arg1Impl;
657
+ evaluator<Arg2> m_arg2Impl;
658
+ evaluator<Arg3> m_arg3Impl;
659
+ };
660
+
661
+ // -------------------- CwiseBinaryOp --------------------
662
+
663
+ // this is a binary expression
664
+ template<typename BinaryOp, typename Lhs, typename Rhs>
665
+ struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
666
+ : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
667
+ {
668
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
669
+ typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
670
+
671
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
672
+ };
673
+
674
+ template<typename BinaryOp, typename Lhs, typename Rhs>
675
+ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
676
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
677
+ {
678
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
679
+
680
+ enum {
681
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
682
+
683
+ LhsFlags = evaluator<Lhs>::Flags,
684
+ RhsFlags = evaluator<Rhs>::Flags,
685
+ SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
686
+ StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
687
+ Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
688
+ HereditaryBits
689
+ | (int(LhsFlags) & int(RhsFlags) &
690
+ ( (StorageOrdersAgree ? LinearAccessBit : 0)
691
+ | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
692
+ )
693
+ )
694
+ ),
695
+ Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
696
+ Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
697
+ };
698
+
699
+ EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr)
700
+ : m_functor(xpr.functor()),
701
+ m_lhsImpl(xpr.lhs()),
702
+ m_rhsImpl(xpr.rhs())
703
+ {
704
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
705
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
706
+ }
707
+
708
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
709
+
710
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
711
+ CoeffReturnType coeff(Index row, Index col) const
712
+ {
713
+ return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col));
714
+ }
715
+
716
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
717
+ CoeffReturnType coeff(Index index) const
718
+ {
719
+ return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index));
720
+ }
721
+
722
+ template<int LoadMode, typename PacketType>
723
+ EIGEN_STRONG_INLINE
724
+ PacketType packet(Index row, Index col) const
725
+ {
726
+ return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(row, col),
727
+ m_rhsImpl.template packet<LoadMode,PacketType>(row, col));
728
+ }
729
+
730
+ template<int LoadMode, typename PacketType>
731
+ EIGEN_STRONG_INLINE
732
+ PacketType packet(Index index) const
733
+ {
734
+ return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(index),
735
+ m_rhsImpl.template packet<LoadMode,PacketType>(index));
736
+ }
737
+
738
+ protected:
739
+ const BinaryOp m_functor;
740
+ evaluator<Lhs> m_lhsImpl;
741
+ evaluator<Rhs> m_rhsImpl;
742
+ };
743
+
744
+ // -------------------- CwiseUnaryView --------------------
745
+
746
+ template<typename UnaryOp, typename ArgType>
747
+ struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
748
+ : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
749
+ {
750
+ typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
751
+
752
+ enum {
753
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
754
+
755
+ Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
756
+
757
+ Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
758
+ };
759
+
760
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
761
+ : m_unaryOp(op.functor()),
762
+ m_argImpl(op.nestedExpression())
763
+ {
764
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
765
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
766
+ }
767
+
768
+ typedef typename XprType::Scalar Scalar;
769
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
770
+
771
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
772
+ CoeffReturnType coeff(Index row, Index col) const
773
+ {
774
+ return m_unaryOp(m_argImpl.coeff(row, col));
775
+ }
776
+
777
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
778
+ CoeffReturnType coeff(Index index) const
779
+ {
780
+ return m_unaryOp(m_argImpl.coeff(index));
781
+ }
782
+
783
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
784
+ Scalar& coeffRef(Index row, Index col)
785
+ {
786
+ return m_unaryOp(m_argImpl.coeffRef(row, col));
787
+ }
788
+
789
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
790
+ Scalar& coeffRef(Index index)
791
+ {
792
+ return m_unaryOp(m_argImpl.coeffRef(index));
793
+ }
794
+
795
+ protected:
796
+ const UnaryOp m_unaryOp;
797
+ evaluator<ArgType> m_argImpl;
798
+ };
799
+
800
+ // -------------------- Map --------------------
801
+
802
+ // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
803
+ // but that might complicate template specialization
804
+ template<typename Derived, typename PlainObjectType>
805
+ struct mapbase_evaluator;
806
+
807
+ template<typename Derived, typename PlainObjectType>
808
+ struct mapbase_evaluator : evaluator_base<Derived>
809
+ {
810
+ typedef Derived XprType;
811
+ typedef typename XprType::PointerType PointerType;
812
+ typedef typename XprType::Scalar Scalar;
813
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
814
+
815
+ enum {
816
+ IsRowMajor = XprType::RowsAtCompileTime,
817
+ ColsAtCompileTime = XprType::ColsAtCompileTime,
818
+ CoeffReadCost = NumTraits<Scalar>::ReadCost
819
+ };
820
+
821
+ EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
822
+ : m_data(const_cast<PointerType>(map.data())),
823
+ m_innerStride(map.innerStride()),
824
+ m_outerStride(map.outerStride())
825
+ {
826
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
827
+ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
828
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
829
+ }
830
+
831
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
832
+ CoeffReturnType coeff(Index row, Index col) const
833
+ {
834
+ return m_data[col * colStride() + row * rowStride()];
835
+ }
836
+
837
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
838
+ CoeffReturnType coeff(Index index) const
839
+ {
840
+ return m_data[index * m_innerStride.value()];
841
+ }
842
+
843
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
844
+ Scalar& coeffRef(Index row, Index col)
845
+ {
846
+ return m_data[col * colStride() + row * rowStride()];
847
+ }
848
+
849
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
850
+ Scalar& coeffRef(Index index)
851
+ {
852
+ return m_data[index * m_innerStride.value()];
853
+ }
854
+
855
+ template<int LoadMode, typename PacketType>
856
+ EIGEN_STRONG_INLINE
857
+ PacketType packet(Index row, Index col) const
858
+ {
859
+ PointerType ptr = m_data + row * rowStride() + col * colStride();
860
+ return internal::ploadt<PacketType, LoadMode>(ptr);
861
+ }
862
+
863
+ template<int LoadMode, typename PacketType>
864
+ EIGEN_STRONG_INLINE
865
+ PacketType packet(Index index) const
866
+ {
867
+ return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
868
+ }
869
+
870
+ template<int StoreMode, typename PacketType>
871
+ EIGEN_STRONG_INLINE
872
+ void writePacket(Index row, Index col, const PacketType& x)
873
+ {
874
+ PointerType ptr = m_data + row * rowStride() + col * colStride();
875
+ return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
876
+ }
877
+
878
+ template<int StoreMode, typename PacketType>
879
+ EIGEN_STRONG_INLINE
880
+ void writePacket(Index index, const PacketType& x)
881
+ {
882
+ internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
883
+ }
884
+ protected:
885
+ EIGEN_DEVICE_FUNC
886
+ inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
887
+ EIGEN_DEVICE_FUNC
888
+ inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }
889
+
890
+ PointerType m_data;
891
+ const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
892
+ const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
893
+ };
894
+
895
+ template<typename PlainObjectType, int MapOptions, typename StrideType>
896
+ struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
897
+ : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
898
+ {
899
+ typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
900
+ typedef typename XprType::Scalar Scalar;
901
+ // TODO: should check for smaller packet types once we can handle multi-sized packet types
902
+ typedef typename packet_traits<Scalar>::type PacketScalar;
903
+
904
+ enum {
905
+ InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
906
+ ? int(PlainObjectType::InnerStrideAtCompileTime)
907
+ : int(StrideType::InnerStrideAtCompileTime),
908
+ OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
909
+ ? int(PlainObjectType::OuterStrideAtCompileTime)
910
+ : int(StrideType::OuterStrideAtCompileTime),
911
+ HasNoInnerStride = InnerStrideAtCompileTime == 1,
912
+ HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
913
+ HasNoStride = HasNoInnerStride && HasNoOuterStride,
914
+ IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
915
+
916
+ PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
917
+ LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
918
+ Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
919
+
920
+ Alignment = int(MapOptions)&int(AlignedMask)
921
+ };
922
+
923
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
924
+ : mapbase_evaluator<XprType, PlainObjectType>(map)
925
+ { }
926
+ };
927
+
928
+ // -------------------- Ref --------------------
929
+
930
+ template<typename PlainObjectType, int RefOptions, typename StrideType>
931
+ struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
932
+ : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
933
+ {
934
+ typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
935
+
936
+ enum {
937
+ Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
938
+ Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
939
+ };
940
+
941
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
942
+ : mapbase_evaluator<XprType, PlainObjectType>(ref)
943
+ { }
944
+ };
945
+
946
+ // -------------------- Block --------------------
947
+
948
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
949
+ bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
950
+
951
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
952
+ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
953
+ : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
954
+ {
955
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
956
+ typedef typename XprType::Scalar Scalar;
957
+ // TODO: should check for smaller packet types once we can handle multi-sized packet types
958
+ typedef typename packet_traits<Scalar>::type PacketScalar;
959
+
960
+ enum {
961
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
962
+
963
+ RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
964
+ ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
965
+ MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
966
+ MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
967
+
968
+ ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
969
+ IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
970
+ : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
971
+ : ArgTypeIsRowMajor,
972
+ HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
973
+ InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
974
+ InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
975
+ ? int(inner_stride_at_compile_time<ArgType>::ret)
976
+ : int(outer_stride_at_compile_time<ArgType>::ret),
977
+ OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
978
+ ? int(outer_stride_at_compile_time<ArgType>::ret)
979
+ : int(inner_stride_at_compile_time<ArgType>::ret),
980
+ MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
981
+
982
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
983
+ FlagsRowMajorBit = XprType::Flags&RowMajorBit,
984
+ Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
985
+ DirectAccessBit |
986
+ MaskPacketAccessBit),
987
+ Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
988
+
989
+ PacketAlignment = unpacket_traits<PacketScalar>::alignment,
990
+ Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
991
+ && (OuterStrideAtCompileTime!=0)
992
+ && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
993
+ Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
994
+ };
995
+ typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
996
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
997
+ {
998
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
999
+ }
1000
+ };
1001
+
1002
+ // no direct-access => dispatch to a unary evaluator
1003
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1004
+ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
1005
+ : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1006
+ {
1007
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1008
+
1009
+ EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
1010
+ : unary_evaluator<XprType>(block)
1011
+ {}
1012
+ };
1013
+
1014
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1015
+ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
1016
+ : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1017
+ {
1018
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1019
+
1020
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
1021
+ : m_argImpl(block.nestedExpression()),
1022
+ m_startRow(block.startRow()),
1023
+ m_startCol(block.startCol()),
1024
+ m_linear_offset(InnerPanel?(XprType::IsRowMajor ? block.startRow()*block.cols() : block.startCol()*block.rows()):0)
1025
+ { }
1026
+
1027
+ typedef typename XprType::Scalar Scalar;
1028
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
1029
+
1030
+ enum {
1031
+ RowsAtCompileTime = XprType::RowsAtCompileTime,
1032
+ ForwardLinearAccess = InnerPanel && bool(evaluator<ArgType>::Flags&LinearAccessBit)
1033
+ };
1034
+
1035
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1036
+ CoeffReturnType coeff(Index row, Index col) const
1037
+ {
1038
+ return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
1039
+ }
1040
+
1041
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1042
+ CoeffReturnType coeff(Index index) const
1043
+ {
1044
+ if (ForwardLinearAccess)
1045
+ return m_argImpl.coeff(m_linear_offset.value() + index);
1046
+ else
1047
+ return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1048
+ }
1049
+
1050
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1051
+ Scalar& coeffRef(Index row, Index col)
1052
+ {
1053
+ return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
1054
+ }
1055
+
1056
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1057
+ Scalar& coeffRef(Index index)
1058
+ {
1059
+ if (ForwardLinearAccess)
1060
+ return m_argImpl.coeffRef(m_linear_offset.value() + index);
1061
+ else
1062
+ return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1063
+ }
1064
+
1065
+ template<int LoadMode, typename PacketType>
1066
+ EIGEN_STRONG_INLINE
1067
+ PacketType packet(Index row, Index col) const
1068
+ {
1069
+ return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
1070
+ }
1071
+
1072
+ template<int LoadMode, typename PacketType>
1073
+ EIGEN_STRONG_INLINE
1074
+ PacketType packet(Index index) const
1075
+ {
1076
+ if (ForwardLinearAccess)
1077
+ return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
1078
+ else
1079
+ return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1080
+ RowsAtCompileTime == 1 ? index : 0);
1081
+ }
1082
+
1083
+ template<int StoreMode, typename PacketType>
1084
+ EIGEN_STRONG_INLINE
1085
+ void writePacket(Index row, Index col, const PacketType& x)
1086
+ {
1087
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
1088
+ }
1089
+
1090
+ template<int StoreMode, typename PacketType>
1091
+ EIGEN_STRONG_INLINE
1092
+ void writePacket(Index index, const PacketType& x)
1093
+ {
1094
+ if (ForwardLinearAccess)
1095
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
1096
+ else
1097
+ return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1098
+ RowsAtCompileTime == 1 ? index : 0,
1099
+ x);
1100
+ }
1101
+
1102
+ protected:
1103
+ evaluator<ArgType> m_argImpl;
1104
+ const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
1105
+ const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
1106
+ const variable_if_dynamic<Index, InnerPanel ? Dynamic : 0> m_linear_offset;
1107
+ };
1108
+
1109
+ // TODO: This evaluator does not actually use the child evaluator;
1110
+ // all action is via the data() as returned by the Block expression.
1111
+
1112
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1113
+ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
1114
+ : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
1115
+ typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
1116
+ {
1117
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1118
+ typedef typename XprType::Scalar Scalar;
1119
+
1120
+ EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
1121
+ : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
1122
+ {
1123
+ // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
1124
+ eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
1125
+ }
1126
+ };
1127
+
1128
+
1129
+ // -------------------- Select --------------------
1130
+ // NOTE shall we introduce a ternary_evaluator?
1131
+
1132
+ // TODO enable vectorization for Select
1133
+ template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
1134
+ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1135
+ : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1136
+ {
1137
+ typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
1138
+ enum {
1139
+ CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
1140
+ + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
1141
+ evaluator<ElseMatrixType>::CoeffReadCost),
1142
+
1143
+ Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
1144
+
1145
+ Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
1146
+ };
1147
+
1148
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)
1149
+ : m_conditionImpl(select.conditionMatrix()),
1150
+ m_thenImpl(select.thenMatrix()),
1151
+ m_elseImpl(select.elseMatrix())
1152
+ {
1153
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1154
+ }
1155
+
1156
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
1157
+
1158
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1159
+ CoeffReturnType coeff(Index row, Index col) const
1160
+ {
1161
+ if (m_conditionImpl.coeff(row, col))
1162
+ return m_thenImpl.coeff(row, col);
1163
+ else
1164
+ return m_elseImpl.coeff(row, col);
1165
+ }
1166
+
1167
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1168
+ CoeffReturnType coeff(Index index) const
1169
+ {
1170
+ if (m_conditionImpl.coeff(index))
1171
+ return m_thenImpl.coeff(index);
1172
+ else
1173
+ return m_elseImpl.coeff(index);
1174
+ }
1175
+
1176
+ protected:
1177
+ evaluator<ConditionMatrixType> m_conditionImpl;
1178
+ evaluator<ThenMatrixType> m_thenImpl;
1179
+ evaluator<ElseMatrixType> m_elseImpl;
1180
+ };
1181
+
1182
+
1183
+ // -------------------- Replicate --------------------
1184
+
1185
+ template<typename ArgType, int RowFactor, int ColFactor>
1186
+ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
1187
+ : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
1188
+ {
1189
+ typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
1190
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
1191
+ enum {
1192
+ Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
1193
+ };
1194
+ typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
1195
+ typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
1196
+
1197
+ enum {
1198
+ CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
1199
+ LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
1200
+ Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
1201
+
1202
+ Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
1203
+ };
1204
+
1205
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)
1206
+ : m_arg(replicate.nestedExpression()),
1207
+ m_argImpl(m_arg),
1208
+ m_rows(replicate.nestedExpression().rows()),
1209
+ m_cols(replicate.nestedExpression().cols())
1210
+ {}
1211
+
1212
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1213
+ CoeffReturnType coeff(Index row, Index col) const
1214
+ {
1215
+ // try to avoid using modulo; this is a pure optimization strategy
1216
+ const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1217
+ : RowFactor==1 ? row
1218
+ : row % m_rows.value();
1219
+ const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1220
+ : ColFactor==1 ? col
1221
+ : col % m_cols.value();
1222
+
1223
+ return m_argImpl.coeff(actual_row, actual_col);
1224
+ }
1225
+
1226
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1227
+ CoeffReturnType coeff(Index index) const
1228
+ {
1229
+ // try to avoid using modulo; this is a pure optimization strategy
1230
+ const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1231
+ ? (ColFactor==1 ? index : index%m_cols.value())
1232
+ : (RowFactor==1 ? index : index%m_rows.value());
1233
+
1234
+ return m_argImpl.coeff(actual_index);
1235
+ }
1236
+
1237
+ template<int LoadMode, typename PacketType>
1238
+ EIGEN_STRONG_INLINE
1239
+ PacketType packet(Index row, Index col) const
1240
+ {
1241
+ const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1242
+ : RowFactor==1 ? row
1243
+ : row % m_rows.value();
1244
+ const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1245
+ : ColFactor==1 ? col
1246
+ : col % m_cols.value();
1247
+
1248
+ return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
1249
+ }
1250
+
1251
+ template<int LoadMode, typename PacketType>
1252
+ EIGEN_STRONG_INLINE
1253
+ PacketType packet(Index index) const
1254
+ {
1255
+ const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1256
+ ? (ColFactor==1 ? index : index%m_cols.value())
1257
+ : (RowFactor==1 ? index : index%m_rows.value());
1258
+
1259
+ return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
1260
+ }
1261
+
1262
+ protected:
1263
+ const ArgTypeNested m_arg;
1264
+ evaluator<ArgTypeNestedCleaned> m_argImpl;
1265
+ const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
1266
+ const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
1267
+ };
1268
+
1269
+
1270
+ // -------------------- PartialReduxExpr --------------------
1271
+
1272
+ template< typename ArgType, typename MemberOp, int Direction>
1273
+ struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
1274
+ : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
1275
+ {
1276
+ typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
1277
+ typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
1278
+ typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
1279
+ typedef typename ArgType::Scalar InputScalar;
1280
+ typedef typename XprType::Scalar Scalar;
1281
+ enum {
1282
+ TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
1283
+ };
1284
+ typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
1285
+ enum {
1286
+ CoeffReadCost = TraversalSize==Dynamic ? HugeCost
1287
+ : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),
1288
+
1289
+ Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit,
1290
+
1291
+ Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
1292
+ };
1293
+
1294
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)
1295
+ : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
1296
+ {
1297
+ EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value));
1298
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1299
+ }
1300
+
1301
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
1302
+
1303
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1304
+ const Scalar coeff(Index i, Index j) const
1305
+ {
1306
+ if (Direction==Vertical)
1307
+ return m_functor(m_arg.col(j));
1308
+ else
1309
+ return m_functor(m_arg.row(i));
1310
+ }
1311
+
1312
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1313
+ const Scalar coeff(Index index) const
1314
+ {
1315
+ if (Direction==Vertical)
1316
+ return m_functor(m_arg.col(index));
1317
+ else
1318
+ return m_functor(m_arg.row(index));
1319
+ }
1320
+
1321
+ protected:
1322
+ typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;
1323
+ const MemberOp m_functor;
1324
+ };
1325
+
1326
+
1327
+ // -------------------- MatrixWrapper and ArrayWrapper --------------------
1328
+ //
1329
+ // evaluator_wrapper_base<T> is a common base class for the
1330
+ // MatrixWrapper and ArrayWrapper evaluators.
1331
+
1332
+ template<typename XprType>
1333
+ struct evaluator_wrapper_base
1334
+ : evaluator_base<XprType>
1335
+ {
1336
+ typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
1337
+ enum {
1338
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1339
+ Flags = evaluator<ArgType>::Flags,
1340
+ Alignment = evaluator<ArgType>::Alignment
1341
+ };
1342
+
1343
+ EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
1344
+
1345
+ typedef typename ArgType::Scalar Scalar;
1346
+ typedef typename ArgType::CoeffReturnType CoeffReturnType;
1347
+
1348
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1349
+ CoeffReturnType coeff(Index row, Index col) const
1350
+ {
1351
+ return m_argImpl.coeff(row, col);
1352
+ }
1353
+
1354
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1355
+ CoeffReturnType coeff(Index index) const
1356
+ {
1357
+ return m_argImpl.coeff(index);
1358
+ }
1359
+
1360
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1361
+ Scalar& coeffRef(Index row, Index col)
1362
+ {
1363
+ return m_argImpl.coeffRef(row, col);
1364
+ }
1365
+
1366
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1367
+ Scalar& coeffRef(Index index)
1368
+ {
1369
+ return m_argImpl.coeffRef(index);
1370
+ }
1371
+
1372
+ template<int LoadMode, typename PacketType>
1373
+ EIGEN_STRONG_INLINE
1374
+ PacketType packet(Index row, Index col) const
1375
+ {
1376
+ return m_argImpl.template packet<LoadMode,PacketType>(row, col);
1377
+ }
1378
+
1379
+ template<int LoadMode, typename PacketType>
1380
+ EIGEN_STRONG_INLINE
1381
+ PacketType packet(Index index) const
1382
+ {
1383
+ return m_argImpl.template packet<LoadMode,PacketType>(index);
1384
+ }
1385
+
1386
+ template<int StoreMode, typename PacketType>
1387
+ EIGEN_STRONG_INLINE
1388
+ void writePacket(Index row, Index col, const PacketType& x)
1389
+ {
1390
+ m_argImpl.template writePacket<StoreMode>(row, col, x);
1391
+ }
1392
+
1393
+ template<int StoreMode, typename PacketType>
1394
+ EIGEN_STRONG_INLINE
1395
+ void writePacket(Index index, const PacketType& x)
1396
+ {
1397
+ m_argImpl.template writePacket<StoreMode>(index, x);
1398
+ }
1399
+
1400
+ protected:
1401
+ evaluator<ArgType> m_argImpl;
1402
+ };
1403
+
1404
+ template<typename TArgType>
1405
+ struct unary_evaluator<MatrixWrapper<TArgType> >
1406
+ : evaluator_wrapper_base<MatrixWrapper<TArgType> >
1407
+ {
1408
+ typedef MatrixWrapper<TArgType> XprType;
1409
+
1410
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
1411
+ : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
1412
+ { }
1413
+ };
1414
+
1415
+ template<typename TArgType>
1416
+ struct unary_evaluator<ArrayWrapper<TArgType> >
1417
+ : evaluator_wrapper_base<ArrayWrapper<TArgType> >
1418
+ {
1419
+ typedef ArrayWrapper<TArgType> XprType;
1420
+
1421
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
1422
+ : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
1423
+ { }
1424
+ };
1425
+
1426
+
1427
+ // -------------------- Reverse --------------------
1428
+
1429
+ // defined in Reverse.h:
1430
+ template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
1431
+
1432
+ template<typename ArgType, int Direction>
1433
+ struct unary_evaluator<Reverse<ArgType, Direction> >
1434
+ : evaluator_base<Reverse<ArgType, Direction> >
1435
+ {
1436
+ typedef Reverse<ArgType, Direction> XprType;
1437
+ typedef typename XprType::Scalar Scalar;
1438
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
1439
+
1440
+ enum {
1441
+ IsRowMajor = XprType::IsRowMajor,
1442
+ IsColMajor = !IsRowMajor,
1443
+ ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
1444
+ ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
1445
+ ReversePacket = (Direction == BothDirections)
1446
+ || ((Direction == Vertical) && IsColMajor)
1447
+ || ((Direction == Horizontal) && IsRowMajor),
1448
+
1449
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1450
+
1451
+ // let's enable LinearAccess only with vectorization because of the product overhead
1452
+ // FIXME enable DirectAccess with negative strides?
1453
+ Flags0 = evaluator<ArgType>::Flags,
1454
+ LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
1455
+ || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
1456
+ ? LinearAccessBit : 0,
1457
+
1458
+ Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
1459
+
1460
+ Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
1461
+ };
1462
+
1463
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse)
1464
+ : m_argImpl(reverse.nestedExpression()),
1465
+ m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
1466
+ m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
1467
+ { }
1468
+
1469
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1470
+ CoeffReturnType coeff(Index row, Index col) const
1471
+ {
1472
+ return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
1473
+ ReverseCol ? m_cols.value() - col - 1 : col);
1474
+ }
1475
+
1476
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1477
+ CoeffReturnType coeff(Index index) const
1478
+ {
1479
+ return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
1480
+ }
1481
+
1482
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1483
+ Scalar& coeffRef(Index row, Index col)
1484
+ {
1485
+ return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
1486
+ ReverseCol ? m_cols.value() - col - 1 : col);
1487
+ }
1488
+
1489
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1490
+ Scalar& coeffRef(Index index)
1491
+ {
1492
+ return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
1493
+ }
1494
+
1495
+ template<int LoadMode, typename PacketType>
1496
+ EIGEN_STRONG_INLINE
1497
+ PacketType packet(Index row, Index col) const
1498
+ {
1499
+ enum {
1500
+ PacketSize = unpacket_traits<PacketType>::size,
1501
+ OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
1502
+ OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
1503
+ };
1504
+ typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
1505
+ return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
1506
+ ReverseRow ? m_rows.value() - row - OffsetRow : row,
1507
+ ReverseCol ? m_cols.value() - col - OffsetCol : col));
1508
+ }
1509
+
1510
+ template<int LoadMode, typename PacketType>
1511
+ EIGEN_STRONG_INLINE
1512
+ PacketType packet(Index index) const
1513
+ {
1514
+ enum { PacketSize = unpacket_traits<PacketType>::size };
1515
+ return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
1516
+ }
1517
+
1518
+ template<int LoadMode, typename PacketType>
1519
+ EIGEN_STRONG_INLINE
1520
+ void writePacket(Index row, Index col, const PacketType& x)
1521
+ {
1522
+ // FIXME we could factorize some code with packet(i,j)
1523
+ enum {
1524
+ PacketSize = unpacket_traits<PacketType>::size,
1525
+ OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
1526
+ OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
1527
+ };
1528
+ typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
1529
+ m_argImpl.template writePacket<LoadMode>(
1530
+ ReverseRow ? m_rows.value() - row - OffsetRow : row,
1531
+ ReverseCol ? m_cols.value() - col - OffsetCol : col,
1532
+ reverse_packet::run(x));
1533
+ }
1534
+
1535
+ template<int LoadMode, typename PacketType>
1536
+ EIGEN_STRONG_INLINE
1537
+ void writePacket(Index index, const PacketType& x)
1538
+ {
1539
+ enum { PacketSize = unpacket_traits<PacketType>::size };
1540
+ m_argImpl.template writePacket<LoadMode>
1541
+ (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
1542
+ }
1543
+
1544
+ protected:
1545
+ evaluator<ArgType> m_argImpl;
1546
+
1547
+ // If we do not reverse rows, then we do not need to know the number of rows; same for columns
1548
+ // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors.
1549
+ const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
1550
+ const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
1551
+ };
1552
+
1553
+
1554
+ // -------------------- Diagonal --------------------
1555
+
1556
+ template<typename ArgType, int DiagIndex>
1557
+ struct evaluator<Diagonal<ArgType, DiagIndex> >
1558
+ : evaluator_base<Diagonal<ArgType, DiagIndex> >
1559
+ {
1560
+ typedef Diagonal<ArgType, DiagIndex> XprType;
1561
+
1562
+ enum {
1563
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1564
+
1565
+ Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
1566
+
1567
+ Alignment = 0
1568
+ };
1569
+
1570
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)
1571
+ : m_argImpl(diagonal.nestedExpression()),
1572
+ m_index(diagonal.index())
1573
+ { }
1574
+
1575
+ typedef typename XprType::Scalar Scalar;
1576
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
1577
+
1578
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1579
+ CoeffReturnType coeff(Index row, Index) const
1580
+ {
1581
+ return m_argImpl.coeff(row + rowOffset(), row + colOffset());
1582
+ }
1583
+
1584
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1585
+ CoeffReturnType coeff(Index index) const
1586
+ {
1587
+ return m_argImpl.coeff(index + rowOffset(), index + colOffset());
1588
+ }
1589
+
1590
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1591
+ Scalar& coeffRef(Index row, Index)
1592
+ {
1593
+ return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
1594
+ }
1595
+
1596
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1597
+ Scalar& coeffRef(Index index)
1598
+ {
1599
+ return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
1600
+ }
1601
+
1602
+ protected:
1603
+ evaluator<ArgType> m_argImpl;
1604
+ const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
1605
+
1606
+ private:
1607
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
1608
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
1609
+ };
1610
+
1611
+
1612
+ //----------------------------------------------------------------------
1613
+ // deprecated code
1614
+ //----------------------------------------------------------------------
1615
+
1616
+ // -------------------- EvalToTemp --------------------
1617
+
1618
+ // expression class for evaluating nested expression to a temporary
1619
+
1620
+ template<typename ArgType> class EvalToTemp;
1621
+
1622
+ template<typename ArgType>
1623
+ struct traits<EvalToTemp<ArgType> >
1624
+ : public traits<ArgType>
1625
+ { };
1626
+
1627
+ template<typename ArgType>
1628
+ class EvalToTemp
1629
+ : public dense_xpr_base<EvalToTemp<ArgType> >::type
1630
+ {
1631
+ public:
1632
+
1633
+ typedef typename dense_xpr_base<EvalToTemp>::type Base;
1634
+ EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
1635
+
1636
+ explicit EvalToTemp(const ArgType& arg)
1637
+ : m_arg(arg)
1638
+ { }
1639
+
1640
+ const ArgType& arg() const
1641
+ {
1642
+ return m_arg;
1643
+ }
1644
+
1645
+ Index rows() const
1646
+ {
1647
+ return m_arg.rows();
1648
+ }
1649
+
1650
+ Index cols() const
1651
+ {
1652
+ return m_arg.cols();
1653
+ }
1654
+
1655
+ private:
1656
+ const ArgType& m_arg;
1657
+ };
1658
+
1659
+ template<typename ArgType>
1660
+ struct evaluator<EvalToTemp<ArgType> >
1661
+ : public evaluator<typename ArgType::PlainObject>
1662
+ {
1663
+ typedef EvalToTemp<ArgType> XprType;
1664
+ typedef typename ArgType::PlainObject PlainObject;
1665
+ typedef evaluator<PlainObject> Base;
1666
+
1667
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
1668
+ : m_result(xpr.arg())
1669
+ {
1670
+ ::new (static_cast<Base*>(this)) Base(m_result);
1671
+ }
1672
+
1673
+ // This constructor is used when nesting an EvalTo evaluator in another evaluator
1674
+ EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
1675
+ : m_result(arg)
1676
+ {
1677
+ ::new (static_cast<Base*>(this)) Base(m_result);
1678
+ }
1679
+
1680
+ protected:
1681
+ PlainObject m_result;
1682
+ };
1683
+
1684
+ } // namespace internal
1685
+
1686
+ } // end namespace Eigen
1687
+
1688
+ #endif // EIGEN_COREEVALUATORS_H