umappp 0.1.2

Files changed (395)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +25 -0
  3. data/README.md +110 -0
  4. data/ext/umappp/extconf.rb +25 -0
  5. data/ext/umappp/numo.hpp +867 -0
  6. data/ext/umappp/umappp.cpp +225 -0
  7. data/lib/umappp/version.rb +5 -0
  8. data/lib/umappp.rb +41 -0
  9. data/vendor/Eigen/Cholesky +45 -0
  10. data/vendor/Eigen/CholmodSupport +48 -0
  11. data/vendor/Eigen/Core +384 -0
  12. data/vendor/Eigen/Dense +7 -0
  13. data/vendor/Eigen/Eigen +2 -0
  14. data/vendor/Eigen/Eigenvalues +60 -0
  15. data/vendor/Eigen/Geometry +59 -0
  16. data/vendor/Eigen/Householder +29 -0
  17. data/vendor/Eigen/IterativeLinearSolvers +48 -0
  18. data/vendor/Eigen/Jacobi +32 -0
  19. data/vendor/Eigen/KLUSupport +41 -0
  20. data/vendor/Eigen/LU +47 -0
  21. data/vendor/Eigen/MetisSupport +35 -0
  22. data/vendor/Eigen/OrderingMethods +70 -0
  23. data/vendor/Eigen/PaStiXSupport +49 -0
  24. data/vendor/Eigen/PardisoSupport +35 -0
  25. data/vendor/Eigen/QR +50 -0
  26. data/vendor/Eigen/QtAlignedMalloc +39 -0
  27. data/vendor/Eigen/SPQRSupport +34 -0
  28. data/vendor/Eigen/SVD +50 -0
  29. data/vendor/Eigen/Sparse +34 -0
  30. data/vendor/Eigen/SparseCholesky +37 -0
  31. data/vendor/Eigen/SparseCore +69 -0
  32. data/vendor/Eigen/SparseLU +50 -0
  33. data/vendor/Eigen/SparseQR +36 -0
  34. data/vendor/Eigen/StdDeque +27 -0
  35. data/vendor/Eigen/StdList +26 -0
  36. data/vendor/Eigen/StdVector +27 -0
  37. data/vendor/Eigen/SuperLUSupport +64 -0
  38. data/vendor/Eigen/UmfPackSupport +40 -0
  39. data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
  40. data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
  41. data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  42. data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
  43. data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
  44. data/vendor/Eigen/src/Core/Array.h +417 -0
  45. data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
  46. data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
  47. data/vendor/Eigen/src/Core/Assign.h +90 -0
  48. data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
  49. data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
  50. data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
  51. data/vendor/Eigen/src/Core/Block.h +448 -0
  52. data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
  53. data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
  54. data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
  55. data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
  56. data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
  57. data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
  58. data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
  59. data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  60. data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  61. data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
  62. data/vendor/Eigen/src/Core/DenseBase.h +701 -0
  63. data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
  64. data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
  65. data/vendor/Eigen/src/Core/Diagonal.h +258 -0
  66. data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
  67. data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
  68. data/vendor/Eigen/src/Core/Dot.h +318 -0
  69. data/vendor/Eigen/src/Core/EigenBase.h +160 -0
  70. data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
  71. data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
  72. data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
  73. data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
  74. data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
  75. data/vendor/Eigen/src/Core/IO.h +258 -0
  76. data/vendor/Eigen/src/Core/IndexedView.h +237 -0
  77. data/vendor/Eigen/src/Core/Inverse.h +117 -0
  78. data/vendor/Eigen/src/Core/Map.h +171 -0
  79. data/vendor/Eigen/src/Core/MapBase.h +310 -0
  80. data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
  81. data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
  82. data/vendor/Eigen/src/Core/Matrix.h +565 -0
  83. data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
  84. data/vendor/Eigen/src/Core/NestByValue.h +85 -0
  85. data/vendor/Eigen/src/Core/NoAlias.h +109 -0
  86. data/vendor/Eigen/src/Core/NumTraits.h +335 -0
  87. data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  88. data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
  89. data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
  90. data/vendor/Eigen/src/Core/Product.h +191 -0
  91. data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
  92. data/vendor/Eigen/src/Core/Random.h +218 -0
  93. data/vendor/Eigen/src/Core/Redux.h +515 -0
  94. data/vendor/Eigen/src/Core/Ref.h +381 -0
  95. data/vendor/Eigen/src/Core/Replicate.h +142 -0
  96. data/vendor/Eigen/src/Core/Reshaped.h +454 -0
  97. data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
  98. data/vendor/Eigen/src/Core/Reverse.h +217 -0
  99. data/vendor/Eigen/src/Core/Select.h +164 -0
  100. data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
  101. data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  102. data/vendor/Eigen/src/Core/Solve.h +188 -0
  103. data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
  104. data/vendor/Eigen/src/Core/SolverBase.h +168 -0
  105. data/vendor/Eigen/src/Core/StableNorm.h +251 -0
  106. data/vendor/Eigen/src/Core/StlIterators.h +463 -0
  107. data/vendor/Eigen/src/Core/Stride.h +116 -0
  108. data/vendor/Eigen/src/Core/Swap.h +68 -0
  109. data/vendor/Eigen/src/Core/Transpose.h +464 -0
  110. data/vendor/Eigen/src/Core/Transpositions.h +386 -0
  111. data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
  112. data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
  113. data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
  114. data/vendor/Eigen/src/Core/Visitor.h +381 -0
  115. data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
  116. data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
  117. data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
  118. data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
  119. data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  120. data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
  121. data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
  122. data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  123. data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
  124. data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
  125. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  126. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  127. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  128. data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
  129. data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
  130. data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  131. data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
  132. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  133. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  134. data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
  135. data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
  136. data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  137. data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
  138. data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  139. data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  140. data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  141. data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  142. data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  143. data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  144. data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
  145. data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  146. data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
  147. data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
  148. data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  149. data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
  150. data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
  151. data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
  152. data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
  153. data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  154. data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  155. data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  156. data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  157. data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  158. data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  159. data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  160. data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  161. data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
  162. data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
  163. data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
  164. data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
  165. data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
  166. data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
  167. data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
  168. data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  169. data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
  170. data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
  171. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
  172. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
  173. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  174. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
  175. data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
  176. data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  177. data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
  178. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
  179. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
  180. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
  181. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  182. data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  183. data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
  184. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
  185. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
  186. data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  187. data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  188. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
  189. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
  190. data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
  191. data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
  192. data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  193. data/vendor/Eigen/src/Core/util/Constants.h +563 -0
  194. data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
  195. data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
  196. data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  197. data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
  198. data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
  199. data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
  200. data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
  201. data/vendor/Eigen/src/Core/util/Meta.h +812 -0
  202. data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
  203. data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
  204. data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  205. data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
  206. data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  207. data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
  208. data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  209. data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
  210. data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  211. data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  212. data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  213. data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  214. data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  215. data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  216. data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
  217. data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
  218. data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  219. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
  220. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  221. data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
  222. data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
  223. data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
  224. data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
  225. data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
  226. data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
  227. data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
  228. data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
  229. data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
  230. data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
  231. data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
  232. data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
  233. data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
  234. data/vendor/Eigen/src/Geometry/Translation.h +202 -0
  235. data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
  236. data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  237. data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
  238. data/vendor/Eigen/src/Householder/Householder.h +176 -0
  239. data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
  240. data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  241. data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
  242. data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
  243. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
  244. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
  245. data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
  246. data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
  247. data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
  248. data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
  249. data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  250. data/vendor/Eigen/src/LU/Determinant.h +117 -0
  251. data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
  252. data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
  253. data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
  254. data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  255. data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
  256. data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  257. data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
  258. data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
  259. data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
  260. data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  261. data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
  262. data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
  263. data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  264. data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
  265. data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
  266. data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
  267. data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  268. data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
  269. data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
  270. data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
  271. data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  272. data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
  273. data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  274. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
  275. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
  276. data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
  277. data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
  278. data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  279. data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  280. data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
  281. data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
  282. data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  283. data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
  284. data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
  285. data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
  286. data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
  287. data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  288. data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
  289. data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  290. data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
  291. data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
  292. data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
  293. data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  294. data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
  295. data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
  296. data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
  297. data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
  298. data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  299. data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  300. data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  301. data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  302. data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
  303. data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
  304. data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
  305. data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  306. data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
  307. data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  308. data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  309. data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  310. data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
  311. data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  312. data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  313. data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  314. data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  315. data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  316. data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  317. data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  318. data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  319. data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  320. data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  321. data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  322. data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  323. data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
  324. data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
  325. data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
  326. data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
  327. data/vendor/Eigen/src/StlSupport/details.h +84 -0
  328. data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
  329. data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
  330. data/vendor/Eigen/src/misc/Image.h +82 -0
  331. data/vendor/Eigen/src/misc/Kernel.h +79 -0
  332. data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
  333. data/vendor/Eigen/src/misc/blas.h +440 -0
  334. data/vendor/Eigen/src/misc/lapack.h +152 -0
  335. data/vendor/Eigen/src/misc/lapacke.h +16292 -0
  336. data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
  337. data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
  338. data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
  339. data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
  340. data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  341. data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
  342. data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  343. data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  344. data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
  345. data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
  346. data/vendor/aarand/aarand.hpp +114 -0
  347. data/vendor/annoy/annoylib.h +1495 -0
  348. data/vendor/annoy/kissrandom.h +120 -0
  349. data/vendor/annoy/mman.h +242 -0
  350. data/vendor/hnswlib/bruteforce.h +152 -0
  351. data/vendor/hnswlib/hnswalg.h +1192 -0
  352. data/vendor/hnswlib/hnswlib.h +108 -0
  353. data/vendor/hnswlib/space_ip.h +282 -0
  354. data/vendor/hnswlib/space_l2.h +281 -0
  355. data/vendor/hnswlib/visited_list_pool.h +79 -0
  356. data/vendor/irlba/irlba.hpp +575 -0
  357. data/vendor/irlba/lanczos.hpp +212 -0
  358. data/vendor/irlba/parallel.hpp +474 -0
  359. data/vendor/irlba/utils.hpp +224 -0
  360. data/vendor/irlba/wrappers.hpp +228 -0
  361. data/vendor/kmeans/Base.hpp +75 -0
  362. data/vendor/kmeans/Details.hpp +79 -0
  363. data/vendor/kmeans/HartiganWong.hpp +492 -0
  364. data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
  365. data/vendor/kmeans/InitializeNone.hpp +44 -0
  366. data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
  367. data/vendor/kmeans/InitializeRandom.hpp +91 -0
  368. data/vendor/kmeans/Kmeans.hpp +161 -0
  369. data/vendor/kmeans/Lloyd.hpp +134 -0
  370. data/vendor/kmeans/MiniBatch.hpp +269 -0
  371. data/vendor/kmeans/QuickSearch.hpp +179 -0
  372. data/vendor/kmeans/compute_centroids.hpp +32 -0
  373. data/vendor/kmeans/compute_wcss.hpp +27 -0
  374. data/vendor/kmeans/is_edge_case.hpp +42 -0
  375. data/vendor/kmeans/random.hpp +55 -0
  376. data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
  377. data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
  378. data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
  379. data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
  380. data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
  381. data/vendor/knncolle/knncolle.hpp +34 -0
  382. data/vendor/knncolle/utils/Base.hpp +100 -0
  383. data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
  384. data/vendor/knncolle/utils/distances.hpp +98 -0
  385. data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
  386. data/vendor/powerit/PowerIterations.hpp +157 -0
  387. data/vendor/umappp/NeighborList.hpp +37 -0
  388. data/vendor/umappp/Umap.hpp +662 -0
  389. data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
  390. data/vendor/umappp/find_ab.hpp +157 -0
  391. data/vendor/umappp/neighbor_similarities.hpp +136 -0
  392. data/vendor/umappp/optimize_layout.hpp +285 -0
  393. data/vendor/umappp/spectral_init.hpp +181 -0
  394. data/vendor/umappp/umappp.hpp +13 -0
  395. metadata +465 -0
data/vendor/Eigen/src/Core/CoreEvaluators.h
@@ -0,0 +1,1741 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+ // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ #ifndef EIGEN_COREEVALUATORS_H
+ #define EIGEN_COREEVALUATORS_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ // This class returns the evaluator kind from the expression storage kind.
+ // Default assumes index based accessors
+ template<typename StorageKind>
+ struct storage_kind_to_evaluator_kind {
+ typedef IndexBased Kind;
+ };
+
+ // This class returns the evaluator shape from the expression storage kind.
+ // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
+ template<typename StorageKind> struct storage_kind_to_shape;
+
+ template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; };
+ template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; };
+ template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; };
+ template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; };
+
+ // Evaluators have to be specialized with respect to various criteria such as:
+ // - storage/structure/shape
+ // - scalar type
+ // - etc.
+ // Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators.
+ // We currently distinguish the following kind of evaluators:
+ // - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
+ // - binary_evaluator for expression taking two arguments (CwiseBinaryOp)
+ // - ternary_evaluator for expression taking three arguments (CwiseTernaryOp)
+ // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
+ // - mapbase_evaluator for Map, Block, Ref
+ // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
+
+ template< typename T,
+ typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind,
+ typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind,
+ typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind,
+ typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
+ typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
+ typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
+
+ template< typename T,
+ typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
+ typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
+ typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
+ typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
+
+ template< typename T,
+ typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind,
+ typename Scalar = typename T::Scalar> struct unary_evaluator;
+
+ // evaluator_traits<T> contains traits for evaluator<T>
+
+ template<typename T>
+ struct evaluator_traits_base
+ {
+ // by default, get evaluator kind and shape from storage
+ typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
+ typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
+ };
+
+ // Default evaluator traits
+ template<typename T>
+ struct evaluator_traits : public evaluator_traits_base<T>
+ {
+ };
+
+ template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
+ struct evaluator_assume_aliasing {
+ static const bool value = false;
+ };
+
+ // By default, we assume a unary expression:
+ template<typename T>
+ struct evaluator : public unary_evaluator<T>
+ {
+ typedef unary_evaluator<T> Base;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const T& xpr) : Base(xpr) {}
+ };
+
+
+ // TODO: Think about const-correctness
+ template<typename T>
+ struct evaluator<const T>
+ : evaluator<T>
+ {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
+ };
+
+ // ---------- base class for all evaluators ----------
+
+ template<typename ExpressionType>
+ struct evaluator_base
+ {
+ // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
+ typedef traits<ExpressionType> ExpressionTraits;
+
+ enum {
+ Alignment = 0
+ };
+ // noncopyable:
+ // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)
+ // and make complex evaluator much larger than then should do.
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {}
+ private:
+ EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);
+ EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);
+ };
+
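The comments in the section above describe how expression evaluation is dispatched: client code (for example Eigen's assignment loops) wraps an expression in internal::evaluator<> and reads coefficients through it. The following is an illustrative sketch, not part of the vendored file; the helper name sum_via_evaluator is made up for the example.

// Illustrative sketch (not part of the vendored file): how the evaluators
// defined in this header are typically consumed.
#include <Eigen/Dense>

template <typename Xpr>
double sum_via_evaluator(const Xpr& xpr)  // hypothetical helper, for illustration only
{
  Eigen::internal::evaluator<Xpr> eval(xpr);   // picks the specialization matching the expression
  double acc = 0;
  for (Eigen::Index j = 0; j < xpr.cols(); ++j)
    for (Eigen::Index i = 0; i < xpr.rows(); ++i)
      acc += eval.coeff(i, j);                 // index-based access, no temporaries created
  return acc;
}
// e.g. sum_via_evaluator(a + 2.0 * b) walks the expression tree lazily, coefficient by coefficient.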
+ // -------------------- Matrix and Array --------------------
+ //
+ // evaluator<PlainObjectBase> is a common base class for the
+ // Matrix and Array evaluators.
+ // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
+ // so no need for more sophisticated dispatching.
+
+ // this helper permits to completely eliminate m_outerStride if it is known at compiletime.
+ template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
+ public:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
+ {
+ #ifndef EIGEN_INTERNAL_DEBUGGING
+ EIGEN_UNUSED_VARIABLE(outerStride);
+ #endif
+ eigen_internal_assert(outerStride==OuterStride);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; }
+ const Scalar *data;
+ };
+
+ template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
+ public:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Index outerStride() const { return m_outerStride; }
+ const Scalar *data;
+ protected:
+ Index m_outerStride;
+ };
+
+ template<typename Derived>
+ struct evaluator<PlainObjectBase<Derived> >
+ : evaluator_base<Derived>
+ {
+ typedef PlainObjectBase<Derived> PlainObjectType;
+ typedef typename PlainObjectType::Scalar Scalar;
+ typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
+
+ enum {
+ IsRowMajor = PlainObjectType::IsRowMajor,
+ IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
+ RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
+ ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
+
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Flags = traits<Derived>::EvaluatorFlags,
+ Alignment = traits<Derived>::Alignment
+ };
+ enum {
+ // We do not need to know the outer stride for vectors
+ OuterStrideAtCompileTime = IsVectorAtCompileTime ? 0
+ : int(IsRowMajor) ? ColsAtCompileTime
+ : RowsAtCompileTime
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator()
+ : m_d(0,OuterStrideAtCompileTime)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const PlainObjectType& m)
+ : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ if (IsRowMajor)
+ return m_d.data[row * m_d.outerStride() + col];
+ else
+ return m_d.data[row + col * m_d.outerStride()];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_d.data[index];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ if (IsRowMajor)
+ return const_cast<Scalar*>(m_d.data)[row * m_d.outerStride() + col];
+ else
+ return const_cast<Scalar*>(m_d.data)[row + col * m_d.outerStride()];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return const_cast<Scalar*>(m_d.data)[index];
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ if (IsRowMajor)
+ return ploadt<PacketType, LoadMode>(m_d.data + row * m_d.outerStride() + col);
+ else
+ return ploadt<PacketType, LoadMode>(m_d.data + row + col * m_d.outerStride());
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return ploadt<PacketType, LoadMode>(m_d.data + index);
+ }
+
+ template<int StoreMode,typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index row, Index col, const PacketType& x)
+ {
+ if (IsRowMajor)
+ return pstoret<Scalar, PacketType, StoreMode>
+ (const_cast<Scalar*>(m_d.data) + row * m_d.outerStride() + col, x);
+ else
+ return pstoret<Scalar, PacketType, StoreMode>
+ (const_cast<Scalar*>(m_d.data) + row + col * m_d.outerStride(), x);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketType& x)
+ {
+ return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x);
+ }
+
+ protected:
+
+ plainobjectbase_evaluator_data<Scalar,OuterStrideAtCompileTime> m_d;
+ };
+
+ template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+ struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
+ : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
+ {
+ typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& m)
+ : evaluator<PlainObjectBase<XprType> >(m)
+ { }
+ };
+
+ template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+ struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
+ : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
+ {
+ typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ evaluator() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& m)
+ : evaluator<PlainObjectBase<XprType> >(m)
+ { }
+ };
+
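The coeff()/coeffRef() bodies above address storage directly with the outer stride: column-major uses row + col*outerStride(), row-major uses row*outerStride() + col. A minimal sketch of that arithmetic follows; it is not part of the vendored file.

// Illustrative sketch (not part of the vendored file): for a column-major
// 3x4 matrix, outerStride() == 3, so coefficient (row=1, col=2) sits at
// offset 1 + 2*3 == 7 in the underlying array.
#include <Eigen/Dense>
#include <cassert>

inline void stride_addressing_demo()
{
  Eigen::MatrixXd m(3, 4);   // column-major by default
  m.setZero();
  m(1, 2) = 42.0;
  const double* raw = m.data();
  assert(raw[1 + 2 * m.outerStride()] == 42.0);  // same formula as coeff(row, col) above
}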
+ // -------------------- Transpose --------------------
+
+ template<typename ArgType>
+ struct unary_evaluator<Transpose<ArgType>, IndexBased>
+ : evaluator_base<Transpose<ArgType> >
+ {
+ typedef Transpose<ArgType> XprType;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
+ Alignment = evaluator<ArgType>::Alignment
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_argImpl.coeff(col, row);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_argImpl.coeff(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ return m_argImpl.coeffRef(col, row);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename XprType::Scalar& coeffRef(Index index)
+ {
+ return m_argImpl.coeffRef(index);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ return m_argImpl.template packet<LoadMode,PacketType>(col, row);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return m_argImpl.template packet<LoadMode,PacketType>(index);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index row, Index col, const PacketType& x)
+ {
+ m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketType& x)
+ {
+ m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
+ }
+
+ protected:
+ evaluator<ArgType> m_argImpl;
+ };
+
+ // -------------------- CwiseNullaryOp --------------------
+ // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
+ // Likewise, there is not need to more sophisticated dispatching here.
+
+ template<typename Scalar,typename NullaryOp,
+ bool has_nullary = has_nullary_operator<NullaryOp>::value,
+ bool has_unary = has_unary_operator<NullaryOp>::value,
+ bool has_binary = has_binary_operator<NullaryOp>::value>
+ struct nullary_wrapper
+ {
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
+
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
+ };
+
+ template<typename Scalar,typename NullaryOp>
+ struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
+ {
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
+ };
+
+ template<typename Scalar,typename NullaryOp>
+ struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
+ {
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
+ };
+
+ // We need the following specialization for vector-only functors assigned to a runtime vector,
+ // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
+ // In this case, i==0 and j is used for the actual iteration.
+ template<typename Scalar,typename NullaryOp>
+ struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
+ {
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
+ eigen_assert(i==0 || j==0);
+ return op(i+j);
+ }
+ template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
+ eigen_assert(i==0 || j==0);
+ return op.template packetOp<T>(i+j);
+ }
+
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
+ template <typename T, typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
+ };
+
+ template<typename Scalar,typename NullaryOp>
+ struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
+
+ #if 0 && EIGEN_COMP_MSVC>0
+ // Disable this ugly workaround. This is now handled in traits<Ref>::match,
+ // but this piece of code might still become handly if some other weird compilation
+ // erros pop up again.
+
+ // MSVC exhibits a weird compilation error when
+ // compiling:
+ // Eigen::MatrixXf A = MatrixXf::Random(3,3);
+ // Ref<const MatrixXf> R = 2.f*A;
+ // and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
+ // The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
+ // and at that time has_*ary_operator<T> returns true regardless of T.
+ // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
+ // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
+ // and packet() are really instantiated as implemented below:
+
+ // This is a simple wrapper around Index to enforce the re-instantiation of
+ // has_*ary_operator when needed.
+ template<typename T> struct nullary_wrapper_workaround_msvc {
+ nullary_wrapper_workaround_msvc(const T&);
+ operator T()const;
+ };
+
+ template<typename Scalar,typename NullaryOp>
+ struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
+ {
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
+ return nullary_wrapper<Scalar,NullaryOp,
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
+ }
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
+ return nullary_wrapper<Scalar,NullaryOp,
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
+ }
+
+ template <typename T, typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
+ return nullary_wrapper<Scalar,NullaryOp,
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
+ }
+ template <typename T, typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
+ return nullary_wrapper<Scalar,NullaryOp,
+ has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
+ has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
+ }
+ };
+ #endif // MSVC workaround
+
+ template<typename NullaryOp, typename PlainObjectType>
+ struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
+ : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
+ {
+ typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
+ typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
+
+ enum {
+ CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,
+
+ Flags = (evaluator<PlainObjectTypeCleaned>::Flags
+ & ( HereditaryBits
+ | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
+ | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
+ | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
+ Alignment = AlignedMax
+ };
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
+ : m_functor(n.functor()), m_wrapper()
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(IndexType row, IndexType col) const
+ {
+ return m_wrapper(m_functor, row, col);
+ }
+
+ template <typename IndexType>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(IndexType index) const
+ {
+ return m_wrapper(m_functor,index);
+ }
+
+ template<int LoadMode, typename PacketType, typename IndexType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(IndexType row, IndexType col) const
+ {
+ return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
+ }
+
+ template<int LoadMode, typename PacketType, typename IndexType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(IndexType index) const
+ {
+ return m_wrapper.template packetOp<PacketType>(m_functor, index);
+ }
+
+ protected:
+ const NullaryOp m_functor;
+ const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
+ };
+
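As the CwiseNullaryOp section above describes, such expressions carry only a functor and nullary_wrapper decides whether it is called with zero, one, or two indices. A brief illustrative sketch, not part of the vendored file:

// Illustrative sketch (not part of the vendored file): Constant() and
// LinSpaced() build CwiseNullaryOp expressions; coefficients are generated
// on demand, with no storage behind either expression.
#include <Eigen/Dense>
#include <cassert>

inline void nullary_demo()
{
  auto c = Eigen::MatrixXd::Constant(2, 2, 3.5);     // functor ignores the indices
  auto l = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);  // functor uses a single index
  assert(c(1, 1) == 3.5);
  assert(l(0) == 0.0);
}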
+ // -------------------- CwiseUnaryOp --------------------
+
+ template<typename UnaryOp, typename ArgType>
+ struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
+ : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
+ {
+ typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
+
+ enum {
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
+
+ Flags = evaluator<ArgType>::Flags
+ & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
+ Alignment = evaluator<ArgType>::Alignment
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& op) : m_d(op)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_d.func()(m_d.argImpl.coeff(row, col));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_d.func()(m_d.argImpl.coeff(index));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));
+ }
+
+ protected:
+
+ // this helper permits to completely eliminate the functor if it is empty
+ struct Data
+ {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const UnaryOp& func() const { return op; }
+ UnaryOp op;
+ evaluator<ArgType> argImpl;
+ };
+
+ Data m_d;
+ };
+
+ // -------------------- CwiseTernaryOp --------------------
+
+ // this is a ternary expression
+ template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
+ struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
+ : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
+ {
+ typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
+ typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
+ };
+
+ template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
+ struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
+ : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
+ {
+ typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
+
+ enum {
+ CoeffReadCost = int(evaluator<Arg1>::CoeffReadCost) + int(evaluator<Arg2>::CoeffReadCost) + int(evaluator<Arg3>::CoeffReadCost) + int(functor_traits<TernaryOp>::Cost),
+
+ Arg1Flags = evaluator<Arg1>::Flags,
+ Arg2Flags = evaluator<Arg2>::Flags,
+ Arg3Flags = evaluator<Arg3>::Flags,
+ SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
+ StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
+ Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
+ HereditaryBits
+ | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
+ ( (StorageOrdersAgree ? LinearAccessBit : 0)
+ | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
+ )
+ )
+ ),
+ Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
+ Alignment = EIGEN_PLAIN_ENUM_MIN(
+ EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
+ evaluator<Arg3>::Alignment)
+ };
+
+ EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(row, col),
+ m_d.arg2Impl.template packet<LoadMode,PacketType>(row, col),
+ m_d.arg3Impl.template packet<LoadMode,PacketType>(row, col));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(index),
+ m_d.arg2Impl.template packet<LoadMode,PacketType>(index),
+ m_d.arg3Impl.template packet<LoadMode,PacketType>(index));
+ }
+
+ protected:
+ // this helper permits to completely eliminate the functor if it is empty
+ struct Data
+ {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const TernaryOp& func() const { return op; }
+ TernaryOp op;
+ evaluator<Arg1> arg1Impl;
+ evaluator<Arg2> arg2Impl;
+ evaluator<Arg3> arg3Impl;
+ };
+
+ Data m_d;
+ };
+
+ // -------------------- CwiseBinaryOp --------------------
+
+ // this is a binary expression
+ template<typename BinaryOp, typename Lhs, typename Rhs>
+ struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+ : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+ {
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+ typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& xpr) : Base(xpr) {}
+ };
+
+ template<typename BinaryOp, typename Lhs, typename Rhs>
+ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+ {
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+
+ enum {
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+
+ LhsFlags = evaluator<Lhs>::Flags,
+ RhsFlags = evaluator<Rhs>::Flags,
+ SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
+ StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
+ Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
+ HereditaryBits
+ | (int(LhsFlags) & int(RhsFlags) &
+ ( (StorageOrdersAgree ? LinearAccessBit : 0)
+ | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
+ )
+ )
+ ),
+ Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
+ Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(row, col),
+ m_d.rhsImpl.template packet<LoadMode,PacketType>(row, col));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(index),
+ m_d.rhsImpl.template packet<LoadMode,PacketType>(index));
+ }
+
+ protected:
+
+ // this helper permits to completely eliminate the functor if it is empty
+ struct Data
+ {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const BinaryOp& func() const { return op; }
+ BinaryOp op;
+ evaluator<Lhs> lhsImpl;
+ evaluator<Rhs> rhsImpl;
+ };
+
+ Data m_d;
+ };
+
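The binary evaluator above shows that an expression such as a + b stores no result: its coeff() simply applies the functor to the coefficients of the two nested evaluators, and actual work happens only when the expression is assigned. A short illustrative sketch, not part of the vendored file:

// Illustrative sketch (not part of the vendored file): a + b is a CwiseBinaryOp,
// evaluated coefficient by coefficient through binary_evaluator.
#include <Eigen/Dense>
#include <cassert>

inline void binary_demo()
{
  Eigen::Matrix2d a, b;
  a << 1, 2, 3, 4;
  b << 10, 20, 30, 40;
  auto sum = a + b;            // expression only, no temporary matrix
  assert(sum(1, 0) == 33.0);   // functor applied to a(1,0) and b(1,0)
  Eigen::Matrix2d c = sum;     // evaluation happens here, driven by the evaluator
  assert(c(1, 0) == 33.0);
}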
+ // -------------------- CwiseUnaryView --------------------
+
+ template<typename UnaryOp, typename ArgType>
+ struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
+ : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
+ {
+ typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
+
+ enum {
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
+
+ Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
+
+ Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
+ };
+
+ EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_d.func()(m_d.argImpl.coeff(row, col));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_d.func()(m_d.argImpl.coeff(index));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ return m_d.func()(m_d.argImpl.coeffRef(row, col));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return m_d.func()(m_d.argImpl.coeffRef(index));
+ }
+
+ protected:
+
+ // this helper allows the functor to be completely eliminated when it is empty
+ struct Data
+ {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ const UnaryOp& func() const { return op; }
+ UnaryOp op;
+ evaluator<ArgType> argImpl;
+ };
+
+ Data m_d;
+ };
+
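For context, a CwiseUnaryView is what public views such as .real() and .imag() on complex matrices return; the coeffRef overloads above are what make writing through such a view possible. A small usage sketch using Eigen's public API (illustrative only, not part of the vendored file):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2cd m = Eigen::Matrix2cd::Zero();
  // real() and imag() are coefficient-wise views; writing through them
  // modifies the underlying complex matrix in place.
  m.real() = Eigen::Matrix2d::Constant(1.0);
  m.imag()(0, 1) = 2.0;
  std::cout << m << "\n";
}
```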
+ // -------------------- Map --------------------
+
+ // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
+ // but that might complicate template specialization
+ template<typename Derived, typename PlainObjectType>
+ struct mapbase_evaluator;
+
+ template<typename Derived, typename PlainObjectType>
+ struct mapbase_evaluator : evaluator_base<Derived>
+ {
+ typedef Derived XprType;
+ typedef typename XprType::PointerType PointerType;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ enum {
+ IsRowMajor = XprType::RowsAtCompileTime,
+ ColsAtCompileTime = XprType::ColsAtCompileTime,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit mapbase_evaluator(const XprType& map)
+ : m_data(const_cast<PointerType>(map.data())),
+ m_innerStride(map.innerStride()),
+ m_outerStride(map.outerStride())
+ {
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
+ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_data[col * colStride() + row * rowStride()];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_data[index * m_innerStride.value()];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ return m_data[col * colStride() + row * rowStride()];
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return m_data[index * m_innerStride.value()];
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ PointerType ptr = m_data + row * rowStride() + col * colStride();
+ return internal::ploadt<PacketType, LoadMode>(ptr);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index row, Index col, const PacketType& x)
+ {
+ PointerType ptr = m_data + row * rowStride() + col * colStride();
+ return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketType& x)
+ {
+ internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
+ }
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rowStride() const EIGEN_NOEXCEPT {
+ return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index colStride() const EIGEN_NOEXCEPT {
+ return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
+ }
+
+ PointerType m_data;
+ const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
+ const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
+ };
+
+ template<typename PlainObjectType, int MapOptions, typename StrideType>
+ struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
+ : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
+ {
+ typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
+ typedef typename XprType::Scalar Scalar;
+ // TODO: should check for smaller packet types once we can handle multi-sized packet types
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+
+ enum {
+ InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
+ ? int(PlainObjectType::InnerStrideAtCompileTime)
+ : int(StrideType::InnerStrideAtCompileTime),
+ OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
+ ? int(PlainObjectType::OuterStrideAtCompileTime)
+ : int(StrideType::OuterStrideAtCompileTime),
+ HasNoInnerStride = InnerStrideAtCompileTime == 1,
+ HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
+ HasNoStride = HasNoInnerStride && HasNoOuterStride,
+ IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
+
+ PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
+ LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
+ Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
+
+ Alignment = int(MapOptions)&int(AlignedMask)
+ };
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
+ : mapbase_evaluator<XprType, PlainObjectType>(map)
+ { }
+ };
+
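A short usage sketch of the Map expression this evaluator serves, mapping a raw buffer with and without an explicit outer stride (illustrative only, not part of the vendored file):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  double buf[12];
  for (int i = 0; i < 12; ++i) buf[i] = i;

  // View the buffer as a 3x4 column-major matrix without copying.
  Eigen::Map<Eigen::MatrixXd> m(buf, 3, 4);
  m(0, 0) = 100.0;  // writes straight into buf[0]

  // Same buffer viewed as 3x3 with an outer stride of 4:
  // each column starts 4 entries after the previous one.
  Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<>> strided(buf, 3, 3, Eigen::OuterStride<>(4));
  std::cout << m << "\n\n" << strided << "\n";
}
```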
+ // -------------------- Ref --------------------
+
+ template<typename PlainObjectType, int RefOptions, typename StrideType>
+ struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
+ : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
+ {
+ typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
+
+ enum {
+ Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
+ Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& ref)
+ : mapbase_evaluator<XprType, PlainObjectType>(ref)
+ { }
+ };
+
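A brief illustration of the Ref expression handled above: a function taking Eigen::Ref<const VectorXd> binds to plain vectors and to column blocks alike, without copying when the strides are compatible (a usage sketch, not part of the vendored file):

```cpp
#include <Eigen/Dense>
#include <iostream>

// Accepts any compatible double vector expression; contiguous arguments bind without a copy.
double first_plus_last(const Eigen::Ref<const Eigen::VectorXd>& v) {
  return v(0) + v(v.size() - 1);
}

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 4.0);
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
  std::cout << first_plus_last(v) << "\n";         // plain vector
  std::cout << first_plus_last(m.col(2)) << "\n";  // a Block column binds too
}
```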
+ // -------------------- Block --------------------
+
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
+ bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
+
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
+ : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
+ {
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
+ typedef typename XprType::Scalar Scalar;
+ // TODO: should check for smaller packet types once we can handle multi-sized packet types
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+
+ RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
+ ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
+
+ ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
+ IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
+ : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
+ : ArgTypeIsRowMajor,
+ HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
+ InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+ InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
+ ? int(inner_stride_at_compile_time<ArgType>::ret)
+ : int(outer_stride_at_compile_time<ArgType>::ret),
+ OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
+ ? int(outer_stride_at_compile_time<ArgType>::ret)
+ : int(inner_stride_at_compile_time<ArgType>::ret),
+ MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
+
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
+ FlagsRowMajorBit = XprType::Flags&RowMajorBit,
+ Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
+ DirectAccessBit |
+ MaskPacketAccessBit),
+ Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
+
+ PacketAlignment = unpacket_traits<PacketScalar>::alignment,
+ Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
+ && (OuterStrideAtCompileTime!=0)
+ && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
+ Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
+ };
+ typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& block) : block_evaluator_type(block)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+ };
+
+ // no direct-access => dispatch to a unary evaluator
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
+ : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
+ {
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit block_evaluator(const XprType& block)
+ : unary_evaluator<XprType>(block)
+ {}
+ };
+
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
+ : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
+ {
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& block)
+ : m_argImpl(block.nestedExpression()),
+ m_startRow(block.startRow()),
+ m_startCol(block.startCol()),
+ m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
+ { }
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ enum {
+ RowsAtCompileTime = XprType::RowsAtCompileTime,
+ ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ if (ForwardLinearAccess)
+ return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
+ else
+ return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index row, Index col, const PacketType& x)
+ {
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketType& x)
+ {
+ if (ForwardLinearAccess)
+ return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
+ else
+ return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
+ RowsAtCompileTime == 1 ? index : 0,
+ x);
+ }
+
+ protected:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const
+ {
+ return m_argImpl.coeff(m_linear_offset.value() + index);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const
+ {
+ return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */)
+ {
+ return m_argImpl.coeffRef(m_linear_offset.value() + index);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */)
+ {
+ return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
+ }
+
+ evaluator<ArgType> m_argImpl;
+ const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
+ const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
+ const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
+ };
+
+ // TODO: This evaluator does not actually use the child evaluator;
+ // all action is via the data() as returned by the Block expression.
+
+ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
+ : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
+ typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
+ {
+ typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
+ typedef typename XprType::Scalar Scalar;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit block_evaluator(const XprType& block)
+ : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
+ {
+ // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
+ eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
+ }
+ };
+
+
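For reference, the Block expression evaluated above is what .block() returns; writing through it updates the underlying matrix, as in this small sketch (public Eigen API, illustrative only):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(4, 4);
  // A 2x2 block starting at row 1, column 1; writing through it updates m.
  m.block(1, 1, 2, 2).setConstant(7.0);
  // Fixed-size 2x2 block: the size is known at compile time, the offsets at run time.
  m.block<2, 2>(0, 2) = Eigen::Matrix2d::Identity();
  std::cout << m << "\n";
}
```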
+ // -------------------- Select --------------------
+ // NOTE shall we introduce a ternary_evaluator?
+
+ // TODO enable vectorization for Select
+ template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
+ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+ : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+ {
+ typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
+ enum {
+ CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
+ + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
+ evaluator<ElseMatrixType>::CoeffReadCost),
+
+ Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
+
+ Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& select)
+ : m_conditionImpl(select.conditionMatrix()),
+ m_thenImpl(select.thenMatrix()),
+ m_elseImpl(select.elseMatrix())
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ if (m_conditionImpl.coeff(row, col))
+ return m_thenImpl.coeff(row, col);
+ else
+ return m_elseImpl.coeff(row, col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ if (m_conditionImpl.coeff(index))
+ return m_thenImpl.coeff(index);
+ else
+ return m_elseImpl.coeff(index);
+ }
+
+ protected:
+ evaluator<ConditionMatrixType> m_conditionImpl;
+ evaluator<ThenMatrixType> m_thenImpl;
+ evaluator<ElseMatrixType> m_elseImpl;
+ };
+
+
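The Select expression above backs the coefficient-wise (condition).select(then, else) API; a minimal usage sketch (illustrative, not part of the vendored file):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, -2.0, 2.0);
  // Coefficient-wise "condition ? then : else", which builds a Select expression.
  Eigen::ArrayXd clipped = (a > 0.0).select(a, 0.0);
  std::cout << clipped.transpose() << "\n";  // negative entries replaced by 0
}
```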
+ // -------------------- Replicate --------------------
+
+ template<typename ArgType, int RowFactor, int ColFactor>
+ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
+ : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
+ {
+ typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ enum {
+ Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
+ };
+ typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
+ typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
+
+ enum {
+ CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
+ LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
+ Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
+
+ Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& replicate)
+ : m_arg(replicate.nestedExpression()),
+ m_argImpl(m_arg),
+ m_rows(replicate.nestedExpression().rows()),
+ m_cols(replicate.nestedExpression().cols())
+ {}
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ // try to avoid using modulo; this is a pure optimization strategy
+ const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
+ : RowFactor==1 ? row
+ : row % m_rows.value();
+ const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
+ : ColFactor==1 ? col
+ : col % m_cols.value();
+
+ return m_argImpl.coeff(actual_row, actual_col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ // try to avoid using modulo; this is a pure optimization strategy
+ const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
+ ? (ColFactor==1 ? index : index%m_cols.value())
+ : (RowFactor==1 ? index : index%m_rows.value());
+
+ return m_argImpl.coeff(actual_index);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
+ : RowFactor==1 ? row
+ : row % m_rows.value();
+ const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
+ : ColFactor==1 ? col
+ : col % m_cols.value();
+
+ return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
+ ? (ColFactor==1 ? index : index%m_cols.value())
+ : (RowFactor==1 ? index : index%m_rows.value());
+
+ return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
+ }
+
+ protected:
+ const ArgTypeNested m_arg;
+ evaluator<ArgTypeNestedCleaned> m_argImpl;
+ const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
+ const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
+ };
+
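A small usage sketch of the Replicate expression served by this evaluator; the tiled coefficients are computed on the fly from the original operand via the index remapping shown above (illustrative only):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::RowVector3d r(1.0, 2.0, 3.0);
  // Tile the row vector 4x down and 2x across; nothing is materialized until
  // the assignment to a concrete matrix.
  Eigen::MatrixXd tiled = r.replicate(4, 2);
  std::cout << tiled << "\n";  // 4 rows x 6 columns
}
```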
+ // -------------------- MatrixWrapper and ArrayWrapper --------------------
+ //
+ // evaluator_wrapper_base<T> is a common base class for the
+ // MatrixWrapper and ArrayWrapper evaluators.
+
+ template<typename XprType>
+ struct evaluator_wrapper_base
+ : evaluator_base<XprType>
+ {
+ typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = evaluator<ArgType>::Flags,
+ Alignment = evaluator<ArgType>::Alignment
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
+
+ typedef typename ArgType::Scalar Scalar;
+ typedef typename ArgType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_argImpl.coeff(row, col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_argImpl.coeff(index);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ return m_argImpl.coeffRef(row, col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return m_argImpl.coeffRef(index);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ return m_argImpl.template packet<LoadMode,PacketType>(row, col);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ return m_argImpl.template packet<LoadMode,PacketType>(index);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index row, Index col, const PacketType& x)
+ {
+ m_argImpl.template writePacket<StoreMode>(row, col, x);
+ }
+
+ template<int StoreMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketType& x)
+ {
+ m_argImpl.template writePacket<StoreMode>(index, x);
+ }
+
+ protected:
+ evaluator<ArgType> m_argImpl;
+ };
+
+ template<typename TArgType>
+ struct unary_evaluator<MatrixWrapper<TArgType> >
+ : evaluator_wrapper_base<MatrixWrapper<TArgType> >
+ {
+ typedef MatrixWrapper<TArgType> XprType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& wrapper)
+ : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
+ { }
+ };
+
+ template<typename TArgType>
+ struct unary_evaluator<ArrayWrapper<TArgType> >
+ : evaluator_wrapper_base<ArrayWrapper<TArgType> >
+ {
+ typedef ArrayWrapper<TArgType> XprType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& wrapper)
+ : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
+ { }
+ };
+
+
+ // -------------------- Reverse --------------------
+
+ // defined in Reverse.h:
+ template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
+
+ template<typename ArgType, int Direction>
+ struct unary_evaluator<Reverse<ArgType, Direction> >
+ : evaluator_base<Reverse<ArgType, Direction> >
+ {
+ typedef Reverse<ArgType, Direction> XprType;
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ enum {
+ IsRowMajor = XprType::IsRowMajor,
+ IsColMajor = !IsRowMajor,
+ ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
+ ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
+ ReversePacket = (Direction == BothDirections)
+ || ((Direction == Vertical) && IsColMajor)
+ || ((Direction == Horizontal) && IsRowMajor),
+
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+
+ // let's enable LinearAccess only with vectorization because of the product overhead
+ // FIXME enable DirectAccess with negative strides?
+ Flags0 = evaluator<ArgType>::Flags,
+ LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
+ || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
+ ? LinearAccessBit : 0,
+
+ Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
+
+ Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit unary_evaluator(const XprType& reverse)
+ : m_argImpl(reverse.nestedExpression()),
+ m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
+ m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
+ { }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
+ ReverseCol ? m_cols.value() - col - 1 : col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index col)
+ {
+ return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
+ ReverseCol ? m_cols.value() - col - 1 : col);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index row, Index col) const
+ {
+ enum {
+ PacketSize = unpacket_traits<PacketType>::size,
+ OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
+ OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
+ };
+ typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
+ return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
+ ReverseRow ? m_rows.value() - row - OffsetRow : row,
+ ReverseCol ? m_cols.value() - col - OffsetCol : col));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ PacketType packet(Index index) const
+ {
+ enum { PacketSize = unpacket_traits<PacketType>::size };
+ return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index row, Index col, const PacketType& x)
+ {
+ // FIXME we could factorize some code with packet(i,j)
+ enum {
+ PacketSize = unpacket_traits<PacketType>::size,
+ OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
+ OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
+ };
+ typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
+ m_argImpl.template writePacket<LoadMode>(
+ ReverseRow ? m_rows.value() - row - OffsetRow : row,
+ ReverseCol ? m_cols.value() - col - OffsetCol : col,
+ reverse_packet::run(x));
+ }
+
+ template<int LoadMode, typename PacketType>
+ EIGEN_STRONG_INLINE
+ void writePacket(Index index, const PacketType& x)
+ {
+ enum { PacketSize = unpacket_traits<PacketType>::size };
+ m_argImpl.template writePacket<LoadMode>
+ (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
+ }
+
+ protected:
+ evaluator<ArgType> m_argImpl;
+
+ // If we do not reverse rows, then we do not need to know the number of rows; same for columns.
+ // Nonetheless, in this case it is important to set it to 1 so that the coeff(index) method works correctly for vectors.
+ const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
+ const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
+ };
+
+
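A short illustration of the Reverse expression this evaluator supports, covering per-column and full reversal (public Eigen API, illustrative only):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(4, 1.0, 4.0);
  std::cout << v.reverse().transpose() << "\n";  // 4 3 2 1
  Eigen::MatrixXd m(2, 2);
  m << 1, 2,
       3, 4;
  std::cout << m.colwise().reverse() << "\n";    // each column reversed: 3 4 / 1 2
  std::cout << m.reverse() << "\n";              // both directions: 4 3 / 2 1
}
```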
+ // -------------------- Diagonal --------------------
+
+ template<typename ArgType, int DiagIndex>
+ struct evaluator<Diagonal<ArgType, DiagIndex> >
+ : evaluator_base<Diagonal<ArgType, DiagIndex> >
+ {
+ typedef Diagonal<ArgType, DiagIndex> XprType;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+
+ Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
+
+ Alignment = 0
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ explicit evaluator(const XprType& diagonal)
+ : m_argImpl(diagonal.nestedExpression()),
+ m_index(diagonal.index())
+ { }
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index row, Index) const
+ {
+ return m_argImpl.coeff(row + rowOffset(), row + colOffset());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ CoeffReturnType coeff(Index index) const
+ {
+ return m_argImpl.coeff(index + rowOffset(), index + colOffset());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index row, Index)
+ {
+ return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Scalar& coeffRef(Index index)
+ {
+ return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
+ }
+
+ protected:
+ evaluator<ArgType> m_argImpl;
+ const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
+
+ private:
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
+ Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
+ };
+
+
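A brief usage sketch of the Diagonal expression evaluated above, including non-zero diagonal indices, which exercise the rowOffset()/colOffset() logic (illustrative only):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
  m.diagonal().setConstant(1.0);   // main diagonal, writable view
  m.diagonal(1).setConstant(2.0);  // first super-diagonal (index +1)
  m.diagonal(-1)(0) = 3.0;         // first sub-diagonal, single coefficient
  std::cout << m << "\n";
}
```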
+ //----------------------------------------------------------------------
+ // deprecated code
+ //----------------------------------------------------------------------
+
+ // -------------------- EvalToTemp --------------------
+
+ // expression class for evaluating nested expression to a temporary
+
+ template<typename ArgType> class EvalToTemp;
+
+ template<typename ArgType>
+ struct traits<EvalToTemp<ArgType> >
+ : public traits<ArgType>
+ { };
+
+ template<typename ArgType>
+ class EvalToTemp
+ : public dense_xpr_base<EvalToTemp<ArgType> >::type
+ {
+ public:
+
+ typedef typename dense_xpr_base<EvalToTemp>::type Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
+
+ explicit EvalToTemp(const ArgType& arg)
+ : m_arg(arg)
+ { }
+
+ const ArgType& arg() const
+ {
+ return m_arg;
+ }
+
+ EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
+ {
+ return m_arg.rows();
+ }
+
+ EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
+ {
+ return m_arg.cols();
+ }
+
+ private:
+ const ArgType& m_arg;
+ };
+
+ template<typename ArgType>
+ struct evaluator<EvalToTemp<ArgType> >
+ : public evaluator<typename ArgType::PlainObject>
+ {
+ typedef EvalToTemp<ArgType> XprType;
+ typedef typename ArgType::PlainObject PlainObject;
+ typedef evaluator<PlainObject> Base;
+
+ EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
+ : m_result(xpr.arg())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ }
+
+ // This constructor is used when nesting an EvalTo evaluator in another evaluator
+ EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
+ : m_result(arg)
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ }
+
+ protected:
+ PlainObject m_result;
+ };
+
+ } // namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_COREEVALUATORS_H