umappp 0.1.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (395) hide show
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +25 -0
  3. data/README.md +110 -0
  4. data/ext/umappp/extconf.rb +25 -0
  5. data/ext/umappp/numo.hpp +867 -0
  6. data/ext/umappp/umappp.cpp +225 -0
  7. data/lib/umappp/version.rb +5 -0
  8. data/lib/umappp.rb +41 -0
  9. data/vendor/Eigen/Cholesky +45 -0
  10. data/vendor/Eigen/CholmodSupport +48 -0
  11. data/vendor/Eigen/Core +384 -0
  12. data/vendor/Eigen/Dense +7 -0
  13. data/vendor/Eigen/Eigen +2 -0
  14. data/vendor/Eigen/Eigenvalues +60 -0
  15. data/vendor/Eigen/Geometry +59 -0
  16. data/vendor/Eigen/Householder +29 -0
  17. data/vendor/Eigen/IterativeLinearSolvers +48 -0
  18. data/vendor/Eigen/Jacobi +32 -0
  19. data/vendor/Eigen/KLUSupport +41 -0
  20. data/vendor/Eigen/LU +47 -0
  21. data/vendor/Eigen/MetisSupport +35 -0
  22. data/vendor/Eigen/OrderingMethods +70 -0
  23. data/vendor/Eigen/PaStiXSupport +49 -0
  24. data/vendor/Eigen/PardisoSupport +35 -0
  25. data/vendor/Eigen/QR +50 -0
  26. data/vendor/Eigen/QtAlignedMalloc +39 -0
  27. data/vendor/Eigen/SPQRSupport +34 -0
  28. data/vendor/Eigen/SVD +50 -0
  29. data/vendor/Eigen/Sparse +34 -0
  30. data/vendor/Eigen/SparseCholesky +37 -0
  31. data/vendor/Eigen/SparseCore +69 -0
  32. data/vendor/Eigen/SparseLU +50 -0
  33. data/vendor/Eigen/SparseQR +36 -0
  34. data/vendor/Eigen/StdDeque +27 -0
  35. data/vendor/Eigen/StdList +26 -0
  36. data/vendor/Eigen/StdVector +27 -0
  37. data/vendor/Eigen/SuperLUSupport +64 -0
  38. data/vendor/Eigen/UmfPackSupport +40 -0
  39. data/vendor/Eigen/src/Cholesky/LDLT.h +688 -0
  40. data/vendor/Eigen/src/Cholesky/LLT.h +558 -0
  41. data/vendor/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  42. data/vendor/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
  43. data/vendor/Eigen/src/Core/ArithmeticSequence.h +413 -0
  44. data/vendor/Eigen/src/Core/Array.h +417 -0
  45. data/vendor/Eigen/src/Core/ArrayBase.h +226 -0
  46. data/vendor/Eigen/src/Core/ArrayWrapper.h +209 -0
  47. data/vendor/Eigen/src/Core/Assign.h +90 -0
  48. data/vendor/Eigen/src/Core/AssignEvaluator.h +1010 -0
  49. data/vendor/Eigen/src/Core/Assign_MKL.h +178 -0
  50. data/vendor/Eigen/src/Core/BandMatrix.h +353 -0
  51. data/vendor/Eigen/src/Core/Block.h +448 -0
  52. data/vendor/Eigen/src/Core/BooleanRedux.h +162 -0
  53. data/vendor/Eigen/src/Core/CommaInitializer.h +164 -0
  54. data/vendor/Eigen/src/Core/ConditionEstimator.h +175 -0
  55. data/vendor/Eigen/src/Core/CoreEvaluators.h +1741 -0
  56. data/vendor/Eigen/src/Core/CoreIterators.h +132 -0
  57. data/vendor/Eigen/src/Core/CwiseBinaryOp.h +183 -0
  58. data/vendor/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
  59. data/vendor/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  60. data/vendor/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  61. data/vendor/Eigen/src/Core/CwiseUnaryView.h +132 -0
  62. data/vendor/Eigen/src/Core/DenseBase.h +701 -0
  63. data/vendor/Eigen/src/Core/DenseCoeffsBase.h +685 -0
  64. data/vendor/Eigen/src/Core/DenseStorage.h +652 -0
  65. data/vendor/Eigen/src/Core/Diagonal.h +258 -0
  66. data/vendor/Eigen/src/Core/DiagonalMatrix.h +391 -0
  67. data/vendor/Eigen/src/Core/DiagonalProduct.h +28 -0
  68. data/vendor/Eigen/src/Core/Dot.h +318 -0
  69. data/vendor/Eigen/src/Core/EigenBase.h +160 -0
  70. data/vendor/Eigen/src/Core/ForceAlignedAccess.h +150 -0
  71. data/vendor/Eigen/src/Core/Fuzzy.h +155 -0
  72. data/vendor/Eigen/src/Core/GeneralProduct.h +465 -0
  73. data/vendor/Eigen/src/Core/GenericPacketMath.h +1040 -0
  74. data/vendor/Eigen/src/Core/GlobalFunctions.h +194 -0
  75. data/vendor/Eigen/src/Core/IO.h +258 -0
  76. data/vendor/Eigen/src/Core/IndexedView.h +237 -0
  77. data/vendor/Eigen/src/Core/Inverse.h +117 -0
  78. data/vendor/Eigen/src/Core/Map.h +171 -0
  79. data/vendor/Eigen/src/Core/MapBase.h +310 -0
  80. data/vendor/Eigen/src/Core/MathFunctions.h +2057 -0
  81. data/vendor/Eigen/src/Core/MathFunctionsImpl.h +200 -0
  82. data/vendor/Eigen/src/Core/Matrix.h +565 -0
  83. data/vendor/Eigen/src/Core/MatrixBase.h +547 -0
  84. data/vendor/Eigen/src/Core/NestByValue.h +85 -0
  85. data/vendor/Eigen/src/Core/NoAlias.h +109 -0
  86. data/vendor/Eigen/src/Core/NumTraits.h +335 -0
  87. data/vendor/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  88. data/vendor/Eigen/src/Core/PermutationMatrix.h +605 -0
  89. data/vendor/Eigen/src/Core/PlainObjectBase.h +1128 -0
  90. data/vendor/Eigen/src/Core/Product.h +191 -0
  91. data/vendor/Eigen/src/Core/ProductEvaluators.h +1179 -0
  92. data/vendor/Eigen/src/Core/Random.h +218 -0
  93. data/vendor/Eigen/src/Core/Redux.h +515 -0
  94. data/vendor/Eigen/src/Core/Ref.h +381 -0
  95. data/vendor/Eigen/src/Core/Replicate.h +142 -0
  96. data/vendor/Eigen/src/Core/Reshaped.h +454 -0
  97. data/vendor/Eigen/src/Core/ReturnByValue.h +119 -0
  98. data/vendor/Eigen/src/Core/Reverse.h +217 -0
  99. data/vendor/Eigen/src/Core/Select.h +164 -0
  100. data/vendor/Eigen/src/Core/SelfAdjointView.h +365 -0
  101. data/vendor/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  102. data/vendor/Eigen/src/Core/Solve.h +188 -0
  103. data/vendor/Eigen/src/Core/SolveTriangular.h +235 -0
  104. data/vendor/Eigen/src/Core/SolverBase.h +168 -0
  105. data/vendor/Eigen/src/Core/StableNorm.h +251 -0
  106. data/vendor/Eigen/src/Core/StlIterators.h +463 -0
  107. data/vendor/Eigen/src/Core/Stride.h +116 -0
  108. data/vendor/Eigen/src/Core/Swap.h +68 -0
  109. data/vendor/Eigen/src/Core/Transpose.h +464 -0
  110. data/vendor/Eigen/src/Core/Transpositions.h +386 -0
  111. data/vendor/Eigen/src/Core/TriangularMatrix.h +1001 -0
  112. data/vendor/Eigen/src/Core/VectorBlock.h +96 -0
  113. data/vendor/Eigen/src/Core/VectorwiseOp.h +784 -0
  114. data/vendor/Eigen/src/Core/Visitor.h +381 -0
  115. data/vendor/Eigen/src/Core/arch/AVX/Complex.h +372 -0
  116. data/vendor/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
  117. data/vendor/Eigen/src/Core/arch/AVX/PacketMath.h +1574 -0
  118. data/vendor/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
  119. data/vendor/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  120. data/vendor/Eigen/src/Core/arch/AVX512/MathFunctions.h +362 -0
  121. data/vendor/Eigen/src/Core/arch/AVX512/PacketMath.h +2303 -0
  122. data/vendor/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  123. data/vendor/Eigen/src/Core/arch/AltiVec/Complex.h +417 -0
  124. data/vendor/Eigen/src/Core/arch/AltiVec/MathFunctions.h +90 -0
  125. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  126. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  127. data/vendor/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  128. data/vendor/Eigen/src/Core/arch/AltiVec/PacketMath.h +2711 -0
  129. data/vendor/Eigen/src/Core/arch/CUDA/Complex.h +258 -0
  130. data/vendor/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  131. data/vendor/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
  132. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  133. data/vendor/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  134. data/vendor/Eigen/src/Core/arch/Default/Half.h +942 -0
  135. data/vendor/Eigen/src/Core/arch/Default/Settings.h +49 -0
  136. data/vendor/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  137. data/vendor/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
  138. data/vendor/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  139. data/vendor/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  140. data/vendor/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  141. data/vendor/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  142. data/vendor/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  143. data/vendor/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  144. data/vendor/Eigen/src/Core/arch/NEON/Complex.h +584 -0
  145. data/vendor/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  146. data/vendor/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
  147. data/vendor/Eigen/src/Core/arch/NEON/PacketMath.h +4587 -0
  148. data/vendor/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  149. data/vendor/Eigen/src/Core/arch/SSE/Complex.h +351 -0
  150. data/vendor/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
  151. data/vendor/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
  152. data/vendor/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
  153. data/vendor/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  154. data/vendor/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  155. data/vendor/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  156. data/vendor/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  157. data/vendor/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  158. data/vendor/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  159. data/vendor/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  160. data/vendor/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  161. data/vendor/Eigen/src/Core/arch/ZVector/Complex.h +426 -0
  162. data/vendor/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
  163. data/vendor/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
  164. data/vendor/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
  165. data/vendor/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
  166. data/vendor/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
  167. data/vendor/Eigen/src/Core/functors/StlFunctors.h +166 -0
  168. data/vendor/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  169. data/vendor/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
  170. data/vendor/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
  171. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
  172. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +317 -0
  173. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  174. data/vendor/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
  175. data/vendor/Eigen/src/Core/products/GeneralMatrixVector.h +518 -0
  176. data/vendor/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  177. data/vendor/Eigen/src/Core/products/Parallelizer.h +180 -0
  178. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
  179. data/vendor/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
  180. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
  181. data/vendor/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  182. data/vendor/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  183. data/vendor/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
  184. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
  185. data/vendor/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
  186. data/vendor/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  187. data/vendor/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  188. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
  189. data/vendor/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
  190. data/vendor/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
  191. data/vendor/Eigen/src/Core/util/BlasUtil.h +583 -0
  192. data/vendor/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  193. data/vendor/Eigen/src/Core/util/Constants.h +563 -0
  194. data/vendor/Eigen/src/Core/util/DisableStupidWarnings.h +106 -0
  195. data/vendor/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
  196. data/vendor/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  197. data/vendor/Eigen/src/Core/util/IntegralConstant.h +272 -0
  198. data/vendor/Eigen/src/Core/util/MKL_support.h +137 -0
  199. data/vendor/Eigen/src/Core/util/Macros.h +1464 -0
  200. data/vendor/Eigen/src/Core/util/Memory.h +1163 -0
  201. data/vendor/Eigen/src/Core/util/Meta.h +812 -0
  202. data/vendor/Eigen/src/Core/util/NonMPL2.h +3 -0
  203. data/vendor/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
  204. data/vendor/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  205. data/vendor/Eigen/src/Core/util/StaticAssert.h +221 -0
  206. data/vendor/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  207. data/vendor/Eigen/src/Core/util/XprHelper.h +856 -0
  208. data/vendor/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  209. data/vendor/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
  210. data/vendor/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  211. data/vendor/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  212. data/vendor/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  213. data/vendor/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  214. data/vendor/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  215. data/vendor/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  216. data/vendor/Eigen/src/Eigenvalues/RealQZ.h +657 -0
  217. data/vendor/Eigen/src/Eigenvalues/RealSchur.h +558 -0
  218. data/vendor/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  219. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
  220. data/vendor/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  221. data/vendor/Eigen/src/Eigenvalues/Tridiagonalization.h +561 -0
  222. data/vendor/Eigen/src/Geometry/AlignedBox.h +486 -0
  223. data/vendor/Eigen/src/Geometry/AngleAxis.h +247 -0
  224. data/vendor/Eigen/src/Geometry/EulerAngles.h +114 -0
  225. data/vendor/Eigen/src/Geometry/Homogeneous.h +501 -0
  226. data/vendor/Eigen/src/Geometry/Hyperplane.h +282 -0
  227. data/vendor/Eigen/src/Geometry/OrthoMethods.h +235 -0
  228. data/vendor/Eigen/src/Geometry/ParametrizedLine.h +232 -0
  229. data/vendor/Eigen/src/Geometry/Quaternion.h +870 -0
  230. data/vendor/Eigen/src/Geometry/Rotation2D.h +199 -0
  231. data/vendor/Eigen/src/Geometry/RotationBase.h +206 -0
  232. data/vendor/Eigen/src/Geometry/Scaling.h +188 -0
  233. data/vendor/Eigen/src/Geometry/Transform.h +1563 -0
  234. data/vendor/Eigen/src/Geometry/Translation.h +202 -0
  235. data/vendor/Eigen/src/Geometry/Umeyama.h +166 -0
  236. data/vendor/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  237. data/vendor/Eigen/src/Householder/BlockHouseholder.h +110 -0
  238. data/vendor/Eigen/src/Householder/Householder.h +176 -0
  239. data/vendor/Eigen/src/Householder/HouseholderSequence.h +545 -0
  240. data/vendor/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  241. data/vendor/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
  242. data/vendor/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +229 -0
  243. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
  244. data/vendor/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
  245. data/vendor/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
  246. data/vendor/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
  247. data/vendor/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
  248. data/vendor/Eigen/src/Jacobi/Jacobi.h +483 -0
  249. data/vendor/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  250. data/vendor/Eigen/src/LU/Determinant.h +117 -0
  251. data/vendor/Eigen/src/LU/FullPivLU.h +877 -0
  252. data/vendor/Eigen/src/LU/InverseImpl.h +432 -0
  253. data/vendor/Eigen/src/LU/PartialPivLU.h +624 -0
  254. data/vendor/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  255. data/vendor/Eigen/src/LU/arch/InverseSize4.h +351 -0
  256. data/vendor/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  257. data/vendor/Eigen/src/OrderingMethods/Amd.h +435 -0
  258. data/vendor/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
  259. data/vendor/Eigen/src/OrderingMethods/Ordering.h +153 -0
  260. data/vendor/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  261. data/vendor/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
  262. data/vendor/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
  263. data/vendor/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  264. data/vendor/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
  265. data/vendor/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
  266. data/vendor/Eigen/src/QR/HouseholderQR.h +434 -0
  267. data/vendor/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  268. data/vendor/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
  269. data/vendor/Eigen/src/SVD/BDCSVD.h +1366 -0
  270. data/vendor/Eigen/src/SVD/JacobiSVD.h +812 -0
  271. data/vendor/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  272. data/vendor/Eigen/src/SVD/SVDBase.h +376 -0
  273. data/vendor/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  274. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
  275. data/vendor/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
  276. data/vendor/Eigen/src/SparseCore/AmbiVector.h +378 -0
  277. data/vendor/Eigen/src/SparseCore/CompressedStorage.h +274 -0
  278. data/vendor/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  279. data/vendor/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  280. data/vendor/Eigen/src/SparseCore/SparseAssign.h +270 -0
  281. data/vendor/Eigen/src/SparseCore/SparseBlock.h +571 -0
  282. data/vendor/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  283. data/vendor/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
  284. data/vendor/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
  285. data/vendor/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
  286. data/vendor/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
  287. data/vendor/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  288. data/vendor/Eigen/src/SparseCore/SparseDot.h +98 -0
  289. data/vendor/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  290. data/vendor/Eigen/src/SparseCore/SparseMap.h +305 -0
  291. data/vendor/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
  292. data/vendor/Eigen/src/SparseCore/SparseMatrixBase.h +398 -0
  293. data/vendor/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  294. data/vendor/Eigen/src/SparseCore/SparseProduct.h +181 -0
  295. data/vendor/Eigen/src/SparseCore/SparseRedux.h +49 -0
  296. data/vendor/Eigen/src/SparseCore/SparseRef.h +397 -0
  297. data/vendor/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
  298. data/vendor/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  299. data/vendor/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  300. data/vendor/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  301. data/vendor/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  302. data/vendor/Eigen/src/SparseCore/SparseUtil.h +186 -0
  303. data/vendor/Eigen/src/SparseCore/SparseVector.h +478 -0
  304. data/vendor/Eigen/src/SparseCore/SparseView.h +254 -0
  305. data/vendor/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  306. data/vendor/Eigen/src/SparseLU/SparseLU.h +923 -0
  307. data/vendor/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  308. data/vendor/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  309. data/vendor/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  310. data/vendor/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +375 -0
  311. data/vendor/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  312. data/vendor/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  313. data/vendor/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  314. data/vendor/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  315. data/vendor/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  316. data/vendor/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  317. data/vendor/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  318. data/vendor/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  319. data/vendor/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  320. data/vendor/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  321. data/vendor/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  322. data/vendor/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  323. data/vendor/Eigen/src/SparseQR/SparseQR.h +758 -0
  324. data/vendor/Eigen/src/StlSupport/StdDeque.h +116 -0
  325. data/vendor/Eigen/src/StlSupport/StdList.h +106 -0
  326. data/vendor/Eigen/src/StlSupport/StdVector.h +131 -0
  327. data/vendor/Eigen/src/StlSupport/details.h +84 -0
  328. data/vendor/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
  329. data/vendor/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
  330. data/vendor/Eigen/src/misc/Image.h +82 -0
  331. data/vendor/Eigen/src/misc/Kernel.h +79 -0
  332. data/vendor/Eigen/src/misc/RealSvd2x2.h +55 -0
  333. data/vendor/Eigen/src/misc/blas.h +440 -0
  334. data/vendor/Eigen/src/misc/lapack.h +152 -0
  335. data/vendor/Eigen/src/misc/lapacke.h +16292 -0
  336. data/vendor/Eigen/src/misc/lapacke_mangling.h +17 -0
  337. data/vendor/Eigen/src/plugins/ArrayCwiseBinaryOps.h +358 -0
  338. data/vendor/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
  339. data/vendor/Eigen/src/plugins/BlockMethods.h +1442 -0
  340. data/vendor/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  341. data/vendor/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
  342. data/vendor/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  343. data/vendor/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  344. data/vendor/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
  345. data/vendor/Eigen/src/plugins/ReshapedMethods.h +149 -0
  346. data/vendor/aarand/aarand.hpp +114 -0
  347. data/vendor/annoy/annoylib.h +1495 -0
  348. data/vendor/annoy/kissrandom.h +120 -0
  349. data/vendor/annoy/mman.h +242 -0
  350. data/vendor/hnswlib/bruteforce.h +152 -0
  351. data/vendor/hnswlib/hnswalg.h +1192 -0
  352. data/vendor/hnswlib/hnswlib.h +108 -0
  353. data/vendor/hnswlib/space_ip.h +282 -0
  354. data/vendor/hnswlib/space_l2.h +281 -0
  355. data/vendor/hnswlib/visited_list_pool.h +79 -0
  356. data/vendor/irlba/irlba.hpp +575 -0
  357. data/vendor/irlba/lanczos.hpp +212 -0
  358. data/vendor/irlba/parallel.hpp +474 -0
  359. data/vendor/irlba/utils.hpp +224 -0
  360. data/vendor/irlba/wrappers.hpp +228 -0
  361. data/vendor/kmeans/Base.hpp +75 -0
  362. data/vendor/kmeans/Details.hpp +79 -0
  363. data/vendor/kmeans/HartiganWong.hpp +492 -0
  364. data/vendor/kmeans/InitializeKmeansPP.hpp +144 -0
  365. data/vendor/kmeans/InitializeNone.hpp +44 -0
  366. data/vendor/kmeans/InitializePCAPartition.hpp +309 -0
  367. data/vendor/kmeans/InitializeRandom.hpp +91 -0
  368. data/vendor/kmeans/Kmeans.hpp +161 -0
  369. data/vendor/kmeans/Lloyd.hpp +134 -0
  370. data/vendor/kmeans/MiniBatch.hpp +269 -0
  371. data/vendor/kmeans/QuickSearch.hpp +179 -0
  372. data/vendor/kmeans/compute_centroids.hpp +32 -0
  373. data/vendor/kmeans/compute_wcss.hpp +27 -0
  374. data/vendor/kmeans/is_edge_case.hpp +42 -0
  375. data/vendor/kmeans/random.hpp +55 -0
  376. data/vendor/knncolle/Annoy/Annoy.hpp +193 -0
  377. data/vendor/knncolle/BruteForce/BruteForce.hpp +120 -0
  378. data/vendor/knncolle/Hnsw/Hnsw.hpp +225 -0
  379. data/vendor/knncolle/Kmknn/Kmknn.hpp +286 -0
  380. data/vendor/knncolle/VpTree/VpTree.hpp +256 -0
  381. data/vendor/knncolle/knncolle.hpp +34 -0
  382. data/vendor/knncolle/utils/Base.hpp +100 -0
  383. data/vendor/knncolle/utils/NeighborQueue.hpp +94 -0
  384. data/vendor/knncolle/utils/distances.hpp +98 -0
  385. data/vendor/knncolle/utils/find_nearest_neighbors.hpp +112 -0
  386. data/vendor/powerit/PowerIterations.hpp +157 -0
  387. data/vendor/umappp/NeighborList.hpp +37 -0
  388. data/vendor/umappp/Umap.hpp +662 -0
  389. data/vendor/umappp/combine_neighbor_sets.hpp +95 -0
  390. data/vendor/umappp/find_ab.hpp +157 -0
  391. data/vendor/umappp/neighbor_similarities.hpp +136 -0
  392. data/vendor/umappp/optimize_layout.hpp +285 -0
  393. data/vendor/umappp/spectral_init.hpp +181 -0
  394. data/vendor/umappp/umappp.hpp +13 -0
  395. metadata +465 -0
@@ -0,0 +1,856 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_XPRHELPER_H
12
+ #define EIGEN_XPRHELPER_H
13
+
14
+ // just a workaround because GCC seems to not really like empty structs
15
+ // FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
16
+ // so currently we simply disable this optimization for gcc 4.3
17
+ #if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
18
+ #define EIGEN_EMPTY_STRUCT_CTOR(X) \
19
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
20
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
21
+ #else
22
+ #define EIGEN_EMPTY_STRUCT_CTOR(X)
23
+ #endif
24
+
25
+ namespace Eigen {
26
+
27
+ namespace internal {
28
+
29
+ template<typename IndexDest, typename IndexSrc>
30
+ EIGEN_DEVICE_FUNC
31
+ inline IndexDest convert_index(const IndexSrc& idx) {
32
+ // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
33
+ eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
34
+ return IndexDest(idx);
35
+ }
36
+
37
+ // true if T can be considered as an integral index (i.e., and integral type or enum)
38
+ template<typename T> struct is_valid_index_type
39
+ {
40
+ enum { value =
41
+ #if EIGEN_HAS_TYPE_TRAITS
42
+ internal::is_integral<T>::value || std::is_enum<T>::value
43
+ #elif EIGEN_COMP_MSVC
44
+ internal::is_integral<T>::value || __is_enum(T)
45
+ #else
46
+ // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
47
+ internal::is_convertible<T,Index>::value && !internal::is_same<T,float>::value && !is_same<T,double>::value
48
+ #endif
49
+ };
50
+ };
51
+
52
+ // true if both types are not valid index types
53
+ template<typename RowIndices, typename ColIndices>
54
+ struct valid_indexed_view_overload {
55
+ enum { value = !(internal::is_valid_index_type<RowIndices>::value && internal::is_valid_index_type<ColIndices>::value) };
56
+ };
57
+
58
// promote_scalar_arg is a helper for mixed expression/scalar operations such as
//   expression * scalar
// It decides how the type T of the scalar operand must be promoted given the
// scalar type ExprScalar of the expression. The caller supplies IsSupported as
// internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value
// (with ExprScalar and T in the proper order). The rules are:
//  - natively supported (IsSupported): T is kept as-is;
//  - else promote to NumTraits<ExprScalar>::Literal when T converts to it
//    without implying a float-to-integer conversion;
//  - else promote to ExprScalar under the same convertibility constraint;
//  - otherwise no type is defined, so the operation is invalid (SFINAE).
template<typename ExprScalar,typename T, bool IsSupported>
struct promote_scalar_arg;

// Natively supported case: no promotion needed.
template<typename S,typename T>
struct promote_scalar_arg<S,T,true>
{
  typedef T type;
};
75
+
76
+ // Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
77
+ template<typename ExprScalar,typename T,typename PromotedType,
78
+ bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
79
+ bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
80
+ struct promote_scalar_arg_unsupported;
81
+
82
+ // Start recursion with NumTraits<ExprScalar>::Literal
83
+ template<typename S,typename T>
84
+ struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};
85
+
86
+ // We found a match!
87
+ template<typename S,typename T, typename PromotedType>
88
+ struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
89
+ {
90
+ typedef PromotedType type;
91
+ };
92
+
93
+ // No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
94
+ // so let's try to promote to ExprScalar
95
+ template<typename ExprScalar,typename T, typename PromotedType>
96
+ struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
97
+ : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
98
+ {};
99
+
100
+ // Unsafe real-to-integer, let's stop.
101
+ template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
102
+ struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};
103
+
104
+ // T is not even convertible to ExprScalar, let's stop.
105
+ template<typename S,typename T>
106
+ struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
107
+
108
+ //classes inheriting no_assignment_operator don't generate a default operator=.
109
+ class no_assignment_operator
110
+ {
111
+ private:
112
+ no_assignment_operator& operator=(const no_assignment_operator&);
113
+ protected:
114
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator)
115
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator)
116
+ };
117
+
118
+ /** \internal return the index type with the largest number of bits */
119
+ template<typename I1, typename I2>
120
+ struct promote_index_type
121
+ {
122
+ typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
123
+ };
124
+
125
+ /** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
126
+ * can be accessed using value() and setValue().
127
+ * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
128
+ */
129
+ template<typename T, int Value> class variable_if_dynamic
130
+ {
131
+ public:
132
+ EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(variable_if_dynamic)
133
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
134
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
135
+ T value() { return T(Value); }
136
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
137
+ operator T() const { return T(Value); }
138
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
139
+ void setValue(T v) const { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
140
+ };
141
+
142
+ template<typename T> class variable_if_dynamic<T, Dynamic>
143
+ {
144
+ T m_value;
145
+ public:
146
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value = 0) EIGEN_NO_THROW : m_value(value) {}
147
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
148
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return m_value; }
149
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
150
+ };
151
+
152
+ /** \internal like variable_if_dynamic but for DynamicIndex
153
+ */
154
+ template<typename T, int Value> class variable_if_dynamicindex
155
+ {
156
+ public:
157
+ EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
158
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
159
+ EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
160
+ T value() { return T(Value); }
161
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
162
+ void setValue(T) {}
163
+ };
164
+
165
+ template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
166
+ {
167
+ T m_value;
168
+ EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
169
+ public:
170
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
171
+ EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
172
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
173
+ };
174
+
175
+ template<typename T> struct functor_traits
176
+ {
177
+ enum
178
+ {
179
+ Cost = 10,
180
+ PacketAccess = false,
181
+ IsRepeatable = false
182
+ };
183
+ };
184
+
185
+ template<typename T> struct packet_traits;
186
+
187
+ template<typename T> struct unpacket_traits;
188
+
189
+ template<int Size, typename PacketType,
190
+ bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
191
+ struct find_best_packet_helper;
192
+
193
+ template< int Size, typename PacketType>
194
+ struct find_best_packet_helper<Size,PacketType,true>
195
+ {
196
+ typedef PacketType type;
197
+ };
198
+
199
+ template<int Size, typename PacketType>
200
+ struct find_best_packet_helper<Size,PacketType,false>
201
+ {
202
+ typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
203
+ };
204
+
205
+ template<typename T, int Size>
206
+ struct find_best_packet
207
+ {
208
+ typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
209
+ };
210
+
211
+ #if EIGEN_MAX_STATIC_ALIGN_BYTES>0
212
+ template<int ArrayBytes, int AlignmentBytes,
213
+ bool Match = bool((ArrayBytes%AlignmentBytes)==0),
214
+ bool TryHalf = bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
215
+ struct compute_default_alignment_helper
216
+ {
217
+ enum { value = 0 };
218
+ };
219
+
220
+ template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
221
+ struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
222
+ {
223
+ enum { value = AlignmentBytes };
224
+ };
225
+
226
+ template<int ArrayBytes, int AlignmentBytes>
227
+ struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
228
+ {
229
+ // current packet too large, try with an half-packet
230
+ enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
231
+ };
232
+ #else
233
+ // If static alignment is disabled, no need to bother.
234
+ // This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)"
235
+ template<int ArrayBytes, int AlignmentBytes>
236
+ struct compute_default_alignment_helper
237
+ {
238
+ enum { value = 0 };
239
+ };
240
+ #endif
241
+
242
+ template<typename T, int Size> struct compute_default_alignment {
243
+ enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
244
+ };
245
+
246
+ template<typename T> struct compute_default_alignment<T,Dynamic> {
247
+ enum { value = EIGEN_MAX_ALIGN_BYTES };
248
+ };
249
+
250
+ template<typename _Scalar, int _Rows, int _Cols,
251
+ int _Options = AutoAlign |
252
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
253
+ : (_Cols==1 && _Rows!=1) ? ColMajor
254
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
255
+ int _MaxRows = _Rows,
256
+ int _MaxCols = _Cols
257
+ > class make_proper_matrix_type
258
+ {
259
+ enum {
260
+ IsColVector = _Cols==1 && _Rows!=1,
261
+ IsRowVector = _Rows==1 && _Cols!=1,
262
+ Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
263
+ : IsRowVector ? (_Options | RowMajor) & ~ColMajor
264
+ : _Options
265
+ };
266
+ public:
267
+ typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
268
+ };
269
+
270
+ template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
271
+ class compute_matrix_flags
272
+ {
273
+ enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
274
+ public:
275
+ // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
276
+ // and then propagate this information to the evaluator's flags.
277
+ // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
278
+ enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
279
+ };
280
+
281
+ template<int _Rows, int _Cols> struct size_at_compile_time
282
+ {
283
+ enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
284
+ };
285
+
286
+ template<typename XprType> struct size_of_xpr_at_compile_time
287
+ {
288
+ enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
289
+ };
290
+
291
+ /* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
292
+ * whereas eval is a const reference in the case of a matrix
293
+ */
294
+
295
+ template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
296
+ template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;
297
+ template<typename T> struct plain_matrix_type<T,Dense>
298
+ {
299
+ typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
300
+ };
301
+ template<typename T> struct plain_matrix_type<T,DiagonalShape>
302
+ {
303
+ typedef typename T::PlainObject type;
304
+ };
305
+
306
+ template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
307
+ {
308
+ typedef Matrix<typename traits<T>::Scalar,
309
+ traits<T>::RowsAtCompileTime,
310
+ traits<T>::ColsAtCompileTime,
311
+ AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
312
+ traits<T>::MaxRowsAtCompileTime,
313
+ traits<T>::MaxColsAtCompileTime
314
+ > type;
315
+ };
316
+
317
+ template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
318
+ {
319
+ typedef Array<typename traits<T>::Scalar,
320
+ traits<T>::RowsAtCompileTime,
321
+ traits<T>::ColsAtCompileTime,
322
+ AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
323
+ traits<T>::MaxRowsAtCompileTime,
324
+ traits<T>::MaxColsAtCompileTime
325
+ > type;
326
+ };
327
+
328
+ /* eval : the return type of eval(). For matrices, this is just a const reference
329
+ * in order to avoid a useless copy
330
+ */
331
+
332
+ template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
333
+
334
+ template<typename T> struct eval<T,Dense>
335
+ {
336
+ typedef typename plain_matrix_type<T>::type type;
337
+ // typedef typename T::PlainObject type;
338
+ // typedef T::Matrix<typename traits<T>::Scalar,
339
+ // traits<T>::RowsAtCompileTime,
340
+ // traits<T>::ColsAtCompileTime,
341
+ // AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
342
+ // traits<T>::MaxRowsAtCompileTime,
343
+ // traits<T>::MaxColsAtCompileTime
344
+ // > type;
345
+ };
346
+
347
+ template<typename T> struct eval<T,DiagonalShape>
348
+ {
349
+ typedef typename plain_matrix_type<T>::type type;
350
+ };
351
+
352
+ // for matrices, no need to evaluate, just use a const reference to avoid a useless copy
353
+ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
354
+ struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
355
+ {
356
+ typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
357
+ };
358
+
359
+ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
360
+ struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
361
+ {
362
+ typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
363
+ };
364
+
365
+
366
+ /* similar to plain_matrix_type, but using the evaluator's Flags */
367
+ template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;
368
+
369
+ template<typename T>
370
+ struct plain_object_eval<T,Dense>
371
+ {
372
+ typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
373
+ };
374
+
375
+
376
+ /* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
377
+ */
378
+ template<typename T> struct plain_matrix_type_column_major
379
+ {
380
+ enum { Rows = traits<T>::RowsAtCompileTime,
381
+ Cols = traits<T>::ColsAtCompileTime,
382
+ MaxRows = traits<T>::MaxRowsAtCompileTime,
383
+ MaxCols = traits<T>::MaxColsAtCompileTime
384
+ };
385
+ typedef Matrix<typename traits<T>::Scalar,
386
+ Rows,
387
+ Cols,
388
+ (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
389
+ MaxRows,
390
+ MaxCols
391
+ > type;
392
+ };
393
+
394
+ /* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
395
+ */
396
+ template<typename T> struct plain_matrix_type_row_major
397
+ {
398
+ enum { Rows = traits<T>::RowsAtCompileTime,
399
+ Cols = traits<T>::ColsAtCompileTime,
400
+ MaxRows = traits<T>::MaxRowsAtCompileTime,
401
+ MaxCols = traits<T>::MaxColsAtCompileTime
402
+ };
403
+ typedef Matrix<typename traits<T>::Scalar,
404
+ Rows,
405
+ Cols,
406
+ (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
407
+ MaxRows,
408
+ MaxCols
409
+ > type;
410
+ };
411
+
412
+ /** \internal The reference selector for template expressions. The idea is that we don't
413
+ * need to use references for expressions since they are light weight proxy
414
+ * objects which should generate no copying overhead. */
415
+ template <typename T>
416
+ struct ref_selector
417
+ {
418
+ typedef typename conditional<
419
+ bool(traits<T>::Flags & NestByRefBit),
420
+ T const&,
421
+ const T
422
+ >::type type;
423
+
424
+ typedef typename conditional<
425
+ bool(traits<T>::Flags & NestByRefBit),
426
+ T &,
427
+ T
428
+ >::type non_const_type;
429
+ };
430
+
431
+ /** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
432
+ template<typename T1, typename T2>
433
+ struct transfer_constness
434
+ {
435
+ typedef typename conditional<
436
+ bool(internal::is_const<T1>::value),
437
+ typename internal::add_const_on_value_type<T2>::type,
438
+ T2
439
+ >::type type;
440
+ };
441
+
442
+
443
+ // However, we still need a mechanism to detect whether an expression which is evaluated multiple time
444
+ // has to be evaluated into a temporary.
445
+ // That's the purpose of this new nested_eval helper:
446
+ /** \internal Determines how a given expression should be nested when evaluated multiple times.
447
+ * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
448
+ * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or
449
+ * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
450
+ * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
451
+ * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
452
+ *
453
+ * \tparam T the type of the expression being nested.
454
+ * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
455
+ * \tparam PlainObject the type of the temporary if needed.
456
+ */
457
+ template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
458
+ {
459
+ enum {
460
+ ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
461
+ CoeffReadCost = evaluator<T>::CoeffReadCost, // NOTE What if an evaluator evaluate itself into a temporary?
462
+ // Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
463
+ // This situation is already taken care by the EvalBeforeNestingBit flag, which is turned ON
464
+ // for all evaluator creating a temporary. This flag is then propagated by the parent evaluators.
465
+ // Another solution could be to count the number of temps?
466
+ NAsInteger = n == Dynamic ? HugeCost : n,
467
+ CostEval = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
468
+ CostNoEval = NAsInteger * CoeffReadCost,
469
+ Evaluate = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
470
+ };
471
+
472
+ typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
473
+ };
474
+
475
+ template<typename T>
476
+ EIGEN_DEVICE_FUNC
477
+ inline T* const_cast_ptr(const T* ptr)
478
+ {
479
+ return const_cast<T*>(ptr);
480
+ }
481
+
482
+ template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
483
+ struct dense_xpr_base
484
+ {
485
+ /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
486
+ };
487
+
488
+ template<typename Derived>
489
+ struct dense_xpr_base<Derived, MatrixXpr>
490
+ {
491
+ typedef MatrixBase<Derived> type;
492
+ };
493
+
494
+ template<typename Derived>
495
+ struct dense_xpr_base<Derived, ArrayXpr>
496
+ {
497
+ typedef ArrayBase<Derived> type;
498
+ };
499
+
500
+ template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
501
+ struct generic_xpr_base;
502
+
503
+ template<typename Derived, typename XprKind>
504
+ struct generic_xpr_base<Derived, XprKind, Dense>
505
+ {
506
+ typedef typename dense_xpr_base<Derived,XprKind>::type type;
507
+ };
508
+
509
+ template<typename XprType, typename CastType> struct cast_return_type
510
+ {
511
+ typedef typename XprType::Scalar CurrentScalarType;
512
+ typedef typename remove_all<CastType>::type _CastType;
513
+ typedef typename _CastType::Scalar NewScalarType;
514
+ typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
515
+ const XprType&,CastType>::type type;
516
+ };
517
+
518
+ template <typename A, typename B> struct promote_storage_type;
519
+
520
+ template <typename A> struct promote_storage_type<A,A>
521
+ {
522
+ typedef A ret;
523
+ };
524
+ template <typename A> struct promote_storage_type<A, const A>
525
+ {
526
+ typedef A ret;
527
+ };
528
+ template <typename A> struct promote_storage_type<const A, A>
529
+ {
530
+ typedef A ret;
531
+ };
532
+
533
+ /** \internal Specify the "storage kind" of applying a coefficient-wise
534
+ * binary operations between two expressions of kinds A and B respectively.
535
+ * The template parameter Functor permits to specialize the resulting storage kind wrt to
536
+ * the functor.
537
+ * The default rules are as follows:
538
+ * \code
539
+ * A op A -> A
540
+ * A op dense -> dense
541
+ * dense op B -> dense
542
+ * sparse op dense -> sparse
543
+ * dense op sparse -> sparse
544
+ * \endcode
545
+ */
546
+ template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;
547
+
548
+ template <typename A, typename Functor> struct cwise_promote_storage_type<A,A,Functor> { typedef A ret; };
549
+ template <typename Functor> struct cwise_promote_storage_type<Dense,Dense,Functor> { typedef Dense ret; };
550
+ template <typename A, typename Functor> struct cwise_promote_storage_type<A,Dense,Functor> { typedef Dense ret; };
551
+ template <typename B, typename Functor> struct cwise_promote_storage_type<Dense,B,Functor> { typedef Dense ret; };
552
+ template <typename Functor> struct cwise_promote_storage_type<Sparse,Dense,Functor> { typedef Sparse ret; };
553
+ template <typename Functor> struct cwise_promote_storage_type<Dense,Sparse,Functor> { typedef Sparse ret; };
554
+
555
+ template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
556
+ enum { value = LhsOrder };
557
+ };
558
+
559
+ template <typename LhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder> { enum { value = RhsOrder }; };
560
+ template <typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder> { enum { value = LhsOrder }; };
561
+ template <int Order> struct cwise_promote_storage_order<Sparse,Sparse,Order,Order> { enum { value = Order }; };
562
+
563
+
564
+ /** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
565
+ * The template parameter ProductTag permits to specialize the resulting storage kind wrt to
566
+ * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
567
+ * The default rules are as follows:
568
+ * \code
569
+ * K * K -> K
570
+ * dense * K -> dense
571
+ * K * dense -> dense
572
+ * diag * K -> K
573
+ * K * diag -> K
574
+ * Perm * K -> K
575
+ * K * Perm -> K
576
+ * \endcode
577
+ */
578
+ template <typename A, typename B, int ProductTag> struct product_promote_storage_type;
579
+
580
+ template <typename A, int ProductTag> struct product_promote_storage_type<A, A, ProductTag> { typedef A ret;};
581
+ template <int ProductTag> struct product_promote_storage_type<Dense, Dense, ProductTag> { typedef Dense ret;};
582
+ template <typename A, int ProductTag> struct product_promote_storage_type<A, Dense, ProductTag> { typedef Dense ret; };
583
+ template <typename B, int ProductTag> struct product_promote_storage_type<Dense, B, ProductTag> { typedef Dense ret; };
584
+
585
+ template <typename A, int ProductTag> struct product_promote_storage_type<A, DiagonalShape, ProductTag> { typedef A ret; };
586
+ template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape, B, ProductTag> { typedef B ret; };
587
+ template <int ProductTag> struct product_promote_storage_type<Dense, DiagonalShape, ProductTag> { typedef Dense ret; };
588
+ template <int ProductTag> struct product_promote_storage_type<DiagonalShape, Dense, ProductTag> { typedef Dense ret; };
589
+
590
+ template <typename A, int ProductTag> struct product_promote_storage_type<A, PermutationStorage, ProductTag> { typedef A ret; };
591
+ template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B, ProductTag> { typedef B ret; };
592
+ template <int ProductTag> struct product_promote_storage_type<Dense, PermutationStorage, ProductTag> { typedef Dense ret; };
593
+ template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Dense, ProductTag> { typedef Dense ret; };
594
+
595
+ /** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
596
+ * \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
597
+ */
598
+ template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
599
+ struct plain_row_type
600
+ {
601
+ typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
602
+ int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
603
+ typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
604
+ int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
605
+
606
+ typedef typename conditional<
607
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
608
+ MatrixRowType,
609
+ ArrayRowType
610
+ >::type type;
611
+ };
612
+
613
+ template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
614
+ struct plain_col_type
615
+ {
616
+ typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
617
+ ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
618
+ typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
619
+ ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
620
+
621
+ typedef typename conditional<
622
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
623
+ MatrixColType,
624
+ ArrayColType
625
+ >::type type;
626
+ };
627
+
628
+ template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
629
+ struct plain_diag_type
630
+ {
631
+ enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
632
+ max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
633
+ };
634
+ typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
635
+ typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
636
+
637
+ typedef typename conditional<
638
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
639
+ MatrixDiagType,
640
+ ArrayDiagType
641
+ >::type type;
642
+ };
643
+
644
+ template<typename Expr,typename Scalar = typename Expr::Scalar>
645
+ struct plain_constant_type
646
+ {
647
+ enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };
648
+
649
+ typedef Array<Scalar, traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
650
+ Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;
651
+
652
+ typedef Matrix<Scalar, traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
653
+ Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;
654
+
655
+ typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
656
+ };
657
+
658
+ template<typename ExpressionType>
659
+ struct is_lvalue
660
+ {
661
+ enum { value = (!bool(is_const<ExpressionType>::value)) &&
662
+ bool(traits<ExpressionType>::Flags & LvalueBit) };
663
+ };
664
+
665
+ template<typename T> struct is_diagonal
666
+ { enum { ret = false }; };
667
+
668
+ template<typename T> struct is_diagonal<DiagonalBase<T> >
669
+ { enum { ret = true }; };
670
+
671
+ template<typename T> struct is_diagonal<DiagonalWrapper<T> >
672
+ { enum { ret = true }; };
673
+
674
+ template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
675
+ { enum { ret = true }; };
676
+
677
+
678
+ template<typename T> struct is_identity
679
+ { enum { value = false }; };
680
+
681
+ template<typename T> struct is_identity<CwiseNullaryOp<internal::scalar_identity_op<typename T::Scalar>, T> >
682
+ { enum { value = true }; };
683
+
684
+
685
+ template<typename S1, typename S2> struct glue_shapes;
686
+ template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
687
+
688
+ template<typename T1, typename T2>
689
+ struct possibly_same_dense {
690
+ enum { value = has_direct_access<T1>::ret && has_direct_access<T2>::ret && is_same<typename T1::Scalar,typename T2::Scalar>::value };
691
+ };
692
+
693
+ template<typename T1, typename T2>
694
+ EIGEN_DEVICE_FUNC
695
+ bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<possibly_same_dense<T1,T2>::value>::type * = 0)
696
+ {
697
+ return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
698
+ }
699
+
700
+ template<typename T1, typename T2>
701
+ EIGEN_DEVICE_FUNC
702
+ bool is_same_dense(const T1 &, const T2 &, typename enable_if<!possibly_same_dense<T1,T2>::value>::type * = 0)
703
+ {
704
+ return false;
705
+ }
706
+
707
+ // Internal helper defining the cost of a scalar division for the type T.
708
+ // The default heuristic can be specialized for each scalar type and architecture.
709
+ template<typename T,bool Vectorized=false,typename EnableIf = void>
710
+ struct scalar_div_cost {
711
+ enum { value = 8*NumTraits<T>::MulCost };
712
+ };
713
+
714
+ template<typename T,bool Vectorized>
715
+ struct scalar_div_cost<std::complex<T>, Vectorized> {
716
+ enum { value = 2*scalar_div_cost<T>::value
717
+ + 6*NumTraits<T>::MulCost
718
+ + 3*NumTraits<T>::AddCost
719
+ };
720
+ };
721
+
722
+
723
+ template<bool Vectorized>
724
+ struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
725
+ template<bool Vectorized>
726
+ struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
727
+
728
+
729
+ #ifdef EIGEN_DEBUG_ASSIGN
730
+ std::string demangle_traversal(int t)
731
+ {
732
+ if(t==DefaultTraversal) return "DefaultTraversal";
733
+ if(t==LinearTraversal) return "LinearTraversal";
734
+ if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal";
735
+ if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal";
736
+ if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal";
737
+ return "?";
738
+ }
739
+ std::string demangle_unrolling(int t)
740
+ {
741
+ if(t==NoUnrolling) return "NoUnrolling";
742
+ if(t==InnerUnrolling) return "InnerUnrolling";
743
+ if(t==CompleteUnrolling) return "CompleteUnrolling";
744
+ return "?";
745
+ }
746
+ std::string demangle_flags(int f)
747
+ {
748
+ std::string res;
749
+ if(f&RowMajorBit) res += " | RowMajor";
750
+ if(f&PacketAccessBit) res += " | Packet";
751
+ if(f&LinearAccessBit) res += " | Linear";
752
+ if(f&LvalueBit) res += " | Lvalue";
753
+ if(f&DirectAccessBit) res += " | Direct";
754
+ if(f&NestByRefBit) res += " | NestByRef";
755
+ if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit";
756
+
757
+ return res;
758
+ }
759
+ #endif
760
+
761
+ } // end namespace internal
762
+
763
+
764
+ /** \class ScalarBinaryOpTraits
765
+ * \ingroup Core_Module
766
+ *
767
+ * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
768
+ *
769
+ * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
770
+ *
771
+ * For instance, let \c U1, \c U2 and \c U3 be three user defined scalar types for which most operations between instances of \c U1 and \c U2 returns an \c U3.
772
+ * You can let %Eigen knows that by defining:
773
+ \code
774
+ template<typename BinaryOp>
775
+ struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType; };
776
+ template<typename BinaryOp>
777
+ struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType; };
778
+ \endcode
779
+ * You can then explicitly disable some particular operations to get more explicit error messages:
780
+ \code
781
+ template<>
782
+ struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
783
+ \endcode
784
+ * Or customize the return type for individual operation:
785
+ \code
786
+ template<>
787
+ struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
788
+ \endcode
789
+ *
790
+ * By default, the following generic combinations are supported:
791
+ <table class="manual">
792
+ <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
793
+ <tr ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
794
+ <tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
795
+ <tr ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
796
+ </table>
797
+ *
798
+ * \sa CwiseBinaryOp
799
+ */
800
+ template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
801
+ struct ScalarBinaryOpTraits
802
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
803
+ // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
804
+ : internal::scalar_product_traits<ScalarA,ScalarB>
805
+ #endif // EIGEN_PARSED_BY_DOXYGEN
806
+ {};
807
+
808
+ template<typename T, typename BinaryOp>
809
+ struct ScalarBinaryOpTraits<T,T,BinaryOp>
810
+ {
811
+ typedef T ReturnType;
812
+ };
813
+
814
+ template <typename T, typename BinaryOp>
815
+ struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
816
+ {
817
+ typedef T ReturnType;
818
+ };
819
+ template <typename T, typename BinaryOp>
820
+ struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
821
+ {
822
+ typedef T ReturnType;
823
+ };
824
+
825
+ // For Matrix * Permutation
826
+ template<typename T, typename BinaryOp>
827
+ struct ScalarBinaryOpTraits<T,void,BinaryOp>
828
+ {
829
+ typedef T ReturnType;
830
+ };
831
+
832
+ // For Permutation * Matrix
833
+ template<typename T, typename BinaryOp>
834
+ struct ScalarBinaryOpTraits<void,T,BinaryOp>
835
+ {
836
+ typedef T ReturnType;
837
+ };
838
+
839
+ // for Permutation*Permutation
840
+ template<typename BinaryOp>
841
+ struct ScalarBinaryOpTraits<void,void,BinaryOp>
842
+ {
843
+ typedef void ReturnType;
844
+ };
845
+
846
+ // We require Lhs and Rhs to have "compatible" scalar types.
847
+ // It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
848
+ // So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
849
+ // add together a float matrix and a double matrix.
850
+ #define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
851
+ EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
852
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
853
+
854
+ } // end namespace Eigen
855
+
856
+ #endif // EIGEN_XPRHELPER_H