forgeo-gmlib 0.6.2 (forgeo_gmlib-0.6.2-cp310-cp310-musllinux_1_2_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (554)
  1. forgeo/gmlib/GeologicalModel3D.py +758 -0
  2. forgeo/gmlib/__init__.py +9 -0
  3. forgeo/gmlib/_version.py +34 -0
  4. forgeo/gmlib/architecture/__init__.py +1 -0
  5. forgeo/gmlib/architecture/core.py +130 -0
  6. forgeo/gmlib/common.cpython-310-x86_64-linux-gnu.so +0 -0
  7. forgeo/gmlib/fault_network.py +171 -0
  8. forgeo/gmlib/geomodeller_data.py +101 -0
  9. forgeo/gmlib/geomodeller_project.py +396 -0
  10. forgeo/gmlib/myxmltools.py +30 -0
  11. forgeo/gmlib/pypotential2D.cpython-310-x86_64-linux-gnu.so +0 -0
  12. forgeo/gmlib/pypotential3D.cpython-310-x86_64-linux-gnu.so +0 -0
  13. forgeo/gmlib/tesselate.py +236 -0
  14. forgeo/gmlib/tesselate_deprecated.py +249 -0
  15. forgeo/gmlib/topography_reader.py +198 -0
  16. forgeo/gmlib/utils/__init__.py +0 -0
  17. forgeo/gmlib/utils/append_data.py +508 -0
  18. forgeo/gmlib/utils/export.py +45 -0
  19. forgeo/gmlib/utils/normalized_gradient.py +40 -0
  20. forgeo/gmlib/utils/tools.py +35 -0
  21. forgeo_gmlib-0.6.2.dist-info/METADATA +23 -0
  22. forgeo_gmlib-0.6.2.dist-info/RECORD +554 -0
  23. forgeo_gmlib-0.6.2.dist-info/WHEEL +5 -0
  24. forgeo_gmlib-0.6.2.dist-info/licenses/LICENSE +661 -0
  25. include/eigen3/Eigen/Cholesky +45 -0
  26. include/eigen3/Eigen/CholmodSupport +48 -0
  27. include/eigen3/Eigen/Core +385 -0
  28. include/eigen3/Eigen/Dense +7 -0
  29. include/eigen3/Eigen/Eigen +2 -0
  30. include/eigen3/Eigen/Eigenvalues +60 -0
  31. include/eigen3/Eigen/Geometry +59 -0
  32. include/eigen3/Eigen/Householder +29 -0
  33. include/eigen3/Eigen/IterativeLinearSolvers +48 -0
  34. include/eigen3/Eigen/Jacobi +32 -0
  35. include/eigen3/Eigen/KLUSupport +41 -0
  36. include/eigen3/Eigen/LU +47 -0
  37. include/eigen3/Eigen/MetisSupport +35 -0
  38. include/eigen3/Eigen/OrderingMethods +70 -0
  39. include/eigen3/Eigen/PaStiXSupport +49 -0
  40. include/eigen3/Eigen/PardisoSupport +35 -0
  41. include/eigen3/Eigen/QR +50 -0
  42. include/eigen3/Eigen/QtAlignedMalloc +39 -0
  43. include/eigen3/Eigen/SPQRSupport +34 -0
  44. include/eigen3/Eigen/SVD +50 -0
  45. include/eigen3/Eigen/Sparse +34 -0
  46. include/eigen3/Eigen/SparseCholesky +37 -0
  47. include/eigen3/Eigen/SparseCore +69 -0
  48. include/eigen3/Eigen/SparseLU +48 -0
  49. include/eigen3/Eigen/SparseQR +36 -0
  50. include/eigen3/Eigen/StdDeque +27 -0
  51. include/eigen3/Eigen/StdList +26 -0
  52. include/eigen3/Eigen/StdVector +27 -0
  53. include/eigen3/Eigen/SuperLUSupport +64 -0
  54. include/eigen3/Eigen/UmfPackSupport +40 -0
  55. include/eigen3/Eigen/src/Cholesky/LDLT.h +688 -0
  56. include/eigen3/Eigen/src/Cholesky/LLT.h +558 -0
  57. include/eigen3/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  58. include/eigen3/Eigen/src/CholmodSupport/CholmodSupport.h +682 -0
  59. include/eigen3/Eigen/src/Core/ArithmeticSequence.h +406 -0
  60. include/eigen3/Eigen/src/Core/Array.h +425 -0
  61. include/eigen3/Eigen/src/Core/ArrayBase.h +226 -0
  62. include/eigen3/Eigen/src/Core/ArrayWrapper.h +209 -0
  63. include/eigen3/Eigen/src/Core/Assign.h +90 -0
  64. include/eigen3/Eigen/src/Core/AssignEvaluator.h +1010 -0
  65. include/eigen3/Eigen/src/Core/Assign_MKL.h +178 -0
  66. include/eigen3/Eigen/src/Core/BandMatrix.h +353 -0
  67. include/eigen3/Eigen/src/Core/Block.h +463 -0
  68. include/eigen3/Eigen/src/Core/BooleanRedux.h +164 -0
  69. include/eigen3/Eigen/src/Core/CommaInitializer.h +164 -0
  70. include/eigen3/Eigen/src/Core/ConditionEstimator.h +175 -0
  71. include/eigen3/Eigen/src/Core/CoreEvaluators.h +1741 -0
  72. include/eigen3/Eigen/src/Core/CoreIterators.h +132 -0
  73. include/eigen3/Eigen/src/Core/CwiseBinaryOp.h +183 -0
  74. include/eigen3/Eigen/src/Core/CwiseNullaryOp.h +1001 -0
  75. include/eigen3/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  76. include/eigen3/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  77. include/eigen3/Eigen/src/Core/CwiseUnaryView.h +132 -0
  78. include/eigen3/Eigen/src/Core/DenseBase.h +701 -0
  79. include/eigen3/Eigen/src/Core/DenseCoeffsBase.h +685 -0
  80. include/eigen3/Eigen/src/Core/DenseStorage.h +652 -0
  81. include/eigen3/Eigen/src/Core/Diagonal.h +259 -0
  82. include/eigen3/Eigen/src/Core/DiagonalMatrix.h +391 -0
  83. include/eigen3/Eigen/src/Core/DiagonalProduct.h +28 -0
  84. include/eigen3/Eigen/src/Core/Dot.h +313 -0
  85. include/eigen3/Eigen/src/Core/EigenBase.h +160 -0
  86. include/eigen3/Eigen/src/Core/ForceAlignedAccess.h +150 -0
  87. include/eigen3/Eigen/src/Core/Fuzzy.h +155 -0
  88. include/eigen3/Eigen/src/Core/GeneralProduct.h +465 -0
  89. include/eigen3/Eigen/src/Core/GenericPacketMath.h +1040 -0
  90. include/eigen3/Eigen/src/Core/GlobalFunctions.h +194 -0
  91. include/eigen3/Eigen/src/Core/IO.h +258 -0
  92. include/eigen3/Eigen/src/Core/IndexedView.h +247 -0
  93. include/eigen3/Eigen/src/Core/Inverse.h +117 -0
  94. include/eigen3/Eigen/src/Core/Map.h +171 -0
  95. include/eigen3/Eigen/src/Core/MapBase.h +310 -0
  96. include/eigen3/Eigen/src/Core/MathFunctions.h +2212 -0
  97. include/eigen3/Eigen/src/Core/MathFunctionsImpl.h +200 -0
  98. include/eigen3/Eigen/src/Core/Matrix.h +578 -0
  99. include/eigen3/Eigen/src/Core/MatrixBase.h +541 -0
  100. include/eigen3/Eigen/src/Core/NestByValue.h +85 -0
  101. include/eigen3/Eigen/src/Core/NoAlias.h +109 -0
  102. include/eigen3/Eigen/src/Core/NumTraits.h +351 -0
  103. include/eigen3/Eigen/src/Core/PartialReduxEvaluator.h +237 -0
  104. include/eigen3/Eigen/src/Core/PermutationMatrix.h +605 -0
  105. include/eigen3/Eigen/src/Core/PlainObjectBase.h +1128 -0
  106. include/eigen3/Eigen/src/Core/Product.h +191 -0
  107. include/eigen3/Eigen/src/Core/ProductEvaluators.h +1179 -0
  108. include/eigen3/Eigen/src/Core/Random.h +218 -0
  109. include/eigen3/Eigen/src/Core/Redux.h +515 -0
  110. include/eigen3/Eigen/src/Core/Ref.h +381 -0
  111. include/eigen3/Eigen/src/Core/Replicate.h +142 -0
  112. include/eigen3/Eigen/src/Core/Reshaped.h +454 -0
  113. include/eigen3/Eigen/src/Core/ReturnByValue.h +119 -0
  114. include/eigen3/Eigen/src/Core/Reverse.h +217 -0
  115. include/eigen3/Eigen/src/Core/Select.h +164 -0
  116. include/eigen3/Eigen/src/Core/SelfAdjointView.h +365 -0
  117. include/eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  118. include/eigen3/Eigen/src/Core/Solve.h +188 -0
  119. include/eigen3/Eigen/src/Core/SolveTriangular.h +235 -0
  120. include/eigen3/Eigen/src/Core/SolverBase.h +168 -0
  121. include/eigen3/Eigen/src/Core/StableNorm.h +251 -0
  122. include/eigen3/Eigen/src/Core/StlIterators.h +463 -0
  123. include/eigen3/Eigen/src/Core/Stride.h +120 -0
  124. include/eigen3/Eigen/src/Core/Swap.h +68 -0
  125. include/eigen3/Eigen/src/Core/Transpose.h +464 -0
  126. include/eigen3/Eigen/src/Core/Transpositions.h +386 -0
  127. include/eigen3/Eigen/src/Core/TriangularMatrix.h +994 -0
  128. include/eigen3/Eigen/src/Core/VectorBlock.h +96 -0
  129. include/eigen3/Eigen/src/Core/VectorwiseOp.h +784 -0
  130. include/eigen3/Eigen/src/Core/Visitor.h +381 -0
  131. include/eigen3/Eigen/src/Core/arch/AVX/Complex.h +368 -0
  132. include/eigen3/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
  133. include/eigen3/Eigen/src/Core/arch/AVX/PacketMath.h +1588 -0
  134. include/eigen3/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
  135. include/eigen3/Eigen/src/Core/arch/AVX512/Complex.h +384 -0
  136. include/eigen3/Eigen/src/Core/arch/AVX512/MathFunctions.h +361 -0
  137. include/eigen3/Eigen/src/Core/arch/AVX512/PacketMath.h +2270 -0
  138. include/eigen3/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  139. include/eigen3/Eigen/src/Core/arch/AltiVec/Complex.h +415 -0
  140. include/eigen3/Eigen/src/Core/arch/AltiVec/MathFunctions.h +119 -0
  141. include/eigen3/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2776 -0
  142. include/eigen3/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +159 -0
  143. include/eigen3/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +627 -0
  144. include/eigen3/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.h +2400 -0
  145. include/eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h +2743 -0
  146. include/eigen3/Eigen/src/Core/arch/CUDA/Complex.h +269 -0
  147. include/eigen3/Eigen/src/Core/arch/Default/BFloat16.h +688 -0
  148. include/eigen3/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
  149. include/eigen3/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1662 -0
  150. include/eigen3/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +116 -0
  151. include/eigen3/Eigen/src/Core/arch/Default/Half.h +950 -0
  152. include/eigen3/Eigen/src/Core/arch/Default/Settings.h +49 -0
  153. include/eigen3/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  154. include/eigen3/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
  155. include/eigen3/Eigen/src/Core/arch/GPU/PacketMath.h +1646 -0
  156. include/eigen3/Eigen/src/Core/arch/GPU/TypeCasting.h +79 -0
  157. include/eigen3/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  158. include/eigen3/Eigen/src/Core/arch/MSA/Complex.h +645 -0
  159. include/eigen3/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  160. include/eigen3/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  161. include/eigen3/Eigen/src/Core/arch/NEON/Complex.h +560 -0
  162. include/eigen3/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  163. include/eigen3/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
  164. include/eigen3/Eigen/src/Core/arch/NEON/PacketMath.h +4653 -0
  165. include/eigen3/Eigen/src/Core/arch/NEON/TypeCasting.h +1424 -0
  166. include/eigen3/Eigen/src/Core/arch/SSE/Complex.h +338 -0
  167. include/eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
  168. include/eigen3/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
  169. include/eigen3/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
  170. include/eigen3/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  171. include/eigen3/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  172. include/eigen3/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  173. include/eigen3/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  174. include/eigen3/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  175. include/eigen3/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  176. include/eigen3/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  177. include/eigen3/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  178. include/eigen3/Eigen/src/Core/arch/ZVector/Complex.h +428 -0
  179. include/eigen3/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
  180. include/eigen3/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
  181. include/eigen3/Eigen/src/Core/functors/AssignmentFunctors.h +177 -0
  182. include/eigen3/Eigen/src/Core/functors/BinaryFunctors.h +541 -0
  183. include/eigen3/Eigen/src/Core/functors/NullaryFunctors.h +189 -0
  184. include/eigen3/Eigen/src/Core/functors/StlFunctors.h +166 -0
  185. include/eigen3/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  186. include/eigen3/Eigen/src/Core/functors/UnaryFunctors.h +1131 -0
  187. include/eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2645 -0
  188. include/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h +517 -0
  189. include/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +322 -0
  190. include/eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  191. include/eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +124 -0
  192. include/eigen3/Eigen/src/Core/products/GeneralMatrixVector.h +523 -0
  193. include/eigen3/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  194. include/eigen3/Eigen/src/Core/products/Parallelizer.h +180 -0
  195. include/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +544 -0
  196. include/eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +295 -0
  197. include/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h +262 -0
  198. include/eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  199. include/eigen3/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  200. include/eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h +94 -0
  201. include/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h +472 -0
  202. include/eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +317 -0
  203. include/eigen3/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  204. include/eigen3/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  205. include/eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h +337 -0
  206. include/eigen3/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +167 -0
  207. include/eigen3/Eigen/src/Core/products/TriangularSolverVector.h +148 -0
  208. include/eigen3/Eigen/src/Core/util/BlasUtil.h +583 -0
  209. include/eigen3/Eigen/src/Core/util/ConfigureVectorization.h +521 -0
  210. include/eigen3/Eigen/src/Core/util/Constants.h +563 -0
  211. include/eigen3/Eigen/src/Core/util/DisableStupidWarnings.h +138 -0
  212. include/eigen3/Eigen/src/Core/util/ForwardDeclarations.h +322 -0
  213. include/eigen3/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  214. include/eigen3/Eigen/src/Core/util/IntegralConstant.h +272 -0
  215. include/eigen3/Eigen/src/Core/util/MKL_support.h +137 -0
  216. include/eigen3/Eigen/src/Core/util/Macros.h +1511 -0
  217. include/eigen3/Eigen/src/Core/util/Memory.h +1202 -0
  218. include/eigen3/Eigen/src/Core/util/Meta.h +812 -0
  219. include/eigen3/Eigen/src/Core/util/NonMPL2.h +3 -0
  220. include/eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h +31 -0
  221. include/eigen3/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  222. include/eigen3/Eigen/src/Core/util/StaticAssert.h +221 -0
  223. include/eigen3/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  224. include/eigen3/Eigen/src/Core/util/XprHelper.h +856 -0
  225. include/eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h +345 -0
  226. include/eigen3/Eigen/src/Eigenvalues/ComplexSchur.h +462 -0
  227. include/eigen3/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  228. include/eigen3/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  229. include/eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +417 -0
  230. include/eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  231. include/eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  232. include/eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  233. include/eigen3/Eigen/src/Eigenvalues/RealQZ.h +657 -0
  234. include/eigen3/Eigen/src/Eigenvalues/RealSchur.h +557 -0
  235. include/eigen3/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  236. include/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +904 -0
  237. include/eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  238. include/eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h +560 -0
  239. include/eigen3/Eigen/src/Geometry/AlignedBox.h +486 -0
  240. include/eigen3/Eigen/src/Geometry/AngleAxis.h +247 -0
  241. include/eigen3/Eigen/src/Geometry/EulerAngles.h +114 -0
  242. include/eigen3/Eigen/src/Geometry/Homogeneous.h +501 -0
  243. include/eigen3/Eigen/src/Geometry/Hyperplane.h +282 -0
  244. include/eigen3/Eigen/src/Geometry/OrthoMethods.h +235 -0
  245. include/eigen3/Eigen/src/Geometry/ParametrizedLine.h +232 -0
  246. include/eigen3/Eigen/src/Geometry/Quaternion.h +870 -0
  247. include/eigen3/Eigen/src/Geometry/Rotation2D.h +199 -0
  248. include/eigen3/Eigen/src/Geometry/RotationBase.h +206 -0
  249. include/eigen3/Eigen/src/Geometry/Scaling.h +188 -0
  250. include/eigen3/Eigen/src/Geometry/Transform.h +1566 -0
  251. include/eigen3/Eigen/src/Geometry/Translation.h +202 -0
  252. include/eigen3/Eigen/src/Geometry/Umeyama.h +168 -0
  253. include/eigen3/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  254. include/eigen3/Eigen/src/Householder/BlockHouseholder.h +110 -0
  255. include/eigen3/Eigen/src/Householder/Householder.h +176 -0
  256. include/eigen3/Eigen/src/Householder/HouseholderSequence.h +553 -0
  257. include/eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  258. include/eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +212 -0
  259. include/eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +227 -0
  260. include/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +394 -0
  261. include/eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +453 -0
  262. include/eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +444 -0
  263. include/eigen3/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +198 -0
  264. include/eigen3/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +117 -0
  265. include/eigen3/Eigen/src/Jacobi/Jacobi.h +483 -0
  266. include/eigen3/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  267. include/eigen3/Eigen/src/LU/Determinant.h +117 -0
  268. include/eigen3/Eigen/src/LU/FullPivLU.h +877 -0
  269. include/eigen3/Eigen/src/LU/InverseImpl.h +432 -0
  270. include/eigen3/Eigen/src/LU/PartialPivLU.h +624 -0
  271. include/eigen3/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  272. include/eigen3/Eigen/src/LU/arch/InverseSize4.h +363 -0
  273. include/eigen3/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  274. include/eigen3/Eigen/src/OrderingMethods/Amd.h +435 -0
  275. include/eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h +1863 -0
  276. include/eigen3/Eigen/src/OrderingMethods/Ordering.h +153 -0
  277. include/eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  278. include/eigen3/Eigen/src/PardisoSupport/PardisoSupport.h +545 -0
  279. include/eigen3/Eigen/src/QR/ColPivHouseholderQR.h +674 -0
  280. include/eigen3/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  281. include/eigen3/Eigen/src/QR/CompleteOrthogonalDecomposition.h +635 -0
  282. include/eigen3/Eigen/src/QR/FullPivHouseholderQR.h +713 -0
  283. include/eigen3/Eigen/src/QR/HouseholderQR.h +434 -0
  284. include/eigen3/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  285. include/eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +335 -0
  286. include/eigen3/Eigen/src/SVD/BDCSVD.h +1377 -0
  287. include/eigen3/Eigen/src/SVD/JacobiSVD.h +813 -0
  288. include/eigen3/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  289. include/eigen3/Eigen/src/SVD/SVDBase.h +376 -0
  290. include/eigen3/Eigen/src/SVD/UpperBidiagonalization.h +415 -0
  291. include/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h +697 -0
  292. include/eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +174 -0
  293. include/eigen3/Eigen/src/SparseCore/AmbiVector.h +378 -0
  294. include/eigen3/Eigen/src/SparseCore/CompressedStorage.h +274 -0
  295. include/eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  296. include/eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  297. include/eigen3/Eigen/src/SparseCore/SparseAssign.h +270 -0
  298. include/eigen3/Eigen/src/SparseCore/SparseBlock.h +566 -0
  299. include/eigen3/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  300. include/eigen3/Eigen/src/SparseCore/SparseCompressedBase.h +370 -0
  301. include/eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +722 -0
  302. include/eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +150 -0
  303. include/eigen3/Eigen/src/SparseCore/SparseDenseProduct.h +342 -0
  304. include/eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  305. include/eigen3/Eigen/src/SparseCore/SparseDot.h +98 -0
  306. include/eigen3/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  307. include/eigen3/Eigen/src/SparseCore/SparseMap.h +306 -0
  308. include/eigen3/Eigen/src/SparseCore/SparseMatrix.h +1518 -0
  309. include/eigen3/Eigen/src/SparseCore/SparseMatrixBase.h +399 -0
  310. include/eigen3/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  311. include/eigen3/Eigen/src/SparseCore/SparseProduct.h +182 -0
  312. include/eigen3/Eigen/src/SparseCore/SparseRedux.h +49 -0
  313. include/eigen3/Eigen/src/SparseCore/SparseRef.h +397 -0
  314. include/eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h +659 -0
  315. include/eigen3/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  316. include/eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  317. include/eigen3/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  318. include/eigen3/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  319. include/eigen3/Eigen/src/SparseCore/SparseUtil.h +186 -0
  320. include/eigen3/Eigen/src/SparseCore/SparseVector.h +480 -0
  321. include/eigen3/Eigen/src/SparseCore/SparseView.h +254 -0
  322. include/eigen3/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  323. include/eigen3/Eigen/src/SparseLU/SparseLU.h +925 -0
  324. include/eigen3/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  325. include/eigen3/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  326. include/eigen3/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  327. include/eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +374 -0
  328. include/eigen3/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  329. include/eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  330. include/eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  331. include/eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  332. include/eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +121 -0
  333. include/eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +129 -0
  334. include/eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h +222 -0
  335. include/eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  336. include/eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  337. include/eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  338. include/eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  339. include/eigen3/Eigen/src/SparseQR/SparseQR.h +758 -0
  340. include/eigen3/Eigen/src/StlSupport/StdDeque.h +116 -0
  341. include/eigen3/Eigen/src/StlSupport/StdList.h +106 -0
  342. include/eigen3/Eigen/src/StlSupport/StdVector.h +131 -0
  343. include/eigen3/Eigen/src/StlSupport/details.h +84 -0
  344. include/eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h +1025 -0
  345. include/eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h +642 -0
  346. include/eigen3/Eigen/src/misc/Image.h +82 -0
  347. include/eigen3/Eigen/src/misc/Kernel.h +79 -0
  348. include/eigen3/Eigen/src/misc/RealSvd2x2.h +55 -0
  349. include/eigen3/Eigen/src/misc/blas.h +440 -0
  350. include/eigen3/Eigen/src/misc/lapack.h +152 -0
  351. include/eigen3/Eigen/src/misc/lapacke.h +16292 -0
  352. include/eigen3/Eigen/src/misc/lapacke_mangling.h +17 -0
  353. include/eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h +431 -0
  354. include/eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h +696 -0
  355. include/eigen3/Eigen/src/plugins/BlockMethods.h +1442 -0
  356. include/eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  357. include/eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h +177 -0
  358. include/eigen3/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  359. include/eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h +184 -0
  360. include/eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h +95 -0
  361. include/eigen3/Eigen/src/plugins/ReshapedMethods.h +149 -0
  362. include/eigen3/signature_of_eigen3_matrix_library +1 -0
  363. include/eigen3/unsupported/Eigen/AdolcForward +159 -0
  364. include/eigen3/unsupported/Eigen/AlignedVector3 +234 -0
  365. include/eigen3/unsupported/Eigen/ArpackSupport +30 -0
  366. include/eigen3/unsupported/Eigen/AutoDiff +48 -0
  367. include/eigen3/unsupported/Eigen/BVH +95 -0
  368. include/eigen3/unsupported/Eigen/CXX11/Tensor +137 -0
  369. include/eigen3/unsupported/Eigen/CXX11/TensorSymmetry +42 -0
  370. include/eigen3/unsupported/Eigen/CXX11/ThreadPool +74 -0
  371. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/Tensor.h +554 -0
  372. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h +327 -0
  373. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h +242 -0
  374. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h +1176 -0
  375. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h +1559 -0
  376. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h +1083 -0
  377. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h +510 -0
  378. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h +373 -0
  379. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +1019 -0
  380. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h +73 -0
  381. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h +6 -0
  382. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h +1413 -0
  383. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h +575 -0
  384. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h +1650 -0
  385. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h +1679 -0
  386. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h +455 -0
  387. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h +1126 -0
  388. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h +536 -0
  389. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h +213 -0
  390. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h +342 -0
  391. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h +137 -0
  392. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h +6 -0
  393. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h +104 -0
  394. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h +389 -0
  395. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h +1048 -0
  396. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h +409 -0
  397. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h +234 -0
  398. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h +493 -0
  399. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h +229 -0
  400. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h +980 -0
  401. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +701 -0
  402. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h +389 -0
  403. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h +669 -0
  404. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h +377 -0
  405. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h +232 -0
  406. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h +191 -0
  407. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h +488 -0
  408. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h +297 -0
  409. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h +33 -0
  410. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h +99 -0
  411. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h +44 -0
  412. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h +79 -0
  413. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h +602 -0
  414. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h +735 -0
  415. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h +244 -0
  416. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h +82 -0
  417. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h +257 -0
  418. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h +213 -0
  419. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h +98 -0
  420. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h +327 -0
  421. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h +311 -0
  422. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h +1098 -0
  423. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h +705 -0
  424. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h +286 -0
  425. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h +317 -0
  426. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h +1000 -0
  427. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h +6 -0
  428. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h +973 -0
  429. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h +582 -0
  430. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h +454 -0
  431. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h +462 -0
  432. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h +528 -0
  433. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h +513 -0
  434. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h +466 -0
  435. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h +157 -0
  436. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h +341 -0
  437. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h +299 -0
  438. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h +264 -0
  439. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h +249 -0
  440. include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h +628 -0
  441. include/eigen3/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h +293 -0
  442. include/eigen3/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h +236 -0
  443. include/eigen3/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h +338 -0
  444. include/eigen3/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h +669 -0
  445. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h +67 -0
  446. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h +249 -0
  447. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h +486 -0
  448. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h +236 -0
  449. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h +23 -0
  450. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h +40 -0
  451. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h +301 -0
  452. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h +48 -0
  453. include/eigen3/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h +20 -0
  454. include/eigen3/unsupported/Eigen/CXX11/src/util/CXX11Meta.h +538 -0
  455. include/eigen3/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h +88 -0
  456. include/eigen3/unsupported/Eigen/CXX11/src/util/EmulateArray.h +261 -0
  457. include/eigen3/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h +158 -0
  458. include/eigen3/unsupported/Eigen/EulerAngles +43 -0
  459. include/eigen3/unsupported/Eigen/FFT +420 -0
  460. include/eigen3/unsupported/Eigen/IterativeSolvers +51 -0
  461. include/eigen3/unsupported/Eigen/KroneckerProduct +36 -0
  462. include/eigen3/unsupported/Eigen/LevenbergMarquardt +49 -0
  463. include/eigen3/unsupported/Eigen/MPRealSupport +213 -0
  464. include/eigen3/unsupported/Eigen/MatrixFunctions +504 -0
  465. include/eigen3/unsupported/Eigen/MoreVectorization +24 -0
  466. include/eigen3/unsupported/Eigen/NonLinearOptimization +140 -0
  467. include/eigen3/unsupported/Eigen/NumericalDiff +56 -0
  468. include/eigen3/unsupported/Eigen/OpenGLSupport +322 -0
  469. include/eigen3/unsupported/Eigen/Polynomials +137 -0
  470. include/eigen3/unsupported/Eigen/Skyline +39 -0
  471. include/eigen3/unsupported/Eigen/SparseExtra +54 -0
  472. include/eigen3/unsupported/Eigen/SpecialFunctions +103 -0
  473. include/eigen3/unsupported/Eigen/Splines +35 -0
  474. include/eigen3/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h +108 -0
  475. include/eigen3/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h +730 -0
  476. include/eigen3/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h +220 -0
  477. include/eigen3/unsupported/Eigen/src/BVH/BVAlgorithms.h +293 -0
  478. include/eigen3/unsupported/Eigen/src/BVH/KdBVH.h +223 -0
  479. include/eigen3/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h +790 -0
  480. include/eigen3/unsupported/Eigen/src/EulerAngles/EulerAngles.h +356 -0
  481. include/eigen3/unsupported/Eigen/src/EulerAngles/EulerSystem.h +306 -0
  482. include/eigen3/unsupported/Eigen/src/FFT/ei_fftw_impl.h +261 -0
  483. include/eigen3/unsupported/Eigen/src/FFT/ei_kissfft_impl.h +449 -0
  484. include/eigen3/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +187 -0
  485. include/eigen3/unsupported/Eigen/src/IterativeSolvers/DGMRES.h +511 -0
  486. include/eigen3/unsupported/Eigen/src/IterativeSolvers/GMRES.h +335 -0
  487. include/eigen3/unsupported/Eigen/src/IterativeSolvers/IDRS.h +436 -0
  488. include/eigen3/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h +90 -0
  489. include/eigen3/unsupported/Eigen/src/IterativeSolvers/IterationController.h +154 -0
  490. include/eigen3/unsupported/Eigen/src/IterativeSolvers/MINRES.h +267 -0
  491. include/eigen3/unsupported/Eigen/src/IterativeSolvers/Scaling.h +193 -0
  492. include/eigen3/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +305 -0
  493. include/eigen3/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h +84 -0
  494. include/eigen3/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h +202 -0
  495. include/eigen3/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h +160 -0
  496. include/eigen3/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h +188 -0
  497. include/eigen3/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h +396 -0
  498. include/eigen3/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +441 -0
  499. include/eigen3/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h +569 -0
  500. include/eigen3/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h +373 -0
  501. include/eigen3/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h +705 -0
  502. include/eigen3/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h +368 -0
  503. include/eigen3/unsupported/Eigen/src/MatrixFunctions/StemFunction.h +117 -0
  504. include/eigen3/unsupported/Eigen/src/MoreVectorization/MathFunctions.h +95 -0
  505. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h +601 -0
  506. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h +657 -0
  507. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/chkder.h +66 -0
  508. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/covar.h +70 -0
  509. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/dogleg.h +107 -0
  510. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h +79 -0
  511. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/lmpar.h +298 -0
  512. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h +91 -0
  513. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h +30 -0
  514. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/r1updt.h +99 -0
  515. include/eigen3/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h +49 -0
  516. include/eigen3/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h +130 -0
  517. include/eigen3/unsupported/Eigen/src/Polynomials/Companion.h +280 -0
  518. include/eigen3/unsupported/Eigen/src/Polynomials/PolynomialSolver.h +429 -0
  519. include/eigen3/unsupported/Eigen/src/Polynomials/PolynomialUtils.h +143 -0
  520. include/eigen3/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h +352 -0
  521. include/eigen3/unsupported/Eigen/src/Skyline/SkylineMatrix.h +862 -0
  522. include/eigen3/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h +212 -0
  523. include/eigen3/unsupported/Eigen/src/Skyline/SkylineProduct.h +295 -0
  524. include/eigen3/unsupported/Eigen/src/Skyline/SkylineStorage.h +259 -0
  525. include/eigen3/unsupported/Eigen/src/Skyline/SkylineUtil.h +89 -0
  526. include/eigen3/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h +122 -0
  527. include/eigen3/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h +1079 -0
  528. include/eigen3/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +404 -0
  529. include/eigen3/unsupported/Eigen/src/SparseExtra/MarketIO.h +282 -0
  530. include/eigen3/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h +247 -0
  531. include/eigen3/unsupported/Eigen/src/SparseExtra/RandomSetter.h +349 -0
  532. include/eigen3/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsArrayAPI.h +286 -0
  533. include/eigen3/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsBFloat16.h +68 -0
  534. include/eigen3/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsFunctors.h +357 -0
  535. include/eigen3/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsHalf.h +66 -0
  536. include/eigen3/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsImpl.h +1959 -0
  537. include/eigen3/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsPacketMath.h +118 -0
  538. include/eigen3/unsupported/Eigen/src/SpecialFunctions/HipVectorCompatibility.h +67 -0
  539. include/eigen3/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h +167 -0
  540. include/eigen3/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsBFloat16.h +58 -0
  541. include/eigen3/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h +330 -0
  542. include/eigen3/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h +58 -0
  543. include/eigen3/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h +2051 -0
  544. include/eigen3/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h +79 -0
  545. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/AVX/BesselFunctions.h +46 -0
  546. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/AVX/SpecialFunctions.h +16 -0
  547. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/BesselFunctions.h +51 -0
  548. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/SpecialFunctions.h +16 -0
  549. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/GPU/SpecialFunctions.h +369 -0
  550. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/NEON/BesselFunctions.h +54 -0
  551. include/eigen3/unsupported/Eigen/src/SpecialFunctions/arch/NEON/SpecialFunctions.h +34 -0
  552. include/eigen3/unsupported/Eigen/src/Splines/Spline.h +507 -0
  553. include/eigen3/unsupported/Eigen/src/Splines/SplineFitting.h +431 -0
  554. include/eigen3/unsupported/Eigen/src/Splines/SplineFwd.h +93 -0
include/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -0,0 +1,1000 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5
+ // Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd <eigen@codeplay.com>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
12
+ #define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H
13
+
14
+ // clang is incompatible with the CUDA syntax wrt making a kernel a class friend,
15
+ // so we'll use a macro to make clang happy.
16
+ #ifndef KERNEL_FRIEND
17
+ #if defined(__clang__) && (defined(__CUDA__) || defined(__HIP__))
18
+ #define KERNEL_FRIEND friend __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024
19
+ #else
20
+ #define KERNEL_FRIEND friend
21
+ #endif
22
+ #endif
23
+
24
+
25
+ namespace Eigen {
26
+
27
+ namespace internal {
28
+ template<typename Op, typename Dims, typename XprType,template <class> class MakePointer_ >
29
+ struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
30
+ : traits<XprType>
31
+ {
32
+ typedef traits<XprType> XprTraits;
33
+ typedef typename XprTraits::Scalar Scalar;
34
+ typedef typename XprTraits::StorageKind StorageKind;
35
+ typedef typename XprTraits::Index Index;
36
+ typedef typename XprType::Nested Nested;
37
+ static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
38
+ static const int Layout = XprTraits::Layout;
39
+ typedef typename XprTraits::PointerType PointerType;
40
+
41
+ template <class T> struct MakePointer {
42
+ // Intermediate typedef to workaround MSVC issue.
43
+ typedef MakePointer_<T> MakePointerT;
44
+ typedef typename MakePointerT::Type Type;
45
+ };
46
+ };
47
+
48
+ template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
49
+ struct eval<TensorReductionOp<Op, Dims, XprType, MakePointer_>, Eigen::Dense>
50
+ {
51
+ typedef const TensorReductionOp<Op, Dims, XprType, MakePointer_>& type;
52
+ };
53
+
54
+ template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
55
+ struct nested<TensorReductionOp<Op, Dims, XprType, MakePointer_>, 1, typename eval<TensorReductionOp<Op, Dims, XprType, MakePointer_> >::type>
56
+ {
57
+ typedef TensorReductionOp<Op, Dims, XprType, MakePointer_> type;
58
+ };
59
+
60
+
61
+ template <typename OutputDims> struct DimInitializer {
62
+ template <typename InputDims, typename ReducedDims> EIGEN_DEVICE_FUNC
63
+ static void run(const InputDims& input_dims,
64
+ const array<bool, internal::array_size<InputDims>::value>& reduced,
65
+ OutputDims* output_dims, ReducedDims* reduced_dims) {
66
+ const int NumInputDims = internal::array_size<InputDims>::value;
67
+ int outputIndex = 0;
68
+ int reduceIndex = 0;
69
+ for (int i = 0; i < NumInputDims; ++i) {
70
+ if (reduced[i]) {
71
+ (*reduced_dims)[reduceIndex] = input_dims[i];
72
+ ++reduceIndex;
73
+ } else {
74
+ (*output_dims)[outputIndex] = input_dims[i];
75
+ ++outputIndex;
76
+ }
77
+ }
78
+ }
79
+ };
80
+
81
+ template <> struct DimInitializer<Sizes<> > {
82
+ template <typename InputDims, typename Index, size_t Rank> EIGEN_DEVICE_FUNC
83
+ static void run(const InputDims& input_dims, const array<bool, Rank>&,
84
+ Sizes<>*, array<Index, Rank>* reduced_dims) {
85
+ const int NumInputDims = internal::array_size<InputDims>::value;
86
+ for (int i = 0; i < NumInputDims; ++i) {
87
+ (*reduced_dims)[i] = input_dims[i];
88
+ }
89
+ }
90
+ };
91
+
92
+
93
+ template <typename ReducedDims, int NumTensorDims, int Layout>
94
+ struct are_inner_most_dims {
95
+ static const bool value = false;
96
+ };
97
+ template <typename ReducedDims, int NumTensorDims, int Layout>
98
+ struct preserve_inner_most_dims {
99
+ static const bool value = false;
100
+ };
101
+
102
+ #if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
103
+ template <typename ReducedDims, int NumTensorDims>
104
+ struct are_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
105
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
106
+ static const bool tmp2 = index_statically_eq<ReducedDims>(0, 0);
107
+ static const bool tmp3 = index_statically_eq<ReducedDims>(array_size<ReducedDims>::value-1, array_size<ReducedDims>::value-1);
108
+ static const bool value = tmp1 & tmp2 & tmp3;
109
+ };
110
+ template <typename ReducedDims, int NumTensorDims>
111
+ struct are_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
112
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
113
+ static const bool tmp2 = index_statically_eq<ReducedDims>(0, NumTensorDims - array_size<ReducedDims>::value);
114
+ static const bool tmp3 = index_statically_eq<ReducedDims>(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
115
+ static const bool value = tmp1 & tmp2 & tmp3;
116
+
117
+ };
118
+ template <typename ReducedDims, int NumTensorDims>
119
+ struct preserve_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
120
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
121
+ static const bool tmp2 = index_statically_gt<ReducedDims>(0, 0);
122
+ static const bool value = tmp1 & tmp2;
123
+
124
+ };
125
+ template <typename ReducedDims, int NumTensorDims>
126
+ struct preserve_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
127
+ static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
128
+ static const bool tmp2 = index_statically_lt<ReducedDims>(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
129
+ static const bool value = tmp1 & tmp2;
130
+ };
131
+ #endif
132
+
133
+
134
+ template <int DimIndex, typename Self, typename Op>
135
+ struct GenericDimReducer {
136
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
137
+ EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
138
+ for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
139
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
140
+ GenericDimReducer<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
141
+ }
142
+ }
143
+ };
144
+ template <typename Self, typename Op>
145
+ struct GenericDimReducer<0, Self, Op> {
146
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
147
+ for (int j = 0; j < self.m_reducedDims[0]; ++j) {
148
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
149
+ reducer.reduce(self.m_impl.coeff(input), accum);
150
+ }
151
+ }
152
+ };
153
+ template <typename Self, typename Op>
154
+ struct GenericDimReducer<-1, Self, Op> {
155
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index index, Op& reducer, typename Self::CoeffReturnType* accum) {
156
+ reducer.reduce(self.m_impl.coeff(index), accum);
157
+ }
158
+ };
159
+
160
+ template <typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess),
161
+ bool UseTreeReduction = (!Self::ReducerTraits::IsStateful &&
162
+ !Self::ReducerTraits::IsExactlyAssociative &&
163
+ // GPU threads can quickly run out of stack space
164
+ // for moderately sized inputs.
165
+ !Self::RunningOnGPU
166
+ )>
167
+ struct InnerMostDimReducer {
168
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
169
+ typename Self::CoeffReturnType accum = reducer.initialize();
170
+ for (typename Self::Index j = 0; j < numValuesToReduce; ++j) {
171
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
172
+ }
173
+ return reducer.finalize(accum);
174
+ }
175
+ };
176
+
177
+ template <typename Self, typename Op>
178
+ struct InnerMostDimReducer<Self, Op, true, false> {
179
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
180
+ const typename Self::Index packetSize = internal::unpacket_traits<typename Self::PacketReturnType>::size;
181
+ const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize;
182
+ typename Self::PacketReturnType paccum = reducer.template initializePacket<typename Self::PacketReturnType>();
183
+ for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) {
184
+ reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &paccum);
185
+ }
186
+ typename Self::CoeffReturnType accum = reducer.initialize();
187
+ for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) {
188
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
189
+ }
190
+ return reducer.finalizeBoth(accum, paccum);
191
+ }
192
+ };
193
+
194
+ #if !defined(EIGEN_HIPCC)
195
+ static const int kLeafSize = 1024;
196
+
197
+ template <typename Self, typename Op>
198
+ struct InnerMostDimReducer<Self, Op, false, true> {
199
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType
200
+ reduce(const Self& self, typename Self::Index firstIndex,
201
+ typename Self::Index numValuesToReduce, Op& reducer) {
202
+ typename Self::CoeffReturnType accum = reducer.initialize();
203
+ if (numValuesToReduce > kLeafSize) {
204
+ const typename Self::Index half = numValuesToReduce / 2;
205
+ reducer.reduce(reduce(self, firstIndex, half, reducer), &accum);
206
+ reducer.reduce(
207
+ reduce(self, firstIndex + half, numValuesToReduce - half, reducer),
208
+ &accum);
209
+ } else {
210
+ for (typename Self::Index j = 0; j < numValuesToReduce; ++j) {
211
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
212
+ }
213
+ }
214
+ return reducer.finalize(accum);
215
+ }
216
+ };
217
+
218
+ template <typename Self, typename Op>
219
+ struct InnerMostDimReducer<Self, Op, true, true> {
220
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType
221
+ reduce(const Self& self, typename Self::Index firstIndex,
222
+ typename Self::Index numValuesToReduce, Op& reducer) {
223
+ const typename Self::Index packetSize =
224
+ internal::unpacket_traits<typename Self::PacketReturnType>::size;
225
+ typename Self::CoeffReturnType accum = reducer.initialize();
226
+ if (numValuesToReduce > packetSize * kLeafSize) {
227
+ // Make sure the split point is aligned on a packet boundary.
228
+ const typename Self::Index split =
229
+ packetSize *
230
+ divup(firstIndex + divup(numValuesToReduce, typename Self::Index(2)),
231
+ packetSize);
232
+ const typename Self::Index num_left =
233
+ numext::mini(split - firstIndex, numValuesToReduce);
234
+ reducer.reduce(reduce(self, firstIndex, num_left, reducer), &accum);
235
+ if (num_left < numValuesToReduce) {
236
+ reducer.reduce(
237
+ reduce(self, split, numValuesToReduce - num_left, reducer), &accum);
238
+ }
239
+ return reducer.finalize(accum);
240
+ } else {
241
+ const typename Self::Index UnrollSize =
242
+ (numValuesToReduce / (2*packetSize)) * 2*packetSize;
243
+ const typename Self::Index VectorizedSize =
244
+ (numValuesToReduce / packetSize) * packetSize;
245
+ typename Self::PacketReturnType paccum =
246
+ reducer.template initializePacket<typename Self::PacketReturnType>();
247
+ typename Self::PacketReturnType paccum2 =
248
+ reducer.template initializePacket<typename Self::PacketReturnType>();
249
+ for (typename Self::Index j = 0; j < UnrollSize; j += packetSize * 2) {
250
+ reducer.reducePacket(
251
+ self.m_impl.template packet<Unaligned>(firstIndex + j), &paccum);
252
+ reducer.reducePacket(
253
+ self.m_impl.template packet<Unaligned>(firstIndex + j + packetSize),
254
+ &paccum2);
255
+ }
256
+ for (typename Self::Index j = UnrollSize; j < VectorizedSize; j+= packetSize) {
257
+ reducer.reducePacket(self.m_impl.template packet<Unaligned>(
258
+ firstIndex + j), &paccum);
259
+ }
260
+ reducer.reducePacket(paccum2, &paccum);
261
+ for (typename Self::Index j = VectorizedSize; j < numValuesToReduce;
262
+ ++j) {
263
+ reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
264
+ }
265
+ return reducer.finalizeBoth(accum, paccum);
266
+ }
267
+ }
268
+ };
269
+ #endif
270
+
271
+ template <int DimIndex, typename Self, typename Op, bool vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess)>
272
+ struct InnerMostDimPreserver {
273
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
274
+ eigen_assert(false && "should never be called");
275
+ }
276
+ };
277
+
278
+ template <int DimIndex, typename Self, typename Op>
279
+ struct InnerMostDimPreserver<DimIndex, Self, Op, true> {
280
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
281
+ EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
282
+ for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
283
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
284
+ InnerMostDimPreserver<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
285
+ }
286
+ }
287
+ };
288
+
289
+ template <typename Self, typename Op>
290
+ struct InnerMostDimPreserver<0, Self, Op, true> {
291
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
292
+ for (typename Self::Index j = 0; j < self.m_reducedDims[0]; ++j) {
293
+ const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
294
+ reducer.reducePacket(self.m_impl.template packet<Unaligned>(input), accum);
295
+ }
296
+ }
297
+ };
298
+ template <typename Self, typename Op>
299
+ struct InnerMostDimPreserver<-1, Self, Op, true> {
300
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) {
301
+ eigen_assert(false && "should never be called");
302
+ }
303
+ };
304
+
305
+ // Default full reducer
306
+ template <typename Self, typename Op, typename Device, bool Vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess)>
307
+ struct FullReducer {
308
+ static const bool HasOptimizedImplementation = false;
309
+
310
+ static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const Device&, typename Self::EvaluatorPointerType output) {
311
+ const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions());
312
+ *output = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(self, 0, num_coeffs, reducer);
313
+ }
314
+ };
315
+
316
+
317
+ #ifdef EIGEN_USE_THREADS
318
+ // Multithreaded full reducers
319
+ template <typename Self, typename Op,
320
+ bool Vectorizable = (Self::InputPacketAccess && Self::ReducerTraits::PacketAccess)>
321
+ struct FullReducerShard {
322
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Self& self, typename Self::Index firstIndex,
323
+ typename Self::Index numValuesToReduce, Op& reducer,
324
+ typename Self::CoeffReturnType* output) {
325
+ *output = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(
326
+ self, firstIndex, numValuesToReduce, reducer);
327
+ }
328
+ };
329
+
330
+ // Multithreaded full reducer
331
+ template <typename Self, typename Op, bool Vectorizable>
332
+ struct FullReducer<Self, Op, ThreadPoolDevice, Vectorizable> {
333
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful;
334
+ static const Index PacketSize =
335
+ unpacket_traits<typename Self::PacketReturnType>::size;
336
+
337
+ // launch one reducer per thread and accumulate the result.
338
+ static void run(const Self& self, Op& reducer, const ThreadPoolDevice& device,
339
+ typename Self::CoeffReturnType* output) {
340
+ typedef typename Self::Index Index;
341
+ const Index num_coeffs = array_prod(self.m_impl.dimensions());
342
+ if (num_coeffs == 0) {
343
+ *output = reducer.finalize(reducer.initialize());
344
+ return;
345
+ }
346
+ const TensorOpCost cost =
347
+ self.m_impl.costPerCoeff(Vectorizable) +
348
+ TensorOpCost(0, 0, internal::functor_traits<Op>::Cost, Vectorizable,
349
+ PacketSize);
350
+ const int num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
351
+ num_coeffs, cost, device.numThreads());
352
+ if (num_threads == 1) {
353
+ *output =
354
+ InnerMostDimReducer<Self, Op, Vectorizable>::reduce(self, 0, num_coeffs, reducer);
355
+ return;
356
+ }
357
+ const Index blocksize =
358
+ std::floor<Index>(static_cast<float>(num_coeffs) / num_threads);
359
+ const Index numblocks = blocksize > 0 ? num_coeffs / blocksize : 0;
360
+ eigen_assert(num_coeffs >= numblocks * blocksize);
361
+
362
+ Barrier barrier(internal::convert_index<unsigned int>(numblocks));
363
+ MaxSizeVector<typename Self::CoeffReturnType> shards(numblocks, reducer.initialize());
364
+ for (Index i = 0; i < numblocks; ++i) {
365
+ device.enqueue_with_barrier(&barrier, &FullReducerShard<Self, Op, Vectorizable>::run,
366
+ self, i * blocksize, blocksize, reducer,
367
+ &shards[i]);
368
+ }
369
+ typename Self::CoeffReturnType finalShard;
370
+ if (numblocks * blocksize < num_coeffs) {
371
+ finalShard = InnerMostDimReducer<Self, Op, Vectorizable>::reduce(
372
+ self, numblocks * blocksize, num_coeffs - numblocks * blocksize,
373
+ reducer);
374
+ } else {
375
+ finalShard = reducer.initialize();
376
+ }
377
+ barrier.Wait();
378
+
379
+ for (Index i = 0; i < numblocks; ++i) {
380
+ reducer.reduce(shards[i], &finalShard);
381
+ }
382
+ *output = reducer.finalize(finalShard);
383
+ }
384
+ };
385
+
386
+ #endif
387
+
388
+
389
+ // Default inner reducer
390
+ template <typename Self, typename Op, typename Device>
391
+ struct InnerReducer {
392
+ static const bool HasOptimizedImplementation = false;
393
+
394
+ EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
395
+ eigen_assert(false && "Not implemented");
396
+ return true;
397
+ }
398
+ };
399
+
400
+ // Default outer reducer
401
+ template <typename Self, typename Op, typename Device>
402
+ struct OuterReducer {
403
+ static const bool HasOptimizedImplementation = false;
404
+
405
+ EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
406
+ eigen_assert(false && "Not implemented");
407
+ return true;
408
+ }
409
+ };
410
+
411
+ #ifdef EIGEN_USE_SYCL
412
+ // Default Generic reducer
413
+ template <typename Self, typename Op, typename Device>
414
+ struct GenericReducer {
415
+ static const bool HasOptimizedImplementation = false;
416
+
417
+ EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
418
+ eigen_assert(false && "Not implemented");
419
+ return true;
420
+ }
421
+ };
422
+ #endif
423
+
424
+ #if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
425
+ template <int B, int N, typename S, typename R, typename I_>
426
+ __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*);
427
+
428
+
429
+ #if defined(EIGEN_HAS_GPU_FP16)
430
+ template <typename S, typename R, typename I_>
431
+ __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitFullReduxKernelHalfFloat(R, const S, I_, internal::packet_traits<half>::type*);
432
+ template <int B, int N, typename S, typename R, typename I_>
433
+ __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernelHalfFloat(R, const S, I_, half*, internal::packet_traits<half>::type*);
434
+ template <int NPT, typename S, typename R, typename I_>
435
+ __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernelHalfFloat(R, const S, I_, I_, half*);
436
+
437
+ #endif
438
+
439
+ template <int NPT, typename S, typename R, typename I_>
440
+ __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
441
+
442
+ template <int NPT, typename S, typename R, typename I_>
443
+ __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
444
+ #endif
445
+
446
+ /**
447
+ * For SYCL, the return type of the reduction is deduced from the initialize method of the given Op.
448
+ * This allows the reduction to have a different type for the accumulator than the input data type.
449
+ * If this is the case, the functor needs to have two reduce methods: one for reducing an element of the input
+ * with the accumulator and the other for reducing two accumulators.
451
+ * Such a reducer can be useful for instance when the accumulator is a boolean or a bitset that checks for
452
+ * some properties of the input.
453
+ */
454
+ template <typename Op, typename CoeffReturnType>
455
+ struct ReductionReturnType {
456
+ #if defined(EIGEN_USE_SYCL)
457
+ typedef typename remove_const<decltype(std::declval<Op>().initialize())>::type type;
458
+ #else
459
+ typedef typename remove_const<CoeffReturnType>::type type;
460
+ #endif
461
+ };
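+ // Illustrative sketch of such a reducer (hypothetical, not defined in Eigen): because
+ // initialize() returns bool, the SYCL path deduces a bool accumulator even though the
+ // inputs are float, so both reduce() overloads described above are required.
+ //
+ //   struct AnyNonZero {
+ //     bool initialize() const { return false; }
+ //     // reduce an input element into the accumulator
+ //     void reduce(const float t, bool* accum) const { *accum = *accum || (t != 0.0f); }
+ //     // reduce two partial accumulators
+ //     void reduce(const bool partial, bool* accum) const { *accum = *accum || partial; }
+ //     bool finalize(const bool accum) const { return accum; }
+ //   };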
+
463
+ } // end namespace internal
464
+
465
+ /**
466
+ * \ingroup CXX11_Tensor_Module
467
+ *
468
+ * \brief Tensor reduction class.
469
+ *
470
+ */
471
+ template <typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
472
+ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType, MakePointer_>, ReadOnlyAccessors> {
473
+ public:
474
+ typedef typename Eigen::internal::traits<TensorReductionOp>::Scalar Scalar;
475
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
476
+ typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
477
+ typedef typename Eigen::internal::nested<TensorReductionOp>::type Nested;
478
+ typedef typename Eigen::internal::traits<TensorReductionOp>::StorageKind StorageKind;
479
+ typedef typename Eigen::internal::traits<TensorReductionOp>::Index Index;
480
+
481
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
482
+ TensorReductionOp(const XprType& expr, const Dims& dims) : m_expr(expr), m_dims(dims)
483
+ { }
484
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
485
+ TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer) : m_expr(expr), m_dims(dims), m_reducer(reducer)
486
+ { }
487
+
488
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
489
+ const XprType& expression() const { return m_expr; }
490
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
491
+ const Dims& dims() const { return m_dims; }
492
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
493
+ const Op& reducer() const { return m_reducer; }
494
+
495
+ protected:
496
+ typename XprType::Nested m_expr;
497
+ const Dims m_dims;
498
+ const Op m_reducer;
499
+ };
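+ // Usage sketch (illustrative): reductions are normally created through the TensorBase
+ // API rather than by instantiating TensorReductionOp directly, e.g.
+ //
+ //   Eigen::Tensor<float, 3> t(2, 3, 4);
+ //   t.setRandom();
+ //   Eigen::array<Eigen::Index, 1> dims{{1}};
+ //   Eigen::Tensor<float, 2> s = t.sum(dims);  // reduces dimension 1, giving a 2x4 result
+ //
+ // where t.sum(dims) builds a TensorReductionOp with internal::SumReducer as Op.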
+
501
+ template<typename ArgType, typename Device>
502
+ struct TensorReductionEvaluatorBase;
503
+
504
+ // Eval as rvalue
505
+ template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
506
+ struct TensorReductionEvaluatorBase<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
507
+ {
508
+ typedef internal::reducer_traits<Op, Device> ReducerTraits;
509
+ typedef Dims ReducedDims;
510
+ typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
511
+ typedef typename XprType::Index Index;
512
+ typedef ArgType ChildType;
513
+ typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
514
+ static const int NumInputDims = internal::array_size<InputDimensions>::value;
515
+ static const int NumReducedDims = internal::array_size<Dims>::value;
516
+ static const int NumOutputDims = NumInputDims - NumReducedDims;
517
+ typedef typename internal::conditional<NumOutputDims==0, Sizes<>, DSizes<Index, NumOutputDims> >::type Dimensions;
518
+ typedef typename XprType::Scalar Scalar;
519
+ typedef TensorReductionEvaluatorBase<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Self;
520
+ static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
521
+ typedef typename internal::ReductionReturnType<Op, typename XprType::CoeffReturnType>::type CoeffReturnType;
522
+ typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
523
+ static const Index PacketSize = PacketType<CoeffReturnType, Device>::size;
524
+
525
+ typedef typename Eigen::internal::traits<XprType>::PointerType TensorPointerType;
526
+ typedef StorageMemory<CoeffReturnType, Device> Storage;
527
+ typedef typename Storage::Type EvaluatorPointerType;
528
+
529
+ // Subset of strides of the input tensor for the non-reduced dimensions.
530
+ // Indexed by output dimensions.
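+ // (max_n_1<NumOutputDims>::size equals NumOutputDims, or 1 when NumOutputDims == 0,
+ // so the preserved-strides array below is never zero-sized, even for full reductions.)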
+ static const int NumPreservedStrides = max_n_1<NumOutputDims>::size;
532
+
533
+ // For full reductions
534
+ #if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
535
+ static constexpr bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
536
+ static constexpr bool RunningOnSycl = false;
537
+ #elif defined(EIGEN_USE_SYCL)
538
+ static const bool RunningOnSycl = internal::is_same<typename internal::remove_all<Device>::type, Eigen::SyclDevice>::value;
539
+ static const bool RunningOnGPU = false;
540
+ #else
541
+ static constexpr bool RunningOnGPU = false;
542
+ static constexpr bool RunningOnSycl = false;
543
+ #endif
544
+
545
+ enum {
546
+ IsAligned = false,
547
+ PacketAccess = Self::InputPacketAccess && ReducerTraits::PacketAccess,
548
+ BlockAccess = false,
549
+ PreferBlockAccess = true,
550
+ Layout = TensorEvaluator<ArgType, Device>::Layout,
551
+ CoordAccess = false, // to be implemented
552
+ RawAccess = false
553
+ };
554
+
555
+ typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
556
+
557
+ //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
558
+ typedef internal::TensorBlockNotImplemented TensorBlock;
559
+ //===--------------------------------------------------------------------===//
560
+
561
+ static const bool ReducingInnerMostDims = internal::are_inner_most_dims<Dims, NumInputDims, Layout>::value;
562
+ static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims<Dims, NumInputDims, Layout>::value;
563
+ static const bool RunningFullReduction = (NumOutputDims==0);
564
+
565
+ EIGEN_STRONG_INLINE TensorReductionEvaluatorBase(const XprType& op, const Device& device)
566
+ : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device)
567
+ {
568
+ EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
569
+ EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
570
+ YOU_MADE_A_PROGRAMMING_MISTAKE);
571
+
572
+ // Build the bitmap indicating if an input dimension is reduced or not.
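+ // For example (illustrative), reducing dims {1} of a 3-D input yields
+ // m_reduced = {false, true, false}.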
+ for (int i = 0; i < NumInputDims; ++i) {
574
+ m_reduced[i] = false;
575
+ }
576
+ for (int i = 0; i < NumReducedDims; ++i) {
577
+ eigen_assert(op.dims()[i] >= 0);
578
+ eigen_assert(op.dims()[i] < NumInputDims);
579
+ m_reduced[op.dims()[i]] = true;
580
+ }
581
+
582
+ const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
583
+ internal::DimInitializer<Dimensions>::run(input_dims, m_reduced, &m_dimensions, &m_reducedDims);
584
+
585
+ // Precompute output strides.
586
+ if (NumOutputDims > 0) {
587
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
588
+ m_outputStrides[0] = 1;
589
+ for (int i = 1; i < NumOutputDims; ++i) {
590
+ m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
591
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
592
+ }
593
+ } else {
594
+ m_outputStrides[static_cast<size_t>(NumOutputDims - 1)] = 1;
595
+ for (int i = NumOutputDims - 2; i >= 0; --i) {
596
+ m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
597
+ m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
598
+ }
599
+ }
600
+ }
601
+
602
+ // Precompute input strides.
603
+ if (NumInputDims > 0) {
604
+ array<Index, NumInputDims> input_strides;
605
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
606
+ input_strides[0] = 1;
607
+ for (int i = 1; i < NumInputDims; ++i) {
608
+ input_strides[i] = input_strides[i-1] * input_dims[i-1];
609
+ }
610
+ } else {
611
+ input_strides.back() = 1;
612
+ for (int i = NumInputDims - 2; i >= 0; --i) {
613
+ input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
614
+ }
615
+ }
616
+
617
+ int outputIndex = 0;
618
+ int reduceIndex = 0;
619
+ for (int i = 0; i < NumInputDims; ++i) {
620
+ if (m_reduced[i]) {
621
+ m_reducedStrides[reduceIndex] = input_strides[i];
622
+ ++reduceIndex;
623
+ } else {
624
+ m_preservedStrides[outputIndex] = input_strides[i];
625
+ m_output_to_input_dim_map[outputIndex] = i;
626
+ ++outputIndex;
627
+ }
628
+ }
629
+ }
630
+
631
+ // Special case for full reductions
632
+ if (NumOutputDims == 0) {
633
+ m_preservedStrides[0] = internal::array_prod(input_dims);
634
+ }
635
+
636
+ m_numValuesToReduce =
637
+ NumOutputDims == 0
638
+ ? internal::array_prod(input_dims)
639
+ : (static_cast<int>(Layout) == static_cast<int>(ColMajor))
640
+ ? m_preservedStrides[0]
641
+ : m_preservedStrides[NumOutputDims - 1];
642
+ }
643
+
644
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
645
+
646
+ EIGEN_STRONG_INLINE
647
+ bool evalSubExprsIfNeededCommon(EvaluatorPointerType data) {
648
+ // Use the FullReducer if possible.
649
+ if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction &&
650
+ internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
651
+ ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
652
+ !RunningOnGPU))) {
653
+ bool need_assign = false;
654
+ if (!data) {
655
+ m_result = static_cast<EvaluatorPointerType>(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType))));
656
+ data = m_result;
657
+ need_assign = true;
658
+ }
659
+ Op reducer(m_reducer);
660
+ internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
661
+ return need_assign;
662
+ }
663
+
664
+ // Attempt to use an optimized reduction.
665
+ else if ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) || (RunningOnSycl)) {
666
+ bool reducing_inner_dims = true;
667
+ for (int i = 0; i < NumReducedDims; ++i) {
668
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
669
+ reducing_inner_dims &= m_reduced[i];
670
+ } else {
671
+ reducing_inner_dims &= m_reduced[NumInputDims - 1 - i];
672
+ }
673
+ }
674
+ if (internal::InnerReducer<Self, Op, Device>::HasOptimizedImplementation &&
675
+ (reducing_inner_dims || ReducingInnerMostDims)) {
676
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
677
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
678
+ if (!data) {
679
+ if ((num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) || (RunningOnSycl)) {
680
+ data = static_cast<EvaluatorPointerType>(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve)));
681
+ m_result = data;
682
+ }
683
+ else {
684
+ return true;
685
+ }
686
+ }
687
+ Op reducer(m_reducer);
688
+ // For SYCL this call always returns false
+ if (internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
690
+ if (m_result) {
691
+ m_device.deallocate_temp(m_result);
692
+ m_result = NULL;
693
+ }
694
+ return true;
695
+ } else {
696
+ return (m_result != NULL);
697
+ }
698
+ }
699
+
700
+ bool preserving_inner_dims = true;
701
+ for (int i = 0; i < NumReducedDims; ++i) {
702
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
703
+ preserving_inner_dims &= m_reduced[NumInputDims - 1 - i];
704
+ } else {
705
+ preserving_inner_dims &= m_reduced[i];
706
+ }
707
+ }
708
+ if (internal::OuterReducer<Self, Op, Device>::HasOptimizedImplementation &&
709
+ preserving_inner_dims) {
710
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
711
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
712
+ if (!data) {
713
+ if ((num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) || (RunningOnSycl)) {
714
+ data = static_cast<EvaluatorPointerType>(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve)));
715
+ m_result = data;
716
+ }
717
+ else {
718
+ return true;
719
+ }
720
+ }
721
+ Op reducer(m_reducer);
722
+ // For SYCL this call always returns false
+ if (internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) {
724
+ if (m_result) {
725
+ m_device.deallocate_temp(m_result);
726
+ m_result = NULL;
727
+ }
728
+ return true;
729
+ } else {
730
+ return (m_result != NULL);
731
+ }
732
+ }
733
+ #if defined(EIGEN_USE_SYCL)
734
+ // If there is no optimised version for SYCL, the reduction expression
+ // must be broken into two subexpressions and the generic SYCL reducer used on the device.
736
+ if(RunningOnSycl) {
737
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
738
+ const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
739
+ if (!data) {
740
+ data = static_cast<EvaluatorPointerType>(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve)));
741
+ m_result = data;
742
+ }
743
+ Op reducer(m_reducer);
744
+ internal::GenericReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
745
+ return (m_result != NULL);
746
+ }
747
+ #endif
748
+ }
749
+ return true;
750
+ }
751
+
752
+ #ifdef EIGEN_USE_THREADS
753
+ template <typename EvalSubExprsCallback>
754
+ EIGEN_STRONG_INLINE
755
+ void
756
+ evalSubExprsIfNeededAsync(EvaluatorPointerType data,
757
+ EvalSubExprsCallback done) {
758
+ m_impl.evalSubExprsIfNeededAsync(NULL, [this, data, done](bool) {
759
+ done(evalSubExprsIfNeededCommon(data));
760
+ });
761
+ }
762
+ #endif
763
+
764
+ EIGEN_STRONG_INLINE
765
+ bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
766
+ m_impl.evalSubExprsIfNeeded(NULL);
767
+ return evalSubExprsIfNeededCommon(data);
768
+ }
769
+
770
+ EIGEN_STRONG_INLINE void cleanup() {
771
+ m_impl.cleanup();
772
+ if (m_result) {
773
+ m_device.deallocate_temp(m_result);
774
+ m_result = NULL;
775
+ }
776
+ }
777
+
778
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
779
+ {
780
+ if (( RunningFullReduction || RunningOnGPU) && m_result ) {
781
+ return *(m_result + index);
782
+ }
783
+ Op reducer(m_reducer);
784
+ if (ReducingInnerMostDims || RunningFullReduction) {
785
+ const Index num_values_to_reduce =
786
+ (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1];
787
+ return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index),
788
+ num_values_to_reduce, reducer);
789
+ } else {
790
+ typename Self::CoeffReturnType accum = reducer.initialize();
791
+ internal::GenericDimReducer<NumReducedDims-1, Self, Op>::reduce(*this, firstInput(index), reducer, &accum);
792
+ return reducer.finalize(accum);
793
+ }
794
+ }
795
+
796
+ // TODO(bsteiner): provide a more efficient implementation.
797
+ template<int LoadMode>
798
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
799
+ {
800
+ EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
801
+ eigen_assert(index + PacketSize - 1 < Index(internal::array_prod(dimensions())));
802
+
803
+ if (RunningOnGPU && m_result) {
804
+ return internal::pload<PacketReturnType>(m_result + index);
805
+ }
806
+
807
+ EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
808
+ if (ReducingInnerMostDims) {
809
+ const Index num_values_to_reduce =
810
+ (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1];
811
+ const Index firstIndex = firstInput(index);
812
+ for (Index i = 0; i < PacketSize; ++i) {
813
+ Op reducer(m_reducer);
814
+ values[i] = internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstIndex + i * num_values_to_reduce,
815
+ num_values_to_reduce, reducer);
816
+ }
817
+ } else if (PreservingInnerMostDims) {
818
+ const Index firstIndex = firstInput(index);
819
+ const int innermost_dim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : NumOutputDims - 1;
820
+ // TBD: extend this to the n innermost dimensions that we preserve.
+ if (((firstIndex % m_dimensions[innermost_dim]) + PacketSize - 1) < m_dimensions[innermost_dim]) {
822
+ Op reducer(m_reducer);
823
+ typename Self::PacketReturnType accum = reducer.template initializePacket<typename Self::PacketReturnType>();
824
+ internal::InnerMostDimPreserver<NumReducedDims-1, Self, Op>::reduce(*this, firstIndex, reducer, &accum);
825
+ return reducer.finalizePacket(accum);
826
+ } else {
827
+ for (int i = 0; i < PacketSize; ++i) {
828
+ values[i] = coeff(index + i);
829
+ }
830
+ }
831
+ } else {
832
+ for (int i = 0; i < PacketSize; ++i) {
833
+ values[i] = coeff(index + i);
834
+ }
835
+ }
836
+ PacketReturnType rslt = internal::pload<PacketReturnType>(values);
837
+ return rslt;
838
+ }
839
+
840
+ // Must be called after evalSubExprsIfNeeded().
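+ // Worked example (illustrative): with num_values_to_reduce = 100 inputs per output
+ // and functor_traits<Op>::Cost = 1, the returned cost is
+ //   100 * m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 100),
+ // i.e. the per-output cost grows linearly with the number of reduced values.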
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
842
+ if (RunningFullReduction && m_result) {
843
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
844
+ } else {
845
+ const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
846
+ const double compute_cost = num_values_to_reduce * internal::functor_traits<Op>::Cost;
847
+ return m_impl.costPerCoeff(vectorized) * num_values_to_reduce +
848
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
849
+ }
850
+ }
851
+
852
+ EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; }
853
+ EIGEN_DEVICE_FUNC const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
854
+ EIGEN_DEVICE_FUNC const Device& device() const { return m_device; }
855
+ #ifdef EIGEN_USE_SYCL
856
+ // binding placeholder accessors to a command group handler for SYCL
857
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
858
+ m_impl.bind(cgh);
859
+ m_result.bind(cgh);
860
+ }
861
+ #endif
862
+
863
+ private:
864
+ template <int, typename, typename> friend struct internal::GenericDimReducer;
865
+ template <typename, typename, bool, bool> friend struct internal::InnerMostDimReducer;
866
+ template <int, typename, typename, bool> friend struct internal::InnerMostDimPreserver;
867
+ template <typename S, typename O, typename D, bool V> friend struct internal::FullReducer;
868
+ #ifdef EIGEN_USE_THREADS
869
+ template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
870
+ #endif
871
+ #if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
872
+ template <int B, int N, typename S, typename R, typename I_> KERNEL_FRIEND void internal::FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*);
873
+ #if defined(EIGEN_HAS_GPU_FP16)
874
+ template <typename S, typename R, typename I_> KERNEL_FRIEND void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I_, internal::packet_traits<Eigen::half>::type*);
875
+ template <int B, int N, typename S, typename R, typename I_> KERNEL_FRIEND void internal::FullReductionKernelHalfFloat(R, const S, I_, half*, internal::packet_traits<Eigen::half>::type*);
876
+ template <int NPT, typename S, typename R, typename I_> KERNEL_FRIEND void internal::InnerReductionKernelHalfFloat(R, const S, I_, I_, half*);
877
+ #endif
878
+ template <int NPT, typename S, typename R, typename I_> KERNEL_FRIEND void internal::InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
879
+
880
+ template <int NPT, typename S, typename R, typename I_> KERNEL_FRIEND void internal::OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*);
881
+ #endif
882
+
883
+ #if defined(EIGEN_USE_SYCL)
884
+ template < typename Evaluator_, typename Op__> friend class TensorSycl::internal::GenericNondeterministicReducer;
885
+ // SYCL needs the generic reducer when the reduction algorithm is neither an inner, outer, nor full reducer
+ template <typename, typename, typename> friend struct internal::GenericReducer;
887
+ #endif
888
+
889
+
890
+ template <typename S, typename O, typename D> friend struct internal::InnerReducer;
891
+
892
+ struct BlockIteratorState {
893
+ Index input_dim;
894
+ Index output_size;
895
+ Index output_count;
896
+ };
897
+
898
+ // Returns the Index in the input tensor of the first value that needs to be
899
+ // used to compute the reduction at output index "index".
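+ // Worked example (illustrative, ColMajor): for input dimensions (4, 5, 6) with
+ // dimension 1 reduced, the output dimensions are (4, 6), m_outputStrides = {1, 4}
+ // and m_preservedStrides = {1, 20}. For output index 9 (output coordinates (1, 2)):
+ //   idx = 9 / 4 = 2  ->  startInput += 2 * 20 = 40, and index becomes 1,
+ // then the remaining inner index adds 1 * m_preservedStrides[0] = 1, giving input
+ // index 41, i.e. the element at input coordinates (1, 0, 2).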
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
901
+ if (ReducingInnerMostDims) {
902
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
903
+ return index * m_preservedStrides[0];
904
+ } else {
905
+ return index * m_preservedStrides[NumPreservedStrides - 1];
906
+ }
907
+ }
908
+ // TBD: optimize the case where we preserve the innermost dimensions.
909
+ Index startInput = 0;
910
+ if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
911
+ for (int i = NumOutputDims - 1; i > 0; --i) {
912
+ // This is index_i in the output tensor.
913
+ const Index idx = index / m_outputStrides[i];
914
+ startInput += idx * m_preservedStrides[i];
915
+ index -= idx * m_outputStrides[i];
916
+ }
917
+ if (PreservingInnerMostDims) {
918
+ eigen_assert(m_preservedStrides[0] == 1);
919
+ startInput += index;
920
+ } else {
921
+ startInput += index * m_preservedStrides[0];
922
+ }
923
+ } else {
924
+ for (int i = 0; i < NumOutputDims - 1; ++i) {
925
+ // This is index_i in the output tensor.
926
+ const Index idx = index / m_outputStrides[i];
927
+ startInput += idx * m_preservedStrides[i];
928
+ index -= idx * m_outputStrides[i];
929
+ }
930
+ if (PreservingInnerMostDims) {
931
+ eigen_assert(m_preservedStrides[NumPreservedStrides - 1] == 1);
932
+ startInput += index;
933
+ } else {
934
+ startInput += index * m_preservedStrides[NumPreservedStrides - 1];
935
+ }
936
+ }
937
+ return startInput;
938
+ }
939
+
940
+ // Bitmap indicating if an input dimension is reduced or not.
941
+ array<bool, NumInputDims> m_reduced;
942
+ // Dimensions of the output of the operation.
943
+ Dimensions m_dimensions;
944
+ // Precomputed strides for the output tensor.
945
+ array<Index, NumOutputDims> m_outputStrides;
946
+ array<internal::TensorIntDivisor<Index>, NumOutputDims> m_fastOutputStrides;
947
+ array<Index, NumPreservedStrides> m_preservedStrides;
948
+ // Map from output to input dimension index.
949
+ array<Index, NumOutputDims> m_output_to_input_dim_map;
950
+ // How many values go into each reduction
951
+ Index m_numValuesToReduce;
952
+
953
+ // Subset of strides of the input tensor for the reduced dimensions.
954
+ // Indexed by reduced dimensions.
955
+ array<Index, NumReducedDims> m_reducedStrides;
956
+ // Size of the input dimensions that are reduced.
957
+ // Indexed by reduced dimensions.
958
+ array<Index, NumReducedDims> m_reducedDims;
959
+
960
+ // Evaluator for the input expression.
961
+ TensorEvaluator<ArgType, Device> m_impl;
962
+
963
+ // Operation to apply for computing the reduction.
964
+ Op m_reducer;
965
+
966
+ EvaluatorPointerType m_result;
967
+
968
+ const Device EIGEN_DEVICE_REF m_device;
969
+ };
970
+
971
+ template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
972
+ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
973
+ : public TensorReductionEvaluatorBase<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> {
974
+ typedef TensorReductionEvaluatorBase<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Base;
975
+ EIGEN_STRONG_INLINE TensorEvaluator(const typename Base::XprType& op, const Device& device) : Base(op, device){}
976
+ };
977
+
978
+
979
+ template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_>
980
+ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Eigen::SyclDevice>
981
+ : public TensorReductionEvaluatorBase<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Eigen::SyclDevice> {
982
+
983
+ typedef TensorReductionEvaluatorBase<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Eigen::SyclDevice> Base;
984
+ EIGEN_STRONG_INLINE TensorEvaluator(const typename Base::XprType& op, const Eigen::SyclDevice& device) : Base(op, device){}
985
+ // The coeff function in the base class uses a recursive method which is not standard layout and cannot be used in a SYCL kernel.
+ // Therefore the coeff function is overridden for the SYCL kernel.
987
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Base::CoeffReturnType coeff(typename Base::Index index) const {
988
+ return *(this->data() + index);
989
+ }
990
+ // The packet function in the base class uses a recursive method which is not standard layout and cannot be used in a SYCL kernel.
+ // Therefore the packet function is overridden for the SYCL kernel.
992
+ template<int LoadMode>
993
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Base::PacketReturnType packet(typename Base::Index index) const {
994
+ return internal::pload<typename Base::PacketReturnType>(this->data() + index);
995
+ }
996
+ };
997
+
998
+ } // end namespace Eigen
999
+
1000
+ #endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H