tomoto 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (420) hide show
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE.txt +22 -0
  4. data/README.md +123 -0
  5. data/ext/tomoto/ext.cpp +245 -0
  6. data/ext/tomoto/extconf.rb +28 -0
  7. data/lib/tomoto.rb +12 -0
  8. data/lib/tomoto/ct.rb +11 -0
  9. data/lib/tomoto/hdp.rb +11 -0
  10. data/lib/tomoto/lda.rb +67 -0
  11. data/lib/tomoto/version.rb +3 -0
  12. data/vendor/EigenRand/EigenRand/Core.h +1139 -0
  13. data/vendor/EigenRand/EigenRand/Dists/Basic.h +111 -0
  14. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +877 -0
  15. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +108 -0
  16. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +626 -0
  17. data/vendor/EigenRand/EigenRand/EigenRand +19 -0
  18. data/vendor/EigenRand/EigenRand/Macro.h +24 -0
  19. data/vendor/EigenRand/EigenRand/MorePacketMath.h +978 -0
  20. data/vendor/EigenRand/EigenRand/PacketFilter.h +286 -0
  21. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +624 -0
  22. data/vendor/EigenRand/EigenRand/RandUtils.h +413 -0
  23. data/vendor/EigenRand/EigenRand/doc.h +220 -0
  24. data/vendor/EigenRand/LICENSE +21 -0
  25. data/vendor/EigenRand/README.md +288 -0
  26. data/vendor/eigen/COPYING.BSD +26 -0
  27. data/vendor/eigen/COPYING.GPL +674 -0
  28. data/vendor/eigen/COPYING.LGPL +502 -0
  29. data/vendor/eigen/COPYING.MINPACK +52 -0
  30. data/vendor/eigen/COPYING.MPL2 +373 -0
  31. data/vendor/eigen/COPYING.README +18 -0
  32. data/vendor/eigen/Eigen/CMakeLists.txt +19 -0
  33. data/vendor/eigen/Eigen/Cholesky +46 -0
  34. data/vendor/eigen/Eigen/CholmodSupport +48 -0
  35. data/vendor/eigen/Eigen/Core +537 -0
  36. data/vendor/eigen/Eigen/Dense +7 -0
  37. data/vendor/eigen/Eigen/Eigen +2 -0
  38. data/vendor/eigen/Eigen/Eigenvalues +61 -0
  39. data/vendor/eigen/Eigen/Geometry +62 -0
  40. data/vendor/eigen/Eigen/Householder +30 -0
  41. data/vendor/eigen/Eigen/IterativeLinearSolvers +48 -0
  42. data/vendor/eigen/Eigen/Jacobi +33 -0
  43. data/vendor/eigen/Eigen/LU +50 -0
  44. data/vendor/eigen/Eigen/MetisSupport +35 -0
  45. data/vendor/eigen/Eigen/OrderingMethods +73 -0
  46. data/vendor/eigen/Eigen/PaStiXSupport +48 -0
  47. data/vendor/eigen/Eigen/PardisoSupport +35 -0
  48. data/vendor/eigen/Eigen/QR +51 -0
  49. data/vendor/eigen/Eigen/QtAlignedMalloc +40 -0
  50. data/vendor/eigen/Eigen/SPQRSupport +34 -0
  51. data/vendor/eigen/Eigen/SVD +51 -0
  52. data/vendor/eigen/Eigen/Sparse +36 -0
  53. data/vendor/eigen/Eigen/SparseCholesky +45 -0
  54. data/vendor/eigen/Eigen/SparseCore +69 -0
  55. data/vendor/eigen/Eigen/SparseLU +46 -0
  56. data/vendor/eigen/Eigen/SparseQR +37 -0
  57. data/vendor/eigen/Eigen/StdDeque +27 -0
  58. data/vendor/eigen/Eigen/StdList +26 -0
  59. data/vendor/eigen/Eigen/StdVector +27 -0
  60. data/vendor/eigen/Eigen/SuperLUSupport +64 -0
  61. data/vendor/eigen/Eigen/UmfPackSupport +40 -0
  62. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +673 -0
  63. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +542 -0
  64. data/vendor/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  65. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +639 -0
  66. data/vendor/eigen/Eigen/src/Core/Array.h +329 -0
  67. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +226 -0
  68. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +209 -0
  69. data/vendor/eigen/Eigen/src/Core/Assign.h +90 -0
  70. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +935 -0
  71. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +178 -0
  72. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +353 -0
  73. data/vendor/eigen/Eigen/src/Core/Block.h +452 -0
  74. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +164 -0
  75. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +160 -0
  76. data/vendor/eigen/Eigen/src/Core/ConditionEstimator.h +175 -0
  77. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +1688 -0
  78. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +127 -0
  79. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +184 -0
  80. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +866 -0
  81. data/vendor/eigen/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  82. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  83. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +128 -0
  84. data/vendor/eigen/Eigen/src/Core/DenseBase.h +611 -0
  85. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +681 -0
  86. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +570 -0
  87. data/vendor/eigen/Eigen/src/Core/Diagonal.h +260 -0
  88. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +343 -0
  89. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +28 -0
  90. data/vendor/eigen/Eigen/src/Core/Dot.h +318 -0
  91. data/vendor/eigen/Eigen/src/Core/EigenBase.h +159 -0
  92. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +146 -0
  93. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +155 -0
  94. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +455 -0
  95. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +593 -0
  96. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +187 -0
  97. data/vendor/eigen/Eigen/src/Core/IO.h +225 -0
  98. data/vendor/eigen/Eigen/src/Core/Inverse.h +118 -0
  99. data/vendor/eigen/Eigen/src/Core/Map.h +171 -0
  100. data/vendor/eigen/Eigen/src/Core/MapBase.h +303 -0
  101. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +1415 -0
  102. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +101 -0
  103. data/vendor/eigen/Eigen/src/Core/Matrix.h +459 -0
  104. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +529 -0
  105. data/vendor/eigen/Eigen/src/Core/NestByValue.h +110 -0
  106. data/vendor/eigen/Eigen/src/Core/NoAlias.h +108 -0
  107. data/vendor/eigen/Eigen/src/Core/NumTraits.h +248 -0
  108. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +633 -0
  109. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +1035 -0
  110. data/vendor/eigen/Eigen/src/Core/Product.h +186 -0
  111. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +1112 -0
  112. data/vendor/eigen/Eigen/src/Core/Random.h +182 -0
  113. data/vendor/eigen/Eigen/src/Core/Redux.h +505 -0
  114. data/vendor/eigen/Eigen/src/Core/Ref.h +283 -0
  115. data/vendor/eigen/Eigen/src/Core/Replicate.h +142 -0
  116. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +117 -0
  117. data/vendor/eigen/Eigen/src/Core/Reverse.h +211 -0
  118. data/vendor/eigen/Eigen/src/Core/Select.h +162 -0
  119. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +352 -0
  120. data/vendor/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  121. data/vendor/eigen/Eigen/src/Core/Solve.h +188 -0
  122. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +235 -0
  123. data/vendor/eigen/Eigen/src/Core/SolverBase.h +130 -0
  124. data/vendor/eigen/Eigen/src/Core/StableNorm.h +221 -0
  125. data/vendor/eigen/Eigen/src/Core/Stride.h +111 -0
  126. data/vendor/eigen/Eigen/src/Core/Swap.h +67 -0
  127. data/vendor/eigen/Eigen/src/Core/Transpose.h +403 -0
  128. data/vendor/eigen/Eigen/src/Core/Transpositions.h +407 -0
  129. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +983 -0
  130. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +96 -0
  131. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +695 -0
  132. data/vendor/eigen/Eigen/src/Core/Visitor.h +273 -0
  133. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +451 -0
  134. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +439 -0
  135. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +637 -0
  136. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +51 -0
  137. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +391 -0
  138. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1316 -0
  139. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +430 -0
  140. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +322 -0
  141. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +1061 -0
  142. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +103 -0
  143. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +674 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/CUDA/MathFunctions.h +91 -0
  145. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +333 -0
  146. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +1124 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +212 -0
  148. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +29 -0
  149. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +490 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +91 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +760 -0
  153. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +471 -0
  154. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +562 -0
  155. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +895 -0
  156. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +77 -0
  157. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +397 -0
  158. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +137 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +945 -0
  160. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +168 -0
  161. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +475 -0
  162. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +188 -0
  163. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +136 -0
  164. data/vendor/eigen/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  165. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +792 -0
  166. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2156 -0
  167. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +492 -0
  168. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +311 -0
  169. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  170. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +122 -0
  171. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +619 -0
  172. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  173. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +163 -0
  174. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +521 -0
  175. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +287 -0
  176. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +260 -0
  177. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  178. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  179. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +93 -0
  180. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +466 -0
  181. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +315 -0
  182. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  183. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  184. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +335 -0
  185. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +163 -0
  186. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +145 -0
  187. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +398 -0
  188. data/vendor/eigen/Eigen/src/Core/util/Constants.h +547 -0
  189. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +83 -0
  190. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +302 -0
  191. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +130 -0
  192. data/vendor/eigen/Eigen/src/Core/util/Macros.h +1001 -0
  193. data/vendor/eigen/Eigen/src/Core/util/Memory.h +993 -0
  194. data/vendor/eigen/Eigen/src/Core/util/Meta.h +534 -0
  195. data/vendor/eigen/Eigen/src/Core/util/NonMPL2.h +3 -0
  196. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +27 -0
  197. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +218 -0
  198. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +821 -0
  199. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  200. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +459 -0
  201. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  202. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  203. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  204. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  205. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  206. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  207. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +654 -0
  208. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +546 -0
  209. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  210. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +870 -0
  211. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  212. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +556 -0
  213. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +392 -0
  214. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +247 -0
  215. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +114 -0
  216. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +497 -0
  217. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +282 -0
  218. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +234 -0
  219. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +195 -0
  220. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +814 -0
  221. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +199 -0
  222. data/vendor/eigen/Eigen/src/Geometry/RotationBase.h +206 -0
  223. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +170 -0
  224. data/vendor/eigen/Eigen/src/Geometry/Transform.h +1542 -0
  225. data/vendor/eigen/Eigen/src/Geometry/Translation.h +208 -0
  226. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +166 -0
  227. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +161 -0
  228. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +103 -0
  229. data/vendor/eigen/Eigen/src/Householder/Householder.h +172 -0
  230. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +470 -0
  231. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  232. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +228 -0
  233. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +246 -0
  234. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +400 -0
  235. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +462 -0
  236. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +394 -0
  237. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +216 -0
  238. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +115 -0
  239. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +462 -0
  240. data/vendor/eigen/Eigen/src/LU/Determinant.h +101 -0
  241. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +891 -0
  242. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +415 -0
  243. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +611 -0
  244. data/vendor/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  245. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +338 -0
  246. data/vendor/eigen/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  247. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +445 -0
  248. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +1843 -0
  249. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +157 -0
  250. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  251. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +543 -0
  252. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +653 -0
  253. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  254. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +562 -0
  255. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +676 -0
  256. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +409 -0
  257. data/vendor/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  258. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +313 -0
  259. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +1246 -0
  260. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +804 -0
  261. data/vendor/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  262. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +315 -0
  263. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  264. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +689 -0
  265. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +199 -0
  266. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +377 -0
  267. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +258 -0
  268. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  269. data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  270. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +216 -0
  271. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +603 -0
  272. data/vendor/eigen/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  273. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +341 -0
  274. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +726 -0
  275. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +148 -0
  276. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +320 -0
  277. data/vendor/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  278. data/vendor/eigen/Eigen/src/SparseCore/SparseDot.h +98 -0
  279. data/vendor/eigen/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  280. data/vendor/eigen/Eigen/src/SparseCore/SparseMap.h +305 -0
  281. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +1403 -0
  282. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +405 -0
  283. data/vendor/eigen/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  284. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +169 -0
  285. data/vendor/eigen/Eigen/src/SparseCore/SparseRedux.h +49 -0
  286. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +397 -0
  287. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +656 -0
  288. data/vendor/eigen/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  289. data/vendor/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  290. data/vendor/eigen/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  291. data/vendor/eigen/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  292. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +178 -0
  293. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +478 -0
  294. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +253 -0
  295. data/vendor/eigen/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  296. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +773 -0
  297. data/vendor/eigen/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  298. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  299. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  300. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +301 -0
  301. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  302. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  303. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  304. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  305. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  306. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  307. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  308. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  309. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  310. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  311. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  312. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  313. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +745 -0
  314. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +126 -0
  315. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +106 -0
  316. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +131 -0
  317. data/vendor/eigen/Eigen/src/StlSupport/details.h +84 -0
  318. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +1027 -0
  319. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +506 -0
  320. data/vendor/eigen/Eigen/src/misc/Image.h +82 -0
  321. data/vendor/eigen/Eigen/src/misc/Kernel.h +79 -0
  322. data/vendor/eigen/Eigen/src/misc/RealSvd2x2.h +55 -0
  323. data/vendor/eigen/Eigen/src/misc/blas.h +440 -0
  324. data/vendor/eigen/Eigen/src/misc/lapack.h +152 -0
  325. data/vendor/eigen/Eigen/src/misc/lapacke.h +16291 -0
  326. data/vendor/eigen/Eigen/src/misc/lapacke_mangling.h +17 -0
  327. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +332 -0
  328. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +552 -0
  329. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +1058 -0
  330. data/vendor/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  331. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +163 -0
  332. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  333. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +85 -0
  334. data/vendor/eigen/README.md +3 -0
  335. data/vendor/eigen/bench/README.txt +55 -0
  336. data/vendor/eigen/bench/btl/COPYING +340 -0
  337. data/vendor/eigen/bench/btl/README +154 -0
  338. data/vendor/eigen/bench/tensors/README +21 -0
  339. data/vendor/eigen/blas/README.txt +6 -0
  340. data/vendor/eigen/demos/mandelbrot/README +10 -0
  341. data/vendor/eigen/demos/mix_eigen_and_c/README +9 -0
  342. data/vendor/eigen/demos/opengl/README +13 -0
  343. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +1760 -0
  344. data/vendor/eigen/unsupported/README.txt +50 -0
  345. data/vendor/tomotopy/LICENSE +21 -0
  346. data/vendor/tomotopy/README.kr.rst +375 -0
  347. data/vendor/tomotopy/README.rst +382 -0
  348. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +362 -0
  349. data/vendor/tomotopy/src/Labeling/FoRelevance.h +88 -0
  350. data/vendor/tomotopy/src/Labeling/Labeler.h +50 -0
  351. data/vendor/tomotopy/src/TopicModel/CT.h +37 -0
  352. data/vendor/tomotopy/src/TopicModel/CTModel.cpp +13 -0
  353. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +293 -0
  354. data/vendor/tomotopy/src/TopicModel/DMR.h +51 -0
  355. data/vendor/tomotopy/src/TopicModel/DMRModel.cpp +13 -0
  356. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +374 -0
  357. data/vendor/tomotopy/src/TopicModel/DT.h +65 -0
  358. data/vendor/tomotopy/src/TopicModel/DTM.h +22 -0
  359. data/vendor/tomotopy/src/TopicModel/DTModel.cpp +15 -0
  360. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +572 -0
  361. data/vendor/tomotopy/src/TopicModel/GDMR.h +37 -0
  362. data/vendor/tomotopy/src/TopicModel/GDMRModel.cpp +14 -0
  363. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +485 -0
  364. data/vendor/tomotopy/src/TopicModel/HDP.h +74 -0
  365. data/vendor/tomotopy/src/TopicModel/HDPModel.cpp +13 -0
  366. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +592 -0
  367. data/vendor/tomotopy/src/TopicModel/HLDA.h +40 -0
  368. data/vendor/tomotopy/src/TopicModel/HLDAModel.cpp +13 -0
  369. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +681 -0
  370. data/vendor/tomotopy/src/TopicModel/HPA.h +27 -0
  371. data/vendor/tomotopy/src/TopicModel/HPAModel.cpp +21 -0
  372. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +588 -0
  373. data/vendor/tomotopy/src/TopicModel/LDA.h +144 -0
  374. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +442 -0
  375. data/vendor/tomotopy/src/TopicModel/LDAModel.cpp +13 -0
  376. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +1058 -0
  377. data/vendor/tomotopy/src/TopicModel/LLDA.h +45 -0
  378. data/vendor/tomotopy/src/TopicModel/LLDAModel.cpp +13 -0
  379. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +203 -0
  380. data/vendor/tomotopy/src/TopicModel/MGLDA.h +63 -0
  381. data/vendor/tomotopy/src/TopicModel/MGLDAModel.cpp +17 -0
  382. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +558 -0
  383. data/vendor/tomotopy/src/TopicModel/PA.h +43 -0
  384. data/vendor/tomotopy/src/TopicModel/PAModel.cpp +13 -0
  385. data/vendor/tomotopy/src/TopicModel/PAModel.hpp +467 -0
  386. data/vendor/tomotopy/src/TopicModel/PLDA.h +17 -0
  387. data/vendor/tomotopy/src/TopicModel/PLDAModel.cpp +13 -0
  388. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +214 -0
  389. data/vendor/tomotopy/src/TopicModel/SLDA.h +54 -0
  390. data/vendor/tomotopy/src/TopicModel/SLDAModel.cpp +17 -0
  391. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +456 -0
  392. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +692 -0
  393. data/vendor/tomotopy/src/Utils/AliasMethod.hpp +169 -0
  394. data/vendor/tomotopy/src/Utils/Dictionary.h +80 -0
  395. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +181 -0
  396. data/vendor/tomotopy/src/Utils/LBFGS.h +202 -0
  397. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBacktracking.h +120 -0
  398. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBracketing.h +122 -0
  399. data/vendor/tomotopy/src/Utils/LBFGS/Param.h +213 -0
  400. data/vendor/tomotopy/src/Utils/LUT.hpp +82 -0
  401. data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +69 -0
  402. data/vendor/tomotopy/src/Utils/PolyaGamma.hpp +200 -0
  403. data/vendor/tomotopy/src/Utils/PolyaGammaHybrid.hpp +672 -0
  404. data/vendor/tomotopy/src/Utils/ThreadPool.hpp +150 -0
  405. data/vendor/tomotopy/src/Utils/Trie.hpp +220 -0
  406. data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +94 -0
  407. data/vendor/tomotopy/src/Utils/Utils.hpp +337 -0
  408. data/vendor/tomotopy/src/Utils/avx_gamma.h +46 -0
  409. data/vendor/tomotopy/src/Utils/avx_mathfun.h +736 -0
  410. data/vendor/tomotopy/src/Utils/exception.h +28 -0
  411. data/vendor/tomotopy/src/Utils/math.h +281 -0
  412. data/vendor/tomotopy/src/Utils/rtnorm.hpp +2690 -0
  413. data/vendor/tomotopy/src/Utils/sample.hpp +192 -0
  414. data/vendor/tomotopy/src/Utils/serializer.hpp +695 -0
  415. data/vendor/tomotopy/src/Utils/slp.hpp +131 -0
  416. data/vendor/tomotopy/src/Utils/sse_gamma.h +48 -0
  417. data/vendor/tomotopy/src/Utils/sse_mathfun.h +710 -0
  418. data/vendor/tomotopy/src/Utils/text.hpp +49 -0
  419. data/vendor/tomotopy/src/Utils/tvector.hpp +543 -0
  420. metadata +531 -0
@@ -0,0 +1,21 @@
1
+ The tensor benchmark suite is made of several parts.
2
+
3
+ The first part is a generic suite, in which each benchmark comes in 2 flavors: one that runs on CPU, and one that runs on GPU.
4
+
5
+ To compile the floating point CPU benchmarks, simply call:
6
+ g++ tensor_benchmarks_cpu.cc benchmark_main.cc -I ../../ -std=c++11 -O3 -DNDEBUG -pthread -mavx -o benchmarks_cpu
7
+
8
+ To compile the floating point GPU benchmarks, simply call:
9
+ nvcc tensor_benchmarks_gpu.cu benchmark_main.cc -I ../../ -std=c++11 -O2 -DNDEBUG -use_fast_math -ftz=true -arch compute_35 -o benchmarks_gpu
10
+
11
+ We also provide a version of the generic GPU tensor benchmarks that uses half floats (aka fp16) instead of regular floats. To compile these benchmarks, simply call the command line below. You'll need a recent GPU that supports compute capability 5.3 or higher to run them and nvcc 7.5 or higher to compile the code.
12
+ nvcc tensor_benchmarks_fp16_gpu.cu benchmark_main.cc -I ../../ -std=c++11 -O2 -DNDEBUG -use_fast_math -ftz=true -arch compute_53 -o benchmarks_fp16_gpu
13
+
14
+ Last but not least, we also provide a suite of benchmarks to measure the scalability of the contraction code on CPU. To compile these benchmarks, call
15
+ g++ contraction_benchmarks_cpu.cc benchmark_main.cc -I ../../ -std=c++11 -O3 -DNDEBUG -pthread -mavx -o benchmarks_cpu
16
+
17
+ To compile the benchmark for SYCL, using ComputeCpp you currently need 2 passes (only for translation units containing device code):
18
+ 1. The device compilation pass that generates the device code (SYCL kernels and referenced device functions) and glue code needed by the host compiler to reference the device code from host code.
19
+ {ComputeCpp_ROOT}/bin/compute++ -I ../../ -I {ComputeCpp_ROOT}/include/ -std=c++11 -mllvm -inline-threshold=1000 -Wno-ignored-attributes -sycl -intelspirmetadata -emit-llvm -no-serial-memop -sycl-compress-name -DBUILD_PLATFORM_SPIR -DNDEBUG -O3 -c tensor_benchmarks_sycl.cc
20
+ 2. The host compilation pass that generates the final host binary.
21
+ clang++-3.7 -include tensor_benchmarks_sycl.sycl benchmark_main.cc tensor_benchmarks_sycl.cc -pthread -I ../../ -I {ComputeCpp_ROOT}/include/ -L {ComputeCpp_ROOT}/lib/ -lComputeCpp -lOpenCL -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11 -o tensor_benchmark_sycl
@@ -0,0 +1,6 @@
1
+
2
+ This directory contains a BLAS library built on top of Eigen.
3
+
4
+ This module is not built by default. In order to compile it, you need to
5
+ type 'make blas' from within your build dir.
6
+
@@ -0,0 +1,10 @@
1
+ *** Mandelbrot demo ***
2
+
3
+ Controls:
4
+ * Left mouse button to center view at a point.
5
+ * Drag vertically with left mouse button to zoom in and out.
6
+
7
+ Be sure to enable SSE2 or AltiVec to improve performance.
8
+
9
+ The number of iterations, and the choice between single and double precision, are
10
+ determined at runtime depending on the zoom level.
@@ -0,0 +1,9 @@
1
+ This is an example of how one can wrap some of Eigen into a C library.
2
+
3
+ To try this with GCC, do:
4
+
5
+ g++ -c binary_library.cpp -O2 -msse2 -I ../..
6
+ gcc example.c binary_library.o -o example -lstdc++
7
+ ./example
8
+
9
+ TODO: add CMakeLists, add more explanations here
@@ -0,0 +1,13 @@
1
+
2
+ Navigation:
3
+ left button: rotate around the target
4
+ middle button: zoom
5
+ left button + ctrl: quake rotate (rotate around camera position)
6
+ middle button + ctrl: walk (progress along camera's z direction)
7
+ left button: pan (translate in the XY camera's plane)
8
+
9
+ R : move the camera to initial position
10
+ A : start/stop animation
11
+ C : clear the animation
12
+ G : add a key frame
13
+
@@ -0,0 +1,1760 @@
1
+ # Eigen Tensors {#eigen_tensors}
2
+
3
+ Tensors are multidimensional arrays of elements. Elements are typically scalars,
4
+ but more complex types such as strings are also supported.
5
+
6
+ [TOC]
7
+
8
+ ## Tensor Classes
9
+
10
+ You can manipulate a tensor with one of the following classes. They all are in
11
+ the namespace `::Eigen`.
12
+
13
+
14
+ ### Class Tensor<data_type, rank>
15
+
16
+ This is the class to use to create a tensor and allocate memory for it. The
17
+ class is templatized with the tensor datatype, such as float or int, and the
18
+ tensor rank. The rank is the number of dimensions, for example rank 2 is a
19
+ matrix.
20
+
21
+ Tensors of this class are resizable. For example, if you assign a tensor of a
22
+ different size to a Tensor, that tensor is resized to match its new value.
23
+
24
+ #### Constructor `Tensor<data_type, rank>(size0, size1, ...)`
25
+
26
+ Constructor for a Tensor. The constructor must be passed `rank` integers
27
+ indicating the sizes of the instance along each of the the `rank`
28
+ dimensions.
29
+
30
+ // Create a tensor of rank 3 of sizes 2, 3, 4. This tensor owns
31
+ // memory to hold 24 floating point values (24 = 2 x 3 x 4).
32
+ Tensor<float, 3> t_3d(2, 3, 4);
33
+
34
+ // Resize t_3d by assigning a tensor of different sizes, but same rank.
35
+ t_3d = Tensor<float, 3>(3, 4, 3);
36
+
37
+ #### Constructor `Tensor<data_type, rank>(size_array)`
38
+
39
+ Constructor where the sizes for the constructor are specified as an array of
40
+ values instead of an explicitly list of parameters. The array type to use is
41
+ `Eigen::array<Eigen::Index>`. The array can be constructed automatically
42
+ from an initializer list.
43
+
44
+ // Create a tensor of strings of rank 2 with sizes 5, 7.
45
+ Tensor<string, 2> t_2d({5, 7});
46
+
47
+
48
+ ### Class `TensorFixedSize<data_type, Sizes<size0, size1, ...>>`
49
+
50
+ Class to use for tensors of fixed size, where the size is known at compile
51
+ time. Fixed sized tensors can provide very fast computations because all their
52
+ dimensions are known by the compiler. FixedSize tensors are not resizable.
53
+
54
+ If the total number of elements in a fixed size tensor is small enough the
55
+ tensor data is held on the stack and does not cause heap allocation and free.
56
+
57
+ // Create a 4 x 3 tensor of floats.
58
+ TensorFixedSize<float, Sizes<4, 3>> t_4x3;
59
+
60
+ ### Class `TensorMap<Tensor<data_type, rank>>`
61
+
62
+ This is the class to use to create a tensor on top of memory allocated and
63
+ owned by another part of your code. It allows to view any piece of allocated
64
+ memory as a Tensor. Instances of this class do not own the memory where the
65
+ data are stored.
66
+
67
+ A TensorMap is not resizable because it does not own the memory where its data
68
+ are stored.
69
+
70
+ #### Constructor `TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)`
71
+
72
+ Constructor for a Tensor. The constructor must be passed a pointer to the
73
+ storage for the data, and "rank" size attributes. The storage has to be
74
+ large enough to hold all the data.
75
+
76
+ // Map a tensor of ints on top of stack-allocated storage.
77
+ int storage[128]; // 2 x 4 x 2 x 8 = 128
78
+ TensorMap<Tensor<int, 4>> t_4d(storage, 2, 4, 2, 8);
79
+
80
+ // The same storage can be viewed as a different tensor.
81
+ // You can also pass the sizes as an array.
82
+ TensorMap<Tensor<int, 2>> t_2d(storage, 16, 8);
83
+
84
+ // You can also map fixed-size tensors. Here we get a 1d view of
85
+ // the 2d fixed-size tensor.
86
+ TensorFixedSize<float, Sizes<4, 3>> t_4x3;
87
+ TensorMap<Tensor<float, 1>> t_12(t_4x3.data(), 12);
88
+
89
+
90
+ #### Class `TensorRef`
91
+
92
+ See Assigning to a TensorRef below.
93
+
94
+ ## Accessing Tensor Elements
95
+
96
+ #### `<data_type> tensor(index0, index1...)`
97
+
98
+ Return the element at position `(index0, index1...)` in tensor
99
+ `tensor`. You must pass as many parameters as the rank of `tensor`.
100
+ The expression can be used as an l-value to set the value of the element at the
101
+ specified position. The value returned is of the datatype of the tensor.
102
+
103
+ // Set the value of the element at position (0, 1, 0);
104
+ Tensor<float, 3> t_3d(2, 3, 4);
105
+ t_3d(0, 1, 0) = 12.0f;
106
+
107
+ // Initialize all elements to random values.
108
+ for (int i = 0; i < 2; ++i) {
109
+ for (int j = 0; j < 3; ++j) {
110
+ for (int k = 0; k < 4; ++k) {
111
+ t_3d(i, j, k) = ...some random value...;
112
+ }
113
+ }
114
+ }
115
+
116
+ // Print elements of a tensor.
117
+ for (int i = 0; i < 2; ++i) {
118
+ LOG(INFO) << t_3d(i, 0, 0);
119
+ }
120
+
121
+
122
+ ## TensorLayout
123
+
124
+ The tensor library supports 2 layouts: `ColMajor` (the default) and
125
+ `RowMajor`. Only the default column major layout is currently fully
126
+ supported, and it is therefore not recommended to attempt to use the row major
127
+ layout at the moment.
128
+
129
+ The layout of a tensor is optionally specified as part of its type. If not
130
+ specified explicitly column major is assumed.
131
+
132
+ Tensor<float, 3, ColMajor> col_major; // equivalent to Tensor<float, 3>
133
+ TensorMap<Tensor<float, 3, RowMajor> > row_major(data, ...);
134
+
135
+ All the arguments to an expression must use the same layout. Attempting to mix
136
+ different layouts will result in a compilation error.
137
+
138
+ It is possible to change the layout of a tensor or an expression using the
139
+ `swap_layout()` method. Note that this will also reverse the order of the
140
+ dimensions.
141
+
142
+ Tensor<float, 2, ColMajor> col_major(2, 4);
143
+ Tensor<float, 2, RowMajor> row_major(2, 4);
144
+
145
+ Tensor<float, 2> col_major_result = col_major; // ok, layouts match
146
+ Tensor<float, 2> col_major_result = row_major; // will not compile
147
+
148
+ // Simple layout swap
149
+ col_major_result = row_major.swap_layout();
150
+ eigen_assert(col_major_result.dimension(0) == 4);
151
+ eigen_assert(col_major_result.dimension(1) == 2);
152
+
153
+ // Swap the layout and preserve the order of the dimensions
154
+ array<int, 2> shuffle(1, 0);
155
+ col_major_result = row_major.swap_layout().shuffle(shuffle);
156
+ eigen_assert(col_major_result.dimension(0) == 2);
157
+ eigen_assert(col_major_result.dimension(1) == 4);
158
+
159
+
160
+ ## Tensor Operations
161
+
162
+ The Eigen Tensor library provides a vast library of operations on Tensors:
163
+ numerical operations such as addition and multiplication, geometry operations
164
+ such as slicing and shuffling, etc. These operations are available as methods
165
+ of the Tensor classes, and in some cases as operator overloads. For example
166
+ the following code computes the elementwise addition of two tensors:
167
+
168
+ Tensor<float, 3> t1(2, 3, 4);
169
+ ...set some values in t1...
170
+ Tensor<float, 3> t2(2, 3, 4);
171
+ ...set some values in t2...
172
+ // Set t3 to the element wise sum of t1 and t2
173
+ Tensor<float, 3> t3 = t1 + t2;
174
+
175
+ While the code above looks easy enough, it is important to understand that the
176
+ expression `t1 + t2` is not actually adding the values of the tensors. The
177
+ expression instead constructs a "tensor operator" object of the class
178
+ TensorCwiseBinaryOp<scalar_sum>, which has references to the tensors
179
+ `t1` and `t2`. This is a small C++ object that knows how to add
180
+ `t1` and `t2`. It is only when the value of the expression is assigned
181
+ to the tensor `t3` that the addition is actually performed. Technically,
182
+ this happens through the overloading of `operator=()` in the Tensor class.
183
+
184
+ This mechanism for computing tensor expressions allows for lazy evaluation and
185
+ optimizations which are what make the tensor library very fast.
186
+
187
+ Of course, the tensor operators do nest, and the expression `t1 + t2 * 0.3f`
188
+ is actually represented with the (approximate) tree of operators:
189
+
190
+ TensorCwiseBinaryOp<scalar_sum>(t1, TensorCwiseUnaryOp<scalar_mul>(t2, 0.3f))
191
+
192
+
193
+ ### Tensor Operations and C++ "auto"
194
+
195
+ Because Tensor operations create tensor operators, the C++ `auto` keyword
196
+ does not have its intuitive meaning. Consider these 2 lines of code:
197
+
198
+ Tensor<float, 3> t3 = t1 + t2;
199
+ auto t4 = t1 + t2;
200
+
201
+ In the first line we allocate the tensor `t3` and it will contain the
202
+ result of the addition of `t1` and `t2`. In the second line, `t4`
203
+ is actually the tree of tensor operators that will compute the addition of
204
+ `t1` and `t2`. In fact, `t4` is *not* a tensor and you cannot get
205
+ the values of its elements:
206
+
207
+ Tensor<float, 3> t3 = t1 + t2;
208
+ cout << t3(0, 0, 0); // OK prints the value of t1(0, 0, 0) + t2(0, 0, 0)
209
+
210
+ auto t4 = t1 + t2;
211
+ cout << t4(0, 0, 0); // Compilation error!
212
+
213
+ When you use `auto` you do not get a Tensor as a result but instead a
214
+ non-evaluated expression. So only use `auto` to delay evaluation.
215
+
216
+ Unfortunately, there is no single underlying concrete type for holding
217
+ non-evaluated expressions, hence you have to use auto in the case when you do
218
+ want to hold non-evaluated expressions.
219
+
220
+ When you need the results of a set of tensor computations you have to assign the
221
+ result to a Tensor that will be capable of holding onto them. This can be
222
+ either a normal Tensor, a fixed size Tensor, or a TensorMap on an existing
223
+ piece of memory. All the following will work:
224
+
225
+ auto t4 = t1 + t2;
226
+
227
+ Tensor<float, 3> result = t4; // Could also be: result(t4);
228
+ cout << result(0, 0, 0);
229
+
230
+ TensorMap<Tensor<float, 4>> result(<a float* with enough space>, <size0>, ...) = t4;
231
+ cout << result(0, 0, 0);
232
+
233
+ TensorFixedSize<float, Sizes<size0, ...>> result = t4;
234
+ cout << result(0, 0, 0);
235
+
236
+ Until you need the results, you can keep the operation around, and even reuse
237
+ it for additional operations. As long as you keep the expression as an
238
+ operation, no computation is performed.
239
+
240
+ // One way to compute exp((t1 + t2) * 0.2f);
241
+ auto t3 = t1 + t2;
242
+ auto t4 = t3 * 0.2f;
243
+ auto t5 = t4.exp();
244
+ Tensor<float, 3> result = t5;
245
+
246
+ // Another way, exactly as efficient as the previous one:
247
+ Tensor<float, 3> result = ((t1 + t2) * 0.2f).exp();
248
+
249
+ ### Controlling When Expression are Evaluated
250
+
251
+ There are several ways to control when expressions are evaluated:
252
+
253
+ * Assignment to a Tensor, TensorFixedSize, or TensorMap.
254
+ * Use of the eval() method.
255
+ * Assignment to a TensorRef.
256
+
257
+ #### Assigning to a Tensor, TensorFixedSize, or TensorMap.
258
+
259
+ The most common way to evaluate an expression is to assign it to a Tensor. In
260
+ the example below, the `auto` declarations make the intermediate values
261
+ "Operations", not Tensors, and do not cause the expressions to be evaluated.
262
+ The assignment to the Tensor `result` causes the evaluation of all the
263
+ operations.
264
+
265
+ auto t3 = t1 + t2; // t3 is an Operation.
266
+ auto t4 = t3 * 0.2f; // t4 is an Operation.
267
+ auto t5 = t4.exp(); // t5 is an Operation.
268
+ Tensor<float, 3> result = t5; // The operations are evaluated.
269
+
270
+ If you know the ranks and sizes of the Operation value you can assign the
271
+ Operation to a TensorFixedSize instead of a Tensor, which is a bit more
272
+ efficient.
273
+
274
+ // We know that the result is a 4x4x2 tensor!
275
+ TensorFixedSize<float, Sizes<4, 4, 2>> result = t5;
276
+
277
+ Similarly, assigning an expression to a TensorMap causes its evaluation. Like
278
+ tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to
279
+ have the rank and sizes of the expression that are assigned to them.
280
+
281
+ #### Calling `eval()`.
282
+
283
+ When you compute large composite expressions, you sometimes want to tell Eigen
284
+ that an intermediate value in the expression tree is worth evaluating ahead of
285
+ time. This is done by inserting a call to the `eval()` method of the
286
+ expression Operation.
287
+
288
+ // The previous example could have been written:
289
+ Tensor<float, 3> result = ((t1 + t2) * 0.2f).exp();
290
+
291
+ // If you want to compute (t1 + t2) once ahead of time you can write:
292
+ Tensor<float, 3> result = ((t1 + t2).eval() * 0.2f).exp();
293
+
294
+ Semantically, calling `eval()` is equivalent to materializing the value of
295
+ the expression in a temporary Tensor of the right size. The code above in
296
+ effect does:
297
+
298
+ // .eval() knows the size!
299
+ TensorFixedSize<float, Sizes<4, 4, 2>> tmp = t1 + t2;
300
+ Tensor<float, 3> result = (tmp * 0.2f).exp();
301
+
302
+ Note that the return value of `eval()` is itself an Operation, so the
303
+ following code does not do what you may think:
304
+
305
+ // Here t3 is an evaluation Operation. t3 has not been evaluated yet.
306
+ auto t3 = (t1 + t2).eval();
307
+
308
+ // You can use t3 in another expression. Still no evaluation.
309
+ auto t4 = (t3 * 0.2f).exp();
310
+
311
+ // The value is evaluated when you assign the Operation to a Tensor, using
312
+ // an intermediate tensor to represent t3.
313
+ Tensor<float, 3> result = t4;
314
+
315
+ While in the examples above calling `eval()` does not make a difference in
316
+ performance, in other cases it can make a huge difference. In the expression
317
+ below the `broadcast()` expression causes the `X.maximum()` expression
318
+ to be evaluated many times:
319
+
320
+ Tensor<...> X ...;
321
+ Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast))
322
+ * beta).exp();
323
+
324
+ Inserting a call to `eval()` between the `maximum()` and
325
+ `reshape()` calls guarantees that maximum() is only computed once and
326
+ greatly speeds-up execution:
327
+
328
+ Tensor<...> Y =
329
+ ((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast))
330
+ * beta).exp();
331
+
332
+ In the other example below, the tensor `Y` is both used in the expression
333
+ and its assignment. This is an aliasing problem and if the evaluation is not
334
+ done in the right order Y will be updated incrementally during the evaluation
335
+ resulting in bogus results:
336
+
337
+ Tensor<...> Y ...;
338
+ Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast));
339
+
340
+ Inserting a call to `eval()` between the `sum()` and `reshape()`
341
+ expressions ensures that the sum is computed before any updates to `Y` are
342
+ done.
343
+
344
+ Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
345
+
346
+ Note that an eval around the full right hand side expression is not needed
347
+ because the generated code has to compute the i-th value of the right hand side
348
+ before assigning it to the left hand side.
349
+
350
+ However, if you were assigning the expression value to a shuffle of `Y`
351
+ then you would need to force an eval for correctness by adding an `eval()`
352
+ call for the right hand side:
353
+
354
+ Y.shuffle(...) =
355
+ (Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval();
356
+
357
+
358
+ #### Assigning to a `TensorRef`.
359
+
360
+ If you need to access only a few elements from the value of an expression you
361
+ can avoid materializing the value in a full tensor by using a TensorRef.
362
+
363
+ A TensorRef is a small wrapper class for any Eigen Operation. It provides
364
+ overloads for the `()` operator that let you access individual values in
365
+ the expression. TensorRef is convenient, because the Operation themselves do
366
+ not provide a way to access individual elements.
367
+
368
+ // Create a TensorRef for the expression. The expression is not
369
+ // evaluated yet.
370
+ TensorRef<Tensor<float, 3> > ref = ((t1 + t2) * 0.2f).exp();
371
+
372
+ // Use "ref" to access individual elements. The expression is evaluated
373
+ // on the fly.
374
+ float at_0 = ref(0, 0, 0);
375
+ cout << ref(0, 1, 0);
376
+
377
+ Only use TensorRef when you need a subset of the values of the expression.
378
+ TensorRef only computes the values you access. However note that if you are
379
+ going to access all the values it will be much faster to materialize the
380
+ results in a Tensor first.
381
+
382
+ In some cases, if the full Tensor result would be very large, you may save
383
+ memory by accessing it as a TensorRef. But not always. So don't count on it.
384
+
385
+
386
+ ### Controlling How Expressions Are Evaluated
387
+
388
+ The tensor library provides several implementations of the various operations
389
+ such as contractions and convolutions. The implementations are optimized for
390
+ different environments: single threaded on CPU, multi threaded on CPU, or on a
391
+ GPU using cuda. Additional implementations may be added later.
392
+
393
+ You can choose which implementation to use with the `device()` call. If
394
+ you do not choose an implementation explicitly the default implementation that
395
+ uses a single thread on the CPU is used.
396
+
397
+ The default implementation has been optimized for recent Intel CPUs, taking
398
+ advantage of SSE, AVX, and FMA instructions. Work is ongoing to tune the
399
+ library on ARM CPUs. Note that you need to pass compiler-dependent flags
400
+ to enable the use of SSE, AVX, and other instructions.
401
+
402
+ For example, the following code adds two tensors using the default
403
+ single-threaded CPU implementation:
404
+
405
+ Tensor<float, 2> a(30, 40);
406
+ Tensor<float, 2> b(30, 40);
407
+ Tensor<float, 2> c = a + b;
408
+
409
+ To choose a different implementation you have to insert a `device()` call
410
+ before the assignment of the result. For technical C++ reasons this requires
411
+ that the Tensor for the result be declared on its own. This means that you
412
+ have to know the size of the result.
413
+
414
+ Eigen::Tensor<float, 2> c(30, 40);
415
+ c.device(...) = a + b;
416
+
417
+ The call to `device()` must be the last call on the left of the operator=.
418
+
419
+ You must pass to the `device()` call an Eigen device object. There are
420
+ presently three devices you can use: DefaultDevice, ThreadPoolDevice and
421
+ GpuDevice.
422
+
423
+
424
+ #### Evaluating With the DefaultDevice
425
+
426
+ This is exactly the same as not inserting a `device()` call.
427
+
428
+ DefaultDevice my_device;
429
+ c.device(my_device) = a + b;
430
+
431
+ #### Evaluating with a Thread Pool
432
+
433
+ // Create the Eigen ThreadPoolDevice.
434
+ Eigen::ThreadPoolDevice my_device(4 /* number of threads to use */);
435
+
436
+ // Now just use the device when evaluating expressions.
437
+ Eigen::Tensor<float, 2> c(30, 50);
438
+ c.device(my_device) = a.contract(b, dot_product_dims);
439
+
440
+
441
+ #### Evaluating On GPU
442
+
443
+ This is presently a bit more complicated than just using a thread pool device.
444
+ You need to create a GPU device but you also need to explicitly allocate the
445
+ memory for tensors with cuda.
446
+
447
+
448
+ ## API Reference
449
+
450
+ ### Datatypes
451
+
452
+ In the documentation of the tensor methods and Operation we mention datatypes
453
+ that are tensor-type specific:
454
+
455
+ #### `<Tensor-Type>::``Dimensions`
456
+
457
+ Acts like an array of ints. Has an `int size` attribute, and can be
458
+ indexed like an array to access individual values. Used to represent the
459
+ dimensions of a tensor. See `dimensions()`.
460
+
461
+ #### `<Tensor-Type>::``Index`
462
+
463
+ Acts like an `int`. Used for indexing tensors along their dimensions. See
464
+ `operator()`, `dimension()`, and `size()`.
465
+
466
+ #### `<Tensor-Type>::``Scalar`
467
+
468
+ Represents the datatype of individual tensor elements. For example, for a
469
+ `Tensor<float>`, `Scalar` is the type `float`. See
470
+ `setConstant()`.
471
+
472
+ #### `<Operation>`
473
+
474
+ We use this pseudo type to indicate that a tensor Operation is returned by a
475
+ method. We indicate in the text the type and dimensions of the tensor that the
476
+ Operation returns after evaluation.
477
+
478
+ The Operation will have to be evaluated, for example by assigning it to a
479
+ tensor, before you can access the values of the resulting tensor. You can also
480
+ access the values through a TensorRef.
481
+
482
+
483
+ ## Built-in Tensor Methods
484
+
485
+ These are usual C++ methods that act on tensors immediately. They are not
486
+ Operations which provide delayed evaluation of their results. Unless specified
487
+ otherwise, all the methods listed below are available on all tensor classes:
488
+ Tensor, TensorFixedSize, and TensorMap.
489
+
490
+ ## Metadata
491
+
492
+ ### `int NumDimensions`
493
+
494
+ Constant value indicating the number of dimensions of a Tensor. This is also
495
+ known as the tensor "rank".
496
+
497
+ Eigen::Tensor<float, 2> a(3, 4);
498
+ cout << "Dims " << a.NumDimensions;
499
+ => Dims 2
500
+
501
+ ### `Dimensions dimensions()`
502
+
503
+ Returns an array-like object representing the dimensions of the tensor.
504
+ The actual type of the `dimensions()` result is `<Tensor-Type>::``Dimensions`.
505
+
506
+ Eigen::Tensor<float, 2> a(3, 4);
507
+ const Eigen::Tensor<float, 2>::Dimensions& d = a.dimensions();
508
+ cout << "Dim size: " << d.size << ", dim 0: " << d[0]
509
+ << ", dim 1: " << d[1];
510
+ => Dim size: 2, dim 0: 3, dim 1: 4
511
+
512
+ If you use a C++11 compiler, you can use `auto` to simplify the code:
513
+
514
+ const auto& d = a.dimensions();
515
+ cout << "Dim size: " << d.size << ", dim 0: " << d[0]
516
+ << ", dim 1: " << d[1];
517
+ => Dim size: 2, dim 0: 3, dim 1: 4
518
+
519
+ ### `Index dimension(Index n)`
520
+
521
+ Returns the n-th dimension of the tensor. The actual type of the
522
+ `dimension()` result is `<Tensor-Type>::``Index`, but you can
523
+ always use it like an int.
524
+
525
+ Eigen::Tensor<float, 2> a(3, 4);
526
+ int dim1 = a.dimension(1);
527
+ cout << "Dim 1: " << dim1;
528
+ => Dim 1: 4
529
+
530
+ ### `Index size()`
531
+
532
+ Returns the total number of elements in the tensor. This is the product of all
533
+ the tensor dimensions. The actual type of the `size()` result is
534
+ `<Tensor-Type>::``Index`, but you can always use it like an int.
535
+
536
+ Eigen::Tensor<float, 2> a(3, 4);
537
+ cout << "Size: " << a.size();
538
+ => Size: 12
539
+
540
+
541
+ ### Getting Dimensions From An Operation
542
+
543
+ A few operations provide `dimensions()` directly,
544
+ e.g. `TensorReslicingOp`. Most operations defer calculating dimensions
545
+ until the operation is being evaluated. If you need access to the dimensions
546
+ of a deferred operation, you can wrap it in a TensorRef (see Assigning to a
547
+ TensorRef above), which provides `dimensions()` and `dimension()` as
548
+ above.
549
+
550
+ TensorRef can also wrap the plain Tensor types, so this is a useful idiom in
551
+ templated contexts where the underlying object could be either a raw Tensor
552
+ or some deferred operation (e.g. a slice of a Tensor). In this case, the
553
+ template code can wrap the object in a TensorRef and reason about its
554
+ dimensionality while remaining agnostic to the underlying type.
555
+
556
+
557
+ ## Constructors
558
+
559
+ ### Tensor
560
+
561
+ Creates a tensor of the specified size. The number of arguments must be equal
562
+ to the rank of the tensor. The content of the tensor is not initialized.
563
+
564
+ Eigen::Tensor<float, 2> a(3, 4);
565
+ cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
566
+ => NumRows: 3 NumCols: 4
567
+
568
+ ### TensorFixedSize
569
+
570
+ Creates a tensor of the specified size. The number of arguments in the Sizes<>
571
+ template parameter determines the rank of the tensor. The content of the tensor
572
+ is not initialized.
573
+
574
+ Eigen::TensorFixedSize<float, Sizes<3, 4>> a;
575
+ cout << "Rank: " << a.rank() << endl;
576
+ => Rank: 2
577
+ cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
578
+ => NumRows: 3 NumCols: 4
579
+
580
+ ### TensorMap
581
+
582
+ Creates a tensor mapping an existing array of data. The data must not be freed
583
+ until the TensorMap is discarded, and the size of the data must be large enough
584
+ to accommodate the coefficients of the tensor.
585
+
586
+ float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
587
+ Eigen::TensorMap<Tensor<float, 2>> a(data, 3, 4);
588
+ cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl;
589
+ => NumRows: 3 NumCols: 4
590
+ cout << "a(1, 2): " << a(1, 2) << endl;
591
+ => a(1, 2): 7
592
+
593
+
594
+ ## Contents Initialization
595
+
596
+ When a new Tensor or a new TensorFixedSize are created, memory is allocated to
597
+ hold all the tensor elements, but the memory is not initialized. Similarly,
598
+ when a new TensorMap is created on top of non-initialized memory, its
599
+ contents are not initialized.
600
+
601
+ You can use one of the methods below to initialize the tensor memory. These
602
+ have an immediate effect on the tensor and return the tensor itself as a
603
+ result. These are not tensor Operations which delay evaluation.
604
+
605
+ ### `<Tensor-Type> setConstant(const Scalar& val)`
606
+
607
+ Sets all elements of the tensor to the constant value `val`. `Scalar`
608
+ is the type of data stored in the tensor. You can pass any value that is
609
+ convertible to that type.
610
+
611
+ Returns the tensor itself in case you want to chain another call.
612
+
613
+ a.setConstant(12.3f);
614
+ cout << "Constant: " << endl << a << endl << endl;
615
+ =>
616
+ Constant:
617
+ 12.3 12.3 12.3 12.3
618
+ 12.3 12.3 12.3 12.3
619
+ 12.3 12.3 12.3 12.3
620
+
621
+ Note that `setConstant()` can be used on any tensor where the element type
622
+ has a copy constructor and an `operator=()`:
623
+
624
+ Eigen::Tensor<string, 2> a(2, 3);
625
+ a.setConstant("yolo");
626
+ cout << "String tensor: " << endl << a << endl << endl;
627
+ =>
628
+ String tensor:
629
+ yolo yolo yolo
630
+ yolo yolo yolo
631
+
632
+
633
+ ### `<Tensor-Type> setZero()`
634
+
635
+ Fills the tensor with zeros. Equivalent to `setConstant(Scalar(0))`.
636
+ Returns the tensor itself in case you want to chain another call.
637
+
638
+ a.setZero();
639
+ cout << "Zeros: " << endl << a << endl << endl;
640
+ =>
641
+ Zeros:
642
+ 0 0 0 0
643
+ 0 0 0 0
644
+ 0 0 0 0
645
+
646
+
647
+ ### `<Tensor-Type> setValues({..initializer_list})`
648
+
649
+ Fills the tensor with explicit values specified in a std::initializer_list.
650
+ The type of the initializer list depends on the type and rank of the tensor.
651
+
652
+ If the tensor has rank N, the initializer list must be nested N times. The
653
+ most deeply nested lists must contain P scalars of the Tensor type where P is
654
+ the size of the last dimension of the Tensor.
655
+
656
+ For example, for a `TensorFixedSize<float, Sizes<2, 3>>` the initializer list must
657
+ contain 2 lists of 3 floats each.
658
+
659
+ `setValues()` returns the tensor itself in case you want to chain another
660
+ call.
661
+
662
+ Eigen::Tensor<float, 2> a(2, 3);
663
+ a.setValues({{0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}});
664
+ cout << "a" << endl << a << endl << endl;
665
+ =>
666
+ a
667
+ 0 1 2
668
+ 3 4 5
669
+
670
+ If a list is too short, the corresponding elements of the tensor will not be
671
+ changed. This is valid at each level of nesting. For example the following
672
+ code only sets the values of the first row of the tensor.
673
+
674
+ Eigen::Tensor<int, 2> a(2, 3);
675
+ a.setConstant(1000);
676
+ a.setValues({{10, 20, 30}});
677
+ cout << "a" << endl << a << endl << endl;
678
+ =>
679
+ a
680
+ 10 20 30
681
+ 1000 1000 1000
682
+
683
+ ### `<Tensor-Type> setRandom()`
684
+
685
+ Fills the tensor with random values. Returns the tensor itself in case you
686
+ want to chain another call.
687
+
688
+ a.setRandom();
689
+ cout << "Random: " << endl << a << endl << endl;
690
+ =>
691
+ Random:
692
+ 0.680375 0.59688 -0.329554 0.10794
693
+ -0.211234 0.823295 0.536459 -0.0452059
694
+ 0.566198 -0.604897 -0.444451 0.257742
695
+
696
+ You can customize `setRandom()` by providing your own random number
697
+ generator as a template argument:
698
+
699
+ a.setRandom<MyRandomGenerator>();
700
+
701
+ Here, `MyRandomGenerator` must be a struct with the following member
702
+ functions, where Scalar and Index are the same as `<Tensor-Type>::``Scalar`
703
+ and `<Tensor-Type>::``Index`.
704
+
705
+ See `struct UniformRandomGenerator` in TensorFunctors.h for an example.
706
+
707
+ // Custom number generator for use with setRandom().
708
+ struct MyRandomGenerator {
709
+ // Default and copy constructors. Both are needed
710
+ MyRandomGenerator() { }
711
+ MyRandomGenerator(const MyRandomGenerator& ) { }
712
+
713
+ // Return a random value to be used. "element_location" is the
714
+ // location of the entry to set in the tensor, it can typically
715
+ // be ignored.
716
+ Scalar operator()(Eigen::DenseIndex element_location,
717
+ Eigen::DenseIndex /*unused*/ = 0) const {
718
+ return <randomly generated value of type T>;
719
+ }
720
+
721
+ // Same as above but generates several numbers at a time.
722
+ typename internal::packet_traits<Scalar>::type packetOp(
723
+ Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const {
724
+ return <a packet of randomly generated values>;
725
+ }
726
+ };
727
+
728
+ You can also use one of the 2 random number generators that are part of the
729
+ tensor library:
730
+ * UniformRandomGenerator
731
+ * NormalRandomGenerator
732
+
733
+
734
+ ## Data Access
735
+
736
+ The Tensor, TensorFixedSize, and TensorRef classes provide the following
737
+ accessors to access the tensor coefficients:
738
+
739
+ const Scalar& operator()(const array<Index, NumIndices>& indices)
740
+ const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
741
+ Scalar& operator()(const array<Index, NumIndices>& indices)
742
+ Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
743
+
744
+ The number of indices must be equal to the rank of the tensor. Moreover, these
745
+ accessors are not available on tensor expressions. In order to access the
746
+ values of a tensor expression, the expression must either be evaluated or
747
+ wrapped in a TensorRef.
748
+
749
+
750
+ ### `Scalar* data()` and `const Scalar* data() const`
751
+
752
+ Returns a pointer to the storage for the tensor. The pointer is const if the
753
+ tensor was const. This allows direct access to the data. The layout of the
754
+ data depends on the tensor layout: RowMajor or ColMajor.
755
+
756
+ This access is usually only needed for special cases, for example when mixing
757
+ Eigen Tensor code with other libraries.
758
+
759
+ Scalar is the type of data stored in the tensor.
760
+
761
+ Eigen::Tensor<float, 2> a(3, 4);
762
+ float* a_data = a.data();
763
+ a_data[0] = 123.45f;
764
+ cout << "a(0, 0): " << a(0, 0);
765
+ => a(0, 0): 123.45
766
+
767
+
768
+ ## Tensor Operations
769
+
770
+ All the methods documented below return non evaluated tensor `Operations`.
771
+ These can be chained: you can apply another Tensor Operation to the value
772
+ returned by the method.
773
+
774
+ The chain of Operation is evaluated lazily, typically when it is assigned to a
775
+ tensor. See "Controlling when Expression are Evaluated" for more details about
776
+ their evaluation.
777
+
778
+ ### `<Operation> constant(const Scalar& val)`
779
+
780
+ Returns a tensor of the same type and dimensions as the original tensor but
781
+ where all elements have the value `val`.
782
+
783
+ This is useful, for example, when you want to add or subtract a constant from a
784
+ tensor, or multiply every element of a tensor by a scalar.
785
+
786
+ Eigen::Tensor<float, 2> a(2, 3);
787
+ a.setConstant(1.0f);
788
+ Eigen::Tensor<float, 2> b = a + a.constant(2.0f);
789
+ Eigen::Tensor<float, 2> c = b * b.constant(0.2f);
790
+ cout << "a" << endl << a << endl << endl;
791
+ cout << "b" << endl << b << endl << endl;
792
+ cout << "c" << endl << c << endl << endl;
793
+ =>
794
+ a
795
+ 1 1 1
796
+ 1 1 1
797
+
798
+ b
799
+ 3 3 3
800
+ 3 3 3
801
+
802
+ c
803
+ 0.6 0.6 0.6
804
+ 0.6 0.6 0.6
805
+
806
+ ### `<Operation> random()`
807
+
808
+ Returns a tensor of the same type and dimensions as the current tensor
809
+ but where all elements have random values.
810
+
811
+ This is for example useful to add random values to an existing tensor.
812
+ The generation of random values can be customized in the same manner
813
+ as for `setRandom()`.
814
+
815
+ Eigen::Tensor<float, 2> a(2, 3);
816
+ a.setConstant(1.0f);
817
+ Eigen::Tensor<float, 2> b = a + a.random();
818
+ cout << "a" << endl << a << endl << endl;
819
+ cout << "b" << endl << b << endl << endl;
820
+ =>
821
+ a
822
+ 1 1 1
823
+ 1 1 1
824
+
825
+ b
826
+ 1.68038 1.5662 1.82329
827
+ 0.788766 1.59688 0.395103
828
+
829
+
830
+ ## Unary Element Wise Operations
831
+
832
+ All these operations take a single input tensor as argument and return a tensor
833
+ of the same type and dimensions as the tensor to which they are applied. The
834
+ requested operations are applied to each element independently.
835
+
836
+ ### `<Operation> operator-()`
837
+
838
+ Returns a tensor of the same type and dimensions as the original tensor
839
+ containing the opposite values of the original tensor.
840
+
841
+ Eigen::Tensor<float, 2> a(2, 3);
842
+ a.setConstant(1.0f);
843
+ Eigen::Tensor<float, 2> b = -a;
844
+ cout << "a" << endl << a << endl << endl;
845
+ cout << "b" << endl << b << endl << endl;
846
+ =>
847
+ a
848
+ 1 1 1
849
+ 1 1 1
850
+
851
+ b
852
+ -1 -1 -1
853
+ -1 -1 -1
854
+
855
+ ### `<Operation> sqrt()`
856
+
857
+ Returns a tensor of the same type and dimensions as the original tensor
858
+ containing the square roots of the original tensor.
859
+
860
+ ### `<Operation> rsqrt()`
861
+
862
+ Returns a tensor of the same type and dimensions as the original tensor
863
+ containing the inverse square roots of the original tensor.
864
+
865
+ ### `<Operation> square()`
866
+
867
+ Returns a tensor of the same type and dimensions as the original tensor
868
+ containing the squares of the original tensor values.
869
+
870
+ ### `<Operation> inverse()`
871
+
872
+ Returns a tensor of the same type and dimensions as the original tensor
873
+ containing the inverse of the original tensor values.
874
+
875
+ ### `<Operation> exp()`
876
+
877
+ Returns a tensor of the same type and dimensions as the original tensor
878
+ containing the exponential of the original tensor.
879
+
880
+ ### `<Operation> log()`
881
+
882
+ Returns a tensor of the same type and dimensions as the original tensor
883
+ containing the natural logarithms of the original tensor.
884
+
885
+ ### `<Operation> abs()`
886
+
887
+ Returns a tensor of the same type and dimensions as the original tensor
888
+ containing the absolute values of the original tensor.
889
+
890
+ ### `<Operation> pow(Scalar exponent)`
891
+
892
+ Returns a tensor of the same type and dimensions as the original tensor
893
+ containing the coefficients of the original tensor to the power of the
894
+ exponent.
895
+
896
+ The type of the exponent, Scalar, is always the same as the type of the
897
+ tensor coefficients. For example, only integer exponents can be used in
898
+ conjunction with tensors of integer values.
899
+
900
+ You can use cast() to lift this restriction. For example this computes
901
+ cubic roots of an int Tensor:
902
+
903
+ Eigen::Tensor<int, 2> a(2, 3);
904
+ a.setValues({{0, 1, 8}, {27, 64, 125}});
905
+ Eigen::Tensor<double, 2> b = a.cast<double>().pow(1.0 / 3.0);
906
+ cout << "a" << endl << a << endl << endl;
907
+ cout << "b" << endl << b << endl << endl;
908
+ =>
909
+ a
910
+ 0 1 8
911
+ 27 64 125
912
+
913
+ b
914
+ 0 1 2
915
+ 3 4 5
916
+
917
+ ### `<Operation> operator * (Scalar scale)`
918
+
919
+ Multiplies all the coefficients of the input tensor by the provided scale.
920
+
921
+ ### `<Operation> cwiseMax(Scalar threshold)`
922
+ TODO
923
+
924
+ ### `<Operation> cwiseMin(Scalar threshold)`
925
+ TODO
926
+
927
+ ### `<Operation> unaryExpr(const CustomUnaryOp& func)`
928
+ TODO
929
+
930
+
931
+ ## Binary Element Wise Operations
932
+
933
+ These operations take two input tensors as arguments. The 2 input tensors should
934
+ be of the same type and dimensions. The result is a tensor of the same
935
+ dimensions as the tensors to which they are applied, and unless otherwise
936
+ specified it is also of the same type. The requested operations are applied to
937
+ each pair of elements independently.
938
+
939
+ ### `<Operation> operator+(const OtherDerived& other)`
940
+
941
+ Returns a tensor of the same type and dimensions as the input tensors
942
+ containing the coefficient wise sums of the inputs.
943
+
944
+ ### `<Operation> operator-(const OtherDerived& other)`
945
+
946
+ Returns a tensor of the same type and dimensions as the input tensors
947
+ containing the coefficient wise differences of the inputs.
948
+
949
+ ### `<Operation> operator*(const OtherDerived& other)`
950
+
951
+ Returns a tensor of the same type and dimensions as the input tensors
952
+ containing the coefficient wise products of the inputs.
953
+
954
+ ### `<Operation> operator/(const OtherDerived& other)`
955
+
956
+ Returns a tensor of the same type and dimensions as the input tensors
957
+ containing the coefficient wise quotients of the inputs.
958
+
959
+ This operator is not supported for integer types.
960
+
961
+ ### `<Operation> cwiseMax(const OtherDerived& other)`
962
+
963
+ Returns a tensor of the same type and dimensions as the input tensors
964
+ containing the coefficient wise maximums of the inputs.
965
+
966
+ ### `<Operation> cwiseMin(const OtherDerived& other)`
967
+
968
+ Returns a tensor of the same type and dimensions as the input tensors
969
+ containing the coefficient wise minimums of the inputs.
970
+
971
+ ### `<Operation> Logical operators`
972
+
973
+ The following logical operators are supported as well:
974
+
975
+ * operator&&(const OtherDerived& other)
976
+ * operator||(const OtherDerived& other)
977
+ * operator<(const OtherDerived& other)
978
+ * operator<=(const OtherDerived& other)
979
+ * operator>(const OtherDerived& other)
980
+ * operator>=(const OtherDerived& other)
981
+ * operator==(const OtherDerived& other)
982
+ * operator!=(const OtherDerived& other)
983
+
984
+ They all return a tensor of boolean values.
985
+
986
+
987
+ ## Selection (`select(const ThenDerived& thenTensor, const ElseDerived& elseTensor)`)
988
+
989
+ Selection is a coefficient-wise ternary operator that is the tensor equivalent
990
+ to the if-then-else operation.
991
+
992
+     Tensor<bool, 3> if_tensor = ...;
993
+     Tensor<float, 3> then_tensor = ...;
994
+     Tensor<float, 3> else_tensor = ...;
995
+     Tensor<float, 3> result = if_tensor.select(then_tensor, else_tensor);
996
+
997
+ The 3 arguments must be of the same dimensions, which will also be the dimension
998
+ of the result. The 'if' tensor must be of type boolean, the 'then' and the
999
+ 'else' tensor must be of the same type, which will also be the type of the
1000
+ result.
1001
+
1002
+ Each coefficient in the result is equal to the corresponding coefficient in the
1003
+ 'then' tensor if the corresponding value in the 'if' tensor is true. If not, the
1004
+ resulting coefficient will come from the 'else' tensor.
1005
+
1006
+
1007
+ ## Contraction
1008
+
1009
+ Tensor *contractions* are a generalization of the matrix product to the
1010
+ multidimensional case.
1011
+
1012
+ // Create 2 matrices using tensors of rank 2
1013
+ Eigen::Tensor<int, 2> a(2, 3);
1014
+ a.setValues({{1, 2, 3}, {6, 5, 4}});
1015
+ Eigen::Tensor<int, 2> b(3, 2);
1016
+ b.setValues({{1, 2}, {4, 5}, {5, 6}});
1017
+
1018
+ // Compute the traditional matrix product
1019
+ Eigen::array<Eigen::IndexPair<int>, 1> product_dims = { Eigen::IndexPair<int>(1, 0) };
1020
+ Eigen::Tensor<int, 2> AB = a.contract(b, product_dims);
1021
+
1022
+ // Compute the product of the transpose of the matrices
1023
+ Eigen::array<Eigen::IndexPair<int>, 1> transposed_product_dims = { Eigen::IndexPair<int>(0, 1) };
1024
+ Eigen::Tensor<int, 2> AtBt = a.contract(b, transposed_product_dims);
1025
+
1026
+ // Contraction to scalar value using a double contraction.
1027
+ // First coordinate of both tensors are contracted as well as both second coordinates, i.e., this computes the sum of the squares of the elements.
1028
+ Eigen::array<Eigen::IndexPair<int>, 2> double_contraction_product_dims = { Eigen::IndexPair<int>(0, 0), Eigen::IndexPair<int>(1, 1) };
1029
+ Eigen::Tensor<int, 0> AdoubleContractedA = a.contract(a, double_contraction_product_dims);
1030
+
1031
+ // Extracting the scalar value of the tensor contraction for further usage
1032
+ int value = AdoubleContractedA(0);
1033
+
1034
+ ## Reduction Operations
1035
+
1036
+ A *Reduction* operation returns a tensor with fewer dimensions than the
1037
+ original tensor. The values in the returned tensor are computed by applying a
1038
+ *reduction operator* to slices of values from the original tensor. You specify
1039
+ the dimensions along which the slices are made.
1040
+
1041
+ The Eigen Tensor library provides a set of predefined reduction operators such
1042
+ as `maximum()` and `sum()` and lets you define additional operators by
1043
+ implementing a few methods from a reductor template.
1044
+
1045
+ ### Reduction Dimensions
1046
+
1047
+ All reduction operations take a single parameter of type
1048
+ `<TensorType>::``Dimensions` which can always be specified as an array of
1049
+ ints. These are called the "reduction dimensions." The values are the indices
1050
+ of the dimensions of the input tensor over which the reduction is done. The
1051
+ parameter can have at most as many elements as the rank of the input tensor;
1052
+ each element must be less than the tensor rank, as it indicates one of the
1053
+ dimensions to reduce.
1054
+
1055
+ Each dimension of the input tensor should occur at most once in the reduction
1056
+ dimensions as the implementation does not remove duplicates.
1057
+
1058
+ The order of the values in the reduction dimensions does not affect the
1059
+ results, but the code may execute faster if you list the dimensions in
1060
+ increasing order.
1061
+
1062
+ Example: Reduction along one dimension.
1063
+
1064
+ // Create a tensor of 2 dimensions
1065
+ Eigen::Tensor<int, 2> a(2, 3);
1066
+ a.setValues({{1, 2, 3}, {6, 5, 4}});
1067
+ // Reduce it along the second dimension (1)...
1068
+ Eigen::array<int, 1> dims({1 /* dimension to reduce */});
1069
+ // ...using the "maximum" operator.
1070
+ // The result is a tensor with one dimension. The size of
1071
+ // that dimension is the same as the first (non-reduced) dimension of a.
1072
+ Eigen::Tensor<int, 1> b = a.maximum(dims);
1073
+ cout << "a" << endl << a << endl << endl;
1074
+ cout << "b" << endl << b << endl << endl;
1075
+ =>
1076
+ a
1077
+ 1 2 3
1078
+ 6 5 4
1079
+
1080
+ b
1081
+ 3
1082
+ 6
1083
+
1084
+ Example: Reduction along two dimensions.
1085
+
1086
+ Eigen::Tensor<float, 3, Eigen::ColMajor> a(2, 3, 4);
1087
+ a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f},
1088
+ {7.0f, 6.0f, 5.0f, 4.0f},
1089
+ {8.0f, 9.0f, 10.0f, 11.0f}},
1090
+ {{12.0f, 13.0f, 14.0f, 15.0f},
1091
+ {19.0f, 18.0f, 17.0f, 16.0f},
1092
+ {20.0f, 21.0f, 22.0f, 23.0f}}});
1093
+ // The tensor a has 3 dimensions. We reduce along the
1094
+ // first 2, resulting in a tensor with a single dimension
1095
+ // of size 4 (the last dimension of a.)
1096
+ // Note that we pass the array of reduction dimensions
1097
+ // directly to the maximum() call.
1098
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b =
1099
+ a.maximum(Eigen::array<int, 2>({0, 1}));
1100
+ cout << "b" << endl << b << endl << endl;
1101
+ =>
1102
+ b
1103
+ 20
1104
+ 21
1105
+ 22
1106
+ 23
1107
+
1108
+ #### Reduction along all dimensions
1109
+
1110
+ As a special case, if you pass no parameter to a reduction operation the
1111
+ original tensor is reduced along *all* its dimensions. The result is a
1112
+ scalar, represented as a zero-dimension tensor.
1113
+
1114
+ Eigen::Tensor<float, 3> a(2, 3, 4);
1115
+ a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f},
1116
+ {7.0f, 6.0f, 5.0f, 4.0f},
1117
+ {8.0f, 9.0f, 10.0f, 11.0f}},
1118
+ {{12.0f, 13.0f, 14.0f, 15.0f},
1119
+ {19.0f, 18.0f, 17.0f, 16.0f},
1120
+ {20.0f, 21.0f, 22.0f, 23.0f}}});
1121
+ // Reduce along all dimensions using the sum() operator.
1122
+ Eigen::Tensor<float, 0> b = a.sum();
1123
+ cout << "b" << endl << b << endl << endl;
1124
+ =>
1125
+ b
1126
+ 276
1127
+
1128
+
1129
+ ### `<Operation> sum(const Dimensions& new_dims)`
1130
+ ### `<Operation> sum()`
1131
+
1132
+ Reduce a tensor using the sum() operator. The resulting values
1133
+ are the sum of the reduced values.
1134
+
1135
+ ### `<Operation> mean(const Dimensions& new_dims)`
1136
+ ### `<Operation> mean()`
1137
+
1138
+ Reduce a tensor using the mean() operator. The resulting values
1139
+ are the mean of the reduced values.
1140
+
1141
+ ### `<Operation> maximum(const Dimensions& new_dims)`
1142
+ ### `<Operation> maximum()`
1143
+
1144
+ Reduce a tensor using the maximum() operator. The resulting values are the
1145
+ largest of the reduced values.
1146
+
1147
+ ### `<Operation> minimum(const Dimensions& new_dims)`
1148
+ ### `<Operation> minimum()`
1149
+
1150
+ Reduce a tensor using the minimum() operator. The resulting values
1151
+ are the smallest of the reduced values.
1152
+
1153
+ ### `<Operation> prod(const Dimensions& new_dims)`
1154
+ ### `<Operation> prod()`
1155
+
1156
+ Reduce a tensor using the prod() operator. The resulting values
1157
+ are the product of the reduced values.
1158
+
1159
+ ### `<Operation> all(const Dimensions& new_dims)`
1160
+ ### `<Operation> all()`
1161
+ Reduce a tensor using the all() operator. Casts tensor to bool and then checks
1162
+ whether all elements are true. Runs through all elements rather than
1163
+ short-circuiting, so may be significantly inefficient.
1164
+
1165
+ ### `<Operation> any(const Dimensions& new_dims)`
1166
+ ### `<Operation> any()`
1167
+ Reduce a tensor using the any() operator. Casts tensor to bool and then checks
1168
+ whether any element is true. Runs through all elements rather than
1169
+ short-circuiting, so may be significantly inefficient.
1170
+
1171
+
1172
+ ### `<Operation> reduce(const Dimensions& new_dims, const Reducer& reducer)`
1173
+
1174
+ Reduce a tensor using a user-defined reduction operator. See `SumReducer`
1175
+ in TensorFunctors.h for information on how to implement a reduction operator.
1176
+
1177
+
1178
+ ## Scan Operations
1179
+
1180
+ A *Scan* operation returns a tensor with the same dimensions as the original
1181
+ tensor. The operation performs an inclusive scan along the specified
1182
+ axis, which means it computes a running total along the axis for a given
1183
+ reduction operation.
1184
+ If the reduction operation corresponds to summation, then this computes the
1185
+ prefix sum of the tensor along the given axis.
1186
+
1187
+ Example:
1188
+
1189
+
1190
+ // Create a tensor of 2 dimensions
1191
+ Eigen::Tensor<int, 2> a(2, 3);
1192
+ a.setValues({{1, 2, 3}, {4, 5, 6}});
1193
+ // Scan it along the second dimension (1) using summation
1194
+ Eigen::Tensor<int, 2> b = a.cumsum(1);
1195
+ // The result is a tensor with the same size as the input
1196
+ cout << "a" << endl << a << endl << endl;
1197
+ cout << "b" << endl << b << endl << endl;
1198
+ =>
1199
+ a
1200
+ 1 2 3
1201
+ 4 5 6
1202
+
1203
+ b
1204
+ 1 3 6
1205
+ 4 9 15
1206
+
1207
+ ### `<Operation> cumsum(const Index& axis)`
1208
+
1209
+ Perform a scan by summing consecutive entries.
1210
+
1211
+ ### `<Operation> cumprod(const Index& axis)`
1212
+
1213
+ Perform a scan by multiplying consecutive entries.
1214
+
1215
+
1216
+ ## Convolutions
1217
+
1218
+ ### `<Operation> convolve(const Kernel& kernel, const Dimensions& dims)`
1219
+
1220
+ Returns a tensor that is the output of the convolution of the input tensor with the kernel,
1221
+ along the specified dimensions of the input tensor. The dimension size for dimensions of the output tensor
1222
+ which were part of the convolution will be reduced by the formula:
1223
+ output_dim_size = input_dim_size - kernel_dim_size + 1 (requires: input_dim_size >= kernel_dim_size).
1224
+ The dimension sizes for dimensions that were not part of the convolution will remain the same.
1225
+ Performance of the convolution can depend on the length of the stride(s) of the input tensor dimension(s) along which the
1226
+ convolution is computed (the first dimension has the shortest stride for ColMajor, whereas RowMajor's shortest stride is
1227
+ for the last dimension).
1228
+
1229
+ // Compute convolution along the second and third dimension.
1230
+ Tensor<float, 4, DataLayout> input(3, 3, 7, 11);
1231
+ Tensor<float, 2, DataLayout> kernel(2, 2);
1232
+ Tensor<float, 4, DataLayout> output(3, 2, 6, 11);
1233
+ input.setRandom();
1234
+ kernel.setRandom();
1235
+
1236
+ Eigen::array<ptrdiff_t, 2> dims({1, 2}); // Specify second and third dimension for convolution.
1237
+ output = input.convolve(kernel, dims);
1238
+
1239
+ for (int i = 0; i < 3; ++i) {
1240
+ for (int j = 0; j < 2; ++j) {
1241
+ for (int k = 0; k < 6; ++k) {
1242
+ for (int l = 0; l < 11; ++l) {
1243
+ const float result = output(i,j,k,l);
1244
+ const float expected = input(i,j+0,k+0,l) * kernel(0,0) +
1245
+ input(i,j+1,k+0,l) * kernel(1,0) +
1246
+ input(i,j+0,k+1,l) * kernel(0,1) +
1247
+ input(i,j+1,k+1,l) * kernel(1,1);
1248
+ VERIFY_IS_APPROX(result, expected);
1249
+ }
1250
+ }
1251
+ }
1252
+ }
1253
+
1254
+
1255
+ ## Geometrical Operations
1256
+
1257
+ These operations return a Tensor with different dimensions than the original
1258
+ Tensor. They can be used to access slices of tensors, see them with different
1259
+ dimensions, or pad tensors with additional data.
1260
+
1261
+ ### `<Operation> reshape(const Dimensions& new_dims)`
1262
+
1263
+ Returns a view of the input tensor that has been reshaped to the specified
1264
+ new dimensions. The argument new_dims is an array of Index values. The
1265
+ rank of the resulting tensor is equal to the number of elements in new_dims.
1266
+
1267
+ The product of all the sizes in the new dimension array must be equal to
1268
+ the number of elements in the input tensor.
1269
+
1270
+ // Increase the rank of the input tensor by introducing a new dimension
1271
+ // of size 1.
1272
+ Tensor<float, 2> input(7, 11);
1273
+ array<int, 3> three_dims{{7, 11, 1}};
1274
+ Tensor<float, 3> result = input.reshape(three_dims);
1275
+
1276
+ // Decrease the rank of the input tensor by merging 2 dimensions;
1277
+ array<int, 1> one_dim{{7 * 11}};
1278
+ Tensor<float, 1> result = input.reshape(one_dim);
1279
+
1280
+ This operation does not move any data in the input tensor, so the resulting
1281
+ contents of a reshaped Tensor depend on the data layout of the original Tensor.
1282
+
1283
+ For example this is what happens when you `reshape()` a 2D ColMajor tensor
1284
+ to one dimension:
1285
+
1286
+ Eigen::Tensor<float, 2, Eigen::ColMajor> a(2, 3);
1287
+ a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
1288
+ Eigen::array<Eigen::DenseIndex, 1> one_dim({3 * 2});
1289
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b = a.reshape(one_dim);
1290
+ cout << "b" << endl << b << endl;
1291
+ =>
1292
+ b
1293
+ 0
1294
+ 300
1295
+ 100
1296
+ 400
1297
+ 200
1298
+ 500
1299
+
1300
+ This is what happens when the 2D Tensor is RowMajor:
1301
+
1302
+ Eigen::Tensor<float, 2, Eigen::RowMajor> a(2, 3);
1303
+ a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
1304
+ Eigen::array<Eigen::DenseIndex, 1> one_dim({3 * 2});
1305
+ Eigen::Tensor<float, 1, Eigen::RowMajor> b = a.reshape(one_dim);
1306
+ cout << "b" << endl << b << endl;
1307
+ =>
1308
+ b
1309
+ 0
1310
+ 100
1311
+ 200
1312
+ 300
1313
+ 400
1314
+ 500
1315
+
1316
+ The reshape operation is an lvalue. In other words, it can be used on the left
1317
+ side of the assignment operator.
1318
+
1319
+ The previous example can be rewritten as follow:
1320
+
1321
+ Eigen::Tensor<float, 2, Eigen::ColMajor> a(2, 3);
1322
+ a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}});
1323
+ Eigen::array<Eigen::DenseIndex, 2> two_dim({2, 3});
1324
+ Eigen::Tensor<float, 1, Eigen::ColMajor> b(6);
1325
+ b.reshape(two_dim) = a;
1326
+ cout << "b" << endl << b << endl;
1327
+ =>
1328
+ b
1329
+ 0
1330
+ 300
1331
+ 100
1332
+ 400
1333
+ 200
1334
+ 500
1335
+
1336
+ Note that "b" itself was not reshaped but that instead the assignment is done to
1337
+ the reshape view of b.
1338
+
1339
+
1340
+ ### `<Operation> shuffle(const Shuffle& shuffle)`
1341
+
1342
+ Returns a copy of the input tensor whose dimensions have been
1343
+ reordered according to the specified permutation. The argument shuffle
1344
+ is an array of Index values. Its size is the rank of the input
1345
+ tensor. It must contain a permutation of 0, 1, ..., rank - 1. The i-th
1346
+ dimension of the output tensor equals to the size of the shuffle[i]-th
1347
+ dimension of the input tensor. For example:
1348
+
1349
+ // Shuffle all dimensions to the left by 1.
1350
+ Tensor<float, 3> input(20, 30, 50);
1351
+ // ... set some values in input.
1352
+ Tensor<float, 3> output = input.shuffle({1, 2, 0})
1353
+
1354
+ eigen_assert(output.dimension(0) == 30);
1355
+ eigen_assert(output.dimension(1) == 50);
1356
+ eigen_assert(output.dimension(2) == 20);
1357
+
1358
+ Indices into the output tensor are shuffled accordingly to formulate
1359
+ indices into the input tensor. For example, one can assert in the above
1360
+ code snippet that:
1361
+
1362
+ eigen_assert(output(3, 7, 11) == input(11, 3, 7));
1363
+
1364
+ In general, one can assert that
1365
+
1366
+ eigen_assert(output(..., indices[shuffle[i]], ...) ==
1367
+ input(..., indices[i], ...))
1368
+
1369
+ The shuffle operation results in an lvalue, which means that it can be assigned
1370
+ to. In other words, it can be used on the left side of the assignment operator.
1371
+
1372
+ Let's rewrite the previous example to take advantage of this feature:
1373
+
1374
+ // Shuffle all dimensions to the left by 1.
1375
+ Tensor<float, 3> input(20, 30, 50);
1376
+ // ... set some values in input.
1377
+ Tensor<float, 3> output(30, 50, 20);
1378
+ output.shuffle({2, 0, 1}) = input;
1379
+
1380
+
1381
+ ### `<Operation> stride(const Strides& strides)`
1382
+
1383
+ Returns a view of the input tensor that strides (skips stride-1
1384
+ elements) along each of the dimensions. The argument strides is an
1385
+ array of Index values. The dimensions of the resulting tensor are
1386
+ ceil(input_dimensions[i] / strides[i]).
1387
+
1388
+ For example this is what happens when you `stride()` a 2D tensor:
1389
+
1390
+ Eigen::Tensor<int, 2> a(4, 3);
1391
+ a.setValues({{0, 100, 200}, {300, 400, 500}, {600, 700, 800}, {900, 1000, 1100}});
1392
+ Eigen::array<Eigen::DenseIndex, 2> strides({3, 2});
1393
+ Eigen::Tensor<int, 2> b = a.stride(strides);
1394
+ cout << "b" << endl << b << endl;
1395
+ =>
1396
+ b
1397
+ 0 200
1398
+ 900 1100
1399
+
1400
+ It is possible to assign a tensor to a stride:
1401
+ Tensor<float, 3> input(20, 30, 50);
1402
+ // ... set some values in input.
1403
+ Tensor<float, 3> output(40, 90, 200);
1404
+ output.stride({2, 3, 4}) = input;
1405
+
1406
+
1407
+ ### `<Operation> slice(const StartIndices& offsets, const Sizes& extents)`
1408
+
1409
+ Returns a sub-tensor of the given tensor. For each dimension i, the slice is
1410
+ made of the coefficients stored between offset[i] and offset[i] + extents[i] in
1411
+ the input tensor.
1412
+
1413
+ Eigen::Tensor<int, 2> a(4, 3);
1414
+ a.setValues({{0, 100, 200}, {300, 400, 500},
1415
+ {600, 700, 800}, {900, 1000, 1100}});
1416
+ Eigen::array<int, 2> offsets = {1, 0};
1417
+ Eigen::array<int, 2> extents = {2, 2};
1418
+     Eigen::Tensor<int, 2> slice = a.slice(offsets, extents);
1419
+ cout << "a" << endl << a << endl;
1420
+ =>
1421
+ a
1422
+ 0 100 200
1423
+ 300 400 500
1424
+ 600 700 800
1425
+ 900 1000 1100
1426
+ cout << "slice" << endl << slice << endl;
1427
+ =>
1428
+ slice
1429
+ 300 400
1430
+ 600 700
1431
+
1432
+
1433
+ ### `<Operation> chip(const Index offset, const Index dim)`
1434
+
1435
+ A chip is a special kind of slice. It is the subtensor at the given offset in
1436
+ the dimension dim. The returned tensor has one fewer dimension than the input
1437
+ tensor: the dimension dim is removed.
1438
+
1439
+ For example, a matrix chip would be either a row or a column of the input
1440
+ matrix.
1441
+
1442
+ Eigen::Tensor<int, 2> a(4, 3);
1443
+ a.setValues({{0, 100, 200}, {300, 400, 500},
1444
+ {600, 700, 800}, {900, 1000, 1100}});
1445
+ Eigen::Tensor<int, 1> row_3 = a.chip(2, 0);
1446
+ Eigen::Tensor<int, 1> col_2 = a.chip(1, 1);
1447
+ cout << "a" << endl << a << endl;
1448
+ =>
1449
+ a
1450
+ 0 100 200
1451
+ 300 400 500
1452
+ 600 700 800
1453
+ 900 1000 1100
1454
+ cout << "row_3" << endl << row_3 << endl;
1455
+ =>
1456
+ row_3
1457
+ 600 700 800
1458
+ cout << "col_2" << endl << col_2 << endl;
1459
+ =>
1460
+ col_2
1461
+ 100 400 700 1000
1462
+
1463
+ It is possible to assign values to a tensor chip since the chip operation is an
1464
+ lvalue. For example:
1465
+
1466
+ Eigen::Tensor<int, 1> a(3);
1467
+ a.setValues({{100, 200, 300}});
1468
+ Eigen::Tensor<int, 2> b(2, 3);
1469
+ b.setZero();
1470
+ b.chip(0, 0) = a;
1471
+ cout << "a" << endl << a << endl;
1472
+ =>
1473
+ a
1474
+ 100
1475
+ 200
1476
+ 300
1477
+ cout << "b" << endl << b << endl;
1478
+ =>
1479
+ b
1480
+ 100 200 300
1481
+ 0 0 0
1482
+
1483
+
1484
+ ### `<Operation> reverse(const ReverseDimensions& reverse)`
1485
+
1486
+ Returns a view of the input tensor that reverses the order of the coefficients
1487
+ along a subset of the dimensions. The argument reverse is an array of boolean
1488
+ values that indicates whether or not the order of the coefficients should be
1489
+ reversed along each of the dimensions. This operation preserves the dimensions
1490
+ of the input tensor.
1491
+
1492
+ For example this is what happens when you `reverse()` the first dimension
1493
+ of a 2D tensor:
1494
+
1495
+ Eigen::Tensor<int, 2> a(4, 3);
1496
+ a.setValues({{0, 100, 200}, {300, 400, 500},
1497
+ {600, 700, 800}, {900, 1000, 1100}});
1498
+ Eigen::array<bool, 2> reverse({true, false});
1499
+ Eigen::Tensor<int, 2> b = a.reverse(reverse);
1500
+ cout << "a" << endl << a << endl << "b" << endl << b << endl;
1501
+ =>
1502
+ a
1503
+ 0 100 200
1504
+ 300 400 500
1505
+ 600 700 800
1506
+ 900 1000 1100
1507
+ b
1508
+ 900 1000 1100
1509
+ 600 700 800
1510
+ 300 400 500
1511
+ 0 100 200
1512
+
1513
+
1514
+ ### `<Operation> broadcast(const Broadcast& broadcast)`
1515
+
1516
+ Returns a view of the input tensor in which the input is replicated one to many
1517
+ times.
1518
+ The broadcast argument specifies how many copies of the input tensor need to be
1519
+ made in each of the dimensions.
1520
+
1521
+ Eigen::Tensor<int, 2> a(2, 3);
1522
+ a.setValues({{0, 100, 200}, {300, 400, 500}});
1523
+ Eigen::array<int, 2> bcast({3, 2});
1524
+ Eigen::Tensor<int, 2> b = a.broadcast(bcast);
1525
+ cout << "a" << endl << a << endl << "b" << endl << b << endl;
1526
+ =>
1527
+ a
1528
+ 0 100 200
1529
+ 300 400 500
1530
+ b
1531
+ 0 100 200 0 100 200
1532
+ 300 400 500 300 400 500
1533
+ 0 100 200 0 100 200
1534
+ 300 400 500 300 400 500
1535
+ 0 100 200 0 100 200
1536
+ 300 400 500 300 400 500
1537
+
1538
+ ### `<Operation> concatenate(const OtherDerived& other, Axis axis)`
1539
+
1540
+ TODO
1541
+
1542
+ ### `<Operation> pad(const PaddingDimensions& padding)`
1543
+
1544
+ Returns a view of the input tensor in which the input is padded with zeros.
1545
+
1546
+ Eigen::Tensor<int, 2> a(2, 3);
1547
+ a.setValues({{0, 100, 200}, {300, 400, 500}});
1548
+ Eigen::array<pair<int, int>, 2> paddings;
1549
+ paddings[0] = make_pair(0, 1);
1550
+ paddings[1] = make_pair(2, 3);
1551
+ Eigen::Tensor<int, 2> b = a.pad(paddings);
1552
+ cout << "a" << endl << a << endl << "b" << endl << b << endl;
1553
+ =>
1554
+ a
1555
+ 0 100 200
1556
+ 300 400 500
1557
+ b
1558
+ 0 0 0 0
1559
+ 0 0 0 0
1560
+ 0 100 200 0
1561
+ 300 400 500 0
1562
+ 0 0 0 0
1563
+ 0 0 0 0
1564
+ 0 0 0 0
1565
+
1566
+
1567
+ ### `<Operation> extract_patches(const PatchDims& patch_dims)`
1568
+
1569
+ Returns a tensor of coefficient patches extracted from the input tensor, where
1570
+ each patch is of dimension specified by 'patch_dims'. The returned tensor has
1571
+ one greater dimension than the input tensor, which is used to index each patch.
1572
+ The patch index in the output tensor depends on the data layout of the input
1573
+ tensor: the patch index is the last dimension ColMajor layout, and the first
1574
+ dimension in RowMajor layout.
1575
+
1576
+ For example, given the following input tensor:
1577
+
1578
+ Eigen::Tensor<float, 2, DataLayout> tensor(3,4);
1579
+ tensor.setValues({{0.0f, 1.0f, 2.0f, 3.0f},
1580
+ {4.0f, 5.0f, 6.0f, 7.0f},
1581
+ {8.0f, 9.0f, 10.0f, 11.0f}});
1582
+
1583
+ cout << "tensor: " << endl << tensor << endl;
1584
+ =>
1585
+ tensor:
1586
+ 0 1 2 3
1587
+ 4 5 6 7
1588
+ 8 9 10 11
1589
+
1590
+ Six 2x2 patches can be extracted and indexed using the following code:
1591
+
1592
+ Eigen::Tensor<float, 3, DataLayout> patch;
1593
+ Eigen::array<ptrdiff_t, 2> patch_dims;
1594
+ patch_dims[0] = 2;
1595
+ patch_dims[1] = 2;
1596
+ patch = tensor.extract_patches(patch_dims);
1597
+ for (int k = 0; k < 6; ++k) {
1598
+ cout << "patch index: " << k << endl;
1599
+ for (int i = 0; i < 2; ++i) {
1600
+ for (int j = 0; j < 2; ++j) {
1601
+ if (DataLayout == ColMajor) {
1602
+ cout << patch(i, j, k) << " ";
1603
+ } else {
1604
+ cout << patch(k, i, j) << " ";
1605
+ }
1606
+ }
1607
+ cout << endl;
1608
+ }
1609
+ }
1610
+
1611
+ This code results in the following output when the data layout is ColMajor:
1612
+
1613
+ patch index: 0
1614
+ 0 1
1615
+ 4 5
1616
+ patch index: 1
1617
+ 4 5
1618
+ 8 9
1619
+ patch index: 2
1620
+ 1 2
1621
+ 5 6
1622
+ patch index: 3
1623
+ 5 6
1624
+ 9 10
1625
+ patch index: 4
1626
+ 2 3
1627
+ 6 7
1628
+ patch index: 5
1629
+ 6 7
1630
+ 10 11
1631
+
1632
+ This code results in the following output when the data layout is RowMajor:
1633
+ (NOTE: the set of patches is the same as in ColMajor, but are indexed differently).
1634
+
1635
+ patch index: 0
1636
+ 0 1
1637
+ 4 5
1638
+ patch index: 1
1639
+ 1 2
1640
+ 5 6
1641
+ patch index: 2
1642
+ 2 3
1643
+ 6 7
1644
+ patch index: 3
1645
+ 4 5
1646
+ 8 9
1647
+ patch index: 4
1648
+ 5 6
1649
+ 9 10
1650
+ patch index: 5
1651
+ 6 7
1652
+ 10 11
1653
+
1654
+ ### `<Operation> extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type)`
1655
+
1656
+ Returns a tensor of coefficient image patches extracted from the input tensor,
1657
+ which is expected to have dimensions ordered as follows (depending on the data
1658
+ layout of the input tensor, and the number of additional dimensions 'N'):
1659
+
1660
+ *) ColMajor
1661
+ 1st dimension: channels (of size d)
1662
+ 2nd dimension: rows (of size r)
1663
+ 3rd dimension: columns (of size c)
1664
+ 4th-Nth dimension: time (for video) or batch (for bulk processing).
1665
+
1666
+ *) RowMajor (reverse order of ColMajor)
1667
+ 1st-Nth dimension: time (for video) or batch (for bulk processing).
1668
+ N+1'th dimension: columns (of size c)
1669
+ N+2'th dimension: rows (of size r)
1670
+ N+3'th dimension: channels (of size d)
1671
+
1672
+ The returned tensor has one greater dimension than the input tensor, which is
1673
+ used to index each patch. The patch index in the output tensor depends on the
1674
+ data layout of the input tensor: the patch index is the 4'th dimension in
1675
+ ColMajor layout, and the 4'th from the last dimension in RowMajor layout.
1676
+
1677
+ For example, given the following input tensor with the following dimension
1678
+ sizes:
1679
+ *) depth: 2
1680
+ *) rows: 3
1681
+ *) columns: 5
1682
+ *) batch: 7
1683
+
1684
+ Tensor<float, 4> tensor(2,3,5,7);
1685
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
1686
+
1687
+ 2x2 image patches can be extracted and indexed using the following code:
1688
+
1689
+ *) 2D patch: ColMajor (patch indexed by second-to-last dimension)
1690
+ Tensor<float, 5> twod_patch;
1691
+ twod_patch = tensor.extract_image_patches<2, 2>();
1692
+ // twod_patch.dimension(0) == 2
1693
+ // twod_patch.dimension(1) == 2
1694
+ // twod_patch.dimension(2) == 2
1695
+ // twod_patch.dimension(3) == 3*5
1696
+ // twod_patch.dimension(4) == 7
1697
+
1698
+ *) 2D patch: RowMajor (patch indexed by the second dimension)
1699
+ Tensor<float, 5, RowMajor> twod_patch_row_major;
1700
+ twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>();
1701
+ // twod_patch_row_major.dimension(0) == 7
1702
+ // twod_patch_row_major.dimension(1) == 3*5
1703
+ // twod_patch_row_major.dimension(2) == 2
1704
+ // twod_patch_row_major.dimension(3) == 2
1705
+ // twod_patch_row_major.dimension(4) == 2
1706
+
1707
+ ## Special Operations
1708
+
1709
+ ### `<Operation> cast<T>()`
1710
+
1711
+ Returns a tensor of type T with the same dimensions as the original tensor.
1712
+ The returned tensor contains the values of the original tensor converted to
1713
+ type T.
1714
+
1715
+ Eigen::Tensor<float, 2> a(2, 3);
1716
+ Eigen::Tensor<int, 2> b = a.cast<int>();
1717
+
1718
+ This can be useful for example if you need to do element-wise division of
1719
+ Tensors of integers. This is not currently supported by the Tensor library
1720
+ but you can easily cast the tensors to floats to do the division:
1721
+
1722
+ Eigen::Tensor<int, 2> a(2, 3);
1723
+ a.setValues({{0, 1, 2}, {3, 4, 5}});
1724
+ Eigen::Tensor<int, 2> b =
1725
+ (a.cast<float>() / a.constant(2).cast<float>()).cast<int>();
1726
+ cout << "a" << endl << a << endl << endl;
1727
+ cout << "b" << endl << b << endl << endl;
1728
+ =>
1729
+ a
1730
+ 0 1 2
1731
+ 3 4 5
1732
+
1733
+ b
1734
+ 0 0 1
1735
+ 1 2 2
1736
+
1737
+
1738
+ ### `<Operation> eval()`
1739
+
1740
+ TODO
1741
+
1742
+
1743
+ ## Representation of scalar values
1744
+
1745
+ Scalar values are often represented by tensors of size 1 and rank 0. For example
1746
+ Tensor<T, N>::maximum() currently returns a Tensor<T, 0>. Similarly, the inner
1747
+ product of 2 1d tensors (through contractions) returns a 0d tensor.
1748
+
1749
+ ## Limitations
1750
+
1751
+ * The number of tensor dimensions is currently limited to 250 when using a
1752
+ compiler that supports cxx11. It is limited to only 5 for older compilers.
1753
+ * The IndexList class requires a cxx11 compliant compiler. You can use an
1754
+ array of indices instead if you don't have access to a modern compiler.
1755
+ * On GPUs only floating point values are properly tested and optimized for.
1756
+ * Complex and integer values are known to be broken on GPUs. If you try to use
1757
+ them you'll most likely end up triggering a static assertion failure such as
1758
+ EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
1759
+
1760
+