tomoto 0.2.3 → 0.3.0

Files changed (347)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +6 -0
  3. data/README.md +8 -10
  4. data/ext/tomoto/extconf.rb +6 -2
  5. data/ext/tomoto/{ext.cpp → tomoto.cpp} +1 -1
  6. data/lib/tomoto/version.rb +1 -1
  7. data/lib/tomoto.rb +5 -1
  8. data/vendor/EigenRand/EigenRand/Core.h +10 -10
  9. data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
  10. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
  11. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
  12. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
  13. data/vendor/EigenRand/EigenRand/EigenRand +11 -6
  14. data/vendor/EigenRand/EigenRand/Macro.h +13 -7
  15. data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
  16. data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
  17. data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
  18. data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
  19. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
  20. data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
  21. data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
  22. data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
  23. data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
  24. data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
  25. data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
  26. data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
  27. data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
  28. data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
  29. data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
  30. data/vendor/EigenRand/EigenRand/doc.h +24 -12
  31. data/vendor/EigenRand/README.md +57 -4
  32. data/vendor/eigen/COPYING.APACHE +203 -0
  33. data/vendor/eigen/COPYING.BSD +1 -1
  34. data/vendor/eigen/COPYING.MINPACK +51 -52
  35. data/vendor/eigen/Eigen/Cholesky +0 -1
  36. data/vendor/eigen/Eigen/Core +112 -265
  37. data/vendor/eigen/Eigen/Eigenvalues +2 -3
  38. data/vendor/eigen/Eigen/Geometry +5 -8
  39. data/vendor/eigen/Eigen/Householder +0 -1
  40. data/vendor/eigen/Eigen/Jacobi +0 -1
  41. data/vendor/eigen/Eigen/KLUSupport +41 -0
  42. data/vendor/eigen/Eigen/LU +2 -5
  43. data/vendor/eigen/Eigen/OrderingMethods +0 -3
  44. data/vendor/eigen/Eigen/PaStiXSupport +1 -0
  45. data/vendor/eigen/Eigen/PardisoSupport +0 -0
  46. data/vendor/eigen/Eigen/QR +2 -3
  47. data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
  48. data/vendor/eigen/Eigen/SVD +0 -1
  49. data/vendor/eigen/Eigen/Sparse +0 -2
  50. data/vendor/eigen/Eigen/SparseCholesky +0 -8
  51. data/vendor/eigen/Eigen/SparseLU +4 -0
  52. data/vendor/eigen/Eigen/SparseQR +0 -1
  53. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
  54. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
  55. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
  56. data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
  57. data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
  58. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
  59. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
  60. data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
  61. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
  62. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
  63. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
  64. data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
  65. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
  66. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
  67. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
  68. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
  69. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
  70. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
  71. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
  72. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
  73. data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
  74. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
  75. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
  76. data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
  77. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
  78. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
  79. data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
  80. data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
  81. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
  82. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
  83. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
  84. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
  85. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
  86. data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
  87. data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
  88. data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
  89. data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
  90. data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
  91. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
  92. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
  93. data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
  94. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
  95. data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
  96. data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
  97. data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
  98. data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  99. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
  100. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
  101. data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
  102. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
  103. data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
  104. data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
  105. data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
  106. data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
  107. data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
  108. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
  109. data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
  110. data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
  111. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
  112. data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
  113. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
  114. data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
  115. data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
  116. data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
  117. data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
  118. data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
  119. data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
  120. data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
  121. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
  122. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
  123. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
  124. data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
  125. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
  126. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
  127. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
  128. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
  129. data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  130. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
  131. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
  132. data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  133. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
  134. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
  135. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  136. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  137. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  138. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
  139. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
  140. data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  141. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
  142. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  143. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
  145. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
  146. data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
  148. data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  149. data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  153. data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  154. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
  155. data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  156. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
  157. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
  158. data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
  160. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
  161. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
  162. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
  163. data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  164. data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  165. data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  166. data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  167. data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  168. data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  169. data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  170. data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  171. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
  172. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
  173. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
  174. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
  175. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
  176. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
  177. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
  178. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
  179. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
  180. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
  181. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
  182. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
  183. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
  184. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
  185. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
  186. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
  187. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
  188. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
  189. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
  190. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
  191. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
  192. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
  193. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
  194. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
  195. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
  196. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
  197. data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  198. data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
  199. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
  200. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
  201. data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  202. data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
  203. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
  204. data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
  205. data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
  206. data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
  207. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
  208. data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  209. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
  210. data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  211. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
  212. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
  213. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
  214. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
  215. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
  216. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
  217. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
  218. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
  219. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
  220. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
  221. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
  222. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
  223. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
  224. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
  225. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
  226. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
  227. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
  228. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
  229. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
  230. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
  231. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
  232. data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
  233. data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
  234. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
  235. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  236. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
  237. data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
  238. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
  239. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
  240. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
  241. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
  242. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
  243. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
  244. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
  245. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
  246. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
  247. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
  248. data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  249. data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
  250. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
  251. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
  252. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
  253. data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
  254. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
  255. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
  256. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
  257. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
  258. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
  259. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
  260. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
  261. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
  262. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
  263. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
  264. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
  265. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
  266. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
  267. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
  268. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
  269. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
  270. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
  271. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
  272. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
  273. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
  274. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
  275. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
  276. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
  277. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
  278. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
  279. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
  280. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
  281. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
  282. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
  283. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
  284. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
  285. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
  286. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
  287. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
  288. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
  289. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
  290. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
  291. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
  292. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
  293. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
  294. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
  295. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
  296. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
  297. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
  298. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
  299. data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
  300. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
  301. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
  302. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
  303. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
  304. data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  305. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
  306. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
  307. data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
  308. data/vendor/eigen/README.md +2 -0
  309. data/vendor/eigen/bench/btl/README +1 -1
  310. data/vendor/eigen/bench/tensors/README +6 -7
  311. data/vendor/eigen/ci/README.md +56 -0
  312. data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
  313. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
  314. data/vendor/eigen/unsupported/README.txt +1 -1
  315. data/vendor/tomotopy/README.kr.rst +21 -0
  316. data/vendor/tomotopy/README.rst +20 -0
  317. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
  318. data/vendor/tomotopy/src/Labeling/Phraser.hpp +1 -1
  319. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +2 -1
  320. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +2 -1
  321. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +1 -1
  322. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
  323. data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
  324. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +53 -2
  325. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +1 -1
  326. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +1 -0
  327. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +2 -2
  328. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +16 -5
  329. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +1 -0
  330. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +1 -0
  331. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +1 -0
  332. data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
  333. data/vendor/tomotopy/src/TopicModel/PTModel.hpp +31 -1
  334. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +2 -2
  335. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +7 -5
  336. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
  337. data/vendor/tomotopy/src/Utils/exception.h +6 -0
  338. data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
  339. data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
  340. metadata +60 -14
  341. data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
  342. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
  343. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
  344. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
  345. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
  346. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
  347. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
@@ -3,8 +3,6 @@
  Tensors are multidimensional arrays of elements. Elements are typically scalars,
  but more complex types such as strings are also supported.

- [TOC]
-
  ## Tensor Classes

  You can manipulate a tensor with one of the following classes. They all are in
@@ -21,7 +19,7 @@ matrix.
  Tensors of this class are resizable. For example, if you assign a tensor of a
  different size to a Tensor, that tensor is resized to match its new value.

- #### Constructor `Tensor<data_type, rank>(size0, size1, ...)`
+ #### Constructor Tensor<data_type, rank>(size0, size1, ...)

  Constructor for a Tensor. The constructor must be passed `rank` integers
  indicating the sizes of the instance along each of the `rank`
@@ -34,7 +32,7 @@ dimensions.
  // Resize t_3d by assigning a tensor of different sizes, but same rank.
  t_3d = Tensor<float, 3>(3, 4, 3);

- #### Constructor `Tensor<data_type, rank>(size_array)`
+ #### Constructor Tensor<data_type, rank>(size_array)

  Constructor where the sizes for the constructor are specified as an array of
  values instead of an explicit list of parameters. The array type to use is
@@ -45,7 +43,7 @@ from an initializer list.
  Tensor<string, 2> t_2d({5, 7});


- ### Class `TensorFixedSize<data_type, Sizes<size0, size1, ...>>`
+ ### Class TensorFixedSize<data_type, Sizes<size0, size1, ...>>

  Class to use for tensors of fixed size, where the size is known at compile
  time. Fixed sized tensors can provide very fast computations because all their
@@ -57,7 +55,7 @@ tensor data is held onto the stack and does not cause heap allocation and free.
  // Create a 4 x 3 tensor of floats.
  TensorFixedSize<float, Sizes<4, 3>> t_4x3;

- ### Class `TensorMap<Tensor<data_type, rank>>`
+ ### Class TensorMap<Tensor<data_type, rank>>

  This is the class to use to create a tensor on top of memory allocated and
  owned by another part of your code. It allows you to view any piece of allocated
@@ -67,7 +65,7 @@ data are stored.
  A TensorMap is not resizable because it does not own the memory where its data
  are stored.

- #### Constructor `TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)`
+ #### Constructor TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)

  Constructor for a Tensor. The constructor must be passed a pointer to the
  storage for the data, and "rank" size attributes. The storage has to be
@@ -83,17 +81,17 @@ large enough to hold all the data.

  // You can also map fixed-size tensors. Here we get a 1d view of
  // the 2d fixed-size tensor.
- TensorFixedSize<float, Sizes<4, 5>> t_4x3;
+ TensorFixedSize<float, Sizes<4, 3>> t_4x3;
  TensorMap<Tensor<float, 1>> t_12(t_4x3.data(), 12);


- #### Class `TensorRef`
+ #### Class TensorRef

  See Assigning to a TensorRef below.

  ## Accessing Tensor Elements

- #### `<data_type> tensor(index0, index1...)`
+ #### <data_type> tensor(index0, index1...)

  Return the element at position `(index0, index1...)` in tensor
  `tensor`. You must pass as many parameters as the rank of `tensor`.
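Aside: several of the hunks above touch the TensorMap docs. A minimal self-contained sketch of the pattern being documented (the buffer and names below are ours, not from the diff):

    // Wrap caller-owned memory as a 2 x 6 tensor; no copy is made.
    float storage[12] = {0};
    Eigen::TensorMap<Eigen::Tensor<float, 2>> t_2x6(storage, 2, 6);
    t_2x6(0, 1) = 3.0f;  // writes straight through to `storage`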
@@ -278,7 +276,7 @@ Similarly, assigning an expression to a TensorMap causes its evaluation. Like
  tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to
  have the rank and sizes of the expression that are assigned to them.

- #### Calling `eval()`.
+ #### Calling eval().

  When you compute large composite expressions, you sometimes want to tell Eigen
  that an intermediate value in the expression tree is worth evaluating ahead of
@@ -355,7 +353,7 @@ call for the right hand side:
  (Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval();


- #### Assigning to a `TensorRef`.
+ #### Assigning to a TensorRef.

  If you need to access only a few elements from the value of an expression you
  can avoid materializing the value in a full tensor by using a TensorRef.
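The TensorRef idiom renamed in this hunk can be exercised as follows (a hedged sketch; the expression is illustrative):

    // Wrap an unevaluated expression; coefficients are computed on access.
    Eigen::Tensor<float, 2> a(100, 100), b(100, 100);
    a.setRandom(); b.setRandom();
    Eigen::TensorRef<Eigen::Tensor<float, 2>> ref = a * b;
    float v = ref(3, 7);  // evaluates just this coefficient, not the full result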
@@ -430,8 +428,11 @@ This is exactly the same as not inserting a `device()` call.

  #### Evaluating with a Thread Pool

+ // Create the Eigen ThreadPool
+ Eigen::ThreadPool pool(8 /* number of threads in pool */)
+
  // Create the Eigen ThreadPoolDevice.
- Eigen::ThreadPoolDevice my_device(4 /* number of threads to use */);
+ Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads to use */);

  // Now just use the device when evaluating expressions.
  Eigen::Tensor<float, 2> c(30, 50);
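The hunk above reflects the newer Eigen API in which ThreadPoolDevice borrows an externally owned pool instead of creating its own. A self-contained sketch of the corrected usage, assuming the unsupported Tensor module headers (note the trailing semicolon the added diff line omits; the contraction is our own example):

    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    // The pool is owned by the caller; the device only borrows it.
    Eigen::ThreadPool pool(8 /* number of threads in pool */);
    Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads to use */);

    // Evaluate a tensor expression on the pool.
    Eigen::Tensor<float, 2> a(30, 40), b(40, 50), c(30, 50);
    a.setRandom(); b.setRandom();
    Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
    c.device(my_device) = a.contract(b, dims);  // matrix product on the pool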
@@ -452,24 +453,24 @@ memory for tensors with cuda.
  In the documentation of the tensor methods and Operation we mention datatypes
  that are tensor-type specific:

- #### `<Tensor-Type>::``Dimensions`
+ #### <Tensor-Type>::Dimensions

  Acts like an array of ints. Has an `int size` attribute, and can be
  indexed like an array to access individual values. Used to represent the
  dimensions of a tensor. See `dimensions()`.

- #### `<Tensor-Type>::``Index`
+ #### <Tensor-Type>::Index

  Acts like an `int`. Used for indexing tensors along their dimensions. See
  `operator()`, `dimension()`, and `size()`.

- #### `<Tensor-Type>::``Scalar`
+ #### <Tensor-Type>::Scalar

  Represents the datatype of individual tensor elements. For example, for a
  `Tensor<float>`, `Scalar` is the type `float`. See
  `setConstant()`.

- #### `<Operation>`
+ #### <Operation>

  We use this pseudo type to indicate that a tensor Operation is returned by a
  method. We indicate in the text the type and dimensions of the tensor that the
@@ -489,7 +490,7 @@ Tensor, TensorFixedSize, and TensorMap.

  ## Metadata

- ### `int NumDimensions`
+ ### int NumDimensions

  Constant value indicating the number of dimensions of a Tensor. This is also
  known as the tensor "rank".
@@ -498,7 +499,7 @@ known as the tensor "rank".
  cout << "Dims " << a.NumDimensions;
  => Dims 2

- ### `Dimensions dimensions()`
+ ### Dimensions dimensions()

  Returns an array-like object representing the dimensions of the tensor.
  The actual type of the `dimensions()` result is `<Tensor-Type>::``Dimensions`.
@@ -516,7 +517,7 @@ If you use a C++11 compiler, you can use `auto` to simplify the code:
  << ", dim 1: " << d[1];
  => Dim size: 2, dim 0: 3, dim 1: 4

- ### `Index dimension(Index n)`
+ ### Index dimension(Index n)

  Returns the n-th dimension of the tensor. The actual type of the
  `dimension()` result is `<Tensor-Type>::``Index`, but you can
@@ -527,7 +528,7 @@ always use it like an int.
  cout << "Dim 1: " << dim1;
  => Dim 1: 4

- ### `Index size()`
+ ### Index size()

  Returns the total number of elements in the tensor. This is the product of all
  the tensor dimensions. The actual type of the `size()` result is
@@ -602,7 +603,7 @@ You can use one of the methods below to initialize the tensor memory. These
  have an immediate effect on the tensor and return the tensor itself as a
  result. These are not tensor Operations which delay evaluation.

- ### `<Tensor-Type> setConstant(const Scalar& val)`
+ ### <Tensor-Type> setConstant(const Scalar& val)

  Sets all elements of the tensor to the constant value `val`. `Scalar`
  is the type of data stored in the tensor. You can pass any value that is
@@ -630,7 +631,7 @@ has a copy constructor and an `operator=()`:
  yolo yolo yolo


- ### `<Tensor-Type> setZero()`
+ ### <Tensor-Type> setZero()

  Fills the tensor with zeros. Equivalent to `setConstant(Scalar(0))`.
  Returns the tensor itself in case you want to chain another call.
@@ -644,7 +645,7 @@ Returns the tensor itself in case you want to chain another call.
  0 0 0 0


- ### `<Tensor-Type> setValues({..initializer_list})`
+ ### <Tensor-Type> setValues({..initializer_list})

  Fills the tensor with explicit values specified in a std::initializer_list.
  The type of the initializer list depends on the type and rank of the tensor.
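To make the setValues() signature above concrete, a small sketch (the values are ours): the initializer list nests one level of braces per dimension of the tensor.

    // Rank-2 tensor: a nested initializer list, one row per first-dimension index.
    Eigen::Tensor<int, 2> t(2, 3);
    t.setValues({{1, 2, 3},
                 {4, 5, 6}});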
@@ -680,7 +681,7 @@ code only sets the values of the first row of the tensor.
  10 20 30
  1000 1000 1000

- ### `<Tensor-Type> setRandom()`
+ ### <Tensor-Type> setRandom()

  Fills the tensor with random values. Returns the tensor itself in case you
  want to chain another call.
@@ -747,7 +748,7 @@ values of a tensor expression, the expression must either be evaluated or
  wrapped in a TensorRef.


- ### `Scalar* data()` and `const Scalar* data() const`
+ ### Scalar* data() and const Scalar* data() const

  Returns a pointer to the storage for the tensor. The pointer is const if the
  tensor was const. This allows direct access to the data. The layout of the
@@ -775,7 +776,7 @@ The chain of Operation is evaluated lazily, typically when it is assigned to a
  tensor. See "Controlling when Expression are Evaluated" for more details about
  their evaluation.

- ### `<Operation> constant(const Scalar& val)`
+ ### <Operation> constant(const Scalar& val)

  Returns a tensor of the same type and dimensions as the original tensor but
  where all elements have the value `val`.
@@ -803,7 +804,7 @@ tensor, or multiply every element of a tensor by a scalar.
  0.6 0.6 0.6
  0.6 0.6 0.6

- ### `<Operation> random()`
+ ### <Operation> random()

  Returns a tensor of the same type and dimensions as the current tensor
  but where all elements have random values.
@@ -833,7 +834,7 @@ All these operations take a single input tensor as argument and return a tensor
  of the same type and dimensions as the tensor to which they are applied. The
  requested operations are applied to each element independently.

- ### `<Operation> operator-()`
+ ### <Operation> operator-()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the opposite values of the original tensor.
@@ -852,42 +853,42 @@ containing the opposite values of the original tensor.
  -1 -1 -1
  -1 -1 -1

- ### `<Operation> sqrt()`
+ ### <Operation> sqrt()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the square roots of the original tensor.

- ### `<Operation> rsqrt()`
+ ### <Operation> rsqrt()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the inverse square roots of the original tensor.

- ### `<Operation> square()`
+ ### <Operation> square()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the squares of the original tensor values.

- ### `<Operation> inverse()`
+ ### <Operation> inverse()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the inverse of the original tensor values.

- ### `<Operation> exp()`
+ ### <Operation> exp()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the exponential of the original tensor.

- ### `<Operation> log()`
+ ### <Operation> log()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the natural logarithms of the original tensor.

- ### `<Operation> abs()`
+ ### <Operation> abs()

  Returns a tensor of the same type and dimensions as the original tensor
  containing the absolute values of the original tensor.

- ### `<Operation> pow(Scalar exponent)`
+ ### <Operation> pow(Scalar exponent)

  Returns a tensor of the same type and dimensions as the original tensor
  containing the coefficients of the original tensor to the power of the
@@ -914,17 +915,17 @@ cubic roots of an int Tensor:
  0 1 2
  3 4 5

- ### `<Operation> operator * (Scalar scale)`
+ ### <Operation> operator * (Scalar scale)

  Multiplies all the coefficients of the input tensor by the provided scale.

- ### `<Operation> cwiseMax(Scalar threshold)`
+ ### <Operation> cwiseMax(Scalar threshold)
  TODO

- ### `<Operation> cwiseMin(Scalar threshold)`
+ ### <Operation> cwiseMin(Scalar threshold)
  TODO

- ### `<Operation> unaryExpr(const CustomUnaryOp& func)`
+ ### <Operation> unaryExpr(const CustomUnaryOp& func)
  TODO

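cwiseMax, cwiseMin, and unaryExpr are still marked TODO in the file being diffed. A hedged sketch of what unaryExpr usage looks like with a lambda (the clamp functor is our own example):

    // Apply an arbitrary element-wise functor: clamp values into [0, 1].
    Eigen::Tensor<float, 2> a(2, 3);
    a.setRandom();
    Eigen::Tensor<float, 2> clamped =
        a.unaryExpr([](float x) { return x < 0.f ? 0.f : (x > 1.f ? 1.f : x); });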
@@ -936,39 +937,39 @@ dimensions as the tensors to which they are applied, and unless otherwise
  specified it is also of the same type. The requested operations are applied to
  each pair of elements independently.

- ### `<Operation> operator+(const OtherDerived& other)`
+ ### <Operation> operator+(const OtherDerived& other)

  Returns a tensor of the same type and dimensions as the input tensors
  containing the coefficient wise sums of the inputs.

- ### `<Operation> operator-(const OtherDerived& other)`
+ ### <Operation> operator-(const OtherDerived& other)

  Returns a tensor of the same type and dimensions as the input tensors
  containing the coefficient wise differences of the inputs.

- ### `<Operation> operator*(const OtherDerived& other)`
+ ### <Operation> operator*(const OtherDerived& other)

  Returns a tensor of the same type and dimensions as the input tensors
  containing the coefficient wise products of the inputs.

- ### `<Operation> operator/(const OtherDerived& other)`
+ ### <Operation> operator/(const OtherDerived& other)

  Returns a tensor of the same type and dimensions as the input tensors
  containing the coefficient wise quotients of the inputs.

  This operator is not supported for integer types.

- ### `<Operation> cwiseMax(const OtherDerived& other)`
+ ### <Operation> cwiseMax(const OtherDerived& other)

  Returns a tensor of the same type and dimensions as the input tensors
  containing the coefficient wise maximums of the inputs.

- ### `<Operation> cwiseMin(const OtherDerived& other)`
+ ### <Operation> cwiseMin(const OtherDerived& other)

  Returns a tensor of the same type and dimensions as the input tensors
  containing the coefficient wise minimums of the inputs.

- ### `<Operation> Logical operators`
+ ### <Operation> Logical operators

  The following logical operators are supported as well:

@@ -1126,55 +1127,107 @@ scalar, represented as a zero-dimension tensor.
  276


- ### `<Operation> sum(const Dimensions& new_dims)`
- ### `<Operation> sum()`
+ ### <Operation> sum(const Dimensions& new_dims)
+ ### <Operation> sum()

  Reduce a tensor using the sum() operator. The resulting values
  are the sum of the reduced values.

- ### `<Operation> mean(const Dimensions& new_dims)`
- ### `<Operation> mean()`
+ ### <Operation> mean(const Dimensions& new_dims)
+ ### <Operation> mean()

  Reduce a tensor using the mean() operator. The resulting values
  are the mean of the reduced values.

- ### `<Operation> maximum(const Dimensions& new_dims)`
- ### `<Operation> maximum()`
+ ### <Operation> maximum(const Dimensions& new_dims)
+ ### <Operation> maximum()

  Reduce a tensor using the maximum() operator. The resulting values are the
  largest of the reduced values.

- ### `<Operation> minimum(const Dimensions& new_dims)`
- ### `<Operation> minimum()`
+ ### <Operation> minimum(const Dimensions& new_dims)
+ ### <Operation> minimum()

  Reduce a tensor using the minimum() operator. The resulting values
  are the smallest of the reduced values.

- ### `<Operation> prod(const Dimensions& new_dims)`
- ### `<Operation> prod()`
+ ### <Operation> prod(const Dimensions& new_dims)
+ ### <Operation> prod()

  Reduce a tensor using the prod() operator. The resulting values
  are the product of the reduced values.

- ### `<Operation> all(const Dimensions& new_dims)`
- ### `<Operation> all()`
+ ### <Operation> all(const Dimensions& new_dims)
+ ### <Operation> all()
  Reduce a tensor using the all() operator. Casts tensor to bool and then checks
  whether all elements are true. Runs through all elements rather than
  short-circuiting, so may be significantly inefficient.

- ### `<Operation> any(const Dimensions& new_dims)`
- ### `<Operation> any()`
+ ### <Operation> any(const Dimensions& new_dims)
+ ### <Operation> any()
  Reduce a tensor using the any() operator. Casts tensor to bool and then checks
  whether any element is true. Runs through all elements rather than
  short-circuiting, so may be significantly inefficient.


- ### `<Operation> reduce(const Dimensions& new_dims, const Reducer& reducer)`
+ ### <Operation> reduce(const Dimensions& new_dims, const Reducer& reducer)

  Reduce a tensor using a user-defined reduction operator. See `SumReducer`
  in TensorFunctors.h for information on how to implement a reduction operator.


+ ## Trace
+
+ A *Trace* operation returns a tensor with fewer dimensions than the original
+ tensor. It returns a tensor whose elements are the sum of the elements of the
+ original tensor along the main diagonal for a list of specified dimensions, the
+ "trace dimensions". Similar to the `Reduction Dimensions`, the trace dimensions
+ are passed as an input parameter to the operation, are of type `<TensorType>::Dimensions`,
+ and have the same requirements when passed as an input parameter. In addition,
+ the trace dimensions must have the same size.
+
+ Example: Trace along 2 dimensions.
+
+ // Create a tensor of 3 dimensions
+ Eigen::Tensor<int, 3> a(2, 2, 3);
+ a.setValues({{{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}});
+ // Specify the dimensions along which the trace will be computed.
+ // In this example, the trace can only be computed along the dimensions
+ // with indices 0 and 1
+ Eigen::array<int, 2> dims({0, 1});
+ // The output tensor contains all but the trace dimensions.
+ Tensor<int, 1> a_trace = a.trace(dims);
+ cout << "a_trace:" << endl;
+ cout << a_trace << endl;
+ =>
+ a_trace:
+ 11
+ 13
+ 15
+
+
+ ### <Operation> trace(const Dimensions& new_dims)
+ ### <Operation> trace()
+
+ As a special case, if no parameter is passed to the operation, trace is computed
+ along *all* dimensions of the input tensor.
+
+ Example: Trace along all dimensions.
+
+ // Create a tensor of 3 dimensions, with all dimensions having the same size.
+ Eigen::Tensor<int, 3> a(3, 3, 3);
+ a.setValues({{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}},
+              {{10, 11, 12}, {13, 14, 15}, {16, 17, 18}},
+              {{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}});
+ // Result is a zero dimension tensor
+ Tensor<int, 0> a_trace = a.trace();
+ cout << "a_trace:" << endl;
+ cout << a_trace << endl;
+ =>
+ a_trace:
+ 42
+
+
  ## Scan Operations

  A *Scan* operation returns a tensor with the same dimensions as the original
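Returning to the new Trace section above: for intuition, the rank-2 case reduces to the familiar matrix trace (a hedged sketch; values are ours):

    // trace() with no arguments sums along the diagonal of all dimensions.
    Eigen::Tensor<int, 2> m(2, 2);
    m.setValues({{1, 2}, {3, 4}});
    Eigen::Tensor<int, 0> t = m.trace();  // t() == 1 + 4 == 5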
@@ -1204,18 +1257,18 @@
  1 3 6
  4 9 15

- ### `<Operation> cumsum(const Index& axis)`
+ ### <Operation> cumsum(const Index& axis)

  Perform a scan by summing consecutive entries.

- ### `<Operation> cumprod(const Index& axis)`
+ ### <Operation> cumprod(const Index& axis)

  Perform a scan by multiplying consecutive entries.


  ## Convolutions

- ### `<Operation> convolve(const Kernel& kernel, const Dimensions& dims)`
+ ### <Operation> convolve(const Kernel& kernel, const Dimensions& dims)

  Returns a tensor that is the output of the convolution of the input tensor with the kernel,
  along the specified dimensions of the input tensor. The dimension size for dimensions of the output tensor
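A hedged sketch of the convolve() signature above, with sizes of our own choosing; along a convolved dimension, output size = input size - kernel size + 1, so a length-6 input with a length-3 kernel yields length 4:

    // 1-D convolution along dimension 0.
    Eigen::Tensor<float, 1> input(6), kernel(3);
    input.setRandom();
    kernel.setValues({1.0f, 2.0f, 1.0f});
    Eigen::array<ptrdiff_t, 1> dims({0});
    Eigen::Tensor<float, 1> output = input.convolve(kernel, dims);  // size 4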
@@ -1258,7 +1311,7 @@ These operations return a Tensor with different dimensions than the original
  Tensor. They can be used to access slices of tensors, see them with different
  dimensions, or pad tensors with additional data.

- ### `<Operation> reshape(const Dimensions& new_dims)`
+ ### <Operation> reshape(const Dimensions& new_dims)

  Returns a view of the input tensor that has been reshaped to the specified
  new dimensions. The argument new_dims is an array of Index values. The
@@ -1337,7 +1390,7 @@ Note that "b" itself was not reshaped but that instead the assignment is done to
  the reshape view of b.


- ### `<Operation> shuffle(const Shuffle& shuffle)`
+ ### <Operation> shuffle(const Shuffle& shuffle)

  Returns a copy of the input tensor whose dimensions have been
  reordered according to the specified permutation. The argument shuffle
@@ -1378,7 +1431,7 @@ Let's rewrite the previous example to take advantage of this feature:
  output.shuffle({2, 0, 1}) = input;


- ### `<Operation> stride(const Strides& strides)`
+ ### <Operation> stride(const Strides& strides)

  Returns a view of the input tensor that strides (skips stride-1
  elements) along each of the dimensions. The argument strides is an
@@ -1404,7 +1457,7 @@ It is possible to assign a tensor to a stride:
  output.stride({2, 3, 4}) = input;


- ### `<Operation> slice(const StartIndices& offsets, const Sizes& extents)`
+ ### <Operation> slice(const StartIndices& offsets, const Sizes& extents)

  Returns a sub-tensor of the given tensor. For each dimension i, the slice is
  made of the coefficients stored between offset[i] and offset[i] + extents[i] in
@@ -1430,7 +1483,7 @@ the input tensor.
  600 700


- ### `<Operation> chip(const Index offset, const Index dim)`
+ ### <Operation> chip(const Index offset, const Index dim)

  A chip is a special kind of slice. It is the subtensor at the given offset in
  the dimension dim. The returned tensor has one fewer dimension than the input
@@ -1481,7 +1534,7 @@ lvalue. For example:
  0 0 0


- ### `<Operation> reverse(const ReverseDimensions& reverse)`
+ ### <Operation> reverse(const ReverseDimensions& reverse)

  Returns a view of the input tensor that reverses the order of the coefficients
  along a subset of the dimensions. The argument reverse is an array of boolean
@@ -1511,7 +1564,7 @@ of a 2D tensor:
  0 100 200


- ### `<Operation> broadcast(const Broadcast& broadcast)`
+ ### <Operation> broadcast(const Broadcast& broadcast)

  Returns a view of the input tensor in which the input is replicated one to many
  times.
@@ -1535,11 +1588,11 @@ made in each of the dimensions.
  0 100 200 0 100 200
  300 400 500 300 400 500

- ### `<Operation> concatenate(const OtherDerived& other, Axis axis)`
+ ### <Operation> concatenate(const OtherDerived& other, Axis axis)

  TODO

- ### `<Operation> pad(const PaddingDimensions& padding)`
+ ### <Operation> pad(const PaddingDimensions& padding)

  Returns a view of the input tensor in which the input is padded with zeros.

@@ -1564,7 +1617,7 @@ Returns a view of the input tensor in which the input is padded with zeros.
  0 0 0 0


- ### `<Operation> extract_patches(const PatchDims& patch_dims)`
+ ### <Operation> extract_patches(const PatchDims& patch_dims)

  Returns a tensor of coefficient patches extracted from the input tensor, where
  each patch is of dimension specified by 'patch_dims'. The returned tensor has
@@ -1575,83 +1628,83 @@ dimension in RowMajor layout.

  For example, given the following input tensor:

- Eigen::Tensor<float, 2, DataLayout> tensor(3,4);
- tensor.setValues({{0.0f, 1.0f, 2.0f, 3.0f},
- {4.0f, 5.0f, 6.0f, 7.0f},
- {8.0f, 9.0f, 10.0f, 11.0f}});
+ Eigen::Tensor<float, 2, DataLayout> tensor(3,4);
+ tensor.setValues({{0.0f, 1.0f, 2.0f, 3.0f},
+ {4.0f, 5.0f, 6.0f, 7.0f},
+ {8.0f, 9.0f, 10.0f, 11.0f}});

- cout << "tensor: " << endl << tensor << endl;
- =>
- tensor:
- 0 1 2 3
- 4 5 6 7
- 8 9 10 11
+ cout << "tensor: " << endl << tensor << endl;
+ =>
+ tensor:
+ 0 1 2 3
+ 4 5 6 7
+ 8 9 10 11

  Six 2x2 patches can be extracted and indexed using the following code:

- Eigen::Tensor<float, 3, DataLayout> patch;
- Eigen::array<ptrdiff_t, 2> patch_dims;
- patch_dims[0] = 2;
- patch_dims[1] = 2;
- patch = tensor.extract_patches(patch_dims);
- for (int k = 0; k < 6; ++k) {
- cout << "patch index: " << k << endl;
- for (int i = 0; i < 2; ++i) {
- for (int j = 0; j < 2; ++j) {
- if (DataLayout == ColMajor) {
- cout << patch(i, j, k) << " ";
- } else {
- cout << patch(k, i, j) << " ";
- }
+ Eigen::Tensor<float, 3, DataLayout> patch;
+ Eigen::array<ptrdiff_t, 2> patch_dims;
+ patch_dims[0] = 2;
+ patch_dims[1] = 2;
+ patch = tensor.extract_patches(patch_dims);
+ for (int k = 0; k < 6; ++k) {
+ cout << "patch index: " << k << endl;
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ if (DataLayout == ColMajor) {
+ cout << patch(i, j, k) << " ";
+ } else {
+ cout << patch(k, i, j) << " ";
+ }
+ }
+ cout << endl;
  }
- cout << endl;
  }
- }

  This code results in the following output when the data layout is ColMajor:

- patch index: 0
- 0 1
- 4 5
- patch index: 1
- 4 5
- 8 9
- patch index: 2
- 1 2
- 5 6
- patch index: 3
- 5 6
- 9 10
- patch index: 4
- 2 3
- 6 7
- patch index: 5
- 6 7
- 10 11
+ patch index: 0
+ 0 1
+ 4 5
+ patch index: 1
+ 4 5
+ 8 9
+ patch index: 2
+ 1 2
+ 5 6
+ patch index: 3
+ 5 6
+ 9 10
+ patch index: 4
+ 2 3
+ 6 7
+ patch index: 5
+ 6 7
+ 10 11

  This code results in the following output when the data layout is RowMajor:
  (NOTE: the set of patches is the same as in ColMajor, but are indexed differently).

- patch index: 0
- 0 1
- 4 5
- patch index: 1
- 1 2
- 5 6
- patch index: 2
- 2 3
- 6 7
- patch index: 3
- 4 5
- 8 9
- patch index: 4
- 5 6
- 9 10
- patch index: 5
- 6 7
- 10 11
-
- ### `<Operation> extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type)`
+ patch index: 0
+ 0 1
+ 4 5
+ patch index: 1
+ 1 2
+ 5 6
+ patch index: 2
+ 2 3
+ 6 7
+ patch index: 3
+ 4 5
+ 8 9
+ patch index: 4
+ 5 6
+ 9 10
+ patch index: 5
+ 6 7
+ 10 11
+
+ ### <Operation> extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type)

  Returns a tensor of coefficient image patches extracted from the input tensor,
  which is expected to have dimensions ordered as follows (depending on the data
@@ -1681,32 +1734,34 @@ sizes:
  *) columns: 5
  *) batch: 7

- Tensor<float, 4> tensor(2,3,5,7);
- Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();
+ Tensor<float, 4> tensor(2,3,5,7);
+ Tensor<float, 4, RowMajor> tensor_row_major = tensor.swap_layout();

  2x2 image patches can be extracted and indexed using the following code:

  *) 2D patch: ColMajor (patch indexed by second-to-last dimension)
- Tensor<float, 5> twod_patch;
- twod_patch = tensor.extract_image_patches<2, 2>();
- // twod_patch.dimension(0) == 2
- // twod_patch.dimension(1) == 2
- // twod_patch.dimension(2) == 2
- // twod_patch.dimension(3) == 3*5
- // twod_patch.dimension(4) == 7
+
+ Tensor<float, 5> twod_patch;
+ twod_patch = tensor.extract_image_patches<2, 2>();
+ // twod_patch.dimension(0) == 2
+ // twod_patch.dimension(1) == 2
+ // twod_patch.dimension(2) == 2
+ // twod_patch.dimension(3) == 3*5
+ // twod_patch.dimension(4) == 7

  *) 2D patch: RowMajor (patch indexed by the second dimension)
- Tensor<float, 5, RowMajor> twod_patch_row_major;
- twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>();
- // twod_patch_row_major.dimension(0) == 7
- // twod_patch_row_major.dimension(1) == 3*5
- // twod_patch_row_major.dimension(2) == 2
- // twod_patch_row_major.dimension(3) == 2
- // twod_patch_row_major.dimension(4) == 2
+
+ Tensor<float, 5, RowMajor> twod_patch_row_major;
+ twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>();
+ // twod_patch_row_major.dimension(0) == 7
+ // twod_patch_row_major.dimension(1) == 3*5
+ // twod_patch_row_major.dimension(2) == 2
+ // twod_patch_row_major.dimension(3) == 2
+ // twod_patch_row_major.dimension(4) == 2

  ## Special Operations

- ### `<Operation> cast<T>()`
+ ### <Operation> cast<T>()

  Returns a tensor of type T with the same dimensions as the original tensor.
  The returned tensor contains the values of the original tensor converted to
  the new type T.
@@ -1735,7 +1790,7 @@ but you can easily cast the tensors to floats to do the division:
  1 2 2


- ### `<Operation> eval()`
+ ### <Operation> eval()

  TODO
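Closing out the cast<T>() hunk above, a hedged sketch of the int-to-float round trip whose output fragment the diff shows ("1 2 2"); the setup values are ours:

    Eigen::Tensor<int, 2> a(2, 3);
    a.setValues({{0, 1, 2}, {3, 4, 5}});
    // Cast to float, divide, then truncate back to int: 0 0 1 / 1 2 2.
    Eigen::Tensor<int, 2> b =
        (a.cast<float>() / a.constant(2).cast<float>()).cast<int>();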