tomoto 0.2.2 → 0.3.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (369) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/README.md +8 -10
  4. data/ext/tomoto/ct.cpp +11 -11
  5. data/ext/tomoto/dmr.cpp +14 -13
  6. data/ext/tomoto/dt.cpp +14 -14
  7. data/ext/tomoto/extconf.rb +7 -5
  8. data/ext/tomoto/gdmr.cpp +7 -7
  9. data/ext/tomoto/hdp.cpp +9 -9
  10. data/ext/tomoto/hlda.cpp +13 -13
  11. data/ext/tomoto/hpa.cpp +5 -5
  12. data/ext/tomoto/lda.cpp +42 -39
  13. data/ext/tomoto/llda.cpp +6 -6
  14. data/ext/tomoto/mglda.cpp +15 -15
  15. data/ext/tomoto/pa.cpp +6 -6
  16. data/ext/tomoto/plda.cpp +6 -6
  17. data/ext/tomoto/slda.cpp +8 -8
  18. data/ext/tomoto/{ext.cpp → tomoto.cpp} +8 -8
  19. data/ext/tomoto/utils.h +16 -70
  20. data/lib/tomoto/version.rb +1 -1
  21. data/lib/tomoto.rb +5 -1
  22. data/vendor/EigenRand/EigenRand/Core.h +10 -10
  23. data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
  24. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
  25. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
  26. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
  27. data/vendor/EigenRand/EigenRand/EigenRand +11 -6
  28. data/vendor/EigenRand/EigenRand/Macro.h +13 -7
  29. data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
  30. data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
  31. data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
  32. data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
  33. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
  34. data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
  35. data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
  36. data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
  37. data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
  38. data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
  39. data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
  40. data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
  41. data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
  42. data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
  43. data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
  44. data/vendor/EigenRand/EigenRand/doc.h +24 -12
  45. data/vendor/EigenRand/README.md +57 -4
  46. data/vendor/eigen/COPYING.APACHE +203 -0
  47. data/vendor/eigen/COPYING.BSD +1 -1
  48. data/vendor/eigen/COPYING.MINPACK +51 -52
  49. data/vendor/eigen/Eigen/Cholesky +0 -1
  50. data/vendor/eigen/Eigen/Core +112 -265
  51. data/vendor/eigen/Eigen/Eigenvalues +2 -3
  52. data/vendor/eigen/Eigen/Geometry +5 -8
  53. data/vendor/eigen/Eigen/Householder +0 -1
  54. data/vendor/eigen/Eigen/Jacobi +0 -1
  55. data/vendor/eigen/Eigen/KLUSupport +41 -0
  56. data/vendor/eigen/Eigen/LU +2 -5
  57. data/vendor/eigen/Eigen/OrderingMethods +0 -3
  58. data/vendor/eigen/Eigen/PaStiXSupport +1 -0
  59. data/vendor/eigen/Eigen/PardisoSupport +0 -0
  60. data/vendor/eigen/Eigen/QR +2 -3
  61. data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
  62. data/vendor/eigen/Eigen/SVD +0 -1
  63. data/vendor/eigen/Eigen/Sparse +0 -2
  64. data/vendor/eigen/Eigen/SparseCholesky +0 -8
  65. data/vendor/eigen/Eigen/SparseLU +4 -0
  66. data/vendor/eigen/Eigen/SparseQR +0 -1
  67. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
  68. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
  69. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
  70. data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
  71. data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
  72. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
  73. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
  74. data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
  75. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
  76. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
  77. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
  78. data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
  79. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
  80. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
  81. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
  82. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
  83. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
  84. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
  85. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
  86. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
  87. data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
  88. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
  89. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
  90. data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
  91. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
  92. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
  93. data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
  94. data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
  95. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
  96. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
  97. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
  98. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
  99. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
  100. data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
  101. data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
  102. data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
  103. data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
  104. data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
  105. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
  106. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
  107. data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
  108. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
  109. data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
  110. data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
  111. data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
  112. data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
  113. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
  114. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
  115. data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
  116. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
  117. data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
  118. data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
  119. data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
  120. data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
  121. data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
  122. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
  123. data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
  124. data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
  125. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
  126. data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
  127. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
  128. data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
  129. data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
  130. data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
  131. data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
  132. data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
  133. data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
  134. data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
  135. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
  136. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
  137. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
  138. data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
  139. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
  140. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
  141. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
  142. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
  143. data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
  145. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
  146. data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
  148. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
  149. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
  153. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
  154. data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
  155. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
  156. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
  157. data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
  158. data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
  160. data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  161. data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
  162. data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
  163. data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
  164. data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  165. data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
  166. data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  167. data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  168. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
  169. data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  170. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
  171. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
  172. data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
  173. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
  174. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
  175. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
  176. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
  177. data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  178. data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  179. data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  180. data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  181. data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  182. data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  183. data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  184. data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  185. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
  186. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
  187. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
  188. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
  189. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
  190. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
  191. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
  192. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
  193. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
  194. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
  195. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
  196. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
  197. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
  198. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
  199. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
  200. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
  201. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
  202. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
  203. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
  204. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
  205. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
  206. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
  207. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
  208. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
  209. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
  210. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
  211. data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
  212. data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
  213. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
  214. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
  215. data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
  216. data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
  217. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
  218. data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
  219. data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
  220. data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
  221. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
  222. data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
  223. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
  224. data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
  225. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
  226. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
  227. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
  228. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
  229. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
  230. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
  231. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
  232. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
  233. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
  234. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
  235. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
  236. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
  237. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
  238. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
  239. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
  240. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
  241. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
  242. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
  243. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
  244. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
  245. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
  246. data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
  247. data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
  248. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
  249. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
  250. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
  251. data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
  252. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
  253. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
  254. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
  255. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
  256. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
  257. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
  258. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
  259. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
  260. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
  261. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
  262. data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
  263. data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
  264. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
  265. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
  266. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
  267. data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
  268. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
  269. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
  270. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
  271. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
  272. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
  273. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
  274. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
  275. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
  276. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
  277. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
  278. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
  279. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
  280. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
  281. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
  282. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
  283. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
  284. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
  285. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
  286. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
  287. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
  288. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
  289. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
  290. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
  291. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
  292. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
  293. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
  294. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
  295. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
  296. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
  297. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
  298. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
  299. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
  300. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
  301. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
  302. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
  303. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
  304. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
  305. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
  306. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
  307. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
  308. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
  309. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
  310. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
  311. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
  312. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
  313. data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
  314. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
  315. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
  316. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
  317. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
  318. data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
  319. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
  320. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
  321. data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
  322. data/vendor/eigen/README.md +2 -0
  323. data/vendor/eigen/bench/btl/README +1 -1
  324. data/vendor/eigen/bench/tensors/README +6 -7
  325. data/vendor/eigen/ci/README.md +56 -0
  326. data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
  327. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
  328. data/vendor/eigen/unsupported/README.txt +1 -1
  329. data/vendor/tomotopy/README.kr.rst +78 -0
  330. data/vendor/tomotopy/README.rst +75 -0
  331. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
  332. data/vendor/tomotopy/src/Labeling/Phraser.hpp +4 -4
  333. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +7 -3
  334. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +7 -3
  335. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +6 -3
  336. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
  337. data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
  338. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +57 -6
  339. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +6 -3
  340. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +3 -2
  341. data/vendor/tomotopy/src/TopicModel/LDA.h +3 -3
  342. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +5 -5
  343. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +50 -19
  344. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +6 -2
  345. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +3 -2
  346. data/vendor/tomotopy/src/TopicModel/PAModel.hpp +1 -1
  347. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +6 -2
  348. data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
  349. data/vendor/tomotopy/src/TopicModel/PTModel.hpp +36 -3
  350. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +6 -3
  351. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +55 -26
  352. data/vendor/tomotopy/src/Utils/AliasMethod.hpp +5 -4
  353. data/vendor/tomotopy/src/Utils/Dictionary.h +2 -2
  354. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
  355. data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +1 -1
  356. data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +1 -1
  357. data/vendor/tomotopy/src/Utils/exception.h +6 -0
  358. data/vendor/tomotopy/src/Utils/math.h +2 -2
  359. data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
  360. data/vendor/tomotopy/src/Utils/serializer.hpp +30 -5
  361. data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
  362. metadata +64 -18
  363. data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
  364. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
  365. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
  366. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
  367. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
  368. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
  369. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
@@ -20,7 +20,7 @@ However, it:
20
20
  - must rely on Eigen,
21
21
  - must be highly related to math,
22
22
  - should have some general purpose in the sense that it could
23
- potentially become an offical Eigen module (or be merged into another one).
23
+ potentially become an official Eigen module (or be merged into another one).
24
24
 
25
25
  If in doubt, feel free to contact us. For instance, if your addon is too specific
26
26
  but it shows an interesting way of using Eigen, then it could be a nice demo.
@@ -198,6 +198,57 @@ add_doc은 `tomotopy.LDAModel.train`을 시작하기 전까지만 사용할 수
198
198
  infer 메소드는 `tomotopy.Document` 인스턴스 하나를 추론하거나 `tomotopy.Document` 인스턴스의 `list`를 추론하는데 사용할 수 있습니다.
199
199
  자세한 것은 `tomotopy.LDAModel.infer`을 참조하길 바랍니다.
200
200
 
201
+ Corpus와 transform
202
+ --------------------
203
+ `tomotopy`의 모든 토픽 모델들은 각자 별도의 내부적인 문헌 타입을 가지고 있습니다.
204
+ 그리고 이 문헌 타입들에 맞는 문헌들은 각 모델의 `add_doc` 메소드를 통해 생성될 수 있습니다.
205
+ 하지만 이 때문에 동일한 목록의 문헌들을 서로 다른 토픽 모델에 입력해야 하는 경우
206
+ 매 모델에 각 문헌을 추가할때마다 `add_doc`을 호출해야하기 때문에 비효율이 발생합니다.
207
+ 따라서 `tomotopy`에서는 여러 문헌을 묶어서 관리해주는 `tomotopy.utils.Corpus` 클래스를 제공합니다.
208
+ 토픽 모델 객체를 생성할때 `tomotopy.utils.Corpus`를 `__init__` 메소드의 `corpus` 인자로 넘겨줌으로써
209
+ 어떤 모델에든 쉽게 문헌들을 삽입할 수 있게 해줍니다.
210
+ `tomotopy.utils.Corpus`를 토픽 모델에 삽입하면 corpus 객체가 가지고 있는 문헌들 전부가 모델에 자동으로 삽입됩니다.
211
+
212
+ 그런데 일부 토픽 모델의 경우 문헌을 생성하기 위해 서로 다른 데이터를 요구합니다.
213
+ 예를 들어 `tomotopy.DMRModel`는 `metadata`라는 `str` 타입의 데이터를 요구하고,
214
+ `tomotopy.PLDAModel`는 `labels`라는 `List[str]` 타입의 데이터를 요구합니다.
215
+ 그러나 `tomotopy.utils.Corpus`는 토픽 모델에 종속되지 않은 독립적인 문헌 데이터를 보관하기 때문에,
216
+ corpus가 가지고 있는 문헌 데이터가 실제 토픽 모델이 요구하는 데이터와 일치하지 않을 가능성이 있습니다.
217
+ 이 경우 `transform`라는 인자를 통해 corpus 내의 데이터를 변형시켜 토픽 모델이 요구하는 실제 데이터와 일치시킬 수 있습니다.
218
+ 자세한 내용은 아래의 코드를 확인해주세요:
219
+
220
+ ::
221
+
222
+ from tomotopy import DMRModel
223
+ from tomotopy.utils import Corpus
224
+
225
+ corpus = Corpus()
226
+ corpus.add_doc("a b c d e".split(), a_data=1)
227
+ corpus.add_doc("e f g h i".split(), a_data=2)
228
+ corpus.add_doc("i j k l m".split(), a_data=3)
229
+
230
+ model = DMRModel(k=10)
231
+ model.add_corpus(corpus)
232
+ # `corpus`에 있던 `a_data`는 사라지고
233
+ # `DMRModel`이 요구하는 `metadata`에는 기본값인 빈 문자열이 채워집니다.
234
+
235
+ assert model.docs[0].metadata == ''
236
+ assert model.docs[1].metadata == ''
237
+ assert model.docs[2].metadata == ''
238
+
239
+ def transform_a_data_to_metadata(misc: dict):
240
+ return {'metadata': str(misc['a_data'])}
241
+ # 이 함수는 `a_data`를 `metadata`로 변환합니다.
242
+
243
+ model = DMRModel(k=10)
244
+ model.add_corpus(corpus, transform=transform_a_data_to_metadata)
245
+ # 이제 `model`에는 기본값이 아닌 `metadata`가 입력됩니다. 이들은 `transform`에 의해 `a_data`로부터 생성됩니다.
246
+
247
+ assert model.docs[0].metadata == '1'
248
+ assert model.docs[1].metadata == '2'
249
+ assert model.docs[2].metadata == '3'
250
+
251
+
201
252
  병렬 샘플링 알고리즘
202
253
  ----------------------------
203
254
  `tomotopy`는 0.5.0버전부터 병렬 알고리즘을 고를 수 있는 선택지를 제공합니다.
@@ -254,6 +305,33 @@ tomotopy의 Python3 예제 코드는 https://github.com/bab2min/tomotopy/blob/ma
254
305
 
255
306
  역사
256
307
  -------
308
+ * 0.12.3 (2022-07-19)
309
+ * 기능 개선
310
+ * 이제 `tomotopy.LDAModel.add_doc()`로 빈 문서를 삽입할 경우 예외를 발생시키는 대신 그냥 무시합니다. 새로 추가된 인자인 `ignore_empty_words`를 False로 설정할 경우 이전처럼 예외를 발생시킵니다.
311
+ * 유효하지 않은 토픽들을 모델에서 제거하는 `tomotopy.HDPModel.purge_dead_topics()` 메소드가 추가되었습니다.
312
+ * 버그 수정
313
+ * `tomotopy.SLDAModel`에서 `nuSq` 값을 지정할 때 발생하는 문제를 해결했습니다. (by @jucendrero)
314
+ * `tomotopy.utils.Coherence`가 `tomotopy.DTModel`에 대해서 작동하지 않는 문제를 해결했습니다.
315
+ * `train()` 호출 전에 `make_dic()`을 호출할 때 종종 크래시가 발생하는 문제를 해결했습니다.
316
+ * seed가 고정된 상태에서도 `tomotopy.DMRModel`, `tomotopy.GDMRModel`의 결과가 다르게 나오는 문제를 해결했습니다.
317
+ * `tomotopy.DMRModel`, `tomotopy.GDMRModel`의 파라미터 최적화 과정이 부정확하던 문제를 해결했습니다.
318
+ * `tomotopy.PTModel.copy()` 호출 시 종종 크래시가 발생하는 문제를 해결했습니다.
319
+ * 싱글스레드에서의 작동 효율을 개선했습니다.
320
+
321
+ * 0.12.2 (2021-09-06)
322
+ * `min_cf > 0`, `min_df > 0`나 `rm_top > 0`로 설정된 `tomotopy.HDPModel`에서 `convert_to_lda`를 호출할때 크래시가 발생하는 문제를 해결했습니다.
323
+ * `tomotopy.Document.get_topics`와 `tomotopy.Document.get_topic_dist`에 `from_pseudo_doc` 인자가 추가되었습니다.
324
+ 이 인자는 `PTModel`에 대해서만 유효하며, 이를 통해 토픽 분포를 구할 때 가상 문헌을 사용할지 여부를 선택할 수 있습니다.
325
+ * `tomotopy.PTModel` 생성시 기본 인자값이 변경되었습니다. `p`를 생략시 `k * 10`으로 설정됩니다.
326
+ * `make_doc`으로 생성한 문헌을 `infer` 없이 사용할 경우 발생하는 크래시를 해결하고 경고 메세지를 추가했습니다.
327
+ * 내부 C++코드가 clang c++17 환경에서 컴파일에 실패하는 문제를 해결했습니다.
328
+
329
+ * 0.12.1 (2021-06-20)
330
+ * `tomotopy.LDAModel.set_word_prior()`가 크래시를 발생시키던 문제를 해결했습니다.
331
+ * 이제 `tomotopy.LDAModel.perplexity`와 `tomotopy.LDAModel.ll_per_word`가 TermWeight가 ONE이 아닌 경우에도 정확한 값을 반환합니다.
332
+ * 용어가중치가 적용된 빈도수를 반환하는 `tomotopy.LDAModel.used_vocab_weighted_freq`가 추가되었습니다.
333
+ * 이제 `tomotopy.LDAModel.summary()`가 단어의 엔트로피뿐만 아니라, 용어 가중치가 적용된 단어의 엔트로피도 함께 보여줍니다.
334
+
257
335
  * 0.12.0 (2021-04-26)
258
336
  * 이제 `tomotopy.DMRModel`와 `tomotopy.GDMRModel`가 다중 메타데이터를 지원합니다. (https://github.com/bab2min/tomotopy/blob/main/examples/dmr_multi_label.py 참조)
259
337
  * `tomotopy.GDMRModel`의 성능이 개선되었습니다.
@@ -202,6 +202,55 @@ Inference for unseen document should be performed using `tomotopy.LDAModel.infer
202
202
  The `infer` method can infer only one instance of `tomotopy.Document` or a `list` of instances of `tomotopy.Document`.
203
203
  See more at `tomotopy.LDAModel.infer`.
204
204
 
205
+ Corpus and transform
206
+ --------------------
207
+ Every topic model in `tomotopy` has its own internal document type.
208
+ A document suitable for each model can be created and added through that model's `add_doc` method.
209
+ However, trying to add the same list of documents to different models becomes quite inconvenient,
210
+ because `add_doc` has to be called on the same list of documents once for each different model.
211
+ Thus, `tomotopy` provides `tomotopy.utils.Corpus` class that holds a list of documents.
212
+ `tomotopy.utils.Corpus` can be inserted into any model by passing as argument `corpus` to `__init__` or `add_corpus` method of each model.
213
+ So, inserting a `tomotopy.utils.Corpus` has the same effect as inserting the documents the corpus holds.
214
+
215
+ Some topic models require different data for their documents.
216
+ For example, `tomotopy.DMRModel` requires argument `metadata` in `str` type,
217
+ but `tomotopy.PLDAModel` requires argument `labels` in `List[str]` type.
218
+ Since `tomotopy.utils.Corpus` holds an independent set of documents rather than being tied to a specific topic model,
219
+ data types required by a topic model may be inconsistent when a corpus is added into that topic model.
220
+ In this case, miscellaneous data can be transformed to fit the target topic model using the argument `transform`.
221
+ See more details in the following code:
222
+
223
+ ::
224
+
225
+ from tomotopy import DMRModel
226
+ from tomotopy.utils import Corpus
227
+
228
+ corpus = Corpus()
229
+ corpus.add_doc("a b c d e".split(), a_data=1)
230
+ corpus.add_doc("e f g h i".split(), a_data=2)
231
+ corpus.add_doc("i j k l m".split(), a_data=3)
232
+
233
+ model = DMRModel(k=10)
234
+ model.add_corpus(corpus)
235
+ # You lose `a_data` field in `corpus`,
236
+ # and `metadata` that `DMRModel` requires is filled with the default value, empty str.
237
+
238
+ assert model.docs[0].metadata == ''
239
+ assert model.docs[1].metadata == ''
240
+ assert model.docs[2].metadata == ''
241
+
242
+ def transform_a_data_to_metadata(misc: dict):
243
+ return {'metadata': str(misc['a_data'])}
244
+ # this function transforms `a_data` to `metadata`
245
+
246
+ model = DMRModel(k=10)
247
+ model.add_corpus(corpus, transform=transform_a_data_to_metadata)
248
+ # Now docs in `model` have non-default `metadata`, generated from the `a_data` field.
249
+
250
+ assert model.docs[0].metadata == '1'
251
+ assert model.docs[1].metadata == '2'
252
+ assert model.docs[2].metadata == '3'
253
+
205
254
  Parallel Sampling Algorithms
206
255
  ----------------------------
207
256
  Since version 0.5.0, `tomotopy` allows you to choose a parallelism algorithm.
@@ -260,6 +309,32 @@ meaning you can use it for any reasonable purpose and remain in complete ownersh
260
309
 
261
310
  History
262
311
  -------
312
+ * 0.12.3 (2022-07-19)
313
+ * New features
314
+ * Now, inserting an empty document using `tomotopy.LDAModel.add_doc()` just ignores it instead of raising an exception. If the newly added argument `ignore_empty_words` is set to False, an exception is raised as before.
315
+ * `tomotopy.HDPModel.purge_dead_topics()` method is added to remove non-live topics from the model.
316
+ * Bug fixes
317
+ * Fixed an issue that prevents setting user defined values for nuSq in `tomotopy.SLDAModel` (by @jucendrero).
318
+ * Fixed an issue where `tomotopy.utils.Coherence` did not work for `tomotopy.DTModel`.
319
+ * Fixed an issue that often crashed when calling `make_dic()` before calling `train()`.
320
+ * Resolved the problem that the results of `tomotopy.DMRModel` and `tomotopy.GDMRModel` are different even when the seed is fixed.
321
+ * The parameter optimization process of `tomotopy.DMRModel` and `tomotopy.GDMRModel` has been improved.
322
+ * Fixed an issue that sometimes crashed when calling `tomotopy.PTModel.copy()`.
323
+
324
+ * 0.12.2 (2021-09-06)
325
+ * An issue where calling `convert_to_lda` of `tomotopy.HDPModel` with `min_cf > 0`, `min_df > 0` or `rm_top > 0` causes a crash has been fixed.
326
+ * A new argument `from_pseudo_doc` is added to `tomotopy.Document.get_topics` and `tomotopy.Document.get_topic_dist`.
327
+ This argument is only valid for documents of `PTModel`; it enables controlling the source used for computing the topic distribution.
328
+ * A default value for argument `p` of `tomotopy.PTModel` has been changed. The new default value is `k * 10`.
329
+ * Using documents generated by `make_doc` without calling `infer` doesn't cause a crash anymore, but just prints warning messages.
330
+ * An issue where the internal C++ code isn't compiled at clang c++17 environment has been fixed.
331
+
332
+ * 0.12.1 (2021-06-20)
333
+ * An issue where `tomotopy.LDAModel.set_word_prior()` causes a crash has been fixed.
334
+ * Now `tomotopy.LDAModel.perplexity` and `tomotopy.LDAModel.ll_per_word` return the accurate value when `TermWeight` is not `ONE`.
335
+ * `tomotopy.LDAModel.used_vocab_weighted_freq` was added, which returns term-weighted frequencies of words.
336
+ * Now `tomotopy.LDAModel.summary()` shows not only the entropy of words, but also the entropy of term-weighted words.
337
+
263
338
  * 0.12.0 (2021-04-26)
264
339
  * Now `tomotopy.DMRModel` and `tomotopy.GDMRModel` support multiple values of metadata (see https://github.com/bab2min/tomotopy/blob/main/examples/dmr_multi_label.py )
265
340
  * The performance of `tomotopy.GDMRModel` was improved.
@@ -238,7 +238,7 @@ void FoRelevance::estimateContexts()
238
238
 
239
239
  Eigen::ArrayXi df = Eigen::ArrayXi::Zero(tm->getV());
240
240
 
241
- if (pool)
241
+ if (pool && pool->getNumWorkers() > 1)
242
242
  {
243
243
  const size_t groups = pool->getNumWorkers() * 4;
244
244
  std::vector<std::future<Eigen::ArrayXi>> futures;
@@ -379,7 +379,7 @@ void FoRelevance::estimateContexts()
379
379
  ((wcPMI.array() + smoothing) * totDocCnt / docCnt / df.cast<Float>()).log().matrix();
380
380
  };
381
381
 
382
- if (pool)
382
+ if (pool && pool->getNumWorkers() > 1)
383
383
  {
384
384
  const size_t groups = pool->getNumWorkers() * 4;
385
385
  std::vector<std::future<void>> futures;
@@ -197,7 +197,7 @@ namespace tomoto
197
197
  template<typename _LocalData, typename _ReduceFn>
198
198
  _LocalData parallelReduce(std::vector<_LocalData>&& data, _ReduceFn&& fn, ThreadPool* pool = nullptr)
199
199
  {
200
- if (pool)
200
+ if (pool && pool->getNumWorkers() > 1)
201
201
  {
202
202
  for (size_t s = data.size(); s > 1; s = (s + 1) / 2)
203
203
  {
@@ -316,7 +316,7 @@ namespace tomoto
316
316
  }
317
317
  }
318
318
 
319
- float totN = std::accumulate(vocabFreqs.begin(), vocabFreqs.end(), (size_t)0);
319
+ float totN = (float)std::accumulate(vocabFreqs.begin(), vocabFreqs.end(), (size_t)0);
320
320
  const float logTotN = std::log(totN);
321
321
 
322
322
  // calculating PMIs
@@ -489,7 +489,7 @@ namespace tomoto
489
489
 
490
490
  float rbe = branchingEntropy(trieNodes[0].getNext(bigram.first)->getNext(bigram.second), candMinCnt);
491
491
  float lbe = branchingEntropy(trieNodesBw[0].getNext(bigram.second)->getNext(bigram.first), candMinCnt);
492
- float nbe = std::sqrt(rbe * lbe) / std::log(p.second);
492
+ float nbe = std::sqrt(rbe * lbe) / (float)std::log(p.second);
493
493
  if (nbe < minNBE) continue;
494
494
  candidates.emplace_back(npmi * nbe, bigram.first, bigram.second);
495
495
  candidates.back().cf = p.second;
@@ -512,7 +512,7 @@ namespace tomoto
512
512
 
513
513
  float rbe = branchingEntropy(node, candMinCnt);
514
514
  float lbe = branchingEntropy(trieNodesBw[0].findNode(rkeys.rbegin(), rkeys.rend()), candMinCnt);
515
- float nbe = std::sqrt(rbe * lbe) / std::log(node->val);
515
+ float nbe = std::sqrt(rbe * lbe) / (float)std::log(node->val);
516
516
  if (nbe < minNBE) return;
517
517
  candidates.emplace_back(npmi * nbe, rkeys);
518
518
  candidates.back().cf = node->val;
@@ -33,7 +33,10 @@ namespace tomoto
33
33
  friend typename BaseClass::BaseClass;
34
34
  using WeightType = typename BaseClass::WeightType;
35
35
 
36
- static constexpr char TMID[] = "CTM\0";
36
+ static constexpr auto tmid()
37
+ {
38
+ return serializer::to_key("CTM\0");
39
+ }
37
40
 
38
41
  uint64_t numBetaSample = 10;
39
42
  uint64_t numTMNSample = 5;
@@ -128,7 +131,7 @@ namespace tomoto
128
131
  {
129
132
  if (this->globalStep < this->burnIn || !this->optimInterval || (this->globalStep + 1) % this->optimInterval != 0) return;
130
133
 
131
- if (pool)
134
+ if (pool && pool->getNumWorkers() > 1)
132
135
  {
133
136
  std::vector<std::future<void>> res;
134
137
  const size_t chStride = pool->getNumWorkers() * 8;
@@ -247,8 +250,9 @@ namespace tomoto
247
250
  this->optimInterval = 2;
248
251
  }
249
252
 
250
- std::vector<Float> getTopicsByDoc(const _DocType& doc, bool normalize) const
253
+ std::vector<Float> _getTopicsByDoc(const _DocType& doc, bool normalize) const
251
254
  {
255
+ if (!doc.numByTopic.size()) return {};
252
256
  std::vector<Float> ret(this->K);
253
257
  Eigen::Map<Eigen::Array<Float, -1, 1>> m{ ret.data(), this->K };
254
258
  if (normalize)
@@ -47,7 +47,10 @@ namespace tomoto
47
47
  friend typename BaseClass::BaseClass;
48
48
  using WeightType = typename BaseClass::WeightType;
49
49
 
50
- static constexpr char TMID[] = "DMR\0";
50
+ static constexpr auto tmid()
51
+ {
52
+ return serializer::to_key("DMR\0");
53
+ }
51
54
 
52
55
  Matrix lambda;
53
56
  mutable std::unordered_map<std::pair<uint64_t, Vector>, size_t, MdHash> mdHashMap;
@@ -83,7 +86,7 @@ namespace tomoto
83
86
  const size_t chStride = pool.getNumWorkers() * 8;
84
87
  for (size_t ch = 0; ch < chStride; ++ch)
85
88
  {
86
- res.emplace_back(pool.enqueue([&](size_t threadId)
89
+ res.emplace_back(pool.enqueue([&, ch](size_t threadId)
87
90
  {
88
91
  auto& tmpK = localData[threadId].tmpK;
89
92
  if (!tmpK.size()) tmpK.resize(this->K);
@@ -449,8 +452,9 @@ namespace tomoto
449
452
  optimRepeat = _optimRepeat;
450
453
  }
451
454
 
452
- std::vector<Float> getTopicsByDoc(const _DocType& doc, bool normalize) const
455
+ std::vector<Float> _getTopicsByDoc(const _DocType& doc, bool normalize) const
453
456
  {
457
+ if (!doc.numByTopic.size()) return {};
454
458
  std::vector<Float> ret(this->K);
455
459
  auto alphaDoc = getCachedAlpha(doc);
456
460
  Eigen::Map<Eigen::Array<Float, -1, 1>> m{ ret.data(), this->K };
@@ -41,7 +41,10 @@ namespace tomoto
41
41
  friend typename BaseClass::BaseClass;
42
42
  using WeightType = typename BaseClass::WeightType;
43
43
 
44
- static constexpr char TMID[] = "DTM\0";
44
+ static constexpr auto tmid()
45
+ {
46
+ return serializer::to_key("DTM\0");
47
+ }
45
48
 
46
49
  uint64_t T;
47
50
  Float shapeA = 0.03f, shapeB = 0.1f, shapeC = 0.55f;
@@ -54,7 +57,7 @@ namespace tomoto
54
57
  std::vector<sample::AliasMethod<>> wordAliasTables; // Dim: (Word * Time)
55
58
 
56
59
  template<int _inc>
57
- inline void addWordTo(_ModelState& ld, _DocType& doc, uint32_t pid, Vid vid, Tid tid) const
60
+ inline void addWordTo(_ModelState& ld, _DocType& doc, size_t pid, Vid vid, Tid tid) const
58
61
  {
59
62
  assert(tid < this->K);
60
63
  assert(vid < this->realV);
@@ -233,7 +236,7 @@ namespace tomoto
233
236
  for (size_t t = 0; t < T; ++t)
234
237
  {
235
238
  // update alias tables for word proposal
236
- if (pool)
239
+ if (pool && pool->getNumWorkers() > 1)
237
240
  {
238
241
  const size_t chStride = pool->getNumWorkers() * 8;
239
242
  std::vector<std::future<void>> futures;
@@ -85,8 +85,8 @@ namespace tomoto
85
85
 
86
86
  Float getNegativeLambdaLL(Eigen::Ref<Vector> x, Vector& g) const
87
87
  {
88
- auto mappedX = Eigen::Map<Matrix>(x.data(), this->K, this->F);
89
- auto mappedG = Eigen::Map<Matrix>(g.data(), this->K, this->F);
88
+ auto mappedX = Eigen::Map<Matrix>(x.data(), this->K, this->F * this->fCont);
89
+ auto mappedG = Eigen::Map<Matrix>(g.data(), this->K, this->F * this->fCont);
90
90
 
91
91
  Float fx = 0;
92
92
  for (size_t k = 0; k < this->K; ++k)
@@ -79,5 +79,6 @@ namespace tomoto
79
79
  virtual bool isLiveTopic(Tid tid) const = 0;
80
80
 
81
81
  virtual std::unique_ptr<ILDAModel> convertToLDA(float topicThreshold, std::vector<Tid>& newK) const = 0;
82
+ virtual std::vector<Tid> purgeDeadTopics() = 0;
82
83
  };
83
84
  }
@@ -168,7 +168,7 @@ namespace tomoto
168
168
  }
169
169
 
170
170
  template<int _inc>
171
- inline void addWordTo(_ModelState& ld, _DocType& doc, uint32_t pid, Vid vid, size_t tableId, Tid tid) const
171
+ inline void addWordTo(_ModelState& ld, _DocType& doc, size_t pid, Vid vid, size_t tableId, Tid tid) const
172
172
  {
173
173
  addOnlyWordTo<_inc>(ld, doc, pid, vid, tid);
174
174
  constexpr bool _dec = _inc < 0 && _tw != TermWeight::one;
@@ -490,8 +490,9 @@ namespace tomoto
490
490
  THROW_ERROR_WITH_INFO(exc::Unimplemented, "HDPModel doesn't provide setWordPrior function.");
491
491
  }
492
492
 
493
- std::vector<Float> getTopicsByDoc(const _DocType& doc, bool normalize) const
493
+ std::vector<Float> _getTopicsByDoc(const _DocType& doc, bool normalize) const
494
494
  {
495
+ if (!doc.numByTopic.size()) return {};
495
496
  std::vector<Float> ret(this->K);
496
497
  Eigen::Map<Eigen::Array<Float, -1, 1>> m{ ret.data(), this->K };
497
498
  if (normalize)
@@ -522,7 +523,7 @@ namespace tomoto
522
523
  for (size_t i = 0; i < cntIdx.size(); ++i)
523
524
  {
524
525
  if (i && cntIdx[i].first / sum <= topicThreshold) break;
525
- newK[cntIdx[i].second] = i;
526
+ newK[cntIdx[i].second] = (Tid)i;
526
527
  liveK++;
527
528
  }
528
529
 
@@ -538,8 +539,11 @@ namespace tomoto
538
539
  auto d = lda->_makeFromRawDoc(doc);
539
540
  lda->_addDoc(d);
540
541
  }
541
-
542
- lda->prepare(true, this->minWordCf, this->minWordDf, this->removeTopN);
542
+
543
+ lda->realV = this->realV;
544
+ lda->realN = this->realN;
545
+ lda->weightedN = this->weightedN;
546
+ lda->prepare(true, 0, 0, 0, false);
543
547
 
544
548
  auto selectFirst = [&](const std::pair<size_t, size_t>& p) { return std::max(p.first / sum - topicThreshold, 0.f); };
545
549
  std::discrete_distribution<size_t> randomTopic{
@@ -558,7 +562,7 @@ namespace tomoto
558
562
  lda->docs[i].Zs[j] = non_topic_id;
559
563
  continue;
560
564
  }
561
- size_t newTopic = newK[this->docs[i].numTopicByTable[this->docs[i].Zs[j]].topic];
565
+ Tid newTopic = newK[this->docs[i].numTopicByTable[this->docs[i].Zs[j]].topic];
562
566
  while (newTopic == (Tid)-1) newTopic = newK[randomTopic(rng)];
563
567
  lda->docs[i].Zs[j] = newTopic;
564
568
  }
@@ -569,6 +573,53 @@ namespace tomoto
569
573
 
570
574
  return lda;
571
575
  }
576
+
577
+ std::vector<Tid> purgeDeadTopics() override
578
+ {
579
+ std::vector<Tid> relocation(this->K, -1);
580
+ Tid numLiveTopics = 0;
581
+ for (size_t i = 0; i < this->K; ++i)
582
+ {
583
+ if (this->globalState.numTableByTopic[i])
584
+ {
585
+ relocation[i] = numLiveTopics++;
586
+ }
587
+ }
588
+
589
+ for (auto& doc : this->docs)
590
+ {
591
+ for (auto& nt : doc.numTopicByTable)
592
+ {
593
+ nt.topic = (relocation[nt.topic] == (Tid)-1) ? 0 : relocation[nt.topic];
594
+ }
595
+
596
+ for (size_t i = 0; i < relocation.size(); ++i)
597
+ {
598
+ if (relocation[i] == (Tid)-1) continue;
599
+ doc.numByTopic[relocation[i]] = doc.numByTopic[i];
600
+ }
601
+ doc.numByTopic.conservativeResize(numLiveTopics, 1);
602
+ }
603
+
604
+ for (auto tt : { &this->globalState, &this->tState })
605
+ {
606
+ auto& numTableByTopic = tt->numTableByTopic;
607
+ auto& numByTopic = tt->numByTopic;
608
+ auto& numByTopicWord = tt->numByTopicWord;
609
+ for (size_t i = 0; i < relocation.size(); ++i)
610
+ {
611
+ if (relocation[i] == (Tid)-1) continue;
612
+ numTableByTopic[relocation[i]] = numTableByTopic[i];
613
+ numByTopic[relocation[i]] = numByTopic[i];
614
+ numByTopicWord.row(relocation[i]) = numByTopicWord.row(i);
615
+ }
616
+ numTableByTopic.conservativeResize(numLiveTopics);
617
+ numByTopic.conservativeResize(numLiveTopics);
618
+ numByTopicWord.conservativeResize(numLiveTopics, numByTopicWord.cols());
619
+ }
620
+ this->K = numLiveTopics;
621
+ return relocation;
622
+ }
572
623
  };
573
624
 
574
625
  template<TermWeight _tw>
@@ -229,7 +229,7 @@ namespace tomoto
229
229
  };
230
230
 
231
231
  // we elide the likelihood for root node because its weight applied to all path and can be seen as constant.
232
- if (pool)
232
+ if (pool && pool->getNumWorkers() > 1)
233
233
  {
234
234
  const size_t chStride = pool->getNumWorkers() * 8;
235
235
  for (size_t ch = 0; ch < chStride; ++ch)
@@ -335,7 +335,10 @@ namespace tomoto
335
335
  friend typename BaseClass::BaseClass;
336
336
  using WeightType = typename BaseClass::WeightType;
337
337
 
338
- static constexpr char TMID[] = "hLDA";
338
+ static constexpr auto tmid()
339
+ {
340
+ return serializer::to_key("hLDA");
341
+ }
339
342
 
340
343
  Float gamma;
341
344
 
@@ -422,7 +425,7 @@ namespace tomoto
422
425
  }
423
426
 
424
427
  template<int _inc>
425
- inline void addWordTo(_ModelState& ld, _DocType& doc, uint32_t pid, Vid vid, Tid level) const
428
+ inline void addWordTo(_ModelState& ld, _DocType& doc, size_t pid, Vid vid, Tid level) const
426
429
  {
427
430
  assert(vid < this->realV);
428
431
  constexpr bool _dec = _inc < 0 && _tw != TermWeight::one;
@@ -143,7 +143,7 @@ namespace tomoto
143
143
  }
144
144
 
145
145
  template<int _inc>
146
- inline void addWordTo(_ModelState& ld, _DocType& doc, uint32_t pid, Vid vid, Tid z1, Tid z2) const
146
+ inline void addWordTo(_ModelState& ld, _DocType& doc, size_t pid, Vid vid, Tid z1, Tid z2) const
147
147
  {
148
148
  assert(vid < this->realV);
149
149
  constexpr bool _dec = _inc < 0 && _tw != TermWeight::one;
@@ -540,8 +540,9 @@ namespace tomoto
540
540
  return ret;
541
541
  }
542
542
 
543
- std::vector<Float> getTopicsByDoc(const _DocType& doc, bool normalize) const
543
+ std::vector<Float> _getTopicsByDoc(const _DocType& doc, bool normalize) const
544
544
  {
545
+ if (!doc.numByTopic.size()) return {};
545
546
  std::vector<Float> ret(1 + this->K + K2);
546
547
  Float sum = doc.getSumWordWeight() + this->alphas.sum();
547
548
  if (!normalize) sum = 1;
@@ -121,7 +121,7 @@ namespace tomoto
121
121
 
122
122
  void updateSumWordWeight(size_t realV)
123
123
  {
124
- sumWordWeight = std::count_if(static_cast<_Base*>(this)->words.begin(), static_cast<_Base*>(this)->words.end(), [realV](Vid w)
124
+ sumWordWeight = (int32_t)std::count_if(static_cast<_Base*>(this)->words.begin(), static_cast<_Base*>(this)->words.end(), [realV](Vid w)
125
125
  {
126
126
  return w < realV;
127
127
  });
@@ -164,8 +164,8 @@ namespace tomoto
164
164
  struct LDAArgs
165
165
  {
166
166
  size_t k = 1;
167
- std::vector<Float> alpha = { 0.1 };
168
- Float eta = 0.01;
167
+ std::vector<Float> alpha = { (Float)0.1 };
168
+ Float eta = (Float)0.01;
169
169
  size_t seed = std::random_device{}();
170
170
  };
171
171
 
@@ -82,7 +82,7 @@ namespace tomoto
82
82
  friend BaseClass;
83
83
 
84
84
  static constexpr const char TWID[] = "one\0";
85
- static constexpr static constexpr char TMID[] = "LDA\0";
85
+ static constexpr const char TMID[] = "LDA\0";
86
86
 
87
87
  Float alpha;
88
88
  Vector alphas;
@@ -125,7 +125,7 @@ namespace tomoto
125
125
  }
126
126
 
127
127
  template<int _Inc, typename _Vec>
128
- inline void addWordTo(_ModelState& ld, _DocType& doc, uint32_t pid, Vid vid, _Vec tDist) const
128
+ inline void addWordTo(_ModelState& ld, _DocType& doc, size_t pid, Vid vid, _Vec tDist) const
129
129
  {
130
130
  assert(vid < this->realV);
131
131
  constexpr bool _dec = _Inc < 0;
@@ -366,9 +366,9 @@ namespace tomoto
366
366
  }
367
367
  }
368
368
 
369
- void prepare(bool initDocs = true, size_t minWordCnt = 0, size_t minWordDf = 0, size_t removeTopN = 0) override
369
+ void prepare(bool initDocs = true, size_t minWordCnt = 0, size_t minWordDf = 0, size_t removeTopN = 0, bool updateStopwords = true) override
370
370
  {
371
- if (initDocs) this->removeStopwords(minWordCnt, minWordDf, removeTopN);
371
+ if (initDocs) this->removeStopwords(minWordCnt, minWordDf, removeTopN, updateStopwords);
372
372
  static_cast<DerivedClass*>(this)->updateWeakArray();
373
373
  static_cast<DerivedClass*>(this)->initGlobalState(initDocs);
374
374
 
@@ -392,7 +392,7 @@ namespace tomoto
392
392
  return static_cast<const DerivedClass*>(this)->_getTopicsCount();
393
393
  }
394
394
 
395
- std::vector<Float> getTopicsByDoc(const _DocType& doc) const
395
+ std::vector<Float> _getTopicsByDoc(const _DocType& doc) const
396
396
  {
397
397
  std::vector<Float> ret(K);
398
398
  Float sum = doc.getSumWordWeight() + K * alpha;