sequenzo-0.1.17-cp311-cp311-macosx_10_9_universal2.whl → sequenzo-0.1.19-cp311-cp311-macosx_10_9_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sequenzo might be problematic.

Files changed (423)
  1. sequenzo/__init__.py +64 -8
  2. sequenzo/big_data/clara/clara.py +1 -1
  3. sequenzo/big_data/clara/utils/get_weighted_diss.c +155 -155
  4. sequenzo/big_data/clara/utils/get_weighted_diss.cpython-311-darwin.so +0 -0
  5. sequenzo/clustering/KMedoids.py +39 -0
  6. sequenzo/clustering/hierarchical_clustering.py +304 -8
  7. sequenzo/define_sequence_data.py +44 -3
  8. sequenzo/dissimilarity_measures/c_code.cpython-311-darwin.so +0 -0
  9. sequenzo/dissimilarity_measures/get_distance_matrix.py +1 -2
  10. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +1 -1
  11. sequenzo/dissimilarity_measures/src/DHDdistance.cpp +13 -37
  12. sequenzo/dissimilarity_measures/src/LCPdistance.cpp +13 -37
  13. sequenzo/dissimilarity_measures/src/OMdistance.cpp +12 -47
  14. sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +103 -67
  15. sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
  16. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +41 -16
  17. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +4 -0
  18. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +7 -0
  19. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +10 -0
  20. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +127 -43
  21. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +30 -2
  22. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
  23. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +14 -5
  24. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +111 -54
  25. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +131 -9
  26. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +11 -113
  27. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +39 -7
  28. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +336 -30
  29. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +9 -37
  30. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +58 -0
  31. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +1 -0
  32. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +35 -2
  33. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +3 -1
  34. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +17 -0
  35. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +13 -0
  36. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +18 -0
  37. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +13 -0
  38. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +8 -0
  39. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +363 -34
  40. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +7 -0
  41. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +13 -0
  42. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +41 -4
  43. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +252 -16
  44. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +9 -0
  45. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +12 -1
  46. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +7 -0
  47. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
  48. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +78 -1
  49. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +3 -1
  50. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +13 -2
  51. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +5 -0
  52. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +5 -1
  53. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +2 -0
  54. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +64 -1
  55. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +36 -0
  56. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +40 -31
  57. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +8 -0
  58. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
  59. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +6 -0
  60. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.c +155 -155
  61. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-311-darwin.so +0 -0
  62. sequenzo/dissimilarity_measures/utils/seqconc.c +155 -155
  63. sequenzo/dissimilarity_measures/utils/seqconc.cpython-311-darwin.so +0 -0
  64. sequenzo/dissimilarity_measures/utils/seqdss.c +155 -155
  65. sequenzo/dissimilarity_measures/utils/seqdss.cpython-311-darwin.so +0 -0
  66. sequenzo/dissimilarity_measures/utils/seqdur.c +155 -155
  67. sequenzo/dissimilarity_measures/utils/seqdur.cpython-311-darwin.so +0 -0
  68. sequenzo/dissimilarity_measures/utils/seqlength.c +155 -155
  69. sequenzo/dissimilarity_measures/utils/seqlength.cpython-311-darwin.so +0 -0
  70. sequenzo/multidomain/cat.py +0 -53
  71. sequenzo/multidomain/idcd.py +0 -1
  72. sequenzo/openmp_setup.py +233 -0
  73. sequenzo/sequence_characteristics/__init__.py +4 -0
  74. sequenzo/sequence_characteristics/complexity_index.py +17 -57
  75. sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +177 -111
  76. sequenzo/sequence_characteristics/plot_characteristics.py +30 -11
  77. sequenzo/sequence_characteristics/simple_characteristics.py +1 -0
  78. sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +9 -3
  79. sequenzo/sequence_characteristics/turbulence.py +47 -67
  80. sequenzo/sequence_characteristics/variance_of_spell_durations.py +19 -9
  81. sequenzo/sequence_characteristics/within_sequence_entropy.py +5 -58
  82. sequenzo/visualization/plot_sequence_index.py +58 -35
  83. sequenzo/visualization/plot_state_distribution.py +57 -36
  84. sequenzo/visualization/plot_transition_matrix.py +21 -22
  85. sequenzo/with_event_history_analysis/__init__.py +35 -0
  86. sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
  87. sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
  88. {sequenzo-0.1.17.dist-info → sequenzo-0.1.19.dist-info}/METADATA +48 -14
  89. sequenzo-0.1.19.dist-info/RECORD +215 -0
  90. sequenzo/dissimilarity_measures/setup.py +0 -35
  91. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Cholesky/LDLT.h +0 -688
  92. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Cholesky/LLT.h +0 -558
  93. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +0 -99
  94. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +0 -682
  95. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +0 -346
  96. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +0 -462
  97. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +0 -91
  98. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/EigenSolver.h +0 -622
  99. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +0 -418
  100. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +0 -226
  101. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +0 -374
  102. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +0 -158
  103. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/RealQZ.h +0 -657
  104. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/RealSchur.h +0 -558
  105. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +0 -77
  106. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +0 -904
  107. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +0 -87
  108. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +0 -561
  109. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/AlignedBox.h +0 -486
  110. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/AngleAxis.h +0 -247
  111. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/EulerAngles.h +0 -114
  112. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Homogeneous.h +0 -501
  113. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Hyperplane.h +0 -282
  114. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/OrthoMethods.h +0 -235
  115. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/ParametrizedLine.h +0 -232
  116. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Quaternion.h +0 -870
  117. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Rotation2D.h +0 -199
  118. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/RotationBase.h +0 -206
  119. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Scaling.h +0 -188
  120. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Transform.h +0 -1563
  121. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Translation.h +0 -202
  122. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/Umeyama.h +0 -166
  123. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +0 -168
  124. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Householder/BlockHouseholder.h +0 -110
  125. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Householder/Householder.h +0 -176
  126. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Householder/HouseholderSequence.h +0 -545
  127. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +0 -226
  128. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +0 -212
  129. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +0 -229
  130. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +0 -394
  131. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +0 -453
  132. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +0 -444
  133. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +0 -198
  134. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +0 -117
  135. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/Jacobi/Jacobi.h +0 -483
  136. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/KLUSupport/KLUSupport.h +0 -358
  137. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/Determinant.h +0 -117
  138. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/FullPivLU.h +0 -877
  139. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/InverseImpl.h +0 -432
  140. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/PartialPivLU.h +0 -624
  141. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +0 -83
  142. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/LU/arch/InverseSize4.h +0 -351
  143. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/MetisSupport/MetisSupport.h +0 -137
  144. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/OrderingMethods/Amd.h +0 -435
  145. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +0 -1863
  146. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/OrderingMethods/Ordering.h +0 -153
  147. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +0 -678
  148. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +0 -545
  149. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/ColPivHouseholderQR.h +0 -674
  150. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +0 -97
  151. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +0 -635
  152. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/FullPivHouseholderQR.h +0 -713
  153. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/HouseholderQR.h +0 -434
  154. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +0 -68
  155. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +0 -335
  156. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/BDCSVD.h +0 -1366
  157. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/JacobiSVD.h +0 -812
  158. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +0 -91
  159. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/SVDBase.h +0 -376
  160. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SVD/UpperBidiagonalization.h +0 -414
  161. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +0 -697
  162. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +0 -174
  163. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/AmbiVector.h +0 -378
  164. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/CompressedStorage.h +0 -274
  165. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +0 -352
  166. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +0 -67
  167. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseAssign.h +0 -270
  168. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseBlock.h +0 -571
  169. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseColEtree.h +0 -206
  170. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +0 -370
  171. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +0 -722
  172. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +0 -150
  173. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +0 -342
  174. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +0 -138
  175. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseDot.h +0 -98
  176. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseFuzzy.h +0 -29
  177. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseMap.h +0 -305
  178. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseMatrix.h +0 -1518
  179. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +0 -398
  180. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparsePermutation.h +0 -178
  181. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseProduct.h +0 -181
  182. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseRedux.h +0 -49
  183. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseRef.h +0 -397
  184. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +0 -659
  185. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseSolverBase.h +0 -124
  186. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +0 -198
  187. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseTranspose.h +0 -92
  188. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseTriangularView.h +0 -189
  189. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseUtil.h +0 -186
  190. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseVector.h +0 -478
  191. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/SparseView.h +0 -254
  192. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseCore/TriangularSolver.h +0 -315
  193. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU.h +0 -923
  194. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLUImpl.h +0 -66
  195. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +0 -226
  196. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +0 -110
  197. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +0 -375
  198. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +0 -80
  199. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +0 -181
  200. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +0 -179
  201. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +0 -107
  202. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +0 -280
  203. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +0 -126
  204. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +0 -130
  205. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +0 -223
  206. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +0 -258
  207. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +0 -137
  208. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +0 -136
  209. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +0 -83
  210. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SparseQR/SparseQR.h +0 -758
  211. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/StdDeque.h +0 -116
  212. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/StdList.h +0 -106
  213. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/StdVector.h +0 -131
  214. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/StlSupport/details.h +0 -84
  215. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +0 -1025
  216. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +0 -642
  217. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/Image.h +0 -82
  218. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/Kernel.h +0 -79
  219. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/RealSvd2x2.h +0 -55
  220. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/blas.h +0 -440
  221. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/lapack.h +0 -152
  222. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/lapacke.h +0 -16292
  223. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/misc/lapacke_mangling.h +0 -17
  224. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +0 -358
  225. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +0 -696
  226. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/BlockMethods.h +0 -1442
  227. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +0 -115
  228. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +0 -177
  229. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/IndexedViewMethods.h +0 -262
  230. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +0 -152
  231. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +0 -95
  232. sequenzo/dissimilarity_measures/src/eigen/Eigen/src/plugins/ReshapedMethods.h +0 -149
  233. sequenzo/dissimilarity_measures/src/eigen/blas/BandTriangularSolver.h +0 -97
  234. sequenzo/dissimilarity_measures/src/eigen/blas/GeneralRank1Update.h +0 -44
  235. sequenzo/dissimilarity_measures/src/eigen/blas/PackedSelfadjointProduct.h +0 -53
  236. sequenzo/dissimilarity_measures/src/eigen/blas/PackedTriangularMatrixVector.h +0 -79
  237. sequenzo/dissimilarity_measures/src/eigen/blas/PackedTriangularSolverVector.h +0 -88
  238. sequenzo/dissimilarity_measures/src/eigen/blas/Rank2Update.h +0 -57
  239. sequenzo/dissimilarity_measures/src/eigen/blas/common.h +0 -175
  240. sequenzo/dissimilarity_measures/src/eigen/blas/f2c/datatypes.h +0 -24
  241. sequenzo/dissimilarity_measures/src/eigen/blas/level1_cplx_impl.h +0 -155
  242. sequenzo/dissimilarity_measures/src/eigen/blas/level1_impl.h +0 -144
  243. sequenzo/dissimilarity_measures/src/eigen/blas/level1_real_impl.h +0 -122
  244. sequenzo/dissimilarity_measures/src/eigen/blas/level2_cplx_impl.h +0 -360
  245. sequenzo/dissimilarity_measures/src/eigen/blas/level2_impl.h +0 -553
  246. sequenzo/dissimilarity_measures/src/eigen/blas/level2_real_impl.h +0 -306
  247. sequenzo/dissimilarity_measures/src/eigen/blas/level3_impl.h +0 -702
  248. sequenzo/dissimilarity_measures/src/eigen/debug/gdb/__init__.py +0 -1
  249. sequenzo/dissimilarity_measures/src/eigen/debug/gdb/printers.py +0 -314
  250. sequenzo/dissimilarity_measures/src/eigen/lapack/lapack_common.h +0 -29
  251. sequenzo/dissimilarity_measures/src/eigen/scripts/relicense.py +0 -69
  252. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/Tensor.h +0 -554
  253. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h +0 -329
  254. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h +0 -247
  255. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h +0 -1176
  256. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h +0 -1559
  257. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h +0 -1093
  258. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h +0 -518
  259. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h +0 -377
  260. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +0 -1023
  261. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h +0 -73
  262. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h +0 -6
  263. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h +0 -1413
  264. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h +0 -575
  265. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h +0 -1650
  266. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h +0 -1679
  267. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h +0 -456
  268. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h +0 -1132
  269. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h +0 -544
  270. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h +0 -214
  271. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h +0 -347
  272. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h +0 -137
  273. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h +0 -6
  274. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h +0 -104
  275. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h +0 -389
  276. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h +0 -1048
  277. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h +0 -409
  278. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h +0 -236
  279. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h +0 -490
  280. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h +0 -236
  281. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h +0 -983
  282. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +0 -703
  283. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h +0 -388
  284. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h +0 -669
  285. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h +0 -379
  286. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h +0 -237
  287. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h +0 -191
  288. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h +0 -488
  289. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h +0 -302
  290. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h +0 -33
  291. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h +0 -99
  292. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h +0 -44
  293. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h +0 -79
  294. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h +0 -603
  295. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h +0 -738
  296. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h +0 -247
  297. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h +0 -82
  298. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h +0 -263
  299. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h +0 -216
  300. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h +0 -98
  301. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h +0 -327
  302. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h +0 -311
  303. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h +0 -1102
  304. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h +0 -708
  305. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h +0 -291
  306. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h +0 -322
  307. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h +0 -998
  308. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h +0 -6
  309. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h +0 -966
  310. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h +0 -582
  311. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h +0 -454
  312. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h +0 -465
  313. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h +0 -528
  314. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h +0 -513
  315. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h +0 -471
  316. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h +0 -161
  317. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h +0 -346
  318. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h +0 -303
  319. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h +0 -264
  320. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h +0 -249
  321. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h +0 -629
  322. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h +0 -293
  323. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h +0 -236
  324. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h +0 -338
  325. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h +0 -669
  326. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h +0 -67
  327. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h +0 -249
  328. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h +0 -486
  329. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h +0 -236
  330. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h +0 -23
  331. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h +0 -40
  332. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h +0 -301
  333. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h +0 -48
  334. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h +0 -20
  335. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/CXX11Meta.h +0 -537
  336. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h +0 -88
  337. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/EmulateArray.h +0 -261
  338. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h +0 -158
  339. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h +0 -108
  340. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h +0 -730
  341. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h +0 -220
  342. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/BVH/BVAlgorithms.h +0 -293
  343. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/BVH/KdBVH.h +0 -223
  344. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h +0 -790
  345. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/EulerAngles/EulerAngles.h +0 -355
  346. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/EulerAngles/EulerSystem.h +0 -305
  347. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/FFT/ei_fftw_impl.h +0 -261
  348. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/FFT/ei_kissfft_impl.h +0 -449
  349. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +0 -187
  350. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/DGMRES.h +0 -511
  351. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h +0 -335
  352. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/IDRS.h +0 -436
  353. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h +0 -90
  354. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/IterationController.h +0 -154
  355. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h +0 -267
  356. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/IterativeSolvers/Scaling.h +0 -193
  357. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +0 -305
  358. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h +0 -84
  359. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h +0 -202
  360. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h +0 -160
  361. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h +0 -188
  362. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h +0 -396
  363. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +0 -441
  364. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h +0 -569
  365. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h +0 -373
  366. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h +0 -705
  367. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h +0 -368
  368. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MatrixFunctions/StemFunction.h +0 -117
  369. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/MoreVectorization/MathFunctions.h +0 -95
  370. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h +0 -601
  371. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h +0 -657
  372. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/chkder.h +0 -66
  373. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/covar.h +0 -70
  374. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/dogleg.h +0 -107
  375. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h +0 -79
  376. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/lmpar.h +0 -298
  377. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h +0 -91
  378. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h +0 -30
  379. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/r1updt.h +0 -99
  380. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h +0 -49
  381. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h +0 -130
  382. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Polynomials/Companion.h +0 -280
  383. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Polynomials/PolynomialSolver.h +0 -428
  384. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Polynomials/PolynomialUtils.h +0 -143
  385. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h +0 -352
  386. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineMatrix.h +0 -862
  387. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h +0 -212
  388. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineProduct.h +0 -295
  389. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineStorage.h +0 -259
  390. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Skyline/SkylineUtil.h +0 -89
  391. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h +0 -122
  392. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h +0 -1079
  393. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +0 -404
  394. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/MarketIO.h +0 -282
  395. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h +0 -247
  396. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SparseExtra/RandomSetter.h +0 -349
  397. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsArrayAPI.h +0 -286
  398. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsBFloat16.h +0 -68
  399. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsFunctors.h +0 -357
  400. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsHalf.h +0 -66
  401. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsImpl.h +0 -1959
  402. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsPacketMath.h +0 -118
  403. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/HipVectorCompatibility.h +0 -67
  404. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h +0 -167
  405. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsBFloat16.h +0 -58
  406. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h +0 -330
  407. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h +0 -58
  408. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h +0 -2045
  409. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h +0 -79
  410. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX/BesselFunctions.h +0 -46
  411. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX/SpecialFunctions.h +0 -16
  412. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/BesselFunctions.h +0 -46
  413. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/SpecialFunctions.h +0 -16
  414. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/GPU/SpecialFunctions.h +0 -369
  415. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/NEON/BesselFunctions.h +0 -54
  416. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/SpecialFunctions/arch/NEON/SpecialFunctions.h +0 -34
  417. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Splines/Spline.h +0 -507
  418. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Splines/SplineFitting.h +0 -431
  419. sequenzo/dissimilarity_measures/src/eigen/unsupported/Eigen/src/Splines/SplineFwd.h +0 -93
  420. sequenzo-0.1.17.dist-info/RECORD +0 -537
  421. {sequenzo-0.1.17.dist-info → sequenzo-0.1.19.dist-info}/WHEEL +0 -0
  422. {sequenzo-0.1.17.dist-info → sequenzo-0.1.19.dist-info}/licenses/LICENSE +0 -0
  423. {sequenzo-0.1.17.dist-info → sequenzo-0.1.19.dist-info}/top_level.txt +0 -0
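Most of the added lines in this release come from the vendored xsimd headers (entries 16–59 above), notably the new PowerPC VSX backend whose diff is shown below. For orientation only, here is a minimal sketch of the kind of code that sits on top of these headers, using nothing beyond xsimd's public batch API; the function name and loop are illustrative and are not taken from sequenzo's sources.

#include <xsimd/xsimd.hpp>
#include <cstddef>
#include <vector>

// Element-wise fused multiply-add: out[i] = a[i] * b[i] + c[i].
// xsimd selects the instruction set at compile time
// (SSE/AVX on x86, NEON on ARM, VSX on PowerPC, ...).
void fma_elementwise(const std::vector<float>& a, const std::vector<float>& b,
                     const std::vector<float>& c, std::vector<float>& out)
{
    using batch = xsimd::batch<float>;          // batch for the default architecture
    const std::size_t n = a.size();
    const std::size_t step = batch::size;       // number of floats per SIMD register

    std::size_t i = 0;
    for (; i + step <= n; i += step)
    {
        batch va = batch::load_unaligned(&a[i]);
        batch vb = batch::load_unaligned(&b[i]);
        batch vc = batch::load_unaligned(&c[i]);
        xsimd::fma(va, vb, vc).store_unaligned(&out[i]);
    }
    for (; i < n; ++i)                          // scalar tail for the remaining elements
        out[i] = a[i] * b[i] + c[i];
}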
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp (new file)
@@ -0,0 +1,892 @@
1
+ /***************************************************************************
2
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
3
+ * Martin Renou *
4
+ * Copyright (c) QuantStack *
5
+ * Copyright (c) Serge Guelton *
6
+ * *
7
+ * Distributed under the terms of the BSD 3-Clause License. *
8
+ * *
9
+ * The full license is in the file LICENSE, distributed with this software. *
10
+ ****************************************************************************/
11
+
12
+ #ifndef XSIMD_VSX_HPP
13
+ #define XSIMD_VSX_HPP
14
+
15
+ #include <complex>
16
+ #include <limits>
17
+ #include <type_traits>
18
+
19
+ #include "../types/xsimd_vsx_register.hpp"
20
+
21
+ #include <endian.h>
22
+
23
+ namespace xsimd
24
+ {
25
+ template <typename T, class A, bool... Values>
26
+ struct batch_bool_constant;
27
+
28
+ template <class T_out, class T_in, class A>
29
+ XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& x) noexcept;
30
+
31
+ template <typename T, class A, T... Values>
32
+ struct batch_constant;
33
+
34
+ namespace kernel
35
+ {
36
+ template <class A, class T>
37
+ XSIMD_INLINE batch<T, A> avg(batch<T, A> const&, batch<T, A> const&, requires_arch<common>) noexcept;
38
+ template <class A, class T>
39
+ XSIMD_INLINE batch<T, A> avgr(batch<T, A> const&, batch<T, A> const&, requires_arch<common>) noexcept;
40
+
41
+ // abs
42
+ template <class A>
43
+ XSIMD_INLINE batch<float, A> abs(batch<float, A> const& self, requires_arch<vsx>) noexcept
44
+ {
45
+ return vec_abs(self.data);
46
+ }
47
+
48
+ template <class A>
49
+ XSIMD_INLINE batch<double, A> abs(batch<double, A> const& self, requires_arch<vsx>) noexcept
50
+ {
51
+ return vec_abs(self.data);
52
+ }
53
+
54
+ // add
55
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
56
+ XSIMD_INLINE batch<T, A> add(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
57
+ {
58
+ return vec_add(self.data, other.data);
59
+ }
60
+
61
+ // all
62
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
63
+ XSIMD_INLINE bool all(batch_bool<T, A> const& self, requires_arch<vsx>) noexcept
64
+ {
65
+ return vec_all_ne(self.data, vec_xor(self.data, self.data));
66
+ }
67
+
68
+ // any
69
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
70
+ XSIMD_INLINE bool any(batch_bool<T, A> const& self, requires_arch<vsx>) noexcept
71
+ {
72
+ return vec_any_ne(self.data, vec_xor(self.data, self.data));
73
+ }
74
+
75
+ // avgr
76
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value && sizeof(T) < 8, void>::type>
77
+ XSIMD_INLINE batch<T, A> avgr(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
78
+ {
79
+ return vec_avg(self.data, other.data);
80
+ }
81
+ template <class A>
82
+ XSIMD_INLINE batch<float, A> avgr(batch<float, A> const& self, batch<float, A> const& other, requires_arch<vsx>) noexcept
83
+ {
84
+ return avgr(self, other, common {});
85
+ }
86
+ template <class A>
87
+ XSIMD_INLINE batch<double, A> avgr(batch<double, A> const& self, batch<double, A> const& other, requires_arch<vsx>) noexcept
88
+ {
89
+ return avgr(self, other, common {});
90
+ }
91
+
92
+ // avg
93
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
94
+ XSIMD_INLINE batch<T, A> avg(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
95
+ {
96
+ XSIMD_IF_CONSTEXPR(sizeof(T) < 8)
97
+ {
98
+ constexpr auto nbit = 8 * sizeof(T) - 1;
99
+ auto adj = bitwise_cast<T>(bitwise_cast<as_unsigned_integer_t<T>>((self ^ other) << nbit) >> nbit);
100
+ return avgr(self, other, A {}) - adj;
101
+ }
102
+ else
103
+ {
104
+ return avg(self, other, common {});
105
+ }
106
+ }
107
+ template <class A>
108
+ XSIMD_INLINE batch<float, A> avg(batch<float, A> const& self, batch<float, A> const& other, requires_arch<vsx>) noexcept
109
+ {
110
+ return avg(self, other, common {});
111
+ }
112
+ template <class A>
113
+ XSIMD_INLINE batch<double, A> avg(batch<double, A> const& self, batch<double, A> const& other, requires_arch<vsx>) noexcept
114
+ {
115
+ return avg(self, other, common {});
116
+ }
117
+
118
+ // batch_bool_cast
119
+ template <class A, class T_out, class T_in>
120
+ XSIMD_INLINE batch_bool<T_out, A> batch_bool_cast(batch_bool<T_in, A> const& self, batch_bool<T_out, A> const&, requires_arch<vsx>) noexcept
121
+ {
122
+ return (typename batch_bool<T_out, A>::register_type)self.data;
123
+ }
124
+
125
+ // bitwise_and
126
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
127
+ XSIMD_INLINE batch<T, A> bitwise_and(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
128
+ {
129
+ return vec_and(self.data, other.data);
130
+ }
131
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
132
+ XSIMD_INLINE batch_bool<T, A> bitwise_and(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
133
+ {
134
+ return vec_and(self.data, other.data);
135
+ }
136
+
137
+ // bitwise_andnot
138
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
139
+ XSIMD_INLINE batch<T, A> bitwise_andnot(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
140
+ {
141
+ return vec_and(self.data, vec_nor(other.data, other.data));
142
+ }
143
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
144
+ XSIMD_INLINE batch_bool<T, A> bitwise_andnot(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
145
+ {
146
+ return self.data & ~other.data;
147
+ }
148
+
149
+ // bitwise_lshift
150
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
151
+ XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& self, int32_t other, requires_arch<vsx>) noexcept
152
+ {
153
+ using shift_type = as_unsigned_integer_t<T>;
154
+ batch<shift_type, A> shift(static_cast<shift_type>(other));
155
+ return vec_sl(self.data, shift.data);
156
+ }
157
+
158
+ // bitwise_not
159
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
160
+ XSIMD_INLINE batch<T, A> bitwise_not(batch<T, A> const& self, requires_arch<vsx>) noexcept
161
+ {
162
+ return vec_nor(self.data, self.data);
163
+ }
164
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
165
+ XSIMD_INLINE batch_bool<T, A> bitwise_not(batch_bool<T, A> const& self, requires_arch<vsx>) noexcept
166
+ {
167
+ return vec_nor(self.data, self.data);
168
+ }
169
+
170
+ // bitwise_or
171
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
172
+ XSIMD_INLINE batch<T, A> bitwise_or(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
173
+ {
174
+ return vec_or(self.data, other.data);
175
+ }
176
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
177
+ XSIMD_INLINE batch_bool<T, A> bitwise_or(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
178
+ {
179
+ return vec_or(self.data, other.data);
180
+ }
181
+
182
+ // bitwise_rshift
183
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
184
+ XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& self, int32_t other, requires_arch<vsx>) noexcept
185
+ {
186
+ using shift_type = as_unsigned_integer_t<T>;
187
+ batch<shift_type, A> shift(static_cast<shift_type>(other));
188
+ XSIMD_IF_CONSTEXPR(std::is_signed<T>::value)
189
+ {
190
+ return vec_sra(self.data, shift.data);
191
+ }
192
+ else
193
+ {
194
+ return vec_sr(self.data, shift.data);
195
+ }
196
+ }
197
+
198
+ // bitwise_xor
199
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
200
+ XSIMD_INLINE batch<T, A> bitwise_xor(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
201
+ {
202
+ return vec_xor(self.data, other.data);
203
+ }
204
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
205
+ XSIMD_INLINE batch_bool<T, A> bitwise_xor(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
206
+ {
207
+ return vec_xor(self.data, other.data);
208
+ }
209
+
210
+ // bitwise_cast
211
+ template <class A, class T_in, class T_out>
212
+ XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& self, batch<T_out, A> const&, requires_arch<vsx>) noexcept
213
+ {
214
+ return (typename batch<T_out, A>::register_type)(self.data);
215
+ }
216
+
217
+ // broadcast
218
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
219
+ XSIMD_INLINE batch<T, A> broadcast(T val, requires_arch<vsx>) noexcept
220
+ {
221
+ return vec_splats(val);
222
+ }
223
+
224
+ // ceil
225
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
226
+ XSIMD_INLINE batch<T, A> ceil(batch<T, A> const& self, requires_arch<vsx>) noexcept
227
+ {
228
+ return vec_ceil(self.data);
229
+ }
230
+
231
+ // store_complex
232
+ namespace detail
233
+ {
234
+ // complex_low
235
+ template <class A>
236
+ XSIMD_INLINE batch<float, A> complex_low(batch<std::complex<float>, A> const& self, requires_arch<vsx>) noexcept
237
+ {
238
+ return vec_mergeh(self.real().data, self.imag().data);
239
+ }
240
+ template <class A>
241
+ XSIMD_INLINE batch<double, A> complex_low(batch<std::complex<double>, A> const& self, requires_arch<vsx>) noexcept
242
+ {
243
+ return vec_mergeh(self.real().data, self.imag().data);
244
+ }
245
+ // complex_high
246
+ template <class A>
247
+ XSIMD_INLINE batch<float, A> complex_high(batch<std::complex<float>, A> const& self, requires_arch<vsx>) noexcept
248
+ {
249
+ return vec_mergel(self.real().data, self.imag().data);
250
+ }
251
+ template <class A>
252
+ XSIMD_INLINE batch<double, A> complex_high(batch<std::complex<double>, A> const& self, requires_arch<vsx>) noexcept
253
+ {
254
+ return vec_mergel(self.real().data, self.imag().data);
255
+ }
256
+ }
257
+
258
+ // decr_if
259
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
260
+ XSIMD_INLINE batch<T, A> decr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<vsx>) noexcept
261
+ {
262
+ return self + batch<T, A>((typename batch<T, A>::register_type)mask.data);
263
+ }
264
+
265
+ // div
266
+ template <class A>
267
+ XSIMD_INLINE batch<float, A> div(batch<float, A> const& self, batch<float, A> const& other, requires_arch<vsx>) noexcept
268
+ {
269
+ return vec_div(self.data, other.data);
270
+ }
271
+ template <class A>
272
+ XSIMD_INLINE batch<double, A> div(batch<double, A> const& self, batch<double, A> const& other, requires_arch<vsx>) noexcept
273
+ {
274
+ return vec_div(self.data, other.data);
275
+ }
276
+
277
+ // fast_cast
278
+ namespace detail
279
+ {
280
+ template <class A>
281
+ XSIMD_INLINE batch<float, A> fast_cast(batch<int32_t, A> const& self, batch<float, A> const&, requires_arch<vsx>) noexcept
282
+ {
283
+ return vec_ctf(self.data, 0);
284
+ }
285
+ template <class A>
286
+ XSIMD_INLINE batch<float, A> fast_cast(batch<uint32_t, A> const& self, batch<float, A> const&, requires_arch<vsx>) noexcept
287
+ {
288
+ return vec_ctf(self.data, 0);
289
+ }
290
+
291
+ template <class A>
292
+ XSIMD_INLINE batch<int32_t, A> fast_cast(batch<float, A> const& self, batch<int32_t, A> const&, requires_arch<vsx>) noexcept
293
+ {
294
+ return vec_cts(self.data, 0);
295
+ }
296
+
297
+ template <class A>
298
+ XSIMD_INLINE batch<uint32_t, A> fast_cast(batch<float, A> const& self, batch<uint32_t, A> const&, requires_arch<vsx>) noexcept
299
+ {
300
+ return vec_ctu(self.data, 0);
301
+ }
302
+ }
303
+
304
+ // fma
305
+ template <class A>
306
+ XSIMD_INLINE batch<float, A> fma(batch<float, A> const& x, batch<float, A> const& y, batch<float, A> const& z, requires_arch<vsx>) noexcept
307
+ {
308
+ return vec_madd(x.data, y.data, z.data);
309
+ }
310
+
311
+ template <class A>
312
+ XSIMD_INLINE batch<double, A> fma(batch<double, A> const& x, batch<double, A> const& y, batch<double, A> const& z, requires_arch<vsx>) noexcept
313
+ {
314
+ return vec_madd(x.data, y.data, z.data);
315
+ }
316
+
317
+ // fms
318
+ template <class A>
319
+ XSIMD_INLINE batch<float, A> fms(batch<float, A> const& x, batch<float, A> const& y, batch<float, A> const& z, requires_arch<vsx>) noexcept
320
+ {
321
+ return vec_msub(x.data, y.data, z.data);
322
+ }
323
+
324
+ template <class A>
325
+ XSIMD_INLINE batch<double, A> fms(batch<double, A> const& x, batch<double, A> const& y, batch<double, A> const& z, requires_arch<vsx>) noexcept
326
+ {
327
+ return vec_msub(x.data, y.data, z.data);
328
+ }
329
+
330
+ // eq
331
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
332
+ XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
333
+ {
334
+ auto res = vec_cmpeq(self.data, other.data);
335
+ return *reinterpret_cast<typename batch_bool<T, A>::register_type*>(&res);
336
+ }
337
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
338
+ XSIMD_INLINE batch_bool<T, A> eq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
339
+ {
340
+ auto res = vec_cmpeq(self.data, other.data);
341
+ return *reinterpret_cast<typename batch_bool<T, A>::register_type*>(&res);
342
+ }
343
+
344
+ // first
345
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
346
+ XSIMD_INLINE T first(batch<T, A> const& self, requires_arch<vsx>) noexcept
347
+ {
348
+ return vec_extract(self.data, 0);
349
+ }
350
+
351
+ // floor
352
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
353
+ XSIMD_INLINE batch<T, A> floor(batch<T, A> const& self, requires_arch<vsx>) noexcept
354
+ {
355
+ return vec_floor(self.data);
356
+ }
357
+
358
+ // ge
359
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
360
+ XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
361
+ {
362
+ return vec_cmpge(self.data, other.data);
363
+ }
364
+
365
+ // gt
366
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
367
+ XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
368
+ {
369
+ return vec_cmpgt(self.data, other.data);
370
+ }
371
+
372
+ // haddp
+ template <class A>
+ XSIMD_INLINE batch<float, A> haddp(batch<float, A> const* row, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_mergee(row[0].data, row[1].data); // v00 v10 v02 v12
+ auto tmp1 = vec_mergeo(row[0].data, row[1].data); // v01 v11 v03 v13
+ auto tmp4 = vec_add(tmp0, tmp1); // (v00 + v01, v10 + v11, v02 + v03, v12 + v13)
+
+ auto tmp2 = vec_mergee(row[2].data, row[3].data); // v20 v30 v22 v32
+ auto tmp3 = vec_mergeo(row[2].data, row[3].data); // v21 v31 v23 v33
+ auto tmp5 = vec_add(tmp2, tmp3); // (v20 + v21, v30 + v31, v22 + v23, v32 + v33)
+
+ auto tmp6 = vec_perm(tmp4, tmp5, (__vector unsigned char) { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 }); // (v00 + v01, v10 + v11, v20 + v21, v30 + v31)
+ auto tmp7 = vec_perm(tmp4, tmp5, (__vector unsigned char) { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 }); // (v02 + v03, v12 + v13, v22 + v23, v32 + v33)
+
+ return vec_add(tmp6, tmp7);
+ }
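+ // Note: vec_mergee/vec_mergeo interleave the even/odd lanes of two rows, so each
+ // vec_add above yields the pairwise sums of one pair of rows; the two vec_perm
+ // selections then gather the low and high halves so the final add leaves
+ // sum(row[i]) in lane i.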
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> haddp(batch<double, A> const* row, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_mergee(row[0].data, row[1].data); // v00 v10
+ auto tmp1 = vec_mergeo(row[0].data, row[1].data); // v01 v11
+ return vec_add(tmp0, tmp1);
+ }
+
+ // incr_if
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> incr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<vsx>) noexcept
+ {
+ return self - batch<T, A>((typename batch<T, A>::register_type)mask.data);
+ }
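+ // A true lane in a batch_bool is all-ones, i.e. -1 when reinterpreted as an
+ // integral lane, so subtracting the mask increments exactly the selected lanes.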
+
+ // insert
+ template <class A, class T, size_t I, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I>, requires_arch<vsx>) noexcept
+ {
+ return vec_insert(val, self.data, I);
+ }
+
+ // isnan
+ template <class A>
+ XSIMD_INLINE batch_bool<float, A> isnan(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, self.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch_bool<double, A> isnan(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, self.data);
+ }
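+ // NaN is the only value that compares unequal to itself, so negating the
+ // self-equality mask flags exactly the NaN lanes.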
+
+ // load_aligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> load_aligned(T const* mem, convert<T>, requires_arch<vsx>) noexcept
+ {
+ return vec_ld(0, reinterpret_cast<const typename batch<T, A>::register_type*>(mem));
+ }
+
+ // load_unaligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> load_unaligned(T const* mem, convert<T>, requires_arch<vsx>) noexcept
+ {
+ return vec_vsx_ld(0, (typename batch<T, A>::register_type const*)mem);
+ }
+
+ // load_complex
+ namespace detail
+ {
+ template <class A>
+ XSIMD_INLINE batch<std::complex<float>, A> load_complex(batch<float, A> const& hi, batch<float, A> const& lo, requires_arch<vsx>) noexcept
+ {
+ __vector unsigned char perme = { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
+ __vector unsigned char permo = { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
+ return { vec_perm(hi.data, lo.data, perme), vec_perm(hi.data, lo.data, permo) };
+ }
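+ // perme gathers the even (real) float lanes and permo the odd (imaginary) lanes
+ // of the two interleaved halves, mirroring the vec_mergee/vec_mergeo pair used
+ // for double below.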
+ template <class A>
+ XSIMD_INLINE batch<std::complex<double>, A> load_complex(batch<double, A> const& hi, batch<double, A> const& lo, requires_arch<vsx>) noexcept
+ {
+ return { vec_mergee(hi.data, lo.data), vec_mergeo(hi.data, lo.data) };
+ }
+ }
+
+ // le
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_cmple(self.data, other.data);
+ }
+
+ // lt
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_cmplt(self.data, other.data);
+ }
+
+ // max
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> max(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_max(self.data, other.data);
+ }
+
+ // min
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> min(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_min(self.data, other.data);
+ }
+
+ // mul
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return self.data * other.data;
+ }
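+ // mul (and neg below) rely on the element-wise operators that GCC/Clang provide
+ // for AltiVec/VSX vector types rather than a dedicated intrinsic.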
+
+ // neg
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> neg(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return -(self.data);
+ }
+
+ // neq
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, other.data);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> neq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, other.data);
+ }
+
+ // reciprocal
+ template <class A>
+ XSIMD_INLINE batch<float, A> reciprocal(batch<float, A> const& self,
+ kernel::requires_arch<vsx>)
+ {
+ return vec_re(self.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> reciprocal(batch<double, A> const& self,
+ kernel::requires_arch<vsx>)
+ {
+ return vec_re(self.data);
+ }
+
+ // reduce_add
+ template <class A>
+ XSIMD_INLINE signed reduce_add(batch<signed, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v3, v1 + v2, v2 + v1, v3 + v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 + v1, v2 + v1, v3 + v0, v3 + v0
+ auto tmp3 = vec_add(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
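+ // Reversing the vector and adding folds the four lanes into partial sums;
+ // merging the high pair back over the low pair and adding once more leaves the
+ // full sum in lane 0. The same folding is repeated for the other element types
+ // below.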
+ template <class A>
+ XSIMD_INLINE unsigned reduce_add(batch<unsigned, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v3, v1 + v2, v2 + v1, v3 + v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 + v1, v2 + v1, v3 + v0, v3 + v0
+ auto tmp3 = vec_add(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE float reduce_add(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ // FIXME: find an in-order approach
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v3, v1 + v2, v2 + v1, v3 + v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 + v1, v2 + v1, v3 + v0, v3 + v0
+ auto tmp3 = vec_add(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE double reduce_add(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v1, v1 + v0
+ return vec_extract(tmp1, 0);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE T reduce_add(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return reduce_add(self, common {});
+ }
+
+ // reduce_mul
+ template <class A>
+ XSIMD_INLINE signed reduce_mul(batch<signed, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v3, v1 * v2, v2 * v1, v3 * v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 * v1, v2 * v1, v3 * v0, v3 * v0
+ auto tmp3 = vec_mul(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE unsigned reduce_mul(batch<unsigned, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v3, v1 * v2, v2 * v1, v3 * v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 * v1, v2 * v1, v3 * v0, v3 * v0
+ auto tmp3 = vec_mul(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE float reduce_mul(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ // FIXME: find an in-order approach
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v3, v1 * v2, v2 * v1, v3 * v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 * v1, v2 * v1, v3 * v0, v3 * v0
+ auto tmp3 = vec_mul(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE double reduce_mul(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v1, v1 * v0
+ return vec_extract(tmp1, 0);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE T reduce_mul(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return reduce_mul(self, common {});
+ }
+
+ // round
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> round(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_round(self.data);
+ }
+
+ // rsqrt
+ template <class A>
+ XSIMD_INLINE batch<float, A> rsqrt(batch<float, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_rsqrt(val.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> rsqrt(batch<double, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_rsqrt(val.data);
+ }
+
+ // select
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<vsx>) noexcept
+ {
+ return vec_sel(false_br.data, true_br.data, cond.data);
+ }
+ template <class A, class T, bool... Values, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> select(batch_bool_constant<T, A, Values...> const&, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<vsx>) noexcept
+ {
+ return select(batch_bool<T, A> { Values... }, true_br, false_br, vsx {});
+ }
+
+ // shuffle
+ template <class A, class ITy, ITy I0, ITy I1, ITy I2, ITy I3>
+ XSIMD_INLINE batch<float, A> shuffle(batch<float, A> const& x, batch<float, A> const& y, batch_constant<ITy, A, I0, I1, I2, I3>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(x.data, y.data,
+ (__vector unsigned char) {
+ 4 * I0 + 0, 4 * I0 + 1, 4 * I0 + 2, 4 * I0 + 3,
+ 4 * I1 + 0, 4 * I1 + 1, 4 * I1 + 2, 4 * I1 + 3,
+ 4 * I2 + 0, 4 * I2 + 1, 4 * I2 + 2, 4 * I2 + 3,
+ 4 * I3 + 0, 4 * I3 + 1, 4 * I3 + 2, 4 * I3 + 3 });
+ }
+
+ template <class A, class ITy, ITy I0, ITy I1>
+ XSIMD_INLINE batch<double, A> shuffle(batch<double, A> const& x, batch<double, A> const& y, batch_constant<ITy, A, I0, I1>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(x.data, y.data,
+ (__vector unsigned char) {
+ 8 * I0 + 0,
+ 8 * I0 + 1,
+ 8 * I0 + 2,
+ 8 * I0 + 3,
+ 8 * I0 + 4,
+ 8 * I0 + 5,
+ 8 * I0 + 6,
+ 8 * I0 + 7,
+ 8 * I1 + 0,
+ 8 * I1 + 1,
+ 8 * I1 + 2,
+ 8 * I1 + 3,
+ 8 * I1 + 4,
+ 8 * I1 + 5,
+ 8 * I1 + 6,
+ 8 * I1 + 7,
+ });
+ }
+
+ // sqrt
+ template <class A>
+ XSIMD_INLINE batch<float, A> sqrt(batch<float, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_sqrt(val.data);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> sqrt(batch<double, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_sqrt(val.data);
+ }
+
+ // slide_left
+ template <size_t N, class A, class T>
+ XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<vsx>) noexcept
+ {
+ XSIMD_IF_CONSTEXPR(N == batch<T, A>::size * sizeof(T))
+ {
+ return batch<T, A>(0);
+ }
+ else
+ {
+ auto slider = vec_splats((uint8_t)(8 * N));
+ return (typename batch<T, A>::register_type)vec_slo(x.data, slider);
+ }
+ }
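+ // vec_slo / vec_sro shift the whole register by octets, taking the count as a
+ // bit amount that must be a multiple of 8, hence the splat of 8 * N to slide by
+ // N bytes; slide_right below uses the same encoding in the other direction.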
+
+ // slide_right
+ template <size_t N, class A, class T>
+ XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<vsx>) noexcept
+ {
+ XSIMD_IF_CONSTEXPR(N == batch<T, A>::size * sizeof(T))
+ {
+ return batch<T, A>(0);
+ }
+ else
+ {
+ auto slider = vec_splats((uint8_t)(8 * N));
+ return (typename batch<T, A>::register_type)vec_sro((__vector unsigned char)x.data, slider);
+ }
+ }
+
+ // sadd
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value && sizeof(T) != 8, void>::type>
+ XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_adds(self.data, other.data);
+ }
+
+ // set
+ template <class A, class T, class... Values>
+ XSIMD_INLINE batch<T, A> set(batch<T, A> const&, requires_arch<vsx>, Values... values) noexcept
+ {
+ static_assert(sizeof...(Values) == batch<T, A>::size, "consistent init");
+ return typename batch<T, A>::register_type { values... };
+ }
+
+ template <class A, class T, class... Values, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> set(batch_bool<T, A> const&, requires_arch<vsx>, Values... values) noexcept
+ {
+ static_assert(sizeof...(Values) == batch_bool<T, A>::size, "consistent init");
+ return typename batch_bool<T, A>::register_type { static_cast<decltype(std::declval<typename batch_bool<T, A>::register_type>()[0])>(values ? -1LL : 0LL)... };
+ }
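+ // Each true lane is materialised as all-ones (-1) and each false lane as 0,
+ // matching the mask convention of the VSX compare intrinsics used above.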
+
+ // ssub
+
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value && sizeof(T) == 1, void>::type>
+ XSIMD_INLINE batch<T, A> ssub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_subs(self.data, other.data);
+ }
+
+ // store_aligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE void store_aligned(T* mem, batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_st(self.data, 0, reinterpret_cast<typename batch<T, A>::register_type*>(mem));
+ }
+
+ // store_unaligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE void store_unaligned(T* mem, batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_vsx_st(self.data, 0, reinterpret_cast<typename batch<T, A>::register_type*>(mem));
+ }
+
+ // sub
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_sub(self.data, other.data);
+ }
+
+ // swizzle
+
+ template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
+ XSIMD_INLINE batch<float, A> swizzle(batch<float, A> const& self, batch_constant<uint32_t, A, V0, V1, V2, V3>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 4 * V0 + 0, 4 * V0 + 1, 4 * V0 + 2, 4 * V0 + 3,
+ 4 * V1 + 0, 4 * V1 + 1, 4 * V1 + 2, 4 * V1 + 3,
+ 4 * V2 + 0, 4 * V2 + 1, 4 * V2 + 2, 4 * V2 + 3,
+ 4 * V3 + 0, 4 * V3 + 1, 4 * V3 + 2, 4 * V3 + 3 });
+ }
+
+ template <class A, uint64_t V0, uint64_t V1>
+ XSIMD_INLINE batch<double, A> swizzle(batch<double, A> const& self, batch_constant<uint64_t, A, V0, V1>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 8 * V0 + 0,
+ 8 * V0 + 1,
+ 8 * V0 + 2,
+ 8 * V0 + 3,
+ 8 * V0 + 4,
+ 8 * V0 + 5,
+ 8 * V0 + 6,
+ 8 * V0 + 7,
+ 8 * V1 + 0,
+ 8 * V1 + 1,
+ 8 * V1 + 2,
+ 8 * V1 + 3,
+ 8 * V1 + 4,
+ 8 * V1 + 5,
+ 8 * V1 + 6,
+ 8 * V1 + 7,
+ });
+ }
+
+ template <class A, uint64_t V0, uint64_t V1>
+ XSIMD_INLINE batch<uint64_t, A> swizzle(batch<uint64_t, A> const& self, batch_constant<uint64_t, A, V0, V1>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 8 * V0 + 0,
+ 8 * V0 + 1,
+ 8 * V0 + 2,
+ 8 * V0 + 3,
+ 8 * V0 + 4,
+ 8 * V0 + 5,
+ 8 * V0 + 6,
+ 8 * V0 + 7,
+ 8 * V1 + 0,
+ 8 * V1 + 1,
+ 8 * V1 + 2,
+ 8 * V1 + 3,
+ 8 * V1 + 4,
+ 8 * V1 + 5,
+ 8 * V1 + 6,
+ 8 * V1 + 7,
+ });
+ }
+
+ template <class A, uint64_t V0, uint64_t V1>
+ XSIMD_INLINE batch<int64_t, A> swizzle(batch<int64_t, A> const& self, batch_constant<uint64_t, A, V0, V1> mask, requires_arch<vsx>) noexcept
+ {
+ return bitwise_cast<int64_t>(swizzle(bitwise_cast<uint64_t>(self), mask, vsx {}));
+ }
+
+ template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
+ XSIMD_INLINE batch<uint32_t, A> swizzle(batch<uint32_t, A> const& self, batch_constant<uint32_t, A, V0, V1, V2, V3>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 4 * V0 + 0, 4 * V0 + 1, 4 * V0 + 2, 4 * V0 + 3,
+ 4 * V1 + 0, 4 * V1 + 1, 4 * V1 + 2, 4 * V1 + 3,
+ 4 * V2 + 0, 4 * V2 + 1, 4 * V2 + 2, 4 * V2 + 3,
+ 4 * V3 + 0, 4 * V3 + 1, 4 * V3 + 2, 4 * V3 + 3 });
+ }
+
+ template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
+ XSIMD_INLINE batch<int32_t, A> swizzle(batch<int32_t, A> const& self, batch_constant<uint32_t, A, V0, V1, V2, V3> mask, requires_arch<vsx>) noexcept
+ {
+ return bitwise_cast<int32_t>(swizzle(bitwise_cast<uint32_t>(self), mask, vsx {}));
+ }
+
+ template <class A, uint16_t V0, uint16_t V1, uint16_t V2, uint16_t V3, uint16_t V4, uint16_t V5, uint16_t V6, uint16_t V7>
+ XSIMD_INLINE batch<uint16_t, A> swizzle(batch<uint16_t, A> const& self, batch_constant<uint16_t, A, V0, V1, V2, V3, V4, V5, V6, V7>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 2 * V0 + 0, 2 * V0 + 1, 2 * V1 + 0, 2 * V1 + 1,
+ 2 * V2 + 0, 2 * V2 + 1, 2 * V3 + 0, 2 * V3 + 1,
+ 2 * V4 + 0, 2 * V4 + 1, 2 * V5 + 0, 2 * V5 + 1,
+ 2 * V6 + 0, 2 * V6 + 1, 2 * V7 + 0, 2 * V7 + 1 });
+ }
+
+ template <class A, uint16_t V0, uint16_t V1, uint16_t V2, uint16_t V3, uint16_t V4, uint16_t V5, uint16_t V6, uint16_t V7>
+ XSIMD_INLINE batch<int16_t, A> swizzle(batch<int16_t, A> const& self, batch_constant<uint16_t, A, V0, V1, V2, V3, V4, V5, V6, V7> mask, requires_arch<vsx>) noexcept
+ {
+ return bitwise_cast<int16_t>(swizzle(bitwise_cast<uint16_t>(self), mask, vsx {}));
+ }
+
+ // trunc
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> trunc(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_trunc(self.data);
+ }
+
+ // zip_hi
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> zip_hi(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_mergel(self.data, other.data);
+ }
+
+ // zip_lo
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> zip_lo(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_mergeh(self.data, other.data);
+ }
+ }
+ }
+
+ #endif