sequenzo-0.1.21-cp39-cp39-macosx_10_9_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sequenzo might be problematic.

Files changed (260)
  1. sequenzo/__init__.py +240 -0
  2. sequenzo/big_data/__init__.py +12 -0
  3. sequenzo/big_data/clara/__init__.py +26 -0
  4. sequenzo/big_data/clara/clara.py +467 -0
  5. sequenzo/big_data/clara/utils/__init__.py +27 -0
  6. sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
  7. sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
  8. sequenzo/big_data/clara/utils/get_weighted_diss.cpython-39-darwin.so +0 -0
  9. sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
  10. sequenzo/big_data/clara/visualization.py +88 -0
  11. sequenzo/clustering/KMedoids.py +196 -0
  12. sequenzo/clustering/__init__.py +30 -0
  13. sequenzo/clustering/clustering_c_code.cpython-39-darwin.so +0 -0
  14. sequenzo/clustering/hierarchical_clustering.py +1380 -0
  15. sequenzo/clustering/src/KMedoid.cpp +262 -0
  16. sequenzo/clustering/src/PAM.cpp +236 -0
  17. sequenzo/clustering/src/PAMonce.cpp +234 -0
  18. sequenzo/clustering/src/cluster_quality.cpp +496 -0
  19. sequenzo/clustering/src/cluster_quality.h +128 -0
  20. sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
  21. sequenzo/clustering/src/module.cpp +228 -0
  22. sequenzo/clustering/src/weightedinertia.cpp +111 -0
  23. sequenzo/clustering/utils/__init__.py +27 -0
  24. sequenzo/clustering/utils/disscenter.py +122 -0
  25. sequenzo/data_preprocessing/__init__.py +20 -0
  26. sequenzo/data_preprocessing/helpers.py +256 -0
  27. sequenzo/datasets/__init__.py +41 -0
  28. sequenzo/datasets/biofam.csv +2001 -0
  29. sequenzo/datasets/biofam_child_domain.csv +2001 -0
  30. sequenzo/datasets/biofam_left_domain.csv +2001 -0
  31. sequenzo/datasets/biofam_married_domain.csv +2001 -0
  32. sequenzo/datasets/chinese_colonial_territories.csv +12 -0
  33. sequenzo/datasets/country_co2_emissions.csv +194 -0
  34. sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
  35. sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
  36. sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
  37. sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
  38. sequenzo/datasets/country_gdp_per_capita.csv +194 -0
  39. sequenzo/datasets/mvad.csv +713 -0
  40. sequenzo/datasets/pairfam_family.csv +1867 -0
  41. sequenzo/datasets/polyadic_samplec1.csv +61 -0
  42. sequenzo/datasets/polyadic_samplep1.csv +61 -0
  43. sequenzo/datasets/polyadic_seqc1.csv +61 -0
  44. sequenzo/datasets/polyadic_seqp1.csv +61 -0
  45. sequenzo/define_sequence_data.py +609 -0
  46. sequenzo/dissimilarity_measures/__init__.py +31 -0
  47. sequenzo/dissimilarity_measures/c_code.cpython-39-darwin.so +0 -0
  48. sequenzo/dissimilarity_measures/get_distance_matrix.py +702 -0
  49. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +241 -0
  50. sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
  51. sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
  52. sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
  53. sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
  54. sequenzo/dissimilarity_measures/src/__init__.py +0 -0
  55. sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
  56. sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
  57. sequenzo/dissimilarity_measures/src/module.cpp +34 -0
  58. sequenzo/dissimilarity_measures/src/setup.py +30 -0
  59. sequenzo/dissimilarity_measures/src/utils.h +25 -0
  60. sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
  61. sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
  62. sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
  63. sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
  64. sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
  65. sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
  66. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
  67. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
  68. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
  69. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
  70. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
  71. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
  72. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
  73. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
  74. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
  75. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
  76. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
  77. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
  78. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
  79. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
  80. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
  81. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
  82. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
  83. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
  84. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
  85. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
  86. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
  87. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
  88. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
  89. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
  90. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
  91. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
  92. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
  93. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
  94. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
  95. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
  96. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
  97. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
  98. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
  99. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
  100. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
  101. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
  102. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
  103. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
  104. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
  105. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
  106. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
  107. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
  108. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
  109. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
  110. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
  111. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
  112. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
  113. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
  114. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
  115. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
  116. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
  117. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
  118. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
  119. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
  120. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
  121. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
  122. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
  123. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
  124. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
  125. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
  126. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
  127. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
  128. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
  129. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
  130. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
  131. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
  132. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
  133. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
  134. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
  135. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
  136. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
  137. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
  138. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
  139. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
  140. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
  141. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
  142. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
  143. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
  144. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
  145. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
  146. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
  147. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
  148. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
  149. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
  150. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
  151. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
  152. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
  153. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
  154. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
  155. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
  156. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
  157. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
  158. sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
  159. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
  160. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
  161. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
  162. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
  163. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
  164. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
  165. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
  166. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
  167. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
  168. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
  169. sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
  170. sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
  171. sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
  172. sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
  173. sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
  174. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
  175. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
  176. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
  177. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
  178. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
  179. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
  180. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
  181. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
  182. sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
  183. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
  184. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
  185. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
  186. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
  187. sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
  188. sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
  189. sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
  190. sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
  191. sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
  192. sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
  193. sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
  194. sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
  195. sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
  196. sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
  197. sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
  198. sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
  199. sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
  200. sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
  201. sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
  202. sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
  203. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
  204. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
  205. sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
  206. sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
  207. sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
  208. sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
  209. sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
  210. sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
  211. sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
  212. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-39-darwin.so +0 -0
  213. sequenzo/dissimilarity_measures/utils/seqconc.cpython-39-darwin.so +0 -0
  214. sequenzo/dissimilarity_measures/utils/seqdss.cpython-39-darwin.so +0 -0
  215. sequenzo/dissimilarity_measures/utils/seqdur.cpython-39-darwin.so +0 -0
  216. sequenzo/dissimilarity_measures/utils/seqlength.cpython-39-darwin.so +0 -0
  217. sequenzo/multidomain/__init__.py +23 -0
  218. sequenzo/multidomain/association_between_domains.py +311 -0
  219. sequenzo/multidomain/cat.py +431 -0
  220. sequenzo/multidomain/combt.py +519 -0
  221. sequenzo/multidomain/dat.py +89 -0
  222. sequenzo/multidomain/idcd.py +139 -0
  223. sequenzo/multidomain/linked_polyad.py +292 -0
  224. sequenzo/openmp_setup.py +233 -0
  225. sequenzo/prefix_tree/__init__.py +43 -0
  226. sequenzo/prefix_tree/individual_level_indicators.py +1274 -0
  227. sequenzo/prefix_tree/system_level_indicators.py +465 -0
  228. sequenzo/prefix_tree/utils.py +54 -0
  229. sequenzo/sequence_characteristics/__init__.py +40 -0
  230. sequenzo/sequence_characteristics/complexity_index.py +49 -0
  231. sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
  232. sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
  233. sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
  234. sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
  235. sequenzo/sequence_characteristics/turbulence.py +155 -0
  236. sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
  237. sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
  238. sequenzo/suffix_tree/__init__.py +48 -0
  239. sequenzo/suffix_tree/individual_level_indicators.py +1638 -0
  240. sequenzo/suffix_tree/system_level_indicators.py +456 -0
  241. sequenzo/suffix_tree/utils.py +56 -0
  242. sequenzo/visualization/__init__.py +29 -0
  243. sequenzo/visualization/plot_mean_time.py +194 -0
  244. sequenzo/visualization/plot_modal_state.py +276 -0
  245. sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
  246. sequenzo/visualization/plot_relative_frequency.py +404 -0
  247. sequenzo/visualization/plot_sequence_index.py +937 -0
  248. sequenzo/visualization/plot_single_medoid.py +153 -0
  249. sequenzo/visualization/plot_state_distribution.py +613 -0
  250. sequenzo/visualization/plot_transition_matrix.py +190 -0
  251. sequenzo/visualization/utils/__init__.py +23 -0
  252. sequenzo/visualization/utils/utils.py +310 -0
  253. sequenzo/with_event_history_analysis/__init__.py +35 -0
  254. sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
  255. sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
  256. sequenzo-0.1.21.dist-info/METADATA +308 -0
  257. sequenzo-0.1.21.dist-info/RECORD +254 -0
  258. sequenzo-0.1.21.dist-info/WHEEL +5 -0
  259. sequenzo-0.1.21.dist-info/licenses/LICENSE +28 -0
  260. sequenzo-0.1.21.dist-info/top_level.txt +1 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp
@@ -0,0 +1,892 @@
+ /***************************************************************************
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
+ * Martin Renou *
+ * Copyright (c) QuantStack *
+ * Copyright (c) Serge Guelton *
+ * *
+ * Distributed under the terms of the BSD 3-Clause License. *
+ * *
+ * The full license is in the file LICENSE, distributed with this software. *
+ ****************************************************************************/
+
+ #ifndef XSIMD_VSX_HPP
+ #define XSIMD_VSX_HPP
+
+ #include <complex>
+ #include <limits>
+ #include <type_traits>
+
+ #include "../types/xsimd_vsx_register.hpp"
+
+ #include <endian.h>
+
+ namespace xsimd
+ {
+ template <typename T, class A, bool... Values>
+ struct batch_bool_constant;
+
+ template <class T_out, class T_in, class A>
+ XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& x) noexcept;
+
+ template <typename T, class A, T... Values>
+ struct batch_constant;
+
+ namespace kernel
+ {
+ template <class A, class T>
+ XSIMD_INLINE batch<T, A> avg(batch<T, A> const&, batch<T, A> const&, requires_arch<common>) noexcept;
+ template <class A, class T>
+ XSIMD_INLINE batch<T, A> avgr(batch<T, A> const&, batch<T, A> const&, requires_arch<common>) noexcept;
+
+ // abs
+ template <class A>
+ XSIMD_INLINE batch<float, A> abs(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_abs(self.data);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> abs(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_abs(self.data);
+ }
+
+ // add
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> add(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_add(self.data, other.data);
+ }
+
+ // all
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE bool all(batch_bool<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_all_ne(self.data, vec_xor(self.data, self.data));
+ }
+
+ // any
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE bool any(batch_bool<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_any_ne(self.data, vec_xor(self.data, self.data));
+ }
+
+ // avgr
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value && sizeof(T) < 8, void>::type>
+ XSIMD_INLINE batch<T, A> avgr(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_avg(self.data, other.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<float, A> avgr(batch<float, A> const& self, batch<float, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return avgr(self, other, common {});
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> avgr(batch<double, A> const& self, batch<double, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return avgr(self, other, common {});
+ }
+
+ // avg
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> avg(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ XSIMD_IF_CONSTEXPR(sizeof(T) < 8)
+ {
+ constexpr auto nbit = 8 * sizeof(T) - 1;
+ auto adj = bitwise_cast<T>(bitwise_cast<as_unsigned_integer_t<T>>((self ^ other) << nbit) >> nbit);
+ return avgr(self, other, A {}) - adj;
+ }
+ else
+ {
+ return avg(self, other, common {});
+ }
+ }
+ template <class A>
+ XSIMD_INLINE batch<float, A> avg(batch<float, A> const& self, batch<float, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return avg(self, other, common {});
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> avg(batch<double, A> const& self, batch<double, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return avg(self, other, common {});
+ }
+
+ // batch_bool_cast
+ template <class A, class T_out, class T_in>
+ XSIMD_INLINE batch_bool<T_out, A> batch_bool_cast(batch_bool<T_in, A> const& self, batch_bool<T_out, A> const&, requires_arch<vsx>) noexcept
+ {
+ return (typename batch_bool<T_out, A>::register_type)self.data;
+ }
+
+ // bitwise_and
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_and(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_and(self.data, other.data);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> bitwise_and(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_and(self.data, other.data);
+ }
+
+ // bitwise_andnot
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_andnot(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_and(self.data, vec_nor(other.data, other.data));
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> bitwise_andnot(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return self.data & ~other.data;
+ }
+
+ // bitwise_lshift
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& self, int32_t other, requires_arch<vsx>) noexcept
+ {
+ using shift_type = as_unsigned_integer_t<T>;
+ batch<shift_type, A> shift(static_cast<shift_type>(other));
+ return vec_sl(self.data, shift.data);
+ }
+
+ // bitwise_not
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_not(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_nor(self.data, self.data);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> bitwise_not(batch_bool<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_nor(self.data, self.data);
+ }
+
+ // bitwise_or
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_or(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_or(self.data, other.data);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> bitwise_or(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_or(self.data, other.data);
+ }
+
+ // bitwise_rshift
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& self, int32_t other, requires_arch<vsx>) noexcept
+ {
+ using shift_type = as_unsigned_integer_t<T>;
+ batch<shift_type, A> shift(static_cast<shift_type>(other));
+ XSIMD_IF_CONSTEXPR(std::is_signed<T>::value)
+ {
+ return vec_sra(self.data, shift.data);
+ }
+ else
+ {
+ return vec_sr(self.data, shift.data);
+ }
+ }
+
+ // bitwise_xor
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> bitwise_xor(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_xor(self.data, other.data);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> bitwise_xor(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_xor(self.data, other.data);
+ }
+
+ // bitwise_cast
+ template <class A, class T_in, class T_out>
+ XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& self, batch<T_out, A> const&, requires_arch<vsx>) noexcept
+ {
+ return (typename batch<T_out, A>::register_type)(self.data);
+ }
+
+ // broadcast
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> broadcast(T val, requires_arch<vsx>) noexcept
+ {
+ return vec_splats(val);
+ }
+
+ // ceil
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> ceil(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_ceil(self.data);
+ }
+
+ // store_complex
+ namespace detail
+ {
+ // complex_low
+ template <class A>
+ XSIMD_INLINE batch<float, A> complex_low(batch<std::complex<float>, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_mergeh(self.real().data, self.imag().data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> complex_low(batch<std::complex<double>, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_mergeh(self.real().data, self.imag().data);
+ }
+ // complex_high
+ template <class A>
+ XSIMD_INLINE batch<float, A> complex_high(batch<std::complex<float>, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_mergel(self.real().data, self.imag().data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> complex_high(batch<std::complex<double>, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_mergel(self.real().data, self.imag().data);
+ }
+ }
+
+ // decr_if
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> decr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<vsx>) noexcept
+ {
+ return self + batch<T, A>((typename batch<T, A>::register_type)mask.data);
+ }
+
+ // div
+ template <class A>
+ XSIMD_INLINE batch<float, A> div(batch<float, A> const& self, batch<float, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_div(self.data, other.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> div(batch<double, A> const& self, batch<double, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_div(self.data, other.data);
+ }
+
+ // fast_cast
+ namespace detail
+ {
+ template <class A>
+ XSIMD_INLINE batch<float, A> fast_cast(batch<int32_t, A> const& self, batch<float, A> const&, requires_arch<vsx>) noexcept
+ {
+ return vec_ctf(self.data, 0);
+ }
+ template <class A>
+ XSIMD_INLINE batch<float, A> fast_cast(batch<uint32_t, A> const& self, batch<float, A> const&, requires_arch<vsx>) noexcept
+ {
+ return vec_ctf(self.data, 0);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<int32_t, A> fast_cast(batch<float, A> const& self, batch<int32_t, A> const&, requires_arch<vsx>) noexcept
+ {
+ return vec_cts(self.data, 0);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<uint32_t, A> fast_cast(batch<float, A> const& self, batch<uint32_t, A> const&, requires_arch<vsx>) noexcept
+ {
+ return vec_ctu(self.data, 0);
+ }
+ }
+
+ // fma
+ template <class A>
+ XSIMD_INLINE batch<float, A> fma(batch<float, A> const& x, batch<float, A> const& y, batch<float, A> const& z, requires_arch<vsx>) noexcept
+ {
+ return vec_madd(x.data, y.data, z.data);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> fma(batch<double, A> const& x, batch<double, A> const& y, batch<double, A> const& z, requires_arch<vsx>) noexcept
+ {
+ return vec_madd(x.data, y.data, z.data);
+ }
+
+ // fms
+ template <class A>
+ XSIMD_INLINE batch<float, A> fms(batch<float, A> const& x, batch<float, A> const& y, batch<float, A> const& z, requires_arch<vsx>) noexcept
+ {
+ return vec_msub(x.data, y.data, z.data);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> fms(batch<double, A> const& x, batch<double, A> const& y, batch<double, A> const& z, requires_arch<vsx>) noexcept
+ {
+ return vec_msub(x.data, y.data, z.data);
+ }
+
+ // eq
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ auto res = vec_cmpeq(self.data, other.data);
+ return *reinterpret_cast<typename batch_bool<T, A>::register_type*>(&res);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> eq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ auto res = vec_cmpeq(self.data, other.data);
+ return *reinterpret_cast<typename batch_bool<T, A>::register_type*>(&res);
+ }
+
+ // first
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE T first(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_extract(self.data, 0);
+ }
+
+ // floor
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> floor(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_floor(self.data);
+ }
+
+ // ge
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_cmpge(self.data, other.data);
+ }
+
+ // gt
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_cmpgt(self.data, other.data);
+ }
+
+ // haddp
+ template <class A>
+ XSIMD_INLINE batch<float, A> haddp(batch<float, A> const* row, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_mergee(row[0].data, row[1].data); // v00 v10 v02 v12
+ auto tmp1 = vec_mergeo(row[0].data, row[1].data); // v01 v11 v03 v13
+ auto tmp4 = vec_add(tmp0, tmp1); // (v00 + v01, v10 + v11, v02 + v03, v12 + v13)
+
+ auto tmp2 = vec_mergee(row[2].data, row[3].data); // v20 v30 v22 v32
+ auto tmp3 = vec_mergeo(row[2].data, row[3].data); // v21 v31 v23 v33
+ auto tmp5 = vec_add(tmp0, tmp1); // (v20 + v21, v30 + v31, v22 + v23, v32 + v33)
+
+ auto tmp6 = vec_perm(tmp4, tmp5, (__vector unsigned char) { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 }); // (v00 + v01, v10 + v11, v20 + v21, v30 + v31
+ auto tmp7 = vec_perm(tmp4, tmp5, (__vector unsigned char) { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 }); // (v02 + v03, v12 + v13, v12 + v13, v32 + v33)
+
+ return vec_add(tmp6, tmp7);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> haddp(batch<double, A> const* row, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_mergee(row[0].data, row[1].data); // v00 v10 v02 v12
+ auto tmp1 = vec_mergeo(row[0].data, row[1].data); // v01 v11 v03 v13
+ return vec_add(tmp0, tmp1);
+ }
+
+ // incr_if
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> incr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<vsx>) noexcept
+ {
+ return self - batch<T, A>((typename batch<T, A>::register_type)mask.data);
+ }
+
+ // insert
+ template <class A, class T, size_t I, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I>, requires_arch<vsx>) noexcept
+ {
+ return vec_insert(val, self.data, I);
+ }
+
+ // isnan
+ template <class A>
+ XSIMD_INLINE batch_bool<float, A> isnan(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, self.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch_bool<double, A> isnan(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, self.data);
+ }
+
+ // load_aligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> load_aligned(T const* mem, convert<T>, requires_arch<vsx>) noexcept
+ {
+ return vec_ld(0, reinterpret_cast<const typename batch<T, A>::register_type*>(mem));
+ }
+
+ // load_unaligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> load_unaligned(T const* mem, convert<T>, requires_arch<vsx>) noexcept
+ {
+ return vec_vsx_ld(0, (typename batch<T, A>::register_type const*)mem);
+ }
+
+ // load_complex
+ namespace detail
+ {
+ template <class A>
+ XSIMD_INLINE batch<std::complex<float>, A> load_complex(batch<float, A> const& hi, batch<float, A> const& lo, requires_arch<vsx>) noexcept
+ {
+ __vector unsigned char perme = { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
+ __vector unsigned char permo = { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
+ return { vec_perm(hi.data, lo.data, perme), vec_perm(hi.data, lo.data, permo) };
+ }
+ template <class A>
+ XSIMD_INLINE batch<std::complex<double>, A> load_complex(batch<double, A> const& hi, batch<double, A> const& lo, requires_arch<vsx>) noexcept
+ {
+ return { vec_mergee(hi.data, lo.data), vec_mergeo(hi.data, lo.data) };
+ }
+ }
+
+ // le
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_cmple(self.data, other.data);
+ }
+
+ // lt
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_cmplt(self.data, other.data);
+ }
+
+ // max
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> max(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_max(self.data, other.data);
+ }
+
+ // min
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> min(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_min(self.data, other.data);
+ }
+
+ // mul
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return self.data * other.data;
+ }
+
+ // neg
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> neg(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return -(self.data);
+ }
+
+ // neq
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, other.data);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> neq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return ~vec_cmpeq(self.data, other.data);
+ }
+
+ // reciprocal
+ template <class A>
+ XSIMD_INLINE batch<float, A> reciprocal(batch<float, A> const& self,
+ kernel::requires_arch<vsx>)
+ {
+ return vec_re(self.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> reciprocal(batch<double, A> const& self,
+ kernel::requires_arch<vsx>)
+ {
+ return vec_re(self.data);
+ }
+
+ // reduce_add
+ template <class A>
+ XSIMD_INLINE signed reduce_add(batch<signed, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v3, v1 + v2, v2 + v1, v3 + v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 + v1, v2 + v1, v3 + v0, v3 + v0
+ auto tmp3 = vec_add(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE unsigned reduce_add(batch<unsigned, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v3, v1 + v2, v2 + v1, v3 + v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 + v1, v2 + v1, v3 + v0, v3 + v0
+ auto tmp3 = vec_add(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE float reduce_add(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ // FIXME: find an in-order approach
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v3, v1 + v2, v2 + v1, v3 + v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 + v1, v2 + v1, v3 + v0, v3 + v0
+ auto tmp3 = vec_add(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE double reduce_add(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v1, v0
+ auto tmp1 = vec_add(self.data, tmp0); // v0 + v1, v1 + v0
+ return vec_extract(tmp1, 0);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE T reduce_add(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return reduce_add(self, common {});
+ }
+
+ // reduce_mul
+ template <class A>
+ XSIMD_INLINE signed reduce_mul(batch<signed, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v3, v1 * v2, v2 * v1, v3 * v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 * v1, v2 * v1, v3 * v0, v3 * v0
+ auto tmp3 = vec_mul(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE unsigned reduce_mul(batch<unsigned, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v3, v1 * v2, v2 * v1, v3 * v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 * v1, v2 * v1, v3 * v0, v3 * v0
+ auto tmp3 = vec_mul(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE float reduce_mul(batch<float, A> const& self, requires_arch<vsx>) noexcept
+ {
+ // FIXME: find an in-order approach
+ auto tmp0 = vec_reve(self.data); // v3, v2, v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v3, v1 * v2, v2 * v1, v3 * v0
+ auto tmp2 = vec_mergel(tmp1, tmp1); // v2 * v1, v2 * v1, v3 * v0, v3 * v0
+ auto tmp3 = vec_mul(tmp1, tmp2);
+ return vec_extract(tmp3, 0);
+ }
+ template <class A>
+ XSIMD_INLINE double reduce_mul(batch<double, A> const& self, requires_arch<vsx>) noexcept
+ {
+ auto tmp0 = vec_reve(self.data); // v1, v0
+ auto tmp1 = vec_mul(self.data, tmp0); // v0 * v1, v1 * v0
+ return vec_extract(tmp1, 0);
+ }
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE T reduce_mul(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return reduce_mul(self, common {});
+ }
+
+ // round
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> round(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_round(self.data);
+ }
+
+ // rsqrt
+ template <class A>
+ XSIMD_INLINE batch<float, A> rsqrt(batch<float, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_rsqrt(val.data);
+ }
+ template <class A>
+ XSIMD_INLINE batch<double, A> rsqrt(batch<double, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_rsqrt(val.data);
+ }
+
+ // select
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<vsx>) noexcept
+ {
+ return vec_sel(false_br.data, true_br.data, cond.data);
+ }
+ template <class A, class T, bool... Values, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> select(batch_bool_constant<T, A, Values...> const&, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<vsx>) noexcept
+ {
+ return select(batch_bool<T, A> { Values... }, true_br, false_br, vsx {});
+ }
+
+ // shuffle
+ template <class A, class ITy, ITy I0, ITy I1, ITy I2, ITy I3>
+ XSIMD_INLINE batch<float, A> shuffle(batch<float, A> const& x, batch<float, A> const& y, batch_constant<ITy, A, I0, I1, I2, I3>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(x.data, y.data,
+ (__vector unsigned char) {
+ 4 * I0 + 0, 4 * I0 + 1, 4 * I0 + 2, 4 * I0 + 3,
+ 4 * I1 + 0, 4 * I1 + 1, 4 * I1 + 2, 4 * I1 + 3,
+ 4 * I2 + 0, 4 * I2 + 1, 4 * I2 + 2, 4 * I2 + 3,
+ 4 * I3 + 0, 4 * I3 + 1, 4 * I3 + 2, 4 * I3 + 3 });
+ }
+
+ template <class A, class ITy, ITy I0, ITy I1>
+ XSIMD_INLINE batch<double, A> shuffle(batch<double, A> const& x, batch<double, A> const& y, batch_constant<ITy, A, I0, I1>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(x.data, y.data,
+ (__vector unsigned char) {
+ 8 * I0 + 0,
+ 8 * I0 + 1,
+ 8 * I0 + 2,
+ 8 * I0 + 3,
+ 8 * I0 + 4,
+ 8 * I0 + 5,
+ 8 * I0 + 6,
+ 8 * I0 + 7,
+ 8 * I1 + 0,
+ 8 * I1 + 1,
+ 8 * I1 + 2,
+ 8 * I1 + 3,
+ 8 * I1 + 4,
+ 8 * I1 + 5,
+ 8 * I1 + 6,
+ 8 * I1 + 7,
+ });
+ }
+
+ // sqrt
+ template <class A>
+ XSIMD_INLINE batch<float, A> sqrt(batch<float, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_sqrt(val.data);
+ }
+
+ template <class A>
+ XSIMD_INLINE batch<double, A> sqrt(batch<double, A> const& val, requires_arch<vsx>) noexcept
+ {
+ return vec_sqrt(val.data);
+ }
+
+ // slide_left
+ template <size_t N, class A, class T>
+ XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<vsx>) noexcept
+ {
+ XSIMD_IF_CONSTEXPR(N == batch<T, A>::size * sizeof(T))
+ {
+ return batch<T, A>(0);
+ }
+ else
+ {
+ auto slider = vec_splats((uint8_t)(8 * N));
+ return (typename batch<T, A>::register_type)vec_slo(x.data, slider);
+ }
+ }
+
+ // slide_right
+ template <size_t N, class A, class T>
+ XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<vsx>) noexcept
+ {
+ XSIMD_IF_CONSTEXPR(N == batch<T, A>::size * sizeof(T))
+ {
+ return batch<T, A>(0);
+ }
+ else
+ {
+ auto slider = vec_splats((uint8_t)(8 * N));
+ return (typename batch<T, A>::register_type)vec_sro((__vector unsigned char)x.data, slider);
+ }
+ }
+
+ // sadd
+ template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value && sizeof(T) != 8, void>::type>
+ XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_adds(self.data, other.data);
+ }
+
+ // set
+ template <class A, class T, class... Values>
+ XSIMD_INLINE batch<T, A> set(batch<T, A> const&, requires_arch<vsx>, Values... values) noexcept
+ {
+ static_assert(sizeof...(Values) == batch<T, A>::size, "consistent init");
+ return typename batch<T, A>::register_type { values... };
+ }
+
+ template <class A, class T, class... Values, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch_bool<T, A> set(batch_bool<T, A> const&, requires_arch<vsx>, Values... values) noexcept
+ {
+ static_assert(sizeof...(Values) == batch_bool<T, A>::size, "consistent init");
+ return typename batch_bool<T, A>::register_type { static_cast<decltype(std::declval<typename batch_bool<T, A>::register_type>()[0])>(values ? -1LL : 0LL)... };
+ }
+
+ // ssub
+
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value && sizeof(T) == 1, void>::type>
+ XSIMD_INLINE batch<T, A> ssub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_subs(self.data, other.data);
+ }
+
+ // store_aligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE void store_aligned(T* mem, batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_st(self.data, 0, reinterpret_cast<typename batch<T, A>::register_type*>(mem));
+ }
+
+ // store_unaligned
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE void store_unaligned(T* mem, batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_vsx_st(self.data, 0, reinterpret_cast<typename batch<T, A>::register_type*>(mem));
+ }
+
+ // sub
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_sub(self.data, other.data);
+ }
+
+ // swizzle
+
+ template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
+ XSIMD_INLINE batch<float, A> swizzle(batch<float, A> const& self, batch_constant<uint32_t, A, V0, V1, V2, V3>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 4 * V0 + 0, 4 * V0 + 1, 4 * V0 + 2, 4 * V0 + 3,
+ 4 * V1 + 0, 4 * V1 + 1, 4 * V1 + 2, 4 * V1 + 3,
+ 4 * V2 + 0, 4 * V2 + 1, 4 * V2 + 2, 4 * V2 + 3,
+ 4 * V3 + 0, 4 * V3 + 1, 4 * V3 + 2, 4 * V3 + 3 });
+ }
+
+ template <class A, uint64_t V0, uint64_t V1>
+ XSIMD_INLINE batch<double, A> swizzle(batch<double, A> const& self, batch_constant<uint64_t, A, V0, V1>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 8 * V0 + 0,
+ 8 * V0 + 1,
+ 8 * V0 + 2,
+ 8 * V0 + 3,
+ 8 * V0 + 4,
+ 8 * V0 + 5,
+ 8 * V0 + 6,
+ 8 * V0 + 7,
+ 8 * V1 + 0,
+ 8 * V1 + 1,
+ 8 * V1 + 2,
+ 8 * V1 + 3,
+ 8 * V1 + 4,
+ 8 * V1 + 5,
+ 8 * V1 + 6,
+ 8 * V1 + 7,
+ });
+ }
+
+ template <class A, uint64_t V0, uint64_t V1>
+ XSIMD_INLINE batch<uint64_t, A> swizzle(batch<uint64_t, A> const& self, batch_constant<uint64_t, A, V0, V1>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 8 * V0 + 0,
+ 8 * V0 + 1,
+ 8 * V0 + 2,
+ 8 * V0 + 3,
+ 8 * V0 + 4,
+ 8 * V0 + 5,
+ 8 * V0 + 6,
+ 8 * V0 + 7,
+ 8 * V1 + 0,
+ 8 * V1 + 1,
+ 8 * V1 + 2,
+ 8 * V1 + 3,
+ 8 * V1 + 4,
+ 8 * V1 + 5,
+ 8 * V1 + 6,
+ 8 * V1 + 7,
+ });
+ }
+
+ template <class A, uint64_t V0, uint64_t V1>
+ XSIMD_INLINE batch<int64_t, A> swizzle(batch<int64_t, A> const& self, batch_constant<uint64_t, A, V0, V1> mask, requires_arch<vsx>) noexcept
+ {
+ return bitwise_cast<int64_t>(swizzle(bitwise_cast<uint64_t>(self), mask, vsx {}));
+ }
+
+ template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
+ XSIMD_INLINE batch<uint32_t, A> swizzle(batch<uint32_t, A> const& self, batch_constant<uint32_t, A, V0, V1, V2, V3>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 4 * V0 + 0, 4 * V0 + 1, 4 * V0 + 2, 4 * V0 + 3,
+ 4 * V1 + 0, 4 * V1 + 1, 4 * V1 + 2, 4 * V1 + 3,
+ 4 * V2 + 0, 4 * V2 + 1, 4 * V2 + 2, 4 * V2 + 3,
+ 4 * V3 + 0, 4 * V3 + 1, 4 * V3 + 2, 4 * V3 + 3 });
+ }
+
+ template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
+ XSIMD_INLINE batch<int32_t, A> swizzle(batch<int32_t, A> const& self, batch_constant<uint32_t, A, V0, V1, V2, V3> mask, requires_arch<vsx>) noexcept
+ {
+ return bitwise_cast<int32_t>(swizzle(bitwise_cast<uint32_t>(self), mask, vsx {}));
+ }
+
+ template <class A, uint16_t V0, uint16_t V1, uint16_t V2, uint16_t V3, uint16_t V4, uint16_t V5, uint16_t V6, uint16_t V7>
+ XSIMD_INLINE batch<uint16_t, A> swizzle(batch<uint16_t, A> const& self, batch_constant<uint16_t, A, V0, V1, V2, V3, V4, V5, V6, V7>, requires_arch<vsx>) noexcept
+ {
+ return vec_perm(self.data, self.data,
+ (__vector unsigned char) {
+ 2 * V0 + 0, 2 * V0 + 1, 2 * V1 + 0, 2 * V1 + 1,
+ 2 * V2 + 0, 2 * V2 + 1, 2 * V3 + 0, 2 * V3 + 1,
+ 2 * V4 + 0, 2 * V4 + 1, 2 * V5 + 0, 2 * V5 + 1,
+ 2 * V6 + 0, 2 * V6 + 1, 2 * V7 + 0, 2 * V7 + 1 });
+ }
+
+ template <class A, uint16_t V0, uint16_t V1, uint16_t V2, uint16_t V3, uint16_t V4, uint16_t V5, uint16_t V6, uint16_t V7>
+ XSIMD_INLINE batch<int16_t, A> swizzle(batch<int16_t, A> const& self, batch_constant<uint16_t, A, V0, V1, V2, V3, V4, V5, V6, V7> mask, requires_arch<vsx>) noexcept
+ {
+ return bitwise_cast<int16_t>(swizzle(bitwise_cast<uint16_t>(self), mask, vsx {}));
+ }
+
+ // trunc
+ template <class A, class T, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> trunc(batch<T, A> const& self, requires_arch<vsx>) noexcept
+ {
+ return vec_trunc(self.data);
+ }
+
+ // zip_hi
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> zip_hi(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_mergel(self.data, other.data);
+ }
+
+ // zip_lo
+ template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+ XSIMD_INLINE batch<T, A> zip_lo(batch<T, A> const& self, batch<T, A> const& other, requires_arch<vsx>) noexcept
+ {
+ return vec_mergeh(self.data, other.data);
+ }
+ }
+ }
+
+ #endif
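
For orientation (not part of the wheel's contents): the functions above are per-architecture kernels that xsimd's generic batch API selects at compile time through the requires_arch<vsx> tag. Below is a minimal sketch of that generic usage, assuming a POWER build where xsimd picks VSX as the default architecture; on other targets the same code lowers to the SSE/AVX/NEON kernels bundled alongside this header.

#include <cstddef>
#include "xsimd/xsimd.hpp"

// Sum a float array through xsimd's generic API. The load_unaligned,
// operator+= and reduce_add calls resolve to the vsx kernels defined
// above when the default architecture is VSX.
float sum(float const* data, std::size_t n)
{
    using batch = xsimd::batch<float>;           // batch for the default arch
    constexpr std::size_t width = batch::size;

    batch acc(0.0f);
    std::size_t i = 0;
    for (; i + width <= n; i += width)
        acc += batch::load_unaligned(data + i);  // -> load_unaligned kernel (vec_vsx_ld)
    float total = xsimd::reduce_add(acc);        // -> reduce_add kernel
    for (; i < n; ++i)                           // scalar tail
        total += data[i];
    return total;
}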