sequenzo 0.1.31__cp310-cp310-macosx_10_9_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (299) hide show
  1. _sequenzo_fastcluster.cpython-310-darwin.so +0 -0
  2. sequenzo/__init__.py +349 -0
  3. sequenzo/big_data/__init__.py +12 -0
  4. sequenzo/big_data/clara/__init__.py +26 -0
  5. sequenzo/big_data/clara/clara.py +476 -0
  6. sequenzo/big_data/clara/utils/__init__.py +27 -0
  7. sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
  8. sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
  9. sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
  10. sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
  11. sequenzo/big_data/clara/visualization.py +88 -0
  12. sequenzo/clustering/KMedoids.py +178 -0
  13. sequenzo/clustering/__init__.py +30 -0
  14. sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
  15. sequenzo/clustering/hierarchical_clustering.py +1256 -0
  16. sequenzo/clustering/sequenzo_fastcluster/fastcluster.py +495 -0
  17. sequenzo/clustering/sequenzo_fastcluster/src/fastcluster.cpp +1877 -0
  18. sequenzo/clustering/sequenzo_fastcluster/src/fastcluster_python.cpp +1264 -0
  19. sequenzo/clustering/src/KMedoid.cpp +263 -0
  20. sequenzo/clustering/src/PAM.cpp +237 -0
  21. sequenzo/clustering/src/PAMonce.cpp +265 -0
  22. sequenzo/clustering/src/cluster_quality.cpp +496 -0
  23. sequenzo/clustering/src/cluster_quality.h +128 -0
  24. sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
  25. sequenzo/clustering/src/module.cpp +228 -0
  26. sequenzo/clustering/src/weightedinertia.cpp +111 -0
  27. sequenzo/clustering/utils/__init__.py +27 -0
  28. sequenzo/clustering/utils/disscenter.py +122 -0
  29. sequenzo/data_preprocessing/__init__.py +22 -0
  30. sequenzo/data_preprocessing/helpers.py +303 -0
  31. sequenzo/datasets/__init__.py +41 -0
  32. sequenzo/datasets/biofam.csv +2001 -0
  33. sequenzo/datasets/biofam_child_domain.csv +2001 -0
  34. sequenzo/datasets/biofam_left_domain.csv +2001 -0
  35. sequenzo/datasets/biofam_married_domain.csv +2001 -0
  36. sequenzo/datasets/chinese_colonial_territories.csv +12 -0
  37. sequenzo/datasets/country_co2_emissions.csv +194 -0
  38. sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
  39. sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
  40. sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
  41. sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
  42. sequenzo/datasets/country_gdp_per_capita.csv +194 -0
  43. sequenzo/datasets/dyadic_children.csv +61 -0
  44. sequenzo/datasets/dyadic_parents.csv +61 -0
  45. sequenzo/datasets/mvad.csv +713 -0
  46. sequenzo/datasets/pairfam_activity_by_month.csv +1028 -0
  47. sequenzo/datasets/pairfam_activity_by_year.csv +1028 -0
  48. sequenzo/datasets/pairfam_family_by_month.csv +1028 -0
  49. sequenzo/datasets/pairfam_family_by_year.csv +1028 -0
  50. sequenzo/datasets/political_science_aid_shock.csv +166 -0
  51. sequenzo/datasets/political_science_donor_fragmentation.csv +157 -0
  52. sequenzo/define_sequence_data.py +1400 -0
  53. sequenzo/dissimilarity_measures/__init__.py +31 -0
  54. sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
  55. sequenzo/dissimilarity_measures/get_distance_matrix.py +762 -0
  56. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +246 -0
  57. sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
  58. sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
  59. sequenzo/dissimilarity_measures/src/LCPspellDistance.cpp +215 -0
  60. sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
  61. sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
  62. sequenzo/dissimilarity_measures/src/__init__.py +0 -0
  63. sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
  64. sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
  65. sequenzo/dissimilarity_measures/src/module.cpp +40 -0
  66. sequenzo/dissimilarity_measures/src/setup.py +30 -0
  67. sequenzo/dissimilarity_measures/src/utils.h +25 -0
  68. sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
  69. sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
  70. sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
  71. sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
  72. sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
  73. sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
  74. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
  75. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
  76. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
  77. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
  78. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
  79. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
  80. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
  81. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
  82. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
  83. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
  84. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
  85. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
  86. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
  87. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
  88. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
  89. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
  90. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
  91. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
  92. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
  93. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
  94. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
  95. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
  96. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
  97. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
  98. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
  99. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
  100. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
  101. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
  102. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
  103. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
  104. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
  105. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
  106. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
  107. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
  108. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
  109. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
  110. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
  111. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
  112. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
  113. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
  114. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
  115. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
  116. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
  117. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
  118. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
  119. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
  120. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
  121. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
  122. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
  123. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
  124. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
  125. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
  126. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
  127. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
  128. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
  129. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
  130. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
  131. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
  132. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
  133. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
  134. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
  135. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
  136. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
  137. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
  138. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
  139. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
  140. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
  141. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
  142. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
  143. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
  144. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
  145. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
  146. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
  147. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
  148. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
  149. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
  150. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
  151. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
  152. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
  153. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
  154. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
  155. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
  156. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
  157. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
  158. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
  159. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
  160. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
  161. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
  162. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
  163. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
  164. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
  165. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
  166. sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
  167. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
  168. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
  169. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
  170. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
  171. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
  172. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
  173. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
  174. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
  175. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
  176. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
  177. sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
  178. sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
  179. sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
  180. sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
  181. sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
  182. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
  183. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
  184. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
  185. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
  186. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
  187. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
  188. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
  189. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
  190. sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
  191. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
  192. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
  193. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
  194. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
  195. sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
  196. sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
  197. sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
  198. sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
  199. sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
  200. sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
  201. sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
  202. sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
  203. sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
  204. sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
  205. sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
  206. sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
  207. sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
  208. sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
  209. sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
  210. sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
  211. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
  212. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
  213. sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
  214. sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
  215. sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
  216. sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
  217. sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
  218. sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
  219. sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
  220. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
  221. sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
  222. sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
  223. sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
  224. sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
  225. sequenzo/multidomain/__init__.py +23 -0
  226. sequenzo/multidomain/association_between_domains.py +311 -0
  227. sequenzo/multidomain/cat.py +597 -0
  228. sequenzo/multidomain/combt.py +519 -0
  229. sequenzo/multidomain/dat.py +81 -0
  230. sequenzo/multidomain/idcd.py +139 -0
  231. sequenzo/multidomain/linked_polyad.py +292 -0
  232. sequenzo/openmp_setup.py +233 -0
  233. sequenzo/prefix_tree/__init__.py +62 -0
  234. sequenzo/prefix_tree/hub.py +114 -0
  235. sequenzo/prefix_tree/individual_level_indicators.py +1321 -0
  236. sequenzo/prefix_tree/spell_individual_level_indicators.py +580 -0
  237. sequenzo/prefix_tree/spell_level_indicators.py +297 -0
  238. sequenzo/prefix_tree/system_level_indicators.py +544 -0
  239. sequenzo/prefix_tree/utils.py +54 -0
  240. sequenzo/seqhmm/__init__.py +95 -0
  241. sequenzo/seqhmm/advanced_optimization.py +305 -0
  242. sequenzo/seqhmm/bootstrap.py +411 -0
  243. sequenzo/seqhmm/build_hmm.py +142 -0
  244. sequenzo/seqhmm/build_mhmm.py +136 -0
  245. sequenzo/seqhmm/build_nhmm.py +121 -0
  246. sequenzo/seqhmm/fit_mhmm.py +62 -0
  247. sequenzo/seqhmm/fit_model.py +61 -0
  248. sequenzo/seqhmm/fit_nhmm.py +76 -0
  249. sequenzo/seqhmm/formulas.py +289 -0
  250. sequenzo/seqhmm/forward_backward_nhmm.py +276 -0
  251. sequenzo/seqhmm/gradients_nhmm.py +306 -0
  252. sequenzo/seqhmm/hmm.py +291 -0
  253. sequenzo/seqhmm/mhmm.py +314 -0
  254. sequenzo/seqhmm/model_comparison.py +238 -0
  255. sequenzo/seqhmm/multichannel_em.py +282 -0
  256. sequenzo/seqhmm/multichannel_utils.py +138 -0
  257. sequenzo/seqhmm/nhmm.py +270 -0
  258. sequenzo/seqhmm/nhmm_utils.py +191 -0
  259. sequenzo/seqhmm/predict.py +137 -0
  260. sequenzo/seqhmm/predict_mhmm.py +142 -0
  261. sequenzo/seqhmm/simulate.py +878 -0
  262. sequenzo/seqhmm/utils.py +218 -0
  263. sequenzo/seqhmm/visualization.py +910 -0
  264. sequenzo/sequence_characteristics/__init__.py +40 -0
  265. sequenzo/sequence_characteristics/complexity_index.py +49 -0
  266. sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
  267. sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
  268. sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
  269. sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
  270. sequenzo/sequence_characteristics/turbulence.py +155 -0
  271. sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
  272. sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
  273. sequenzo/suffix_tree/__init__.py +66 -0
  274. sequenzo/suffix_tree/hub.py +114 -0
  275. sequenzo/suffix_tree/individual_level_indicators.py +1679 -0
  276. sequenzo/suffix_tree/spell_individual_level_indicators.py +493 -0
  277. sequenzo/suffix_tree/spell_level_indicators.py +248 -0
  278. sequenzo/suffix_tree/system_level_indicators.py +535 -0
  279. sequenzo/suffix_tree/utils.py +56 -0
  280. sequenzo/version_check.py +283 -0
  281. sequenzo/visualization/__init__.py +29 -0
  282. sequenzo/visualization/plot_mean_time.py +222 -0
  283. sequenzo/visualization/plot_modal_state.py +276 -0
  284. sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
  285. sequenzo/visualization/plot_relative_frequency.py +405 -0
  286. sequenzo/visualization/plot_sequence_index.py +1175 -0
  287. sequenzo/visualization/plot_single_medoid.py +153 -0
  288. sequenzo/visualization/plot_state_distribution.py +651 -0
  289. sequenzo/visualization/plot_transition_matrix.py +190 -0
  290. sequenzo/visualization/utils/__init__.py +23 -0
  291. sequenzo/visualization/utils/utils.py +310 -0
  292. sequenzo/with_event_history_analysis/__init__.py +35 -0
  293. sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
  294. sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
  295. sequenzo-0.1.31.dist-info/METADATA +286 -0
  296. sequenzo-0.1.31.dist-info/RECORD +299 -0
  297. sequenzo-0.1.31.dist-info/WHEEL +5 -0
  298. sequenzo-0.1.31.dist-info/licenses/LICENSE +28 -0
  299. sequenzo-0.1.31.dist-info/top_level.txt +2 -0
@@ -0,0 +1,2765 @@
1
+ /***************************************************************************
2
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
3
+ * Martin Renou *
4
+ * Copyright (c) QuantStack *
5
+ * Copyright (c) Serge Guelton *
6
+ * *
7
+ * Distributed under the terms of the BSD 3-Clause License. *
8
+ * *
9
+ * The full license is in the file LICENSE, distributed with this software. *
10
+ ****************************************************************************/
11
+
12
+ #ifndef XSIMD_API_HPP
13
+ #define XSIMD_API_HPP
14
+
15
+ #include <complex>
16
+ #include <cstddef>
17
+ #include <limits>
18
+ #include <ostream>
19
+
20
+ #include "../arch/xsimd_isa.hpp"
21
+ #include "../types/xsimd_batch.hpp"
22
+ #include "../types/xsimd_traits.hpp"
23
+
24
+ namespace xsimd
25
+ {
26
+ /**
27
+ * high level free functions
28
+ *
29
+ * @defgroup batch_arithmetic Arithmetic operators
30
+ * @defgroup batch_constant Constant batches
31
+ * @defgroup batch_cond Conditional operators
32
+ * @defgroup batch_data_transfer Memory operators
33
+ * @defgroup batch_math Basic math operators
34
+ * @defgroup batch_math_extra Extra math operators
35
+ * @defgroup batch_fp Floating point manipulation
36
+ * @defgroup batch_rounding Rounding operators
37
+ * @defgroup batch_conversion Conversion operators
38
+ * @defgroup batch_complex Complex operators
39
+ * @defgroup batch_logical Logical operators
40
+ * @defgroup batch_bitwise Bitwise operators
41
+ * @defgroup batch_reducers Reducers
42
+ * @defgroup batch_miscellaneous Miscellaneous
43
+ * @defgroup batch_trigo Trigonometry
44
+ *
45
+ * @defgroup batch_bool_logical Boolean logical operators
46
+ * @defgroup batch_bool_reducers Boolean reducers
47
+ */
48
+
49
+ /**
50
+ * @ingroup batch_math
51
+ *
52
+ * Computes the absolute values of each scalar in the batch \c x.
53
+ * @param x batch of integer or floating point values.
54
+ * @return the absolute values of \c x.
55
+ */
56
+ template <class T, class A>
57
+ XSIMD_INLINE batch<T, A> abs(batch<T, A> const& x) noexcept
58
+ {
59
+ detail::static_check_supported_config<T, A>();
60
+ return kernel::abs<A>(x, A {});
61
+ }
62
+
63
+ /**
64
+ * @ingroup batch_complex
65
+ *
66
+ * Computes the absolute values of each complex in the batch \c z.
67
+ * @param z batch of complex values.
68
+ * @return the absolute values of \c z.
69
+ */
70
+ template <class T, class A>
71
+ XSIMD_INLINE batch<T, A> abs(batch<std::complex<T>, A> const& z) noexcept
72
+ {
73
+ detail::static_check_supported_config<T, A>();
74
+ return kernel::abs<A>(z, A {});
75
+ }
76
+
77
+ /**
78
+ * @ingroup batch_arithmetic
79
+ *
80
+ * Computes the sum of the batches \c x and \c y.
81
+ * @param x batch or scalar involved in the addition.
82
+ * @param y batch or scalar involved in the addition.
83
+ * @return the sum of \c x and \c y
84
+ */
85
+ template <class T, class A>
86
+ XSIMD_INLINE auto add(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x + y)
87
+ {
88
+ detail::static_check_supported_config<T, A>();
89
+ return x + y;
90
+ }
91
+
92
+ /**
93
+ * @ingroup batch_trigo
94
+ *
95
+ * Computes the arc cosine of the batch \c x.
96
+ * @param x batch of floating point values.
97
+ * @return the arc cosine of \c x.
98
+ */
99
+ template <class T, class A>
100
+ XSIMD_INLINE batch<T, A> acos(batch<T, A> const& x) noexcept
101
+ {
102
+ detail::static_check_supported_config<T, A>();
103
+ return kernel::acos<A>(x, A {});
104
+ }
105
+
106
+ /**
107
+ * @ingroup batch_trigo
108
+ *
109
+ * Computes the inverse hyperbolic cosine of the batch \c x.
110
+ * @param x batch of floating point values.
111
+ * @return the inverse hyperbolic cosine of \c x.
112
+ */
113
+ template <class T, class A>
114
+ XSIMD_INLINE batch<T, A> acosh(batch<T, A> const& x) noexcept
115
+ {
116
+ detail::static_check_supported_config<T, A>();
117
+ return kernel::acosh<A>(x, A {});
118
+ }
119
+
120
+ /**
121
+ * @ingroup batch_complex
122
+ *
123
+ * Computes the argument of the batch \c z.
124
+ * @param z batch of complex or real values.
125
+ * @return the argument of \c z.
126
+ */
127
+ template <class T, class A>
128
+ XSIMD_INLINE real_batch_type_t<batch<T, A>> arg(batch<T, A> const& z) noexcept
129
+ {
130
+ detail::static_check_supported_config<T, A>();
131
+ return kernel::arg<A>(z, A {});
132
+ }
133
+
134
+ /**
135
+ * @ingroup batch_trigo
136
+ *
137
+ * Computes the arc sine of the batch \c x.
138
+ * @param x batch of floating point values.
139
+ * @return the arc sine of \c x.
140
+ */
141
+ template <class T, class A>
142
+ XSIMD_INLINE batch<T, A> asin(batch<T, A> const& x) noexcept
143
+ {
144
+ detail::static_check_supported_config<T, A>();
145
+ return kernel::asin<A>(x, A {});
146
+ }
147
+
148
+ /**
149
+ * @ingroup batch_trigo
150
+ *
151
+ * Computes the inverse hyperbolic sine of the batch \c x.
152
+ * @param x batch of floating point values.
153
+ * @return the inverse hyperbolic sine of \c x.
154
+ */
155
+ template <class T, class A>
156
+ XSIMD_INLINE batch<T, A> asinh(batch<T, A> const& x) noexcept
157
+ {
158
+ detail::static_check_supported_config<T, A>();
159
+ return kernel::asinh<A>(x, A {});
160
+ }
161
+
162
+ /**
163
+ * @ingroup batch_trigo
164
+ *
165
+ * Computes the arc tangent of the batch \c x.
166
+ * @param x batch of floating point values.
167
+ * @return the arc tangent of \c x.
168
+ */
169
+ template <class T, class A>
170
+ XSIMD_INLINE batch<T, A> atan(batch<T, A> const& x) noexcept
171
+ {
172
+ detail::static_check_supported_config<T, A>();
173
+ return kernel::atan<A>(x, A {});
174
+ }
175
+
176
+ /**
177
+ * @ingroup batch_trigo
178
+ *
179
+ * Computes the arc tangent of the batch \c x/y, using the signs of the
180
+ * arguments to determine the correct quadrant.
181
+ * @param x batch of floating point values.
182
+ * @param y batch of floating point values.
183
+ * @return the arc tangent of \c x/y.
184
+ */
185
+ template <class T, class A>
186
+ XSIMD_INLINE batch<T, A> atan2(batch<T, A> const& x, batch<T, A> const& y) noexcept
187
+ {
188
+ detail::static_check_supported_config<T, A>();
189
+ return kernel::atan2<A>(x, y, A {});
190
+ }
191
+
192
+ /**
193
+ * @ingroup batch_trigo
194
+ *
195
+ * Computes the inverse hyperbolic tangent of the batch \c x.
196
+ * @param x batch of floating point values.
197
+ * @return the inverse hyperbolic tangent of \c x.
198
+ */
199
+ template <class T, class A>
200
+ XSIMD_INLINE batch<T, A> atanh(batch<T, A> const& x) noexcept
201
+ {
202
+ detail::static_check_supported_config<T, A>();
203
+ return kernel::atanh<A>(x, A {});
204
+ }
205
+
206
+ /**
207
+ * @ingroup batch_math
208
+ *
209
+ * Computes the average of batches \c x and \c y
210
+ * @param x batch of T
211
+ * @param y batch of T
212
+ * @return the average of elements between \c x and \c y.
213
+ */
214
+ template <class T, class A>
215
+ XSIMD_INLINE batch<T, A> avg(batch<T, A> const& x, batch<T, A> const& y) noexcept
216
+ {
217
+ detail::static_check_supported_config<T, A>();
218
+ return kernel::avg<A>(x, y, A {});
219
+ }
220
+
221
+ /**
222
+ * @ingroup batch_math
223
+ *
224
+ * Computes the rounded average of batches \c x and \c y
225
+ * @param x batch of T
226
+ * @param y batch of T
227
+ * @return the rounded average of elements between \c x and \c y.
228
+ */
229
+ template <class T, class A>
230
+ XSIMD_INLINE batch<T, A> avgr(batch<T, A> const& x, batch<T, A> const& y) noexcept
231
+ {
232
+ detail::static_check_supported_config<T, A>();
233
+ return kernel::avgr<A>(x, y, A {});
234
+ }
235
+
236
+ /**
237
+ * @ingroup batch_conversion
238
+ *
239
+ * Perform a static_cast from \c T_in to \c T_out on \c x.
240
+ * @param x batch_bool of \c T_in
241
+ * @return \c x cast to \c T_out
242
+ */
243
+ template <class T_out, class T_in, class A>
244
+ XSIMD_INLINE batch_bool<T_out, A> batch_bool_cast(batch_bool<T_in, A> const& x) noexcept
245
+ {
246
+ detail::static_check_supported_config<T_out, A>();
247
+ detail::static_check_supported_config<T_in, A>();
248
+ static_assert(batch_bool<T_out, A>::size == batch_bool<T_in, A>::size, "Casting between incompatibles batch_bool types.");
249
+ return kernel::batch_bool_cast<A>(x, batch_bool<T_out, A> {}, A {});
250
+ }
251
+
252
+ /**
253
+ * @ingroup batch_conversion
254
+ *
255
+ * Perform a static_cast from \c T_in to \c T_out on \c x.
256
+ * @param x batch of \c T_in
257
+ * @return \c x cast to \c T_out
258
+ */
259
+ template <class T_out, class T_in, class A>
260
+ XSIMD_INLINE batch<T_out, A> batch_cast(batch<T_in, A> const& x) noexcept
261
+ {
262
+ detail::static_check_supported_config<T_out, A>();
263
+ detail::static_check_supported_config<T_in, A>();
264
+ return kernel::batch_cast<A>(x, batch<T_out, A> {}, A {});
265
+ }
266
+
267
+ /**
268
+ * @ingroup batch_miscellaneous
269
+ *
270
+ * Computes the bit of sign of \c x
271
+ * @param x batch of scalar
272
+ * @return bit of sign of \c x
273
+ */
274
+ template <class T, class A>
275
+ XSIMD_INLINE batch<T, A> bitofsign(batch<T, A> const& x) noexcept
276
+ {
277
+ detail::static_check_supported_config<T, A>();
278
+ return kernel::bitofsign<A>(x, A {});
279
+ }
280
+
281
+ /**
282
+ * @ingroup batch_bitwise
283
+ *
284
+ * Computes the bitwise and of the batches \c x and \c y.
285
+ * @param x batch involved in the operation.
286
+ * @param y batch involved in the operation.
287
+ * @return the result of the bitwise and.
288
+ */
289
+ template <class T, class A>
290
+ XSIMD_INLINE auto bitwise_and(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x & y)
291
+ {
292
+ detail::static_check_supported_config<T, A>();
293
+ return x & y;
294
+ }
295
+
296
+ /**
297
+ * @ingroup batch_bitwise
298
+ *
299
+ * Computes the bitwise and of the batches \c x and \c y.
300
+ * @param x batch involved in the operation.
301
+ * @param y batch involved in the operation.
302
+ * @return the result of the bitwise and.
303
+ */
304
+ template <class T, class A>
305
+ XSIMD_INLINE auto bitwise_and(batch_bool<T, A> const& x, batch_bool<T, A> const& y) noexcept -> decltype(x & y)
306
+ {
307
+ detail::static_check_supported_config<T, A>();
308
+ return x & y;
309
+ }
310
+
311
+ /**
312
+ * @ingroup batch_bitwise
313
+ *
314
+ * Computes the bitwise and not of batches \c x and \c y.
315
+ * @param x batch involved in the operation.
316
+ * @param y batch involved in the operation.
317
+ * @return the result of the bitwise and not.
318
+ */
319
+ template <class T, class A>
320
+ XSIMD_INLINE batch<T, A> bitwise_andnot(batch<T, A> const& x, batch<T, A> const& y) noexcept
321
+ {
322
+ detail::static_check_supported_config<T, A>();
323
+ return kernel::bitwise_andnot<A>(x, y, A {});
324
+ }
325
+
326
+ /**
327
+ * @ingroup batch_bool_logical
328
+ *
329
+ * Computes the bitwise and not of batches \c x and \c y.
330
+ * @param x batch involved in the operation.
331
+ * @param y batch involved in the operation.
332
+ * @return the result of the bitwise and not.
333
+ */
334
+ template <class T, class A>
335
+ XSIMD_INLINE batch_bool<T, A> bitwise_andnot(batch_bool<T, A> const& x, batch_bool<T, A> const& y) noexcept
336
+ {
337
+ detail::static_check_supported_config<T, A>();
338
+ return kernel::bitwise_andnot<A>(x, y, A {});
339
+ }
340
+
341
+ /**
342
+ * @ingroup batch_conversion
343
+ *
344
+ * Perform a reinterpret_cast from \c T_in to \c T_out on \c x.
345
+ * @param x batch of \c T_in
346
+ * @return \c x reinterpreted as \c T_out
347
+ */
348
+ template <class T_out, class T_in, class A>
349
+ XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& x) noexcept
350
+ {
351
+ detail::static_check_supported_config<T_in, A>();
352
+ detail::static_check_supported_config<T_out, A>();
353
+ return kernel::bitwise_cast<A>(x, batch<T_out, A> {}, A {});
354
+ }
355
+
356
+ /**
357
+ * @ingroup batch_bitwise
358
+ *
359
+ * Perform a bitwise shift to the left
360
+ * @param x batch of \c T_in
361
+ * @param shift scalar amount to shift
362
+ * @return shifted \c x.
363
+ */
364
+ template <class T, class A>
365
+ XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x, int shift) noexcept
366
+ {
367
+ detail::static_check_supported_config<T, A>();
368
+ return kernel::bitwise_lshift<A>(x, shift, A {});
369
+ }
370
+ template <class T, class A>
371
+ XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x, batch<T, A> const& shift) noexcept
372
+ {
373
+ detail::static_check_supported_config<T, A>();
374
+ return kernel::bitwise_lshift<A>(x, shift, A {});
375
+ }
376
+ template <size_t shift, class T, class A>
377
+ XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& x) noexcept
378
+ {
379
+ detail::static_check_supported_config<T, A>();
380
+ return kernel::bitwise_lshift<shift, A>(x, A {});
381
+ }
382
+
383
+ /**
384
+ * @ingroup batch_bitwise
385
+ *
386
+ * Computes the bitwise not of batch \c x.
387
+ * @param x batch involved in the operation.
388
+ * @return the result of the bitwise not.
389
+ */
390
+ template <class T, class A>
391
+ XSIMD_INLINE batch<T, A> bitwise_not(batch<T, A> const& x) noexcept
392
+ {
393
+ detail::static_check_supported_config<T, A>();
394
+ return kernel::bitwise_not<A>(x, A {});
395
+ }
396
+
397
+ /**
398
+ * @ingroup batch_bitwise
399
+ *
400
+ * Computes the bitwise not of batch \c x.
401
+ * @param x batch involved in the operation.
402
+ * @return the result of the bitwise not.
403
+ */
404
+ template <class T, class A>
405
+ XSIMD_INLINE batch_bool<T, A> bitwise_not(batch_bool<T, A> const& x) noexcept
406
+ {
407
+ detail::static_check_supported_config<T, A>();
408
+ return kernel::bitwise_not<A>(x, A {});
409
+ }
410
+
411
+ /**
412
+ * @ingroup batch_bitwise
413
+ *
414
+ * Computes the bitwise or of the batches \c x and \c y.
415
+ * @param x scalar or batch of scalars
416
+ * @param y scalar or batch of scalars
417
+ * @return the result of the bitwise or.
418
+ */
419
+ template <class T, class A>
420
+ XSIMD_INLINE auto bitwise_or(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x | y)
421
+ {
422
+ detail::static_check_supported_config<T, A>();
423
+ return x | y;
424
+ }
425
+
426
+ /**
427
+ * @ingroup batch_bitwise
428
+ *
429
+ * Computes the bitwise or of the batches \c x and \c y.
430
+ * @param x scalar or batch of scalars
431
+ * @param y scalar or batch of scalars
432
+ * @return the result of the bitwise or.
433
+ */
434
+ template <class T, class A>
435
+ XSIMD_INLINE auto bitwise_or(batch_bool<T, A> const& x, batch_bool<T, A> const& y) noexcept -> decltype(x | y)
436
+ {
437
+ detail::static_check_supported_config<T, A>();
438
+ return x | y;
439
+ }
440
+
441
+ /**
442
+ * @ingroup batch_bitwise
443
+ *
444
+ * Perform a bitwise shift to the right
445
+ * @param x batch of \c T_in
446
+ * @param shift scalar amount to shift
447
+ * @return shifted \c x.
448
+ */
449
+ template <class T, class A>
450
+ XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& x, int shift) noexcept
451
+ {
452
+ detail::static_check_supported_config<T, A>();
453
+ return kernel::bitwise_rshift<A>(x, shift, A {});
454
+ }
455
+ template <class T, class A>
456
+ XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& x, batch<T, A> const& shift) noexcept
457
+ {
458
+ detail::static_check_supported_config<T, A>();
459
+ return kernel::bitwise_rshift<A>(x, shift, A {});
460
+ }
461
+ template <size_t shift, class T, class A>
462
+ XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& x) noexcept
463
+ {
464
+ detail::static_check_supported_config<T, A>();
465
+ return kernel::bitwise_rshift<shift, A>(x, A {});
466
+ }
467
+
468
+ /**
469
+ * @ingroup batch_bitwise
470
+ *
471
+ * Computes the bitwise xor of the batches \c x and \c y.
472
+ * @param x scalar or batch of scalars
473
+ * @param y scalar or batch of scalars
474
+ * @return the result of the bitwise xor.
475
+ */
476
+ template <class T, class A>
477
+ XSIMD_INLINE auto bitwise_xor(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x ^ y)
478
+ {
479
+ detail::static_check_supported_config<T, A>();
480
+ return x ^ y;
481
+ }
482
+
483
+ /**
484
+ * @ingroup batch_bitwise
485
+ *
486
+ * Computes the bitwise xor of the batches \c x and \c y.
487
+ * @param x scalar or batch of scalars
488
+ * @param y scalar or batch of scalars
489
+ * @return the result of the bitwise xor.
490
+ */
491
+ template <class T, class A>
492
+ XSIMD_INLINE auto bitwise_xor(batch_bool<T, A> const& x, batch_bool<T, A> const& y) noexcept -> decltype(x ^ y)
493
+ {
494
+ detail::static_check_supported_config<T, A>();
495
+ return x ^ y;
496
+ }
497
+
498
+ /**
499
+ * @ingroup batch_data_transfer
500
+ *
501
+ * Creates a batch from the single value \c v. If \c v is a boolean,
502
+ * this function returns a batch_bool<uint8_t>. If you need another type
503
+ * of batch_bool, please use \c broadcast_as instead.
504
+ * @param v the value used to initialize the batch
505
+ * @return a new batch instance
506
+ */
507
+ template <class T, class A = default_arch>
508
+ XSIMD_INLINE typename kernel::detail::broadcaster<T, A>::return_type broadcast(T v) noexcept
509
+ {
510
+ detail::static_check_supported_config<T, A>();
511
+ return kernel::detail::broadcaster<T, A>::run(v);
512
+ }
513
+
514
+ /**
515
+ * @ingroup batch_data_transfer
516
+ *
517
+ * Creates a batch from the single value \c v and
518
+ * the specified batch value type \c To.
519
+ * @param v the value used to initialize the batch
520
+ * @return a new batch instance
521
+ */
522
+ template <class To, class A = default_arch, class From>
523
+ XSIMD_INLINE simd_return_type<From, To, A> broadcast_as(From v) noexcept
524
+ {
525
+ detail::static_check_supported_config<From, A>();
526
+ using batch_value_type = typename simd_return_type<From, To, A>::value_type;
527
+ using value_type = typename std::conditional<std::is_same<From, bool>::value,
528
+ bool,
529
+ batch_value_type>::type;
530
+ return simd_return_type<From, To, A>(value_type(v));
531
+ }
532
+
533
+ /**
534
+ * @ingroup batch_math
535
+ *
536
+ * Computes the cubic root of the batch \c x.
537
+ * @param x batch of floating point values.
538
+ * @return the cubic root of \c x.
539
+ */
540
+ template <class T, class A>
541
+ XSIMD_INLINE batch<T, A> cbrt(batch<T, A> const& x) noexcept
542
+ {
543
+ detail::static_check_supported_config<T, A>();
544
+ return kernel::cbrt<A>(x, A {});
545
+ }
546
+
547
+ /**
548
+ * @ingroup batch_rounding
549
+ *
550
+ * Computes the batch of smallest integer values not less than
551
+ * scalars in \c x.
552
+ * @param x batch of floating point values.
553
+ * @return the batch of smallest integer values not less than \c x.
554
+ */
555
+ template <class T, class A>
556
+ XSIMD_INLINE batch<T, A> ceil(batch<T, A> const& x) noexcept
557
+ {
558
+ detail::static_check_supported_config<T, A>();
559
+ return kernel::ceil<A>(x, A {});
560
+ }
561
+
562
+ /**
563
+ * @ingroup batch_math
564
+ *
565
+ * Clips the values of the batch \c x between those of the batches \c lo and \c hi.
566
+ * @param x batch of scalar values.
567
+ * @param lo batch of scalar values.
568
+ * @param hi batch of scalar values.
569
+ * @return the result of the clipping.
570
+ */
571
+ template <class T, class A>
572
+ XSIMD_INLINE batch<T, A> clip(batch<T, A> const& x, batch<T, A> const& lo, batch<T, A> const& hi) noexcept
573
+ {
574
+ detail::static_check_supported_config<T, A>();
575
+ return kernel::clip(x, lo, hi, A {});
576
+ }
577
+
578
+ /**
579
+ * @ingroup batch_data_transfer
580
+ *
581
+ * Pick elements from \c x selected by \c mask, and append them to the
582
+ * resulting vector, zeroing the remaining slots
583
+ */
584
+ template <class T, class A>
585
+ XSIMD_INLINE batch<T, A> compress(batch<T, A> const& x, batch_bool<T, A> const& mask) noexcept
586
+ {
587
+ detail::static_check_supported_config<T, A>();
588
+ return kernel::compress<A>(x, mask, A {});
589
+ }
590
+
591
+ /**
592
+ * @ingroup batch_complex
593
+ *
594
+ * Computes the conjugate of the batch \c z.
595
+ * @param z batch of complex values.
596
+ * @return the argument of \c z.
597
+ */
598
+ template <class A, class T>
599
+ XSIMD_INLINE complex_batch_type_t<batch<T, A>> conj(batch<T, A> const& z) noexcept
600
+ {
601
+ return kernel::conj(z, A {});
602
+ }
603
+
604
+ /**
605
+ * @ingroup batch_miscellaneous
606
+ *
607
+ * Computes a value whose absolute value matches
608
+ * that of \c x, but whose sign bit matches that of \c y.
609
+ * @param x batch of scalars
610
+ * @param y batch of scalars
611
+ * @return batch whose absolute value matches that of \c x, but whose sign bit
612
+ * matches that of \c y.
613
+ */
614
+ template <class T, class A>
615
+ XSIMD_INLINE batch<T, A> copysign(batch<T, A> const& x, batch<T, A> const& y) noexcept
616
+ {
617
+ detail::static_check_supported_config<T, A>();
618
+ return kernel::copysign<A>(x, y, A {});
619
+ }
620
+
621
+ /**
622
+ * @ingroup batch_trigo
623
+ *
624
+ * Computes the cosine of the batch \c x.
625
+ * @param x batch of floating point values.
626
+ * @return the cosine of \c x.
627
+ */
628
+ template <class T, class A>
629
+ XSIMD_INLINE batch<T, A> cos(batch<T, A> const& x) noexcept
630
+ {
631
+ detail::static_check_supported_config<T, A>();
632
+ return kernel::cos<A>(x, A {});
633
+ }
634
+
635
+ /**
636
+ * @ingroup batch_trigo
637
+ *
638
+ * computes the hyperbolic cosine of the batch \c x.
639
+ * @param x batch of floating point values.
640
+ * @return the hyperbolic cosine of \c x.
641
+ */
642
+ template <class T, class A>
643
+ XSIMD_INLINE batch<T, A> cosh(batch<T, A> const& x) noexcept
644
+ {
645
+ detail::static_check_supported_config<T, A>();
646
+ return kernel::cosh<A>(x, A {});
647
+ }
648
+
649
+ /**
650
+ * @ingroup batch_bool_reducers
651
+ *
652
+ * Count the number of values set to true in the batch \c x
653
+ * @param x boolean or batch of boolean
654
+ * @return the result of the counting.
655
+ */
656
+ template <class T, class A>
657
+ XSIMD_INLINE size_t count(batch_bool<T, A> const& x) noexcept
658
+ {
659
+ detail::static_check_supported_config<T, A>();
660
+ return kernel::count<A>(x, A {});
661
+ }
662
+
663
+ /**
664
+ * @ingroup batch_arithmetic
665
+ *
666
+ * Subtract 1 to batch \c x.
667
+ * @param x batch involved in the decrement.
668
+ * @return the subtraction of \c x and 1.
669
+ */
670
+ template <class T, class A>
671
+ XSIMD_INLINE batch<T, A> decr(batch<T, A> const& x) noexcept
672
+ {
673
+ detail::static_check_supported_config<T, A>();
674
+ return kernel::decr<A>(x, A {});
675
+ }
676
+
677
+ /**
678
+ * @ingroup batch_arithmetic
679
+ *
680
+ * Subtract 1 to batch \c x for each element where \c mask is true.
681
+ * @param x batch involved in the increment.
682
+ * @param mask whether to perform the increment or not. Can be a \c
683
+ * batch_bool or a \c batch_bool_constant.
684
+ * @return the subtraction of \c x and 1 when \c mask is true.
685
+ */
686
+ template <class T, class A, class Mask>
687
+ XSIMD_INLINE batch<T, A> decr_if(batch<T, A> const& x, Mask const& mask) noexcept
688
+ {
689
+ detail::static_check_supported_config<T, A>();
690
+ return kernel::decr_if<A>(x, mask, A {});
691
+ }
692
+
693
+ /**
694
+ * @ingroup batch_arithmetic
695
+ *
696
+ * Computes the division of the batch \c x by the batch \c y.
697
+ * @param x scalar or batch of scalars
698
+ * @param y scalar or batch of scalars
699
+ * @return the result of the division.
700
+ */
701
+ template <class T, class A>
702
+ XSIMD_INLINE auto div(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x / y)
703
+ {
704
+ detail::static_check_supported_config<T, A>();
705
+ return x / y;
706
+ }
707
+
708
+ /**
709
+ * @ingroup batch_logical
710
+ *
711
+ * Element-wise equality comparison of batches \c x and \c y.
712
+ * @param x batch of scalars
713
+ * @param y batch of scalars
714
+ * @return a boolean batch.
715
+ */
716
+ template <class T, class A>
717
+ XSIMD_INLINE auto eq(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x == y)
718
+ {
719
+ detail::static_check_supported_config<T, A>();
720
+ return x == y;
721
+ }
722
+
723
+ /**
724
+ * @ingroup batch_logical
725
+ *
726
+ * Element-wise equality comparison of batches of boolean values \c x and \c y.
727
+ * @param x batch of booleans involved in the comparison.
728
+ * @param y batch of booleans involved in the comparison.
729
+ * @return a boolean batch.
730
+ */
731
+ template <class T, class A>
732
+ XSIMD_INLINE auto eq(batch_bool<T, A> const& x, batch_bool<T, A> const& y) noexcept -> decltype(x == y)
733
+ {
734
+ detail::static_check_supported_config<T, A>();
735
+ return x == y;
736
+ }
737
+
738
+ /**
739
+ * @ingroup batch_math
740
+ *
741
+ * Computes the natural exponential of the batch \c x.
742
+ * @param x batch of floating point values.
743
+ * @return the natural exponential of \c x.
744
+ */
745
+ template <class T, class A>
746
+ XSIMD_INLINE batch<T, A> exp(batch<T, A> const& x) noexcept
747
+ {
748
+ detail::static_check_supported_config<T, A>();
749
+ return kernel::exp<A>(x, A {});
750
+ }
751
+
752
+ /**
753
+ * @ingroup batch_math
754
+ *
755
+ * Computes the base 10 exponential of the batch \c x.
756
+ * @param x batch of floating point values.
757
+ * @return the base 10 exponential of \c x.
758
+ */
759
+ template <class T, class A>
760
+ XSIMD_INLINE batch<T, A> exp10(batch<T, A> const& x) noexcept
761
+ {
762
+ detail::static_check_supported_config<T, A>();
763
+ return kernel::exp10<A>(x, A {});
764
+ }
765
+
766
+ /**
767
+ * @ingroup batch_math
768
+ *
769
+ * Computes the base 2 exponential of the batch \c x.
770
+ * @param x batch of floating point values.
771
+ * @return the base 2 exponential of \c x.
772
+ */
773
+ template <class T, class A>
774
+ XSIMD_INLINE batch<T, A> exp2(batch<T, A> const& x) noexcept
775
+ {
776
+ detail::static_check_supported_config<T, A>();
777
+ return kernel::exp2<A>(x, A {});
778
+ }
779
+
780
+ /**
781
+ * @ingroup batch_data_transfer
782
+ *
783
+ * Load contiguous elements from \c x and place them in slots selected by \c
784
+ * mask, zeroing the other slots
785
+ */
786
+ template <class T, class A>
787
+ XSIMD_INLINE batch<T, A> expand(batch<T, A> const& x, batch_bool<T, A> const& mask) noexcept
788
+ {
789
+ detail::static_check_supported_config<T, A>();
790
+ return kernel::expand<A>(x, mask, A {});
791
+ }
792
+
793
+ /**
794
+ * @ingroup batch_math
795
+ *
796
+ * Computes the natural exponential of the batch \c x, minus one.
797
+ * @param x batch of floating point values.
798
+ * @return the natural exponential of \c x, minus one.
799
+ */
800
+ template <class T, class A>
801
+ XSIMD_INLINE batch<T, A> expm1(batch<T, A> const& x) noexcept
802
+ {
803
+ detail::static_check_supported_config<T, A>();
804
+ return kernel::expm1<A>(x, A {});
805
+ }
806
+
807
+ /**
808
+ * @ingroup batch_math_extra
809
+ *
810
+ * Computes the error function of the batch \c x.
811
+ * @param x batch of floating point values.
812
+ * @return the error function of \c x.
813
+ */
814
+ template <class T, class A>
815
+ XSIMD_INLINE batch<T, A> erf(batch<T, A> const& x) noexcept
816
+ {
817
+ detail::static_check_supported_config<T, A>();
818
+ return kernel::erf<A>(x, A {});
819
+ }
820
+
821
+ /**
822
+ * @ingroup batch_math_extra
823
+ *
824
+ * Computes the complementary error function of the batch \c x.
825
+ * @param x batch of floating point values.
826
+ * @return the error function of \c x.
827
+ */
828
+ template <class T, class A>
829
+ XSIMD_INLINE batch<T, A> erfc(batch<T, A> const& x) noexcept
830
+ {
831
+ detail::static_check_supported_config<T, A>();
832
+ return kernel::erfc<A>(x, A {});
833
+ }
834
+
835
+ /**
836
+ * Extract vector from pair of vectors
837
+ * extracts the lowest vector elements from the second source \c x
838
+ * and the highest vector elements from the first source \c y
839
+ * Concatenates the results into th Return value.
840
+ * @param x batch of integer or floating point values.
841
+ * @param y batch of integer or floating point values.
842
+ * @param i integer specifying the lowest vector element to extract from the first source register
843
+ * @return.
844
+ */
845
+ template <class T, class A>
846
+ XSIMD_INLINE batch<T, A> extract_pair(batch<T, A> const& x, batch<T, A> const& y, std::size_t i) noexcept
847
+ {
848
+ detail::static_check_supported_config<T, A>();
849
+ return kernel::extract_pair<A>(x, y, i, A {});
850
+ }
851
+
852
+ /**
853
+ * @ingroup batch_math
854
+ *
855
+ * Computes the absolute values of each scalar in the batch \c x.
856
+ * @param x batch floating point values.
857
+ * @return the absolute values of \c x.
858
+ */
859
+ template <class T, class A>
860
+ XSIMD_INLINE batch<T, A> fabs(batch<T, A> const& x) noexcept
861
+ {
862
+ detail::static_check_supported_config<T, A>();
863
+ return kernel::abs<A>(x, A {});
864
+ }
865
+
866
+ /**
867
+ * @ingroup batch_math
868
+ *
869
+ * Computes the positive difference between \c x and \c y, that is,
870
+ * <tt>max(0, x-y)</tt>.
871
+ * @param x batch of floating point values.
872
+ * @param y batch of floating point values.
873
+ * @return the positive difference.
874
+ */
875
+ template <class T, class A>
876
+ XSIMD_INLINE batch<T, A> fdim(batch<T, A> const& x, batch<T, A> const& y) noexcept
877
+ {
878
+ detail::static_check_supported_config<T, A>();
879
+ return kernel::fdim<A>(x, y, A {});
880
+ }
881
+
882
+ /**
883
+ * @ingroup batch_rounding
884
+ *
885
+ * Computes the batch of largest integer values not greater than
886
+ * scalars in \c x.
887
+ * @param x batch of floating point values.
888
+ * @return the batch of largest integer values not greater than \c x.
889
+ */
890
+ template <class T, class A>
891
+ XSIMD_INLINE batch<T, A> floor(batch<T, A> const& x) noexcept
892
+ {
893
+ detail::static_check_supported_config<T, A>();
894
+ return kernel::floor<A>(x, A {});
895
+ }
896
+
897
+ /**
898
+ * @ingroup batch_arithmetic
899
+ *
900
+ * Computes <tt>(x*y) + z</tt> in a single instruction when possible.
901
+ * @param x a batch of integer or floating point values.
902
+ * @param y a batch of integer or floating point values.
903
+ * @param z a batch of integer or floating point values.
904
+ * @return the result of the fused multiply-add operation.
905
+ */
906
+ template <class T, class A>
907
+ XSIMD_INLINE batch<T, A> fma(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept
908
+ {
909
+ detail::static_check_supported_config<T, A>();
910
+ return kernel::fma<A>(x, y, z, A {});
911
+ }
912
+
913
+ /**
914
+ * @ingroup batch_math
915
+ *
916
+ * Computes the larger values of the batches \c x and \c y.
917
+ * @param x a batch of integer or floating point values.
918
+ * @param y a batch of integer or floating point values.
919
+ * @return a batch of the larger values.
920
+ */
921
+ template <class T, class A>
922
+ XSIMD_INLINE batch<T, A> fmax(batch<T, A> const& x, batch<T, A> const& y) noexcept
923
+ {
924
+ detail::static_check_supported_config<T, A>();
925
+ return kernel::max<A>(x, y, A {});
926
+ }
927
+
928
+ /**
929
+ * @ingroup batch_math
930
+ *
931
+ * Computes the smaller values of the batches \c x and \c y.
932
+ * @param x a batch of integer or floating point values.
933
+ * @param y a batch of integer or floating point values.
934
+ * @return a batch of the smaller values.
935
+ */
936
+ template <class T, class A>
937
+ XSIMD_INLINE batch<T, A> fmin(batch<T, A> const& x, batch<T, A> const& y) noexcept
938
+ {
939
+ detail::static_check_supported_config<T, A>();
940
+ return kernel::min<A>(x, y, A {});
941
+ }
942
+
943
+ /**
944
+ * @ingroup batch_math
945
+ *
946
+ * Computes the modulo of the batch \c x by the batch \c y.
947
+ * @param x batch involved in the modulo.
948
+ * @param y batch involved in the modulo.
949
+ * @return the result of the modulo.
950
+ */
951
+ template <class T, class A>
952
+ XSIMD_INLINE batch<T, A> fmod(batch<T, A> const& x, batch<T, A> const& y) noexcept
953
+ {
954
+ detail::static_check_supported_config<T, A>();
955
+ return kernel::fmod<A>(x, y, A {});
956
+ }
957
+
958
+ /**
959
+ * @ingroup batch_arithmetic
960
+ *
961
+ * Computes <tt>(x*y) - z</tt> in a single instruction when possible.
962
+ * @param x a batch of integer or floating point values.
963
+ * @param y a batch of integer or floating point values.
964
+ * @param z a batch of integer or floating point values.
965
+ * @return the result of the fused multiply-sub operation.
966
+ */
967
+ template <class T, class A>
968
+ XSIMD_INLINE batch<T, A> fms(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept
969
+ {
970
+ detail::static_check_supported_config<T, A>();
971
+ return kernel::fms<A>(x, y, z, A {});
972
+ }
973
+
974
+ /**
975
+ * @ingroup batch_arithmetic
976
+ *
977
+ * Computes <tt>-(x*y) + z</tt> in a single instruction when possible.
978
+ * @param x a batch of integer or floating point values.
979
+ * @param y a batch of integer or floating point values.
980
+ * @param z a batch of integer or floating point values.
981
+ * @return the result of the fused negated multiply-add operation.
982
+ */
983
+ template <class T, class A>
984
+ XSIMD_INLINE batch<T, A> fnma(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept
985
+ {
986
+ detail::static_check_supported_config<T, A>();
987
+ return kernel::fnma<A>(x, y, z, A {});
988
+ }
989
+
990
+ /**
991
+ * @ingroup batch_arithmetic
992
+ *
993
+ * Computes <tt>-(x*y) - z</tt> in a single instruction when possible.
994
+ * @param x a batch of integer or floating point values.
995
+ * @param y a batch of integer or floating point values.
996
+ * @param z a batch of integer or floating point values.
997
+ * @return the result of the fused negated multiply-sub operation.
998
+ */
999
+ template <class T, class A>
1000
+ XSIMD_INLINE batch<T, A> fnms(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept
1001
+ {
1002
+ detail::static_check_supported_config<T, A>();
1003
+ return kernel::fnms<A>(x, y, z, A {});
1004
+ }
1005
+
1006
+ /**
1007
+ * @ingroup batch_arithmetic
1008
+ *
1009
+ * Computes <tt>-(x*y) - z</tt> in a single instruction when possible.
1010
+ * @param x a batch of integer or floating point values.
1011
+ * @param y a batch of integer or floating point values.
1012
+ * @param z a batch of integer or floating point values.
1013
+ * @return a batch where each even-indexed element is computed as <tt>x * y - z</tt> and each odd-indexed element as <tt>x * y + z</tt>
1014
+ */
1015
+ template <class T, class A>
1016
+ XSIMD_INLINE batch<T, A> fmas(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept
1017
+ {
1018
+ detail::static_check_supported_config<T, A>();
1019
+ return kernel::fmas<A>(x, y, z, A {});
1020
+ }
1021
+ /**
1022
+ * @ingroup batch_fp
1023
+ *
1024
+ * Split split the number x into a normalized fraction and an exponent which is stored in exp
1025
+ * @param x a batch of integer or floating point values.
1026
+ * @param y a batch of integer or floating point values.
1027
+ * @return the normalized fraction of x
1028
+ */
1029
+ template <class T, class A>
1030
+ XSIMD_INLINE batch<T, A> frexp(const batch<T, A>& x, batch<as_integer_t<T>, A>& y) noexcept
1031
+ {
1032
+ detail::static_check_supported_config<T, A>();
1033
+ return kernel::frexp<A>(x, y, A {});
1034
+ }
1035
+
1036
+ /**
1037
+ * @ingroup batch_logical
1038
+ *
1039
+ * Element-wise greater or equal comparison of batches \c x and \c y.
1040
+ * @tparam X the actual type of batch.
1041
+ * @param x batch involved in the comparison.
1042
+ * @param y batch involved in the comparison.
1043
+ * @return a boolean batch.
1044
+ */
1045
+ template <class T, class A>
1046
+ XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& x, batch<T, A> const& y) noexcept
1047
+ {
1048
+ detail::static_check_supported_config<T, A>();
1049
+ return x >= y;
1050
+ }
1051
+
1052
+ /**
1053
+ * @ingroup batch_logical
1054
+ *
1055
+ * Element-wise greater than comparison of batches \c x and \c y.
1056
+ * @tparam X the actual type of batch.
1057
+ * @param x batch involved in the comparison.
1058
+ * @param y batch involved in the comparison.
1059
+ * @return a boolean batch.
1060
+ */
1061
+ template <class T, class A>
1062
+ XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& x, batch<T, A> const& y) noexcept
1063
+ {
1064
+ detail::static_check_supported_config<T, A>();
1065
+ return x > y;
1066
+ }
1067
+
1068
+ /**
1069
+ * @ingroup batch_reducers
1070
+ *
1071
+ * Parallel horizontal addition: adds the scalars of each batch
1072
+ * in the array pointed by \c row and store them in a returned
1073
+ * batch.
1074
+ * @param row an array of \c N batches
1075
+ * @return the result of the reduction.
1076
+ */
1077
+ template <class T, class A>
1078
+ XSIMD_INLINE batch<T, A> haddp(batch<T, A> const* row) noexcept
1079
+ {
1080
+ detail::static_check_supported_config<T, A>();
1081
+ return kernel::haddp<A>(row, A {});
1082
+ }
1083
+
1084
+ /**
1085
+ * @ingroup batch_math
1086
+ *
1087
+ * Computes the square root of the sum of the squares of the batches
1088
+ * \c x, and \c y.
1089
+ * @param x batch of floating point values.
1090
+ * @param y batch of floating point values.
1091
+ * @return the square root of the sum of the squares of \c x and \c y.
1092
+ */
1093
+ template <class T, class A>
1094
+ XSIMD_INLINE batch<T, A> hypot(batch<T, A> const& x, batch<T, A> const& y) noexcept
1095
+ {
1096
+ detail::static_check_supported_config<T, A>();
1097
+ return kernel::hypot<A>(x, y, A {});
1098
+ }
1099
+
1100
+ /**
1101
+ * @ingroup batch_complex
1102
+ *
1103
+ * Computes the imaginary part of the batch \c x.
1104
+ * @param x batch of complex or real values.
1105
+ * @return the argument of \c x.
1106
+ */
1107
+ template <class T, class A>
1108
+ XSIMD_INLINE real_batch_type_t<batch<T, A>> imag(batch<T, A> const& x) noexcept
1109
+ {
1110
+ detail::static_check_supported_config<T, A>();
1111
+ return kernel::imag<A>(x, A {});
1112
+ }
1113
+
1114
+ /**
1115
+ * @ingroup batch_arithmetic
1116
+ *
1117
+ * Add 1 to batch \c x.
1118
+ * @param x batch involved in the increment.
1119
+ * @return the sum of \c x and 1.
1120
+ */
1121
+ template <class T, class A>
1122
+ XSIMD_INLINE batch<T, A> incr(batch<T, A> const& x) noexcept
1123
+ {
1124
+ detail::static_check_supported_config<T, A>();
1125
+ return kernel::incr<A>(x, A {});
1126
+ }
1127
+
1128
+ /**
1129
+ * @ingroup batch_arithmetic
1130
+ *
1131
+ * Add 1 to batch \c x for each element where \c mask is true.
1132
+ * @param x batch involved in the increment.
1133
+ * @param mask whether to perform the increment or not. Can be a \c
1134
+ * batch_bool or a \c batch_bool_constant.
1135
+ * @return the sum of \c x and 1 when \c mask is true.
1136
+ */
1137
+ template <class T, class A, class Mask>
1138
+ XSIMD_INLINE batch<T, A> incr_if(batch<T, A> const& x, Mask const& mask) noexcept
1139
+ {
1140
+ detail::static_check_supported_config<T, A>();
1141
+ return kernel::incr_if<A>(x, mask, A {});
1142
+ }
1143
+
1144
+ #ifndef __FAST_MATH__
1145
+ /**
1146
+ * @ingroup batch_constant
1147
+ *
1148
+ * Return a batch of scalars representing positive infinity
1149
+ * @return a batch of positive infinity
1150
+ */
1151
+ template <class B>
1152
+ XSIMD_INLINE B infinity()
1153
+ {
1154
+ using T = typename B::value_type;
1155
+ using A = typename B::arch_type;
1156
+ detail::static_check_supported_config<T, A>();
1157
+ return B(std::numeric_limits<T>::infinity());
1158
+ }
1159
+ #endif
1160
+
1161
+ /**
1162
+ * @ingroup batch_data_transfer
1163
+ *
1164
+ * Create a new batch equivalent to \c x but with element \c val set at position \c pos
1165
+ * @param x batch
1166
+ * @param val value to set
1167
+ * @param pos index of the updated slot
1168
+ * @return copy of \c x with position \c pos set to \c val
1169
+ */
1170
+ template <class T, class A, size_t I>
1171
+ XSIMD_INLINE batch<T, A> insert(batch<T, A> const& x, T val, index<I> pos) noexcept
1172
+ {
1173
+ detail::static_check_supported_config<T, A>();
1174
+ return kernel::insert<A>(x, val, pos, A {});
1175
+ }
1176
+
1177
+ /**
1178
+ * @ingroup batch_logical
1179
+ *
1180
+ * Determines if the scalars in the given batch \c x represent an even integer value
1181
+ * @param x batch of floating point values.
1182
+ * @return a batch of booleans.
1183
+ */
1184
+ template <class T, class A>
1185
+ XSIMD_INLINE batch_bool<T, A> is_even(batch<T, A> const& x) noexcept
1186
+ {
1187
+ detail::static_check_supported_config<T, A>();
1188
+ return kernel::is_even<A>(x, A {});
1189
+ }
1190
+
1191
+ /**
1192
+ * @ingroup batch_logical
1193
+ *
1194
+ * Determines if the floating-point scalars in the given batch \c x represent integer value
1195
+ * @param x batch of floating point values.
1196
+ * @return a batch of booleans.
1197
+ */
1198
+ template <class T, class A>
1199
+ XSIMD_INLINE batch_bool<T, A> is_flint(batch<T, A> const& x) noexcept
1200
+ {
1201
+ detail::static_check_supported_config<T, A>();
1202
+ return kernel::is_flint<A>(x, A {});
1203
+ }
1204
+
1205
+ /**
1206
+ * @ingroup batch_logical
1207
+ *
1208
+ * Determines if the scalars in the given batch \c x represent an odd integer value
1209
+ * @param x batch of floating point values.
1210
+ * @return a batch of booleans.
1211
+ */
1212
+ template <class T, class A>
1213
+ XSIMD_INLINE batch_bool<T, A> is_odd(batch<T, A> const& x) noexcept
1214
+ {
1215
+ detail::static_check_supported_config<T, A>();
1216
+ return kernel::is_odd<A>(x, A {});
1217
+ }
1218
+
1219
+ /**
1220
+ * @ingroup batch_logical
1221
+ *
1222
+ * Determines if the scalars in the given batch \c x are inf values.
1223
+ * @param x batch of floating point values.
1224
+ * @return a batch of booleans.
1225
+ */
1226
+ template <class T, class A>
1227
+ XSIMD_INLINE typename batch<T, A>::batch_bool_type isinf(batch<T, A> const& x) noexcept
1228
+ {
1229
+ detail::static_check_supported_config<T, A>();
1230
+ return kernel::isinf<A>(x, A {});
1231
+ }
1232
+
1233
+ /**
1234
+ * @ingroup batch_logical
1235
+ *
1236
+ * Determines if the scalars in the given batch \c x are finite values.
1237
+ * @param x batch of floating point values.
1238
+ * @return a batch of booleans.
1239
+ */
1240
+ template <class T, class A>
1241
+ XSIMD_INLINE typename batch<T, A>::batch_bool_type isfinite(batch<T, A> const& x) noexcept
1242
+ {
1243
+ detail::static_check_supported_config<T, A>();
1244
+ return kernel::isfinite<A>(x, A {});
1245
+ }
1246
+
1247
+ /**
1248
+ * @ingroup batch_logical
1249
+ *
1250
+ * Determines if the scalars in the given batch \c x are NaN values.
1251
+ * @param x batch of floating point values.
1252
+ * @return a batch of booleans.
1253
+ */
1254
+ template <class T, class A>
1255
+ XSIMD_INLINE typename batch<T, A>::batch_bool_type isnan(batch<T, A> const& x) noexcept
1256
+ {
1257
+ detail::static_check_supported_config<T, A>();
1258
+ return kernel::isnan<A>(x, A {});
1259
+ }
1260
+
1261
+ /**
1262
+ * @ingroup batch_math_extra
1263
+ *
1264
+ * Computes the multiplication of the floating point number \c x by 2 raised to the power \c y.
1265
+ * @param x batch of floating point values.
1266
+ * @param y batch of integer values.
1267
+ * @return a batch of floating point values.
1268
+ */
1269
+ template <class T, class A>
1270
+ XSIMD_INLINE batch<T, A> ldexp(const batch<T, A>& x, const batch<as_integer_t<T>, A>& y) noexcept
1271
+ {
1272
+ detail::static_check_supported_config<T, A>();
1273
+ return kernel::ldexp<A>(x, y, A {});
1274
+ }
1275
+
1276
+ /**
1277
+ * @ingroup batch_logical
1278
+ *
1279
+ * Element-wise lesser or equal to comparison of batches \c x and \c y.
1280
+ * @param x batch involved in the comparison.
1281
+ * @param y batch involved in the comparison.
1282
+ * @return a boolean batch.
1283
+ */
1284
+ template <class T, class A>
1285
+ XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& x, batch<T, A> const& y) noexcept
1286
+ {
1287
+ detail::static_check_supported_config<T, A>();
1288
+ return x <= y;
1289
+ }
1290
+
1291
+ /**
1292
+ * @ingroup batch_math_extra
1293
+ *
1294
+ * Computes the natural logarithm of the gamma function of the batch \c x.
1295
+ * @param x batch of floating point values.
1296
+ * @return the natural logarithm of the gamma function of \c x.
1297
+ */
1298
+ template <class T, class A>
1299
+ XSIMD_INLINE batch<T, A> lgamma(batch<T, A> const& x) noexcept
1300
+ {
1301
+ detail::static_check_supported_config<T, A>();
1302
+ return kernel::lgamma<A>(x, A {});
1303
+ }
1304
+
1305
+ /**
1306
+ * @ingroup batch_data_transfer
1307
+ *
1308
+ * Creates a batch from the buffer \c ptr and the specified
1309
+ * batch value type \c To. The memory needs to be aligned.
1310
+ * @param ptr the memory buffer to read
1311
+ * @return a new batch instance
1312
+ */
1313
+ template <class To, class A = default_arch, class From>
1314
+ XSIMD_INLINE simd_return_type<From, To, A> load_as(From const* ptr, aligned_mode) noexcept
1315
+ {
1316
+ using batch_value_type = typename simd_return_type<From, To, A>::value_type;
1317
+ detail::static_check_supported_config<From, A>();
1318
+ detail::static_check_supported_config<To, A>();
1319
+ return kernel::load_aligned<A>(ptr, kernel::convert<batch_value_type> {}, A {});
1320
+ }
1321
+
1322
+ template <class To, class A = default_arch>
1323
+ XSIMD_INLINE simd_return_type<bool, To, A> load_as(bool const* ptr, aligned_mode) noexcept
1324
+ {
1325
+ detail::static_check_supported_config<To, A>();
1326
+ return simd_return_type<bool, To, A>::load_aligned(ptr);
1327
+ }
1328
+
1329
+ template <class To, class A = default_arch, class From>
1330
+ XSIMD_INLINE simd_return_type<std::complex<From>, To, A> load_as(std::complex<From> const* ptr, aligned_mode) noexcept
1331
+ {
1332
+ detail::static_check_supported_config<To, A>();
1333
+ using batch_value_type = typename simd_return_type<std::complex<From>, To, A>::value_type;
1334
+ return kernel::load_complex_aligned<A>(ptr, kernel::convert<batch_value_type> {}, A {});
1335
+ }
1336
+
1337
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
1338
+ template <class To, class A = default_arch, class From, bool i3ec>
1339
+ XSIMD_INLINE simd_return_type<xtl::xcomplex<From, From, i3ec>, To, A> load_as(xtl::xcomplex<From, From, i3ec> const* ptr, aligned_mode) noexcept
1340
+ {
1341
+ detail::static_check_supported_config<To, A>();
1342
+ detail::static_check_supported_config<From, A>();
1343
+ return load_as<To>(reinterpret_cast<std::complex<From> const*>(ptr), aligned_mode());
1344
+ }
1345
+ #endif
1346
+
1347
+ /**
1348
+ * @ingroup batch_data_transfer
1349
+ *
1350
+ * Creates a batch from the buffer \c ptr and the specified
1351
+ * batch value type \c To. The memory does not need to be aligned.
1352
+ * @param ptr the memory buffer to read
1353
+ * @return a new batch instance
1354
+ */
1355
+ template <class To, class A = default_arch, class From>
1356
+ XSIMD_INLINE simd_return_type<From, To, A> load_as(From const* ptr, unaligned_mode) noexcept
1357
+ {
1358
+ using batch_value_type = typename simd_return_type<From, To, A>::value_type;
1359
+ detail::static_check_supported_config<To, A>();
1360
+ detail::static_check_supported_config<From, A>();
1361
+ return kernel::load_unaligned<A>(ptr, kernel::convert<batch_value_type> {}, A {});
1362
+ }
1363
+
1364
+ template <class To, class A = default_arch>
1365
+ XSIMD_INLINE simd_return_type<bool, To, A> load_as(bool const* ptr, unaligned_mode) noexcept
1366
+ {
1367
+ return simd_return_type<bool, To, A>::load_unaligned(ptr);
1368
+ }
1369
+
1370
+ template <class To, class A = default_arch, class From>
1371
+ XSIMD_INLINE simd_return_type<std::complex<From>, To, A> load_as(std::complex<From> const* ptr, unaligned_mode) noexcept
1372
+ {
1373
+ detail::static_check_supported_config<To, A>();
1374
+ detail::static_check_supported_config<From, A>();
1375
+ using batch_value_type = typename simd_return_type<std::complex<From>, To, A>::value_type;
1376
+ return kernel::load_complex_unaligned<A>(ptr, kernel::convert<batch_value_type> {}, A {});
1377
+ }
1378
+
1379
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
1380
+ template <class To, class A = default_arch, class From, bool i3ec>
1381
+ XSIMD_INLINE simd_return_type<xtl::xcomplex<From, From, i3ec>, To, A> load_as(xtl::xcomplex<From, From, i3ec> const* ptr, unaligned_mode) noexcept
1382
+ {
1383
+ detail::static_check_supported_config<To, A>();
1384
+ detail::static_check_supported_config<From, A>();
1385
+ return load_as<To>(reinterpret_cast<std::complex<From> const*>(ptr), unaligned_mode());
1386
+ }
1387
+ #endif
1388
+
1389
+ /**
1390
+ * @ingroup batch_data_transfer
1391
+ *
1392
+ * Creates a batch from the buffer \c ptr. The
1393
+ * memory needs to be aligned.
1394
+ * @param ptr the memory buffer to read
1395
+ * @return a new batch instance
1396
+ */
1397
+ template <class A = default_arch, class From>
1398
+ XSIMD_INLINE batch<From, A> load(From const* ptr, aligned_mode = {}) noexcept
1399
+ {
1400
+ detail::static_check_supported_config<From, A>();
1401
+ return load_as<From, A>(ptr, aligned_mode {});
1402
+ }
1403
+
1404
+ /**
1405
+ * @ingroup batch_data_transfer
1406
+ *
1407
+ * Creates a batch from the buffer \c ptr. The
1408
+ * memory does not need to be aligned.
1409
+ * @param ptr the memory buffer to read
1410
+ * @return a new batch instance
1411
+ */
1412
+ template <class A = default_arch, class From>
1413
+ XSIMD_INLINE batch<From, A> load(From const* ptr, unaligned_mode) noexcept
1414
+ {
1415
+ detail::static_check_supported_config<From, A>();
1416
+ return load_as<From, A>(ptr, unaligned_mode {});
1417
+ }
1418
+
1419
+ /**
1420
+ * @ingroup batch_data_transfer
1421
+ *
1422
+ * Creates a batch from the buffer \c ptr. The
1423
+ * memory needs to be aligned.
1424
+ * @param ptr the memory buffer to read
1425
+ * @return a new batch instance
1426
+ */
1427
+ template <class A = default_arch, class From>
1428
+ XSIMD_INLINE batch<From, A> load_aligned(From const* ptr) noexcept
1429
+ {
1430
+ detail::static_check_supported_config<From, A>();
1431
+ return load_as<From, A>(ptr, aligned_mode {});
1432
+ }
1433
+
1434
+ /**
1435
+ * @ingroup batch_data_transfer
1436
+ *
1437
+ * Creates a batch from the buffer \c ptr. The
1438
+ * memory does not need to be aligned.
1439
+ * @param ptr the memory buffer to read
1440
+ * @return a new batch instance
1441
+ */
1442
+ template <class A = default_arch, class From>
1443
+ XSIMD_INLINE batch<From, A> load_unaligned(From const* ptr) noexcept
1444
+ {
1445
+ detail::static_check_supported_config<From, A>();
1446
+ return load_as<From, A>(ptr, unaligned_mode {});
1447
+ }
1448
+
1449
+ /**
1450
+ * @ingroup batch_math
1451
+ *
1452
+ * Computes the natural logarithm of the batch \c x.
1453
+ * @param x batch of floating point values.
1454
+ * @return the natural logarithm of \c x.
1455
+ */
1456
+ template <class T, class A>
1457
+ XSIMD_INLINE batch<T, A> log(batch<T, A> const& x) noexcept
1458
+ {
1459
+ detail::static_check_supported_config<T, A>();
1460
+ return kernel::log<A>(x, A {});
1461
+ }
1462
+
1463
+ /**
1464
+ * @ingroup batch_math
1465
+ * Computes the base 2 logarithm of the batch \c x.
1466
+ * @param x batch of floating point values.
1467
+ * @return the base 2 logarithm of \c x.
1468
+ */
1469
+ template <class T, class A>
1470
+ XSIMD_INLINE batch<T, A> log2(batch<T, A> const& x) noexcept
1471
+ {
1472
+ detail::static_check_supported_config<T, A>();
1473
+ return kernel::log2<A>(x, A {});
1474
+ }
1475
+
1476
+ /**
1477
+ * @ingroup batch_math
1478
+ * Computes the base 10 logarithm of the batch \c x.
1479
+ * @param x batch of floating point values.
1480
+ * @return the base 10 logarithm of \c x.
1481
+ */
1482
+ template <class T, class A>
1483
+ XSIMD_INLINE batch<T, A> log10(batch<T, A> const& x) noexcept
1484
+ {
1485
+ detail::static_check_supported_config<T, A>();
1486
+ return kernel::log10<A>(x, A {});
1487
+ }
1488
+
1489
+ /**
1490
+ * @ingroup batch_math
1491
+ * Computes the natural logarithm of one plus the batch \c x.
1492
+ * @param x batch of floating point values.
1493
+ * @return the natural logarithm of one plus \c x.
1494
+ */
1495
+ template <class T, class A>
1496
+ XSIMD_INLINE batch<T, A> log1p(batch<T, A> const& x) noexcept
1497
+ {
1498
+ detail::static_check_supported_config<T, A>();
1499
+ return kernel::log1p<A>(x, A {});
1500
+ }
1501
+
1502
+ /**
1503
+ * @ingroup batch_logical
1504
+ *
1505
+ * Element-wise lesser than comparison of batches \c x and \c y.
1506
+ * @param x batch involved in the comparison.
1507
+ * @param y batch involved in the comparison.
1508
+ * @return a boolean batch.
1509
+ */
1510
+ template <class T, class A>
1511
+ XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& x, batch<T, A> const& y) noexcept
1512
+ {
1513
+ detail::static_check_supported_config<T, A>();
1514
+ return x < y;
1515
+ }
1516
+
1517
+ /**
1518
+ * @ingroup batch_math
1519
+ *
1520
+ * Computes the larger values of the batches \c x and \c y.
1521
+ * @param x a batch of integer or floating point values.
1522
+ * @param y a batch of integer or floating point values.
1523
+ * @return a batch of the larger values.
1524
+ */
1525
+ template <class T, class A>
1526
+ XSIMD_INLINE batch<T, A> max(batch<T, A> const& x, batch<T, A> const& y) noexcept
1527
+ {
1528
+ detail::static_check_supported_config<T, A>();
1529
+ return kernel::max<A>(x, y, A {});
1530
+ }
1531
+
1532
+ /**
1533
+ * @ingroup batch_math
1534
+ *
1535
+ * Computes the smaller values of the batches \c x and \c y.
1536
+ * @param x a batch of integer or floating point values.
1537
+ * @param y a batch of integer or floating point values.
1538
+ * @return a batch of the smaller values.
1539
+ */
1540
+ template <class T, class A>
1541
+ XSIMD_INLINE batch<T, A> min(batch<T, A> const& x, batch<T, A> const& y) noexcept
1542
+ {
1543
+ detail::static_check_supported_config<T, A>();
1544
+ return kernel::min<A>(x, y, A {});
1545
+ }
1546
+
1547
+ /**
1548
+ * @ingroup batch_constant
1549
+ *
1550
+ * Return a batch of scalars representing negative infinity
1551
+ * @return a batch of negative infinity
1552
+ */
1553
+ template <class B>
1554
+ XSIMD_INLINE B minusinfinity() noexcept
1555
+ {
1556
+ using T = typename B::value_type;
1557
+ using A = typename B::arch_type;
1558
+ detail::static_check_supported_config<T, A>();
1559
+ return B(-std::numeric_limits<T>::infinity());
1560
+ }
1561
+
1562
+ /**
1563
+ * @ingroup batch_arithmetic
1564
+ *
1565
+ * Computes the integer modulo of the batch \c x by the batch \c y.
1566
+ * @param x batch involved in the modulo.
1567
+ * @param y batch involved in the modulo.
1568
+ * @return the result of the modulo.
1569
+ */
1570
+ template <class T, class A>
1571
+ XSIMD_INLINE auto mod(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x % y)
1572
+ {
1573
+ detail::static_check_supported_config<T, A>();
1574
+ return x % y;
1575
+ }
1576
+
1577
+ /**
1578
+ * @ingroup batch_arithmetic
1579
+ *
1580
+ * Computes the product of the batches \c x and \c y.
1581
+ * @tparam X the actual type of batch.
1582
+ * @param x batch involved in the product.
1583
+ * @param y batch involved in the product.
1584
+ * @return the result of the product.
1585
+ */
1586
+ template <class T, class A>
1587
+ XSIMD_INLINE auto mul(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x * y)
1588
+ {
1589
+ detail::static_check_supported_config<T, A>();
1590
+ return x * y;
1591
+ }
1592
+
1593
+ /**
1594
+ * @ingroup batch_rounding
1595
+ *
1596
+ * Rounds the scalars in \c x to integer values (in floating point format), using
1597
+ * the current rounding mode.
1598
+ * @param x batch of floating point values.
1599
+ * @return the batch of nearest integer values.
1600
+ */
1601
+ template <class T, class A>
1602
+ XSIMD_INLINE batch<T, A> nearbyint(batch<T, A> const& x) noexcept
1603
+ {
1604
+ detail::static_check_supported_config<T, A>();
1605
+ return kernel::nearbyint<A>(x, A {});
1606
+ }
1607
+
1608
+ /**
1609
+ * @ingroup batch_rounding
1610
+ *
1611
+ * Rounds the scalars in \c x to integer values (in integer format) using
1612
+ * the current rounding mode.
1613
+ * @param x batch of floating point values.
1614
+ * @return the batch of nearest integer values.
1615
+ *
1616
+ * @warning For very large values the conversion to int silently overflows.
1617
+ */
1618
+ template <class T, class A>
1619
+ XSIMD_INLINE batch<as_integer_t<T>, A>
1620
+ nearbyint_as_int(batch<T, A> const& x) noexcept
1621
+ {
1622
+ detail::static_check_supported_config<T, A>();
1623
+ return kernel::nearbyint_as_int(x, A {});
1624
+ }
1625
+
1626
+ /**
1627
+ * @ingroup batch_logical
1628
+ *
1629
+ * Element-wise inequality comparison of batches \c x and \c y.
1630
+ * @param x batch involved in the comparison.
1631
+ * @param y batch involved in the comparison.
1632
+ * @return a boolean batch.
1633
+ */
1634
+ template <class T, class A>
1635
+ XSIMD_INLINE auto neq(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x != y)
1636
+ {
1637
+ detail::static_check_supported_config<T, A>();
1638
+ return x != y;
1639
+ }
1640
+
1641
+ /**
1642
+ * @ingroup batch_logical
1643
+ *
1644
+ * Element-wise inequality comparison of batches of boolean values \c x and \c y.
1645
+ * @param x batch of booleans involved in the comparison.
1646
+ * @param y batch of booleans involved in the comparison.
1647
+ * @return a boolean batch.
1648
+ */
1649
+ template <class T, class A>
1650
+ XSIMD_INLINE auto neq(batch_bool<T, A> const& x, batch_bool<T, A> const& y) noexcept -> decltype(x != y)
1651
+ {
1652
+ detail::static_check_supported_config<T, A>();
1653
+ return x != y;
1654
+ }
1655
+
1656
+ /**
1657
+ * @ingroup batch_arithmetic
1658
+ *
1659
+ * Computes the opposite of the batch \c x.
1660
+ * @param x batch involved in the operation.
1661
+ * @return the opposite of \c x.
1662
+ */
1663
+ template <class T, class A>
1664
+ XSIMD_INLINE batch<T, A> neg(batch<T, A> const& x) noexcept
1665
+ {
1666
+ detail::static_check_supported_config<T, A>();
1667
+ return -x;
1668
+ }
1669
+
1670
+ /**
1671
+ * @ingroup batch_math_extra
1672
+ *
1673
+ * Computes the next representable floating-point
1674
+ * value following x in the direction of y
1675
+ * @param x batch of floating point values.
1676
+ * @param y batch of floating point values.
1677
+ * @return the next representable value after \c x in the direction of \c y.
1678
+ */
1679
+ template <class T, class A>
1680
+ XSIMD_INLINE batch<T, A> nextafter(batch<T, A> const& x, batch<T, A> const& y) noexcept
1681
+ {
1682
+ detail::static_check_supported_config<T, A>();
1683
+ return kernel::nextafter<A>(x, y, A {});
1684
+ }
1685
+
1686
+ /**
1687
+ * @ingroup batch_complex
1688
+ *
1689
+ * Computes the norm of the batch \c x.
1690
+ * @param x batch of complex or real values.
1691
+ * @return the norm of \c x.
1692
+ */
1693
+ template <class T, class A>
1694
+ XSIMD_INLINE real_batch_type_t<batch<T, A>> norm(batch<T, A> const& x) noexcept
1695
+ {
1696
+ detail::static_check_supported_config<T, A>();
1697
+ return kernel::norm(x, A {});
1698
+ }
1699
+
1700
+ /**
1701
+ * @ingroup batch_math
1702
+ *
1703
+ * Returns a complex batch with magnitude \c r and phase angle \c theta.
1704
+ * @param r The magnitude of the desired complex result.
1705
+ * @param theta The phase angle of the desired complex result.
1706
+ * @return \c r exp(i * \c theta).
1707
+ */
1708
+ template <class T, class A>
1709
+ XSIMD_INLINE complex_batch_type_t<batch<T, A>> polar(batch<T, A> const& r, batch<T, A> const& theta = batch<T, A> {}) noexcept
1710
+ {
1711
+ detail::static_check_supported_config<T, A>();
1712
+ return kernel::polar<A>(r, theta, A {});
1713
+ }
1714
+
1715
+ /**
1716
+ * @ingroup batch_arithmetic
1717
+ *
1718
+ * No-op on \c x.
1719
+ * @param x batch involved in the operation.
1720
+ * @return \c x.
1721
+ */
1722
+ template <class T, class A>
1723
+ XSIMD_INLINE batch<T, A> pos(batch<T, A> const& x) noexcept
1724
+ {
1725
+ detail::static_check_supported_config<T, A>();
1726
+ return +x;
1727
+ }
1728
+
1729
+ /**
1730
+ * @ingroup batch_math
1731
+ *
1732
+ * Computes the value of the batch \c x raised to the power
1733
+ * \c y.
1734
+ * @param x batch of floating point values.
1735
+ * @param y batch of floating point values.
1736
+ * @return \c x raised to the power \c y.
1737
+ */
1738
+ template <class T, class A>
1739
+ XSIMD_INLINE batch<T, A> pow(batch<T, A> const& x, batch<T, A> const& y) noexcept
1740
+ {
1741
+ detail::static_check_supported_config<T, A>();
1742
+ return kernel::pow<A>(x, y, A {});
1743
+ }
1744
+
1745
+ /**
1746
+ * @ingroup batch_math
1747
+ *
1748
+ * Computes the value of the batch \c x raised to the power
1749
+ * \c y.
1750
+ * @param x batch of complex floating point values.
1751
+ * @param y batch of floating point values.
1752
+ * @return \c x raised to the power \c y.
1753
+ */
1754
+ template <class T, class A>
1755
+ XSIMD_INLINE batch<std::complex<T>, A> pow(batch<std::complex<T>, A> const& x, batch<T, A> const& y) noexcept
1756
+ {
1757
+ detail::static_check_supported_config<T, A>();
1758
+ return kernel::pow<A>(x, y, A {});
1759
+ }
1760
+
1761
+ /**
1762
+ * @ingroup batch_math
1763
+ *
1764
+ * Computes the value of the batch \c x raised to the power
1765
+ * \c y.
1766
+ * @param x batch of complex floating point values.
1767
+ * @param y batch of floating point values.
1768
+ * @return \c x raised to the power \c y.
1769
+ */
1770
+ template <class T, class A>
1771
+ XSIMD_INLINE batch<std::complex<T>, A> pow(batch<T, A> const& x, batch<std::complex<T>, A> const& y) noexcept
1772
+ {
1773
+ detail::static_check_supported_config<T, A>();
1774
+ return kernel::pow<A>(x, y, A {});
1775
+ }
1776
+
1777
+ /**
1778
+ * @ingroup batch_math
1779
+ *
1780
+ * Computes the value of the batch \c x raised to the power
1781
+ * \c y.
1782
+ * @param x batch of integral values.
1783
+ * @param y batch of integral values.
1784
+ * @return \c x raised to the power \c y.
1785
+ */
1786
+ template <class T, class ITy, class A, class = typename std::enable_if<std::is_integral<ITy>::value, void>::type>
1787
+ XSIMD_INLINE batch<T, A> pow(batch<T, A> const& x, ITy y) noexcept
1788
+ {
1789
+ detail::static_check_supported_config<T, A>();
1790
+ return kernel::ipow<A>(x, y, A {});
1791
+ }
1792
+
1793
+ /**
1794
+ * @ingroup batch_complex
1795
+ *
1796
+ * Computes the projection of the batch \c z.
1797
+ * @param z batch of complex or real values.
1798
+ * @return the projection of \c z.
1799
+ */
1800
+ template <class T, class A>
1801
+ XSIMD_INLINE complex_batch_type_t<batch<T, A>> proj(batch<T, A> const& z) noexcept
1802
+ {
1803
+ detail::static_check_supported_config<T, A>();
1804
+ return kernel::proj(z, A {});
1805
+ }
1806
+
1807
+ /**
1808
+ * @ingroup batch_complex
1809
+ *
1810
+ * Computes the real part of the batch \c z.
1811
+ * @param z batch of complex or real values.
1812
+ * @return the real part of \c z.
1813
+ */
1814
+ template <class T, class A>
1815
+ XSIMD_INLINE real_batch_type_t<batch<T, A>> real(batch<T, A> const& z) noexcept
1816
+ {
1817
+ detail::static_check_supported_config<T, A>();
1818
+ return kernel::real<A>(z, A {});
1819
+ }
1820
+
1821
+ /**
1822
+ * @ingroup batch_arithmetic
1823
+ *
1824
+ * Computes the approximate reciprocal of the batch \c x.
1825
+ * The maximum relative error for this approximation is
1826
+ * less than 1.5*2^-12.
1827
+ * @param x batch of floating point numbers.
1828
+ * @return the reciprocal.
1829
+ */
1830
+ template <class T, class A, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
1831
+ XSIMD_INLINE batch<T, A> reciprocal(batch<T, A> const& x) noexcept
1832
+ {
1833
+ detail::static_check_supported_config<T, A>();
1834
+ return kernel::reciprocal(x, A {});
1835
+ }
1836
+
1837
+ /**
1838
+ * @ingroup batch_reducers
1839
+ *
1840
+ * Generic reducer using only batch operations
1841
+ * @param f reducing function, accepting `batch ()(batch, batch)`
1842
+ * @param x batch involved in the reduction
1843
+ * @return the result of the reduction, as a scalar.
1844
+ */
1845
+ template <class T, class A, class F>
1846
+ XSIMD_INLINE T reduce(F&& f, batch<T, A> const& x) noexcept
1847
+ {
1848
+ detail::static_check_supported_config<T, A>();
1849
+ return kernel::detail::reduce(std::forward<F>(f), x, std::integral_constant<unsigned, batch<T, A>::size>());
1850
+ }
1851
+
1852
+ /**
1853
+ * @ingroup batch_reducers
1854
+ *
1855
+ * Adds all the scalars of the batch \c x.
1856
+ * @param x batch involved in the reduction
1857
+ * @return the result of the reduction.
1858
+ */
1859
+ template <class T, class A>
1860
+ XSIMD_INLINE T reduce_add(batch<T, A> const& x) noexcept
1861
+ {
1862
+ detail::static_check_supported_config<T, A>();
1863
+ return kernel::reduce_add<A>(x, A {});
1864
+ }
1865
+
1866
+ /**
1867
+ * @ingroup batch_reducers
1868
+ *
1869
+ * Max of all the scalars of the batch \c x.
1870
+ * @param x batch involved in the reduction
1871
+ * @return the result of the reduction.
1872
+ */
1873
+ template <class T, class A>
1874
+ XSIMD_INLINE T reduce_max(batch<T, A> const& x) noexcept
1875
+ {
1876
+ detail::static_check_supported_config<T, A>();
1877
+ return kernel::reduce_max<A>(x, A {});
1878
+ }
1879
+
1880
+ /**
1881
+ * @ingroup batch_reducers
1882
+ *
1883
+ * Min of all the scalars of the batch \c x.
1884
+ * @param x batch involved in the reduction
1885
+ * @return the result of the reduction.
1886
+ */
1887
+ template <class T, class A>
1888
+ XSIMD_INLINE T reduce_min(batch<T, A> const& x) noexcept
1889
+ {
1890
+ detail::static_check_supported_config<T, A>();
1891
+ return kernel::reduce_min<A>(x, A {});
1892
+ }
1893
+
1894
+ /**
1895
+ * @ingroup batch_reducers
1896
+ *
1897
+ * Multiplies of all the scalars of the batch \c x.
1898
+ * @param x batch involved in the reduction
1899
+ * @return the result of the reduction.
1900
+ */
1901
+ template <class T, class A>
1902
+ XSIMD_INLINE T reduce_mul(batch<T, A> const& x) noexcept
1903
+ {
1904
+ detail::static_check_supported_config<T, A>();
1905
+ return kernel::reduce_mul<A>(x, A {});
1906
+ }
1907
+
1908
+ /**
1909
+ * @ingroup batch_math
1910
+ *
1911
+ * Computes the remainder of dividing \c x by \c y
1912
+ * @param x batch of scalar values
1913
+ * @param y batch of scalar values
1914
+ * @return the remainder of dividing \c x by \c y.
1915
+ */
1916
+ template <class T, class A>
1917
+ XSIMD_INLINE batch<T, A> remainder(batch<T, A> const& x, batch<T, A> const& y) noexcept
1918
+ {
1919
+ detail::static_check_supported_config<T, A>();
1920
+ return kernel::remainder<A>(x, y, A {});
1921
+ }
1922
+
1923
+ /**
1924
+ * @ingroup batch_rounding
1925
+ *
1926
+ * Rounds the scalars in \c x to integer values (in floating point format), using
1927
+ * the current rounding mode.
1928
+ * @param x batch of floating point values.
1929
+ * @return the batch of rounded values.
1930
+ */
1931
+ template <class T, class A>
1932
+ XSIMD_INLINE batch<T, A> rint(batch<T, A> const& x) noexcept
1933
+ {
1934
+ detail::static_check_supported_config<T, A>();
1935
+ return nearbyint(x);
1936
+ }
1937
+
1938
+ /**
1939
+ * @ingroup batch_data_transfer
1940
+ *
1941
+ * Slide the whole batch to the left by \c n elements, and reintroduce the
1942
+ * slided out elements from the right. This is different from
1943
+ * \c rotl that rotates each batch element to the left.
1944
+ *
1945
+ * @tparam N Amount of elements to rotate to the left.
1946
+ * @param x batch of integer values.
1947
+ * @return rotated batch.
1948
+ */
1949
+ template <size_t N, class T, class A>
1950
+ XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& x) noexcept
1951
+ {
1952
+ detail::static_check_supported_config<T, A>();
1953
+ return kernel::rotate_left<N, A>(x, A {});
1954
+ }
1955
+
1956
+ /**
1957
+ * @ingroup batch_data_transfer
1958
+ *
1959
+ * Slide the whole batch to the right by \c n elements, and reintroduce the
1960
+ * slided out elements from the left. This is different from
1961
+ * \c rotr that rotates each batch element to the right.
1962
+ *
1963
+ * @tparam N Amount of elements to rotate to the right.
1964
+ * @param x batch of integer values.
1965
+ * @return rotated batch.
1966
+ */
1967
+ template <size_t N, class T, class A>
1968
+ XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& x) noexcept
1969
+ {
1970
+ detail::static_check_supported_config<T, A>();
1971
+ return kernel::rotate_right<N, A>(x, A {});
1972
+ }
1973
+
1974
+ /**
1975
+ * @ingroup batch_bitwise
1976
+ *
1977
+ * Perform a bitwise shift to the left, reintroducing the shifted out bits
1978
+ * to the right
1979
+ * @param x batch to rotate
1980
+ * @param shift scalar amount to shift
1981
+ * @return rotated \c x.
1982
+ */
1983
+ template <class T, class A>
1984
+ XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& x, int shift) noexcept
1985
+ {
1986
+ detail::static_check_supported_config<T, A>();
1987
+ return kernel::rotl<A>(x, shift, A {});
1988
+ }
1989
+ template <class T, class A>
1990
+ XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& x, batch<T, A> const& shift) noexcept
1991
+ {
1992
+ detail::static_check_supported_config<T, A>();
1993
+ return kernel::rotl<A>(x, shift, A {});
1994
+ }
1995
+ template <size_t count, class T, class A>
1996
+ XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& x) noexcept
1997
+ {
1998
+ detail::static_check_supported_config<T, A>();
1999
+ return kernel::rotl<count, A>(x, A {});
2000
+ }
2001
+
2002
+ /**
2003
+ * @ingroup batch_bitwise
2004
+ *
2005
+ * Perform a bitwise shift to the right, reintroducing the shifted out bits
2006
+ * to the left.
2007
+ * @param x batch to rotate
2008
+ * @param shift scalar amount to shift
2009
+ * @return rotated \c x.
2010
+ */
2011
+ template <class T, class A>
2012
+ XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& x, int shift) noexcept
2013
+ {
2014
+ detail::static_check_supported_config<T, A>();
2015
+ return kernel::rotr<A>(x, shift, A {});
2016
+ }
2017
+ template <class T, class A>
2018
+ XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& x, batch<T, A> const& shift) noexcept
2019
+ {
2020
+ detail::static_check_supported_config<T, A>();
2021
+ return kernel::rotr<A>(x, shift, A {});
2022
+ }
2023
+ template <size_t count, class T, class A>
2024
+ XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& x) noexcept
2025
+ {
2026
+ detail::static_check_supported_config<T, A>();
2027
+ return kernel::rotr<count, A>(x, A {});
2028
+ }
2029
+
2030
+ /**
2031
+ * @ingroup batch_rounding
2032
+ *
2033
+ * Computes the batch of nearest integer values to scalars in \c x (in
2034
+ * floating point format), rounding halfway cases away from zero, regardless
2035
+ * of the current rounding mode.
2036
+ * @param x batch of floating point values.
2037
+ * @return the batch of nearest integer values.
2038
+ */
2039
+ template <class T, class A>
2040
+ XSIMD_INLINE batch<T, A> round(batch<T, A> const& x) noexcept
2041
+ {
2042
+ detail::static_check_supported_config<T, A>();
2043
+ return kernel::round<A>(x, A {});
2044
+ }
2045
+
2046
+ /**
2047
+ * @ingroup batch_math
2048
+ *
2049
+ * Computes an estimate of the inverse square root of the batch \c x.
2050
+ *
2051
+ * @warning Unlike most xsimd function, this does not return the same result as the
2052
+ * equivalent scalar operation, trading accuracy for speed.
2053
+ *
2054
+ * @param x batch of floating point values.
2055
+ * @return the inverse square root of \c x.
2056
+ */
2057
+ template <class T, class A>
2058
+ XSIMD_INLINE batch<T, A> rsqrt(batch<T, A> const& x) noexcept
2059
+ {
2060
+ detail::static_check_supported_config<T, A>();
2061
+ return kernel::rsqrt<A>(x, A {});
2062
+ }
2063
+
2064
+ /**
2065
+ * @ingroup batch_arithmetic
2066
+ *
2067
+ * Computes the saturate sum of the batch \c x and the batch \c y.
2068
+
2069
+ * @tparam X the actual type of batch.
2070
+ * @param x batch involved in the saturated addition.
2071
+ * @param y batch involved in the saturated addition.
2072
+ * @return the result of the saturated addition.
2073
+ */
2074
+ template <class T, class A>
2075
+ XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& x, batch<T, A> const& y) noexcept
2076
+ {
2077
+ detail::static_check_supported_config<T, A>();
2078
+ return kernel::sadd<A>(x, y, A {});
2079
+ }
2080
+
2081
+ /**
2082
+ * @ingroup batch_cond
2083
+ *
2084
+ * Ternary operator for batches: selects values from the batches \c true_br or \c false_br
2085
+ * depending on the boolean values in the constant batch \c cond. Equivalent to
2086
+ * \code{.cpp}
2087
+ * for(std::size_t i = 0; i < N; ++i)
2088
+ * res[i] = cond[i] ? true_br[i] : false_br[i];
2089
+ * \endcode
2090
+ * @param cond batch condition.
2091
+ * @param true_br batch values for truthy condition.
2092
+ * @param false_br batch value for falsy condition.
2093
+ * @return the result of the selection.
2094
+ */
2095
+ template <class T, class A>
2096
+ XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br) noexcept
2097
+ {
2098
+ detail::static_check_supported_config<T, A>();
2099
+ return kernel::select<A>(cond, true_br, false_br, A {});
2100
+ }
2101
+
2102
+ /**
2103
+ * @ingroup batch_cond
2104
+ *
2105
+ * Ternary operator for batches: selects values from the batches \c true_br or \c false_br
2106
+ * depending on the boolean values in the constant batch \c cond. Equivalent to
2107
+ * \code{.cpp}
2108
+ * for(std::size_t i = 0; i < N; ++i)
2109
+ * res[i] = cond[i] ? true_br[i] : false_br[i];
2110
+ * \endcode
2111
+ * @param cond batch condition.
2112
+ * @param true_br batch values for truthy condition.
2113
+ * @param false_br batch value for falsy condition.
2114
+ * @return the result of the selection.
2115
+ */
2116
+ template <class T, class A>
2117
+ XSIMD_INLINE batch<std::complex<T>, A> select(batch_bool<T, A> const& cond, batch<std::complex<T>, A> const& true_br, batch<std::complex<T>, A> const& false_br) noexcept
2118
+ {
2119
+ detail::static_check_supported_config<T, A>();
2120
+ return kernel::select<A>(cond, true_br, false_br, A {});
2121
+ }
2122
+
2123
+ /**
2124
+ * @ingroup batch_cond
2125
+ *
2126
+ * Ternary operator for batches: selects values from the batches \c true_br or \c false_br
2127
+ * depending on the boolean values in the constant batch \c cond. Equivalent to
2128
+ * \code{.cpp}
2129
+ * for(std::size_t i = 0; i < N; ++i)
2130
+ * res[i] = cond[i] ? true_br[i] : false_br[i];
2131
+ * \endcode
2132
+ * @param cond constant batch condition.
2133
+ * @param true_br batch values for truthy condition.
2134
+ * @param false_br batch value for falsy condition.
2135
+ * @return the result of the selection.
2136
+ */
2137
+ template <class T, class A, bool... Values>
2138
+ XSIMD_INLINE batch<T, A> select(batch_bool_constant<T, A, Values...> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br) noexcept
2139
+ {
2140
+ detail::static_check_supported_config<T, A>();
2141
+ return kernel::select<A>(cond, true_br, false_br, A {});
2142
+ }
2143
+
2144
+ /**
2145
+ * @ingroup batch_data_transfer
2146
+ *
2147
+ * Combine elements from \c x and \c y according to selector \c mask
2148
+ * @param x batch
2149
+ * @param y batch
2150
+ * @param mask constant batch mask of integer elements of the same size as
2151
+ * element of \c x and \c y. Each element of the mask index the vector that
2152
+ * would be formed by the concatenation of \c x and \c y. For instance
2153
+ * \code{.cpp}
2154
+ * batch_constant<uint32_t, sse2, 0, 4, 3, 7>
2155
+ * \endcode
2156
+ * Picks \c x[0], \c y[0], \c x[3], \c y[3]
2157
+ *
2158
+ * @return combined batch
2159
+ */
2160
+ template <class T, class A, class Vt, Vt... Values>
2161
+ XSIMD_INLINE typename std::enable_if<std::is_arithmetic<T>::value, batch<T, A>>::type
2162
+ shuffle(batch<T, A> const& x, batch<T, A> const& y, batch_constant<Vt, A, Values...> mask) noexcept
2163
+ {
2164
+ static_assert(sizeof(T) == sizeof(Vt), "consistent mask");
2165
+ static_assert(std::is_unsigned<Vt>::value, "mask must hold unsigned indices");
2166
+ detail::static_check_supported_config<T, A>();
2167
+ return kernel::shuffle<A>(x, y, mask, A {});
2168
+ }
2169
+
2170
+ /**
2171
+ * @ingroup batch_miscellaneous
2172
+ *
2173
+ * Computes the sign of \c x
2174
+ * @param x batch
2175
+ * @return -1 for each negative element, -1 or +1 for each null element and +1 for each element
2176
+ */
2177
+ template <class T, class A>
2178
+ XSIMD_INLINE batch<T, A> sign(batch<T, A> const& x) noexcept
2179
+ {
2180
+ detail::static_check_supported_config<T, A>();
2181
+ return kernel::sign<A>(x, A {});
2182
+ }
2183
+
2184
+ /**
2185
+ * @ingroup batch_miscellaneous
2186
+ *
2187
+ * Computes the sign of \c x, assuming x doesn't have any zero
2188
+ * @param x batch
2189
+ * @return -1 for each negative element, -1 or +1 for each null element and +1 for each element
2190
+ */
2191
+ template <class T, class A>
2192
+ XSIMD_INLINE batch<T, A> signnz(batch<T, A> const& x) noexcept
2193
+ {
2194
+ detail::static_check_supported_config<T, A>();
2195
+ return kernel::signnz<A>(x, A {});
2196
+ }
2197
+
2198
+ /**
2199
+ * @ingroup batch_trigo
2200
+ *
2201
+ * Computes the sine of the batch \c x.
2202
+ * @param x batch of floating point values.
2203
+ * @return the sine of \c x.
2204
+ */
2205
+ template <class T, class A>
2206
+ XSIMD_INLINE batch<T, A> sin(batch<T, A> const& x) noexcept
2207
+ {
2208
+ detail::static_check_supported_config<T, A>();
2209
+ return kernel::sin<A>(x, A {});
2210
+ }
2211
+
2212
+ /**
2213
+ * @ingroup batch_trigo
2214
+ *
2215
+ * Computes the sine and the cosine of the batch \c x. This method is faster
2216
+ * than calling sine and cosine independently.
2217
+ * @param x batch of floating point values.
2218
+ * @return a pair containing the sine then the cosine of batch \c x
2219
+ */
2220
+ template <class T, class A>
2221
+ XSIMD_INLINE std::pair<batch<T, A>, batch<T, A>> sincos(batch<T, A> const& x) noexcept
2222
+ {
2223
+ detail::static_check_supported_config<T, A>();
2224
+ return kernel::sincos<A>(x, A {});
2225
+ }
2226
+
2227
+ /**
2228
+ * @ingroup batch_trigo
2229
+ *
2230
+ * Computes the hyperbolic sine of the batch \c x.
2231
+ * @param x batch of floating point values.
2232
+ * @return the hyperbolic sine of \c x.
2233
+ */
2234
+ template <class T, class A>
2235
+ XSIMD_INLINE batch<T, A> sinh(batch<T, A> const& x) noexcept
2236
+ {
2237
+ detail::static_check_supported_config<T, A>();
2238
+ return kernel::sinh<A>(x, A {});
2239
+ }
2240
+
2241
+ /**
2242
+ * @ingroup batch_data_transfer
2243
+ *
2244
+ * Slide the whole batch to the left by \c n bytes. This is different from
2245
+ * \c bitwise_lshift that shifts each batch element to the left.
2246
+ *
2247
+ * @warning The behavior of this function is platform-dependent on big
2248
+ * endian architectures.
2249
+ *
2250
+ * @tparam N Amount of bytes to slide to the left.
2251
+ * @param x batch of integer values.
2252
+ * @return slided batch.
2253
+ */
2254
+ template <size_t N, class T, class A>
2255
+ XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x) noexcept
2256
+ {
2257
+ static_assert(std::is_integral<T>::value, "can only slide batch of integers");
2258
+ detail::static_check_supported_config<T, A>();
2259
+ return kernel::slide_left<N, A>(x, A {});
2260
+ }
2261
+
2262
+ /**
2263
+ * @ingroup batch_data_transfer
2264
+ *
2265
+ * Slide the whole batch to the right by \c N bytes. This is different from
2266
+ * \c bitwise_rshift that shifts each batch element to the right.
2267
+ *
2268
+ * @warning The behavior of this function is platform-dependent on big
2269
+ * endian architectures.
2270
+ *
2271
+ * @tparam N Amount of bytes to slide to the right.
2272
+ * @param x batch of integer values.
2273
+ * @return slided batch.
2274
+ */
2275
+ template <size_t N, class T, class A>
2276
+ XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x) noexcept
2277
+ {
2278
+ static_assert(std::is_integral<T>::value, "can only slide batch of integers");
2279
+ detail::static_check_supported_config<T, A>();
2280
+ return kernel::slide_right<N, A>(x, A {});
2281
+ }
2282
+
2283
+ /**
2284
+ * @ingroup batch_math
2285
+ *
2286
+ * Computes the square root of the batch \c x.
2287
+ * @param x batch of floating point values.
2288
+ * @return the square root of \c x.
2289
+ */
2290
+ template <class T, class A>
2291
+ XSIMD_INLINE batch<T, A> sqrt(batch<T, A> const& x) noexcept
2292
+ {
2293
+ detail::static_check_supported_config<T, A>();
2294
+ return kernel::sqrt<A>(x, A {});
2295
+ }
2296
+
2297
+ /**
2298
+ * @ingroup batch_arithmetic
2299
+ *
2300
+ * Computes the saturate difference of the batch \c x and the batch \c y.
2301
+ * @tparam X the actual type of batch.
2302
+ * @param x batch involved in the saturated difference.
2303
+ * @param y batch involved in the saturated difference.
2304
+ * @return the result of the saturated difference.
2305
+ */
2306
+ template <class T, class A>
2307
+ XSIMD_INLINE batch<T, A> ssub(batch<T, A> const& x, batch<T, A> const& y) noexcept
2308
+ {
2309
+ detail::static_check_supported_config<T, A>();
2310
+ return kernel::ssub<A>(x, y, A {});
2311
+ }
2312
+
2313
+ /**
2314
+ * @ingroup batch_data_transfer
2315
+ *
2316
+ * Copy content of batch \c src to the buffer \c dst. The
2317
+ * memory needs to be aligned.
2318
+ * @param dst the memory buffer to write to
2319
+ * @param src the batch to copy
2320
+ */
2321
+ template <class To, class A = default_arch, class From>
2322
+ XSIMD_INLINE void store_as(To* dst, batch<From, A> const& src, aligned_mode) noexcept
2323
+ {
2324
+ detail::static_check_supported_config<From, A>();
2325
+ kernel::store_aligned<A>(dst, src, A {});
2326
+ }
2327
+
2328
+ template <class A = default_arch, class From>
2329
+ XSIMD_INLINE void store_as(bool* dst, batch_bool<From, A> const& src, aligned_mode) noexcept
2330
+ {
2331
+ detail::static_check_supported_config<From, A>();
2332
+ kernel::store<A>(src, dst, A {});
2333
+ }
2334
+
2335
+ template <class To, class A = default_arch, class From>
2336
+ XSIMD_INLINE void store_as(std::complex<To>* dst, batch<std::complex<From>, A> const& src, aligned_mode) noexcept
2337
+ {
2338
+ detail::static_check_supported_config<std::complex<From>, A>();
2339
+ kernel::store_complex_aligned<A>(dst, src, A {});
2340
+ }
2341
+
2342
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
2343
+ template <class To, class A = default_arch, class From, bool i3ec>
2344
+ XSIMD_INLINE void store_as(xtl::xcomplex<To, To, i3ec>* dst, batch<std::complex<From>, A> const& src, aligned_mode) noexcept
2345
+ {
2346
+ store_as(reinterpret_cast<std::complex<To>*>(dst), src, aligned_mode());
2347
+ }
2348
+ #endif
2349
+
2350
+ /**
2351
+ * @ingroup batch_data_transfer
2352
+ *
2353
+ * Copy content of batch \c src to the buffer \c dst. The
2354
+ * memory does not need to be aligned.
2355
+ * @param dst the memory buffer to write to
2356
+ * @param src the batch to copy
2357
+ */
2358
+ template <class To, class A = default_arch, class From>
2359
+ XSIMD_INLINE void store_as(To* dst, batch<From, A> const& src, unaligned_mode) noexcept
2360
+ {
2361
+ detail::static_check_supported_config<From, A>();
2362
+ kernel::store_unaligned<A>(dst, src, A {});
2363
+ }
2364
+
2365
+ template <class A = default_arch, class From>
2366
+ XSIMD_INLINE void store_as(bool* dst, batch_bool<From, A> const& src, unaligned_mode) noexcept
2367
+ {
2368
+ detail::static_check_supported_config<From, A>();
2369
+ kernel::store<A>(src, dst, A {});
2370
+ }
2371
+
2372
+ template <class To, class A = default_arch, class From>
2373
+ XSIMD_INLINE void store_as(std::complex<To>* dst, batch<std::complex<From>, A> const& src, unaligned_mode) noexcept
2374
+ {
2375
+ detail::static_check_supported_config<std::complex<From>, A>();
2376
+ kernel::store_complex_unaligned<A>(dst, src, A {});
2377
+ }
2378
+
2379
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
2380
+ template <class To, class A = default_arch, class From, bool i3ec>
2381
+ XSIMD_INLINE void store_as(xtl::xcomplex<To, To, i3ec>* dst, batch<std::complex<From>, A> const& src, unaligned_mode) noexcept
2382
+ {
2383
+ detail::static_check_supported_config<std::complex<From>, A>();
2384
+ store_as(reinterpret_cast<std::complex<To>*>(dst), src, unaligned_mode());
2385
+ }
2386
+ #endif
2387
+
2388
+ /**
2389
+ * @ingroup batch_data_transfer
2390
+ *
2391
+ * Copy content of batch \c val to the buffer \c mem. The
2392
+ * memory does not need to be aligned.
2393
+ * @param mem the memory buffer to write to
2394
+ * @param val the batch to copy from
2395
+ */
2396
+ template <class A, class T>
2397
+ XSIMD_INLINE void store(T* mem, batch<T, A> const& val, aligned_mode = {}) noexcept
2398
+ {
2399
+ store_as<T, A>(mem, val, aligned_mode {});
2400
+ }
2401
+
2402
+ /**
2403
+ * @ingroup batch_data_transfer
2404
+ *
2405
+ * Copy content of batch \c val to the buffer \c mem. The
2406
+ * memory does not need to be aligned.
2407
+ * @param mem the memory buffer to write to
2408
+ * @param val the batch to copy from
2409
+ */
2410
+ template <class A, class T>
2411
+ XSIMD_INLINE void store(T* mem, batch<T, A> const& val, unaligned_mode) noexcept
2412
+ {
2413
+ store_as<T, A>(mem, val, unaligned_mode {});
2414
+ }
2415
+
2416
+ /**
2417
+ * @ingroup batch_data_transfer
2418
+ *
2419
+ * Copy content of batch \c val to the buffer \c mem. The
2420
+ * memory needs to be aligned.
2421
+ * @param mem the memory buffer to write to
2422
+ * @param val the batch to copy from
2423
+ */
2424
+ template <class A, class T>
2425
+ XSIMD_INLINE void store_aligned(T* mem, batch<T, A> const& val) noexcept
2426
+ {
2427
+ store_as<T, A>(mem, val, aligned_mode {});
2428
+ }
2429
+
2430
+ /**
2431
+ * @ingroup batch_data_transfer
2432
+ *
2433
+ * Copy content of batch \c val to the buffer \c mem. The
2434
+ * memory does not need to be aligned.
2435
+ * @param mem the memory buffer to write to
2436
+ * @param val the batch to copy
2437
+ */
2438
+ template <class A, class T>
2439
+ XSIMD_INLINE void store_unaligned(T* mem, batch<T, A> const& val) noexcept
2440
+ {
2441
+ store_as<T, A>(mem, val, unaligned_mode {});
2442
+ }
2443
+
2444
+ /**
2445
+ * @ingroup batch_arithmetic
2446
+ *
2447
+ * Computes the difference between \c x and \c y
2448
+ * @tparam X the actual type of batch.
2449
+ * @param x scalar or batch of scalars
2450
+ * @param y scalar or batch of scalars
2451
+ * @return the difference between \c x and \c y
2452
+ */
2453
+ template <class T, class A>
2454
+ XSIMD_INLINE auto sub(batch<T, A> const& x, batch<T, A> const& y) noexcept -> decltype(x - y)
2455
+ {
2456
+ detail::static_check_supported_config<T, A>();
2457
+ return x - y;
2458
+ }
2459
+
2460
+ /**
2461
+ * @ingroup batch_data_transfer
2462
+ *
2463
+ * Rearrange elements from \c x according to constant mask \c mask
2464
+ * @param x batch
2465
+ * @param mask constant batch mask of integer elements of the same size as
2466
+ * element of \c x
2467
+ * @return swizzled batch
2468
+ */
2469
+ template <class T, class A, class Vt, Vt... Values>
2470
+ XSIMD_INLINE typename std::enable_if<std::is_arithmetic<T>::value, batch<T, A>>::type
2471
+ swizzle(batch<T, A> const& x, batch_constant<Vt, A, Values...> mask) noexcept
2472
+ {
2473
+ static_assert(sizeof(T) == sizeof(Vt), "consistent mask");
2474
+ detail::static_check_supported_config<T, A>();
2475
+ return kernel::swizzle<A>(x, mask, A {});
2476
+ }
2477
+ template <class T, class A, class Vt, Vt... Values>
2478
+ XSIMD_INLINE batch<std::complex<T>, A> swizzle(batch<std::complex<T>, A> const& x, batch_constant<Vt, A, Values...> mask) noexcept
2479
+ {
2480
+ static_assert(sizeof(T) == sizeof(Vt), "consistent mask");
2481
+ static_assert(std::is_unsigned<Vt>::value, "mask must hold unsigned indices");
2482
+ detail::static_check_supported_config<T, A>();
2483
+ return kernel::swizzle<A>(x, mask, A {});
2484
+ }
2485
+
2486
+ /**
2487
+ * @ingroup batch_data_transfer
2488
+ *
2489
+ * Rearrange elements from \c x according to mask \c mask
2490
+ * @param x batch
2491
+ * @param mask batch mask of integer elements of the same size as
2492
+ * element of \c x
2493
+ * @return swizzled batch
2494
+ */
2495
+ template <class T, class A, class Vt>
2496
+ XSIMD_INLINE typename std::enable_if<std::is_arithmetic<T>::value, batch<T, A>>::type
2497
+ swizzle(batch<T, A> const& x, batch<Vt, A> mask) noexcept
2498
+ {
2499
+ static_assert(sizeof(T) == sizeof(Vt), "consistent mask");
2500
+ detail::static_check_supported_config<T, A>();
2501
+ return kernel::swizzle<A>(x, mask, A {});
2502
+ }
2503
+
2504
+ template <class T, class A, class Vt>
2505
+ XSIMD_INLINE batch<std::complex<T>, A> swizzle(batch<std::complex<T>, A> const& x, batch<Vt, A> mask) noexcept
2506
+ {
2507
+ static_assert(sizeof(T) == sizeof(Vt), "consistent mask");
2508
+ detail::static_check_supported_config<T, A>();
2509
+ return kernel::swizzle<A>(x, mask, A {});
2510
+ }
2511
+
2512
+ /**
2513
+ * @ingroup batch_trigo
2514
+ *
2515
+ * Computes the tangent of the batch \c x.
2516
+ * @param x batch of floating point values.
2517
+ * @return the tangent of \c x.
2518
+ */
2519
+ template <class T, class A>
2520
+ XSIMD_INLINE batch<T, A> tan(batch<T, A> const& x) noexcept
2521
+ {
2522
+ detail::static_check_supported_config<T, A>();
2523
+ return kernel::tan<A>(x, A {});
2524
+ }
2525
+
2526
+ /**
2527
+ * @ingroup batch_trigo
2528
+ *
2529
+ * Computes the hyperbolic tangent of the batch \c x.
2530
+ * @param x batch of floating point values.
2531
+ * @return the hyperbolic tangent of \c x.
2532
+ */
2533
+ template <class T, class A>
2534
+ XSIMD_INLINE batch<T, A> tanh(batch<T, A> const& x) noexcept
2535
+ {
2536
+ detail::static_check_supported_config<T, A>();
2537
+ return kernel::tanh<A>(x, A {});
2538
+ }
2539
+
2540
+ /**
2541
+ * @ingroup batch_math_extra
2542
+ *
2543
+ * Computes the gamma function of the batch \c x.
2544
+ * @param x batch of floating point values.
2545
+ * @return the gamma function of \c x.
2546
+ */
2547
+ template <class T, class A>
2548
+ XSIMD_INLINE batch<T, A> tgamma(batch<T, A> const& x) noexcept
2549
+ {
2550
+ detail::static_check_supported_config<T, A>();
2551
+ return kernel::tgamma<A>(x, A {});
2552
+ }
2553
+
2554
+ /**
2555
+ * @ingroup batch_conversion
2556
+ *
2557
+ * Perform a conversion from \c i to a value of an floating point type of the same size as \c T.
2558
+ * This is equivalent to \c batch_cast<as_float_t<T>>(i)
2559
+ * @param i batch of integers.
2560
+ * @return \c i converted to a value of an floating point type of the same size as \c T
2561
+ */
2562
+ template <class T, class A>
2563
+ XSIMD_INLINE batch<as_float_t<T>, A> to_float(batch<T, A> const& i) noexcept
2564
+ {
2565
+ detail::static_check_supported_config<T, A>();
2566
+ return batch_cast<as_float_t<T>>(i);
2567
+ }
2568
+
2569
+ /**
2570
+ * @ingroup batch_conversion
2571
+ *
2572
+ * Perform a conversion from \c x to a value of an integer type of the same size as \c T
2573
+ * This is equivalent to \c batch_cast<as_integer_t<T>>(x)
2574
+ * @param x batch.
2575
+ * @return \c x converted to a value of an integer type of the same size as \c T
2576
+ */
2577
+ template <class T, class A>
2578
+ XSIMD_INLINE batch<as_integer_t<T>, A> to_int(batch<T, A> const& x) noexcept
2579
+ {
2580
+ detail::static_check_supported_config<T, A>();
2581
+ return batch_cast<as_integer_t<T>>(x);
2582
+ }
2583
+
2584
+ /**
2585
+ * @ingroup batch_data_transfer
2586
+ *
2587
+ * Transposes in place the matrix whose line are each of the batch passed as
2588
+ * argument.
2589
+ * @param matrix_begin pointer to the first line of the matrix to transpose
2590
+ * @param matrix_end pointer to one element after the last line of the matrix to transpose
2591
+ *
2592
+ */
2593
+ template <class T, class A>
2594
+ XSIMD_INLINE void transpose(batch<T, A>* matrix_begin, batch<T, A>* matrix_end) noexcept
2595
+ {
2596
+ assert((matrix_end - matrix_begin == batch<T, A>::size) && "correctly sized matrix");
2597
+ detail::static_check_supported_config<T, A>();
2598
+ return kernel::transpose(matrix_begin, matrix_end, A {});
2599
+ }
2600
+
2601
+ /**
2602
+ * @ingroup batch_rounding
2603
+ *
2604
+ * Computes the batch of nearest integer values not greater in magnitude
2605
+ * than scalars in \c x.
2606
+ * @param x batch of floating point values.
2607
+ * @return the batch of nearest integer values not greater in magnitude than \c x.
2608
+ */
2609
+ template <class T, class A>
2610
+ XSIMD_INLINE batch<T, A> trunc(batch<T, A> const& x) noexcept
2611
+ {
2612
+ detail::static_check_supported_config<T, A>();
2613
+ return kernel::trunc<A>(x, A {});
2614
+ }
2615
+
2616
+ /**
2617
+ * @ingroup batch_data_transfer
2618
+ *
2619
+ * Unpack and interleave data from the HIGH half of batches \c x and \c y.
2620
+ * Store the results in the Return value.
2621
+ * @param x a batch of integer or floating point or double precision values.
2622
+ * @param y a batch of integer or floating point or double precision values.
2623
+ * @return a batch of the high part of shuffled values.
2624
+ */
2625
+ template <class T, class A>
2626
+ XSIMD_INLINE batch<T, A> zip_hi(batch<T, A> const& x, batch<T, A> const& y) noexcept
2627
+ {
2628
+ detail::static_check_supported_config<T, A>();
2629
+ return kernel::zip_hi<A>(x, y, A {});
2630
+ }
2631
+
2632
+ /**
2633
+ * @ingroup batch_data_transfer
2634
+ *
2635
+ * Unpack and interleave data from the LOW half of batches \c x and \c y.
2636
+ * Store the results in the Return value.
2637
+ * @param x a batch of integer or floating point or double precision values.
2638
+ * @param y a batch of integer or floating point or double precision values.
2639
+ * @return a batch of the low part of shuffled values.
2640
+ */
2641
+ template <class T, class A>
2642
+ XSIMD_INLINE batch<T, A> zip_lo(batch<T, A> const& x, batch<T, A> const& y) noexcept
2643
+ {
2644
+ detail::static_check_supported_config<T, A>();
2645
+ return kernel::zip_lo<A>(x, y, A {});
2646
+ }
2647
+
2648
+ /**
2649
+ * @ingroup batch_conversion
2650
+ *
2651
+ * Cast a \c batch_bool of \c T into a \c batch of the same type using the
2652
+ * following rule: if an element of \c self is true, it maps to -1 in the
2653
+ * returned integral batch, otherwise it maps to 0.
2654
+ *
2655
+ * @param self batch_bool of \c T
2656
+ * @return \c self cast to a \c batch of \c T
2657
+ */
2658
+ template <class T, class A, typename std::enable_if<std::is_integral<T>::value, int>::type = 3>
2659
+ XSIMD_INLINE batch<T, A> bitwise_cast(batch_bool<T, A> const& self) noexcept
2660
+ {
2661
+ T z(0);
2662
+ detail::static_check_supported_config<T, A>();
2663
+ return select(self, batch<T, A>(T(~z)), batch<T, A>(z));
2664
+ }
2665
+
2666
+ template <class T, class A, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 3>
2667
+ XSIMD_INLINE batch<T, A> bitwise_cast(batch_bool<T, A> const& self) noexcept
2668
+ {
2669
+ T z0(0), z1(0);
2670
+ using int_type = as_unsigned_integer_t<T>;
2671
+ int_type value(~int_type(0));
2672
+ std::memcpy(&z1, &value, sizeof(int_type));
2673
+ detail::static_check_supported_config<T, A>();
2674
+ return select(self, batch<T, A>(z1), batch<T, A>(z0));
2675
+ }
2676
+
2677
+ /**
2678
+ * @ingroup batch_bool_reducers
2679
+ *
2680
+ * Returns true if all the boolean values in the batch are true,
2681
+ * false otherwise.
2682
+ * @param x the batch to reduce.
2683
+ * @return a boolean scalar.
2684
+ */
2685
+ template <class T, class A>
2686
+ XSIMD_INLINE bool all(batch_bool<T, A> const& x) noexcept
2687
+ {
2688
+ detail::static_check_supported_config<T, A>();
2689
+ return kernel::all<A>(x, A {});
2690
+ }
2691
+
2692
+ /**
2693
+ * @ingroup batch_bool_reducers
2694
+ *
2695
+ * Return true if any of the boolean values in the batch is true,
2696
+ * false otherwise.
2697
+ * @param x the batch to reduce.
2698
+ * @return a boolean scalar.
2699
+ */
2700
+ template <class T, class A>
2701
+ XSIMD_INLINE bool any(batch_bool<T, A> const& x) noexcept
2702
+ {
2703
+ detail::static_check_supported_config<T, A>();
2704
+ return kernel::any<A>(x, A {});
2705
+ }
2706
+
2707
+ /**
2708
+ * @ingroup batch_bool_reducers
2709
+ *
2710
+ * Return true if none of the boolean values in the batch is true,
2711
+ * false otherwise.
2712
+ * @param x the batch to reduce.
2713
+ * @return a boolean scalar.
2714
+ */
2715
+ template <class T, class A>
2716
+ XSIMD_INLINE bool none(batch_bool<T, A> const& x) noexcept
2717
+ {
2718
+ detail::static_check_supported_config<T, A>();
2719
+ return !xsimd::any(x);
2720
+ }
2721
+
2722
+ /**
2723
+ * @ingroup batch_miscellaneous
2724
+ *
2725
+ * Dump the content of batch \c x to stream \c o
2726
+ * @param o the stream where the batch is dumped
2727
+ * @param x batch to dump.
2728
+ * @return a reference to \c o
2729
+ */
2730
+ template <class T, class A>
2731
+ XSIMD_INLINE std::ostream& operator<<(std::ostream& o, batch<T, A> const& x) noexcept
2732
+ {
2733
+ detail::static_check_supported_config<T, A>();
2734
+ constexpr auto size = batch<T, A>::size;
2735
+ alignas(A::alignment()) T buffer[size];
2736
+ x.store_aligned(&buffer[0]);
2737
+ o << '(';
2738
+ for (std::size_t i = 0; i < size - 1; ++i)
2739
+ o << buffer[i] << ", ";
2740
+ return o << buffer[size - 1] << ')';
2741
+ }
2742
+
2743
+ /**
2744
+ * @ingroup batch_miscellaneous
2745
+ *
2746
+ * Dump the content of batch \c x to stream \c o
2747
+ * @param o the stream where the batch is dumped
2748
+ * @param x batch to dump.
2749
+ * @return a reference to \c o
2750
+ */
2751
+ template <class T, class A>
2752
+ XSIMD_INLINE std::ostream& operator<<(std::ostream& o, batch_bool<T, A> const& x) noexcept
2753
+ {
2754
+ detail::static_check_supported_config<T, A>();
2755
+ constexpr auto size = batch_bool<T, A>::size;
2756
+ alignas(A::alignment()) bool buffer[size];
2757
+ x.store_aligned(&buffer[0]);
2758
+ o << '(';
2759
+ for (std::size_t i = 0; i < size - 1; ++i)
2760
+ o << buffer[i] << ", ";
2761
+ return o << buffer[size - 1] << ')';
2762
+ }
2763
+ }
2764
+
2765
+ #endif