sequenzo-0.1.21-cp310-cp310-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sequenzo might be problematic.

Files changed (260)
  1. sequenzo/__init__.py +240 -0
  2. sequenzo/big_data/__init__.py +12 -0
  3. sequenzo/big_data/clara/__init__.py +26 -0
  4. sequenzo/big_data/clara/clara.py +467 -0
  5. sequenzo/big_data/clara/utils/__init__.py +27 -0
  6. sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
  7. sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
  8. sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
  9. sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
  10. sequenzo/big_data/clara/visualization.py +88 -0
  11. sequenzo/clustering/KMedoids.py +196 -0
  12. sequenzo/clustering/__init__.py +30 -0
  13. sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
  14. sequenzo/clustering/hierarchical_clustering.py +1380 -0
  15. sequenzo/clustering/src/KMedoid.cpp +262 -0
  16. sequenzo/clustering/src/PAM.cpp +236 -0
  17. sequenzo/clustering/src/PAMonce.cpp +234 -0
  18. sequenzo/clustering/src/cluster_quality.cpp +496 -0
  19. sequenzo/clustering/src/cluster_quality.h +128 -0
  20. sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
  21. sequenzo/clustering/src/module.cpp +228 -0
  22. sequenzo/clustering/src/weightedinertia.cpp +111 -0
  23. sequenzo/clustering/utils/__init__.py +27 -0
  24. sequenzo/clustering/utils/disscenter.py +122 -0
  25. sequenzo/data_preprocessing/__init__.py +20 -0
  26. sequenzo/data_preprocessing/helpers.py +256 -0
  27. sequenzo/datasets/__init__.py +41 -0
  28. sequenzo/datasets/biofam.csv +2001 -0
  29. sequenzo/datasets/biofam_child_domain.csv +2001 -0
  30. sequenzo/datasets/biofam_left_domain.csv +2001 -0
  31. sequenzo/datasets/biofam_married_domain.csv +2001 -0
  32. sequenzo/datasets/chinese_colonial_territories.csv +12 -0
  33. sequenzo/datasets/country_co2_emissions.csv +194 -0
  34. sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
  35. sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
  36. sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
  37. sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
  38. sequenzo/datasets/country_gdp_per_capita.csv +194 -0
  39. sequenzo/datasets/mvad.csv +713 -0
  40. sequenzo/datasets/pairfam_family.csv +1867 -0
  41. sequenzo/datasets/polyadic_samplec1.csv +61 -0
  42. sequenzo/datasets/polyadic_samplep1.csv +61 -0
  43. sequenzo/datasets/polyadic_seqc1.csv +61 -0
  44. sequenzo/datasets/polyadic_seqp1.csv +61 -0
  45. sequenzo/define_sequence_data.py +609 -0
  46. sequenzo/dissimilarity_measures/__init__.py +31 -0
  47. sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
  48. sequenzo/dissimilarity_measures/get_distance_matrix.py +702 -0
  49. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +241 -0
  50. sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
  51. sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
  52. sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
  53. sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
  54. sequenzo/dissimilarity_measures/src/__init__.py +0 -0
  55. sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
  56. sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
  57. sequenzo/dissimilarity_measures/src/module.cpp +34 -0
  58. sequenzo/dissimilarity_measures/src/setup.py +30 -0
  59. sequenzo/dissimilarity_measures/src/utils.h +25 -0
  60. sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
  61. sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
  62. sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
  63. sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
  64. sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
  65. sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
  66. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
  67. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
  68. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
  69. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
  70. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
  71. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
  72. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
  73. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
  74. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
  75. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
  76. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
  77. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
  78. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
  79. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
  80. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
  81. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
  82. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
  83. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
  84. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
  85. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
  86. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
  87. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
  88. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
  89. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
  90. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
  91. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
  92. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
  93. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
  94. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
  95. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
  96. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
  97. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
  98. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
  99. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
  100. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
  101. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
  102. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
  103. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
  104. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
  105. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
  106. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
  107. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
  108. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
  109. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
  110. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
  111. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
  112. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
  113. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
  114. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
  115. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
  116. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
  117. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
  118. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
  119. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
  120. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
  121. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
  122. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
  123. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
  124. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
  125. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
  126. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
  127. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
  128. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
  129. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
  130. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
  131. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
  132. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
  133. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
  134. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
  135. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
  136. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
  137. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
  138. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
  139. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
  140. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
  141. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
  142. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
  143. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
  144. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
  145. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
  146. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
  147. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
  148. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
  149. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
  150. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
  151. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
  152. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
  153. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
  154. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
  155. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
  156. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
  157. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
  158. sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
  159. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
  160. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
  161. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
  162. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
  163. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
  164. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
  165. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
  166. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
  167. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
  168. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
  169. sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
  170. sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
  171. sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
  172. sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
  173. sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
  174. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
  175. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
  176. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
  177. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
  178. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
  179. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
  180. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
  181. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
  182. sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
  183. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
  184. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
  185. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
  186. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
  187. sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
  188. sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
  189. sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
  190. sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
  191. sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
  192. sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
  193. sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
  194. sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
  195. sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
  196. sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
  197. sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
  198. sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
  199. sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
  200. sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
  201. sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
  202. sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
  203. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
  204. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
  205. sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
  206. sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
  207. sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
  208. sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
  209. sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
  210. sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
  211. sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
  212. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
  213. sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
  214. sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
  215. sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
  216. sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
  217. sequenzo/multidomain/__init__.py +23 -0
  218. sequenzo/multidomain/association_between_domains.py +311 -0
  219. sequenzo/multidomain/cat.py +431 -0
  220. sequenzo/multidomain/combt.py +519 -0
  221. sequenzo/multidomain/dat.py +89 -0
  222. sequenzo/multidomain/idcd.py +139 -0
  223. sequenzo/multidomain/linked_polyad.py +292 -0
  224. sequenzo/openmp_setup.py +233 -0
  225. sequenzo/prefix_tree/__init__.py +43 -0
  226. sequenzo/prefix_tree/individual_level_indicators.py +1274 -0
  227. sequenzo/prefix_tree/system_level_indicators.py +465 -0
  228. sequenzo/prefix_tree/utils.py +54 -0
  229. sequenzo/sequence_characteristics/__init__.py +40 -0
  230. sequenzo/sequence_characteristics/complexity_index.py +49 -0
  231. sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
  232. sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
  233. sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
  234. sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
  235. sequenzo/sequence_characteristics/turbulence.py +155 -0
  236. sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
  237. sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
  238. sequenzo/suffix_tree/__init__.py +48 -0
  239. sequenzo/suffix_tree/individual_level_indicators.py +1638 -0
  240. sequenzo/suffix_tree/system_level_indicators.py +456 -0
  241. sequenzo/suffix_tree/utils.py +56 -0
  242. sequenzo/visualization/__init__.py +29 -0
  243. sequenzo/visualization/plot_mean_time.py +194 -0
  244. sequenzo/visualization/plot_modal_state.py +276 -0
  245. sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
  246. sequenzo/visualization/plot_relative_frequency.py +404 -0
  247. sequenzo/visualization/plot_sequence_index.py +937 -0
  248. sequenzo/visualization/plot_single_medoid.py +153 -0
  249. sequenzo/visualization/plot_state_distribution.py +613 -0
  250. sequenzo/visualization/plot_transition_matrix.py +190 -0
  251. sequenzo/visualization/utils/__init__.py +23 -0
  252. sequenzo/visualization/utils/utils.py +310 -0
  253. sequenzo/with_event_history_analysis/__init__.py +35 -0
  254. sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
  255. sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
  256. sequenzo-0.1.21.dist-info/METADATA +308 -0
  257. sequenzo-0.1.21.dist-info/RECORD +254 -0
  258. sequenzo-0.1.21.dist-info/WHEEL +5 -0
  259. sequenzo-0.1.21.dist-info/licenses/LICENSE +28 -0
  260. sequenzo-0.1.21.dist-info/top_level.txt +1 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp (new file, +1524 lines; shown truncated below)
@@ -0,0 +1,1524 @@
+ /***************************************************************************
+  * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+  * Martin Renou                                                             *
+  * Copyright (c) QuantStack                                                 *
+  * Copyright (c) Serge Guelton                                              *
+  *                                                                          *
+  * Distributed under the terms of the BSD 3-Clause License.                 *
+  *                                                                          *
+  * The full license is in the file LICENSE, distributed with this software. *
+  ****************************************************************************/
+
+ #ifndef XSIMD_BATCH_HPP
+ #define XSIMD_BATCH_HPP
+
+ #include <cassert>
+ #include <complex>
+
+ #include "../config/xsimd_arch.hpp"
+ #include "../memory/xsimd_alignment.hpp"
+ #include "./xsimd_utils.hpp"
+
+ namespace xsimd
+ {
+     template <class T, class A = default_arch>
+     class batch;
+
+     namespace types
+     {
+         template <class T, class A>
+         struct integral_only_operators
+         {
+             XSIMD_INLINE batch<T, A>& operator%=(batch<T, A> const& other) noexcept;
+             XSIMD_INLINE batch<T, A>& operator>>=(int32_t other) noexcept;
+             XSIMD_INLINE batch<T, A>& operator>>=(batch<T, A> const& other) noexcept;
+             XSIMD_INLINE batch<T, A>& operator<<=(int32_t other) noexcept;
+             XSIMD_INLINE batch<T, A>& operator<<=(batch<T, A> const& other) noexcept;
+
+             /** Shorthand for xsimd::mod() */
+             friend XSIMD_INLINE batch<T, A> operator%(batch<T, A> const& self, batch<T, A> const& other) noexcept
+             {
+                 return batch<T, A>(self) %= other;
+             }
+
+             /** Shorthand for xsimd::bitwise_rshift() */
+             friend XSIMD_INLINE batch<T, A> operator>>(batch<T, A> const& self, batch<T, A> const& other) noexcept
+             {
+                 return batch<T, A>(self) >>= other;
+             }
+
+             /** Shorthand for xsimd::bitwise_lshift() */
+             friend XSIMD_INLINE batch<T, A> operator<<(batch<T, A> const& self, batch<T, A> const& other) noexcept
+             {
+                 return batch<T, A>(self) <<= other;
+             }
+
+             /** Shorthand for xsimd::bitwise_rshift() */
+             friend XSIMD_INLINE batch<T, A> operator>>(batch<T, A> const& self, int32_t other) noexcept
+             {
+                 return batch<T, A>(self) >>= other;
+             }
+
+             /** Shorthand for xsimd::bitwise_lshift() */
+             friend XSIMD_INLINE batch<T, A> operator<<(batch<T, A> const& self, int32_t other) noexcept
+             {
+                 return batch<T, A>(self) <<= other;
+             }
+         };
+         template <class A>
+         struct integral_only_operators<float, A>
+         {
+         };
+         template <class A>
+         struct integral_only_operators<double, A>
+         {
+         };
+
+     }
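Review note: the struct above injects the %, << and >> operators only for integer batches; the empty float and double specializations mean those operators simply do not exist for floating-point batches. An illustrative sketch of the effect (not part of the packaged file):

    #include <cstdint>
    #include <xsimd/xsimd.hpp>

    int32_t shift_demo()
    {
        xsimd::batch<int32_t> v(8);   // broadcasts 8 into every lane
        auto halved = v >> 1;         // bitwise_rshift: every lane becomes 4
        auto scaled = v << 2;         // bitwise_lshift: every lane becomes 32
        // xsimd::batch<float>(1.f) << 1; // would not compile: the float
        // specialization of integral_only_operators above is empty.
        return halved.get(0) + scaled.get(0);
    }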
+
+     namespace details
+     {
+         // These functions are forward-declared here so that they can be used by friend functions
+         // with batch<T, A>. Their implementation must appear only once the
+         // kernel implementations have been included.
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other) noexcept;
+
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other) noexcept;
+
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other) noexcept;
+
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other) noexcept;
+
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other) noexcept;
+
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& self, batch<T, A> const& other) noexcept;
+     }
+
+     /**
+      * @brief batch of integer or floating point values.
+      *
+      * Abstract representation of an SIMD register for floating point or integral
+      * values.
+      *
+      * @tparam T the type of the underlying values.
+      * @tparam A the architecture this batch is tied to.
+      **/
+     template <class T, class A>
+     class batch : public types::simd_register<T, A>, public types::integral_only_operators<T, A>
+     {
+         static_assert(!std::is_same<T, bool>::value, "use xsimd::batch_bool<T, A> instead of xsimd::batch<bool, A>");
+
+     public:
+         static constexpr std::size_t size = sizeof(types::simd_register<T, A>) / sizeof(T); ///< Number of scalar elements in this batch.
+
+         using value_type = T; ///< Type of the scalar elements within this batch.
+         using arch_type = A; ///< SIMD Architecture abstracted by this batch.
+         using register_type = typename types::simd_register<T, A>::register_type; ///< SIMD register type abstracted by this batch.
+         using batch_bool_type = batch_bool<T, A>; ///< Associated batch type used to represent logical operations on this batch.
+
+         // constructors
+         XSIMD_INLINE batch() = default; ///< Create a batch initialized with undefined values.
+         XSIMD_INLINE batch(T val) noexcept;
+         template <class... Ts>
+         XSIMD_INLINE batch(T val0, T val1, Ts... vals) noexcept;
+         XSIMD_INLINE explicit batch(batch_bool_type const& b) noexcept;
+         XSIMD_INLINE batch(register_type reg) noexcept;
+
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch broadcast(U val) noexcept;
+
+         // memory operators
+         template <class U>
+         XSIMD_INLINE void store_aligned(U* mem) const noexcept;
+         template <class U>
+         XSIMD_INLINE void store_unaligned(U* mem) const noexcept;
+         template <class U>
+         XSIMD_INLINE void store(U* mem, aligned_mode) const noexcept;
+         template <class U>
+         XSIMD_INLINE void store(U* mem, unaligned_mode) const noexcept;
+
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_aligned(U const* mem) noexcept;
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_unaligned(U const* mem) noexcept;
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load(U const* mem, aligned_mode) noexcept;
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load(U const* mem, unaligned_mode) noexcept;
+
+         template <class U, class V>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch gather(U const* src, batch<V, arch_type> const& index) noexcept;
+         template <class U, class V>
+         XSIMD_INLINE void scatter(U* dst, batch<V, arch_type> const& index) const noexcept;
+
+         XSIMD_INLINE T get(std::size_t i) const noexcept;
+
+         XSIMD_INLINE T first() const noexcept;
+
+         // comparison operators. Defined as friend to enable automatic
+         // conversion of parameters from scalar to batch, at the cost of using a
+         // proxy implementation from details::.
+         friend XSIMD_INLINE batch_bool<T, A> operator==(batch const& self, batch const& other) noexcept
+         {
+             return details::eq<T, A>(self, other);
+         }
+         friend XSIMD_INLINE batch_bool<T, A> operator!=(batch const& self, batch const& other) noexcept
+         {
+             return details::neq<T, A>(self, other);
+         }
+         friend XSIMD_INLINE batch_bool<T, A> operator>=(batch const& self, batch const& other) noexcept
+         {
+             return details::ge<T, A>(self, other);
+         }
+         friend XSIMD_INLINE batch_bool<T, A> operator<=(batch const& self, batch const& other) noexcept
+         {
+             return details::le<T, A>(self, other);
+         }
+         friend XSIMD_INLINE batch_bool<T, A> operator>(batch const& self, batch const& other) noexcept
+         {
+             return details::gt<T, A>(self, other);
+         }
+         friend XSIMD_INLINE batch_bool<T, A> operator<(batch const& self, batch const& other) noexcept
+         {
+             return details::lt<T, A>(self, other);
+         }
+
+         // Update operators
+         XSIMD_INLINE batch& operator+=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator-=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator*=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator/=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator&=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator|=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator^=(batch const& other) noexcept;
+
+         // incr/decr operators
+         XSIMD_INLINE batch& operator++() noexcept;
+         XSIMD_INLINE batch& operator--() noexcept;
+         XSIMD_INLINE batch operator++(int) noexcept;
+         XSIMD_INLINE batch operator--(int) noexcept;
+
+         // unary operators
+         XSIMD_INLINE batch_bool_type operator!() const noexcept;
+         XSIMD_INLINE batch operator~() const noexcept;
+         XSIMD_INLINE batch operator-() const noexcept;
+         XSIMD_INLINE batch operator+() const noexcept;
+
+         // arithmetic operators. They are defined as friend to enable automatic
+         // conversion of parameters from scalar to batch. Inline implementation
+         // is required to avoid warnings.
+
+         /** Shorthand for xsimd::add() */
+         friend XSIMD_INLINE batch operator+(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) += other;
+         }
+
+         /** Shorthand for xsimd::sub() */
+         friend XSIMD_INLINE batch operator-(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) -= other;
+         }
+
+         /** Shorthand for xsimd::mul() */
+         friend XSIMD_INLINE batch operator*(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) *= other;
+         }
+
+         /** Shorthand for xsimd::div() */
+         friend XSIMD_INLINE batch operator/(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) /= other;
+         }
+
+         /** Shorthand for xsimd::bitwise_and() */
+         friend XSIMD_INLINE batch operator&(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) &= other;
+         }
+
+         /** Shorthand for xsimd::bitwise_or() */
+         friend XSIMD_INLINE batch operator|(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) |= other;
+         }
+
+         /** Shorthand for xsimd::bitwise_xor() */
+         friend XSIMD_INLINE batch operator^(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) ^= other;
+         }
+
+         /** Shorthand for xsimd::logical_and() */
+         friend XSIMD_INLINE batch operator&&(batch const& self, batch const& other) noexcept
+         {
+             return batch(self).logical_and(other);
+         }
+
+         /** Shorthand for xsimd::logical_or() */
+         friend XSIMD_INLINE batch operator||(batch const& self, batch const& other) noexcept
+         {
+             return batch(self).logical_or(other);
+         }
+
+     private:
+         XSIMD_INLINE batch logical_and(batch const& other) const noexcept;
+         XSIMD_INLINE batch logical_or(batch const& other) const noexcept;
+     };
+
+ #if __cplusplus < 201703L
+     template <class T, class A>
+     constexpr std::size_t batch<T, A>::size;
+ #endif
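Review note: the class above is xsimd's core SIMD abstraction, and most of this vendored header is its interface. A minimal usage sketch modeled on xsimd's own documentation examples (assumes n is a multiple of the batch size; not part of the packaged file):

    #include <cstddef>
    #include <xsimd/xsimd.hpp>

    // Element-wise mean of two float arrays, one batch at a time.
    void mean(const float* a, const float* b, float* out, std::size_t n)
    {
        using b_type = xsimd::batch<float>;
        for (std::size_t i = 0; i < n; i += b_type::size)
        {
            b_type va = b_type::load_unaligned(a + i);
            b_type vb = b_type::load_unaligned(b + i);
            ((va + vb) / 2.f).store_unaligned(out + i); // the scalar 2.f converts via batch(T val)
        }
    }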
+
+     /**
+      * @brief batch of predicate over scalar or complex values.
+      *
+      * Abstract representation of a predicate over SIMD register for scalar or
+      * complex values.
+      *
+      * @tparam T the type of the predicated values.
+      * @tparam A the architecture this batch is tied to.
+      **/
+     template <class T, class A = default_arch>
+     class batch_bool : public types::get_bool_simd_register_t<T, A>
+     {
+         using base_type = types::get_bool_simd_register_t<T, A>;
+
+     public:
+         static constexpr std::size_t size = sizeof(types::simd_register<T, A>) / sizeof(T); ///< Number of scalar elements in this batch.
+
+         using value_type = bool; ///< Type of the scalar elements within this batch.
+         using arch_type = A; ///< SIMD Architecture abstracted by this batch.
+         using register_type = typename base_type::register_type; ///< SIMD register type abstracted by this batch.
+         using batch_type = batch<T, A>; ///< Associated batch type this batch represents logical operations for.
+
+         // constructors
+         XSIMD_INLINE batch_bool() = default; ///< Create a batch initialized with undefined values.
+         XSIMD_INLINE batch_bool(bool val) noexcept;
+         XSIMD_INLINE batch_bool(register_type reg) noexcept;
+         template <class... Ts>
+         XSIMD_INLINE batch_bool(bool val0, bool val1, Ts... vals) noexcept;
+
+         template <class Tp>
+         XSIMD_INLINE batch_bool(Tp const*) = delete;
+
+         // memory operators
+         XSIMD_INLINE void store_aligned(bool* mem) const noexcept;
+         XSIMD_INLINE void store_unaligned(bool* mem) const noexcept;
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch_bool load_aligned(bool const* mem) noexcept;
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch_bool load_unaligned(bool const* mem) noexcept;
+
+         XSIMD_INLINE bool get(std::size_t i) const noexcept;
+
+         XSIMD_INLINE bool first() const noexcept;
+
+         // mask operations
+         XSIMD_INLINE uint64_t mask() const noexcept;
+         XSIMD_INLINE static batch_bool from_mask(uint64_t mask) noexcept;
+
+         // comparison operators
+         XSIMD_INLINE batch_bool operator==(batch_bool const& other) const noexcept;
+         XSIMD_INLINE batch_bool operator!=(batch_bool const& other) const noexcept;
+
+         // logical operators
+         XSIMD_INLINE batch_bool operator~() const noexcept;
+         XSIMD_INLINE batch_bool operator!() const noexcept;
+         XSIMD_INLINE batch_bool operator&(batch_bool const& other) const noexcept;
+         XSIMD_INLINE batch_bool operator|(batch_bool const& other) const noexcept;
+         XSIMD_INLINE batch_bool operator^(batch_bool const& other) const noexcept;
+         XSIMD_INLINE batch_bool operator&&(batch_bool const& other) const noexcept;
+         XSIMD_INLINE batch_bool operator||(batch_bool const& other) const noexcept;
+
+         // update operators
+         XSIMD_INLINE batch_bool& operator&=(batch_bool const& other) noexcept { return (*this) = (*this) & other; }
+         XSIMD_INLINE batch_bool& operator|=(batch_bool const& other) noexcept { return (*this) = (*this) | other; }
+         XSIMD_INLINE batch_bool& operator^=(batch_bool const& other) noexcept { return (*this) = (*this) ^ other; }
+
+     private:
+         template <class U, class... V, size_t I, size_t... Is>
+         static XSIMD_INLINE register_type make_register(detail::index_sequence<I, Is...>, U u, V... v) noexcept;
+
+         template <class... V>
+         static XSIMD_INLINE register_type make_register(detail::index_sequence<>, V... v) noexcept;
+     };
+
+ #if __cplusplus < 201703L
+     template <class T, class A>
+     constexpr std::size_t batch_bool<T, A>::size;
+ #endif
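Review note: batch_bool is what the comparison operators on batch return, one predicate per lane. A short sketch combining the two classes; xsimd::select is assumed from the vendored xsimd_api.hpp (not part of the packaged file):

    #include <xsimd/xsimd.hpp>

    // Clamp negative lanes to zero (a SIMD ReLU).
    xsimd::batch<float> relu(xsimd::batch<float> const& x)
    {
        xsimd::batch_bool<float> positive = x > 0.f; // per-lane predicate
        return xsimd::select(positive, x, xsimd::batch<float>(0.f));
    }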
+
+     /**
+      * @brief batch of complex values.
+      *
+      * Abstract representation of an SIMD register for complex values.
+      *
+      * @tparam T the type of the underlying values.
+      * @tparam A the architecture this batch is tied to.
+      **/
+     template <class T, class A>
+     class batch<std::complex<T>, A>
+     {
+     public:
+         using value_type = std::complex<T>; ///< Type of the complex elements within this batch.
+         using real_batch = batch<T, A>; ///< Type of the scalar elements within this batch.
+         using arch_type = A; ///< SIMD Architecture abstracted by this batch.
+         using batch_bool_type = batch_bool<T, A>; ///< Associated batch type used to represent logical operations on this batch.
+
+         static constexpr std::size_t size = real_batch::size; ///< Number of complex elements in this batch.
+
+         // constructors
+         XSIMD_INLINE batch() = default; ///< Create a batch initialized with undefined values.
+         XSIMD_INLINE batch(value_type const& val) noexcept;
+         XSIMD_INLINE batch(real_batch const& real, real_batch const& imag) noexcept;
+
+         XSIMD_INLINE batch(real_batch const& real) noexcept;
+         XSIMD_INLINE batch(T val) noexcept;
+         template <class... Ts>
+         XSIMD_INLINE batch(value_type val0, value_type val1, Ts... vals) noexcept;
+         XSIMD_INLINE explicit batch(batch_bool_type const& b) noexcept;
+
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch broadcast(U val) noexcept;
+
+         // memory operators
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_aligned(const T* real_src, const T* imag_src = nullptr) noexcept;
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_unaligned(const T* real_src, const T* imag_src = nullptr) noexcept;
+         XSIMD_INLINE void store_aligned(T* real_dst, T* imag_dst) const noexcept;
+         XSIMD_INLINE void store_unaligned(T* real_dst, T* imag_dst) const noexcept;
+
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_aligned(const value_type* src) noexcept;
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_unaligned(const value_type* src) noexcept;
+         XSIMD_INLINE void store_aligned(value_type* dst) const noexcept;
+         XSIMD_INLINE void store_unaligned(value_type* dst) const noexcept;
+
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load(U const* mem, aligned_mode) noexcept;
+         template <class U>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load(U const* mem, unaligned_mode) noexcept;
+         template <class U>
+         XSIMD_INLINE void store(U* mem, aligned_mode) const noexcept;
+         template <class U>
+         XSIMD_INLINE void store(U* mem, unaligned_mode) const noexcept;
+
+         XSIMD_INLINE real_batch real() const noexcept;
+         XSIMD_INLINE real_batch imag() const noexcept;
+
+         XSIMD_INLINE value_type get(std::size_t i) const noexcept;
+
+         XSIMD_INLINE value_type first() const noexcept;
+
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
+         // xtl-related methods
+         template <bool i3ec>
+         XSIMD_INLINE batch(xtl::xcomplex<T, T, i3ec> const& val) noexcept;
+         template <bool i3ec, class... Ts>
+         XSIMD_INLINE batch(xtl::xcomplex<T, T, i3ec> val0, xtl::xcomplex<T, T, i3ec> val1, Ts... vals) noexcept;
+
+         template <bool i3ec>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_aligned(const xtl::xcomplex<T, T, i3ec>* src) noexcept;
+         template <bool i3ec>
+         XSIMD_NO_DISCARD static XSIMD_INLINE batch load_unaligned(const xtl::xcomplex<T, T, i3ec>* src) noexcept;
+         template <bool i3ec>
+         XSIMD_INLINE void store_aligned(xtl::xcomplex<T, T, i3ec>* dst) const noexcept;
+         template <bool i3ec>
+         XSIMD_INLINE void store_unaligned(xtl::xcomplex<T, T, i3ec>* dst) const noexcept;
+ #endif
+
+         // comparison operators
+         XSIMD_INLINE batch_bool<T, A> operator==(batch const& other) const noexcept;
+         XSIMD_INLINE batch_bool<T, A> operator!=(batch const& other) const noexcept;
+
+         // Update operators
+         XSIMD_INLINE batch& operator+=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator-=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator*=(batch const& other) noexcept;
+         XSIMD_INLINE batch& operator/=(batch const& other) noexcept;
+
+         // incr/decr operators
+         XSIMD_INLINE batch& operator++() noexcept;
+         XSIMD_INLINE batch& operator--() noexcept;
+         XSIMD_INLINE batch operator++(int) noexcept;
+         XSIMD_INLINE batch operator--(int) noexcept;
+
+         // unary operators
+         XSIMD_INLINE batch_bool_type operator!() const noexcept;
+         XSIMD_INLINE batch operator~() const noexcept;
+         XSIMD_INLINE batch operator-() const noexcept;
+         XSIMD_INLINE batch operator+() const noexcept;
+
+         // arithmetic operators. They are defined as friend to enable automatic
+         // conversion of parameters from scalar to batch
+
+         /** Shorthand for xsimd::add() */
+         friend XSIMD_INLINE batch operator+(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) += other;
+         }
+
+         /** Shorthand for xsimd::sub() */
+         friend XSIMD_INLINE batch operator-(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) -= other;
+         }
+
+         /** Shorthand for xsimd::mul() */
+         friend XSIMD_INLINE batch operator*(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) *= other;
+         }
+
+         /** Shorthand for xsimd::div() */
+         friend XSIMD_INLINE batch operator/(batch const& self, batch const& other) noexcept
+         {
+             return batch(self) /= other;
+         }
+
+     private:
+         real_batch m_real;
+         real_batch m_imag;
+     };
+
+ #if __cplusplus < 201703L
+     template <class T, class A>
+     constexpr std::size_t batch<std::complex<T>, A>::size;
+ #endif
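Review note: the complex specialization stores real and imaginary parts in two separate real batches (m_real and m_imag above), which is why it offers both interleaved std::complex loads and the split two-pointer loads. A sketch (not part of the packaged file):

    #include <complex>
    #include <xsimd/xsimd.hpp>

    using cbatch = xsimd::batch<std::complex<float>>;

    // Structure-of-arrays load through the two-pointer overload declared above.
    cbatch load_split(const float* re, const float* im)
    {
        return cbatch::load_unaligned(re, im);
    }

    // |z|^2 per lane, via the real()/imag() accessors.
    xsimd::batch<float> magnitude_squared(cbatch const& z)
    {
        return z.real() * z.real() + z.imag() * z.imag();
    }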
+
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
+     template <typename T, bool i3ec, typename A>
+     struct batch<xtl::xcomplex<T, T, i3ec>, A>
+     {
+         static_assert(std::is_same<T, void>::value,
+                       "Please use batch<std::complex<T>, A> initialized from xtl::xcomplex instead");
+     };
+ #endif
+ }
+
+ #include "../arch/xsimd_isa.hpp"
+ #include "./xsimd_batch_constant.hpp"
+ #include "./xsimd_traits.hpp"
+
+ namespace xsimd
+ {
+
+     /**
+      * Create a batch with all elements initialized to \c val.
+      */
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>::batch(T val) noexcept
+         : types::simd_register<T, A>(kernel::broadcast<A>(val, A {}))
+     {
+         detail::static_check_supported_config<T, A>();
+     }
+
+     /**
+      * Create a batch with elements initialized from \c val0, \c val1, \c vals...
+      * There must be exactly \c size elements in total.
+      */
+     template <class T, class A>
+     template <class... Ts>
+     XSIMD_INLINE batch<T, A>::batch(T val0, T val1, Ts... vals) noexcept
+         : batch(kernel::set<A>(batch {}, A {}, val0, val1, static_cast<T>(vals)...))
+     {
+         detail::static_check_supported_config<T, A>();
+         static_assert(sizeof...(Ts) + 2 == size, "The constructor requires as many arguments as batch elements.");
+     }
+
+     /**
+      * Converts a \c batch_bool to a \c batch where each element is
+      * set to 1 (resp. 0) if the corresponding element is `true`
+      * (resp. `false`).
+      */
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>::batch(batch_bool<T, A> const& b) noexcept
+         : batch(kernel::from_bool(b, A {}))
+     {
+     }
+
+     /**
+      * Wraps a compatible native simd register as a \c batch. This is generally not needed but
+      * becomes handy when doing architecture-specific operations.
+      */
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>::batch(register_type reg) noexcept
+         : types::simd_register<T, A>({ reg })
+     {
+         detail::static_check_supported_config<T, A>();
+     }
+
+     /**
+      * Equivalent to batch::batch(T val).
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_NO_DISCARD XSIMD_INLINE batch<T, A> batch<T, A>::broadcast(U val) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return batch(static_cast<T>(val));
+     }
+
+     /**************************
+      * batch memory operators *
+      **************************/
+
+     /**
+      * Copy content of this batch to the buffer \c mem. The
+      * memory needs to be aligned.
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE void batch<T, A>::store_aligned(U* mem) const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         assert(((reinterpret_cast<uintptr_t>(mem) % A::alignment()) == 0)
+                && "store location is not properly aligned");
+         kernel::store_aligned<A>(mem, *this, A {});
+     }
+
+     /**
+      * Copy content of this batch to the buffer \c mem. The
+      * memory does not need to be aligned.
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE void batch<T, A>::store_unaligned(U* mem) const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         kernel::store_unaligned<A>(mem, *this, A {});
+     }
+
+     /**
+      * Equivalent to batch::store_aligned()
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE void batch<T, A>::store(U* mem, aligned_mode) const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return store_aligned(mem);
+     }
+
+     /**
+      * Equivalent to batch::store_unaligned()
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE void batch<T, A>::store(U* mem, unaligned_mode) const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return store_unaligned(mem);
+     }
+
+     /**
+      * Loading from aligned memory. May involve a conversion if \c U is different
+      * from \c T.
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE batch<T, A> batch<T, A>::load_aligned(U const* mem) noexcept
+     {
+         assert(((reinterpret_cast<uintptr_t>(mem) % A::alignment()) == 0)
+                && "loaded pointer is not properly aligned");
+         detail::static_check_supported_config<T, A>();
+         return kernel::load_aligned<A>(mem, kernel::convert<T> {}, A {});
+     }
+
+     /**
+      * Loading from unaligned memory. May involve a conversion if \c U is different
+      * from \c T.
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE batch<T, A> batch<T, A>::load_unaligned(U const* mem) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return kernel::load_unaligned<A>(mem, kernel::convert<T> {}, A {});
+     }
+
+     /**
+      * Equivalent to batch::load_aligned()
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE batch<T, A> batch<T, A>::load(U const* mem, aligned_mode) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return load_aligned(mem);
+     }
+
+     /**
+      * Equivalent to batch::load_unaligned()
+      */
+     template <class T, class A>
+     template <class U>
+     XSIMD_INLINE batch<T, A> batch<T, A>::load(U const* mem, unaligned_mode) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return load_unaligned(mem);
+     }
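Review note: store_aligned and load_aligned assert on the pointer's alignment, so aligned buffers must come from an allocator honoring A::alignment(). A sketch using the aligned_allocator vendored in this same package (xsimd_aligned_allocator.hpp, file 116 above); that its alignment template parameter defaults to the current architecture's alignment is an assumption here (not part of the packaged file):

    #include <cstddef>
    #include <vector>
    #include <xsimd/xsimd.hpp>

    // Triple every element in place; assumes buf.size() is a multiple of the batch size.
    void triple(std::vector<double, xsimd::aligned_allocator<double>>& buf)
    {
        using b_type = xsimd::batch<double>;
        for (std::size_t i = 0; i < buf.size(); i += b_type::size)
        {
            b_type v = b_type::load_aligned(&buf[i]); // would assert on a misaligned pointer
            (v * 3.0).store_aligned(&buf[i]);
        }
    }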
+
+     /**
+      * Create a new batch gathering elements starting at address \c src and
+      * offset by each element in \c index.
+      * If \c T is not of the same size as \c U, a \c static_cast is performed
+      * at element gather time.
+      */
+     template <class T, class A>
+     template <typename U, typename V>
+     XSIMD_INLINE batch<T, A> batch<T, A>::gather(U const* src, batch<V, A> const& index) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         static_assert(std::is_convertible<T, U>::value, "Can't convert from src to this batch's type!");
+         return kernel::gather(batch {}, src, index, A {});
+     }
+
+     /**
+      * Scatter elements from this batch into addresses starting at \c dst
+      * and offset by each element in \c index.
+      * If \c T is not of the same size as \c U, a \c static_cast is performed
+      * at element scatter time.
+      */
+     template <class T, class A>
+     template <class U, class V>
+     XSIMD_INLINE void batch<T, A>::scatter(U* dst, batch<V, A> const& index) const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         static_assert(std::is_convertible<T, U>::value, "Can't convert from this batch's type to dst!");
+         kernel::scatter<A>(*this, dst, index, A {});
+     }
+
+     /**
+      * Retrieve the \c i th scalar element in this batch.
+      *
+      * \c warning This is very inefficient and should only be used for debugging purposes.
+      */
+     template <class T, class A>
+     XSIMD_INLINE T batch<T, A>::get(std::size_t i) const noexcept
+     {
+         return kernel::get(*this, i, A {});
+     }
+
+     /**
+      * Retrieve the first scalar element in this batch.
+      */
+     template <class T, class A>
+     XSIMD_INLINE T batch<T, A>::first() const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return kernel::first(*this, A {});
+     }
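Review note: as the comments above state, gather and scatter indices are element offsets from the base pointer. A sketch with hypothetical fixed indices, assuming a 4-lane architecture such as NEON or SSE for 32-bit types (not part of the packaged file):

    #include <cstdint>
    #include <xsimd/xsimd.hpp>

    // Read four floats in reverse order, then write them back to the same slots.
    void permute_demo(const float* src, float* dst)
    {
        xsimd::batch<int32_t> idx(3, 2, 1, 0);          // hypothetical index pattern
        auto v = xsimd::batch<float>::gather(src, idx); // {src[3], src[2], src[1], src[0]}
        v.scatter(dst, idx);                            // lane i is written to dst[index lane i]
    }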
+
+     /******************************
+      * batch comparison operators *
+      ******************************/
+     namespace details
+     {
+         /**
+          * Shorthand for xsimd::eq()
+          */
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other) noexcept
+         {
+             detail::static_check_supported_config<T, A>();
+             return kernel::eq<A>(self, other, A {});
+         }
+
+         /**
+          * Shorthand for xsimd::neq()
+          */
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other) noexcept
+         {
+             detail::static_check_supported_config<T, A>();
+             return kernel::neq<A>(self, other, A {});
+         }
+
+         /**
+          * Shorthand for xsimd::ge()
+          */
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other) noexcept
+         {
+             detail::static_check_supported_config<T, A>();
+             return kernel::ge<A>(self, other, A {});
+         }
+
+         /**
+          * Shorthand for xsimd::le()
+          */
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other) noexcept
+         {
+             detail::static_check_supported_config<T, A>();
+             return kernel::le<A>(self, other, A {});
+         }
+
+         /**
+          * Shorthand for xsimd::gt()
+          */
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other) noexcept
+         {
+             detail::static_check_supported_config<T, A>();
+             return kernel::gt<A>(self, other, A {});
+         }
+
+         /**
+          * Shorthand for xsimd::lt()
+          */
+         template <class T, class A>
+         XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& self, batch<T, A> const& other) noexcept
+         {
+             detail::static_check_supported_config<T, A>();
+             return kernel::lt<A>(self, other, A {});
+         }
+     }
+
+     /**************************
+      * batch update operators *
+      **************************/
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator+=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::add<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator-=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::sub<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator*=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::mul<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator/=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::div<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& types::integral_only_operators<T, A>::operator%=(batch<T, A> const& other) noexcept
+     {
+         ::xsimd::detail::static_check_supported_config<T, A>();
+         return *static_cast<batch<T, A>*>(this) = kernel::mod<A>(*static_cast<batch<T, A>*>(this), other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator&=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::bitwise_and<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator|=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::bitwise_or<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator^=(batch<T, A> const& other) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this = kernel::bitwise_xor<A>(*this, other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& types::integral_only_operators<T, A>::operator>>=(batch<T, A> const& other) noexcept
+     {
+         ::xsimd::detail::static_check_supported_config<T, A>();
+         return *static_cast<batch<T, A>*>(this) = kernel::bitwise_rshift<A>(*static_cast<batch<T, A>*>(this), other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& types::integral_only_operators<T, A>::operator<<=(batch<T, A> const& other) noexcept
+     {
+         ::xsimd::detail::static_check_supported_config<T, A>();
+         return *static_cast<batch<T, A>*>(this) = kernel::bitwise_lshift<A>(*static_cast<batch<T, A>*>(this), other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& types::integral_only_operators<T, A>::operator>>=(int32_t other) noexcept
+     {
+         ::xsimd::detail::static_check_supported_config<T, A>();
+         return *static_cast<batch<T, A>*>(this) = kernel::bitwise_rshift<A>(*static_cast<batch<T, A>*>(this), other, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& types::integral_only_operators<T, A>::operator<<=(int32_t other) noexcept
+     {
+         ::xsimd::detail::static_check_supported_config<T, A>();
+         return *static_cast<batch<T, A>*>(this) = kernel::bitwise_lshift<A>(*static_cast<batch<T, A>*>(this), other, A {});
+     }
+
+     /*****************************
+      * batch incr/decr operators *
+      *****************************/
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator++() noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return operator+=(1);
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A>& batch<T, A>::operator--() noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return operator-=(1);
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::operator++(int) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         batch<T, A> copy(*this);
+         operator+=(1);
+         return copy;
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::operator--(int) noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         batch copy(*this);
+         operator-=(1);
+         return copy;
+     }
+
+     /*************************
+      * batch unary operators *
+      *************************/
+
+     template <class T, class A>
+     XSIMD_INLINE batch_bool<T, A> batch<T, A>::operator!() const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return kernel::eq<A>(*this, batch(0), A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::operator~() const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return kernel::bitwise_not<A>(*this, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::operator-() const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return kernel::neg<A>(*this, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::operator+() const noexcept
+     {
+         detail::static_check_supported_config<T, A>();
+         return *this;
+     }
+
+     /************************
+      * batch private method *
+      ************************/
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::logical_and(batch<T, A> const& other) const noexcept
+     {
+         return kernel::logical_and<A>(*this, other, A());
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch<T, A> batch<T, A>::logical_or(batch<T, A> const& other) const noexcept
+     {
+         return kernel::logical_or<A>(*this, other, A());
+     }
+
+     /***************************
+      * batch_bool constructors *
+      ***************************/
+
+     template <class T, class A>
+     XSIMD_INLINE batch_bool<T, A>::batch_bool(register_type reg) noexcept
+         : types::get_bool_simd_register_t<T, A>({ reg })
+     {
+     }
+
+     template <class T, class A>
+     template <class... Ts>
+     XSIMD_INLINE batch_bool<T, A>::batch_bool(bool val0, bool val1, Ts... vals) noexcept
+         : batch_bool(kernel::set<A>(batch_bool {}, A {}, val0, val1, static_cast<bool>(vals)...))
+     {
+         static_assert(sizeof...(Ts) + 2 == size, "The constructor requires as many arguments as batch elements.");
+     }
+
+     /*******************************
+      * batch_bool memory operators *
+      *******************************/
+
+     template <class T, class A>
+     XSIMD_INLINE void batch_bool<T, A>::store_aligned(bool* mem) const noexcept
+     {
+         kernel::store(*this, mem, A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE void batch_bool<T, A>::store_unaligned(bool* mem) const noexcept
+     {
+         store_aligned(mem);
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::load_aligned(bool const* mem) noexcept
+     {
+         return kernel::load_aligned<A>(mem, batch_bool<T, A>(), A {});
+     }
+
+     template <class T, class A>
+     XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::load_unaligned(bool const* mem) noexcept
+     {
+         return kernel::load_unaligned<A>(mem, batch_bool<T, A>(), A {});
+     }
+
+     /**
+      * Extract a scalar mask representation from this @c batch_bool.
+      *
+      * @return bit mask
+      */
+     template <class T, class A>
+     XSIMD_INLINE uint64_t batch_bool<T, A>::mask() const noexcept
+     {
+         return kernel::mask(*this, A {});
+     }
+
+     /**
+      * Build a @c batch_bool from a scalar bit mask representation.
+      *
+      * @param mask bit mask
+      */
+     template <class T, class A>
+     XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::from_mask(uint64_t mask) noexcept
+     {
+         return kernel::from_mask(batch_bool<T, A>(), mask, A {});
+     }
1023
+
1024
+ template <class T, class A>
1025
+ XSIMD_INLINE bool batch_bool<T, A>::get(std::size_t i) const noexcept
1026
+ {
1027
+ return kernel::get(*this, i, A {});
1028
+ }
1029
+
1030
+ template <class T, class A>
1031
+ XSIMD_INLINE bool batch_bool<T, A>::first() const noexcept
1032
+ {
1033
+ detail::static_check_supported_config<T, A>();
1034
+ return kernel::first(*this, A {});
1035
+ }
1036
+
1037
+ /***********************************
1038
+ * batch_bool comparison operators *
1039
+ ***********************************/
1040
+
1041
+ template <class T, class A>
1042
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator==(batch_bool<T, A> const& other) const noexcept
1043
+ {
1044
+ return kernel::eq<A>(*this, other, A {}).data;
1045
+ }
1046
+
1047
+ template <class T, class A>
1048
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator!=(batch_bool<T, A> const& other) const noexcept
1049
+ {
1050
+ return kernel::neq<A>(*this, other, A {}).data;
1051
+ }
1052
+
1053
+ /********************************
1054
+ * batch_bool logical operators *
1055
+ ********************************/
1056
+
1057
+ template <class T, class A>
1058
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator~() const noexcept
1059
+ {
1060
+ return kernel::bitwise_not<A>(*this, A {}).data;
1061
+ }
1062
+
1063
+ template <class T, class A>
1064
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator!() const noexcept
1065
+ {
1066
+ return operator==(batch_bool(false));
1067
+ }
1068
+
1069
+ template <class T, class A>
1070
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator&(batch_bool<T, A> const& other) const noexcept
1071
+ {
1072
+ return kernel::bitwise_and<A>(*this, other, A {}).data;
1073
+ }
1074
+
1075
+ template <class T, class A>
1076
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator|(batch_bool<T, A> const& other) const noexcept
1077
+ {
1078
+ return kernel::bitwise_or<A>(*this, other, A {}).data;
1079
+ }
1080
+
1081
+ template <class T, class A>
1082
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator^(batch_bool<T, A> const& other) const noexcept
1083
+ {
1084
+ return kernel::bitwise_xor<A>(*this, other, A {}).data;
1085
+ }
1086
+
1087
+ template <class T, class A>
1088
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator&&(batch_bool const& other) const noexcept
1089
+ {
1090
+ return operator&(other);
1091
+ }
1092
+
1093
+ template <class T, class A>
1094
+ XSIMD_INLINE batch_bool<T, A> batch_bool<T, A>::operator||(batch_bool const& other) const noexcept
1095
+ {
1096
+ return operator|(other);
1097
+ }
1098
+
1099
+ /******************************
1100
+ * batch_bool private methods *
1101
+ ******************************/
1102
+
1103
+ template <class T, class A>
1104
+ XSIMD_INLINE batch_bool<T, A>::batch_bool(bool val) noexcept
1105
+ : base_type { make_register(detail::make_index_sequence<size - 1>(), val) }
1106
+ {
1107
+ }
1108
+
1109
+ template <class T, class A>
1110
+ template <class U, class... V, size_t I, size_t... Is>
1111
+ XSIMD_INLINE auto batch_bool<T, A>::make_register(detail::index_sequence<I, Is...>, U u, V... v) noexcept -> register_type
1112
+ {
1113
+ return make_register(detail::index_sequence<Is...>(), u, u, v...);
1114
+ }
1115
+
1116
+ template <class T, class A>
1117
+ template <class... V>
1118
+ XSIMD_INLINE auto batch_bool<T, A>::make_register(detail::index_sequence<>, V... v) noexcept -> register_type
1119
+ {
1120
+ return kernel::set<A>(batch_bool<T, A>(), A {}, v...).data;
1121
+ }
1122
+
1123
+ /*******************************
1124
+ * batch<complex> constructors *
1125
+ *******************************/
1126
+
1127
+ template <class T, class A>
1128
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(value_type const& val) noexcept
1129
+ : m_real(val.real())
1130
+ , m_imag(val.imag())
1131
+ {
1132
+ }
1133
+
1134
+ template <class T, class A>
1135
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(real_batch const& real, real_batch const& imag) noexcept
1136
+ : m_real(real)
1137
+ , m_imag(imag)
1138
+ {
1139
+ }
1140
+
1141
+ template <class T, class A>
1142
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(real_batch const& real) noexcept
1143
+ : m_real(real)
1144
+ , m_imag(0)
1145
+ {
1146
+ }
1147
+
1148
+ template <class T, class A>
1149
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(T val) noexcept
1150
+ : m_real(val)
1151
+ , m_imag(0)
1152
+ {
1153
+ }
1154
+
1155
+ template <class T, class A>
1156
+ template <class... Ts>
1157
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(value_type val0, value_type val1, Ts... vals) noexcept
1158
+ : batch(kernel::set<A>(batch {}, A {}, val0, val1, static_cast<value_type>(vals)...))
1159
+ {
1160
+ static_assert(sizeof...(Ts) + 2 == size, "as many arguments as batch elements");
1161
+ }
1162
+
1163
+ template <class T, class A>
1164
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(batch_bool_type const& b) noexcept
1165
+ : m_real(b)
1166
+ , m_imag(0)
1167
+ {
1168
+ }
1169
+
1170
+ template <class T, class A>
1171
+ template <class U>
1172
+ XSIMD_NO_DISCARD XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::broadcast(U val) noexcept
1173
+ {
1174
+ return batch(static_cast<std::complex<T>>(val));
1175
+ }
1176
+
1177
+ /***********************************
1178
+ * batch<complex> memory operators *
1179
+ ***********************************/
1180
+
1181
+ template <class T, class A>
1182
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load_aligned(const T* real_src, const T* imag_src) noexcept
1183
+ {
1184
+ return { batch<T, A>::load_aligned(real_src), imag_src ? batch<T, A>::load_aligned(imag_src) : batch<T, A>(0) };
1185
+ }
1186
+ template <class T, class A>
1187
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load_unaligned(const T* real_src, const T* imag_src) noexcept
1188
+ {
1189
+ return { batch<T, A>::load_unaligned(real_src), imag_src ? batch<T, A>::load_unaligned(imag_src) : batch<T, A>(0) };
1190
+ }
1191
+
1192
+ template <class T, class A>
1193
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load_aligned(const value_type* src) noexcept
1194
+ {
1195
+ assert(((reinterpret_cast<uintptr_t>(src) % A::alignment()) == 0)
1196
+ && "loaded pointer is not properly aligned");
1197
+ return kernel::load_complex_aligned<A>(src, kernel::convert<value_type> {}, A {});
1198
+ }
1199
+
1200
+ template <class T, class A>
1201
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load_unaligned(const value_type* src) noexcept
1202
+ {
1203
+ return kernel::load_complex_unaligned<A>(src, kernel::convert<value_type> {}, A {});
1204
+ }
1205
+
1206
+ template <class T, class A>
1207
+ XSIMD_INLINE void batch<std::complex<T>, A>::store_aligned(value_type* dst) const noexcept
1208
+ {
1209
+ assert(((reinterpret_cast<uintptr_t>(dst) % A::alignment()) == 0)
1210
+ && "store location is not properly aligned");
1211
+ return kernel::store_complex_aligned(dst, *this, A {});
1212
+ }
1213
+
1214
+ template <class T, class A>
1215
+ XSIMD_INLINE void batch<std::complex<T>, A>::store_unaligned(value_type* dst) const noexcept
1216
+ {
1217
+ return kernel::store_complex_unaligned(dst, *this, A {});
1218
+ }
1219
+
1220
+ template <class T, class A>
1221
+ XSIMD_INLINE void batch<std::complex<T>, A>::store_aligned(T* real_dst, T* imag_dst) const noexcept
1222
+ {
1223
+ m_real.store_aligned(real_dst);
1224
+ m_imag.store_aligned(imag_dst);
1225
+ }
1226
+
1227
+ template <class T, class A>
1228
+ XSIMD_INLINE void batch<std::complex<T>, A>::store_unaligned(T* real_dst, T* imag_dst) const noexcept
1229
+ {
1230
+ m_real.store_unaligned(real_dst);
1231
+ m_imag.store_unaligned(imag_dst);
1232
+ }
1233
+
1234
+ template <class T, class A>
1235
+ template <class U>
1236
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load(U const* mem, aligned_mode) noexcept
1237
+ {
1238
+ return load_aligned(mem);
1239
+ }
1240
+
1241
+ template <class T, class A>
1242
+ template <class U>
1243
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load(U const* mem, unaligned_mode) noexcept
1244
+ {
1245
+ return load_unaligned(mem);
1246
+ }
1247
+
1248
+ template <class T, class A>
1249
+ template <class U>
1250
+ XSIMD_INLINE void batch<std::complex<T>, A>::store(U* mem, aligned_mode) const noexcept
1251
+ {
1252
+ return store_aligned(mem);
1253
+ }
1254
+
1255
+ template <class T, class A>
1256
+ template <class U>
1257
+ XSIMD_INLINE void batch<std::complex<T>, A>::store(U* mem, unaligned_mode) const noexcept
1258
+ {
1259
+ return store_unaligned(mem);
1260
+ }
1261
+
1262
+ template <class T, class A>
1263
+ XSIMD_INLINE auto batch<std::complex<T>, A>::real() const noexcept -> real_batch
1264
+ {
1265
+ return m_real;
1266
+ }
1267
+
1268
+ template <class T, class A>
1269
+ XSIMD_INLINE auto batch<std::complex<T>, A>::imag() const noexcept -> real_batch
1270
+ {
1271
+ return m_imag;
1272
+ }
1273
+
1274
+ template <class T, class A>
1275
+ XSIMD_INLINE auto batch<std::complex<T>, A>::get(std::size_t i) const noexcept -> value_type
1276
+ {
1277
+ return kernel::get(*this, i, A {});
1278
+ }
1279
+
1280
+ template <class T, class A>
1281
+ XSIMD_INLINE auto batch<std::complex<T>, A>::first() const noexcept -> value_type
1282
+ {
1283
+ detail::static_check_supported_config<std::complex<T>, A>();
1284
+ return kernel::first(*this, A {});
1285
+ }
1286
+
1287
+ /**************************************
1288
+ * batch<complex> xtl-related methods *
1289
+ **************************************/
1290
+
1291
+ #ifdef XSIMD_ENABLE_XTL_COMPLEX
1292
+
1293
+ template <class T, class A>
1294
+ template <bool i3ec>
1295
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(xtl::xcomplex<T, T, i3ec> const& val) noexcept
1296
+ : m_real(val.real())
1297
+ , m_imag(val.imag())
1298
+ {
1299
+ }
1300
+
1301
+ template <class T, class A>
1302
+ template <bool i3ec, class... Ts>
1303
+ XSIMD_INLINE batch<std::complex<T>, A>::batch(xtl::xcomplex<T, T, i3ec> val0, xtl::xcomplex<T, T, i3ec> val1, Ts... vals) noexcept
1304
+ : batch(kernel::set<A>(batch {}, A {}, val0, val1, static_cast<xtl::xcomplex<T, T, i3ec>>(vals)...))
1305
+ {
1306
+ static_assert(sizeof...(Ts) + 2 == size, "as many arguments as batch elements");
1307
+ }
1308
+
1309
+ // Memory layout of an xcomplex and std::complex are the same when xcomplex
1310
+ // stores values and not reference. Unfortunately, this breaks strict
1311
+ // aliasing...
1312
+
1313
+ template <class T, class A>
1314
+ template <bool i3ec>
1315
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load_aligned(const xtl::xcomplex<T, T, i3ec>* src) noexcept
1316
+ {
1317
+ return load_aligned(reinterpret_cast<std::complex<T> const*>(src));
1318
+ }
1319
+
1320
+ template <class T, class A>
1321
+ template <bool i3ec>
1322
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::load_unaligned(const xtl::xcomplex<T, T, i3ec>* src) noexcept
1323
+ {
1324
+ return load_unaligned(reinterpret_cast<std::complex<T> const*>(src));
1325
+ }
1326
+
1327
+ template <class T, class A>
1328
+ template <bool i3ec>
1329
+ XSIMD_INLINE void batch<std::complex<T>, A>::store_aligned(xtl::xcomplex<T, T, i3ec>* dst) const noexcept
1330
+ {
1331
+ store_aligned(reinterpret_cast<std::complex<T>*>(dst));
1332
+ }
1333
+
1334
+ template <class T, class A>
1335
+ template <bool i3ec>
1336
+ XSIMD_INLINE void batch<std::complex<T>, A>::store_unaligned(xtl::xcomplex<T, T, i3ec>* dst) const noexcept
1337
+ {
1338
+ store_unaligned(reinterpret_cast<std::complex<T>*>(dst));
1339
+ }
1340
+
1341
+ #endif
1342
+
1343
+ /***************************************
1344
+ * batch<complex> comparison operators *
1345
+ ***************************************/
1346
+
1347
+ template <class T, class A>
1348
+ XSIMD_INLINE batch_bool<T, A> batch<std::complex<T>, A>::operator==(batch const& other) const noexcept
1349
+ {
1350
+ return m_real == other.m_real && m_imag == other.m_imag;
1351
+ }
1352
+
1353
+ template <class T, class A>
1354
+ XSIMD_INLINE batch_bool<T, A> batch<std::complex<T>, A>::operator!=(batch const& other) const noexcept
1355
+ {
1356
+ return m_real != other.m_real || m_imag != other.m_imag;
1357
+ }
1358
+
1359
+ /***********************************
1360
+ * batch<complex> update operators *
1361
+ ***********************************/
1362
+
1363
+ template <class T, class A>
1364
+ XSIMD_INLINE batch<std::complex<T>, A>& batch<std::complex<T>, A>::operator+=(batch const& other) noexcept
1365
+ {
1366
+ m_real += other.m_real;
1367
+ m_imag += other.m_imag;
1368
+ return *this;
1369
+ }
1370
+
1371
+ template <class T, class A>
1372
+ XSIMD_INLINE batch<std::complex<T>, A>& batch<std::complex<T>, A>::operator-=(batch const& other) noexcept
1373
+ {
1374
+ m_real -= other.m_real;
1375
+ m_imag -= other.m_imag;
1376
+ return *this;
1377
+ }
1378
+
1379
+ template <class T, class A>
1380
+ XSIMD_INLINE batch<std::complex<T>, A>& batch<std::complex<T>, A>::operator*=(batch const& other) noexcept
1381
+ {
1382
+ real_batch new_real = fms(real(), other.real(), imag() * other.imag());
1383
+ real_batch new_imag = fma(real(), other.imag(), imag() * other.real());
1384
+ m_real = new_real;
1385
+ m_imag = new_imag;
1386
+ return *this;
1387
+ }
1388
+
1389
+ template <class T, class A>
1390
+ XSIMD_INLINE batch<std::complex<T>, A>& batch<std::complex<T>, A>::operator/=(batch const& other) noexcept
1391
+ {
1392
+ real_batch a = real();
1393
+ real_batch b = imag();
1394
+ real_batch c = other.real();
1395
+ real_batch d = other.imag();
1396
+ real_batch e = c * c + d * d;
1397
+ m_real = (c * a + d * b) / e;
1398
+ m_imag = (c * b - d * a) / e;
1399
+ return *this;
1400
+ }
1401
+
1402
+ /**************************************
1403
+ * batch<complex> incr/decr operators *
1404
+ **************************************/
1405
+
1406
+ template <class T, class A>
1407
+ XSIMD_INLINE batch<std::complex<T>, A>& batch<std::complex<T>, A>::operator++() noexcept
1408
+ {
1409
+ return operator+=(1);
1410
+ }
1411
+
1412
+ template <class T, class A>
1413
+ XSIMD_INLINE batch<std::complex<T>, A>& batch<std::complex<T>, A>::operator--() noexcept
1414
+ {
1415
+ return operator-=(1);
1416
+ }
1417
+
1418
+ template <class T, class A>
1419
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::operator++(int) noexcept
1420
+ {
1421
+ batch copy(*this);
1422
+ operator+=(1);
1423
+ return copy;
1424
+ }
1425
+
1426
+ template <class T, class A>
1427
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::operator--(int) noexcept
1428
+ {
1429
+ batch copy(*this);
1430
+ operator-=(1);
1431
+ return copy;
1432
+ }
1433
+
1434
+ /**********************************
1435
+ * batch<complex> unary operators *
1436
+ **********************************/
1437
+
1438
+ template <class T, class A>
1439
+ XSIMD_INLINE batch_bool<T, A> batch<std::complex<T>, A>::operator!() const noexcept
1440
+ {
1441
+ return operator==(batch(0));
1442
+ }
1443
+
1444
+ template <class T, class A>
1445
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::operator~() const noexcept
1446
+ {
1447
+ return { ~m_real, ~m_imag };
1448
+ }
1449
+
1450
+ template <class T, class A>
1451
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::operator-() const noexcept
1452
+ {
1453
+ return { -m_real, -m_imag };
1454
+ }
1455
+
1456
+ template <class T, class A>
1457
+ XSIMD_INLINE batch<std::complex<T>, A> batch<std::complex<T>, A>::operator+() const noexcept
1458
+ {
1459
+ return { +m_real, +m_imag };
1460
+ }
1461
+
1462
+ /**********************************
1463
+ * size type aliases
1464
+ **********************************/
1465
+
1466
+ namespace details
1467
+ {
1468
+ template <typename T, std::size_t N, class ArchList>
1469
+ struct sized_batch;
1470
+
1471
+ template <typename T, std::size_t N>
1472
+ struct sized_batch<T, N, xsimd::arch_list<>>
1473
+ {
1474
+ using type = void;
1475
+ };
1476
+
1477
+ template <typename T, class Arch, bool BatchExists = xsimd::has_simd_register<T, Arch>::value>
1478
+ struct batch_trait;
1479
+
1480
+ template <typename T, class Arch>
1481
+ struct batch_trait<T, Arch, true>
1482
+ {
1483
+ using type = xsimd::batch<T, Arch>;
1484
+ static constexpr std::size_t size = xsimd::batch<T, Arch>::size;
1485
+ };
1486
+
1487
+ template <typename T, class Arch>
1488
+ struct batch_trait<T, Arch, false>
1489
+ {
1490
+ using type = void;
1491
+ static constexpr std::size_t size = 0;
1492
+ };
1493
+
1494
+ template <typename T, std::size_t N, class Arch, class... Archs>
1495
+ struct sized_batch<T, N, xsimd::arch_list<Arch, Archs...>>
1496
+ {
1497
+ using type = typename std::conditional<
1498
+ batch_trait<T, Arch>::size == N,
1499
+ typename batch_trait<T, Arch>::type,
1500
+ typename sized_batch<T, N, xsimd::arch_list<Archs...>>::type>::type;
1501
+ };
1502
+ }
1503
+
1504
+ /**
1505
+ * @brief type utility to select a batch of given type and size
1506
+ *
1507
+ * If one of the available architectures has a native vector type of the
1508
+ * given type and size, sets the @p type member to the appropriate batch
1509
+ * type. Otherwise set its to @p void.
1510
+ *
1511
+ * @tparam T the type of the underlying values.
1512
+ * @tparam N the number of elements of that type in the batch.
1513
+ **/
1514
+ template <typename T, std::size_t N>
1515
+ struct make_sized_batch
1516
+ {
1517
+ using type = typename details::sized_batch<T, N, supported_architectures>::type;
1518
+ };
1519
+
1520
+ template <typename T, std::size_t N>
1521
+ using make_sized_batch_t = typename make_sized_batch<T, N>::type;
1522
+ }
1523
+
1524
+ #endif