sequenzo-0.1.31-cp310-cp310-macosx_10_9_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (299)
  1. _sequenzo_fastcluster.cpython-310-darwin.so +0 -0
  2. sequenzo/__init__.py +349 -0
  3. sequenzo/big_data/__init__.py +12 -0
  4. sequenzo/big_data/clara/__init__.py +26 -0
  5. sequenzo/big_data/clara/clara.py +476 -0
  6. sequenzo/big_data/clara/utils/__init__.py +27 -0
  7. sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
  8. sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
  9. sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
  10. sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
  11. sequenzo/big_data/clara/visualization.py +88 -0
  12. sequenzo/clustering/KMedoids.py +178 -0
  13. sequenzo/clustering/__init__.py +30 -0
  14. sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
  15. sequenzo/clustering/hierarchical_clustering.py +1256 -0
  16. sequenzo/clustering/sequenzo_fastcluster/fastcluster.py +495 -0
  17. sequenzo/clustering/sequenzo_fastcluster/src/fastcluster.cpp +1877 -0
  18. sequenzo/clustering/sequenzo_fastcluster/src/fastcluster_python.cpp +1264 -0
  19. sequenzo/clustering/src/KMedoid.cpp +263 -0
  20. sequenzo/clustering/src/PAM.cpp +237 -0
  21. sequenzo/clustering/src/PAMonce.cpp +265 -0
  22. sequenzo/clustering/src/cluster_quality.cpp +496 -0
  23. sequenzo/clustering/src/cluster_quality.h +128 -0
  24. sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
  25. sequenzo/clustering/src/module.cpp +228 -0
  26. sequenzo/clustering/src/weightedinertia.cpp +111 -0
  27. sequenzo/clustering/utils/__init__.py +27 -0
  28. sequenzo/clustering/utils/disscenter.py +122 -0
  29. sequenzo/data_preprocessing/__init__.py +22 -0
  30. sequenzo/data_preprocessing/helpers.py +303 -0
  31. sequenzo/datasets/__init__.py +41 -0
  32. sequenzo/datasets/biofam.csv +2001 -0
  33. sequenzo/datasets/biofam_child_domain.csv +2001 -0
  34. sequenzo/datasets/biofam_left_domain.csv +2001 -0
  35. sequenzo/datasets/biofam_married_domain.csv +2001 -0
  36. sequenzo/datasets/chinese_colonial_territories.csv +12 -0
  37. sequenzo/datasets/country_co2_emissions.csv +194 -0
  38. sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
  39. sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
  40. sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
  41. sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
  42. sequenzo/datasets/country_gdp_per_capita.csv +194 -0
  43. sequenzo/datasets/dyadic_children.csv +61 -0
  44. sequenzo/datasets/dyadic_parents.csv +61 -0
  45. sequenzo/datasets/mvad.csv +713 -0
  46. sequenzo/datasets/pairfam_activity_by_month.csv +1028 -0
  47. sequenzo/datasets/pairfam_activity_by_year.csv +1028 -0
  48. sequenzo/datasets/pairfam_family_by_month.csv +1028 -0
  49. sequenzo/datasets/pairfam_family_by_year.csv +1028 -0
  50. sequenzo/datasets/political_science_aid_shock.csv +166 -0
  51. sequenzo/datasets/political_science_donor_fragmentation.csv +157 -0
  52. sequenzo/define_sequence_data.py +1400 -0
  53. sequenzo/dissimilarity_measures/__init__.py +31 -0
  54. sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
  55. sequenzo/dissimilarity_measures/get_distance_matrix.py +762 -0
  56. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +246 -0
  57. sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
  58. sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
  59. sequenzo/dissimilarity_measures/src/LCPspellDistance.cpp +215 -0
  60. sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
  61. sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
  62. sequenzo/dissimilarity_measures/src/__init__.py +0 -0
  63. sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
  64. sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
  65. sequenzo/dissimilarity_measures/src/module.cpp +40 -0
  66. sequenzo/dissimilarity_measures/src/setup.py +30 -0
  67. sequenzo/dissimilarity_measures/src/utils.h +25 -0
  68. sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
  69. sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
  70. sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
  71. sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
  72. sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
  73. sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
  74. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
  75. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
  76. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
  77. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
  78. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
  79. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
  80. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
  81. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
  82. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
  83. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
  84. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
  85. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
  86. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
  87. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
  88. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
  89. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
  90. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
  91. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
  92. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
  93. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
  94. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
  95. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
  96. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
  97. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
  98. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
  99. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
  100. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
  101. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
  102. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
  103. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
  104. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
  105. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
  106. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
  107. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
  108. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
  109. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
  110. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
  111. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
  112. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
  113. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
  114. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
  115. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
  116. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
  117. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
  118. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
  119. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
  120. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
  121. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
  122. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
  123. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
  124. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
  125. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
  126. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
  127. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
  128. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
  129. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
  130. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
  131. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
  132. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
  133. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
  134. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
  135. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
  136. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
  137. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
  138. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
  139. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
  140. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
  141. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
  142. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
  143. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
  144. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
  145. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
  146. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
  147. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
  148. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
  149. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
  150. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
  151. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
  152. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
  153. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
  154. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
  155. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
  156. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
  157. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
  158. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
  159. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
  160. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
  161. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
  162. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
  163. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
  164. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
  165. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
  166. sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
  167. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
  168. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
  169. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
  170. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
  171. sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
  172. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
  173. sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
  174. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
  175. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
  176. sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
  177. sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
  178. sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
  179. sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
  180. sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
  181. sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
  182. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
  183. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
  184. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
  185. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
  186. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
  187. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
  188. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
  189. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
  190. sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
  191. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
  192. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
  193. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
  194. sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
  195. sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
  196. sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
  197. sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
  198. sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
  199. sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
  200. sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
  201. sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
  202. sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
  203. sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
  204. sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
  205. sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
  206. sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
  207. sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
  208. sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
  209. sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
  210. sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
  211. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
  212. sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
  213. sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
  214. sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
  215. sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
  216. sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
  217. sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
  218. sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
  219. sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
  220. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
  221. sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
  222. sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
  223. sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
  224. sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
  225. sequenzo/multidomain/__init__.py +23 -0
  226. sequenzo/multidomain/association_between_domains.py +311 -0
  227. sequenzo/multidomain/cat.py +597 -0
  228. sequenzo/multidomain/combt.py +519 -0
  229. sequenzo/multidomain/dat.py +81 -0
  230. sequenzo/multidomain/idcd.py +139 -0
  231. sequenzo/multidomain/linked_polyad.py +292 -0
  232. sequenzo/openmp_setup.py +233 -0
  233. sequenzo/prefix_tree/__init__.py +62 -0
  234. sequenzo/prefix_tree/hub.py +114 -0
  235. sequenzo/prefix_tree/individual_level_indicators.py +1321 -0
  236. sequenzo/prefix_tree/spell_individual_level_indicators.py +580 -0
  237. sequenzo/prefix_tree/spell_level_indicators.py +297 -0
  238. sequenzo/prefix_tree/system_level_indicators.py +544 -0
  239. sequenzo/prefix_tree/utils.py +54 -0
  240. sequenzo/seqhmm/__init__.py +95 -0
  241. sequenzo/seqhmm/advanced_optimization.py +305 -0
  242. sequenzo/seqhmm/bootstrap.py +411 -0
  243. sequenzo/seqhmm/build_hmm.py +142 -0
  244. sequenzo/seqhmm/build_mhmm.py +136 -0
  245. sequenzo/seqhmm/build_nhmm.py +121 -0
  246. sequenzo/seqhmm/fit_mhmm.py +62 -0
  247. sequenzo/seqhmm/fit_model.py +61 -0
  248. sequenzo/seqhmm/fit_nhmm.py +76 -0
  249. sequenzo/seqhmm/formulas.py +289 -0
  250. sequenzo/seqhmm/forward_backward_nhmm.py +276 -0
  251. sequenzo/seqhmm/gradients_nhmm.py +306 -0
  252. sequenzo/seqhmm/hmm.py +291 -0
  253. sequenzo/seqhmm/mhmm.py +314 -0
  254. sequenzo/seqhmm/model_comparison.py +238 -0
  255. sequenzo/seqhmm/multichannel_em.py +282 -0
  256. sequenzo/seqhmm/multichannel_utils.py +138 -0
  257. sequenzo/seqhmm/nhmm.py +270 -0
  258. sequenzo/seqhmm/nhmm_utils.py +191 -0
  259. sequenzo/seqhmm/predict.py +137 -0
  260. sequenzo/seqhmm/predict_mhmm.py +142 -0
  261. sequenzo/seqhmm/simulate.py +878 -0
  262. sequenzo/seqhmm/utils.py +218 -0
  263. sequenzo/seqhmm/visualization.py +910 -0
  264. sequenzo/sequence_characteristics/__init__.py +40 -0
  265. sequenzo/sequence_characteristics/complexity_index.py +49 -0
  266. sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
  267. sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
  268. sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
  269. sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
  270. sequenzo/sequence_characteristics/turbulence.py +155 -0
  271. sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
  272. sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
  273. sequenzo/suffix_tree/__init__.py +66 -0
  274. sequenzo/suffix_tree/hub.py +114 -0
  275. sequenzo/suffix_tree/individual_level_indicators.py +1679 -0
  276. sequenzo/suffix_tree/spell_individual_level_indicators.py +493 -0
  277. sequenzo/suffix_tree/spell_level_indicators.py +248 -0
  278. sequenzo/suffix_tree/system_level_indicators.py +535 -0
  279. sequenzo/suffix_tree/utils.py +56 -0
  280. sequenzo/version_check.py +283 -0
  281. sequenzo/visualization/__init__.py +29 -0
  282. sequenzo/visualization/plot_mean_time.py +222 -0
  283. sequenzo/visualization/plot_modal_state.py +276 -0
  284. sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
  285. sequenzo/visualization/plot_relative_frequency.py +405 -0
  286. sequenzo/visualization/plot_sequence_index.py +1175 -0
  287. sequenzo/visualization/plot_single_medoid.py +153 -0
  288. sequenzo/visualization/plot_state_distribution.py +651 -0
  289. sequenzo/visualization/plot_transition_matrix.py +190 -0
  290. sequenzo/visualization/utils/__init__.py +23 -0
  291. sequenzo/visualization/utils/utils.py +310 -0
  292. sequenzo/with_event_history_analysis/__init__.py +35 -0
  293. sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
  294. sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
  295. sequenzo-0.1.31.dist-info/METADATA +286 -0
  296. sequenzo-0.1.31.dist-info/RECORD +299 -0
  297. sequenzo-0.1.31.dist-info/WHEEL +5 -0
  298. sequenzo-0.1.31.dist-info/licenses/LICENSE +28 -0
  299. sequenzo-0.1.31.dist-info/top_level.txt +2 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp
@@ -0,0 +1,788 @@
+ /****************************************************************************
+  * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+  * Martin Renou                                                             *
+  * Copyright (c) QuantStack                                                 *
+  * Copyright (c) Serge Guelton                                              *
+  *                                                                          *
+  * Distributed under the terms of the BSD 3-Clause License.                 *
+  *                                                                          *
+  * The full license is in the file LICENSE, distributed with this software. *
+  ****************************************************************************/
+
+ #ifndef XSIMD_EMULATED_HPP
+ #define XSIMD_EMULATED_HPP
+
+ #include <complex>
+ #include <limits>
+ #include <numeric>
+ #include <type_traits>
+
+ #include "../arch/xsimd_scalar.hpp"
+
+ #include "../types/xsimd_emulated_register.hpp"
+ #include "../types/xsimd_utils.hpp"
+
+ namespace xsimd
+ {
+     template <typename T, class A, bool... Values>
+     struct batch_bool_constant;
+
+     template <class T_out, class T_in, class A>
+     XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& x) noexcept;
+
+     template <typename T, class A, T... Values>
+     struct batch_constant;
+
+     namespace kernel
+     {
+         using namespace types;
+
+         // fwd
+         template <class A, class T, size_t I>
+         XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I>, requires_arch<common>) noexcept;
+         template <class A, typename T, typename ITy, ITy... Indices>
+         XSIMD_INLINE batch<T, A> shuffle(batch<T, A> const& x, batch<T, A> const& y, batch_constant<ITy, A, Indices...>, requires_arch<common>) noexcept;
+
+         namespace detail
+         {
+             template <size_t I, class F, class... Bs>
+             auto emulated_apply(F func, Bs const&... bs) -> decltype(func(bs.data[I]...))
+             {
+                 return func(bs.data[I]...);
+             }
+
+             template <class F, class B, class... Bs, size_t... Is>
+             auto emulated_apply(F func, ::xsimd::detail::index_sequence<Is...>, B const& b, Bs const&... bs) -> std::array<decltype(func(b.data[0], bs.data[0]...)), B::size>
+             {
+                 return { emulated_apply<Is>(func, b, bs...)... };
+             }
+
+             template <class B, class F, class... Bs>
+             auto emulated_apply(F func, B const& b, Bs const&... bs) -> std::array<decltype(func(b.data[0], bs.data[0]...)), B::size>
+             {
+                 return emulated_apply(func, ::xsimd::detail::make_index_sequence<B::size>(), b, bs...);
+             }
+         }
+
+         // abs
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> abs(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::abs(v); }, self);
+         }
+
+         // add
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> add(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::add(v0, v1); }, self, other);
+         }
+
+         // all
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE bool all(batch_bool<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return std::all_of(self.data.begin(), self.data.end(), [](T v) { return bool(v); });
+         }
+
+         // any
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE bool any(batch_bool<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return std::any_of(self.data.begin(), self.data.end(), [](T v) { return bool(v); });
+         }
+
+         // batch_bool_cast
+         template <class A, class T_out, class T_in, size_t N = 8 * sizeof(T_in) * batch<T_in, A>::size>
+         XSIMD_INLINE batch_bool<T_out, A> batch_bool_cast(batch_bool<T_in, A> const& self, batch_bool<T_out, A> const&, requires_arch<emulated<N>>) noexcept
+         {
+             return { self.data };
+         }
+
+         // bitwise_and
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_and(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::bitwise_and(v0, v1); }, self, other);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> bitwise_and(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v0, bool v1) { return xsimd::bitwise_and(v0, v1); }, self, other);
+         }
+
+         // bitwise_andnot
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_andnot(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::bitwise_andnot(v0, v1); }, self, other);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> bitwise_andnot(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v0, bool v1) { return xsimd::bitwise_andnot(v0, v1); }, self, other);
+         }
+
+         // bitwise_lshift
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& self, int32_t other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([other](T v) { return xsimd::bitwise_lshift(v, other); }, self);
+         }
+
+         // bitwise_not
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_not(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::bitwise_not(v); }, self);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> bitwise_not(batch_bool<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v) { return xsimd::bitwise_not(v); }, self);
+         }
+
+         // bitwise_or
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_or(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::bitwise_or(v0, v1); }, self, other);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> bitwise_or(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v0, bool v1) { return xsimd::bitwise_or(v0, v1); }, self, other);
+         }
+
+         // bitwise_rshift
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& self, int32_t other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([other](T v) { return xsimd::bitwise_rshift(v, other); }, self);
+         }
+
+         // bitwise_xor
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> bitwise_xor(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::bitwise_xor(v0, v1); }, self, other);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> bitwise_xor(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v0, bool v1) { return xsimd::bitwise_xor(v0, v1); }, self, other);
+         }
+
+         // bitwise_cast
+         template <class A, class T_in, class T_out, size_t N = 8 * sizeof(T_in) * batch<T_in, A>::size>
+         XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& self, batch<T_out, A> const&, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T_out, A>::size;
+             std::array<T_out, size> result;
+             char* raw_data = reinterpret_cast<char*>(result.data());
+             const char* raw_input = reinterpret_cast<const char*>(self.data.data());
+             memcpy(raw_data, raw_input, size * sizeof(T_out));
+             return result;
+         }
+
+         // broadcast
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         batch<T, A> XSIMD_INLINE broadcast(T val, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> r;
+             std::fill(r.begin(), r.end(), val);
+             return r;
+         }
+
+         // first
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         T XSIMD_INLINE first(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return self.data[0];
+         }
+
+ #if 0
+         // count
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE size_t count(batch_bool<T, A> const& x, requires_arch<emulated<N>>) noexcept
+         {
+             uint64_t m = x.mask();
+             // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+             m = m - ((m >> 1) & (uint64_t) ~(uint64_t)0 / 3); // temp
+             m = (m & (uint64_t) ~(uint64_t)0 / 15 * 3) + ((m >> 2) & (uint64_t) ~(uint64_t)0 / 15 * 3); // temp
+             m = (m + (m >> 4)) & (uint64_t) ~(uint64_t)0 / 255 * 15; // temp
+             return (m * ((uint64_t) ~(uint64_t)0 / 255)) >> (sizeof(uint64_t) - 1) * CHAR_BIT; // count
+         }
+ #endif
+
+         // store_complex
+         namespace detail
+         {
+             // complex_low
+             template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+             XSIMD_INLINE batch<T, A> complex_low(batch<std::complex<T>, A> const& self, requires_arch<emulated<N>>) noexcept
+             {
+                 constexpr size_t size = batch<T, A>::size;
+                 std::array<T, size> result;
+                 for (size_t i = 0; i < size / 2; ++i)
+                 {
+                     result[2 * i] = self.real().data[i];
+                     result[1 + 2 * i] = self.imag().data[i];
+                 }
+                 return result;
+             }
+             // complex_high
+             template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+             XSIMD_INLINE batch<T, A> complex_high(batch<std::complex<T>, A> const& self, requires_arch<emulated<N>>) noexcept
+             {
+                 constexpr size_t size = batch<T, A>::size;
+                 std::array<T, size> result;
+                 for (size_t i = 0; i < size / 2; ++i)
+                 {
+                     result[2 * i] = self.real().data[i + size / 2];
+                     result[1 + 2 * i] = self.imag().data[i + size / 2];
+                 }
+                 return result;
+             }
+         }
+
+         // decr_if
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> decr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<emulated<N>>) noexcept
+         {
+             return self - batch<T, A>(mask.data);
+         }
+
+         // div
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> div(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::div(v0, v1); }, self, other);
+         }
+
+         // fast_cast
+         namespace detail
+         {
+             template <class A, size_t N = 8 * sizeof(float) * batch<float, A>::size>
+             XSIMD_INLINE batch<float, A> fast_cast(batch<int32_t, A> const& self, batch<float, A> const&, requires_arch<emulated<N>>) noexcept
+             {
+                 return detail::emulated_apply([](int32_t v) { return float(v); }, self);
+             }
+
+             template <class A, size_t N = 8 * sizeof(float) * batch<float, A>::size>
+             XSIMD_INLINE batch<float, A> fast_cast(batch<uint32_t, A> const& self, batch<float, A> const&, requires_arch<emulated<N>>) noexcept
+             {
+                 return detail::emulated_apply([](uint32_t v) { return float(v); }, self);
+             }
+
+             template <class A, size_t N = 8 * sizeof(double) * batch<double, A>::size>
+             XSIMD_INLINE batch<double, A> fast_cast(batch<int64_t, A> const& self, batch<double, A> const&, requires_arch<emulated<N>>) noexcept
+             {
+                 return detail::emulated_apply([](int64_t v) { return double(v); }, self);
+             }
+
+             template <class A, size_t N = 8 * sizeof(double) * batch<double, A>::size>
+             XSIMD_INLINE batch<double, A> fast_cast(batch<uint64_t, A> const& self, batch<double, A> const&, requires_arch<emulated<N>>) noexcept
+             {
+                 return detail::emulated_apply([](uint64_t v) { return double(v); }, self);
+             }
+
+             template <class A, size_t N = 8 * sizeof(int32_t) * batch<int32_t, A>::size>
+             XSIMD_INLINE batch<int32_t, A> fast_cast(batch<float, A> const& self, batch<int32_t, A> const&, requires_arch<emulated<N>>) noexcept
+             {
+                 return detail::emulated_apply([](float v) { return int32_t(v); }, self);
+             }
+
+             template <class A, size_t N = 8 * sizeof(double) * batch<double, A>::size>
+             XSIMD_INLINE batch<int64_t, A> fast_cast(batch<double, A> const& self, batch<int64_t, A> const&, requires_arch<emulated<N>>) noexcept
+             {
+                 return detail::emulated_apply([](double v) { return int64_t(v); }, self);
+             }
+         }
+
+         // eq
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, emulated<N>> eq(batch<T, emulated<N>> const& self, batch<T, emulated<N>> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::eq(v0, v1); }, self, other);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch_bool<T, A>::size>
+         XSIMD_INLINE batch_bool<T, emulated<N>> eq(batch_bool<T, emulated<N>> const& self, batch_bool<T, emulated<N>> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v0, bool v1) { return xsimd::eq(v0, v1); }, self, other);
+         }
+
+         // from_bool
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> from_bool(batch_bool<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v) { return T(v); }, self);
+         }
+
+         // from_mask
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> from_mask(batch_bool<T, A> const&, uint64_t mask, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<bool, size> vmask;
+             for (size_t i = 0; i < size; ++i)
+                 vmask[i] = (mask >> i) & 1u;
+             return vmask;
+         }
+
+         // ge
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, emulated<N>> ge(batch<T, emulated<N>> const& self, batch<T, emulated<N>> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::ge(v0, v1); }, self, other);
+         }
+
+         // gt
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, emulated<N>> gt(batch<T, emulated<N>> const& self, batch<T, emulated<N>> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::gt(v0, v1); }, self, other);
+         }
+
+         // haddp
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> haddp(batch<T, A> const* row, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> r;
+             for (size_t i = 0; i < size; ++i)
+                 r[i] = std::accumulate(row[i].data.begin() + 1, row[i].data.end(), row[i].data.front());
+             return r;
+         }
+
+         // incr_if
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> incr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<emulated<N>>) noexcept
+         {
+             return self + batch<T, A>(mask.data);
+         }
+
+         // insert
+         template <class A, class T, size_t I, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I>, requires_arch<emulated<N>>) noexcept
+         {
+             batch<T, A> other = self;
+             other.data[I] = val;
+             return other;
+         }
+
+         // isnan
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size, class = typename std::enable_if<std::is_floating_point<T>::value, void>::type>
+         XSIMD_INLINE batch_bool<T, A> isnan(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::isnan(v); }, self);
+         }
+
+         // load_aligned
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> load_aligned(T const* mem, convert<T>, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> res;
+             std::copy(mem, mem + size, res.begin());
+             return res;
+         }
+
+         // load_unaligned
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> load_unaligned(T const* mem, convert<T>, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> res;
+             std::copy(mem, mem + size, res.begin());
+             return res;
+         }
+
+         // load_complex
+         namespace detail
+         {
+             template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+             XSIMD_INLINE batch<std::complex<T>, A> load_complex(batch<T, A> const& hi, batch<T, A> const& lo, requires_arch<emulated<N>>) noexcept
+             {
+                 constexpr size_t size = batch<T, A>::size;
+                 std::array<T, size> real, imag;
+                 for (size_t i = 0; i < size / 2; ++i)
+                 {
+                     real[i] = hi.data[2 * i];
+                     imag[i] = hi.data[1 + 2 * i];
+                 }
+                 for (size_t i = 0; i < size / 2; ++i)
+                 {
+                     real[size / 2 + i] = lo.data[2 * i];
+                     imag[size / 2 + i] = lo.data[1 + 2 * i];
+                 }
+                 return { real, imag };
+             }
+         }
+
+         // le
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, emulated<N>> le(batch<T, emulated<N>> const& self, batch<T, emulated<N>> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::le(v0, v1); }, self, other);
+         }
+
+         // lt
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, emulated<N>> lt(batch<T, emulated<N>> const& self, batch<T, emulated<N>> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::lt(v0, v1); }, self, other);
+         }
+
+         // mask
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE uint64_t mask(batch_bool<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             uint64_t res = 0;
+             for (size_t i = 0; i < size; ++i)
+                 res |= (self.data[i] ? 1u : 0u) << i;
+             return res;
+         }
+
+         // max
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> max(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::max(v0, v1); }, self, other);
+         }
+
+         // min
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> min(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::min(v0, v1); }, self, other);
+         }
+
+         // mul
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::mul(v0, v1); }, self, other);
+         }
+
+         // nearbyint_as_int
+         template <class A, typename T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<as_integer_t<T>, A> nearbyint_as_int(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::nearbyint_as_int(v); }, self);
+         }
+
+         // neg
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> neg(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::neg(v); }, self);
+         }
+
+         // neq
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::neq(v0, v1); }, self, other);
+         }
+
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch_bool<T, A> neq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool v0, bool v1) { return xsimd::neq(v0, v1); }, self, other);
+         }
+
+         // reduce_add
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE T reduce_add(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> buffer;
+             self.store_unaligned(buffer.data());
+             return std::accumulate(buffer.begin() + 1, buffer.end(), *buffer.begin());
+         }
+
+         // reduce_max
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE T reduce_max(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return std::accumulate(self.data.begin() + 1, self.data.end(), *self.data.begin(), [](T const& x, T const& y) { return xsimd::max(x, y); });
+         }
+
+         // reduce_min
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE T reduce_min(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return std::accumulate(self.data.begin() + 1, self.data.end(), *self.data.begin(), [](T const& x, T const& y) { return xsimd::min(x, y); });
+         }
+
+         // reduce_mul
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE T reduce_mul(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> buffer;
+             self.store_unaligned(buffer.data());
+             return std::accumulate(buffer.begin() + 1, buffer.end(), *buffer.begin(), std::multiplies<T>());
+         }
+
+         // rsqrt
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> rsqrt(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::rsqrt(v); }, self);
+         }
+
+         // select
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](bool c, T t, T f) { return xsimd::select(c, t, f); }, cond, true_br, false_br);
+         }
+
+         template <class A, class T, bool... Values>
+         XSIMD_INLINE batch<T, A> select(batch_bool_constant<T, A, Values...> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<emulated<8 * sizeof(T) * batch<T, A>::size>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             static_assert(sizeof...(Values) == size, "consistent init");
+             return select((batch_bool<T, A>)cond, true_br, false_br, emulated<8 * sizeof(T) * size> {});
+         }
+
+         // shuffle
+         template <class A, typename T, class ITy, ITy... Is>
+         XSIMD_INLINE batch<T, A> shuffle(batch<T, A> const& x, batch<float, A> const& y, batch_constant<ITy, A, Is...> mask, requires_arch<emulated<batch<T, A>::size>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             batch<ITy, A> bmask = mask;
+             std::array<T, size> res;
+             for (size_t i = 0; i < size; ++i)
+                 res[i] = bmask.data[i] < size ? x.data[bmask.data[i]] : y.data[bmask.data[i] - size];
+             return res;
+         }
+
+         // sqrt
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> sqrt(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v) { return xsimd::sqrt(v); }, self);
+         }
+
+         // slide_left
+         template <size_t M, class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> result;
+             char* raw_data = reinterpret_cast<char*>(result.data());
+             memset(raw_data, 0, M);
+             memcpy(raw_data + M, reinterpret_cast<const char*>(x.data.data()), sizeof(T) * result.size() - M);
+             return result;
+         }
+
+         // slide_right
+         template <size_t M, class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             std::array<T, size> result;
+             char* raw_data = reinterpret_cast<char*>(result.data());
+             memcpy(raw_data, reinterpret_cast<const char*>(x.data.data()) + M, sizeof(T) * result.size() - M);
+             memset(raw_data + sizeof(T) * result.size() - M, 0, M);
+             return result;
+         }
+
+         // sadd
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::sadd(v0, v1); }, self, other);
+         }
+
+         // set
+         template <class A, class T, size_t N, class... Values>
+         XSIMD_INLINE batch<T, emulated<N>> set(batch<T, emulated<N>> const&, requires_arch<emulated<N>>, Values... values) noexcept
+         {
+             static_assert(sizeof...(Values) == batch<T, emulated<N>>::size, "consistent init");
+             return { typename batch<T, emulated<N>>::register_type { static_cast<T>(values)... } };
+         }
+
+         template <class A, class T, size_t N, class... Values>
+         XSIMD_INLINE batch_bool<T, emulated<N>> set(batch_bool<T, emulated<N>> const&, requires_arch<emulated<N>>, Values... values) noexcept
+         {
+             static_assert(sizeof...(Values) == batch<T, emulated<N>>::size, "consistent init");
+             return { std::array<bool, sizeof...(Values)> { static_cast<bool>(values)... } };
+         }
+
+         // ssub
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> ssub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::ssub(v0, v1); }, self, other);
+         }
+
+         // store_aligned
+         template <class A, class T, size_t N>
+         XSIMD_INLINE void store_aligned(T* mem, batch<T, emulated<N>> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             std::copy(self.data.begin(), self.data.end(), mem);
+         }
+
+         // store_unaligned
+         template <class A, class T, size_t N>
+         XSIMD_INLINE void store_unaligned(T* mem, batch<T, emulated<N>> const& self, requires_arch<emulated<N>>) noexcept
+         {
+             std::copy(self.data.begin(), self.data.end(), mem);
+         }
+
+         // sub
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             return detail::emulated_apply([](T v0, T v1) { return xsimd::sub(v0, v1); }, self, other);
+         }
+
+         // swizzle
+         template <class A, typename T, class ITy, ITy... Is>
+         XSIMD_INLINE batch<T, A> swizzle(batch<T, A> const& self, batch_constant<ITy, A, Is...> mask, requires_arch<emulated<8 * sizeof(T) * batch<T, A>::size>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             batch<ITy, A> bmask = mask;
+             std::array<T, size> res;
+             for (size_t i = 0; i < size; ++i)
+                 res[i] = self.data[bmask.data[i]];
+             return res;
+         }
+
+         // zip_hi
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> zip_hi(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             // Note: irregular behavior for odd numbers.
+             std::array<T, size> res;
+             if (size % 2)
+             {
+                 for (size_t i = 0; i < size; ++i)
+                     res[i] = (i % 2 ? self : other).data[size / 2 + i / 2];
+             }
+             else
+             {
+                 for (size_t i = 0; i < size; ++i)
+                     res[i] = (i % 2 ? other : self).data[size / 2 + i / 2];
+             }
+             return res;
+         }
+
+         // zip_lo
+         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+         XSIMD_INLINE batch<T, A> zip_lo(batch<T, A> const& self, batch<T, A> const& other, requires_arch<emulated<N>>) noexcept
+         {
+             constexpr size_t size = batch<T, A>::size;
+             // Note: irregular behavior for odd numbers.
+             std::array<T, size> res;
+             for (size_t i = 0; i < size; ++i)
+                 res[i] = (i % 2 ? other : self).data[i / 2];
+             return res;
+         }
+     }
+ }
+
+ #endif
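
For orientation: the vendored xsimd_emulated.hpp above implements a portable fallback architecture in which every batch kernel is routed through detail::emulated_apply, which expands an index sequence and invokes the scalar implementation once per lane. Below is a minimal sketch of how such a backend is typically exercised through xsimd's public API; it is illustrative only and not part of this package's diff, and the bit-width chosen for emulated<> here is an assumption for the example.

// Hedged sketch: drive the emulated<N> backend via xsimd's public batch API.
// Assumes an xsimd build in which the emulated architecture is enabled.
#include <iostream>
#include "xsimd/xsimd.hpp"

int main()
{
    // emulated<128> with float yields 128 / (8 * sizeof(float)) = 4 lanes.
    using batch = xsimd::batch<float, xsimd::emulated<128>>;

    float in[] = { 1.0f, 2.0f, 3.0f, 4.0f };
    batch a = batch::load_unaligned(in); // load_unaligned() above copies element-wise
    batch b(1.5f);                       // broadcast() above fills every lane

    batch c = a + b;                     // add() dispatches through detail::emulated_apply
    std::cout << xsimd::reduce_add(c) << '\n'; // (1+2+3+4) + 4 * 1.5 = 16
}

Because each kernel degenerates to a plain per-lane loop, this backend trades speed for portability: it gives sequenzo's C++ extensions a correct code path on targets where no native SIMD instruction set in the arch/ directory applies.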