sequenzo 0.1.31__cp310-cp310-macosx_10_9_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _sequenzo_fastcluster.cpython-310-darwin.so +0 -0
- sequenzo/__init__.py +349 -0
- sequenzo/big_data/__init__.py +12 -0
- sequenzo/big_data/clara/__init__.py +26 -0
- sequenzo/big_data/clara/clara.py +476 -0
- sequenzo/big_data/clara/utils/__init__.py +27 -0
- sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
- sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
- sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
- sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
- sequenzo/big_data/clara/visualization.py +88 -0
- sequenzo/clustering/KMedoids.py +178 -0
- sequenzo/clustering/__init__.py +30 -0
- sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
- sequenzo/clustering/hierarchical_clustering.py +1256 -0
- sequenzo/clustering/sequenzo_fastcluster/fastcluster.py +495 -0
- sequenzo/clustering/sequenzo_fastcluster/src/fastcluster.cpp +1877 -0
- sequenzo/clustering/sequenzo_fastcluster/src/fastcluster_python.cpp +1264 -0
- sequenzo/clustering/src/KMedoid.cpp +263 -0
- sequenzo/clustering/src/PAM.cpp +237 -0
- sequenzo/clustering/src/PAMonce.cpp +265 -0
- sequenzo/clustering/src/cluster_quality.cpp +496 -0
- sequenzo/clustering/src/cluster_quality.h +128 -0
- sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
- sequenzo/clustering/src/module.cpp +228 -0
- sequenzo/clustering/src/weightedinertia.cpp +111 -0
- sequenzo/clustering/utils/__init__.py +27 -0
- sequenzo/clustering/utils/disscenter.py +122 -0
- sequenzo/data_preprocessing/__init__.py +22 -0
- sequenzo/data_preprocessing/helpers.py +303 -0
- sequenzo/datasets/__init__.py +41 -0
- sequenzo/datasets/biofam.csv +2001 -0
- sequenzo/datasets/biofam_child_domain.csv +2001 -0
- sequenzo/datasets/biofam_left_domain.csv +2001 -0
- sequenzo/datasets/biofam_married_domain.csv +2001 -0
- sequenzo/datasets/chinese_colonial_territories.csv +12 -0
- sequenzo/datasets/country_co2_emissions.csv +194 -0
- sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
- sequenzo/datasets/country_gdp_per_capita.csv +194 -0
- sequenzo/datasets/dyadic_children.csv +61 -0
- sequenzo/datasets/dyadic_parents.csv +61 -0
- sequenzo/datasets/mvad.csv +713 -0
- sequenzo/datasets/pairfam_activity_by_month.csv +1028 -0
- sequenzo/datasets/pairfam_activity_by_year.csv +1028 -0
- sequenzo/datasets/pairfam_family_by_month.csv +1028 -0
- sequenzo/datasets/pairfam_family_by_year.csv +1028 -0
- sequenzo/datasets/political_science_aid_shock.csv +166 -0
- sequenzo/datasets/political_science_donor_fragmentation.csv +157 -0
- sequenzo/define_sequence_data.py +1400 -0
- sequenzo/dissimilarity_measures/__init__.py +31 -0
- sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/get_distance_matrix.py +762 -0
- sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +246 -0
- sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
- sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
- sequenzo/dissimilarity_measures/src/LCPspellDistance.cpp +215 -0
- sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
- sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
- sequenzo/dissimilarity_measures/src/__init__.py +0 -0
- sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
- sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
- sequenzo/dissimilarity_measures/src/module.cpp +40 -0
- sequenzo/dissimilarity_measures/src/setup.py +30 -0
- sequenzo/dissimilarity_measures/src/utils.h +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
- sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
- sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
- sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
- sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
- sequenzo/multidomain/__init__.py +23 -0
- sequenzo/multidomain/association_between_domains.py +311 -0
- sequenzo/multidomain/cat.py +597 -0
- sequenzo/multidomain/combt.py +519 -0
- sequenzo/multidomain/dat.py +81 -0
- sequenzo/multidomain/idcd.py +139 -0
- sequenzo/multidomain/linked_polyad.py +292 -0
- sequenzo/openmp_setup.py +233 -0
- sequenzo/prefix_tree/__init__.py +62 -0
- sequenzo/prefix_tree/hub.py +114 -0
- sequenzo/prefix_tree/individual_level_indicators.py +1321 -0
- sequenzo/prefix_tree/spell_individual_level_indicators.py +580 -0
- sequenzo/prefix_tree/spell_level_indicators.py +297 -0
- sequenzo/prefix_tree/system_level_indicators.py +544 -0
- sequenzo/prefix_tree/utils.py +54 -0
- sequenzo/seqhmm/__init__.py +95 -0
- sequenzo/seqhmm/advanced_optimization.py +305 -0
- sequenzo/seqhmm/bootstrap.py +411 -0
- sequenzo/seqhmm/build_hmm.py +142 -0
- sequenzo/seqhmm/build_mhmm.py +136 -0
- sequenzo/seqhmm/build_nhmm.py +121 -0
- sequenzo/seqhmm/fit_mhmm.py +62 -0
- sequenzo/seqhmm/fit_model.py +61 -0
- sequenzo/seqhmm/fit_nhmm.py +76 -0
- sequenzo/seqhmm/formulas.py +289 -0
- sequenzo/seqhmm/forward_backward_nhmm.py +276 -0
- sequenzo/seqhmm/gradients_nhmm.py +306 -0
- sequenzo/seqhmm/hmm.py +291 -0
- sequenzo/seqhmm/mhmm.py +314 -0
- sequenzo/seqhmm/model_comparison.py +238 -0
- sequenzo/seqhmm/multichannel_em.py +282 -0
- sequenzo/seqhmm/multichannel_utils.py +138 -0
- sequenzo/seqhmm/nhmm.py +270 -0
- sequenzo/seqhmm/nhmm_utils.py +191 -0
- sequenzo/seqhmm/predict.py +137 -0
- sequenzo/seqhmm/predict_mhmm.py +142 -0
- sequenzo/seqhmm/simulate.py +878 -0
- sequenzo/seqhmm/utils.py +218 -0
- sequenzo/seqhmm/visualization.py +910 -0
- sequenzo/sequence_characteristics/__init__.py +40 -0
- sequenzo/sequence_characteristics/complexity_index.py +49 -0
- sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
- sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
- sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
- sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
- sequenzo/sequence_characteristics/turbulence.py +155 -0
- sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
- sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
- sequenzo/suffix_tree/__init__.py +66 -0
- sequenzo/suffix_tree/hub.py +114 -0
- sequenzo/suffix_tree/individual_level_indicators.py +1679 -0
- sequenzo/suffix_tree/spell_individual_level_indicators.py +493 -0
- sequenzo/suffix_tree/spell_level_indicators.py +248 -0
- sequenzo/suffix_tree/system_level_indicators.py +535 -0
- sequenzo/suffix_tree/utils.py +56 -0
- sequenzo/version_check.py +283 -0
- sequenzo/visualization/__init__.py +29 -0
- sequenzo/visualization/plot_mean_time.py +222 -0
- sequenzo/visualization/plot_modal_state.py +276 -0
- sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
- sequenzo/visualization/plot_relative_frequency.py +405 -0
- sequenzo/visualization/plot_sequence_index.py +1175 -0
- sequenzo/visualization/plot_single_medoid.py +153 -0
- sequenzo/visualization/plot_state_distribution.py +651 -0
- sequenzo/visualization/plot_transition_matrix.py +190 -0
- sequenzo/visualization/utils/__init__.py +23 -0
- sequenzo/visualization/utils/utils.py +310 -0
- sequenzo/with_event_history_analysis/__init__.py +35 -0
- sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
- sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
- sequenzo-0.1.31.dist-info/METADATA +286 -0
- sequenzo-0.1.31.dist-info/RECORD +299 -0
- sequenzo-0.1.31.dist-info/WHEEL +5 -0
- sequenzo-0.1.31.dist-info/licenses/LICENSE +28 -0
- sequenzo-0.1.31.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,656 @@
|
|
|
1
|
+
/***************************************************************************
|
|
2
|
+
* Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
|
|
3
|
+
* Martin Renou *
|
|
4
|
+
* Copyright (c) QuantStack *
|
|
5
|
+
* Copyright (c) Serge Guelton *
|
|
6
|
+
* *
|
|
7
|
+
* Distributed under the terms of the BSD 3-Clause License. *
|
|
8
|
+
* *
|
|
9
|
+
* The full license is in the file LICENSE, distributed with this software. *
|
|
10
|
+
****************************************************************************/
|
|
11
|
+
|
|
12
|
+
#ifndef XSIMD_AVX512BW_HPP
|
|
13
|
+
#define XSIMD_AVX512BW_HPP
|
|
14
|
+
|
|
15
|
+
#include <array>
|
|
16
|
+
#include <type_traits>
|
|
17
|
+
|
|
18
|
+
#include "../types/xsimd_avx512bw_register.hpp"
|
|
19
|
+
|
|
20
|
+
namespace xsimd
|
|
21
|
+
{
|
|
22
|
+
|
|
23
|
+
namespace kernel
|
|
24
|
+
{
|
|
25
|
+
using namespace types;
|
|
26
|
+
|
|
27
|
+
namespace detail
|
|
28
|
+
{
|
|
29
|
+
template <class A, class T, int Cmp>
|
|
30
|
+
XSIMD_INLINE batch_bool<T, A> compare_int_avx512bw(batch<T, A> const& self, batch<T, A> const& other) noexcept
|
|
31
|
+
{
|
|
32
|
+
using register_type = typename batch_bool<T, A>::register_type;
|
|
33
|
+
if (std::is_signed<T>::value)
|
|
34
|
+
{
|
|
35
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
36
|
+
{
|
|
37
|
+
return (register_type)_mm512_cmp_epi8_mask(self, other, Cmp);
|
|
38
|
+
}
|
|
39
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
40
|
+
{
|
|
41
|
+
return (register_type)_mm512_cmp_epi16_mask(self, other, Cmp);
|
|
42
|
+
}
|
|
43
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
|
|
44
|
+
{
|
|
45
|
+
return (register_type)_mm512_cmp_epi32_mask(self, other, Cmp);
|
|
46
|
+
}
|
|
47
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
|
|
48
|
+
{
|
|
49
|
+
return (register_type)_mm512_cmp_epi64_mask(self, other, Cmp);
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
else
|
|
53
|
+
{
|
|
54
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
55
|
+
{
|
|
56
|
+
return (register_type)_mm512_cmp_epu8_mask(self, other, Cmp);
|
|
57
|
+
}
|
|
58
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
59
|
+
{
|
|
60
|
+
return (register_type)_mm512_cmp_epu16_mask(self, other, Cmp);
|
|
61
|
+
}
|
|
62
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
|
|
63
|
+
{
|
|
64
|
+
return (register_type)_mm512_cmp_epu32_mask(self, other, Cmp);
|
|
65
|
+
}
|
|
66
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
|
|
67
|
+
{
|
|
68
|
+
return (register_type)_mm512_cmp_epu64_mask(self, other, Cmp);
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
// abs
|
|
75
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
76
|
+
XSIMD_INLINE batch<T, A> abs(batch<T, A> const& self, requires_arch<avx512bw>) noexcept
|
|
77
|
+
{
|
|
78
|
+
if (std::is_unsigned<T>::value)
|
|
79
|
+
{
|
|
80
|
+
return self;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
84
|
+
{
|
|
85
|
+
return _mm512_abs_epi8(self);
|
|
86
|
+
}
|
|
87
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
88
|
+
{
|
|
89
|
+
return _mm512_abs_epi16(self);
|
|
90
|
+
}
|
|
91
|
+
else
|
|
92
|
+
{
|
|
93
|
+
return abs(self, avx512dq {});
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// add
|
|
98
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
99
|
+
XSIMD_INLINE batch<T, A> add(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
100
|
+
{
|
|
101
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
102
|
+
{
|
|
103
|
+
return _mm512_add_epi8(self, other);
|
|
104
|
+
}
|
|
105
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
106
|
+
{
|
|
107
|
+
return _mm512_add_epi16(self, other);
|
|
108
|
+
}
|
|
109
|
+
else
|
|
110
|
+
{
|
|
111
|
+
return add(self, other, avx512dq {});
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
// avgr
|
|
116
|
+
template <class A, class T, class = typename std::enable_if<std::is_unsigned<T>::value, void>::type>
|
|
117
|
+
XSIMD_INLINE batch<T, A> avgr(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
118
|
+
{
|
|
119
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
120
|
+
{
|
|
121
|
+
return _mm512_avg_epu8(self, other);
|
|
122
|
+
}
|
|
123
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
124
|
+
{
|
|
125
|
+
return _mm512_avg_epu16(self, other);
|
|
126
|
+
}
|
|
127
|
+
else
|
|
128
|
+
{
|
|
129
|
+
return avgr(self, other, common {});
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
// avg
|
|
134
|
+
template <class A, class T, class = typename std::enable_if<std::is_unsigned<T>::value, void>::type>
|
|
135
|
+
XSIMD_INLINE batch<T, A> avg(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
136
|
+
{
|
|
137
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
138
|
+
{
|
|
139
|
+
auto adj = ((self ^ other) << 7) >> 7;
|
|
140
|
+
return avgr(self, other, A {}) - adj;
|
|
141
|
+
}
|
|
142
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
143
|
+
{
|
|
144
|
+
auto adj = ((self ^ other) << 15) >> 15;
|
|
145
|
+
return avgr(self, other, A {}) - adj;
|
|
146
|
+
}
|
|
147
|
+
else
|
|
148
|
+
{
|
|
149
|
+
return avg(self, other, common {});
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
// bitwise_lshift
|
|
154
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
155
|
+
XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& self, int32_t other, requires_arch<avx512bw>) noexcept
|
|
156
|
+
{
|
|
157
|
+
#if defined(XSIMD_AVX512_SHIFT_INTRINSICS_IMM_ONLY)
|
|
158
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
159
|
+
{
|
|
160
|
+
return _mm512_sllv_epi16(self, _mm512_set1_epi16(other));
|
|
161
|
+
#else
|
|
162
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
163
|
+
{
|
|
164
|
+
return _mm512_slli_epi16(self, other);
|
|
165
|
+
#endif
|
|
166
|
+
}
|
|
167
|
+
else
|
|
168
|
+
{
|
|
169
|
+
return bitwise_lshift(self, other, avx512dq {});
|
|
170
|
+
}
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
// bitwise_rshift
|
|
174
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
175
|
+
XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& self, int32_t other, requires_arch<avx512bw>) noexcept
|
|
176
|
+
{
|
|
177
|
+
if (std::is_signed<T>::value)
|
|
178
|
+
{
|
|
179
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
180
|
+
{
|
|
181
|
+
__m512i sign_mask = _mm512_set1_epi16((0xFF00 >> other) & 0x00FF);
|
|
182
|
+
__m512i zeros = _mm512_setzero_si512();
|
|
183
|
+
__mmask64 cmp_is_negative_mask = _mm512_cmpgt_epi8_mask(zeros, self);
|
|
184
|
+
__m512i cmp_sign_mask = _mm512_mask_blend_epi8(cmp_is_negative_mask, zeros, sign_mask);
|
|
185
|
+
#if defined(XSIMD_AVX512_SHIFT_INTRINSICS_IMM_ONLY)
|
|
186
|
+
__m512i res = _mm512_srav_epi16(self, _mm512_set1_epi16(other));
|
|
187
|
+
#else
|
|
188
|
+
__m512i res = _mm512_srai_epi16(self, other);
|
|
189
|
+
#endif
|
|
190
|
+
return _mm512_or_si512(cmp_sign_mask, _mm512_andnot_si512(sign_mask, res));
|
|
191
|
+
#if defined(XSIMD_AVX512_SHIFT_INTRINSICS_IMM_ONLY)
|
|
192
|
+
}
|
|
193
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
194
|
+
{
|
|
195
|
+
return _mm512_srav_epi16(self, _mm512_set1_epi16(other));
|
|
196
|
+
#else
|
|
197
|
+
}
|
|
198
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
199
|
+
{
|
|
200
|
+
return _mm512_srai_epi16(self, other);
|
|
201
|
+
#endif
|
|
202
|
+
}
|
|
203
|
+
else
|
|
204
|
+
{
|
|
205
|
+
return bitwise_rshift(self, other, avx512dq {});
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
else
|
|
209
|
+
{
|
|
210
|
+
#if defined(XSIMD_AVX512_SHIFT_INTRINSICS_IMM_ONLY)
|
|
211
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
212
|
+
{
|
|
213
|
+
return _mm512_srlv_epi16(self, _mm512_set1_epi16(other));
|
|
214
|
+
#else
|
|
215
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
216
|
+
{
|
|
217
|
+
return _mm512_srli_epi16(self, other);
|
|
218
|
+
#endif
|
|
219
|
+
}
|
|
220
|
+
else
|
|
221
|
+
{
|
|
222
|
+
return bitwise_rshift(self, other, avx512dq {});
|
|
223
|
+
}
|
|
224
|
+
}
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
// decr_if
|
|
228
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
229
|
+
XSIMD_INLINE batch<T, A> decr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<avx512bw>) noexcept
|
|
230
|
+
{
|
|
231
|
+
|
|
232
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
233
|
+
{
|
|
234
|
+
return _mm512_mask_sub_epi8(self, mask.data, self, _mm512_set1_epi8(1));
|
|
235
|
+
}
|
|
236
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
237
|
+
{
|
|
238
|
+
return _mm512_mask_sub_epi16(self, mask.data, self, _mm512_set1_epi16(1));
|
|
239
|
+
}
|
|
240
|
+
else
|
|
241
|
+
{
|
|
242
|
+
return decr_if(self, mask, avx512dq {});
|
|
243
|
+
}
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
// eq
|
|
247
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
248
|
+
XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
249
|
+
{
|
|
250
|
+
return detail::compare_int_avx512bw<A, T, _MM_CMPINT_EQ>(self, other);
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
// ge
|
|
254
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
255
|
+
XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
256
|
+
{
|
|
257
|
+
return detail::compare_int_avx512bw<A, T, _MM_CMPINT_GE>(self, other);
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
// gt
|
|
261
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
262
|
+
XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
263
|
+
{
|
|
264
|
+
return detail::compare_int_avx512bw<A, T, _MM_CMPINT_GT>(self, other);
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
// incr_if: add 1 to each lane of `self` whose corresponding bit in `mask`
// is set; unmasked lanes are passed through unchanged.
// Mirror image of decr_if above, using masked adds instead of subtracts.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> incr_if(batch<T, A> const& self, batch_bool<T, A> const& mask, requires_arch<avx512bw>) noexcept
{

    XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
    {
        // Masked byte-wise add: self[i] + 1 where mask bit i is set.
        return _mm512_mask_add_epi8(self, mask.data, self, _mm512_set1_epi8(1));
    }
    else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
    {
        return _mm512_mask_add_epi16(self, mask.data, self, _mm512_set1_epi16(1));
    }
    else
    {
        // 32/64-bit elements: defer to the narrower-arch implementation.
        return incr_if(self, mask, avx512dq {});
    }
}
|
|
285
|
+
|
|
286
|
+
// insert
|
|
287
|
+
template <class A, class T, size_t I, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
288
|
+
XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I> pos, requires_arch<avx512bw>) noexcept
|
|
289
|
+
{
|
|
290
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
291
|
+
{
|
|
292
|
+
return _mm512_mask_set1_epi8(self, __mmask64(1ULL << (I & 63)), val);
|
|
293
|
+
}
|
|
294
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
295
|
+
{
|
|
296
|
+
return _mm512_mask_set1_epi16(self, __mmask32(1 << (I & 31)), val);
|
|
297
|
+
}
|
|
298
|
+
else
|
|
299
|
+
{
|
|
300
|
+
return insert(self, val, pos, avx512dq {});
|
|
301
|
+
}
|
|
302
|
+
}
|
|
303
|
+
|
|
304
|
+
// le: lane-wise `self <= other` for integral batches, via the shared
// AVX512BW integer-compare helper with the _MM_CMPINT_LE predicate.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    return detail::compare_int_avx512bw<A, T, _MM_CMPINT_LE>(self, other);
}
|
|
310
|
+
|
|
311
|
+
// lt: lane-wise `self < other` for integral batches, via the shared
// AVX512BW integer-compare helper with the _MM_CMPINT_LT predicate.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    return detail::compare_int_avx512bw<A, T, _MM_CMPINT_LT>(self, other);
}
|
|
317
|
+
|
|
318
|
+
// load (unaligned): build a 64-lane mask register from an array of 64
// `bool`s.  Any nonzero byte is treated as true (unsigned compare-greater
// against zero), so the input need not be strictly 0/1.
template <class A, class T, class = typename std::enable_if<batch_bool<T, A>::size == 64, void>::type>
XSIMD_INLINE batch_bool<T, A> load_unaligned(bool const* mem, batch_bool<T, A>, requires_arch<avx512bw>) noexcept
{
    // Assumes sizeof(bool) == 1 so that 64 bools occupy 64 bytes — TODO confirm
    // this holds on all supported ABIs.
    __m512i bool_val = _mm512_loadu_si512((__m512i const*)mem);
    return _mm512_cmpgt_epu8_mask(bool_val, _mm512_setzero_si512());
}
|
|
325
|
+
|
|
326
|
+
// load (aligned): same as load_unaligned above, but `mem` must be 64-byte
// aligned so the full-width aligned load can be used.
template <class A, class T, class = typename std::enable_if<batch_bool<T, A>::size == 64, void>::type>
XSIMD_INLINE batch_bool<T, A> load_aligned(bool const* mem, batch_bool<T, A>, requires_arch<avx512bw>) noexcept
{
    __m512i bool_val = _mm512_load_si512((__m512i const*)mem);
    // Nonzero byte => lane is true.
    return _mm512_cmpgt_epu8_mask(bool_val, _mm512_setzero_si512());
}
|
|
332
|
+
|
|
333
|
+
// max: lane-wise maximum for integral batches.  AVX512BW has native 8/16-bit
// min/max in both signednesses; wider types fall through to avx512dq.
// The signedness test is an ordinary `if`, but its condition is a compile-time
// constant, so only one arm survives optimization.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> max(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    if (std::is_signed<T>::value)
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            return _mm512_max_epi8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_max_epi16(self, other);
        }
        else
        {
            return max(self, other, avx512dq {});
        }
    }
    else
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            // Unsigned variants (epu) for unsigned element types.
            return _mm512_max_epu8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_max_epu16(self, other);
        }
        else
        {
            return max(self, other, avx512dq {});
        }
    }
}
|
|
368
|
+
|
|
369
|
+
// min: lane-wise minimum for integral batches.  Structure mirrors `max`
// above: native 8/16-bit signed/unsigned intrinsics, avx512dq fallback for
// wider element types.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> min(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    if (std::is_signed<T>::value)
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            return _mm512_min_epi8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_min_epi16(self, other);
        }
        else
        {
            return min(self, other, avx512dq {});
        }
    }
    else
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            // Unsigned variants (epu) for unsigned element types.
            return _mm512_min_epu8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_min_epu16(self, other);
        }
        else
        {
            return min(self, other, avx512dq {});
        }
    }
}
|
|
404
|
+
|
|
405
|
+
// mul
|
|
406
|
+
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
|
|
407
|
+
XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
|
|
408
|
+
{
|
|
409
|
+
XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
|
|
410
|
+
{
|
|
411
|
+
__m512i upper = _mm512_and_si512(_mm512_mullo_epi16(self, other), _mm512_srli_epi16(_mm512_set1_epi16(-1), 8));
|
|
412
|
+
__m512i lower = _mm512_slli_epi16(_mm512_mullo_epi16(_mm512_srli_epi16(self, 8), _mm512_srli_epi16(other, 8)), 8);
|
|
413
|
+
return _mm512_or_si512(upper, lower);
|
|
414
|
+
}
|
|
415
|
+
else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
|
|
416
|
+
{
|
|
417
|
+
return _mm512_mullo_epi16(self, other);
|
|
418
|
+
}
|
|
419
|
+
else
|
|
420
|
+
{
|
|
421
|
+
return mul(self, other, avx512dq {});
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
// neq: lane-wise `self != other` for integral batches, via the shared
// AVX512BW integer-compare helper with the _MM_CMPINT_NE predicate.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    return detail::compare_int_avx512bw<A, T, _MM_CMPINT_NE>(self, other);
}
|
|
431
|
+
|
|
432
|
+
// sadd: lane-wise saturating addition for integral batches.  Results clamp
// to the type's min/max instead of wrapping.  AVX512BW has native 8/16-bit
// saturating adds (signed and unsigned); wider types defer to avx512dq.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    if (std::is_signed<T>::value)
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            return _mm512_adds_epi8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_adds_epi16(self, other);
        }
        else
        {
            return sadd(self, other, avx512dq {});
        }
    }
    else
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            // Unsigned saturation clamps to [0, 2^8-1] / [0, 2^16-1].
            return _mm512_adds_epu8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_adds_epu16(self, other);
        }
        else
        {
            return sadd(self, other, avx512dq {});
        }
    }
}
|
|
467
|
+
|
|
468
|
+
// select: lane-wise blend — take true_br where the condition mask bit is
// set, false_br elsewhere.  Note blend argument order: the intrinsic picks
// its *second* operand where the mask is set, hence (false_br, true_br).
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<avx512bw>) noexcept
{
    XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
    {
        return _mm512_mask_blend_epi8(cond, false_br.data, true_br.data);
    }
    else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
    {
        return _mm512_mask_blend_epi16(cond, false_br.data, true_br.data);
    }
    else
    {
        // 32/64-bit elements: defer to the narrower-arch implementation.
        return select(cond, true_br, false_br, avx512dq {});
    }
}
|
|
485
|
+
|
|
486
|
+
// slide_left: shift the whole 512-bit register left by N bytes, filling the
// vacated low bytes with zero.  This overload handles only N ≡ 2 (mod 4):
// shifts that are a whole number of 16-bit lanes but not of 32-bit lanes,
// implemented as a zero-masked 16-bit cross-lane permute.
template <size_t N, class A, class T, class = typename std::enable_if<(N & 3) == 2 && (N < 64)>::type>
XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<avx512bw>) noexcept
{
    static_assert((N & 3) == 2 && N < 64, "The AVX512F implementation may have a lower latency.");

    // Zero out the N/2 lowest 16-bit lanes that the shift vacates.
    __mmask32 mask = 0xFFFFFFFFu << ((N / 2) & 31);
    // Compile-time permutation indices moving each lane up by N/2 positions
    // (pattern generated by detail::make_slide_left_pattern — not visible here).
    auto slide_pattern = make_batch_constant<uint16_t, detail::make_slide_left_pattern<N / 2>, A>();
    return _mm512_maskz_permutexvar_epi16(mask, slide_pattern.as_batch(), x);
}
|
|
496
|
+
|
|
497
|
+
// slide_right: shift the whole 512-bit register right by N bytes, filling
// the vacated high bytes with zero.  Mirror of slide_left above; same
// N ≡ 2 (mod 4) restriction and same masked-permute technique.
template <size_t N, class A, class T, class = typename std::enable_if<(N & 3) == 2 && (N < 64)>::type>
XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<avx512bw>) noexcept
{
    static_assert((N & 3) == 2 && N < 64, "The AVX512F implementation may have a lower latency.");

    // Zero out the N/2 highest 16-bit lanes that the shift vacates.
    __mmask32 mask = 0xFFFFFFFFu >> ((N / 2) & 31);
    auto slide_pattern = make_batch_constant<uint16_t, detail::make_slide_right_pattern<N / 2>, A>();
    return _mm512_maskz_permutexvar_epi16(mask, slide_pattern.as_batch(), x);
}
|
|
507
|
+
|
|
508
|
+
// ssub: lane-wise saturating subtraction for integral batches.  Results
// clamp to the type's min/max instead of wrapping; mirrors `sadd` above
// with the subtracting intrinsics.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> ssub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    if (std::is_signed<T>::value)
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            return _mm512_subs_epi8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_subs_epi16(self, other);
        }
        else
        {
            return ssub(self, other, avx512dq {});
        }
    }
    else
    {
        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
        {
            // Unsigned saturating subtract clamps at zero.
            return _mm512_subs_epu8(self, other);
        }
        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
        {
            return _mm512_subs_epu16(self, other);
        }
        else
        {
            return ssub(self, other, avx512dq {});
        }
    }
}
|
|
543
|
+
|
|
544
|
+
// store: write a mask register out as one bool (0x00/0x01 byte) per lane.
template <class T, class A>
XSIMD_INLINE void store(batch_bool<T, A> const& self, bool* mem, requires_arch<avx512bw>) noexcept
{
    constexpr auto size = batch_bool<T, A>::size;
    // Expand the k-mask to a vector with byte i == 0x01 where bit i is set.
    __m512i bool_val = _mm512_maskz_set1_epi8(self.data, 0x01);
    // Restrict the store to the first `size` bytes so we never write past
    // mem[size).  The size >= 64 special case avoids the undefined
    // behavior of shifting a 64-bit value by 64.
    __mmask64 mask = size >= 64 ? ~(__mmask64)0 : (1ULL << size) - 1;
    _mm512_mask_storeu_epi8((void*)mem, mask, bool_val);
}
|
|
553
|
+
|
|
554
|
+
// sub: lane-wise (wrapping) subtraction for integral batches.  Native
// 8/16-bit instructions; wider types defer to the avx512dq overload.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
    {
        return _mm512_sub_epi8(self, other);
    }
    else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
    {
        return _mm512_sub_epi16(self, other);
    }
    else
    {
        return sub(self, other, avx512dq {});
    }
}
|
|
571
|
+
|
|
572
|
+
// swizzle (dynamic version): permute the 32 uint16 lanes of `self`
// according to runtime indices in `mask` (result[i] = self[mask[i]]).
template <class A>
XSIMD_INLINE batch<uint16_t, A> swizzle(batch<uint16_t, A> const& self, batch<uint16_t, A> mask, requires_arch<avx512bw>) noexcept
{
    // Cross-lane 16-bit permute; index operand comes first.
    return _mm512_permutexvar_epi16(mask, self);
}
|
|
578
|
+
|
|
579
|
+
// int16 variant: reuse the uint16 swizzle via bit-preserving casts, since
// the permutation is signedness-agnostic.
template <class A>
XSIMD_INLINE batch<int16_t, A> swizzle(batch<int16_t, A> const& self, batch<uint16_t, A> mask, requires_arch<avx512bw>) noexcept
{
    return bitwise_cast<int16_t>(swizzle(bitwise_cast<uint16_t>(self), mask, avx512bw {}));
}
|
|
584
|
+
|
|
585
|
+
// swizzle (static version): compile-time index list; materialize it as a
// batch and forward to the dynamic overload above.
template <class A, uint16_t... Vs>
XSIMD_INLINE batch<uint16_t, A> swizzle(batch<uint16_t, A> const& self, batch_constant<uint16_t, A, Vs...> mask, requires_arch<avx512bw>) noexcept
{
    return swizzle(self, mask.as_batch(), avx512bw {});
}
|
|
591
|
+
|
|
592
|
+
// int16 static variant: forwards to the dynamic int16 overload, which
// handles the sign-preserving casts.
template <class A, uint16_t... Vs>
XSIMD_INLINE batch<int16_t, A> swizzle(batch<int16_t, A> const& self, batch_constant<uint16_t, A, Vs...> mask, requires_arch<avx512bw>) noexcept
{
    return swizzle(self, mask.as_batch(), avx512bw {});
}
|
|
597
|
+
|
|
598
|
+
// zip_hi: interleave the upper halves of self/other element-wise.
// The unpack intrinsics interleave *within each 128-bit sub-lane*, so the
// four quarters must be reshuffled afterwards to form a whole-register zip.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> zip_hi(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    __m512i lo, hi;
    XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
    {
        lo = _mm512_unpacklo_epi8(self, other);
        hi = _mm512_unpackhi_epi8(self, other);
    }
    else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
    {
        lo = _mm512_unpacklo_epi16(self, other);
        hi = _mm512_unpackhi_epi16(self, other);
    }
    else
    {
        // 32/64-bit elements are handled by the AVX512F kernel.
        return zip_hi(self, other, avx512f {});
    }
    // Assemble the result from 128-bit quarters as {lo[2], hi[2], lo[3], hi[3]}:
    // the per-sublane unpack results for the upper half of the inputs.
    return _mm512_inserti32x4(
        _mm512_inserti32x4(
            _mm512_inserti32x4(hi, _mm512_extracti32x4_epi32(lo, 2), 0),
            _mm512_extracti32x4_epi32(lo, 3),
            2),
        _mm512_extracti32x4_epi32(hi, 2),
        1);
}
|
|
625
|
+
|
|
626
|
+
// zip_lo: interleave the lower halves of self/other element-wise.
// Mirror of zip_hi above: per-128-bit-sublane unpacks followed by a
// quarter reshuffle.
template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
XSIMD_INLINE batch<T, A> zip_lo(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx512bw>) noexcept
{
    __m512i lo, hi;
    XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
    {
        lo = _mm512_unpacklo_epi8(self, other);
        hi = _mm512_unpackhi_epi8(self, other);
    }
    else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
    {
        lo = _mm512_unpacklo_epi16(self, other);
        hi = _mm512_unpackhi_epi16(self, other);
    }
    else
    {
        // 32/64-bit elements are handled by the AVX512F kernel.
        return zip_lo(self, other, avx512f {});
    }
    // Assemble the result from 128-bit quarters as {lo[0], hi[0], lo[1], hi[1]}:
    // the per-sublane unpack results for the lower half of the inputs.
    return _mm512_inserti32x4(
        _mm512_inserti32x4(
            _mm512_inserti32x4(lo, _mm512_extracti32x4_epi32(hi, 0), 1),
            _mm512_extracti32x4_epi32(hi, 1),
            3),
        _mm512_extracti32x4_epi32(lo, 1),
        2);
}
|
|
653
|
+
}
|
|
654
|
+
}
|
|
655
|
+
|
|
656
|
+
#endif
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
/***************************************************************************
|
|
2
|
+
* Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
|
|
3
|
+
* Martin Renou *
|
|
4
|
+
* Copyright (c) QuantStack *
|
|
5
|
+
* Copyright (c) Serge Guelton *
|
|
6
|
+
* *
|
|
7
|
+
* Distributed under the terms of the BSD 3-Clause License. *
|
|
8
|
+
* *
|
|
9
|
+
* The full license is in the file LICENSE, distributed with this software. *
|
|
10
|
+
****************************************************************************/
|
|
11
|
+
|
|
12
|
+
#ifndef XSIMD_AVX512CD_HPP
|
|
13
|
+
#define XSIMD_AVX512CD_HPP
|
|
14
|
+
|
|
15
|
+
#include "../types/xsimd_avx512cd_register.hpp"
|
|
16
|
+
|
|
17
|
+
namespace xsimd
{

    namespace kernel
    {
        // Placeholder: no AVX512CD-specific kernel specializations are
        // defined yet; the conflict-detection register type is still
        // declared via the include above so the architecture participates
        // in dispatch.
        // Nothing there yet.

    }

}
|
|
27
|
+
|
|
28
|
+
#endif
|