sequenzo-0.1.31-cp310-cp310-macosx_10_9_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _sequenzo_fastcluster.cpython-310-darwin.so +0 -0
- sequenzo/__init__.py +349 -0
- sequenzo/big_data/__init__.py +12 -0
- sequenzo/big_data/clara/__init__.py +26 -0
- sequenzo/big_data/clara/clara.py +476 -0
- sequenzo/big_data/clara/utils/__init__.py +27 -0
- sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
- sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
- sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
- sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
- sequenzo/big_data/clara/visualization.py +88 -0
- sequenzo/clustering/KMedoids.py +178 -0
- sequenzo/clustering/__init__.py +30 -0
- sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
- sequenzo/clustering/hierarchical_clustering.py +1256 -0
- sequenzo/clustering/sequenzo_fastcluster/fastcluster.py +495 -0
- sequenzo/clustering/sequenzo_fastcluster/src/fastcluster.cpp +1877 -0
- sequenzo/clustering/sequenzo_fastcluster/src/fastcluster_python.cpp +1264 -0
- sequenzo/clustering/src/KMedoid.cpp +263 -0
- sequenzo/clustering/src/PAM.cpp +237 -0
- sequenzo/clustering/src/PAMonce.cpp +265 -0
- sequenzo/clustering/src/cluster_quality.cpp +496 -0
- sequenzo/clustering/src/cluster_quality.h +128 -0
- sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
- sequenzo/clustering/src/module.cpp +228 -0
- sequenzo/clustering/src/weightedinertia.cpp +111 -0
- sequenzo/clustering/utils/__init__.py +27 -0
- sequenzo/clustering/utils/disscenter.py +122 -0
- sequenzo/data_preprocessing/__init__.py +22 -0
- sequenzo/data_preprocessing/helpers.py +303 -0
- sequenzo/datasets/__init__.py +41 -0
- sequenzo/datasets/biofam.csv +2001 -0
- sequenzo/datasets/biofam_child_domain.csv +2001 -0
- sequenzo/datasets/biofam_left_domain.csv +2001 -0
- sequenzo/datasets/biofam_married_domain.csv +2001 -0
- sequenzo/datasets/chinese_colonial_territories.csv +12 -0
- sequenzo/datasets/country_co2_emissions.csv +194 -0
- sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
- sequenzo/datasets/country_gdp_per_capita.csv +194 -0
- sequenzo/datasets/dyadic_children.csv +61 -0
- sequenzo/datasets/dyadic_parents.csv +61 -0
- sequenzo/datasets/mvad.csv +713 -0
- sequenzo/datasets/pairfam_activity_by_month.csv +1028 -0
- sequenzo/datasets/pairfam_activity_by_year.csv +1028 -0
- sequenzo/datasets/pairfam_family_by_month.csv +1028 -0
- sequenzo/datasets/pairfam_family_by_year.csv +1028 -0
- sequenzo/datasets/political_science_aid_shock.csv +166 -0
- sequenzo/datasets/political_science_donor_fragmentation.csv +157 -0
- sequenzo/define_sequence_data.py +1400 -0
- sequenzo/dissimilarity_measures/__init__.py +31 -0
- sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/get_distance_matrix.py +762 -0
- sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +246 -0
- sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
- sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
- sequenzo/dissimilarity_measures/src/LCPspellDistance.cpp +215 -0
- sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
- sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
- sequenzo/dissimilarity_measures/src/__init__.py +0 -0
- sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
- sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
- sequenzo/dissimilarity_measures/src/module.cpp +40 -0
- sequenzo/dissimilarity_measures/src/setup.py +30 -0
- sequenzo/dissimilarity_measures/src/utils.h +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
- sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
- sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
- sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
- sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
- sequenzo/multidomain/__init__.py +23 -0
- sequenzo/multidomain/association_between_domains.py +311 -0
- sequenzo/multidomain/cat.py +597 -0
- sequenzo/multidomain/combt.py +519 -0
- sequenzo/multidomain/dat.py +81 -0
- sequenzo/multidomain/idcd.py +139 -0
- sequenzo/multidomain/linked_polyad.py +292 -0
- sequenzo/openmp_setup.py +233 -0
- sequenzo/prefix_tree/__init__.py +62 -0
- sequenzo/prefix_tree/hub.py +114 -0
- sequenzo/prefix_tree/individual_level_indicators.py +1321 -0
- sequenzo/prefix_tree/spell_individual_level_indicators.py +580 -0
- sequenzo/prefix_tree/spell_level_indicators.py +297 -0
- sequenzo/prefix_tree/system_level_indicators.py +544 -0
- sequenzo/prefix_tree/utils.py +54 -0
- sequenzo/seqhmm/__init__.py +95 -0
- sequenzo/seqhmm/advanced_optimization.py +305 -0
- sequenzo/seqhmm/bootstrap.py +411 -0
- sequenzo/seqhmm/build_hmm.py +142 -0
- sequenzo/seqhmm/build_mhmm.py +136 -0
- sequenzo/seqhmm/build_nhmm.py +121 -0
- sequenzo/seqhmm/fit_mhmm.py +62 -0
- sequenzo/seqhmm/fit_model.py +61 -0
- sequenzo/seqhmm/fit_nhmm.py +76 -0
- sequenzo/seqhmm/formulas.py +289 -0
- sequenzo/seqhmm/forward_backward_nhmm.py +276 -0
- sequenzo/seqhmm/gradients_nhmm.py +306 -0
- sequenzo/seqhmm/hmm.py +291 -0
- sequenzo/seqhmm/mhmm.py +314 -0
- sequenzo/seqhmm/model_comparison.py +238 -0
- sequenzo/seqhmm/multichannel_em.py +282 -0
- sequenzo/seqhmm/multichannel_utils.py +138 -0
- sequenzo/seqhmm/nhmm.py +270 -0
- sequenzo/seqhmm/nhmm_utils.py +191 -0
- sequenzo/seqhmm/predict.py +137 -0
- sequenzo/seqhmm/predict_mhmm.py +142 -0
- sequenzo/seqhmm/simulate.py +878 -0
- sequenzo/seqhmm/utils.py +218 -0
- sequenzo/seqhmm/visualization.py +910 -0
- sequenzo/sequence_characteristics/__init__.py +40 -0
- sequenzo/sequence_characteristics/complexity_index.py +49 -0
- sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
- sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
- sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
- sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
- sequenzo/sequence_characteristics/turbulence.py +155 -0
- sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
- sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
- sequenzo/suffix_tree/__init__.py +66 -0
- sequenzo/suffix_tree/hub.py +114 -0
- sequenzo/suffix_tree/individual_level_indicators.py +1679 -0
- sequenzo/suffix_tree/spell_individual_level_indicators.py +493 -0
- sequenzo/suffix_tree/spell_level_indicators.py +248 -0
- sequenzo/suffix_tree/system_level_indicators.py +535 -0
- sequenzo/suffix_tree/utils.py +56 -0
- sequenzo/version_check.py +283 -0
- sequenzo/visualization/__init__.py +29 -0
- sequenzo/visualization/plot_mean_time.py +222 -0
- sequenzo/visualization/plot_modal_state.py +276 -0
- sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
- sequenzo/visualization/plot_relative_frequency.py +405 -0
- sequenzo/visualization/plot_sequence_index.py +1175 -0
- sequenzo/visualization/plot_single_medoid.py +153 -0
- sequenzo/visualization/plot_state_distribution.py +651 -0
- sequenzo/visualization/plot_transition_matrix.py +190 -0
- sequenzo/visualization/utils/__init__.py +23 -0
- sequenzo/visualization/utils/utils.py +310 -0
- sequenzo/with_event_history_analysis/__init__.py +35 -0
- sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
- sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
- sequenzo-0.1.31.dist-info/METADATA +286 -0
- sequenzo-0.1.31.dist-info/RECORD +299 -0
- sequenzo-0.1.31.dist-info/WHEEL +5 -0
- sequenzo-0.1.31.dist-info/licenses/LICENSE +28 -0
- sequenzo-0.1.31.dist-info/top_level.txt +2 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp
@@ -0,0 +1,67 @@
+/***************************************************************************
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+ * Martin Renou                                                             *
+ * Copyright (c) QuantStack                                                 *
+ * Copyright (c) Serge Guelton                                              *
+ *                                                                          *
+ * Distributed under the terms of the BSD 3-Clause License.                 *
+ *                                                                          *
+ * The full license is in the file LICENSE, distributed with this software. *
+ ****************************************************************************/
+
+#ifndef XSIMD_SSE3_HPP
+#define XSIMD_SSE3_HPP
+
+#include "../types/xsimd_sse3_register.hpp"
+#include <type_traits>
+
+namespace xsimd
+{
+
+    namespace kernel
+    {
+        using namespace types;
+
+        // haddp
+        template <class A>
+        XSIMD_INLINE batch<float, A> haddp(batch<float, A> const* row, requires_arch<sse3>) noexcept
+        {
+            return _mm_hadd_ps(_mm_hadd_ps(row[0], row[1]),
+                               _mm_hadd_ps(row[2], row[3]));
+        }
+        template <class A>
+        XSIMD_INLINE batch<double, A> haddp(batch<double, A> const* row, requires_arch<sse3>) noexcept
+        {
+            return _mm_hadd_pd(row[0], row[1]);
+        }
+
+        // load_unaligned
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> load_unaligned(T const* mem, convert<T>, requires_arch<sse3>) noexcept
+        {
+            return _mm_lddqu_si128((__m128i const*)mem);
+        }
+
+        // reduce_add
+        template <class A>
+        XSIMD_INLINE float reduce_add(batch<float, A> const& self, requires_arch<sse3>) noexcept
+        {
+            __m128 tmp0 = _mm_hadd_ps(self, self);
+            __m128 tmp1 = _mm_hadd_ps(tmp0, tmp0);
+            return _mm_cvtss_f32(tmp1);
+        }
+
+        // reduce_mul
+        template <class A>
+        XSIMD_INLINE float reduce_mul(batch<float, A> const& self, requires_arch<sse3>) noexcept
+        {
+            __m128 tmp1 = _mm_mul_ps(self, _mm_movehl_ps(self, self));
+            __m128 tmp2 = _mm_mul_ps(tmp1, _mm_movehdup_ps(tmp1));
+            return _mm_cvtss_f32(tmp2);
+        }
+
+    }
+
+}
+
+#endif
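These kernels are not called directly; they sit behind xsimd's public API, which dispatches on the architecture tag. A minimal usage sketch (mine, not shipped in this wheel; assumes a toolchain with SSE3 enabled, e.g. -msse3):

// Hypothetical caller, not part of sequenzo: reduces a 4-lane float batch
// through xsimd's public API; on an SSE3 target this dispatches to the
// hadd-based reduce_add/reduce_mul kernels shown above.
#include <cstdio>
#include "xsimd/xsimd.hpp"

int main()
{
    alignas(16) float data[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    auto b = xsimd::batch<float, xsimd::sse3>::load_aligned(data);
    std::printf("sum = %f\n", xsimd::reduce_add(b));     // 10.0
    std::printf("product = %f\n", xsimd::reduce_mul(b)); // 24.0
}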
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp
@@ -0,0 +1,339 @@
+/***************************************************************************
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+ * Martin Renou                                                             *
+ * Copyright (c) QuantStack                                                 *
+ * Copyright (c) Serge Guelton                                              *
+ *                                                                          *
+ * Distributed under the terms of the BSD 3-Clause License.                 *
+ *                                                                          *
+ * The full license is in the file LICENSE, distributed with this software. *
+ ****************************************************************************/
+
+#ifndef XSIMD_SSE4_1_HPP
+#define XSIMD_SSE4_1_HPP
+
+#include <type_traits>
+
+#include "../types/xsimd_sse4_1_register.hpp"
+
+namespace xsimd
+{
+
+    namespace kernel
+    {
+        using namespace types;
+        // any
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE bool any(batch<T, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return !_mm_testz_si128(self, self);
+        }
+        // ceil
+        template <class A>
+        XSIMD_INLINE batch<float, A> ceil(batch<float, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_ceil_ps(self);
+        }
+        template <class A>
+        XSIMD_INLINE batch<double, A> ceil(batch<double, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_ceil_pd(self);
+        }
+
+        // fast_cast
+        namespace detail
+        {
+            template <class A>
+            XSIMD_INLINE batch<double, A> fast_cast(batch<int64_t, A> const& x, batch<double, A> const&, requires_arch<sse4_1>) noexcept
+            {
+                // from https://stackoverflow.com/questions/41144668/how-to-efficiently-perform-double-int64-conversions-with-sse-avx
+                __m128i xH = _mm_srai_epi32(x, 16);
+                xH = _mm_blend_epi16(xH, _mm_setzero_si128(), 0x33);
+                xH = _mm_add_epi64(xH, _mm_castpd_si128(_mm_set1_pd(442721857769029238784.))); // 3*2^67
+                __m128i xL = _mm_blend_epi16(x, _mm_castpd_si128(_mm_set1_pd(0x0010000000000000)), 0x88); // 2^52
+                __m128d f = _mm_sub_pd(_mm_castsi128_pd(xH), _mm_set1_pd(442726361368656609280.)); // 3*2^67 + 2^52
+                return _mm_add_pd(f, _mm_castsi128_pd(xL));
+            }
+
+            template <class A>
+            XSIMD_INLINE batch<double, A> fast_cast(batch<uint64_t, A> const& x, batch<double, A> const&, requires_arch<sse4_1>) noexcept
+            {
+                // from https://stackoverflow.com/questions/41144668/how-to-efficiently-perform-double-int64-conversions-with-sse-avx
+                __m128i xH = _mm_srli_epi64(x, 32);
+                xH = _mm_or_si128(xH, _mm_castpd_si128(_mm_set1_pd(19342813113834066795298816.))); // 2^84
+                __m128i xL = _mm_blend_epi16(x, _mm_castpd_si128(_mm_set1_pd(0x0010000000000000)), 0xcc); // 2^52
+                __m128d f = _mm_sub_pd(_mm_castsi128_pd(xH), _mm_set1_pd(19342813118337666422669312.)); // 2^84 + 2^52
+                return _mm_add_pd(f, _mm_castsi128_pd(xL));
+            }
+        }
+
+        // eq
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<sse4_1>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
+            {
+                return _mm_cmpeq_epi64(self, other);
+            }
+            else
+            {
+                return eq(self, other, ssse3 {});
+            }
+        }
+
+        // floor
+        template <class A>
+        XSIMD_INLINE batch<float, A> floor(batch<float, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_floor_ps(self);
+        }
+        template <class A>
+        XSIMD_INLINE batch<double, A> floor(batch<double, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_floor_pd(self);
+        }
+
+        // insert
+        template <class A, class T, size_t I, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I> pos, requires_arch<sse4_1>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+            {
+                return _mm_insert_epi8(self, val, I);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+            {
+                return _mm_insert_epi32(self, val, I);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
+            {
+#if (!defined(_MSC_VER) && __x86_64__) || (_MSC_VER > 1900 && defined(_M_X64))
+                return _mm_insert_epi64(self, val, I);
+#else
+                uint32_t lo, hi;
+                memcpy(&lo, (reinterpret_cast<uint32_t*>(&val)), sizeof(lo));
+                memcpy(&hi, (reinterpret_cast<uint32_t*>(&val)) + 1, sizeof(hi));
+                return _mm_insert_epi32(_mm_insert_epi32(self, lo, 2 * I), hi, 2 * I + 1);
+#endif
+            }
+            else
+            {
+                return insert(self, val, pos, ssse3 {});
+            }
+        }
+
+        // max
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> max(batch<T, A> const& self, batch<T, A> const& other, requires_arch<sse4_1>) noexcept
+        {
+            if (std::is_signed<T>::value)
+            {
+                XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+                {
+                    return _mm_max_epi8(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+                {
+                    return _mm_max_epi16(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+                {
+                    return _mm_max_epi32(self, other);
+                }
+                else
+                {
+                    return max(self, other, ssse3 {});
+                }
+            }
+            else
+            {
+                XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+                {
+                    return _mm_max_epu8(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+                {
+                    return _mm_max_epu16(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+                {
+                    return _mm_max_epu32(self, other);
+                }
+                else
+                {
+                    return max(self, other, ssse3 {});
+                }
+            }
+        }
+
+        // min
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> min(batch<T, A> const& self, batch<T, A> const& other, requires_arch<sse4_1>) noexcept
+        {
+            if (std::is_signed<T>::value)
+            {
+                XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+                {
+                    return _mm_min_epi8(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+                {
+                    return _mm_min_epi16(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+                {
+                    return _mm_min_epi32(self, other);
+                }
+                else
+                {
+                    return min(self, other, ssse3 {});
+                }
+            }
+            else
+            {
+                XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+                {
+                    return _mm_min_epu8(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+                {
+                    return _mm_min_epu16(self, other);
+                }
+                else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+                {
+                    return _mm_min_epu32(self, other);
+                }
+                else
+                {
+                    return min(self, other, ssse3 {});
+                }
+            }
+        }
+
+        // mul
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<sse4_1>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+            {
+                return _mm_or_si128(
+                    _mm_and_si128(_mm_mullo_epi16(self, other), _mm_srli_epi16(_mm_cmpeq_epi8(self, self), 8)),
+                    _mm_slli_epi16(_mm_mullo_epi16(_mm_srli_epi16(self, 8), _mm_srli_epi16(other, 8)), 8));
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm_mullo_epi16(self, other);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+            {
+                return _mm_mullo_epi32(self, other);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
+            {
+                return _mm_add_epi64(
+                    _mm_mul_epu32(self, other),
+                    _mm_slli_epi64(
+                        _mm_add_epi64(
+                            _mm_mul_epu32(other, _mm_shuffle_epi32(self, _MM_SHUFFLE(2, 3, 0, 1))),
+                            _mm_mul_epu32(self, _mm_shuffle_epi32(other, _MM_SHUFFLE(2, 3, 0, 1)))),
+                        32));
+            }
+            else
+            {
+                assert(false && "unsupported arch/op combination");
+                return {};
+            }
+        }
+
+        // nearbyint
+        template <class A>
+        XSIMD_INLINE batch<float, A> nearbyint(batch<float, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_round_ps(self, _MM_FROUND_TO_NEAREST_INT);
+        }
+        template <class A>
+        XSIMD_INLINE batch<double, A> nearbyint(batch<double, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_round_pd(self, _MM_FROUND_TO_NEAREST_INT);
+        }
+
+        // select
+        namespace detail
+        {
+            template <class T>
+            XSIMD_INLINE constexpr T interleave(T const& cond) noexcept
+            {
+                return (((cond * 0x0101010101010101ULL & 0x8040201008040201ULL) * 0x0102040810204081ULL >> 49) & 0x5555) | (((cond * 0x0101010101010101ULL & 0x8040201008040201ULL) * 0x0102040810204081ULL >> 48) & 0xAAAA);
+            }
+        }
+
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const& cond, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_blendv_epi8(false_br, true_br, cond);
+        }
+        template <class A>
+        XSIMD_INLINE batch<float, A> select(batch_bool<float, A> const& cond, batch<float, A> const& true_br, batch<float, A> const& false_br, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_blendv_ps(false_br, true_br, cond);
+        }
+        template <class A>
+        XSIMD_INLINE batch<double, A> select(batch_bool<double, A> const& cond, batch<double, A> const& true_br, batch<double, A> const& false_br, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_blendv_pd(false_br, true_br, cond);
+        }
+
+        template <class A, class T, bool... Values, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> select(batch_bool_constant<T, A, Values...> const&, batch<T, A> const& true_br, batch<T, A> const& false_br, requires_arch<sse4_1>) noexcept
+        {
+            constexpr int mask = batch_bool_constant<T, A, Values...>::mask();
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm_blend_epi16(false_br, true_br, mask);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+            {
+                constexpr int imask = detail::interleave(mask);
+                return _mm_blend_epi16(false_br, true_br, imask);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
+            {
+                constexpr int imask = detail::interleave(mask);
+                constexpr int imask2 = detail::interleave(imask);
+                return _mm_blend_epi16(false_br, true_br, imask2);
+            }
+            else
+            {
+                return select(batch_bool_constant<T, A, Values...>(), true_br, false_br, ssse3 {});
+            }
+        }
+        template <class A, bool... Values>
+        XSIMD_INLINE batch<float, A> select(batch_bool_constant<float, A, Values...> const&, batch<float, A> const& true_br, batch<float, A> const& false_br, requires_arch<sse4_1>) noexcept
+        {
+            constexpr int mask = batch_bool_constant<float, A, Values...>::mask();
+            return _mm_blend_ps(false_br, true_br, mask);
+        }
+        template <class A, bool... Values>
+        XSIMD_INLINE batch<double, A> select(batch_bool_constant<double, A, Values...> const&, batch<double, A> const& true_br, batch<double, A> const& false_br, requires_arch<sse4_1>) noexcept
+        {
+            constexpr int mask = batch_bool_constant<double, A, Values...>::mask();
+            return _mm_blend_pd(false_br, true_br, mask);
+        }
+
+        // trunc
+        template <class A>
+        XSIMD_INLINE batch<float, A> trunc(batch<float, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_round_ps(self, _MM_FROUND_TO_ZERO);
+        }
+        template <class A>
+        XSIMD_INLINE batch<double, A> trunc(batch<double, A> const& self, requires_arch<sse4_1>) noexcept
+        {
+            return _mm_round_pd(self, _MM_FROUND_TO_ZERO);
+        }
+
+    }
+
+}
+
+#endif
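The two fast_cast kernels use the exponent-bias trick from the linked Stack Overflow answer: each 32-bit half of the integer is planted in the mantissa of a double whose exponent field encodes 2^84 (respectively 2^52), so one subtraction of the combined bias 2^84 + 2^52 leaves exactly hi*2^32 + lo. A scalar analogue of the unsigned variant, as a sketch (mine, not library code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar analogue of the uint64 -> double kernel above: 0x433... is the bit
// pattern of 2^52 and 0x453... that of 2^84, so OR-ing the halves into the
// mantissas yields exactly 2^52 + lo and 2^84 + hi * 2^32.
double u64_to_double(uint64_t x)
{
    uint64_t hi_bits = 0x4530000000000000ULL | (x >> 32);
    uint64_t lo_bits = 0x4330000000000000ULL | (x & 0xFFFFFFFFULL);
    double hi, lo;
    std::memcpy(&hi, &hi_bits, sizeof hi);
    std::memcpy(&lo, &lo_bits, sizeof lo);
    return (hi - 19342813118337666422669312.0) + lo; // subtract 2^84 + 2^52
}

int main()
{
    std::printf("%.1f\n", u64_to_double(123456789012345ULL)); // 123456789012345.0
}

The signed variant differs only in how the high half is produced: an arithmetic shift plus the 3*2^67 constant absorbs the sign bit.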
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp
@@ -0,0 +1,44 @@
+/***************************************************************************
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+ * Martin Renou                                                             *
+ * Copyright (c) QuantStack                                                 *
+ * Copyright (c) Serge Guelton                                              *
+ *                                                                          *
+ * Distributed under the terms of the BSD 3-Clause License.                 *
+ *                                                                          *
+ * The full license is in the file LICENSE, distributed with this software. *
+ ****************************************************************************/
+
+#ifndef XSIMD_SSE4_2_HPP
+#define XSIMD_SSE4_2_HPP
+
+#include <limits>
+
+#include "../types/xsimd_sse4_2_register.hpp"
+
+namespace xsimd
+{
+
+    namespace kernel
+    {
+        using namespace types;
+
+        // lt
+        template <class A>
+        XSIMD_INLINE batch_bool<int64_t, A> lt(batch<int64_t, A> const& self, batch<int64_t, A> const& other, requires_arch<sse4_2>) noexcept
+        {
+            return _mm_cmpgt_epi64(other, self);
+        }
+        template <class A>
+        XSIMD_INLINE batch_bool<uint64_t, A> lt(batch<uint64_t, A> const& self, batch<uint64_t, A> const& other, requires_arch<sse4_2>) noexcept
+        {
+            auto xself = _mm_xor_si128(self, _mm_set1_epi64x(std::numeric_limits<int64_t>::lowest()));
+            auto xother = _mm_xor_si128(other, _mm_set1_epi64x(std::numeric_limits<int64_t>::lowest()));
+            return _mm_cmpgt_epi64(xother, xself);
+        }
+
+    }
+
+}
+
+#endif
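SSE4.2 only supplies a signed 64-bit greater-than, so the unsigned lt kernel first XORs both operands with the bit pattern of INT64_MIN (2^63): flipping the sign bit maps unsigned order onto signed order. The same identity in scalar form, as a sketch (mine; the unsigned-to-signed casts assume two's complement, guaranteed since C++20):

#include <cassert>
#include <cstdint>

// Scalar analogue of the unsigned lt kernel:
// a <u b  <=>  (a ^ 2^63) <s (b ^ 2^63).
bool u64_less(uint64_t a, uint64_t b)
{
    const uint64_t bias = UINT64_C(1) << 63; // same mask as _mm_set1_epi64x(lowest())
    return static_cast<int64_t>(a ^ bias) < static_cast<int64_t>(b ^ bias);
}

int main()
{
    assert(u64_less(1, 2));
    assert(u64_less(0, ~UINT64_C(0)));  // 0 < 2^64-1 as unsigned...
    assert(!u64_less(~UINT64_C(0), 0)); // ...which a plain signed compare would get wrong
}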
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp
@@ -0,0 +1,186 @@
+/***************************************************************************
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+ * Martin Renou                                                             *
+ * Copyright (c) QuantStack                                                 *
+ * Copyright (c) Serge Guelton                                              *
+ *                                                                          *
+ * Distributed under the terms of the BSD 3-Clause License.                 *
+ *                                                                          *
+ * The full license is in the file LICENSE, distributed with this software. *
+ ****************************************************************************/
+
+#ifndef XSIMD_SSSE3_HPP
+#define XSIMD_SSSE3_HPP
+
+#include <cstddef>
+#include <type_traits>
+
+#include "../types/xsimd_ssse3_register.hpp"
+#include "../types/xsimd_utils.hpp"
+
+namespace xsimd
+{
+
+    namespace kernel
+    {
+        using namespace types;
+
+        // abs
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> abs(batch<T, A> const& self, requires_arch<ssse3>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
+            {
+                return _mm_abs_epi8(self);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm_abs_epi16(self);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+            {
+                return _mm_abs_epi32(self);
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
+            {
+                return _mm_abs_epi64(self);
+            }
+            else
+            {
+                assert(false && "unsupported arch/op combination");
+                return {};
+            }
+        }
+
+        // extract_pair
+        namespace detail
+        {
+
+            template <class T, class A>
+            XSIMD_INLINE batch<T, A> extract_pair(batch<T, A> const&, batch<T, A> const& other, std::size_t, ::xsimd::detail::index_sequence<>) noexcept
+            {
+                return other;
+            }
+
+            template <class T, class A, std::size_t I, std::size_t... Is>
+            XSIMD_INLINE batch<T, A> extract_pair(batch<T, A> const& self, batch<T, A> const& other, std::size_t i, ::xsimd::detail::index_sequence<I, Is...>) noexcept
+            {
+                if (i == I)
+                {
+                    return _mm_alignr_epi8(self, other, sizeof(T) * I);
+                }
+                else
+                    return extract_pair(self, other, i, ::xsimd::detail::index_sequence<Is...>());
+            }
+        }
+
+        template <class A, class T, class _ = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> extract_pair(batch<T, A> const& self, batch<T, A> const& other, std::size_t i, requires_arch<ssse3>) noexcept
+        {
+            constexpr std::size_t size = batch<T, A>::size;
+            assert(i < size && "index in bounds");
+            return detail::extract_pair(self, other, i, ::xsimd::detail::make_index_sequence<size>());
+        }
+
+        // reduce_add
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE T reduce_add(batch<T, A> const& self, requires_arch<ssse3>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                __m128i tmp1 = _mm_hadd_epi16(self, self);
+                __m128i tmp2 = _mm_hadd_epi16(tmp1, tmp1);
+                __m128i tmp3 = _mm_hadd_epi16(tmp2, tmp2);
+                return _mm_cvtsi128_si32(tmp3) & 0xFFFF;
+            }
+            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
+            {
+                __m128i tmp1 = _mm_hadd_epi32(self, self);
+                __m128i tmp2 = _mm_hadd_epi32(tmp1, tmp1);
+                return _mm_cvtsi128_si32(tmp2);
+            }
+            else
+            {
+                return reduce_add(self, sse3 {});
+            }
+        }
+
+        // rotate_left
+        template <size_t N, class A>
+        XSIMD_INLINE batch<uint8_t, A> rotate_left(batch<uint8_t, A> const& self, requires_arch<ssse3>) noexcept
+        {
+            return _mm_alignr_epi8(self, self, N);
+        }
+        template <size_t N, class A>
+        XSIMD_INLINE batch<int8_t, A> rotate_left(batch<int8_t, A> const& self, requires_arch<ssse3>) noexcept
+        {
+            return bitwise_cast<int8_t>(rotate_left<N, A>(bitwise_cast<uint8_t>(self), ssse3 {}));
+        }
+
+        template <size_t N, class A>
+        XSIMD_INLINE batch<uint16_t, A> rotate_left(batch<uint16_t, A> const& self, requires_arch<ssse3>) noexcept
+        {
+            return _mm_alignr_epi8(self, self, 2 * N);
+        }
+        template <size_t N, class A>
+        XSIMD_INLINE batch<int16_t, A> rotate_left(batch<int16_t, A> const& self, requires_arch<ssse3>) noexcept
+        {
+            return bitwise_cast<int16_t>(rotate_left<N, A>(bitwise_cast<uint16_t>(self), ssse3 {}));
+        }
+
+        // swizzle (dynamic mask)
+        template <class A>
+        XSIMD_INLINE batch<uint8_t, A> swizzle(batch<uint8_t, A> const& self, batch<uint8_t, A> mask, requires_arch<ssse3>) noexcept
+        {
+            return _mm_shuffle_epi8(self, mask);
+        }
+        template <class A>
+        XSIMD_INLINE batch<int8_t, A> swizzle(batch<int8_t, A> const& self, batch<uint8_t, A> mask, requires_arch<ssse3>) noexcept
+        {
+            return _mm_shuffle_epi8(self, mask);
+        }
+
+        template <class A, class T, class IT>
+        XSIMD_INLINE typename std::enable_if<std::is_arithmetic<T>::value, batch<T, A>>::type
+        swizzle(batch<T, A> const& self, batch<IT, A> mask, requires_arch<ssse3>) noexcept
+        {
+            constexpr auto pikes = static_cast<as_unsigned_integer_t<T>>(0x0706050403020100ul);
+            constexpr auto comb = static_cast<as_unsigned_integer_t<T>>(0x0101010101010101ul * sizeof(T));
+            return bitwise_cast<T>(swizzle(bitwise_cast<uint8_t>(self), bitwise_cast<uint8_t>(mask * comb + pikes), ssse3 {}));
+        }
+
+        // swizzle (constant mask)
+        template <class A, uint16_t V0, uint16_t V1, uint16_t V2, uint16_t V3, uint16_t V4, uint16_t V5, uint16_t V6, uint16_t V7>
+        XSIMD_INLINE batch<uint16_t, A> swizzle(batch<uint16_t, A> const& self, batch_constant<uint16_t, A, V0, V1, V2, V3, V4, V5, V6, V7>, requires_arch<ssse3>) noexcept
+        {
+            constexpr batch_constant<uint8_t, A, 2 * V0, 2 * V0 + 1, 2 * V1, 2 * V1 + 1, 2 * V2, 2 * V2 + 1, 2 * V3, 2 * V3 + 1,
+                                     2 * V4, 2 * V4 + 1, 2 * V5, 2 * V5 + 1, 2 * V6, 2 * V6 + 1, 2 * V7, 2 * V7 + 1>
+                mask8;
+            return _mm_shuffle_epi8(self, mask8.as_batch());
+        }
+
+        template <class A, uint16_t V0, uint16_t V1, uint16_t V2, uint16_t V3, uint16_t V4, uint16_t V5, uint16_t V6, uint16_t V7>
+        XSIMD_INLINE batch<int16_t, A> swizzle(batch<int16_t, A> const& self, batch_constant<uint16_t, A, V0, V1, V2, V3, V4, V5, V6, V7> mask, requires_arch<ssse3>) noexcept
+        {
+            return bitwise_cast<int16_t>(swizzle(bitwise_cast<uint16_t>(self), mask, ssse3 {}));
+        }
+
+        template <class A, uint8_t V0, uint8_t V1, uint8_t V2, uint8_t V3, uint8_t V4, uint8_t V5, uint8_t V6, uint8_t V7,
+                  uint8_t V8, uint8_t V9, uint8_t V10, uint8_t V11, uint8_t V12, uint8_t V13, uint8_t V14, uint8_t V15>
+        XSIMD_INLINE batch<uint8_t, A> swizzle(batch<uint8_t, A> const& self, batch_constant<uint8_t, A, V0, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14, V15> mask, requires_arch<ssse3>) noexcept
+        {
+            return swizzle(self, mask.as_batch(), ssse3 {});
+        }
+
+        template <class A, uint8_t V0, uint8_t V1, uint8_t V2, uint8_t V3, uint8_t V4, uint8_t V5, uint8_t V6, uint8_t V7,
+                  uint8_t V8, uint8_t V9, uint8_t V10, uint8_t V11, uint8_t V12, uint8_t V13, uint8_t V14, uint8_t V15>
+        XSIMD_INLINE batch<int8_t, A> swizzle(batch<int8_t, A> const& self, batch_constant<uint8_t, A, V0, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14, V15> mask, requires_arch<ssse3>) noexcept
+        {
+            return swizzle(self, mask.as_batch(), ssse3 {});
+        }
+
+    }
+
+}
+
+#endif
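The generic swizzle above funnels every lane width through a single _mm_shuffle_epi8 by expanding lane indices into byte indices: with comb = 0x01...01 * sizeof(T) and pikes = 0x0706050403020100 truncated to T, the expression mask * comb + pikes turns lane index m into the little-endian byte offsets sizeof(T)*m + {0, 1, ...}. A scalar sketch of that expansion for 4-byte lanes (mine, not library code):

#include <cstdint>
#include <cstdio>

int main()
{
    // For uint32_t lanes: comb replicates sizeof(T) = 4 into every byte and
    // pikes holds the in-lane byte offsets 0..3 (little-endian).
    uint32_t lane_index = 2; // pick source lane 2
    uint32_t comb = 0x04040404u;  // 0x01010101 * sizeof(uint32_t)
    uint32_t pikes = 0x03020100u;
    uint32_t byte_mask = lane_index * comb + pikes;
    // bytes of byte_mask, low to high: 8, 9, 10, 11 -> exactly source lane 2
    std::printf("0x%08X\n", byte_mask); // prints 0x0B0A0908
}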