sequenzo-0.1.31-cp310-cp310-macosx_10_9_x86_64.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- _sequenzo_fastcluster.cpython-310-darwin.so +0 -0
- sequenzo/__init__.py +349 -0
- sequenzo/big_data/__init__.py +12 -0
- sequenzo/big_data/clara/__init__.py +26 -0
- sequenzo/big_data/clara/clara.py +476 -0
- sequenzo/big_data/clara/utils/__init__.py +27 -0
- sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
- sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
- sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
- sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
- sequenzo/big_data/clara/visualization.py +88 -0
- sequenzo/clustering/KMedoids.py +178 -0
- sequenzo/clustering/__init__.py +30 -0
- sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
- sequenzo/clustering/hierarchical_clustering.py +1256 -0
- sequenzo/clustering/sequenzo_fastcluster/fastcluster.py +495 -0
- sequenzo/clustering/sequenzo_fastcluster/src/fastcluster.cpp +1877 -0
- sequenzo/clustering/sequenzo_fastcluster/src/fastcluster_python.cpp +1264 -0
- sequenzo/clustering/src/KMedoid.cpp +263 -0
- sequenzo/clustering/src/PAM.cpp +237 -0
- sequenzo/clustering/src/PAMonce.cpp +265 -0
- sequenzo/clustering/src/cluster_quality.cpp +496 -0
- sequenzo/clustering/src/cluster_quality.h +128 -0
- sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
- sequenzo/clustering/src/module.cpp +228 -0
- sequenzo/clustering/src/weightedinertia.cpp +111 -0
- sequenzo/clustering/utils/__init__.py +27 -0
- sequenzo/clustering/utils/disscenter.py +122 -0
- sequenzo/data_preprocessing/__init__.py +22 -0
- sequenzo/data_preprocessing/helpers.py +303 -0
- sequenzo/datasets/__init__.py +41 -0
- sequenzo/datasets/biofam.csv +2001 -0
- sequenzo/datasets/biofam_child_domain.csv +2001 -0
- sequenzo/datasets/biofam_left_domain.csv +2001 -0
- sequenzo/datasets/biofam_married_domain.csv +2001 -0
- sequenzo/datasets/chinese_colonial_territories.csv +12 -0
- sequenzo/datasets/country_co2_emissions.csv +194 -0
- sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
- sequenzo/datasets/country_gdp_per_capita.csv +194 -0
- sequenzo/datasets/dyadic_children.csv +61 -0
- sequenzo/datasets/dyadic_parents.csv +61 -0
- sequenzo/datasets/mvad.csv +713 -0
- sequenzo/datasets/pairfam_activity_by_month.csv +1028 -0
- sequenzo/datasets/pairfam_activity_by_year.csv +1028 -0
- sequenzo/datasets/pairfam_family_by_month.csv +1028 -0
- sequenzo/datasets/pairfam_family_by_year.csv +1028 -0
- sequenzo/datasets/political_science_aid_shock.csv +166 -0
- sequenzo/datasets/political_science_donor_fragmentation.csv +157 -0
- sequenzo/define_sequence_data.py +1400 -0
- sequenzo/dissimilarity_measures/__init__.py +31 -0
- sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/get_distance_matrix.py +762 -0
- sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +246 -0
- sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
- sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
- sequenzo/dissimilarity_measures/src/LCPspellDistance.cpp +215 -0
- sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
- sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
- sequenzo/dissimilarity_measures/src/__init__.py +0 -0
- sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
- sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
- sequenzo/dissimilarity_measures/src/module.cpp +40 -0
- sequenzo/dissimilarity_measures/src/setup.py +30 -0
- sequenzo/dissimilarity_measures/src/utils.h +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
- sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
- sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
- sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
- sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
- sequenzo/multidomain/__init__.py +23 -0
- sequenzo/multidomain/association_between_domains.py +311 -0
- sequenzo/multidomain/cat.py +597 -0
- sequenzo/multidomain/combt.py +519 -0
- sequenzo/multidomain/dat.py +81 -0
- sequenzo/multidomain/idcd.py +139 -0
- sequenzo/multidomain/linked_polyad.py +292 -0
- sequenzo/openmp_setup.py +233 -0
- sequenzo/prefix_tree/__init__.py +62 -0
- sequenzo/prefix_tree/hub.py +114 -0
- sequenzo/prefix_tree/individual_level_indicators.py +1321 -0
- sequenzo/prefix_tree/spell_individual_level_indicators.py +580 -0
- sequenzo/prefix_tree/spell_level_indicators.py +297 -0
- sequenzo/prefix_tree/system_level_indicators.py +544 -0
- sequenzo/prefix_tree/utils.py +54 -0
- sequenzo/seqhmm/__init__.py +95 -0
- sequenzo/seqhmm/advanced_optimization.py +305 -0
- sequenzo/seqhmm/bootstrap.py +411 -0
- sequenzo/seqhmm/build_hmm.py +142 -0
- sequenzo/seqhmm/build_mhmm.py +136 -0
- sequenzo/seqhmm/build_nhmm.py +121 -0
- sequenzo/seqhmm/fit_mhmm.py +62 -0
- sequenzo/seqhmm/fit_model.py +61 -0
- sequenzo/seqhmm/fit_nhmm.py +76 -0
- sequenzo/seqhmm/formulas.py +289 -0
- sequenzo/seqhmm/forward_backward_nhmm.py +276 -0
- sequenzo/seqhmm/gradients_nhmm.py +306 -0
- sequenzo/seqhmm/hmm.py +291 -0
- sequenzo/seqhmm/mhmm.py +314 -0
- sequenzo/seqhmm/model_comparison.py +238 -0
- sequenzo/seqhmm/multichannel_em.py +282 -0
- sequenzo/seqhmm/multichannel_utils.py +138 -0
- sequenzo/seqhmm/nhmm.py +270 -0
- sequenzo/seqhmm/nhmm_utils.py +191 -0
- sequenzo/seqhmm/predict.py +137 -0
- sequenzo/seqhmm/predict_mhmm.py +142 -0
- sequenzo/seqhmm/simulate.py +878 -0
- sequenzo/seqhmm/utils.py +218 -0
- sequenzo/seqhmm/visualization.py +910 -0
- sequenzo/sequence_characteristics/__init__.py +40 -0
- sequenzo/sequence_characteristics/complexity_index.py +49 -0
- sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
- sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
- sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
- sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
- sequenzo/sequence_characteristics/turbulence.py +155 -0
- sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
- sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
- sequenzo/suffix_tree/__init__.py +66 -0
- sequenzo/suffix_tree/hub.py +114 -0
- sequenzo/suffix_tree/individual_level_indicators.py +1679 -0
- sequenzo/suffix_tree/spell_individual_level_indicators.py +493 -0
- sequenzo/suffix_tree/spell_level_indicators.py +248 -0
- sequenzo/suffix_tree/system_level_indicators.py +535 -0
- sequenzo/suffix_tree/utils.py +56 -0
- sequenzo/version_check.py +283 -0
- sequenzo/visualization/__init__.py +29 -0
- sequenzo/visualization/plot_mean_time.py +222 -0
- sequenzo/visualization/plot_modal_state.py +276 -0
- sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
- sequenzo/visualization/plot_relative_frequency.py +405 -0
- sequenzo/visualization/plot_sequence_index.py +1175 -0
- sequenzo/visualization/plot_single_medoid.py +153 -0
- sequenzo/visualization/plot_state_distribution.py +651 -0
- sequenzo/visualization/plot_transition_matrix.py +190 -0
- sequenzo/visualization/utils/__init__.py +23 -0
- sequenzo/visualization/utils/utils.py +310 -0
- sequenzo/with_event_history_analysis/__init__.py +35 -0
- sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
- sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
- sequenzo-0.1.31.dist-info/METADATA +286 -0
- sequenzo-0.1.31.dist-info/RECORD +299 -0
- sequenzo-0.1.31.dist-info/WHEEL +5 -0
- sequenzo-0.1.31.dist-info/licenses/LICENSE +28 -0
- sequenzo-0.1.31.dist-info/top_level.txt +2 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp

@@ -0,0 +1,323 @@

/***************************************************************************
* Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
* Martin Renou *
* Copyright (c) QuantStack *
* Copyright (c) Serge Guelton *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#ifndef XSIMD_COMMON_DETAILS_HPP
#define XSIMD_COMMON_DETAILS_HPP

#include <complex>

#include "../../math/xsimd_rem_pio2.hpp"
#include "../../types/xsimd_common_arch.hpp"
#include "../../types/xsimd_utils.hpp"
#include "../xsimd_constants.hpp"

namespace xsimd
{
    // Forward declaration. Should we put them in a separate file?
    template <class T, class A>
    XSIMD_INLINE batch<T, A> abs(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> abs(batch<std::complex<T>, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE bool any(batch_bool<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> atan2(batch<T, A> const& self, batch<T, A> const& other) noexcept;
    template <class A, class T_out, class T_in>
    XSIMD_INLINE batch<T_out, A> batch_cast(batch<T_in, A> const&, batch<T_out, A> const& out) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> bitofsign(batch<T, A> const& self) noexcept;
    template <class T_out, class T_in, class A>
    XSIMD_INLINE batch<T_out, A> bitwise_cast(batch<T_in, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> cos(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> cosh(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> exp(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> fma(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> fms(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> fmas(batch<T, A> const& x, batch<T, A> const& y, batch<T, A> const& z) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> frexp(const batch<T, A>& x, const batch<as_integer_t<T>, A>& e) noexcept;
    template <class T, class A, uint64_t... Coefs>
    XSIMD_INLINE batch<T, A> horner(const batch<T, A>& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> hypot(const batch<T, A>& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch_bool<T, A> is_even(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch_bool<T, A> is_flint(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch_bool<T, A> is_odd(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE typename batch<T, A>::batch_bool_type isinf(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE typename batch<T, A>::batch_bool_type isfinite(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE typename batch<T, A>::batch_bool_type isnan(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> ldexp(const batch<T, A>& x, const batch<as_integer_t<T>, A>& e) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> log(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> nearbyint(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<as_integer_t<T>, A> nearbyint_as_int(const batch<T, A>& x) noexcept;
    template <class T, class A>
    XSIMD_INLINE T reduce_add(batch<T, A> const&) noexcept;
    template <class T, class A>
    XSIMD_INLINE T reduce_mul(batch<T, A> const&) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> select(batch_bool<T, A> const&, batch<T, A> const&, batch<T, A> const&) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<std::complex<T>, A> select(batch_bool<T, A> const&, batch<std::complex<T>, A> const&, batch<std::complex<T>, A> const&) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> sign(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> signnz(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> sin(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> sinh(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE std::pair<batch<T, A>, batch<T, A>> sincos(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> sqrt(batch<T, A> const& self) noexcept;
    template <class T, class A, class Vt, Vt... Values>
    XSIMD_INLINE typename std::enable_if<std::is_arithmetic<T>::value, batch<T, A>>::type
    swizzle(batch<T, A> const& x, batch_constant<Vt, A, Values...> mask) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> tan(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<as_float_t<T>, A> to_float(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<as_integer_t<T>, A> to_int(batch<T, A> const& self) noexcept;
    template <class T, class A>
    XSIMD_INLINE batch<T, A> trunc(batch<T, A> const& self) noexcept;

    namespace kernel
    {

        namespace detail
        {
            template <class F, class A, class T, class... Batches>
            XSIMD_INLINE batch<T, A> apply(F&& func, batch<T, A> const& self, batch<T, A> const& other) noexcept
            {
                constexpr std::size_t size = batch<T, A>::size;
                alignas(A::alignment()) T self_buffer[size];
                alignas(A::alignment()) T other_buffer[size];
                self.store_aligned(&self_buffer[0]);
                other.store_aligned(&other_buffer[0]);
                for (std::size_t i = 0; i < size; ++i)
                {
                    self_buffer[i] = func(self_buffer[i], other_buffer[i]);
                }
                return batch<T, A>::load_aligned(self_buffer);
            }

            template <class U, class F, class A, class T>
            XSIMD_INLINE batch<U, A> apply_transform(F&& func, batch<T, A> const& self) noexcept
            {
                static_assert(batch<T, A>::size == batch<U, A>::size,
                              "Source and destination sizes must match");
                constexpr std::size_t src_size = batch<T, A>::size;
                constexpr std::size_t dest_size = batch<U, A>::size;
                alignas(A::alignment()) T self_buffer[src_size];
                alignas(A::alignment()) U other_buffer[dest_size];
                self.store_aligned(&self_buffer[0]);
                for (std::size_t i = 0; i < src_size; ++i)
                {
                    other_buffer[i] = func(self_buffer[i]);
                }
                return batch<U, A>::load_aligned(other_buffer);
            }
        }

        // some common fast_cast conversion
        namespace detail
        {
            template <class A>
            XSIMD_INLINE batch<uint8_t, A> fast_cast(batch<int8_t, A> const& self, batch<uint8_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<uint8_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<uint16_t, A> fast_cast(batch<int16_t, A> const& self, batch<uint16_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<uint16_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<uint32_t, A> fast_cast(batch<int32_t, A> const& self, batch<uint32_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<uint32_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<uint64_t, A> fast_cast(batch<int64_t, A> const& self, batch<uint64_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<uint64_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<int8_t, A> fast_cast(batch<uint8_t, A> const& self, batch<int8_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<int8_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<int16_t, A> fast_cast(batch<uint16_t, A> const& self, batch<int16_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<int16_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<int32_t, A> fast_cast(batch<uint32_t, A> const& self, batch<int32_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<int32_t>(self);
            }
            template <class A>
            XSIMD_INLINE batch<int64_t, A> fast_cast(batch<uint64_t, A> const& self, batch<int64_t, A> const&, requires_arch<common>) noexcept
            {
                return bitwise_cast<int64_t>(self);
            }

            // Provide a common uint32_t -> float cast only if we have a
            // non-common int32_t -> float fast_cast
            template <class A, class _ = decltype(fast_cast(std::declval<batch<int32_t, A> const&>(), std::declval<batch<float, A> const&>(), A {}))>
            XSIMD_INLINE batch<float, A> fast_cast(batch<uint32_t, A> const& v, batch<float, A> const&, requires_arch<common>) noexcept
            {
                // see https://stackoverflow.com/questions/34066228/how-to-perform-uint32-float-conversion-with-sse
                batch<uint32_t, A> msk_lo(0xFFFF);
                batch<float, A> cnst65536f(65536.0f);

                auto v_lo = batch_cast<int32_t>(v & msk_lo); /* extract the 16 lowest significant bits of self */
                auto v_hi = batch_cast<int32_t>(v >> 16); /* 16 most significant bits of v */
                auto v_lo_flt = batch_cast<float>(v_lo); /* No rounding */
                auto v_hi_flt = batch_cast<float>(v_hi); /* No rounding */
                v_hi_flt = cnst65536f * v_hi_flt; /* No rounding */
                return v_hi_flt + v_lo_flt; /* Rounding may occur here, mul and add may fuse to fma for haswell and newer */
            }

            // Provide a common float -> uint32_t cast only if we have a
            // non-common float -> int32_t fast_cast
            template <class A, class _ = decltype(fast_cast(std::declval<batch<float, A> const&>(), std::declval<batch<int32_t, A> const&>(), A {}))>
            XSIMD_INLINE batch<uint32_t, A> fast_cast(batch<float, A> const& v, batch<uint32_t, A> const&, requires_arch<common>) noexcept
            {
                auto is_large = v >= batch<float, A>(1u << 31);
                auto small_v = bitwise_cast<float>(batch_cast<int32_t>(v));
                auto large_v = bitwise_cast<float>(
                    batch_cast<int32_t>(v - batch<float, A>(1u << 31))
                    ^ batch<int32_t, A>(1u << 31));
                return bitwise_cast<uint32_t>(select(is_large, large_v, small_v));
            }
        }

        namespace detail
        {
            // Generic conversion handling machinery. Each architecture must define
            // conversion function when such conversions exits in the form of
            // intrinsic. Then we use that information to automatically decide whether
            // to use scalar or vector conversion when doing load / store / batch_cast
            struct with_fast_conversion
            {
            };
            struct with_slow_conversion
            {
            };

            template <class A, class From, class To, class = void>
            struct conversion_type_impl
            {
                using type = with_slow_conversion;
            };

            using xsimd::detail::void_t;

            template <class A, class From, class To>
            struct conversion_type_impl<A, From, To,
                                        void_t<decltype(fast_cast(std::declval<const batch<From, A>&>(),
                                                                  std::declval<const batch<To, A>&>(),
                                                                  std::declval<const A&>()))>>
            {
                using type = with_fast_conversion;
            };

            template <class A, class From, class To>
            using conversion_type = typename conversion_type_impl<A, From, To>::type;
        }

        namespace detail
        {
            /* origin: boost/simdfunction/horn.hpp*/
            /*
             * ====================================================
             * copyright 2016 NumScale SAS
             *
             * Distributed under the Boost Software License, Version 1.0.
             * (See copy at http://boost.org/LICENSE_1_0.txt)
             * ====================================================
             */
            template <class B, uint64_t c>
            XSIMD_INLINE B coef() noexcept
            {
                using value_type = typename B::value_type;
                return B(bit_cast<value_type>(as_unsigned_integer_t<value_type>(c)));
            }
            template <class B>
            XSIMD_INLINE B horner(const B&) noexcept
            {
                return B(typename B::value_type(0.));
            }

            template <class B, uint64_t c0>
            XSIMD_INLINE B horner(const B&) noexcept
            {
                return coef<B, c0>();
            }

            template <class B, uint64_t c0, uint64_t c1, uint64_t... args>
            XSIMD_INLINE B horner(const B& self) noexcept
            {
                return fma(self, horner<B, c1, args...>(self), coef<B, c0>());
            }

            /* origin: boost/simdfunction/horn1.hpp*/
            /*
             * ====================================================
             * copyright 2016 NumScale SAS
             *
             * Distributed under the Boost Software License, Version 1.0.
             * (See copy at http://boost.org/LICENSE_1_0.txt)
             * ====================================================
             */
            template <class B>
            XSIMD_INLINE B horner1(const B&) noexcept
            {
                return B(1.);
            }

            template <class B, uint64_t c0>
            XSIMD_INLINE B horner1(const B& x) noexcept
            {
                return x + detail::coef<B, c0>();
            }

            template <class B, uint64_t c0, uint64_t c1, uint64_t... args>
            XSIMD_INLINE B horner1(const B& x) noexcept
            {
                return fma(x, horner1<B, c1, args...>(x), detail::coef<B, c0>());
            }
        }

    }

}

#endif
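The coef/horner helpers at the end of this header evaluate polynomial approximations with Horner's rule: each coefficient is passed as the raw uint64_t bit pattern of the batch's value type, decoded by coef, and folded in with fma. Below is a minimal scalar sketch of the same recurrence, assuming plain double arithmetic and an illustrative horner_t helper of our own rather than the real batch-based templates:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Decode a coefficient stored as the bit pattern of a double
// (scalar stand-in for coef<B, c>()).
static double coef(std::uint64_t bits)
{
    double c;
    std::memcpy(&c, &bits, sizeof(c));
    return c;
}

// Scalar stand-in for horner<B, c0, c1, ...>:
// evaluates c0 + c1*x + c2*x^2 + ... with fma at each step.
template <std::uint64_t... Coefs>
struct horner_t;

template <>
struct horner_t<>
{
    static double eval(double) { return 0.0; }
};

template <std::uint64_t C0, std::uint64_t... Rest>
struct horner_t<C0, Rest...>
{
    static double eval(double x) { return std::fma(x, horner_t<Rest...>::eval(x), coef(C0)); }
};

int main()
{
    // p(x) = 1 + 2x + 3x^2, coefficients given as IEEE-754 double bit patterns.
    constexpr std::uint64_t c1 = 0x3FF0000000000000ull; // 1.0
    constexpr std::uint64_t c2 = 0x4000000000000000ull; // 2.0
    constexpr std::uint64_t c3 = 0x4008000000000000ull; // 3.0
    assert(horner_t<c1, c2, c3>::eval(2.0) == 17.0); // 1 + 2*2 + 3*4
    return 0;
}

In the header itself the same pattern runs element-wise on xsimd::batch values; encoding the coefficients as integers lets them be passed as non-type template parameters regardless of the floating-point type being approximated.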
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp

@@ -0,0 +1,218 @@

/***************************************************************************
* Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and *
* Martin Renou *
* Copyright (c) QuantStack *
* Copyright (c) Serge Guelton *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#ifndef XSIMD_COMMON_LOGICAL_HPP
#define XSIMD_COMMON_LOGICAL_HPP

#include "./xsimd_common_details.hpp"

#include <climits>

namespace xsimd
{

    namespace kernel
    {

        using namespace types;

        // count
        template <class A, class T>
        XSIMD_INLINE size_t count(batch_bool<T, A> const& self, requires_arch<common>) noexcept
        {
            uint64_t m = self.mask();
            XSIMD_IF_CONSTEXPR(batch_bool<T, A>::size < 14)
            {
                // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSet64
                return (m * 0x200040008001ULL & 0x111111111111111ULL) % 0xf;
            }
            else
            {
#if defined __has_builtin
#if __has_builtin(__builtin_popcountg)
#define builtin_popcount(v) __builtin_popcountg(v)
#endif
#endif

#ifdef builtin_popcount
                return builtin_popcount(m);
#else
                // FIXME: we could do better by dispatching to the appropriate
                // popcount instruction depending on the arch...
                XSIMD_IF_CONSTEXPR(batch_bool<T, A>::size <= 32)
                {
                    uint32_t m32 = static_cast<uint32_t>(m);
                    // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
                    m32 = m32 - ((m32 >> 1) & 0x55555555); // reuse input as temporary
                    m32 = (m32 & 0x33333333) + ((m32 >> 2) & 0x33333333); // temp
                    return (((m32 + (m32 >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; // count
                }
                else
                {
                    // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
                    m = m - ((m >> 1) & (uint64_t) ~(uint64_t)0 / 3); // temp
                    m = (m & (uint64_t) ~(uint64_t)0 / 15 * 3) + ((m >> 2) & (uint64_t) ~(uint64_t)0 / 15 * 3); // temp
                    m = (m + (m >> 4)) & (uint64_t) ~(uint64_t)0 / 255 * 15; // temp
                    return (m * ((uint64_t) ~(uint64_t)0 / 255)) >> (sizeof(uint64_t) - 1) * CHAR_BIT; // count
                }
#endif
            }
        }

        // from mask
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> from_mask(batch_bool<T, A> const&, uint64_t mask, requires_arch<common>) noexcept
        {
            alignas(A::alignment()) bool buffer[batch_bool<T, A>::size];
            // This is inefficient but should never be called. It's just a
            // temporary implementation until arm support is added.
            for (size_t i = 0; i < batch_bool<T, A>::size; ++i)
                buffer[i] = mask & (1ull << i);
            return batch_bool<T, A>::load_aligned(buffer);
        }

        // ge
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept
        {
            return other <= self;
        }

        // gt
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept
        {
            return other < self;
        }

        // is_even
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> is_even(batch<T, A> const& self, requires_arch<common>) noexcept
        {
            return is_flint(self * T(0.5));
        }

        // is_flint
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> is_flint(batch<T, A> const& self, requires_arch<common>) noexcept
        {
            auto frac = select(isnan(self - self), constants::nan<batch<T, A>>(), self - trunc(self));
            return frac == T(0.);
        }

        // is_odd
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> is_odd(batch<T, A> const& self, requires_arch<common>) noexcept
        {
            return is_even(self - T(1.));
        }

        // isinf
        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
        XSIMD_INLINE batch_bool<T, A> isinf(batch<T, A> const&, requires_arch<common>) noexcept
        {
            return batch_bool<T, A>(false);
        }
        template <class A>
        XSIMD_INLINE batch_bool<float, A> isinf(batch<float, A> const& self, requires_arch<common>) noexcept
        {
#ifdef __FAST_MATH__
            (void)self;
            return { false };
#else
            return abs(self) == std::numeric_limits<float>::infinity();
#endif
        }
        template <class A>
        XSIMD_INLINE batch_bool<double, A> isinf(batch<double, A> const& self, requires_arch<common>) noexcept
        {
#ifdef __FAST_MATH__
            (void)self;
            return { false };
#else
            return abs(self) == std::numeric_limits<double>::infinity();
#endif
        }

        // isfinite
        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
        XSIMD_INLINE batch_bool<T, A> isfinite(batch<T, A> const&, requires_arch<common>) noexcept
        {
            return batch_bool<T, A>(true);
        }
        template <class A>
        XSIMD_INLINE batch_bool<float, A> isfinite(batch<float, A> const& self, requires_arch<common>) noexcept
        {
            return (self - self) == 0.f;
        }
        template <class A>
        XSIMD_INLINE batch_bool<double, A> isfinite(batch<double, A> const& self, requires_arch<common>) noexcept
        {
            return (self - self) == 0.;
        }

        // isnan
        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
        XSIMD_INLINE batch_bool<T, A> isnan(batch<T, A> const&, requires_arch<common>) noexcept
        {
            return batch_bool<T, A>(false);
        }

        // le
        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
        XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept
        {
            return (self < other) || (self == other);
        }

        // neq
        template <class A, class T>
        XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept
        {
            return !(other == self);
        }

        // logical_and
        template <class A, class T>
        XSIMD_INLINE batch<T, A> logical_and(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept
        {
            return detail::apply([](T x, T y) noexcept
                                 { return x && y; },
                                 self, other);
        }

        // logical_or
        template <class A, class T>
        XSIMD_INLINE batch<T, A> logical_or(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept
        {
            return detail::apply([](T x, T y) noexcept
                                 { return x || y; },
                                 self, other);
        }

        // mask
        template <class A, class T>
        XSIMD_INLINE uint64_t mask(batch_bool<T, A> const& self, requires_arch<common>) noexcept
        {
            alignas(A::alignment()) bool buffer[batch_bool<T, A>::size];
            self.store_aligned(buffer);
            // This is inefficient but should never be called. It's just a
            // temporary implementation until arm support is added.
            uint64_t res = 0;
            for (size_t i = 0; i < batch_bool<T, A>::size; ++i)
                if (buffer[i])
                    res |= 1ul << i;
            return res;
        }
    }
}

#endif
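The count kernel above falls back to SWAR popcount tricks (from the Bit Twiddling Hacks page it links) when no popcount builtin is available. A minimal scalar sketch of the 32-bit branch, checked against a naive loop; the helper names here are illustrative and not part of xsimd:

#include <cassert>
#include <cstdint>

// SWAR popcount, following the same steps as the 32-bit branch of kernel::count.
static std::uint32_t swar_popcount32(std::uint32_t m)
{
    m = m - ((m >> 1) & 0x55555555u);                             // sum bits into 2-bit groups
    m = (m & 0x33333333u) + ((m >> 2) & 0x33333333u);             // sum pairs into 4-bit groups
    return (((m + (m >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24;  // add bytes, keep the top byte
}

// Reference implementation: count bits one at a time.
static std::uint32_t naive_popcount32(std::uint32_t m)
{
    std::uint32_t n = 0;
    for (; m != 0; m >>= 1)
        n += m & 1u;
    return n;
}

int main()
{
    const std::uint32_t samples[] = { 0u, 1u, 0xFFu, 0xF0F0u, 0xDEADBEEFu, 0xFFFFFFFFu };
    for (std::uint32_t v : samples)
        assert(swar_popcount32(v) == naive_popcount32(v));
    return 0;
}

The 64-bit branch in the kernel is the same construction widened to uint64_t, and the size < 14 shortcut uses the modulo-based counting trick from the same page, which only applies to small masks.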