sequenzo-0.1.21-cp310-cp310-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of sequenzo might be problematic.
- sequenzo/__init__.py +240 -0
- sequenzo/big_data/__init__.py +12 -0
- sequenzo/big_data/clara/__init__.py +26 -0
- sequenzo/big_data/clara/clara.py +467 -0
- sequenzo/big_data/clara/utils/__init__.py +27 -0
- sequenzo/big_data/clara/utils/aggregatecases.py +92 -0
- sequenzo/big_data/clara/utils/davies_bouldin.py +91 -0
- sequenzo/big_data/clara/utils/get_weighted_diss.cpython-310-darwin.so +0 -0
- sequenzo/big_data/clara/utils/wfcmdd.py +205 -0
- sequenzo/big_data/clara/visualization.py +88 -0
- sequenzo/clustering/KMedoids.py +196 -0
- sequenzo/clustering/__init__.py +30 -0
- sequenzo/clustering/clustering_c_code.cpython-310-darwin.so +0 -0
- sequenzo/clustering/hierarchical_clustering.py +1380 -0
- sequenzo/clustering/src/KMedoid.cpp +262 -0
- sequenzo/clustering/src/PAM.cpp +236 -0
- sequenzo/clustering/src/PAMonce.cpp +234 -0
- sequenzo/clustering/src/cluster_quality.cpp +496 -0
- sequenzo/clustering/src/cluster_quality.h +128 -0
- sequenzo/clustering/src/cluster_quality_backup.cpp +570 -0
- sequenzo/clustering/src/module.cpp +228 -0
- sequenzo/clustering/src/weightedinertia.cpp +111 -0
- sequenzo/clustering/utils/__init__.py +27 -0
- sequenzo/clustering/utils/disscenter.py +122 -0
- sequenzo/data_preprocessing/__init__.py +20 -0
- sequenzo/data_preprocessing/helpers.py +256 -0
- sequenzo/datasets/__init__.py +41 -0
- sequenzo/datasets/biofam.csv +2001 -0
- sequenzo/datasets/biofam_child_domain.csv +2001 -0
- sequenzo/datasets/biofam_left_domain.csv +2001 -0
- sequenzo/datasets/biofam_married_domain.csv +2001 -0
- sequenzo/datasets/chinese_colonial_territories.csv +12 -0
- sequenzo/datasets/country_co2_emissions.csv +194 -0
- sequenzo/datasets/country_co2_emissions_global_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_global_quintiles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_deciles.csv +195 -0
- sequenzo/datasets/country_co2_emissions_local_quintiles.csv +195 -0
- sequenzo/datasets/country_gdp_per_capita.csv +194 -0
- sequenzo/datasets/mvad.csv +713 -0
- sequenzo/datasets/pairfam_family.csv +1867 -0
- sequenzo/datasets/polyadic_samplec1.csv +61 -0
- sequenzo/datasets/polyadic_samplep1.csv +61 -0
- sequenzo/datasets/polyadic_seqc1.csv +61 -0
- sequenzo/datasets/polyadic_seqp1.csv +61 -0
- sequenzo/define_sequence_data.py +609 -0
- sequenzo/dissimilarity_measures/__init__.py +31 -0
- sequenzo/dissimilarity_measures/c_code.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/get_distance_matrix.py +702 -0
- sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +241 -0
- sequenzo/dissimilarity_measures/src/DHDdistance.cpp +148 -0
- sequenzo/dissimilarity_measures/src/LCPdistance.cpp +114 -0
- sequenzo/dissimilarity_measures/src/OMdistance.cpp +247 -0
- sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +281 -0
- sequenzo/dissimilarity_measures/src/__init__.py +0 -0
- sequenzo/dissimilarity_measures/src/dist2matrix.cpp +63 -0
- sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
- sequenzo/dissimilarity_measures/src/module.cpp +34 -0
- sequenzo/dissimilarity_measures/src/setup.py +30 -0
- sequenzo/dissimilarity_measures/src/utils.h +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/.github/cmake-test/main.cpp +6 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/main.cpp +159 -0
- sequenzo/dissimilarity_measures/src/xsimd/benchmark/xsimd_benchmark.hpp +565 -0
- sequenzo/dissimilarity_measures/src/xsimd/docs/source/conf.py +37 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/mandelbrot.cpp +330 -0
- sequenzo/dissimilarity_measures/src/xsimd/examples/pico_bench.hpp +246 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +266 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +112 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +323 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +218 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +2583 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +880 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_rounding.hpp +72 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +978 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +1924 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +1144 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +656 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512cd.hpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +244 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512er.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +2650 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512ifma.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512pf.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +131 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512bw.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vnni_avx512vbmi2.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avxvnni.hpp +20 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +393 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +788 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +93 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx2.hpp +46 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +97 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_i8mm_neon64.hpp +17 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +3142 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +1543 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +1513 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +1260 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +2024 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +67 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_1.hpp +339 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse4_2.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +186 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +1155 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +1780 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +240 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +484 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +269 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +27 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/math/xsimd_rem_pio2.hpp +719 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_aligned_allocator.hpp +349 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/memory/xsimd_alignment.hpp +91 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +2765 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx2_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512bw_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512cd_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512dq_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512er_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512f_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512ifma_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512pf_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi2_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vbmi_register.hpp +51 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512bw_register.hpp +54 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx512vnni_avx512vbmi2_register.hpp +53 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avx_register.hpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_avxvnni_register.hpp +44 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +1524 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch_constant.hpp +300 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_common_arch.hpp +47 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_emulated_register.hpp +80 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx2_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_avx_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma3_sse_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_fma4_register.hpp +50 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_i8mm_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon64_register.hpp +55 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_neon_register.hpp +154 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_register.hpp +94 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +506 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse2_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse3_register.hpp +49 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_1_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sse4_2_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_ssse3_register.hpp +48 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_sve_register.hpp +156 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +337 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_utils.hpp +536 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_wasm_register.hpp +59 -0
- sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +75 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/architectures/dummy.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set.cpp +13 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean.cpp +24 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_aligned.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_arch_independent.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/explicit_use_of_an_instruction_set_mean_tag_dispatch.cpp +25 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_abstract_batches.cpp +7 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/manipulating_parametric_batches.cpp +8 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum.hpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_avx2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/sum_sse2.cpp +3 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/doc/writing_vectorized_code.cpp +11 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/main.cpp +31 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_api.cpp +230 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_arch.cpp +217 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +1049 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +508 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +409 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +712 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_constant.cpp +286 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_float.cpp +141 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +365 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +308 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_bitwise_cast.cpp +222 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_exponential.cpp +226 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_hyperbolic.cpp +183 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_power.cpp +265 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_complex_trigonometric.cpp +236 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_conversion.cpp +248 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_custom_default_arch.cpp +28 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_error_gamma.cpp +170 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_explicit_batch_instantiation.cpp +32 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_exponential.cpp +202 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_extract_pair.cpp +92 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_fp_manipulation.cpp +77 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_gnu_source.cpp +30 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_hyperbolic.cpp +167 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +304 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +61 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_poly_evaluation.cpp +64 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +184 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_rounding.cpp +199 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +101 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +760 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.cpp +4 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_sum.hpp +34 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_traits.cpp +172 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_trigonometric.cpp +208 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +611 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_wasm/test_wasm_playwright.py +123 -0
- sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +1460 -0
- sequenzo/dissimilarity_measures/utils/__init__.py +16 -0
- sequenzo/dissimilarity_measures/utils/get_LCP_length_for_2_seq.py +44 -0
- sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqconc.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdss.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqdur.cpython-310-darwin.so +0 -0
- sequenzo/dissimilarity_measures/utils/seqlength.cpython-310-darwin.so +0 -0
- sequenzo/multidomain/__init__.py +23 -0
- sequenzo/multidomain/association_between_domains.py +311 -0
- sequenzo/multidomain/cat.py +431 -0
- sequenzo/multidomain/combt.py +519 -0
- sequenzo/multidomain/dat.py +89 -0
- sequenzo/multidomain/idcd.py +139 -0
- sequenzo/multidomain/linked_polyad.py +292 -0
- sequenzo/openmp_setup.py +233 -0
- sequenzo/prefix_tree/__init__.py +43 -0
- sequenzo/prefix_tree/individual_level_indicators.py +1274 -0
- sequenzo/prefix_tree/system_level_indicators.py +465 -0
- sequenzo/prefix_tree/utils.py +54 -0
- sequenzo/sequence_characteristics/__init__.py +40 -0
- sequenzo/sequence_characteristics/complexity_index.py +49 -0
- sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +220 -0
- sequenzo/sequence_characteristics/plot_characteristics.py +593 -0
- sequenzo/sequence_characteristics/simple_characteristics.py +311 -0
- sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +39 -0
- sequenzo/sequence_characteristics/turbulence.py +155 -0
- sequenzo/sequence_characteristics/variance_of_spell_durations.py +86 -0
- sequenzo/sequence_characteristics/within_sequence_entropy.py +43 -0
- sequenzo/suffix_tree/__init__.py +48 -0
- sequenzo/suffix_tree/individual_level_indicators.py +1638 -0
- sequenzo/suffix_tree/system_level_indicators.py +456 -0
- sequenzo/suffix_tree/utils.py +56 -0
- sequenzo/visualization/__init__.py +29 -0
- sequenzo/visualization/plot_mean_time.py +194 -0
- sequenzo/visualization/plot_modal_state.py +276 -0
- sequenzo/visualization/plot_most_frequent_sequences.py +147 -0
- sequenzo/visualization/plot_relative_frequency.py +404 -0
- sequenzo/visualization/plot_sequence_index.py +937 -0
- sequenzo/visualization/plot_single_medoid.py +153 -0
- sequenzo/visualization/plot_state_distribution.py +613 -0
- sequenzo/visualization/plot_transition_matrix.py +190 -0
- sequenzo/visualization/utils/__init__.py +23 -0
- sequenzo/visualization/utils/utils.py +310 -0
- sequenzo/with_event_history_analysis/__init__.py +35 -0
- sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
- sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
- sequenzo-0.1.21.dist-info/METADATA +308 -0
- sequenzo-0.1.21.dist-info/RECORD +254 -0
- sequenzo-0.1.21.dist-info/WHEEL +5 -0
- sequenzo-0.1.21.dist-info/licenses/LICENSE +28 -0
- sequenzo-0.1.21.dist-info/top_level.txt +1 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp
@@ -0,0 +1,880 @@
+/***************************************************************************
+ * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
+ * Martin Renou                                                             *
+ * Copyright (c) QuantStack                                                 *
+ * Copyright (c) Serge Guelton                                              *
+ *                                                                          *
+ * Distributed under the terms of the BSD 3-Clause License.                 *
+ *                                                                          *
+ * The full license is in the file LICENSE, distributed with this software. *
+ ****************************************************************************/
+
+#ifndef XSIMD_COMMON_MEMORY_HPP
+#define XSIMD_COMMON_MEMORY_HPP
+
+#include <algorithm>
+#include <complex>
+#include <stdexcept>
+
+#include "../../types/xsimd_batch_constant.hpp"
+#include "./xsimd_common_details.hpp"
+
+namespace xsimd
+{
+    template <typename T, class A, T... Values>
+    struct batch_constant;
+
+    template <typename T, class A, bool... Values>
+    struct batch_bool_constant;
+
+    namespace kernel
+    {
+
+        using namespace types;
+
+        // broadcast
+        namespace detail
+        {
+            template <class T, class A>
+            struct broadcaster
+            {
+                using return_type = batch<T, A>;
+
+                static XSIMD_INLINE return_type run(T v) noexcept
+                {
+                    return return_type::broadcast(v);
+                }
+            };
+
+            template <class A>
+            struct broadcaster<bool, A>
+            {
+                using return_type = batch_bool<xsimd::as_unsigned_integer_t<bool>, A>;
+
+                static XSIMD_INLINE return_type run(bool b) noexcept
+                {
+                    return return_type(b);
+                }
+            };
+        }
+
+        // compress
+        namespace detail
+        {
+            template <class IT, class A, class I, size_t... Is>
+            XSIMD_INLINE batch<IT, A> create_compress_swizzle_mask(I bitmask, ::xsimd::detail::index_sequence<Is...>)
+            {
+                batch<IT, A> swizzle_mask(IT(0));
+                alignas(A::alignment()) IT mask_buffer[batch<IT, A>::size] = { Is... };
+                size_t inserted = 0;
+                for (size_t i = 0; i < sizeof...(Is); ++i)
+                    if ((bitmask >> i) & 1u)
+                        std::swap(mask_buffer[inserted++], mask_buffer[i]);
+                return batch<IT, A>::load_aligned(&mask_buffer[0]);
+            }
+        }
+
+        template <typename A, typename T>
+        XSIMD_INLINE batch<T, A>
+        compress(batch<T, A> const& x, batch_bool<T, A> const& mask,
+                 kernel::requires_arch<common>) noexcept
+        {
+            using IT = as_unsigned_integer_t<T>;
+            constexpr std::size_t size = batch_bool<T, A>::size;
+            auto bitmask = mask.mask();
+            auto z = select(mask, x, batch<T, A>((T)0));
+            auto compress_mask = detail::create_compress_swizzle_mask<IT, A>(bitmask, ::xsimd::detail::make_index_sequence<size>());
+            return swizzle(z, compress_mask);
+        }
+
+        // expand
+        namespace detail
+        {
+            template <class IT, class A, class I, size_t... Is>
+            XSIMD_INLINE batch<IT, A> create_expand_swizzle_mask(I bitmask, ::xsimd::detail::index_sequence<Is...>)
+            {
+                batch<IT, A> swizzle_mask(IT(0));
+                IT j = 0;
+                (void)std::initializer_list<bool> { ((swizzle_mask = insert(swizzle_mask, j, index<Is>())), (j += ((bitmask >> Is) & 1u)), true)... };
+                return swizzle_mask;
+            }
+        }
+
+        template <typename A, typename T>
+        XSIMD_INLINE batch<T, A>
+        expand(batch<T, A> const& x, batch_bool<T, A> const& mask,
+               kernel::requires_arch<common>) noexcept
+        {
+            constexpr std::size_t size = batch_bool<T, A>::size;
+            auto bitmask = mask.mask();
+            auto swizzle_mask = detail::create_expand_swizzle_mask<as_unsigned_integer_t<T>, A>(bitmask, ::xsimd::detail::make_index_sequence<size>());
+            auto z = swizzle(x, swizzle_mask);
+            return select(mask, z, batch<T, A>(T(0)));
+        }
+
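The compress and expand kernels above are duals: compress packs the lanes selected by the mask to the front of the batch and zero-fills the tail, while expand takes values from the front of the batch and spreads them into the selected lanes. A minimal scalar model of the same semantics (an illustrative sketch with hypothetical names, not code from the wheel):

#include <array>
#include <cstddef>

// compress: pack masked-in lanes to the front, zero-fill the tail.
template <std::size_t N>
std::array<int, N> compress_model(std::array<int, N> const& x, std::array<bool, N> const& mask)
{
    std::array<int, N> out {}; // zero-initialized, like select(mask, x, 0)
    std::size_t inserted = 0;
    for (std::size_t i = 0; i < N; ++i)
        if (mask[i])
            out[inserted++] = x[i];
    return out;
}

// expand: the inverse placement, spreading front lanes into masked-in slots.
template <std::size_t N>
std::array<int, N> expand_model(std::array<int, N> const& x, std::array<bool, N> const& mask)
{
    std::array<int, N> out {};
    std::size_t j = 0;
    for (std::size_t i = 0; i < N; ++i)
        if (mask[i])
            out[i] = x[j++];
    return out;
}
// compress_model({1, 2, 3, 4}, {true, false, true, false}) == {1, 3, 0, 0}
// expand_model({1, 3, 0, 0}, {true, false, true, false})  == {1, 0, 3, 0}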
+        // extract_pair
+        template <class A, class T>
+        XSIMD_INLINE batch<T, A> extract_pair(batch<T, A> const& self, batch<T, A> const& other, std::size_t i, requires_arch<common>) noexcept
+        {
+            constexpr std::size_t size = batch<T, A>::size;
+            assert(i < size && "index in bounds");
+
+            alignas(A::alignment()) T self_buffer[size];
+            self.store_aligned(self_buffer);
+
+            alignas(A::alignment()) T other_buffer[size];
+            other.store_aligned(other_buffer);
+
+            alignas(A::alignment()) T concat_buffer[size];
+
+            for (std::size_t j = 0; j < (size - i); ++j)
+            {
+                concat_buffer[j] = other_buffer[i + j];
+                if (j < i)
+                {
+                    concat_buffer[size - 1 - j] = self_buffer[i - 1 - j];
+                }
+            }
+            return batch<T, A>::load_aligned(concat_buffer);
+        }
+
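extract_pair above builds a sliding window over the concatenation of the two batches: the first size - i lanes come from the tail of other, the remaining i lanes from the head of self. A scalar sketch of that layout (hypothetical helper, plain arrays):

#include <array>
#include <cstddef>

// Result is the window other[i], ..., other[N - 1], self[0], ..., self[i - 1].
template <std::size_t N>
std::array<int, N> extract_pair_model(std::array<int, N> const& self,
                                      std::array<int, N> const& other, std::size_t i)
{
    std::array<int, N> out {};
    for (std::size_t j = 0; j < N - i; ++j)
        out[j] = other[i + j]; // tail of 'other' first
    for (std::size_t j = 0; j < i; ++j)
        out[N - i + j] = self[j]; // then head of 'self'
    return out;
}
// extract_pair_model<4>({4, 5, 6, 7}, {0, 1, 2, 3}, 1) == {1, 2, 3, 4}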
+        // gather
+        namespace detail
+        {
+            // Not using XSIMD_INLINE here as it makes msvc hang forever on avx512
+            template <size_t N, typename T, typename A, typename U, typename V, typename std::enable_if<N == 0, int>::type = 0>
+            inline batch<T, A> gather(U const* src, batch<V, A> const& index,
+                                      ::xsimd::index<N> I) noexcept
+            {
+                return insert(batch<T, A> {}, static_cast<T>(src[index.get(I)]), I);
+            }
+
+            template <size_t N, typename T, typename A, typename U, typename V, typename std::enable_if<N != 0, int>::type = 0>
+            inline batch<T, A>
+            gather(U const* src, batch<V, A> const& index, ::xsimd::index<N> I) noexcept
+            {
+                static_assert(N <= batch<V, A>::size, "Incorrect value in recursion!");
+
+                const auto test = gather<N - 1, T, A>(src, index, {});
+                return insert(test, static_cast<T>(src[index.get(I)]), I);
+            }
+        } // namespace detail
+
+        template <typename T, typename A, typename V>
+        XSIMD_INLINE batch<T, A>
+        gather(batch<T, A> const&, T const* src, batch<V, A> const& index,
+               kernel::requires_arch<common>) noexcept
+        {
+            static_assert(batch<T, A>::size == batch<V, A>::size,
+                          "Index and destination sizes must match");
+
+            return detail::gather<batch<V, A>::size - 1, T, A>(src, index, {});
+        }
+
+        // Gather with runtime indexes and mismatched strides.
+        template <typename T, typename A, typename U, typename V>
+        XSIMD_INLINE detail::sizes_mismatch_t<T, U, batch<T, A>>
+        gather(batch<T, A> const&, U const* src, batch<V, A> const& index,
+               kernel::requires_arch<common>) noexcept
+        {
+            static_assert(batch<T, A>::size == batch<V, A>::size,
+                          "Index and destination sizes must match");
+
+            return detail::gather<batch<V, A>::size - 1, T, A>(src, index, {});
+        }
+
+        // Gather with runtime indexes and matching strides.
+        template <typename T, typename A, typename U, typename V>
+        XSIMD_INLINE detail::stride_match_t<T, U, batch<T, A>>
+        gather(batch<T, A> const&, U const* src, batch<V, A> const& index,
+               kernel::requires_arch<common>) noexcept
+        {
+            static_assert(batch<T, A>::size == batch<V, A>::size,
+                          "Index and destination sizes must match");
+
+            return batch_cast<T>(kernel::gather(batch<U, A> {}, src, index, A {}));
+        }
+
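The recursive detail::gather above is a compile-time unrolled loop: lane i of the result is src[index[i]], built up one insert() per lane. Scalar equivalent (illustrative only, hypothetical name):

#include <array>
#include <cstddef>

// Lane i of the gathered result is src[index[i]].
template <std::size_t N>
std::array<float, N> gather_model(float const* src, std::array<int, N> const& index)
{
    std::array<float, N> out {};
    for (std::size_t i = 0; i < N; ++i)
        out[i] = src[index[i]];
    return out;
}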
+        // insert
+        template <class A, class T, size_t I>
+        XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I>, requires_arch<common>) noexcept
+        {
+            struct index_mask
+            {
+                static constexpr bool get(size_t index, size_t /* size*/)
+                {
+                    return index != I;
+                }
+            };
+            batch<T, A> tmp(val);
+            return select(make_batch_bool_constant<T, index_mask, A>(), self, tmp);
+        }
+
+        // get
+        template <class A, size_t I, class T>
+        XSIMD_INLINE T get(batch<T, A> const& self, ::xsimd::index<I>, requires_arch<common>) noexcept
+        {
+            alignas(A::alignment()) T buffer[batch<T, A>::size];
+            self.store_aligned(&buffer[0]);
+            return buffer[I];
+        }
+
+        template <class A, size_t I, class T>
+        XSIMD_INLINE T get(batch_bool<T, A> const& self, ::xsimd::index<I>, requires_arch<common>) noexcept
+        {
+            alignas(A::alignment()) T buffer[batch_bool<T, A>::size];
+            self.store_aligned(&buffer[0]);
+            return buffer[I];
+        }
+
+        template <class A, size_t I, class T>
+        XSIMD_INLINE auto get(batch<std::complex<T>, A> const& self, ::xsimd::index<I>, requires_arch<common>) noexcept -> typename batch<std::complex<T>, A>::value_type
+        {
+            alignas(A::alignment()) T buffer[batch<std::complex<T>, A>::size];
+            self.store_aligned(&buffer[0]);
+            return buffer[I];
+        }
+
+        template <class A, class T>
+        XSIMD_INLINE T get(batch<T, A> const& self, std::size_t i, requires_arch<common>) noexcept
+        {
+            alignas(A::alignment()) T buffer[batch<T, A>::size];
+            self.store_aligned(&buffer[0]);
+            return buffer[i];
+        }
+
+        template <class A, class T>
+        XSIMD_INLINE T get(batch_bool<T, A> const& self, std::size_t i, requires_arch<common>) noexcept
+        {
+            alignas(A::alignment()) bool buffer[batch_bool<T, A>::size];
+            self.store_aligned(&buffer[0]);
+            return buffer[i];
+        }
+
+        template <class A, class T>
+        XSIMD_INLINE auto get(batch<std::complex<T>, A> const& self, std::size_t i, requires_arch<common>) noexcept -> typename batch<std::complex<T>, A>::value_type
+        {
+            using T2 = typename batch<std::complex<T>, A>::value_type;
+            alignas(A::alignment()) T2 buffer[batch<std::complex<T>, A>::size];
+            self.store_aligned(&buffer[0]);
+            return buffer[i];
+        }
+
+        // first
+        template <class A, class T>
+        XSIMD_INLINE T first(batch<T, A> const& self, requires_arch<common>) noexcept
+        {
+            return get(self, 0, common {});
+        }
+
+        template <class A, class T>
+        XSIMD_INLINE T first(batch_bool<T, A> const& self, requires_arch<common>) noexcept
+        {
+            return first(batch<T, A>(self), A {});
+        }
+
+        template <class A, class T>
+        XSIMD_INLINE auto first(batch<std::complex<T>, A> const& self, requires_arch<common>) noexcept -> typename batch<std::complex<T>, A>::value_type
+        {
+            return { first(self.real(), A {}), first(self.imag(), A {}) };
+        }
+
+        // load
+        template <class A, class T>
+        XSIMD_INLINE batch_bool<T, A> load_unaligned(bool const* mem, batch_bool<T, A>, requires_arch<common>) noexcept
+        {
+            using batch_type = batch<T, A>;
+            batch_type ref(0);
+            constexpr auto size = batch_bool<T, A>::size;
+            alignas(A::alignment()) T buffer[size];
+            for (std::size_t i = 0; i < size; ++i)
+                buffer[i] = mem[i] ? 1 : 0;
+            return ref != batch_type::load_aligned(&buffer[0]);
+        }
+
+        template <class A, class T>
+        XSIMD_INLINE batch_bool<T, A> load_aligned(bool const* mem, batch_bool<T, A> b, requires_arch<common>) noexcept
+        {
+            return load_unaligned(mem, b, A {});
+        }
+
+        // load_aligned
+        namespace detail
+        {
+            template <class A, class T_in, class T_out>
+            XSIMD_INLINE batch<T_out, A> load_aligned(T_in const* mem, convert<T_out>, requires_arch<common>, with_fast_conversion) noexcept
+            {
+                using batch_type_in = batch<T_in, A>;
+                using batch_type_out = batch<T_out, A>;
+                return fast_cast(batch_type_in::load_aligned(mem), batch_type_out(), A {});
+            }
+            template <class A, class T_in, class T_out>
+            XSIMD_INLINE batch<T_out, A> load_aligned(T_in const* mem, convert<T_out>, requires_arch<common>, with_slow_conversion) noexcept
+            {
+                static_assert(!std::is_same<T_in, T_out>::value, "there should be a direct load for this type combination");
+                using batch_type_out = batch<T_out, A>;
+                alignas(A::alignment()) T_out buffer[batch_type_out::size];
+                std::copy(mem, mem + batch_type_out::size, std::begin(buffer));
+                return batch_type_out::load_aligned(buffer);
+            }
+        }
+        template <class A, class T_in, class T_out>
+        XSIMD_INLINE batch<T_out, A> load_aligned(T_in const* mem, convert<T_out> cvt, requires_arch<common>) noexcept
+        {
+            return detail::load_aligned<A>(mem, cvt, A {}, detail::conversion_type<A, T_in, T_out> {});
+        }
+
+        // load_unaligned
+        namespace detail
+        {
+            template <class A, class T_in, class T_out>
+            XSIMD_INLINE batch<T_out, A> load_unaligned(T_in const* mem, convert<T_out>, requires_arch<common>, with_fast_conversion) noexcept
+            {
+                using batch_type_in = batch<T_in, A>;
+                using batch_type_out = batch<T_out, A>;
+                return fast_cast(batch_type_in::load_unaligned(mem), batch_type_out(), A {});
+            }
+
+            template <class A, class T_in, class T_out>
+            XSIMD_INLINE batch<T_out, A> load_unaligned(T_in const* mem, convert<T_out> cvt, requires_arch<common>, with_slow_conversion) noexcept
+            {
+                static_assert(!std::is_same<T_in, T_out>::value, "there should be a direct load for this type combination");
+                return load_aligned<A>(mem, cvt, common {}, with_slow_conversion {});
+            }
+        }
+        template <class A, class T_in, class T_out>
+        XSIMD_INLINE batch<T_out, A> load_unaligned(T_in const* mem, convert<T_out> cvt, requires_arch<common>) noexcept
+        {
+            return detail::load_unaligned<A>(mem, cvt, common {}, detail::conversion_type<A, T_in, T_out> {});
+        }
+
+        // rotate_right
+        template <size_t N, class A, class T>
+        XSIMD_INLINE batch<T, A> rotate_right(batch<T, A> const& self, requires_arch<common>) noexcept
+        {
+            struct rotate_generator
+            {
+                static constexpr size_t get(size_t index, size_t size)
+                {
+                    return (index - N) % size;
+                }
+            };
+
+            return swizzle(self, make_batch_constant<as_unsigned_integer_t<T>, rotate_generator, A>());
+        }
+
+        template <size_t N, class A, class T>
+        XSIMD_INLINE batch<std::complex<T>, A> rotate_right(batch<std::complex<T>, A> const& self, requires_arch<common>) noexcept
+        {
+            return { rotate_right<N>(self.real()), rotate_right<N>(self.imag()) };
+        }
+
+        // rotate_left
+        template <size_t N, class A, class T>
+        XSIMD_INLINE batch<T, A> rotate_left(batch<T, A> const& self, requires_arch<common>) noexcept
+        {
+            struct rotate_generator
+            {
+                static constexpr size_t get(size_t index, size_t size)
+                {
+                    return (index + N) % size;
+                }
+            };
+
+            return swizzle(self, make_batch_constant<as_unsigned_integer_t<T>, rotate_generator, A>());
+        }
+
+        template <size_t N, class A, class T>
+        XSIMD_INLINE batch<std::complex<T>, A> rotate_left(batch<std::complex<T>, A> const& self, requires_arch<common>) noexcept
+        {
+            return { rotate_left<N>(self.real()), rotate_left<N>(self.imag()) };
+        }
+
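The rotate generators above compute (index ± N) % size in size_t arithmetic, so the subtraction in rotate_right wraps around instead of going negative; for the power-of-two batch sizes xsimd uses, that is exactly (index + size - N) % size. A scalar sketch (hypothetical helper):

#include <array>
#include <cstddef>

// Lane i of rotate_right<N> takes lane (i + Size - N) % Size of the input.
template <std::size_t N, std::size_t Size>
std::array<int, Size> rotate_right_model(std::array<int, Size> const& in)
{
    std::array<int, Size> out {};
    for (std::size_t i = 0; i < Size; ++i)
        out[i] = in[(i + Size - N) % Size];
    return out;
}
// rotate_right_model<1>(std::array<int, 4> {1, 2, 3, 4}) == {4, 1, 2, 3}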
+        // Scatter with runtime indexes.
+        namespace detail
+        {
+            template <size_t N, typename T, typename A, typename U, typename V, typename std::enable_if<N == 0, int>::type = 0>
+            XSIMD_INLINE void scatter(batch<T, A> const& src, U* dst,
+                                      batch<V, A> const& index,
+                                      ::xsimd::index<N> I) noexcept
+            {
+                dst[index.get(I)] = static_cast<U>(src.get(I));
+            }
+
+            template <size_t N, typename T, typename A, typename U, typename V, typename std::enable_if<N != 0, int>::type = 0>
+            XSIMD_INLINE void
+            scatter(batch<T, A> const& src, U* dst, batch<V, A> const& index,
+                    ::xsimd::index<N> I) noexcept
+            {
+                static_assert(N <= batch<V, A>::size, "Incorrect value in recursion!");
+
+                kernel::detail::scatter<N - 1, T, A, U, V>(
+                    src, dst, index, {});
+                dst[index.get(I)] = static_cast<U>(src.get(I));
+            }
+        } // namespace detail
+
+        template <typename A, typename T, typename V>
+        XSIMD_INLINE void
+        scatter(batch<T, A> const& src, T* dst,
+                batch<V, A> const& index,
+                kernel::requires_arch<common>) noexcept
+        {
+            static_assert(batch<T, A>::size == batch<V, A>::size,
+                          "Source and index sizes must match");
+            kernel::detail::scatter<batch<V, A>::size - 1, T, A, T, V>(
+                src, dst, index, {});
+        }
+
+        template <typename A, typename T, typename U, typename V>
+        XSIMD_INLINE detail::sizes_mismatch_t<T, U, void>
+        scatter(batch<T, A> const& src, U* dst,
+                batch<V, A> const& index,
+                kernel::requires_arch<common>) noexcept
+        {
+            static_assert(batch<T, A>::size == batch<V, A>::size,
+                          "Source and index sizes must match");
+            kernel::detail::scatter<batch<V, A>::size - 1, T, A, U, V>(
+                src, dst, index, {});
+        }
+
+        template <typename A, typename T, typename U, typename V>
+        XSIMD_INLINE detail::stride_match_t<T, U, void>
+        scatter(batch<T, A> const& src, U* dst,
+                batch<V, A> const& index,
+                kernel::requires_arch<common>) noexcept
+        {
+            static_assert(batch<T, A>::size == batch<V, A>::size,
+                          "Source and index sizes must match");
+            const auto tmp = batch_cast<U>(src);
+            kernel::scatter<A>(tmp, dst, index, A {});
+        }
+
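Like gather, the recursive scatter above unrolls to one indexed store per lane: dst[index[i]] = src[i]. Scalar equivalent (illustrative only, hypothetical name):

#include <array>
#include <cstddef>

// Lane i of src is stored to dst[index[i]].
template <std::size_t N>
void scatter_model(std::array<float, N> const& src, float* dst,
                   std::array<int, N> const& index)
{
    for (std::size_t i = 0; i < N; ++i)
        dst[index[i]] = src[i];
}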
+        // shuffle
+        namespace detail
+        {
+            constexpr bool is_swizzle_fst(size_t)
+            {
+                return true;
+            }
+            template <typename ITy, typename... ITys>
+            constexpr bool is_swizzle_fst(size_t bsize, ITy index, ITys... indices)
+            {
+                return index < bsize && is_swizzle_fst(bsize, indices...);
+            }
+            constexpr bool is_swizzle_snd(size_t)
+            {
+                return true;
+            }
+            template <typename ITy, typename... ITys>
+            constexpr bool is_swizzle_snd(size_t bsize, ITy index, ITys... indices)
+            {
+                return index >= bsize && is_swizzle_snd(bsize, indices...);
+            }
+
+            constexpr bool is_zip_lo(size_t)
+            {
+                return true;
+            }
+
+            template <typename ITy>
+            constexpr bool is_zip_lo(size_t, ITy)
+            {
+                return false;
+            }
+
+            template <typename ITy0, typename ITy1, typename... ITys>
+            constexpr bool is_zip_lo(size_t bsize, ITy0 index0, ITy1 index1, ITys... indices)
+            {
+                return index0 == (bsize - (sizeof...(indices) + 2)) && index1 == (2 * bsize - (sizeof...(indices) + 2)) && is_zip_lo(bsize, indices...);
+            }
+
+            constexpr bool is_zip_hi(size_t)
+            {
+                return true;
+            }
+
+            template <typename ITy>
+            constexpr bool is_zip_hi(size_t, ITy)
+            {
+                return false;
+            }
+
+            template <typename ITy0, typename ITy1, typename... ITys>
+            constexpr bool is_zip_hi(size_t bsize, ITy0 index0, ITy1 index1, ITys... indices)
+            {
+                return index0 == (bsize / 2 + bsize - (sizeof...(indices) + 2)) && index1 == (bsize / 2 + 2 * bsize - (sizeof...(indices) + 2)) && is_zip_hi(bsize, indices...);
+            }
+
+            constexpr bool is_select(size_t)
+            {
+                return true;
+            }
+
+            template <typename ITy, typename... ITys>
+            constexpr bool is_select(size_t bsize, ITy index, ITys... indices)
+            {
+                return (index < bsize ? index : index - bsize) == (bsize - sizeof...(ITys)) && is_select(bsize, indices...);
+            }
+
+        }
+
+        template <class A, typename T, typename ITy, ITy... Indices>
+        XSIMD_INLINE batch<T, A> shuffle(batch<T, A> const& x, batch<T, A> const& y, batch_constant<ITy, A, Indices...>, requires_arch<common>) noexcept
+        {
+            constexpr size_t bsize = sizeof...(Indices);
+            static_assert(bsize == batch<T, A>::size, "valid shuffle");
+
+            // Detect common patterns
+            XSIMD_IF_CONSTEXPR(detail::is_swizzle_fst(bsize, Indices...))
+            {
+                return swizzle(x, batch_constant<ITy, A, ((Indices >= bsize) ? 0 /* never happens */ : Indices)...>());
+            }
+
+            XSIMD_IF_CONSTEXPR(detail::is_swizzle_snd(bsize, Indices...))
+            {
+                return swizzle(y, batch_constant<ITy, A, ((Indices >= bsize) ? (Indices - bsize) : 0 /* never happens */)...>());
+            }
+
+            XSIMD_IF_CONSTEXPR(detail::is_zip_lo(bsize, Indices...))
+            {
+                return zip_lo(x, y);
+            }
+
+            XSIMD_IF_CONSTEXPR(detail::is_zip_hi(bsize, Indices...))
+            {
+                return zip_hi(x, y);
+            }
+
+            XSIMD_IF_CONSTEXPR(detail::is_select(bsize, Indices...))
+            {
+                return select(batch_bool_constant<T, A, (Indices < bsize)...>(), x, y);
+            }
+
+#if defined(__has_builtin) && !defined(XSIMD_WITH_EMULATED)
+#if __has_builtin(__builtin_shufflevector)
+#define builtin_shuffle __builtin_shufflevector
+#endif
+#endif
+
+#if defined(builtin_shuffle)
+            typedef T vty __attribute__((__vector_size__(sizeof(batch<T, A>))));
+            return (typename batch<T, A>::register_type)builtin_shuffle((vty)x.data, (vty)y.data, Indices...);
+
+// FIXME: my experiments show that GCC only correctly optimizes this builtin
+// starting at GCC 13, where it already has __builtin_shuffle_vector
+//
+// #elif __has_builtin(__builtin_shuffle) || GCC >= 6
+//             typedef ITy integer_vector_type __attribute__((vector_size(sizeof(batch<ITy, A>))));
+//             return __builtin_shuffle(x.data, y.data, integer_vector_type{Indices...});
+#else
+            // Use a common_pattern. It is suboptimal but clang optimizes this
+            // pretty well.
+            batch<T, A> x_lane = swizzle(x, batch_constant<ITy, A, ((Indices >= bsize) ? (Indices - bsize) : Indices)...>());
+            batch<T, A> y_lane = swizzle(y, batch_constant<ITy, A, ((Indices >= bsize) ? (Indices - bsize) : Indices)...>());
+            batch_bool_constant<T, A, (Indices < bsize)...> select_x_lane;
+            return select(select_x_lane, x_lane, y_lane);
+#endif
+        }
+
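When none of the patterns above match and no builtin is available, shuffle falls back to two swizzles plus a select: each output lane comes from x when its index is below bsize, otherwise from y. A scalar model of that lane selection (hypothetical helper, not the xsimd code):

#include <array>
#include <cstddef>

// Each output lane picks from x (index < N) or from y (index >= N).
template <std::size_t N>
std::array<int, N> shuffle_model(std::array<int, N> const& x, std::array<int, N> const& y,
                                 std::array<std::size_t, N> const& indices)
{
    std::array<int, N> out {};
    for (std::size_t i = 0; i < N; ++i)
        out[i] = indices[i] < N ? x[indices[i]] : y[indices[i] - N];
    return out;
}
// with x = {1, 2, 3, 4}, y = {5, 6, 7, 8} and indices {0, 4, 1, 5},
// the result is {1, 5, 2, 6} (an interleave of the two low halves).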
+        // store
+        template <class A, class T>
+        XSIMD_INLINE void store(batch_bool<T, A> const& self, bool* mem, requires_arch<common>) noexcept
+        {
+            using batch_type = batch<T, A>;
+            constexpr auto size = batch_bool<T, A>::size;
+            alignas(A::alignment()) T buffer[size];
+            kernel::store_aligned<A>(&buffer[0], batch_type(self), A {});
+            for (std::size_t i = 0; i < size; ++i)
+                mem[i] = bool(buffer[i]);
+        }
+
+        // store_aligned
+        template <class A, class T_in, class T_out>
+        XSIMD_INLINE void store_aligned(T_out* mem, batch<T_in, A> const& self, requires_arch<common>) noexcept
+        {
+            static_assert(!std::is_same<T_in, T_out>::value, "there should be a direct store for this type combination");
+            alignas(A::alignment()) T_in buffer[batch<T_in, A>::size];
+            store_aligned(&buffer[0], self);
+            std::copy(std::begin(buffer), std::end(buffer), mem);
+        }
+
+        // store_unaligned
+        template <class A, class T_in, class T_out>
+        XSIMD_INLINE void store_unaligned(T_out* mem, batch<T_in, A> const& self, requires_arch<common>) noexcept
+        {
+            static_assert(!std::is_same<T_in, T_out>::value, "there should be a direct store for this type combination");
+            return store_aligned<A>(mem, self, common {});
+        }
+
+        // swizzle
+        template <class A, class T, class ITy, ITy... Vs>
+        XSIMD_INLINE batch<std::complex<T>, A> swizzle(batch<std::complex<T>, A> const& self, batch_constant<ITy, A, Vs...> mask, requires_arch<common>) noexcept
+        {
+            return { swizzle(self.real(), mask), swizzle(self.imag(), mask) };
+        }
+
+        template <class A, class T, class ITy>
+        XSIMD_INLINE batch<T, A> swizzle(batch<T, A> const& self, batch<ITy, A> mask, requires_arch<common>) noexcept
+        {
+            constexpr size_t size = batch<T, A>::size;
+            alignas(A::alignment()) T self_buffer[size];
+            store_aligned(&self_buffer[0], self);
+
+            alignas(A::alignment()) ITy mask_buffer[size];
+            store_aligned(&mask_buffer[0], mask);
+
+            alignas(A::alignment()) T out_buffer[size];
+            for (size_t i = 0; i < size; ++i)
+                out_buffer[i] = self_buffer[mask_buffer[i]];
+            return batch<T, A>::load_aligned(out_buffer);
+        }
+
+        template <class A, class T, class ITy, ITy... Is>
+        XSIMD_INLINE batch<T, A> swizzle(batch<T, A> const& self, batch_constant<ITy, A, Is...>, requires_arch<common>) noexcept
+        {
+            constexpr size_t size = batch<T, A>::size;
+            alignas(A::alignment()) T self_buffer[size];
+            store_aligned(&self_buffer[0], self);
+            return { self_buffer[Is]... };
+        }
+
+        template <class A, class T, class ITy>
+        XSIMD_INLINE batch<std::complex<T>, A> swizzle(batch<std::complex<T>, A> const& self, batch<ITy, A> mask, requires_arch<common>) noexcept
+        {
+            return { swizzle(self.real(), mask), swizzle(self.imag(), mask) };
+        }
+
+        // load_complex_aligned
+        namespace detail
+        {
+            template <class A, class T>
+            XSIMD_INLINE batch<std::complex<T>, A> load_complex(batch<T, A> const& /*hi*/, batch<T, A> const& /*lo*/, requires_arch<common>) noexcept
+            {
+                static_assert(std::is_same<T, void>::value, "load_complex not implemented for the required architecture");
+            }
+
+            template <class A, class T>
+            XSIMD_INLINE batch<T, A> complex_high(batch<std::complex<T>, A> const& /*src*/, requires_arch<common>) noexcept
+            {
+                static_assert(std::is_same<T, void>::value, "complex_high not implemented for the required architecture");
+            }
+
+            template <class A, class T>
+            XSIMD_INLINE batch<T, A> complex_low(batch<std::complex<T>, A> const& /*src*/, requires_arch<common>) noexcept
+            {
+                static_assert(std::is_same<T, void>::value, "complex_low not implemented for the required architecture");
+            }
+        }
+
+        template <class A, class T_out, class T_in>
+        XSIMD_INLINE batch<std::complex<T_out>, A> load_complex_aligned(std::complex<T_in> const* mem, convert<std::complex<T_out>>, requires_arch<common>) noexcept
+        {
+            using real_batch = batch<T_out, A>;
+            T_in const* buffer = reinterpret_cast<T_in const*>(mem);
+            real_batch hi = real_batch::load_aligned(buffer),
+                       lo = real_batch::load_aligned(buffer + real_batch::size);
+            return detail::load_complex(hi, lo, A {});
+        }
+
+        // load_complex_unaligned
+        template <class A, class T_out, class T_in>
+        XSIMD_INLINE batch<std::complex<T_out>, A> load_complex_unaligned(std::complex<T_in> const* mem, convert<std::complex<T_out>>, requires_arch<common>) noexcept
+        {
+            using real_batch = batch<T_out, A>;
+            T_in const* buffer = reinterpret_cast<T_in const*>(mem);
+            real_batch hi = real_batch::load_unaligned(buffer),
+                       lo = real_batch::load_unaligned(buffer + real_batch::size);
+            return detail::load_complex(hi, lo, A {});
+        }
+
+        // store_complex_aligned
+        template <class A, class T_out, class T_in>
+        XSIMD_INLINE void store_complex_aligned(std::complex<T_out>* dst, batch<std::complex<T_in>, A> const& src, requires_arch<common>) noexcept
+        {
+            using real_batch = batch<T_in, A>;
+            real_batch hi = detail::complex_high(src, A {});
+            real_batch lo = detail::complex_low(src, A {});
+            T_out* buffer = reinterpret_cast<T_out*>(dst);
+            lo.store_aligned(buffer);
+            hi.store_aligned(buffer + real_batch::size);
+        }
+
+        // store_complex_unaligned
+        template <class A, class T_out, class T_in>
+        XSIMD_INLINE void store_complex_unaligned(std::complex<T_out>* dst, batch<std::complex<T_in>, A> const& src, requires_arch<common>) noexcept
+        {
+            using real_batch = batch<T_in, A>;
+            real_batch hi = detail::complex_high(src, A {});
+            real_batch lo = detail::complex_low(src, A {});
+            T_out* buffer = reinterpret_cast<T_out*>(dst);
+            lo.store_unaligned(buffer);
+            hi.store_unaligned(buffer + real_batch::size);
+        }
+
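The complex load/store kernels above rely on std::complex<T>'s guaranteed interleaved layout: a batch of N complex values occupies 2N contiguous scalars (re0, im0, re1, im1, ...), read as two real batches (hi = first half, lo = second half) and deinterleaved by the architecture-specific detail::load_complex. A scalar sketch of the layout (hypothetical helper):

#include <array>
#include <complex>
#include <cstddef>

// N complex values are 2N contiguous scalars, split into real/imag parts.
template <std::size_t N>
void load_complex_model(std::complex<float> const* mem,
                        std::array<float, N>& real_out, std::array<float, N>& imag_out)
{
    float const* buffer = reinterpret_cast<float const*>(mem); // valid for std::complex
    for (std::size_t i = 0; i < N; ++i)
    {
        real_out[i] = buffer[2 * i];
        imag_out[i] = buffer[2 * i + 1];
    }
}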
+        // transpose
+        template <class A, class T>
+        XSIMD_INLINE void transpose(batch<T, A>* matrix_begin, batch<T, A>* matrix_end, requires_arch<common>) noexcept
+        {
+            assert((matrix_end - matrix_begin == batch<T, A>::size) && "correctly sized matrix");
+            (void)matrix_end;
+            alignas(A::alignment()) T scratch_buffer[batch<T, A>::size * batch<T, A>::size];
+            for (size_t i = 0; i < batch<T, A>::size; ++i)
+            {
+                matrix_begin[i].store_aligned(&scratch_buffer[i * batch<T, A>::size]);
+            }
+            // FIXME: this is super naive we can probably do better.
+            for (size_t i = 0; i < batch<T, A>::size; ++i)
+            {
+                for (size_t j = 0; j < i; ++j)
+                {
+                    std::swap(scratch_buffer[i * batch<T, A>::size + j],
+                              scratch_buffer[j * batch<T, A>::size + i]);
+                }
+            }
+            for (size_t i = 0; i < batch<T, A>::size; ++i)
+            {
+                matrix_begin[i] = batch<T, A>::load_aligned(&scratch_buffer[i * batch<T, A>::size]);
+            }
+        }
+
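The specializations that follow replace this scratch-buffer transpose with rounds of zip_lo/zip_hi interleaves at doubling element widths (16-bit, then 32-bit, then 64-bit for int16_t). Scalar models of the two primitives they build on (illustrative only, hypothetical names):

#include <array>
#include <cstddef>

// zip_lo merges the low halves of two rows element by element.
template <std::size_t N>
std::array<int, N> zip_lo_model(std::array<int, N> const& a, std::array<int, N> const& b)
{
    std::array<int, N> out {};
    for (std::size_t i = 0; i < N / 2; ++i)
    {
        out[2 * i] = a[i];
        out[2 * i + 1] = b[i];
    }
    return out;
}

// zip_hi does the same with the high halves.
template <std::size_t N>
std::array<int, N> zip_hi_model(std::array<int, N> const& a, std::array<int, N> const& b)
{
    std::array<int, N> out {};
    for (std::size_t i = 0; i < N / 2; ++i)
    {
        out[2 * i] = a[N / 2 + i];
        out[2 * i + 1] = b[N / 2 + i];
    }
    return out;
}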
+        // transpose
+        template <class A, class = typename std::enable_if<batch<int16_t, A>::size == 8, void>::type>
+        XSIMD_INLINE void transpose(batch<int16_t, A>* matrix_begin, batch<int16_t, A>* matrix_end, requires_arch<common>) noexcept
+        {
+            assert((matrix_end - matrix_begin == batch<int16_t, A>::size) && "correctly sized matrix");
+            (void)matrix_end;
+            auto l0 = zip_lo(matrix_begin[0], matrix_begin[1]);
+            auto l1 = zip_lo(matrix_begin[2], matrix_begin[3]);
+            auto l2 = zip_lo(matrix_begin[4], matrix_begin[5]);
+            auto l3 = zip_lo(matrix_begin[6], matrix_begin[7]);
+
+            auto l4 = zip_lo(bit_cast<batch<int32_t, A>>(l0), bit_cast<batch<int32_t, A>>(l1));
+            auto l5 = zip_lo(bit_cast<batch<int32_t, A>>(l2), bit_cast<batch<int32_t, A>>(l3));
+
+            auto l6 = zip_hi(bit_cast<batch<int32_t, A>>(l0), bit_cast<batch<int32_t, A>>(l1));
+            auto l7 = zip_hi(bit_cast<batch<int32_t, A>>(l2), bit_cast<batch<int32_t, A>>(l3));
+
+            auto h0 = zip_hi(matrix_begin[0], matrix_begin[1]);
+            auto h1 = zip_hi(matrix_begin[2], matrix_begin[3]);
+            auto h2 = zip_hi(matrix_begin[4], matrix_begin[5]);
+            auto h3 = zip_hi(matrix_begin[6], matrix_begin[7]);
+
+            auto h4 = zip_lo(bit_cast<batch<int32_t, A>>(h0), bit_cast<batch<int32_t, A>>(h1));
+            auto h5 = zip_lo(bit_cast<batch<int32_t, A>>(h2), bit_cast<batch<int32_t, A>>(h3));
+
+            auto h6 = zip_hi(bit_cast<batch<int32_t, A>>(h0), bit_cast<batch<int32_t, A>>(h1));
+            auto h7 = zip_hi(bit_cast<batch<int32_t, A>>(h2), bit_cast<batch<int32_t, A>>(h3));
+
+            matrix_begin[0] = bit_cast<batch<int16_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(l4), bit_cast<batch<int64_t, A>>(l5)));
+            matrix_begin[1] = bit_cast<batch<int16_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(l4), bit_cast<batch<int64_t, A>>(l5)));
+            matrix_begin[2] = bit_cast<batch<int16_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(l6), bit_cast<batch<int64_t, A>>(l7)));
+            matrix_begin[3] = bit_cast<batch<int16_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(l6), bit_cast<batch<int64_t, A>>(l7)));
+
+            matrix_begin[4] = bit_cast<batch<int16_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(h4), bit_cast<batch<int64_t, A>>(h5)));
+            matrix_begin[5] = bit_cast<batch<int16_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(h4), bit_cast<batch<int64_t, A>>(h5)));
+            matrix_begin[6] = bit_cast<batch<int16_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(h6), bit_cast<batch<int64_t, A>>(h7)));
+            matrix_begin[7] = bit_cast<batch<int16_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(h6), bit_cast<batch<int64_t, A>>(h7)));
+        }
+
+        template <class A>
+        XSIMD_INLINE void transpose(batch<uint16_t, A>* matrix_begin, batch<uint16_t, A>* matrix_end, requires_arch<common>) noexcept
+        {
+            transpose(reinterpret_cast<batch<int16_t, A>*>(matrix_begin), reinterpret_cast<batch<int16_t, A>*>(matrix_end), A {});
+        }
+
+        template <class A, class = typename std::enable_if<batch<int8_t, A>::size == 16, void>::type>
+        XSIMD_INLINE void transpose(batch<int8_t, A>* matrix_begin, batch<int8_t, A>* matrix_end, requires_arch<common>) noexcept
+        {
+            assert((matrix_end - matrix_begin == batch<int8_t, A>::size) && "correctly sized matrix");
+            (void)matrix_end;
+            auto l0 = zip_lo(matrix_begin[0], matrix_begin[1]);
+            auto l1 = zip_lo(matrix_begin[2], matrix_begin[3]);
+            auto l2 = zip_lo(matrix_begin[4], matrix_begin[5]);
+            auto l3 = zip_lo(matrix_begin[6], matrix_begin[7]);
+            auto l4 = zip_lo(matrix_begin[8], matrix_begin[9]);
+            auto l5 = zip_lo(matrix_begin[10], matrix_begin[11]);
+            auto l6 = zip_lo(matrix_begin[12], matrix_begin[13]);
+            auto l7 = zip_lo(matrix_begin[14], matrix_begin[15]);
+
+            auto h0 = zip_hi(matrix_begin[0], matrix_begin[1]);
+            auto h1 = zip_hi(matrix_begin[2], matrix_begin[3]);
+            auto h2 = zip_hi(matrix_begin[4], matrix_begin[5]);
+            auto h3 = zip_hi(matrix_begin[6], matrix_begin[7]);
+            auto h4 = zip_hi(matrix_begin[8], matrix_begin[9]);
+            auto h5 = zip_hi(matrix_begin[10], matrix_begin[11]);
+            auto h6 = zip_hi(matrix_begin[12], matrix_begin[13]);
+            auto h7 = zip_hi(matrix_begin[14], matrix_begin[15]);
+
+            auto L0 = zip_lo(bit_cast<batch<int16_t, A>>(l0), bit_cast<batch<int16_t, A>>(l1));
+            auto L1 = zip_lo(bit_cast<batch<int16_t, A>>(l2), bit_cast<batch<int16_t, A>>(l3));
+            auto L2 = zip_lo(bit_cast<batch<int16_t, A>>(l4), bit_cast<batch<int16_t, A>>(l5));
+            auto L3 = zip_lo(bit_cast<batch<int16_t, A>>(l6), bit_cast<batch<int16_t, A>>(l7));
+
+            auto m0 = zip_lo(bit_cast<batch<int32_t, A>>(L0), bit_cast<batch<int32_t, A>>(L1));
+            auto m1 = zip_lo(bit_cast<batch<int32_t, A>>(L2), bit_cast<batch<int32_t, A>>(L3));
+            auto m2 = zip_hi(bit_cast<batch<int32_t, A>>(L0), bit_cast<batch<int32_t, A>>(L1));
+            auto m3 = zip_hi(bit_cast<batch<int32_t, A>>(L2), bit_cast<batch<int32_t, A>>(L3));
+
+            matrix_begin[0] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(m0), bit_cast<batch<int64_t, A>>(m1)));
+            matrix_begin[1] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(m0), bit_cast<batch<int64_t, A>>(m1)));
+            matrix_begin[2] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(m2), bit_cast<batch<int64_t, A>>(m3)));
+            matrix_begin[3] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(m2), bit_cast<batch<int64_t, A>>(m3)));
+
+            auto L4 = zip_hi(bit_cast<batch<int16_t, A>>(l0), bit_cast<batch<int16_t, A>>(l1));
+            auto L5 = zip_hi(bit_cast<batch<int16_t, A>>(l2), bit_cast<batch<int16_t, A>>(l3));
+            auto L6 = zip_hi(bit_cast<batch<int16_t, A>>(l4), bit_cast<batch<int16_t, A>>(l5));
+            auto L7 = zip_hi(bit_cast<batch<int16_t, A>>(l6), bit_cast<batch<int16_t, A>>(l7));
+
+            auto m4 = zip_lo(bit_cast<batch<int32_t, A>>(L4), bit_cast<batch<int32_t, A>>(L5));
+            auto m5 = zip_lo(bit_cast<batch<int32_t, A>>(L6), bit_cast<batch<int32_t, A>>(L7));
+            auto m6 = zip_hi(bit_cast<batch<int32_t, A>>(L4), bit_cast<batch<int32_t, A>>(L5));
+            auto m7 = zip_hi(bit_cast<batch<int32_t, A>>(L6), bit_cast<batch<int32_t, A>>(L7));
+
+            matrix_begin[4] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(m4), bit_cast<batch<int64_t, A>>(m5)));
+            matrix_begin[5] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(m4), bit_cast<batch<int64_t, A>>(m5)));
+            matrix_begin[6] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(m6), bit_cast<batch<int64_t, A>>(m7)));
+            matrix_begin[7] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(m6), bit_cast<batch<int64_t, A>>(m7)));
+
+            auto H0 = zip_lo(bit_cast<batch<int16_t, A>>(h0), bit_cast<batch<int16_t, A>>(h1));
+            auto H1 = zip_lo(bit_cast<batch<int16_t, A>>(h2), bit_cast<batch<int16_t, A>>(h3));
+            auto H2 = zip_lo(bit_cast<batch<int16_t, A>>(h4), bit_cast<batch<int16_t, A>>(h5));
+            auto H3 = zip_lo(bit_cast<batch<int16_t, A>>(h6), bit_cast<batch<int16_t, A>>(h7));
+
+            auto M0 = zip_lo(bit_cast<batch<int32_t, A>>(H0), bit_cast<batch<int32_t, A>>(H1));
+            auto M1 = zip_lo(bit_cast<batch<int32_t, A>>(H2), bit_cast<batch<int32_t, A>>(H3));
+            auto M2 = zip_hi(bit_cast<batch<int32_t, A>>(H0), bit_cast<batch<int32_t, A>>(H1));
+            auto M3 = zip_hi(bit_cast<batch<int32_t, A>>(H2), bit_cast<batch<int32_t, A>>(H3));
+
+            matrix_begin[8] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(M0), bit_cast<batch<int64_t, A>>(M1)));
+            matrix_begin[9] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(M0), bit_cast<batch<int64_t, A>>(M1)));
+            matrix_begin[10] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(M2), bit_cast<batch<int64_t, A>>(M3)));
+            matrix_begin[11] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(M2), bit_cast<batch<int64_t, A>>(M3)));
+
+            auto H4 = zip_hi(bit_cast<batch<int16_t, A>>(h0), bit_cast<batch<int16_t, A>>(h1));
+            auto H5 = zip_hi(bit_cast<batch<int16_t, A>>(h2), bit_cast<batch<int16_t, A>>(h3));
+            auto H6 = zip_hi(bit_cast<batch<int16_t, A>>(h4), bit_cast<batch<int16_t, A>>(h5));
+            auto H7 = zip_hi(bit_cast<batch<int16_t, A>>(h6), bit_cast<batch<int16_t, A>>(h7));
+
+            auto M4 = zip_lo(bit_cast<batch<int32_t, A>>(H4), bit_cast<batch<int32_t, A>>(H5));
+            auto M5 = zip_lo(bit_cast<batch<int32_t, A>>(H6), bit_cast<batch<int32_t, A>>(H7));
+            auto M6 = zip_hi(bit_cast<batch<int32_t, A>>(H4), bit_cast<batch<int32_t, A>>(H5));
+            auto M7 = zip_hi(bit_cast<batch<int32_t, A>>(H6), bit_cast<batch<int32_t, A>>(H7));
+
+            matrix_begin[12] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(M4), bit_cast<batch<int64_t, A>>(M5)));
+            matrix_begin[13] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(M4), bit_cast<batch<int64_t, A>>(M5)));
+            matrix_begin[14] = bit_cast<batch<int8_t, A>>(zip_lo(bit_cast<batch<int64_t, A>>(M6), bit_cast<batch<int64_t, A>>(M7)));
+            matrix_begin[15] = bit_cast<batch<int8_t, A>>(zip_hi(bit_cast<batch<int64_t, A>>(M6), bit_cast<batch<int64_t, A>>(M7)));
+        }
+
+        template <class A>
+        XSIMD_INLINE void transpose(batch<uint8_t, A>* matrix_begin, batch<uint8_t, A>* matrix_end, requires_arch<common>) noexcept
+        {
+            transpose(reinterpret_cast<batch<int8_t, A>*>(matrix_begin), reinterpret_cast<batch<int8_t, A>*>(matrix_end), A {});
+        }
+
+    }
+
+}
+
+#endif