sequenzo-0.1.17-cp39-cp39-win_amd64.whl → sequenzo-0.1.18-cp39-cp39-win_amd64.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of sequenzo has been flagged as potentially problematic.

Files changed (101)
  1. sequenzo/__init__.py +25 -1
  2. sequenzo/big_data/clara/clara.py +1 -1
  3. sequenzo/big_data/clara/utils/get_weighted_diss.c +156 -156
  4. sequenzo/big_data/clara/utils/get_weighted_diss.cp39-win_amd64.pyd +0 -0
  5. sequenzo/clustering/clustering_c_code.cp39-win_amd64.pyd +0 -0
  6. sequenzo/clustering/hierarchical_clustering.py +202 -8
  7. sequenzo/define_sequence_data.py +34 -2
  8. sequenzo/dissimilarity_measures/c_code.cp39-win_amd64.pyd +0 -0
  9. sequenzo/dissimilarity_measures/get_substitution_cost_matrix.py +1 -1
  10. sequenzo/dissimilarity_measures/src/DHDdistance.cpp +13 -37
  11. sequenzo/dissimilarity_measures/src/LCPdistance.cpp +13 -37
  12. sequenzo/dissimilarity_measures/src/OMdistance.cpp +12 -47
  13. sequenzo/dissimilarity_measures/src/OMspellDistance.cpp +103 -67
  14. sequenzo/dissimilarity_measures/src/dp_utils.h +160 -0
  15. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_arithmetic.hpp +41 -16
  16. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_complex.hpp +4 -0
  17. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_details.hpp +7 -0
  18. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_logical.hpp +10 -0
  19. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_math.hpp +127 -43
  20. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_memory.hpp +30 -2
  21. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_swizzle.hpp +174 -0
  22. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/common/xsimd_common_trigo.hpp +14 -5
  23. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx.hpp +111 -54
  24. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx2.hpp +131 -9
  25. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512bw.hpp +11 -113
  26. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512dq.hpp +39 -7
  27. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512f.hpp +336 -30
  28. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi.hpp +9 -37
  29. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp +58 -0
  30. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp +1 -0
  31. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp +35 -2
  32. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp +3 -1
  33. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp +17 -0
  34. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp +13 -0
  35. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp +18 -0
  36. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp +13 -0
  37. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp +8 -0
  38. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon.hpp +363 -34
  39. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_neon64.hpp +7 -0
  40. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_rvv.hpp +13 -0
  41. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_scalar.hpp +41 -4
  42. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse2.hpp +252 -16
  43. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sse3.hpp +9 -0
  44. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_ssse3.hpp +12 -1
  45. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_sve.hpp +7 -0
  46. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_vsx.hpp +892 -0
  47. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_wasm.hpp +78 -1
  48. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_arch.hpp +3 -1
  49. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_config.hpp +13 -2
  50. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_cpuid.hpp +5 -0
  51. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/config/xsimd_inline.hpp +5 -1
  52. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_all_registers.hpp +2 -0
  53. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_api.hpp +64 -1
  54. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_batch.hpp +36 -0
  55. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_rvv_register.hpp +40 -31
  56. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_traits.hpp +8 -0
  57. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/types/xsimd_vsx_register.hpp +77 -0
  58. sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/xsimd.hpp +6 -0
  59. sequenzo/dissimilarity_measures/src/xsimd/test/test_basic_math.cpp +6 -0
  60. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch.cpp +54 -2
  61. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_bool.cpp +8 -0
  62. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_cast.cpp +11 -4
  63. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_complex.cpp +18 -0
  64. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_int.cpp +8 -14
  65. sequenzo/dissimilarity_measures/src/xsimd/test/test_batch_manip.cpp +216 -173
  66. sequenzo/dissimilarity_measures/src/xsimd/test/test_load_store.cpp +6 -0
  67. sequenzo/dissimilarity_measures/src/xsimd/test/test_memory.cpp +1 -1
  68. sequenzo/dissimilarity_measures/src/xsimd/test/test_power.cpp +7 -4
  69. sequenzo/dissimilarity_measures/src/xsimd/test/test_select.cpp +6 -2
  70. sequenzo/dissimilarity_measures/src/xsimd/test/test_shuffle.cpp +32 -18
  71. sequenzo/dissimilarity_measures/src/xsimd/test/test_utils.hpp +21 -24
  72. sequenzo/dissimilarity_measures/src/xsimd/test/test_xsimd_api.cpp +69 -9
  73. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.c +156 -156
  74. sequenzo/dissimilarity_measures/utils/get_sm_trate_substitution_cost_matrix.cp39-win_amd64.pyd +0 -0
  75. sequenzo/dissimilarity_measures/utils/seqconc.c +156 -156
  76. sequenzo/dissimilarity_measures/utils/seqconc.cp39-win_amd64.pyd +0 -0
  77. sequenzo/dissimilarity_measures/utils/seqdss.c +156 -156
  78. sequenzo/dissimilarity_measures/utils/seqdss.cp39-win_amd64.pyd +0 -0
  79. sequenzo/dissimilarity_measures/utils/seqdur.c +156 -156
  80. sequenzo/dissimilarity_measures/utils/seqdur.cp39-win_amd64.pyd +0 -0
  81. sequenzo/dissimilarity_measures/utils/seqlength.c +156 -156
  82. sequenzo/dissimilarity_measures/utils/seqlength.cp39-win_amd64.pyd +0 -0
  83. sequenzo/sequence_characteristics/__init__.py +4 -0
  84. sequenzo/sequence_characteristics/complexity_index.py +17 -57
  85. sequenzo/sequence_characteristics/overall_cross_sectional_entropy.py +177 -111
  86. sequenzo/sequence_characteristics/plot_characteristics.py +30 -11
  87. sequenzo/sequence_characteristics/simple_characteristics.py +1 -0
  88. sequenzo/sequence_characteristics/state_frequencies_and_entropy_per_sequence.py +9 -3
  89. sequenzo/sequence_characteristics/turbulence.py +47 -67
  90. sequenzo/sequence_characteristics/variance_of_spell_durations.py +19 -9
  91. sequenzo/sequence_characteristics/within_sequence_entropy.py +5 -58
  92. sequenzo/visualization/plot_sequence_index.py +58 -35
  93. sequenzo/visualization/plot_state_distribution.py +57 -36
  94. sequenzo/with_event_history_analysis/__init__.py +35 -0
  95. sequenzo/with_event_history_analysis/sequence_analysis_multi_state_model.py +850 -0
  96. sequenzo/with_event_history_analysis/sequence_history_analysis.py +283 -0
  97. {sequenzo-0.1.17.dist-info → sequenzo-0.1.18.dist-info}/METADATA +7 -6
  98. {sequenzo-0.1.17.dist-info → sequenzo-0.1.18.dist-info}/RECORD +101 -94
  99. {sequenzo-0.1.17.dist-info → sequenzo-0.1.18.dist-info}/WHEEL +0 -0
  100. {sequenzo-0.1.17.dist-info → sequenzo-0.1.18.dist-info}/licenses/LICENSE +0 -0
  101. {sequenzo-0.1.17.dist-info → sequenzo-0.1.18.dist-info}/top_level.txt +0 -0
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_avx512vbmi2.hpp
@@ -67,6 +67,64 @@ namespace xsimd
         {
             return _mm512_maskz_expand_epi8(mask.mask(), self);
         }
+
+        // rotl
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& self, int32_t other, requires_arch<avx512vbmi2>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm512_shldv_epi16(self, self, _mm512_set1_epi16(static_cast<uint16_t>(other)));
+            }
+            else
+            {
+                return rotl(self, other, avx512bw {});
+            }
+        }
+
+        template <size_t count, class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& self, requires_arch<avx512vbmi2>) noexcept
+        {
+            constexpr auto bits = std::numeric_limits<T>::digits + std::numeric_limits<T>::is_signed;
+            static_assert(count < bits, "Count must be less than the number of bits in T");
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm512_shldi_epi16(self, self, count);
+            }
+            else
+            {
+                return rotl<count>(self, avx512bw {});
+            }
+        }
+
+        // rotr
+        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& self, int32_t other, requires_arch<avx512vbmi2>) noexcept
+        {
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm512_shrdv_epi16(self, self, _mm512_set1_epi16(static_cast<uint16_t>(other)));
+            }
+            else
+            {
+                return rotr(self, other, avx512bw {});
+            }
+        }
+
+        template <size_t count, class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& self, requires_arch<avx512vbmi2>) noexcept
+        {
+            constexpr auto bits = std::numeric_limits<T>::digits + std::numeric_limits<T>::is_signed;
+            static_assert(count < bits, "count must be less than the number of bits in T");
+            XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
+            {
+                return _mm512_shrdi_epi16(self, self, count);
+            }
+            else
+            {
+                return rotr<count>(self, avx512bw {});
+            }
+        }
     }
 }
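These kernels rotate the bits of every lane: rotl moves the top n bits of each lane around to the bottom and rotr does the reverse, which is what _mm512_shldv_epi16 / _mm512_shrdv_epi16 compute per 16-bit lane when the same register is passed as both data operands. A minimal scalar reference model of that semantics (illustration only, not library code):

#include <cstdint>
#include <iostream>

// Scalar reference for a 16-bit left/right rotation, matching what the
// AVX512VBMI2 kernels above compute per lane.
constexpr std::uint16_t rotl16(std::uint16_t v, unsigned n)
{
    n &= 15;  // reduce the count modulo 16 so the shifts below stay defined
    return static_cast<std::uint16_t>((v << n) | (v >> ((16 - n) & 15)));
}

constexpr std::uint16_t rotr16(std::uint16_t v, unsigned n)
{
    n &= 15;
    return static_cast<std::uint16_t>((v >> n) | (v << ((16 - n) & 15)));
}

int main()
{
    std::cout << std::hex
              << rotl16(0x8001, 1) << " "   // 0x0003: the top bit wraps to the bottom
              << rotr16(0x8001, 1) << "\n"; // 0xc000: the bottom bit wraps to the top
    return 0;
}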
 
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common.hpp
@@ -18,6 +18,7 @@
 #include "./common/xsimd_common_math.hpp"
 #include "./common/xsimd_common_memory.hpp"
 #include "./common/xsimd_common_rounding.hpp"
+#include "./common/xsimd_common_swizzle.hpp"
 #include "./common/xsimd_common_trigo.hpp"
 
 #endif
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_common_fwd.hpp
@@ -3,6 +3,7 @@
  * Martin Renou *
  * Copyright (c) QuantStack *
  * Copyright (c) Serge Guelton *
+ * Copyright (c) Marco Barbone *
  * *
  * Distributed under the terms of the BSD 3-Clause License. *
  * *
@@ -25,8 +26,12 @@ namespace xsimd
         XSIMD_INLINE batch<T, A> abs(batch<T, A> const& self, requires_arch<common>) noexcept;
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept;
+        template <size_t shift, class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> bitwise_lshift(batch<T, A> const& self, requires_arch<common>) noexcept;
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept;
+        template <size_t shift, class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        XSIMD_INLINE batch<T, A> bitwise_rshift(batch<T, A> const& self, requires_arch<common>) noexcept;
         template <class A, class T>
         XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept;
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
@@ -35,9 +40,37 @@ namespace xsimd
         XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept;
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> ssub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<common>) noexcept;
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
-        XSIMD_INLINE T hadd(batch<T, A> const& self, requires_arch<common>) noexcept;
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+        XSIMD_INLINE T reduce_add(batch<T, A> const& self, requires_arch<common>) noexcept;
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+        XSIMD_INLINE T reduce_mul(batch<T, A> const& self, requires_arch<common>) noexcept;
+        template <class A, class T, class STy>
+        XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& self, STy other, requires_arch<common>) noexcept;
+        template <size_t count, class A, class T>
+        XSIMD_INLINE batch<T, A> rotl(batch<T, A> const& self, requires_arch<common>) noexcept;
+        template <class A, class T, class STy>
+        XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& self, STy other, requires_arch<common>) noexcept;
+        template <size_t count, class A, class T>
+        XSIMD_INLINE batch<T, A> rotr(batch<T, A> const& self, requires_arch<common>) noexcept;
+        // Forward declarations for pack-level helpers
+        namespace detail
+        {
+            template <typename T, T... Vs>
+            XSIMD_INLINE constexpr bool is_identity() noexcept;
+            template <typename T, class A, T... Vs>
+            XSIMD_INLINE constexpr bool is_identity(batch_constant<T, A, Vs...>) noexcept;
+            template <typename T, class A, T... Vs>
+            XSIMD_INLINE constexpr bool is_all_different(batch_constant<T, A, Vs...>) noexcept;
+            template <typename T, class A, T... Vs>
+            XSIMD_INLINE constexpr bool is_dup_lo(batch_constant<T, A, Vs...>) noexcept;
+            template <typename T, class A, T... Vs>
+            XSIMD_INLINE constexpr bool is_dup_hi(batch_constant<T, A, Vs...>) noexcept;
+            template <typename T, class A, T... Vs>
+            XSIMD_INLINE constexpr bool is_cross_lane(batch_constant<T, A, Vs...>) noexcept;
+            template <typename T, class A, T... Vs>
+            XSIMD_INLINE constexpr bool no_duplicates(batch_constant<T, A, Vs...>) noexcept;
 
+        }
     }
 }
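The detail:: helpers above are forward declarations only; their definitions live in the new common/xsimd_common_swizzle.hpp added in this release (+174 lines), where they classify compile-time swizzle masks. As a purely hypothetical illustration of that kind of check (the real helpers may be written quite differently), an identity-mask test over a template index pack can look like:

#include <cstddef>

// Hypothetical sketch in the spirit of detail::is_identity above: true when
// the index pack is 0, 1, 2, ...  Requires C++14 for the relaxed constexpr
// body; the actual xsimd helper may differ.
template <typename T, T... Vs>
constexpr bool is_identity_sketch() noexcept
{
    const T idx[] = { Vs... };
    for (std::size_t i = 0; i < sizeof...(Vs); ++i)
        if (idx[i] != static_cast<T>(i))
            return false;
    return true;
}

static_assert(is_identity_sketch<unsigned, 0, 1, 2, 3>(), "0,1,2,3 is the identity mask");
static_assert(!is_identity_sketch<unsigned, 3, 2, 1, 0>(), "a reversed mask is not");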
 
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_constants.hpp
@@ -61,7 +61,10 @@ namespace xsimd
 #pragma GCC push_options
 #pragma GCC optimize("signed-zeros")
 #endif
+#ifndef __FAST_MATH__
         XSIMD_DEFINE_CONSTANT(infinity, (std::numeric_limits<float>::infinity()), (std::numeric_limits<double>::infinity()))
+        XSIMD_DEFINE_CONSTANT(minusinfinity, (-infinity<float>()), (-infinity<double>()))
+#endif
         XSIMD_DEFINE_CONSTANT(invlog_2, 1.442695040888963407359924681001892137426645954152986f, 1.442695040888963407359924681001892137426645954152986)
         XSIMD_DEFINE_CONSTANT_HEX(invlog_2hi, 0x3fb8b000, 0x3ff7154765200000)
         XSIMD_DEFINE_CONSTANT_HEX(invlog_2lo, 0xb9389ad4, 0x3de705fc2eefa200)
@@ -83,7 +86,6 @@ namespace xsimd
         XSIMD_DEFINE_CONSTANT(minlog, -88.3762626647949f, -708.3964185322641)
         XSIMD_DEFINE_CONSTANT(minlog2, -127.0f, -1023.)
         XSIMD_DEFINE_CONSTANT(minlog10, -37.89999771118164f, -308.2547155599167)
-        XSIMD_DEFINE_CONSTANT(minusinfinity, (-infinity<float>()), (-infinity<double>()))
         XSIMD_DEFINE_CONSTANT_HEX(nan, 0xffffffff, 0xffffffffffffffff)
         XSIMD_DEFINE_CONSTANT_HEX(oneosqrteps, 0x453504f3, 0x4190000000000000)
         XSIMD_DEFINE_CONSTANT_HEX(oneotwoeps, 0x4a800000, 0x4320000000000000)
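The new guard matters because GCC and Clang define __FAST_MATH__ when -ffast-math is in effect, a mode that assumes infinities and NaNs never occur; building compile-time +/-infinity constants in that mode is useless at best, so both constants now sit behind the #ifndef and the old stand-alone minusinfinity definition further down is removed. The same pattern in a minimal, self-contained sketch:

#include <iostream>
#include <limits>

int main()
{
#ifndef __FAST_MATH__
    // Normal build: IEEE infinities are meaningful and safe to use.
    constexpr float inf = std::numeric_limits<float>::infinity();
    std::cout << "infinity is available: " << inf << "\n";
#else
    // -ffast-math build: the compiler assumes no infinities/NaNs, so code
    // relying on them is skipped entirely.
    std::cout << "-ffast-math build: skipping infinity-based constants\n";
#endif
    return 0;
}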
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_emulated.hpp
@@ -230,6 +230,13 @@ namespace xsimd
             return r;
         }
 
+        // first
+        template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+        T XSIMD_INLINE first(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+        {
+            return self.data[0];
+        }
+
 #if 0
         // count
         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
@@ -594,6 +601,16 @@ namespace xsimd
                                  { return xsimd::min(x, y); });
         }
 
+        // reduce_mul
+        template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
+        XSIMD_INLINE T reduce_mul(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
+        {
+            constexpr size_t size = batch<T, A>::size;
+            std::array<T, size> buffer;
+            self.store_unaligned(buffer.data());
+            return std::accumulate(buffer.begin() + 1, buffer.end(), *buffer.begin(), std::multiplies<T>());
+        }
+
         // rsqrt
         template <class A, class T, size_t N = 8 * sizeof(T) * batch<T, A>::size>
         XSIMD_INLINE batch<T, A> rsqrt(batch<T, A> const& self, requires_arch<emulated<N>>) noexcept
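On the emulated architecture reduce_mul has no SIMD reduction to fall back on, so it spills the batch to a buffer and folds it with std::accumulate and std::multiplies, i.e. it returns the product of all lanes. Assuming the matching public entry point xsimd::reduce_mul (declared next to reduce_add in the updated forward declarations and xsimd_api.hpp) dispatches to kernels like this one, usage would look roughly like:

#include <iostream>
#include "xsimd/xsimd.hpp"

int main()
{
    // Broadcast 2.0f into every lane; the lane count depends on the
    // architecture xsimd selects at compile time.
    xsimd::batch<float> b(2.0f);

    // Assumed public API: product of all lanes, i.e. 2^N for an N-lane batch.
    std::cout << xsimd::reduce_mul(b) << "\n";
    return 0;
}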
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_avx.hpp
@@ -73,6 +73,19 @@ namespace xsimd
             return _mm256_fmsub_pd(x, y, z);
         }
 
+        // fmas
+        template <class A>
+        XSIMD_INLINE batch<float, A> fmas(batch<float, A> const& x, batch<float, A> const& y, batch<float, A> const& z, requires_arch<fma3<avx>>) noexcept
+        {
+            return _mm256_fmaddsub_ps(x, y, z);
+        }
+
+        template <class A>
+        XSIMD_INLINE batch<double, A> fmas(batch<double, A> const& x, batch<double, A> const& y, batch<double, A> const& z, requires_arch<fma3<avx>>) noexcept
+        {
+            return _mm256_fmaddsub_pd(x, y, z);
+        }
+
     }
 
 }
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma3_sse.hpp
@@ -71,6 +71,24 @@ namespace xsimd
         {
             return _mm_fmsub_pd(x, y, z);
         }
+        // fms
+        template <class A>
+        XSIMD_INLINE batch<float, A> fmas(batch<float, A> const& x,
+                                          batch<float, A> const& y,
+                                          batch<float, A> const& z,
+                                          requires_arch<fma3<sse4_2>>) noexcept
+        {
+            return _mm_fmaddsub_ps(x, y, z);
+        }
+
+        template <class A>
+        XSIMD_INLINE batch<double, A> fmas(batch<double, A> const& x,
+                                           batch<double, A> const& y,
+                                           batch<double, A> const& z,
+                                           requires_arch<fma3<sse4_2>>) noexcept
+        {
+            return _mm_fmaddsub_pd(x, y, z);
+        }
 
     }
 
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_fma4.hpp
@@ -72,6 +72,19 @@ namespace xsimd
         {
             return _mm_msub_pd(x, y, z);
         }
+
+        // fmas
+        template <class A>
+        XSIMD_INLINE batch<float, A> fmas(batch<float, A> const& x, batch<float, A> const& y, batch<float, A> const& z, requires_arch<fma4>) noexcept
+        {
+            return _mm_maddsub_ps(x, y, z);
+        }
+
+        template <class A>
+        XSIMD_INLINE batch<double, A> fmas(batch<double, A> const& x, batch<double, A> const& y, batch<double, A> const& z, requires_arch<fma4>) noexcept
+        {
+            return _mm_maddsub_pd(x, y, z);
+        }
     }
 
 }
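All three fmas additions (fma3<avx>, fma3<sse4_2> and fma4) wrap the alternating multiply add/subtract intrinsics: per the documented *maddsub* behaviour, even-indexed lanes compute x*y - z and odd-indexed lanes compute x*y + z, the lane pattern used in complex multiplication. A plain scalar reference of that pattern (illustration only, not the library implementation):

#include <array>
#include <cstddef>
#include <iostream>

// Scalar model of fmas/fmaddsub: subtract z on even lanes, add z on odd lanes.
template <std::size_t N>
std::array<float, N> fmas_reference(const std::array<float, N>& x,
                                    const std::array<float, N>& y,
                                    const std::array<float, N>& z)
{
    std::array<float, N> r {};
    for (std::size_t i = 0; i < N; ++i)
        r[i] = (i % 2 == 0) ? x[i] * y[i] - z[i]   // even lane: x*y - z
                            : x[i] * y[i] + z[i];  // odd lane:  x*y + z
    return r;
}

int main()
{
    std::array<float, 4> x { 1.0f, 2.0f, 3.0f, 4.0f };
    std::array<float, 4> y { 10.0f, 10.0f, 10.0f, 10.0f };
    std::array<float, 4> z { 1.0f, 1.0f, 1.0f, 1.0f };
    for (float v : fmas_reference(x, y, z))
        std::cout << v << ' ';                     // prints: 9 21 29 41
    std::cout << '\n';
    return 0;
}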
sequenzo/dissimilarity_measures/src/xsimd/include/xsimd/arch/xsimd_isa.hpp
@@ -72,6 +72,10 @@
 #include "./xsimd_avx512f.hpp"
 #endif
 
+#if XSIMD_WITH_AVX512DQ
+#include "./xsimd_avx512dq.hpp"
+#endif
+
 #if XSIMD_WITH_AVX512BW
 #include "./xsimd_avx512bw.hpp"
 #endif
@@ -128,6 +132,10 @@
 #include "./xsimd_wasm.hpp"
 #endif
 
+#if XSIMD_WITH_VSX
+#include "./xsimd_vsx.hpp"
+#endif
+
 // Must come last to have access to all conversion specializations.
 #include "./xsimd_common.hpp"