lisaanalysistools-cuda11x 1.1.0 (cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lisaanalysistools-cuda11x might be problematic.

Files changed (41)
  1. lisaanalysistools_cuda11x-1.1.0.dist-info/METADATA +300 -0
  2. lisaanalysistools_cuda11x-1.1.0.dist-info/RECORD +41 -0
  3. lisaanalysistools_cuda11x-1.1.0.dist-info/WHEEL +6 -0
  4. lisaanalysistools_cuda11x-1.1.0.dist-info/licenses/LICENSE +201 -0
  5. lisaanalysistools_cuda11x.libs/libcudart-d0da41ae.so.11.8.89 +0 -0
  6. lisatools/__init__.py +58 -0
  7. lisatools/_version.py +34 -0
  8. lisatools/analysiscontainer.py +474 -0
  9. lisatools/cutils/__init__.py +139 -0
  10. lisatools/datacontainer.py +312 -0
  11. lisatools/detector.py +696 -0
  12. lisatools/diagnostic.py +990 -0
  13. lisatools/git_version.py.in +7 -0
  14. lisatools/orbit_files/equalarmlength-orbits-best-fit-to-esa.h5 +0 -0
  15. lisatools/orbit_files/equalarmlength-orbits.h5 +0 -0
  16. lisatools/orbit_files/esa-trailing-orbits.h5 +0 -0
  17. lisatools/sampling/__init__.py +0 -0
  18. lisatools/sampling/likelihood.py +882 -0
  19. lisatools/sampling/moves/__init__.py +0 -0
  20. lisatools/sampling/moves/skymodehop.py +110 -0
  21. lisatools/sampling/prior.py +646 -0
  22. lisatools/sampling/stopping.py +320 -0
  23. lisatools/sampling/utility.py +411 -0
  24. lisatools/sensitivity.py +972 -0
  25. lisatools/sources/__init__.py +6 -0
  26. lisatools/sources/bbh/__init__.py +1 -0
  27. lisatools/sources/bbh/waveform.py +106 -0
  28. lisatools/sources/defaultresponse.py +36 -0
  29. lisatools/sources/emri/__init__.py +1 -0
  30. lisatools/sources/emri/waveform.py +79 -0
  31. lisatools/sources/gb/__init__.py +1 -0
  32. lisatools/sources/gb/waveform.py +69 -0
  33. lisatools/sources/utils.py +459 -0
  34. lisatools/sources/waveformbase.py +41 -0
  35. lisatools/stochastic.py +327 -0
  36. lisatools/utils/__init__.py +0 -0
  37. lisatools/utils/constants.py +54 -0
  38. lisatools/utils/parallelbase.py +11 -0
  39. lisatools/utils/utility.py +245 -0
  40. lisatools_backend_cuda11x/git_version.py +7 -0
  41. lisatools_backend_cuda11x/pycppdetector.cpython-310-x86_64-linux-gnu.so +0 -0
@@ -0,0 +1,327 @@
+ from __future__ import annotations
+ import warnings
+ from abc import ABC
+ from typing import Any, Tuple, Optional, List, Dict
+
+ import math
+ import numpy as np
+ from scipy import interpolate
+
+ try:
+     import cupy as cp
+
+ except (ModuleNotFoundError, ImportError):
+     import numpy as cp
+
+ from . import detector as lisa_models
+ from .utils.utility import AET
+ from .utils.constants import *
+
+
+ class StochasticContribution(ABC):
+     """Base Class for Stochastic Contributions to the PSD."""
+
+     ndim = None
+     added_stochastic_list = []
+
+     @classmethod
+     def _check_ndim(cls, params: np.ndarray | list) -> None:
+         """Check the dimensionality of the parameters matches the model.
+
+         Args:
+             params: Parameters for stochastic model.
+
+         """
+         if cls.ndim is None:
+             raise ValueError(
+                 "When subclassing the StochasticContribution class, must set `ndim` as a static attribute."
+             )
+
+         if len(params) != cls.ndim:
+             raise ValueError("length of parameters is not equivalent to class ndim.")
+
+     @classmethod
+     def get_Sh(
+         cls, f: float | np.ndarray, *params: np.ndarray | list, **kwargs: Any
+     ) -> float | np.ndarray:
+         """Calculate the power spectral density of the stochastic contribution.
+
+         Args:
+             f: Frequency array.
+             *params: Parameters for the stochastic model.
+             **kwargs: Keyword arguments for the stochastic model.
+
+         """
+         if len(cls.added_stochastic_list) > 0:
+             cls._check_ndim(params[0])
+         return cls.specific_Sh_function(f, *params, **kwargs)
+
+     @staticmethod
+     def specific_Sh_function(
+         f: float | np.ndarray, *args: Any, **kwargs: Any
+     ) -> float | np.ndarray:
+         """Calculate the power spectral density contained in a stochastic signal contribution.
+
+         Args:
+             f: Frequency array.
+             *args: Any arguments for the function.
+             **kwargs: Any keyword arguments for the function.
+
+         Returns:
+             Power spectral density contained in stochastic signal.
+
+         """
+         raise NotImplementedError
+
+
+ class StochasticContributionContainer:
+     """Container for multiple Stochastic Contributions.
+
+     Args:
+         stochastic_contribution_dict: Dictionary with multiple Stochastic entries.
+             Keys are the names and values are of type :class:`StochasticContribution`.
+
+     """
+
+     def __init__(
+         self, stochastic_contribution_dict: dict[StochasticContribution]
+     ) -> None:
+         self.stochastic_contribution_dict = stochastic_contribution_dict
+
+     @property
+     def stochastic_contribution_dict(self) -> dict[StochasticContribution]:
+         """Stochastic contribution storage."""
+         return self._stochastic_contribution_dict
+
+     @stochastic_contribution_dict.setter
+     def stochastic_contribution_dict(
+         self, stochastic_contribution_dict: dict[StochasticContribution]
+     ) -> None:
+         """Set stochastic_contribution_dict."""
+         assert isinstance(stochastic_contribution_dict, dict)
+         for key, value in stochastic_contribution_dict.items():
+             if not isinstance(value, StochasticContribution):
+                 raise ValueError(
+                     f"Stochastic model {key} is not of type StochasticContribution."
+                 )
+         self._stochastic_contribution_dict = stochastic_contribution_dict
+
+     def get_Sh(
+         self, f: float | np.ndarray, params_dict: dict[tuple], kwargs_dict: dict[dict]
+     ) -> np.ndarray:
+         """Calculate Sh for stochastic contribution.
+
+         Args:
+             f: Frequency array.
+             params_dict: Dictionary with keys equivalent to ``self.stochastic_contribution_dict.keys()``.
+                 Values are the parameters for each associated model.
+             kwargs_dict: Dictionary with keys equivalent to ``self.stochastic_contribution_dict.keys()``.
+                 Values are the keyword argument dicts for each associated model.
+
+         Returns:
+             Stochastic contribution.
+
+         """
+         Sh_out = np.zeros_like(f)
+         for key in params_dict:
+             stochastic_contrib = self.stochastic_contribution_dict[key]
+             Sh_out += stochastic_contrib.get_Sh(
+                 f, params_dict[key], **(kwargs_dict.get(key, {}))
+             )
+         return Sh_out
+
+     def __setitem__(self, key: str | int | tuple, val: StochasticContribution) -> None:
+         """Set an item by directly indexing the class object."""
+         self.stochastic_contribution_dict[key] = val
+
+     def __getitem__(self, key: str | int | tuple) -> StochasticContribution:
+         """Get item directly from dictionary."""
+         return self.stochastic_contribution_dict[key]
+
+
+ class HyperbolicTangentGalacticForeground(StochasticContribution):
+     """Hyperbolic Tangent-based foreground fitting function."""
+
+     ndim = 5
+
+     @staticmethod
+     def specific_Sh_function(
+         f: float | np.ndarray, amp: float, fk: float, alpha: float, s1: float, s2: float
+     ) -> float | np.ndarray:
+         """Hyperbolic tangent model 1 for the Galaxy foreground noise.
+
+         This model for the PSD contribution from the Galactic foreground noise is given by
+
+         .. math::
+
+             S_\\text{gal} = \\frac{A_\\text{gal}}{2}e^{-s_1f^\\alpha}f^{-7/3}\\left[ 1 + \\tanh{\\left(-s_2 (f - f_k)\\right)} \\right],
+
+         where :math:`A_\\text{gal}` is the amplitude of the stochastic signal, :math:`f_k` is the knee frequency at which a bend occurs,
+         :math:`\\alpha` is a power law parameter, :math:`s_1` is a slope parameter below the knee,
+         and :math:`s_2` is a slope parameter after the knee.
+
+         Args:
+             f: Frequency array.
+             amp: Amplitude parameter for the Galaxy.
+             fk: Knee frequency in Hz.
+             alpha: Power law parameter.
+             s1: Slope parameter below knee.
+             s2: Slope parameter above knee.
+
+         Returns:
+             PSD of the Galaxy foreground noise.
+
+         """
+         Sgal = (
+             amp
+             * np.exp(-(f**alpha) * s1)
+             * (f ** (-7.0 / 3.0))
+             * 0.5
+             * (1.0 + np.tanh(-(f - fk) * s2))
+         )
+
+         return Sgal
+
+
+ class FittedHyperbolicTangentGalacticForeground(HyperbolicTangentGalacticForeground):
+     # TODO: need to verify this is still working
+     ndim = 1
+     amp = 3.26651613e-44
+     alpha = 1.18300266e00
+     # Tobs should be in sec.
+     day = 86400.0
+     month = day * 30.5
+     year = 365.25 * 24.0 * 3600.0  # hard coded for initial fits
+
+     Xobs = [
+         1.0 * day,
+         3.0 * month,
+         6.0 * month,
+         1.0 * year,
+         2.0 * year,
+         4.0 * year,
+         10.0 * year,
+     ]
+     knee = [
+         1.15120924e-02,
+         4.01884128e-03,
+         3.47302482e-03,
+         2.77606177e-03,
+         2.41178384e-03,
+         2.09278117e-03,
+         1.57362626e-03,
+     ]
+     Slope1 = [
+         9.41315118e02,
+         1.36887568e03,
+         1.68729474e03,
+         1.76327234e03,
+         2.32678814e03,
+         3.01430978e03,
+         3.74970124e03,
+     ]
+
+     Slope2 = [
+         1.03239773e02,
+         1.03351646e03,
+         1.62204855e03,
+         1.68631844e03,
+         2.06821665e03,
+         2.95774596e03,
+         3.15199454e03,
+     ]
+     Tmax = 10 * YRSID_SI
+
+     @classmethod
+     def specific_Sh_function(
+         cls, f: float | np.ndarray, Tobs: float
+     ) -> float | np.ndarray:
+         """Fitted hyperbolic tangent model 1 for the Galaxy foreground noise.
+
+         This class fits the parameters for :class:`HyperbolicTangentGalacticForeground`
+         using analytic estimates from (# TODO). The fit is a function of time, so the user
+         inputs ``Tobs``.
+
+         # Sgal_1d = 2.2e-44*np.exp(-(fr**1.2)*0.9e3)*(fr**(-7./3.))*0.5*(1.0 + np.tanh(-(fr-1.4e-2)*0.7e2))
+         # Sgal_3m = 2.2e-44*np.exp(-(fr**1.2)*1.7e3)*(fr**(-7./3.))*0.5*(1.0 + np.tanh(-(fr-4.8e-3)*5.4e2))
+         # Sgal_1y = 2.2e-44*np.exp(-(fr**1.2)*2.2e3)*(fr**(-7./3.))*0.5*(1.0 + np.tanh(-(fr-3.1e-3)*1.3e3))
+         # Sgal_2y = 2.2e-44*np.exp(-(fr**1.2)*2.2e3)*(fr**(-7./3.))*0.5*(1.0 + np.tanh(-(fr-2.3e-3)*1.8e3))
+         # Sgal_4y = 2.2e-44*np.exp(-(fr**1.2)*2.9e3)*(fr**(-7./3.))*0.5*(1.0 + np.tanh(-(fr-2.0e-3)*1.9e3))
+
+         Args:
+             f: Frequency array.
+             Tobs: Observation time in seconds.
+
+         Returns:
+             PSD of the Galaxy foreground noise.
+
+         """
+
+         if Tobs > cls.Tmax:
+             raise ValueError(
+                 "Tobs is greater than the maximum allowable fit which is 10 years."
+             )
+
+         # Interpolate
+         tck1 = interpolate.splrep(cls.Xobs, cls.Slope1, s=0, k=1)
+         tck2 = interpolate.splrep(cls.Xobs, cls.knee, s=0, k=1)
+         tck3 = interpolate.splrep(cls.Xobs, cls.Slope2, s=0, k=1)
+         s1 = interpolate.splev(Tobs, tck1, der=0).item()
+         fk = interpolate.splev(Tobs, tck2, der=0).item()
+         s2 = interpolate.splev(Tobs, tck3, der=0).item()
+
+         return HyperbolicTangentGalacticForeground.specific_Sh_function(
+             f, cls.amp, fk, cls.alpha, s1, s2
+         )
+
+
+ __stock_gb_stochastic_options__ = [
+     "HyperbolicTangentGalacticForeground",
+     "FittedHyperbolicTangentGalacticForeground",
+ ]
+
+
+ def get_stock_gb_stochastic_options() -> List[StochasticContribution]:
+     """Get stock options for stochastic contributions.
+
+     Returns:
+         List of stock stochastic options.
+
+     """
+     return __stock_gb_stochastic_options__
+
+
+ def get_default_stochastic_from_str(stochastic: str) -> StochasticContribution:
+     """Return a LISA stochastic from a ``str`` input.
+
+     Args:
+         stochastic: Stochastic contribution indicated with a ``str``.
+
+     Returns:
+         Stochastic contribution associated to that ``str``.
+
+     """
+     if stochastic not in __stock_gb_stochastic_options__:
+         raise ValueError(
+             "Requested string stochastic is not available. See lisatools.stochastic documentation."
+         )
+     return globals()[stochastic]
+
+
+ def check_stochastic(stochastic: Any) -> StochasticContribution:
+     """Check input stochastic contribution.
+
+     Args:
+         stochastic: Stochastic contribution to check.
+
+     Returns:
+         Stochastic contribution checked. Adjusted from ``str`` if ``str`` input.
+
+     """
+     if isinstance(stochastic, str):
+         stochastic = get_default_stochastic_from_str(stochastic)
+
+     if not issubclass(stochastic, StochasticContribution):
+         raise ValueError("stochastic argument not given correctly.")
+
+     return stochastic
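
As a hedged illustration of the module above (not part of the released wheel), the short sketch below evaluates the fitted Galactic confusion PSD for one year of observation; it assumes this wheel is installed so that `lisatools.stochastic` imports cleanly.

# Usage sketch (illustrative only): evaluate the fitted Galactic foreground PSD.
import numpy as np
from lisatools.stochastic import check_stochastic

# Resolve the stock model from its string name, as check_stochastic allows.
Model = check_stochastic("FittedHyperbolicTangentGalacticForeground")

f = np.logspace(-4, -1, 1000)          # frequencies in Hz
Tobs = 1.0 * 365.25 * 24.0 * 3600.0    # one year of observation, in seconds
Sgal = Model.get_Sh(f, Tobs)           # PSD of the Galactic foreground at each frequency
print(Sgal.shape, Sgal[:3])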
@@ -0,0 +1,54 @@
+ MSUN_SI = 1.98848e30
+ YRSID_SI = 31558149.763545603
+ AU_SI = 149597870700.0
+ C_SI = 299792458.0
+ G_SI = 6.674080e-11
+ GMSUN = 1.3271244210789466e20
+ MTSUN_SI = 4.925491025873693e-06
+ MRSUN_SI = 1476.6250615036158
+ PC_SI = 3.0856775814913674e16
+ PI = 3.141592653589793238462643383279502884
+ PI_2 = 1.570796326794896619231321691639751442
+ PI_3 = 1.047197551196597746154214461093167628
+ PI_4 = 0.785398163397448309615660845819875721
+ SQRTPI = 1.772453850905516027298167483341145183
+ SQRTTWOPI = 2.506628274631000502415765284811045253
+ INVSQRTPI = 0.564189583547756286948079451560772585
+ INVSQRTTWOPI = 0.398942280401432677939946059934381868
+ GAMMA = 0.577215664901532860606512090082402431
+ SQRT2 = 1.414213562373095048801688724209698079
+ SQRT3 = 1.732050807568877293527446341505872367
+ SQRT6 = 2.449489742783178098197284074705891392
+ INVSQRT2 = 0.707106781186547524400844362104849039
+ INVSQRT3 = 0.577350269189625764509148780501957455
+ INVSQRT6 = 0.408248290463863016366214012450981898
+ F0 = 3.168753578687779e-08
+ Omega0 = 1.9909865927683788e-07
+ L_SI = 2.5e9
+ eorbit = 0.004824185218078991
+ ConstOmega = 1.99098659277e-7
+
+ #### Armlength
+ lisaL = 2.5e9  # LISA's arm length in meters
+ lisaLT = lisaL / C_SI  # LISA's arm length in seconds
+
+ #### Noise levels
+ ### Optical Metrology System noise
+ ## Decomposition
+ Sloc = (1.7e-12) ** 2  # m^2/Hz
+ Ssci = (8.9e-12) ** 2  # m^2/Hz
+ Soth = (2.0e-12) ** 2  # m^2/Hz
+
+ ######################
+ # Physical constants #
+ ######################
+
+ # Mass of Jupiter
+ Mjup = 1.898e27
+
+ #################
+ # LISA constant #
+ #################
+
+ # Transfer frequency
+ fstar = C_SI / (lisaL * 2 * PI)
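
The derived quantities above follow directly from C_SI and the 2.5 Gm arm length; the standalone sketch below (not part of the package) reproduces them as a sanity check.

# Sanity-check sketch: recompute lisaLT and fstar from first principles.
import math

C_SI = 299792458.0  # speed of light, m/s
lisaL = 2.5e9       # LISA arm length, m

lisaLT = lisaL / C_SI                    # one-way light travel time, about 8.34 s
fstar = C_SI / (lisaL * 2.0 * math.pi)   # transfer frequency, about 1.9e-2 Hz

print(f"lisaLT = {lisaLT:.3f} s, fstar = {fstar * 1e3:.2f} mHz")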
@@ -0,0 +1,11 @@
+ from typing import Optional, Sequence, TypeVar, Union
+ import types
+
+
+ from gpubackendtools import ParallelModuleBase
+
+
+ class LISAToolsParallelModule(ParallelModuleBase):
+     def __init__(self, force_backend=None):
+         force_backend_in = ('lisatools', force_backend) if isinstance(force_backend, str) else force_backend
+         super().__init__(force_backend_in)
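
The subclass above only namespaces a bare backend name before delegating to gpubackendtools. The standalone sketch below illustrates that normalization with a hypothetical helper (`_normalize_force_backend` is not in the package), assuming ParallelModuleBase accepts either None or an already-namespaced backend spec.

# Illustration of the backend-name normalization performed in __init__.
def _normalize_force_backend(force_backend):
    # Mirrors the one-liner in LISAToolsParallelModule.__init__:
    # a bare string such as "cuda11x" is wrapped to ("lisatools", "cuda11x").
    return ("lisatools", force_backend) if isinstance(force_backend, str) else force_backend

assert _normalize_force_backend("cuda11x") == ("lisatools", "cuda11x")
assert _normalize_force_backend(None) is None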
@@ -0,0 +1,245 @@
+ from __future__ import annotations
+ from typing import Tuple
+ import numpy as np
+
+ # from ..sensitivity import get_sensitivity
+
+ try:
+     import cupy as cp
+
+ except (ModuleNotFoundError, ImportError):
+     import numpy as cp
+
+     pass
+
+
+ def get_array_module(arr: np.ndarray | cp.ndarray) -> object:
+     """Return array library of an array (np/cp).
+
+     Args:
+         arr: Numpy or Cupy array.
+
+     """
+     if isinstance(arr, np.ndarray):
+         return np
+     elif isinstance(arr, cp.ndarray):
+         return cp
+     else:
+         raise ValueError("arr must be a numpy or cupy array.")
+
+
+ def generate_noise_fd(freqs, df, *sensitivity_args, func=None, **sensitivity_kwargs):
+     if func is None:
+         func = get_sensitivity
+
+     norm = 0.5 * (1.0 / df) ** 0.5
+     psd = func(freqs, *sensitivity_args, **sensitivity_kwargs)
+     noise_to_add = psd ** (1 / 2) * (
+         np.random.normal(0, norm, len(freqs))
+         + 1j * np.random.normal(0, norm, len(freqs))
+     )
+     return noise_to_add
+
+
+ def AET(
+     X: float | np.ndarray, Y: float | np.ndarray, Z: float | np.ndarray
+ ) -> Tuple[float | np.ndarray, float | np.ndarray, float | np.ndarray]:
+     """Transform to AET from XYZ.
+
+     .. math::
+
+         A = (Z - X) / \\sqrt{2}
+
+     .. math::
+
+         E = (X - 2Y + Z) / \\sqrt{6}
+
+     .. math::
+
+         T = (X + Y + Z) / \\sqrt{3}
+
+     Args:
+         X: X-channel information.
+         Y: Y-channel information.
+         Z: Z-channel information.
+
+     Returns:
+         A, E, T Channels.
+
+     """
+     return (
+         (Z - X) / np.sqrt(2.0),
+         (X - 2.0 * Y + Z) / np.sqrt(6.0),
+         (X + Y + Z) / np.sqrt(3.0),
+     )
+
+
+ def searchsorted2d_vec(a, b, xp=None, gpu=None, **kwargs):
+     if xp is None:
+         xp = np
+     else:
+         try:
+             xp.cuda.runtime.setDevice(gpu)
+         except AttributeError:
+             pass
+
+     m, n = a.shape
+     max_num = xp.maximum(a.max() - a.min(), b.max() - b.min()) + 1
+     r = max_num * xp.arange(a.shape[0])[:, None]
+     p = xp.searchsorted((a + r).ravel(), (b + r).ravel(), **kwargs).reshape(m, -1)
+
+     out = p - n * (xp.arange(m)[:, None])
+     try:
+         xp.cuda.runtime.deviceSynchronize()
+     except AttributeError:
+         pass
+
+     return out
+
+
+ def get_groups_from_band_structure(
+     f0, band_edges, f0_2=None, xp=None, num_groups_base=3, fix_f_test=None, inds=None
+ ):
+     if num_groups_base not in [2, 3, 4]:
+         raise ValueError("num_groups_base must be 2 or 3 or 4.")
+     if xp is None:
+         xp = np
+
+     else:
+         try:
+             xp.cuda.runtime.setDevice(xp.cuda.runtime.getDevice())
+
+         except AttributeError:
+             # it is numpy
+             pass
+
+     if not isinstance(f0, xp.ndarray) or not isinstance(band_edges, xp.ndarray):
+         raise TypeError(
+             "f0 and band_edges must be xp.ndarray with xp as numpy or cupy as given by the xp kwarg."
+         )
+
+     shape = f0.shape
+
+     # remove any above or below bands
+     bad = (f0 < band_edges.min()) | (f0 > band_edges.max())
+
+     band_indices = xp.searchsorted(band_edges, f0.flatten()).reshape(shape) - 1
+
+     # sort the bands in, but keep places with inds_band_indices
+     band_indices_sorted = xp.sort(band_indices, axis=-1)
+     inds_band_indices = xp.argsort(band_indices, axis=-1)
+
+     if f0_2 is not None:
+         assert f0_2.shape == f0.shape
+         band_indices_2 = xp.searchsorted(band_edges, f0_2.flatten()).reshape(shape) - 1
+         band_indices_2_sorted = xp.take_along_axis(
+             band_indices_2, inds_band_indices, axis=-1
+         )
+
+         # very important: ensures the proposed new point is not further than 1 band away.
+         diff = 1 if num_groups_base > 2 else 0
+         keep = (
+             np.abs(band_indices_2_sorted.flatten() - band_indices_sorted.flatten())
+             <= diff
+         )
+         if fix_f_test is not None:
+             keep[fix_f_test.flatten()] = False
+         remove = ~keep
+
+     else:
+         keep = np.ones(np.prod(band_indices_sorted.shape), dtype=bool)
+
+     # temperature index associated with each band
+     temp_inds = xp.repeat(
+         xp.arange(band_indices_sorted.shape[0]), np.prod(band_indices_sorted.shape[1:])
+     )[
+         keep
+     ]  # .reshape(shape)
+
+     # walker index associated with each band
+     walker_inds = (
+         xp.tile(
+             xp.arange(band_indices_sorted.shape[1]),
+             (band_indices_sorted.shape[0], band_indices_sorted.shape[2], 1),
+         )
+         .transpose((0, 2, 1))
+         .flatten()[keep]
+     )
+
+     if f0_2 is not None:
+         temp_inds_remove = xp.repeat(
+             xp.arange(band_indices_sorted.shape[0]),
+             np.prod(band_indices_sorted.shape[1:]),
+         )[
+             remove
+         ]  # .reshape(shape)
+
+         # walker index associated with each band
+         walker_inds_remove = (
+             xp.tile(
+                 xp.arange(band_indices_sorted.shape[1]),
+                 (band_indices_sorted.shape[0], band_indices_sorted.shape[2], 1),
+             )
+             .transpose((0, 2, 1))
+             .flatten()[remove]
+         )
+         inds_band_indices_remove = inds_band_indices.flatten()[remove]
+
+     # special indexing method
+     band_indices_sorted_special = (
+         band_indices_sorted.flatten()[keep]
+         + int(1e12) * temp_inds
+         + int(1e6) * walker_inds
+     )
+
+     # get the unique special indicators
+     (
+         unique_special,
+         unique_special_start_inds,
+         unique_special_reverse,
+         unique_special_counts,
+     ) = np.unique(
+         band_indices_sorted_special,
+         return_index=True,
+         return_inverse=True,
+         return_counts=True,
+     )
+
+     # this basically makes mini arange setups for each band
+     # the added_contribution for the first unique band index is removed
+     added_contribution = xp.arange(band_indices_sorted_special.shape[0])
+
+     # gets the groups
+     combined = band_indices_sorted_special + added_contribution
+     groups = (
+         combined - (combined[unique_special_start_inds])[unique_special_reverse]
+     )  # .reshape(shape)
+
+     groups_even_odd_tmp = xp.asarray(
+         [
+             (num_groups_base * groups + i)
+             * (band_indices_sorted.flatten()[keep] % num_groups_base == i)
+             for i in range(num_groups_base)
+         ]
+     )
+     groups_even_odd = xp.sum(groups_even_odd_tmp, axis=0)
+
+     groups_out = -2 * xp.ones_like(f0, dtype=int)
+     groups_out[(temp_inds, walker_inds, inds_band_indices.flatten()[keep])] = (
+         groups_even_odd
+     )
+
+     groups_out[bad] = -1
+
+     """if f0_2 is not None and not np.all(keep):
+         fix = (temp_inds_remove, walker_inds_remove, inds_band_indices_remove)
+         fix_2 = band_indices_2[fix]
+         fix_1 = band_indices[fix]"""
+
+     return groups_out
+
+
+ autodoc_type_aliases = {
+     "Iterable": "Iterable",
+     "ArrayLike": "ArrayLike",
+ }
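
As a hedged usage sketch (not part of the released wheel), the snippet below exercises two of the helpers above with plain NumPy inputs; no GPU is needed because the cupy import falls back to numpy. It assumes the wheel is installed so that `lisatools.utils.utility` is importable.

# Usage sketch: XYZ -> AET combination and row-wise searchsorted.
import numpy as np
from lisatools.utils.utility import AET, searchsorted2d_vec

# AET channel combination on toy data: A = (Z - X)/sqrt(2), etc.
X, Y, Z = np.ones(4), 2.0 * np.ones(4), 3.0 * np.ones(4)
A, E, T = AET(X, Y, Z)

# Vectorized per-row searchsorted: each row of `a` must be sorted independently.
a = np.array([[1.0, 3.0, 5.0], [2.0, 4.0, 6.0]])
b = np.array([[2.0], [5.0]])
idx = searchsorted2d_vec(a, b)   # expected [[1], [2]]
print(A, idx)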
@@ -0,0 +1,7 @@
+ """Metadata deduced from git at build time."""
+
+ id: str
+ short_id: str
+
+ id = ""
+ short_id = ""