ml4gw 0.7.3__py3-none-any.whl → 0.7.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ml4gw/dataloading/chunked_dataset.py +4 -2
- ml4gw/dataloading/hdf5_dataset.py +1 -0
- ml4gw/dataloading/in_memory_dataset.py +0 -1
- ml4gw/distributions.py +6 -3
- ml4gw/gw.py +2 -2
- ml4gw/nn/autoencoder/convolutional.py +3 -3
- ml4gw/nn/resnet/resnet_1d.py +4 -3
- ml4gw/nn/resnet/resnet_2d.py +4 -3
- ml4gw/spectral.py +8 -6
- ml4gw/transforms/iirfilter.py +2 -2
- ml4gw/transforms/qtransform.py +9 -8
- ml4gw/transforms/spectrogram.py +3 -1
- ml4gw/transforms/spline_interpolation.py +3 -4
- ml4gw/transforms/whitening.py +1 -1
- ml4gw/utils/slicing.py +16 -11
- ml4gw/waveforms/cbc/phenom_d.py +1 -1
- ml4gw/waveforms/cbc/phenom_p.py +2 -2
- ml4gw/waveforms/cbc/taylorf2.py +1 -1
- ml4gw/waveforms/cbc/utils.py +4 -5
- ml4gw/waveforms/conversion.py +11 -9
- ml4gw/waveforms/generator.py +45 -32
- {ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info}/METADATA +10 -39
- {ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info}/RECORD +27 -27
- {ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info}/WHEEL +1 -1
- {ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info/licenses}/LICENSE +0 -0
ml4gw/dataloading/chunked_dataset.py
CHANGED

@@ -94,8 +94,10 @@ class ChunkedTimeSeriesDataset(torch.utils.data.IterableDataset):
         # flatten it to make it easier to slice
         if chunk_size < self.kernel_size:
             raise ValueError(
-                "Can't sample kernels of size {} from chunk "
-                "with size {}".format(self.kernel_size, chunk_size)
+                (
+                    "Can't sample kernels of size {} from chunk "
+                    "with size {}"
+                ).format(self.kernel_size, chunk_size)
             )
         chunk = chunk.reshape(-1)
 
ml4gw/dataloading/in_memory_dataset.py
CHANGED

@@ -212,7 +212,6 @@ class InMemoryDataset(torch.utils.data.IterableDataset):
         Float[Tensor, "batch channel time"],
         Tuple[Float[Tensor, "batch channel time"], Float[Tensor, " batch"]],
     ]:
-
         indices = self.init_indices()
         for i in range(len(self)):
             # slice the array of _indices_ we'll be using to
ml4gw/distributions.py
CHANGED

@@ -4,6 +4,7 @@ from specified distributions. Each callable should map from
 an integer `N` to a 1D torch `Tensor` containing `N` samples
 from the corresponding distribution.
 """
+
 import math
 from typing import Optional
 
@@ -33,14 +34,15 @@ class Cosine(dist.Distribution):
         self.high = torch.as_tensor(high)
         self.norm = 1 / (torch.sin(self.high) - torch.sin(self.low))
 
-    def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor:
+    def rsample(self, sample_shape: torch.Size = None) -> Tensor:
+        sample_shape = sample_shape or torch.Size()
         u = torch.rand(sample_shape, device=self.low.device)
         return torch.arcsin(u / self.norm + torch.sin(self.low))
 
     def log_prob(self, value: float) -> Float[Tensor, ""]:
         value = torch.as_tensor(value)
         inside_range = (value >= self.low) & (value <= self.high)
-        return value.cos().log()
+        return (value.cos() * inside_range).log()
 
 
 class Sine(dist.TransformedDistribution):
@@ -166,7 +168,8 @@ class DeltaFunction(dist.Distribution):
         super().__init__(batch_shape, validate_args=validate_args)
         self.peak = torch.as_tensor(peak)
 
-    def rsample(self, sample_shape: torch.Size = torch.Size()) -> Tensor:
+    def rsample(self, sample_shape: torch.Size = None) -> Tensor:
+        sample_shape = sample_shape or torch.Size()
         return self.peak * torch.ones(
            sample_shape, device=self.peak.device, dtype=torch.float32
        )
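The `rsample` signature change replaces a call in the default argument with a `None` sentinel resolved inside the function body, the standard fix for flake8-bugbear's function-call-in-default warning. A minimal standalone sketch of the pattern (illustrative, not ml4gw code):

```python
import torch

def rsample(sample_shape: torch.Size = None) -> torch.Tensor:
    # None (and the empty torch.Size()) are falsy, so `or` swaps in a
    # fresh default at call time rather than at definition time
    sample_shape = sample_shape or torch.Size()
    return torch.rand(sample_shape)

print(rsample().shape)                 # torch.Size([]) -> scalar sample
print(rsample(torch.Size([5])).shape)  # torch.Size([5])
```

The companion `log_prob` change multiplies by the boolean `inside_range` mask before taking the log, so values outside `[low, high]` now yield `log(0) = -inf` instead of a spurious finite density.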
ml4gw/gw.py
CHANGED

@@ -125,8 +125,8 @@ def compute_antenna_responses(
     for mode in modes:
         try:
             polarization = polarization_funcs[mode](m, n)
-        except KeyError:
-            raise ValueError(f"No polarization mode {mode}")
+        except KeyError as exc:
+            raise ValueError(f"No polarization mode {mode}") from exc
 
         # add a dummy dimension for concatenating
         polarizations.append(polarization)
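The `from exc` form preserves the original `KeyError` as `__cause__`, so the traceback shows both the failed lookup and the friendlier `ValueError` rather than the misleading "during handling of the above exception, another exception occurred" message. A standalone sketch of the idiom (the dictionary here is hypothetical):

```python
polarization_funcs = {"plus": lambda: 1, "cross": lambda: -1}

def get_mode(mode: str):
    try:
        return polarization_funcs[mode]()
    except KeyError as exc:
        # chaining attaches the KeyError as the ValueError's __cause__
        raise ValueError(f"No polarization mode {mode}") from exc

get_mode("breathing")  # ValueError, with the KeyError shown as its cause
```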
ml4gw/nn/autoencoder/convolutional.py
CHANGED

@@ -19,7 +19,7 @@ class ConvBlock(Autoencoder):
         kernel_size: int,
         stride: int = 1,
         groups: int = 1,
-        activation: torch.nn.Module = torch.nn.ReLU(),
+        activation: torch.nn.Module = torch.nn.ReLU,
         norm: Module = torch.nn.BatchNorm1d,
         decode_channels: Optional[int] = None,
         output_activation: Optional[torch.nn.Module] = None,
@@ -56,7 +56,7 @@ class ConvBlock(Autoencoder):
             groups=groups,
         )
 
-        self.activation = activation
+        self.activation = activation()
         if output_activation is not None:
             self.output_activation = output_activation
         else:
@@ -97,7 +97,7 @@ class ConvolutionalAutoencoder(Autoencoder):
         kernel_size: int,
         stride: int = 1,
         groups: int = 1,
-        activation: torch.nn.Module = torch.nn.ReLU(),
+        activation: torch.nn.Module = torch.nn.ReLU,
         output_activation: Optional[torch.nn.Module] = None,
         norm: Module = torch.nn.BatchNorm1d,
         decode_channels: Optional[int] = None,
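Taking the activation as a class and instantiating it per block with `activation()` means every `ConvBlock` gets its own module, which matters for stateful activations with learnable parameters. A minimal illustration of the difference (standalone, not ml4gw code):

```python
import torch

# one PReLU instance shared by two blocks: a single learnable slope
# parameter receives gradients from both
shared = torch.nn.PReLU()
blocks_shared = [shared, shared]

# passing the class and calling it per block gives independent parameters
activation = torch.nn.PReLU
blocks_independent = [activation(), activation()]

print(blocks_shared[0] is blocks_shared[1])            # True
print(blocks_independent[0] is blocks_independent[1])  # False
```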
ml4gw/nn/resnet/resnet_1d.py
CHANGED

@@ -64,7 +64,6 @@ class BasicBlock(nn.Module):
         dilation: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-
         super().__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm1d
@@ -258,8 +257,10 @@ class ResNet1D(nn.Module):
             stride_type = ["stride"] * (len(layers) - 1)
         if len(stride_type) != (len(layers) - 1):
             raise ValueError(
-                "'stride_type' should be None or a {}-element "
-                "tuple, got {}".format(len(layers) - 1, stride_type)
+                (
+                    "'stride_type' should be None or a {}-element "
+                    "tuple, got {}"
+                ).format(len(layers) - 1, stride_type)
             )
 
         self.groups = groups
ml4gw/nn/resnet/resnet_2d.py
CHANGED

@@ -61,7 +61,6 @@ class BasicBlock(nn.Module):
         dilation: int = 1,
         norm_layer: Optional[Callable[..., nn.Module]] = None,
     ) -> None:
-
         super().__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
@@ -258,8 +257,10 @@ class ResNet2D(nn.Module):
             stride_type = ["stride"] * (len(layers) - 1)
         if len(stride_type) != (len(layers) - 1):
             raise ValueError(
-                "'stride_type' should be None or a {}-element "
-                "tuple, got {}".format(len(layers) - 1, stride_type)
+                (
+                    "'stride_type' should be None or a {}-element "
+                    "tuple, got {}"
+                ).format(len(layers) - 1, stride_type)
             )
 
         self.groups = groups
ml4gw/spectral.py
CHANGED

@@ -28,8 +28,8 @@ def median(x: Float[Tensor, "... size"], axis: int) -> Float[Tensor, "..."]:
     Implements a median calculation that matches numpy's
     behavior for an even number of elements and includes
     the same bias correction used by scipy's implementation.
-    see https://github.com/scipy/scipy/blob/main/scipy/signal/_spectral_py.py#L2066
-    """
+    see https://github.com/scipy/scipy/blob/main/scipy/signal/_spectral_py.py#L2066
+    """  # noqa: E501
     n = x.shape[axis]
     ii_2 = 2 * torch.arange(1.0, (n - 1) // 2 + 1)
     bias = 1 + torch.sum(1.0 / (ii_2 + 1) - 1.0 / ii_2)
@@ -355,7 +355,7 @@ def truncate_inverse_power_spectrum(
     to which the whitening filter will be applied.
 
     Implementation details adapted from
-    https://github.com/vivinousi/gw-detection-deep-learning/blob/203966cc2ee47c32c292be000fb009a16824b7d9/modules/whiten.py#L8
+    https://github.com/vivinousi/gw-detection-deep-learning/blob/203966cc2ee47c32c292be000fb009a16824b7d9/modules/whiten.py#L8
 
     Args:
         psd:
@@ -384,7 +384,7 @@ def truncate_inverse_power_spectrum(
     The PSD with its time domain response truncated
     to `fduration` and any filtered frequencies
     tapered.
-    """
+    """  # noqa: E501
 
     num_freqs = psd.size(-1)
     N = (num_freqs - 1) * 2
@@ -522,8 +522,10 @@ def whiten(
     N = X.size(-1)
     if N <= (2 * pad):
         raise ValueError(
-            "Not enough timeseries samples {} for number of "
-            "padded samples {}".format(N, 2 * pad)
+            (
+                "Not enough timeseries samples {} for number of "
+                "padded samples {}"
+            ).format(N, 2 * pad)
        )
 
     # normalize the number of expected dimensions in the PSD
ml4gw/transforms/iirfilter.py
CHANGED

@@ -10,7 +10,7 @@ class IIRFilter(torch.nn.Module):
     IIR digital and analog filter design given order and critical points.
     Design an Nth-order digital or analog filter and apply it to a signal.
     Uses SciPy's `iirfilter` function to create the filter coefficients.
-    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirfilter.html
+    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirfilter.html
 
     The forward call of this module accepts a batch tensor of shape
     (n_waveforms, n_samples) and returns the filtered waveforms.
@@ -50,7 +50,7 @@ class IIRFilter(torch.nn.Module):
 
     Returns:
         Filtered signal on the forward pass.
-    """
+    """  # noqa: E501
 
     def __init__(
         self,
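As the docstring notes, the coefficients come from `scipy.signal.iirfilter`. A rough sketch of how such coefficients can be designed and applied to a batch of waveforms with torchaudio; this illustrates the underlying calls, not the module's exact implementation:

```python
import torch
from scipy.signal import iirfilter
from torchaudio.functional import lfilter

sample_rate = 2048.0
# 8th-order Butterworth highpass at 32 Hz, returned as digital-domain
# transfer-function (b, a) coefficients
b, a = iirfilter(
    8, 32.0, btype="highpass", ftype="butter", output="ba", fs=sample_rate
)

waveforms = torch.randn(16, 4096)  # (n_waveforms, n_samples)
filtered = lfilter(
    waveforms,
    a_coeffs=torch.as_tensor(a, dtype=waveforms.dtype),
    b_coeffs=torch.as_tensor(b, dtype=waveforms.dtype),
)
print(filtered.shape)  # torch.Size([16, 4096])
```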
ml4gw/transforms/qtransform.py
CHANGED

@@ -196,14 +196,14 @@ class SingleQTransform(torch.nn.Module):
         sample_rate: float,
         spectrogram_shape: Tuple[int, int],
         q: float = 12,
-        frange: List[float] = [0, torch.inf],
+        frange: List[float] = None,
         mismatch: float = 0.2,
         interpolation_method: str = "bicubic",
     ) -> None:
         super().__init__()
         self.q = q
         self.spectrogram_shape = spectrogram_shape
-        self.frange = frange
+        self.frange = frange or [0, torch.inf]
         self.duration = duration
         self.mismatch = mismatch
 
@@ -251,7 +251,7 @@ class SingleQTransform(torch.nn.Module):
         ntiles = [qtile.ntiles() for qtile in self.qtile_transforms]
         # For efficiency, we'll stack all qtiles of the same length before
         # interpolating, so we need to figure out which those are
-        unique_ntiles = sorted(list(set(ntiles)))
+        unique_ntiles = sorted(set(ntiles))
         idx = torch.arange(len(ntiles))
         self.stack_idx = [idx[Tensor(ntiles) == n] for n in unique_ntiles]
 
@@ -465,15 +465,15 @@ class QScan(torch.nn.Module):
         duration: float,
         sample_rate: float,
         spectrogram_shape: Tuple[int, int],
-        qrange: List[float] = [4, 64],
-        frange: List[float] = [0, torch.inf],
+        qrange: List[float] = None,
+        frange: List[float] = None,
         interpolation_method="bicubic",
         mismatch: float = 0.2,
     ) -> None:
         super().__init__()
-        self.qrange = qrange
+        self.qrange = qrange or [4, 64]
         self.mismatch = mismatch
-        self.frange = frange
+        self.frange = frange or [0, torch.inf]
         self.spectrogram_shape = spectrogram_shape
         max_q = torch.pi * duration * sample_rate / 50 - 11 ** (0.5)
         self.qs = self.get_qs()
@@ -481,7 +481,8 @@ class QScan(torch.nn.Module):
             warnings.warn(
                 "Some Q values exceed the maximum allowable Q value of "
                 f"{max_q}. The list of Q values to be tested in this "
-                "scan will be truncated to avoid those values."
+                "scan will be truncated to avoid those values.",
+                stacklevel=2,
             )
 
         # Deliberately doing something different from GWpy here.
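Replacing the `[0, torch.inf]` and `[4, 64]` literals with `None` sentinels avoids the classic mutable-default pitfall: a list in a signature is created once at definition time and shared by every call. A self-contained illustration of the hazard (not ml4gw code):

```python
def scan_defaults_buggy(frange=[0, 1]):
    frange[1] *= 2          # mutates the single shared default list
    return frange

print(scan_defaults_buggy())  # [0, 2]
print(scan_defaults_buggy())  # [0, 4] -- the default "remembers" prior calls

def scan_defaults_fixed(frange=None):
    frange = frange or [0, 1]  # fresh list on every call
    frange[1] *= 2
    return frange

print(scan_defaults_fixed())  # [0, 2]
print(scan_defaults_fixed())  # [0, 2]
```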
ml4gw/transforms/spectrogram.py
CHANGED

@@ -104,7 +104,8 @@ class MultiResolutionSpectrogram(torch.nn.Module):
         self.register_buffer("time_idxs", time_idxs)
 
     def _check_and_format_kwargs(self, kwargs: Dict[str, List]) -> List:
-        lengths = sorted(set(len(v) for v in kwargs.values()))
+        lengths = sorted(len(v) for v in kwargs.values())
+        lengths = list(set(lengths))
 
         if lengths[-1] > 3:
             warnings.warn(
@@ -112,6 +113,7 @@ class MultiResolutionSpectrogram(torch.nn.Module):
                 "If performance is slower than desired, try reducing the "
                 "number of spectrograms",
                 RuntimeWarning,
+                stacklevel=2,
             )
 
         if len(lengths) > 2 or (len(lengths) == 2 and lengths[0] != 1):
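Both this change and the `QScan` change above add `stacklevel=2` so the warning is attributed to the caller's line rather than to library internals. A quick demonstration of the effect (standalone sketch):

```python
import warnings

def validate(n):
    if n > 3:
        # stacklevel=2 points the reported filename:lineno at whoever
        # called validate(), where the offending value actually came from
        warnings.warn("more than 3 spectrograms", RuntimeWarning, stacklevel=2)

validate(5)  # warning is reported at this line, not at the warn() call
```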
ml4gw/transforms/spline_interpolation.py
CHANGED

@@ -74,7 +74,7 @@ class SplineInterpolate(torch.nn.Module):
     def __init__(
         self,
         x_in: Tensor,
-        y_in: Tensor = Tensor([1]),
+        y_in: Tensor = None,
         kx: int = 3,
         ky: int = 3,
         sx: float = 0.001,
@@ -83,6 +83,8 @@ class SplineInterpolate(torch.nn.Module):
         y_out: Optional[Tensor] = None,
     ):
         super().__init__()
+        if y_in is None:
+            y_in = Tensor([1])
         self.kx = kx
         self.ky = ky
         self.sx = sx
@@ -141,7 +143,6 @@ class SplineInterpolate(torch.nn.Module):
         d: int,
         m: int,
     ) -> Tuple[Tensor, Tensor]:
-
         """
         Compute the L and R values for B-spline basis functions.
         L and R are respectively the first and second coefficient multiplying
@@ -184,7 +185,6 @@ class SplineInterpolate(torch.nn.Module):
         n: int,
         m: int,
     ) -> Tensor:
-
         """
         Compute the zeroth-order B-spline basis functions
         according to de Boors recursive formula.
@@ -256,7 +256,6 @@ class SplineInterpolate(torch.nn.Module):
         return b[:, :, -1]
 
     def bivariate_spline_fit_natural(self, Z):
-
         if len(Z.shape) == 3:
             Z_Bx = torch.matmul(Z, self.Bx)
             # ((BxT @ Bx)^-1 @ (Z @ Bx)T)T = Z @ BxT^-1
ml4gw/transforms/whitening.py
CHANGED

@@ -161,7 +161,7 @@ class FixedWhiten(FittableSpectralTransform):
         fftlength: Optional[float] = None,
         highpass: Optional[float] = None,
         lowpass: Optional[float] = None,
-        overlap: Optional[float] = None
+        overlap: Optional[float] = None,
     ) -> None:
         """
         Compute the PSD of channel-wise background to
ml4gw/utils/slicing.py
CHANGED

@@ -171,8 +171,10 @@ def slice_kernels(
     # to select _different_ kernels from each channel
     if len(x) != idx.shape[1]:
         raise ValueError(
-            "Can't slice array with shape {} with indices "
-            "with shape {}".format(x.shape, idx.shape)
+            (
+                "Can't slice array with shape {} with indices "
+                "with shape {}"
+            ).format(x.shape, idx.shape)
         )
 
     # batch_size x num_channels x kernel_size
@@ -284,8 +286,9 @@ def sample_kernels(
 
     if X.shape[-1] < kernel_size:
         raise ValueError(
-            "Can't sample kernels of size {} from "
-            "tensor with shape {}".format(kernel_size, X.shape)
+            "Can't sample kernels of size {} from tensor with shape {}".format(
+                kernel_size, X.shape
+            )
         )
     elif X.ndim > 3:
         raise ValueError(
@@ -293,13 +296,14 @@ def sample_kernels(
         )
     elif X.ndim < 3 and N is None:
         raise ValueError(
-            "Must specify number of kernels N if X "
-            "has fewer than 3 dimensions"
+            "Must specify number of kernels N if X has fewer than 3 dimensions"
         )
     elif X.ndim == 3 and N is not None and N != len(X):
         raise ValueError(
-            "Can't sample {} kernels from 3D tensor with "
-            "batch dimension {}".format(N, len(X))
+            (
+                "Can't sample {} kernels from 3D tensor with "
+                "batch dimension {}"
+            ).format(N, len(X))
         )
 
     if X.ndim == 1:
@@ -309,7 +313,6 @@ def sample_kernels(
     center = int(X.shape[-1] // 2)
 
     if max_center_offset is None:
-
         # sample uniformly from all of X's time dimension
         min_val, max_val = 0, X.shape[-1] - kernel_size
     elif max_center_offset >= 0:
@@ -341,8 +344,10 @@ def sample_kernels(
         # if kernel_size > center - max_center_offset,
         # we may end up with negative indices
         raise ValueError(
-            "Kernel size {} is too large for requested center "
-            "offset value {}".format(kernel_size, max_center_offset)
+            (
+                "Kernel size {} is too large for requested center "
+                "offset value {}"
+            ).format(kernel_size, max_center_offset)
         )
 
     if X.ndim == 3 or coincident:
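Several hunks in this release (here and in `spectral.py`, `resnet_*.py`, `chunked_dataset.py`) rewrap implicitly concatenated message strings in parentheses before calling `.format`. Adjacent string literals are joined at parse time, so `.format` already applied to the whole message; the parentheses only make that binding explicit to the reader. A quick standalone check:

```python
# implicit concatenation: .format still binds to the joined string
msg_a = ("Can't sample {} kernels from 3D tensor with "
         "batch dimension {}".format(8, 4))

# explicit grouping, as in the new code
msg_b = (
    "Can't sample {} kernels from 3D tensor with "
    "batch dimension {}"
).format(8, 4)

assert msg_a == msg_b  # identical strings; only the clarity differs
```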
ml4gw/waveforms/cbc/phenom_d.py
CHANGED
ml4gw/waveforms/cbc/phenom_p.py
CHANGED

@@ -1,6 +1,6 @@
 """
-Based on the JAX implementation of IMRPhenomPv2 from
-https://github.com/tedwards2412/ripple/blob/main/src/ripplegw/waveforms/IMRPhenomPv2.py
+Based on the JAX implementation of IMRPhenomPv2 from
+https://github.com/tedwards2412/ripple/blob/main/src/ripplegw/waveforms/IMRPhenomPv2.py
 """
 
 from typing import Dict, Optional, Tuple
ml4gw/waveforms/cbc/taylorf2.py
CHANGED
ml4gw/waveforms/cbc/utils.py
CHANGED

@@ -1,7 +1,8 @@
 """
 Utilities for conditioning waveforms
-See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiral.c
-"""
+See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiral.c
+"""  # noqa: E501
+
 import torch
 
 from ml4gw.constants import MRSUN, MSUN, MTSUN_SI, C, G
@@ -105,7 +106,5 @@ def ringdown_time_bound(
 def frequency_isco(mass_1: BatchTensor, mass_2: BatchTensor):
     return (
         1.0
-        / (
-            (9.0**1.5) * torch.pi * (mass_1 + mass_2) * MTSUN_SI / MSUN
-        ).float()
+        / ((9.0**1.5) * torch.pi * (mass_1 + mass_2) * MTSUN_SI / MSUN).float()
     )
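The `frequency_isco` hunk only removes a redundant pair of parentheses; numerically it still evaluates f = 1 / (9^{3/2} π M), with the total mass converted to seconds via `MTSUN_SI / MSUN` (the division by `MSUN` suggests masses are passed in kg). A back-of-the-envelope check with LAL's constant values inlined for self-containment:

```python
import math

MSUN = 1.988409870698051e30       # kg, solar mass (LAL value)
MTSUN_SI = 4.925490947641267e-06  # s, geometrized solar mass (LAL value)

def frequency_isco(mass_1: float, mass_2: float) -> float:
    # 1 / (9**1.5 * pi * M), with total mass M in seconds
    return 1.0 / ((9.0**1.5) * math.pi * (mass_1 + mass_2) * MTSUN_SI / MSUN)

# a 1.4 + 1.4 solar-mass binary gives roughly 855 Hz
print(frequency_isco(1.4 * MSUN, 1.4 * MSUN))
```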
ml4gw/waveforms/conversion.py
CHANGED

@@ -20,15 +20,15 @@ def XLALSimInspiralLN(
     total_mass: BatchTensor, eta: BatchTensor, v: BatchTensor
 ):
     """
-    See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiralPNCoefficients.c#L2173
-    """
+    See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiralPNCoefficients.c#L2173
+    """  # noqa: E501
     return total_mass**2 * eta / v
 
 
 def XLALSimInspiralL_2PN(eta: BatchTensor):
     """
-    See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiralPNCoefficients.c#L2181
-    """
+    See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiralPNCoefficients.c#L2181
+    """  # noqa: E501
     return 1.5 + eta / 6.0
 
 
@@ -65,8 +65,8 @@ def bilby_spins_to_lalsim(
     """
     Converts between bilby spin and lalsimulation spin conventions.
 
-    See https://github.com/bilby-dev/bilby/blob/cccdf891e82d46319e69dbfdf48c4970b4e9a727/bilby/gw/conversion.py#L105
-    and https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiral.c#L3594
+    See https://github.com/bilby-dev/bilby/blob/cccdf891e82d46319e69dbfdf48c4970b4e9a727/bilby/gw/conversion.py#L105
+    and https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimInspiral.c#L3594
 
     Args:
         theta_jn: BatchTensor,
@@ -80,13 +80,15 @@ def bilby_spins_to_lalsim(
         mass_2: BatchTensor,
         f_ref: float,
         phi_ref: BatchTensor,
-    """
+    """  # noqa: E501
 
     # check if f_ref is valid
     if f_ref <= 0.0:
         raise ValueError(
-            "f_ref <= 0 is invalid. "
-            "Please pass in the starting GW frequency instead."
+            (
+                "f_ref <= 0 is invalid. "
+                "Please pass in the starting GW frequency instead."
+            )
         )
 
     # starting frame: LNhat is along the z-axis and the unit
ml4gw/waveforms/generator.py
CHANGED

@@ -19,14 +19,15 @@ EXTRA_CYCLES = 3.0
 
 class TimeDomainCBCWaveformGenerator(torch.nn.Module):
     """
-    Waveform generator that generates time-domain waveforms from frequency-domain approximants.
+    Waveform generator that generates time-domain waveforms from
+    frequency-domain approximants.
 
     Frequency domain waveforms are conditioned as done by lalsimulation.
     Specifically, waveforms are generated with a starting frequency `fstart`
     slightly below the requested `f_min`, so that they can be tapered from
     `fstart` to `f_min` using a cosine window.
 
-    Please see https://lscsoft.docs.ligo.org/lalsuite/lalsimulation/group___l_a_l_sim_inspiral__c.html#gac9f16dab2cbca5a431738ee7d2505969
+    Please see https://lscsoft.docs.ligo.org/lalsuite/lalsimulation/group___l_a_l_sim_inspiral__c.html#gac9f16dab2cbca5a431738ee7d2505969
     for more information
 
     Args:
@@ -50,7 +51,7 @@ class TimeDomainCBCWaveformGenerator(torch.nn.Module):
             will be placed.
         f_ref:
             Reference frequency for the waveform
-    """
+    """  # noqa: E501
 
     def __init__(
         self,
@@ -61,7 +62,6 @@ class TimeDomainCBCWaveformGenerator(torch.nn.Module):
         f_ref: float,
         right_pad: float,
     ) -> None:
-
         super().__init__()
         self.approximant = approximant
         self.f_min = f_min
@@ -93,8 +93,8 @@ class TimeDomainCBCWaveformGenerator(torch.nn.Module):
         """
         Builds highpass filter object.
 
-        See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/python/lalsimulation/gwsignal/core/conditioning_subroutines.py?ref_type=heads#L10
-        """
+        See https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/python/lalsimulation/gwsignal/core/conditioning_subroutines.py?ref_type=heads#L10
+        """  # noqa: E501
         order = 8.0
         w1 = np.tan(np.pi * (self.f_min) / self.sample_rate)
         attenuation = 0.99
@@ -117,22 +117,29 @@ class TimeDomainCBCWaveformGenerator(torch.nn.Module):
         self, **parameters: dict[str, BatchTensor]
     ) -> Tuple[Float[Tensor, "{N} samples"], Float[Tensor, "{N} samples"]]:
         """
-        Generate a conditioned frequency domain waveform from a
+        Generate a conditioned frequency domain waveform from a
+        frequency-domain approximant.
 
-        Based on https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/python/lalsimulation/gwsignal/core/waveform_conditioning.py?ref_type=heads#L248
+        Based on https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/python/lalsimulation/gwsignal/core/waveform_conditioning.py?ref_type=heads#L248
 
         Args:
             **parameters:
-                Dictionary of parameters for waveform generation
-
-                It is required that `parameters` contains `mass_1`, `mass_2`,
-                keys, which are used for determining
-
-
-
-
-
-        """
+                Dictionary of parameters for waveform generation where key is
+                the parameter name and value is a tensor of parameters.
+                It is required that `parameters` contains `mass_1`, `mass_2`,
+                `s1z`, and `s2z` keys, which are used for determining
+                parameters of data conditioning.
+
+                If the specified approximant takes other parameters for
+                waveform generation, like `chirp_mass` and `mass_ratio`, the
+                utility functions in `ml4gw.waveforms.conversion` may be useful
+                for populating the parameters dictionary with these
+                additional parameters.
+
+                Note that, if using an approximant from `ml4gw.waveforms.cbc`,
+                any additional keys in `parameters` not ingested by the
+                approximant will be ignored.
+        """  # noqa: E501
         # convert masses to kg, make sure
         # they are doubles so there is no
         # overflow in the calculations
@@ -266,23 +273,29 @@ class TimeDomainCBCWaveformGenerator(torch.nn.Module):
         **parameters,
     ) -> Tuple[Float[Tensor, "{N} samples"], Float[Tensor, "{N} samples"]]:
         """
-        Generates a time-domain waveform from a frequency
-        Conditioning is based on https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/python/lalsimulation/gwsignal/core/waveform_conditioning.py?ref_type=heads#L248
+        Generates a time-domain waveform from a frequency-domain approximant.
+        Conditioning is based on https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/python/lalsimulation/gwsignal/core/waveform_conditioning.py?ref_type=heads#L248
 
-        A frequency domain waveform is generated, conditioned
-        and fft'd into the time-domain
+        A frequency domain waveform is generated, conditioned
+        (see `generate_conditioned_fd_waveform`) and fft'd into the time-domain
 
         **parameters:
-            Dictionary of parameters for waveform generation
-
-            It is required that `parameters` contains `mass_1`, `mass_2`,
-            keys, which are used for determining parameters
-
-
-
-
-
-        """
+            Dictionary of parameters for waveform generation where each key is
+            the parameter name and each value is a tensor of parameters.
+            It is required that `parameters` contains `mass_1`, `mass_2`,
+            `s1z`, and `s2z` keys, which are used for determining parameters
+            of data conditioning.
+
+            If the specified approximant takes other parameters for waveform
+            generation, like `chirp_mass` and `mass_ratio`, the utility
+            functions in `ml4gw.waveforms.conversion` may be useful for
+            populating the parameters dictionary with these additional
+            parameters.
+
+            Note that, if using an approximant from `ml4gw.waveforms.cbc`,
+            any additional keys in `parameters` not ingested by the
+            approximant will be ignored.
+        """  # noqa: E501
 
         hc, hp = self.generate_conditioned_fd_waveform(**parameters)
 
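A sketch of how this generator is typically driven, using `IMRPhenomD` from `ml4gw.waveforms.cbc` and the parameter conventions described in the docstring above. Constructor argument names, spin parameter names (`chi1`/`chi2`), and other approximant parameters here are assumptions based on the docstring and should be verified against the installed version:

```python
import torch

from ml4gw.waveforms.cbc import IMRPhenomD
from ml4gw.waveforms.generator import TimeDomainCBCWaveformGenerator

generator = TimeDomainCBCWaveformGenerator(
    approximant=IMRPhenomD(),
    sample_rate=2048,
    duration=4,
    f_min=20,
    f_ref=40,
    right_pad=0.5,
)

n = 8
mass_1 = torch.full((n,), 35.0)
mass_2 = torch.full((n,), 28.0)
parameters = {
    # required keys, used for data conditioning
    "mass_1": mass_1,
    "mass_2": mass_2,
    "s1z": torch.zeros(n),
    "s2z": torch.zeros(n),
    # extra parameters ingested by the approximant; chirp mass and
    # mass ratio computed here directly for illustration
    "chirp_mass": (mass_1 * mass_2) ** 0.6 / (mass_1 + mass_2) ** 0.2,
    "mass_ratio": mass_2 / mass_1,
    "chi1": torch.zeros(n),
    "chi2": torch.zeros(n),
    "distance": torch.full((n,), 400.0),
    "phic": torch.zeros(n),
    "inclination": torch.full((n,), 0.5),
}

hc, hp = generator(**parameters)  # cross and plus polarizations, (n, samples)
```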
{ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info}/METADATA
CHANGED

@@ -1,20 +1,15 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: ml4gw
-Version: 0.7.3
+Version: 0.7.4
 Summary: Tools for training torch models on gravitational wave data
-Author: Alec Gunny
-
-Requires-Python:
-
-
-
-
-
-Requires-Dist: jaxtyping (>=0.2,<0.3)
-Requires-Dist: numpy (<2.0.0)
-Requires-Dist: scipy (>=1.9.0,<1.15)
-Requires-Dist: torch (>=2.0,<3.0)
-Requires-Dist: torchaudio (>=2.0,<3.0)
+Author-email: Ethan Marx <emarx@mit.edu>, Will Benoit <benoi090@umn.edu>, Deep Chatterjee <deep1018@mit.edu>, Alec Gunny <alec.gunny@ligo.org>
+License-File: LICENSE
+Requires-Python: <3.13,>=3.9
+Requires-Dist: jaxtyping<0.3,>=0.2
+Requires-Dist: numpy<2.0.0
+Requires-Dist: scipy<1.15,>=1.9.0
+Requires-Dist: torchaudio~=2.0
+Requires-Dist: torch~=2.0
 Description-Content-Type: text/markdown
 
 # ML4GW
@@ -44,29 +39,6 @@ To build with a specific version of PyTorch/CUDA, please see the PyTorch install
 pip install ml4gw torch==2.5.1 --extra-index-url=https://download.pytorch.org/whl/cu118
 ```
 
-### Poetry installation
-`ml4gw` is also fully compatible with use in Poetry, with your `pyproject.toml` set up like
-
-```toml
-[tool.poetry.dependencies]
-python = "^3.9"  # python versions 3.9-3.12 are supported
-ml4gw = "^0.6"
-```
-
-To build against a specific PyTorch/CUDA combination, consult the PyTorch installation documentation above and specify the `extra-index-url` via the `tool.poetry.source` table in your `pyproject.toml`. For example, to build against CUDA 11.6, you would do something like:
-
-```toml
-[tool.poetry.dependencies]
-python = "^3.9"
-ml4gw = "^0.6"
-torch = {version = "^2.0", source = "torch"}
-
-[[tool.poetry.source]]
-name = "torch"
-url = "https://download.pytorch.org/whl/cu118"
-priority = "explicit"
-```
-
 ## Contributing
 If you come across errors in the code, have difficulties using this software, or simply find that the current version doesn't cover your use case, please file an issue on our GitHub page, and we'll be happy to offer support.
 We encourage users who encounter these difficulties to file issues on GitHub, and we'll be happy to offer support to extend our coverage to new or improved functionality.
@@ -76,4 +48,3 @@ By bringing in new users with new use cases, we hope to develop this library int
 
 ## Funding
 We are grateful for the support of the U.S. National Science Foundation (NSF) Harnessing the Data Revolution (HDR) Institute for <a href="https://a3d3.ai">Accelerating AI Algorithms for Data Driven Discovery (A3D3)</a> under Cooperative Agreement No. <a href="https://www.nsf.gov/awardsearch/showAward?AWD_ID=2117997">PHY-2117997</a>.
-
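The dependency pins switch from explicit ranges like `(>=2.0,<3.0)` to the compatible-release operator, so `torch~=2.0` still means "at least 2.0, below 3.0". A quick way to sanity-check specifier semantics with the `packaging` library (a sketch, not part of ml4gw):

```python
from packaging.specifiers import SpecifierSet

compat = SpecifierSet("~=2.0")  # equivalent to >=2.0, ==2.*
assert "2.5.1" in compat
assert "3.0.0" not in compat

# the old explicit range accepts the same versions
assert "2.5.1" in SpecifierSet(">=2.0,<3.0")
```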
{ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info}/RECORD
CHANGED

@@ -1,55 +1,55 @@
 ml4gw/__init__.py,sha256=81quoggCuIypZjZs3bbf1Ty70KHdva5RGEJxi0oC57E,25
 ml4gw/augmentations.py,sha256=pZH9tjEpXV0AIqvHHDkpUE-BorG02beOz2pmSipw2EY,1232
 ml4gw/constants.py,sha256=RQPXwavlw_cWu3ByltvTejPsi6EWXHDJQ1HaV9iE3Lg,850
+ml4gw/distributions.py,sha256=6UOgq8W-Bs-9170Jor_0hyeRnmC74zwbUrwAcJEz1jI,5082
+ml4gw/gw.py,sha256=0ovW_HJ3j2b5Yq3mduYtGLSl2RrvFyNNcOsZFf7koHY,19794
+ml4gw/spectral.py,sha256=sao_D0ceeMEatABfiabpqb-xxRfQO8Tz7yk9N7ciOAU,19858
+ml4gw/types.py,sha256=CcctqDcNajR7khGT6BD-WYsfRKpiP0udoSAB0k1qcFw,863
 ml4gw/dataloading/__init__.py,sha256=EHBBqU7y2-Np5iQ_xyufxamUEM1pPEquqFo7oaJnaJE,149
-ml4gw/dataloading/chunked_dataset.py,sha256=
-ml4gw/dataloading/hdf5_dataset.py,sha256=
-ml4gw/dataloading/in_memory_dataset.py,sha256=
-ml4gw/distributions.py,sha256=tUuaOiX5enjKLYWD7uiN8rdRVQcrIKps64xBkTl8fMs,4991
-ml4gw/gw.py,sha256=0I9MhoHWksWG9a5EUI0GkHD1skuOXiaQgSgxNKYXCxE,19778
+ml4gw/dataloading/chunked_dataset.py,sha256=yjJ_T7YFDErnLm-TmjXnZtT6GNPdPxj5njljWHr07Y0,5308
+ml4gw/dataloading/hdf5_dataset.py,sha256=IwOUYjjeUWPDL6HvPAfHrN5wK3qCrDdkH1OZFsecZnQ,7917
+ml4gw/dataloading/in_memory_dataset.py,sha256=7307LxdDGKxcy5_WNJp4f5J92AtV8gFT-B6P0QcWUfs,9545
 ml4gw/nn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ml4gw/nn/norm.py,sha256=JIOMXQbUtoWlrhncGsqW6f1-DiGDx9zQH2O3CvQml3U,3594
 ml4gw/nn/autoencoder/__init__.py,sha256=ZaT1XhJTHpMuPQqu5E__Jezeh9uwtjcXlT7IZ18byq4,161
 ml4gw/nn/autoencoder/base.py,sha256=eSWrDdpblI609oqa7RDSvZiY3YcV8WfhTioWKFn_7eE,3205
-ml4gw/nn/autoencoder/convolutional.py,sha256=
+ml4gw/nn/autoencoder/convolutional.py,sha256=prj5Sat_yN15q-T7q9Uym8Hw6gjh7b8zimAVwT2Q85g,5357
 ml4gw/nn/autoencoder/skip_connection.py,sha256=9PKoCCvCUj5di9tuFM0Cl1v6gtcOK1bDeE_fS_R__FE,1391
 ml4gw/nn/autoencoder/utils.py,sha256=m_ivYGNwdrhA7cFxJVD4gqM8AHiWIGmlQI3pFNRklXQ,355
-ml4gw/nn/norm.py,sha256=JIOMXQbUtoWlrhncGsqW6f1-DiGDx9zQH2O3CvQml3U,3594
 ml4gw/nn/resnet/__init__.py,sha256=vBI0IftVP_EYAeDlqomtkGqUYE-RE_S4WNioUhniw9s,64
-ml4gw/nn/resnet/resnet_1d.py,sha256=
-ml4gw/nn/resnet/resnet_2d.py,sha256=
+ml4gw/nn/resnet/resnet_1d.py,sha256=qNiPNyT2Gracj0PvvRSwXiskqghZ22se5KYnlxcjzEw,13254
+ml4gw/nn/resnet/resnet_2d.py,sha256=xgEyraxgvAOM6OqEtpciRCa20U_fyClLG-A8oaIr2UQ,13337
 ml4gw/nn/streaming/__init__.py,sha256=zgjGR2L8t0txXLnil9ceZT0tM8Y2FC8yPxqIKYH0o1A,80
 ml4gw/nn/streaming/online_average.py,sha256=_nrul4ygTC_ln4wpSWGRWTgWlfGeOUGXxeGrhU4oJms,4716
 ml4gw/nn/streaming/snapshotter.py,sha256=1vWDpebRQBZIUVeksbXoqngqMnlSzQFkcsgYNrHB9tc,4473
-ml4gw/spectral.py,sha256=rnxd1ObPjyQMAu3D83_sw2lEEHZF7f87YQBV_pxHLxM,19809
 ml4gw/transforms/__init__.py,sha256=OaTQJD4GFkDkcxt0DIwt2AzeEcv9t21ciKXxQnqDiuI,447
-ml4gw/transforms/iirfilter.py,sha256=
+ml4gw/transforms/iirfilter.py,sha256=0FQ2HfXiYuvRzNmMz-SxkocTTrrKuJpxob0NGgkalBA,3188
 ml4gw/transforms/pearson.py,sha256=CM9FTRxI4384-36FIaJFOcMZwsA7BkgberToJkMU1PA,3227
-ml4gw/transforms/qtransform.py,sha256=
+ml4gw/transforms/qtransform.py,sha256=Kntl3MlZvL5Ae4A4Vskfq-uIHKhue_USnEHLG9Cf-Pw,20724
 ml4gw/transforms/scaler.py,sha256=K5mp4w2zGZbpH1AcBUfpQS4n3aVSNzkaGWXedwk2LXs,2508
 ml4gw/transforms/snr_rescaler.py,sha256=lfuwdwMY117gB-emmn0_22gsK_A9xnkHJv2-76HFWc4,2728
 ml4gw/transforms/spectral.py,sha256=4uCLNEcDff4kLheUA5v64L0y_MSOvUTJ92IH4TVcEys,4385
-ml4gw/transforms/spectrogram.py,sha256=
-ml4gw/transforms/spline_interpolation.py,sha256=
+ml4gw/transforms/spectrogram.py,sha256=fpI0vypB5seBA05a979KKsSL9go0FHGI_-BVzRJ9ATA,6218
+ml4gw/transforms/spline_interpolation.py,sha256=CDfHX2Cd0KqNgo2JXNk2QgFNYtDfEXCMIFZ7bDqo6po,13573
 ml4gw/transforms/transform.py,sha256=lu5ukcOCOYYZDZCM_0amS9AY2bJgkbLpXmZ9DpnSK9I,2504
 ml4gw/transforms/waveforms.py,sha256=koWOuHuUpQWmTT1yawSWa_MOuLfDBuugy91KIyuklOo,3189
-ml4gw/transforms/whitening.py,sha256=
-ml4gw/types.py,sha256=CcctqDcNajR7khGT6BD-WYsfRKpiP0udoSAB0k1qcFw,863
+ml4gw/transforms/whitening.py,sha256=XyBAAsTkW5O4K06ET6a9mb0809yDkMcVZJjkB1X1vY4,10295
 ml4gw/utils/interferometer.py,sha256=lRS0N3SwUTknhYXX57VACJ99jK1P9M19oUWN_i_nQN0,1814
-ml4gw/utils/slicing.py,sha256=
+ml4gw/utils/slicing.py,sha256=WkY9biz6V8FWUmqCq9QpYZv67kp0yEjdKfkI8CcK4Dc,13653
 ml4gw/waveforms/__init__.py,sha256=QVUzBx_y8A9_AsRuTJruPvL9mqGnBt11Iw1MOYjXyE4,40
+ml4gw/waveforms/conversion.py,sha256=YMyjxzPlLAay66oNABCU98VnvhyjTNbGJ36R5sX3MVo,6945
+ml4gw/waveforms/generator.py,sha256=RuVO9FL3IwmzTb10JkQ4iR8u2-0edy8DiZhZ_leHAPc,12169
 ml4gw/waveforms/adhoc/__init__.py,sha256=XVwP4t8TMUj87WY3yMGRTkXsv7_lVr1w8p8iKBW8iKE,71
 ml4gw/waveforms/adhoc/ringdown.py,sha256=m8IBQTxKBBGFqBtWGEO4KG3DEYR8TTnNyGVdVLaMKa8,3316
 ml4gw/waveforms/adhoc/sine_gaussian.py,sha256=-MtrI7ydwBTk4K0O4tdkC8-w5OifQszdnWN9__I4XzY,3569
 ml4gw/waveforms/cbc/__init__.py,sha256=hGbPsFNAIveYJnff8qKY8RWeBPFtZoYcnGHxraPWtWI,99
 ml4gw/waveforms/cbc/coefficients.py,sha256=PMr0IBALEQ38eAvZqYg-w_FE_sS1mH2FWr9soQ5MRfU,1106
-ml4gw/waveforms/cbc/phenom_d.py,sha256=
+ml4gw/waveforms/cbc/phenom_d.py,sha256=GdBdSfUIze2KUfKBS44A4kfYpG27aA5FYknaBqL0q8c,48984
 ml4gw/waveforms/cbc/phenom_d_data.py,sha256=WA1FBxUp9fo1IQaV_OLJ_5g5gI166mY1FtG9n25he9U,53447
-ml4gw/waveforms/cbc/phenom_p.py,sha256=
-ml4gw/waveforms/cbc/taylorf2.py,sha256=
-ml4gw/waveforms/cbc/utils.py,sha256=
-ml4gw/
-ml4gw/
-ml4gw-0.7.
-ml4gw-0.7.
-ml4gw-0.7.3.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-ml4gw-0.7.3.dist-info/RECORD,,
+ml4gw/waveforms/cbc/phenom_p.py,sha256=RZzzKQzqZW3rQuWZ41htTZOwwulYP61ow87HRRrel5A,27612
+ml4gw/waveforms/cbc/taylorf2.py,sha256=cmYrVL29dwX2Icp7I6SXqRIjtPmoljK5DP_ofx2heiM,10505
+ml4gw/waveforms/cbc/utils.py,sha256=LT1ky10_6ZrbwTcxIrWP1O75GUEuU5q2ZE2yYDhadQE,3037
+ml4gw-0.7.4.dist-info/METADATA,sha256=5nM8sBFDpqrKHQNqtssbhFYFkwze3IE_HLM8Zb8qXQU,3049
+ml4gw-0.7.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ml4gw-0.7.4.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ml4gw-0.7.4.dist-info/RECORD,,
{ml4gw-0.7.3.dist-info → ml4gw-0.7.4.dist-info/licenses}/LICENSE
RENAMED

File without changes