ml4gw 0.4.1__tar.gz → 0.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ml4gw-0.4.1 → ml4gw-0.5.0}/PKG-INFO +2 -1
- ml4gw-0.5.0/ml4gw/constants.py +45 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/dataloading/in_memory_dataset.py +18 -32
- ml4gw-0.5.0/ml4gw/distributions.py +170 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/norm.py +6 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/__init__.py +1 -0
- ml4gw-0.5.0/ml4gw/transforms/qtransform.py +463 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/spectral.py +17 -5
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/utils/interferometer.py +6 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/waveforms/__init__.py +2 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/waveforms/generator.py +1 -1
- ml4gw-0.5.0/ml4gw/waveforms/phenom_d.py +1441 -0
- ml4gw-0.5.0/ml4gw/waveforms/phenom_p.py +779 -0
- ml4gw-0.5.0/ml4gw/waveforms/ringdown.py +110 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/waveforms/sine_gaussian.py +4 -5
- ml4gw-0.5.0/ml4gw/waveforms/taylorf2.py +325 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/pyproject.toml +4 -1
- ml4gw-0.4.1/ml4gw/distributions.py +0 -122
- ml4gw-0.4.1/ml4gw/waveforms/phenom_d.py +0 -1359
- ml4gw-0.4.1/ml4gw/waveforms/taylorf2.py +0 -306
- {ml4gw-0.4.1 → ml4gw-0.5.0}/README.md +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/__init__.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/augmentations.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/dataloading/__init__.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/dataloading/chunked_dataset.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/dataloading/hdf5_dataset.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/gw.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/__init__.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/autoencoder/__init__.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/autoencoder/base.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/autoencoder/convolutional.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/autoencoder/skip_connection.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/autoencoder/utils.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/resnet/__init__.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/resnet/resnet_1d.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/resnet/resnet_2d.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/streaming/__init__.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/streaming/online_average.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/streaming/snapshotter.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/spectral.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/pearson.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/scaler.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/snr_rescaler.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/spectrogram.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/transform.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/waveforms.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/transforms/whitening.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/types.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/utils/slicing.py +0 -0
- {ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/waveforms/phenom_d_data.py +0 -0
{ml4gw-0.4.1 → ml4gw-0.5.0}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ml4gw
-Version: 0.4.1
+Version: 0.5.0
 Summary: Tools for training torch models on gravitational wave data
 Author: Alec Gunny
 Author-email: alec.gunny@ligo.org
@@ -10,6 +10,7 @@ Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+Requires-Dist: numpy (<2.0.0)
 Requires-Dist: torch (>=2.0,<3.0)
 Requires-Dist: torchaudio (>=2.0,<3.0)
 Requires-Dist: torchtyping (>=0.1,<0.2)
```
ml4gw-0.5.0/ml4gw/constants.py (new file)

```diff
@@ -0,0 +1,45 @@
+"""
+Various constants, all in SI units.
+"""
+
+EulerGamma = 0.577215664901532860606512090082402431
+
+MSUN = 1.988409902147041637325262574352366540e30  # kg
+"""Solar mass"""
+
+MRSUN = 1.476625038050124729627979840144936351e3
+"""Geometrized nominal solar mass, m"""
+
+G = 6.67430e-11  # m^3 / kg / s^2
+"""Newton's gravitational constant"""
+
+C = 299792458.0  # m / s
+"""Speed of light"""
+
+"""Pi"""
+PI = 3.141592653589793238462643383279502884
+
+TWO_PI = 6.283185307179586476925286766559005768
+
+gt = G * MSUN / (C**3.0)
+"""
+G MSUN / C^3 in seconds
+"""
+
+MTSUN_SI = 4.925490947641266978197229498498379006e-6
+"""1 solar mass in seconds. Same value as lal.MTSUN_SI"""
+
+m_per_Mpc = 3.085677581491367278913937957796471611e22
+"""
+Meters per Mpc.
+"""
+
+MPC_SEC = m_per_Mpc / C
+"""
+1 Mpc in seconds.
+"""
+
+clightGpc = C / 3.0856778570831e22
+"""
+Speed of light in vacuum (:math:`c`), in gigaparsecs per second
+"""
```
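Note that `gt` and `MTSUN_SI` are the same quantity, G * MSUN / c^3, i.e. one solar mass expressed in seconds. A minimal sketch of how these constants compose (the snippet is illustrative, not from the package; only the names and module path come from the diff):

```python
from ml4gw.constants import C, G, MPC_SEC, MSUN, MTSUN_SI

# gt and MTSUN_SI agree: both are one solar mass in seconds
assert abs(G * MSUN / C**3 - MTSUN_SI) < 1e-10

# total mass of a 30 + 30 solar-mass binary, in seconds
M_sec = 60 * MTSUN_SI  # ~2.96e-4 s

# light-travel time to a source at 40 Mpc, in seconds
t_40mpc = 40 * MPC_SEC
```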
{ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/dataloading/in_memory_dataset.py

```diff
@@ -7,7 +7,7 @@ from ml4gw import types
 from ml4gw.utils.slicing import slice_kernels
 
 
-class InMemoryDataset:
+class InMemoryDataset(torch.utils.data.IterableDataset):
     """Dataset for iterating through in-memory multi-channel timeseries
 
     Dataset for arrays of timeseries data which can be stored
@@ -131,7 +131,6 @@ class InMemoryDataset:
         self.batches_per_epoch = batches_per_epoch
         self.shuffle = shuffle
         self.coincident = coincident
-        self._i = self._idx = None
 
     @property
     def num_kernels(self) -> int:
@@ -157,7 +156,7 @@ class InMemoryDataset:
         num_kernels = self.num_kernels ** len(self.X)
         return (num_kernels - 1) // self.batch_size + 1
 
-    def __iter__(self):
+    def init_indices(self):
         """
         Initialize arrays of indices we'll use to slice
         through X and y at iteration time. This helps by
```
```diff
@@ -204,36 +203,23 @@ class InMemoryDataset:
         # the simplest case: deterministic and coincident
         idx = torch.arange(num_kernels, device=device)
 
-
-        self._i = 0
-        return self
+        return idx
 
-    def __next__(
+    def __iter__(
         self,
     ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
-        if self._i is None or self._idx is None:
-            raise TypeError(
-                "Must initialize InMemoryDataset iteration "
-                "before calling __next__"
-            )
 
-        [old lines 220-233: content not preserved in the source rendering]
-            y = slice_kernels(self.y, idx, self.kernel_size)
-
-        self._i += 1
-        if self.y is not None:
-            return X, y
-        return X
+        indices = self.init_indices()
+        for i in range(len(self)):
+            # slice the array of _indices_ we'll be using to
+            # slice our timeseries, and scale them by the stride
+            slc = slice(i * self.batch_size, (i + 1) * self.batch_size)
+            idx = indices[slc] * self.stride
+
+            # slice our timeseries
+            X = slice_kernels(self.X, idx, self.kernel_size)
+            if self.y is not None:
+                y = slice_kernels(self.y, idx, self.kernel_size)
+                yield X, y
+            else:
+                yield X
```
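With `InMemoryDataset` now subclassing `torch.utils.data.IterableDataset` and implementing `__iter__` as a generator, the old initialize-then-`__next__` protocol (and its `TypeError` guard) goes away: you just loop over the dataset, or hand it to a `DataLoader`. A minimal sketch, assuming constructor keywords that mirror the attributes visible in the diff (`kernel_size`, `batch_size`, `stride`, `shuffle`, `coincident` are inferred, not confirmed):

```python
import torch
from ml4gw.dataloading import InMemoryDataset

# toy in-memory timeseries: 2 channels, 16384 samples
X = torch.randn(2, 16384)

# keyword names mirror the attributes set in __init__ per the diff;
# the actual signature may differ
dataset = InMemoryDataset(
    X,
    kernel_size=1024,
    batch_size=32,
    stride=1,
    shuffle=True,
    coincident=True,
)

# in 0.5.0, iteration is a plain generator; no setup call required
for X_batch in dataset:
    print(X_batch.shape)  # (batch, channels, kernel_size)
    break
```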
ml4gw-0.5.0/ml4gw/distributions.py (new file)

```diff
@@ -0,0 +1,170 @@
+"""
+Module containing callable classes for generating samples
+from specified distributions. Each callable should map from
+an integer `N` to a 1D torch `Tensor` containing `N` samples
+from the corresponding distribution.
+"""
+import math
+from typing import Optional
+
+import torch
+import torch.distributions as dist
+
+
+class Cosine(dist.Distribution):
+    """
+    Cosine distribution based on
+    ``torch.distributions.TransformedDistribution``.
+    """
+
+    arg_constraints = {}
+
+    def __init__(
+        self,
+        low: float = -math.pi / 2,
+        high: float = math.pi / 2,
+        validate_args=None,
+    ):
+        batch_shape = torch.Size()
+        super().__init__(batch_shape, validate_args=validate_args)
+        self.low = torch.as_tensor(low)
+        self.high = torch.as_tensor(high)
+        self.norm = 1 / (torch.sin(self.high) - torch.sin(self.low))
+
+    def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor:
+        u = torch.rand(sample_shape, device=self.low.device)
+        return torch.arcsin(u / self.norm + torch.sin(self.low))
+
+    def log_prob(self, value):
+        value = torch.as_tensor(value)
+        inside_range = (value >= self.low) & (value <= self.high)
+        return value.cos().log() * inside_range
+
+
+class Sine(dist.TransformedDistribution):
+    """
+    Sine distribution based on
+    ``torch.distributions.TransformedDistribution``.
+    """
+
+    def __init__(
+        self,
+        low: float = 0.0,
+        high: float = math.pi,
+        validate_args=None,
+    ):
+        low = torch.as_tensor(low)
+        high = torch.as_tensor(high)
+        base_dist = Cosine(
+            low - torch.pi / 2, high - torch.pi / 2, validate_args
+        )
+
+        super().__init__(
+            base_dist,
+            [
+                dist.AffineTransform(
+                    loc=torch.pi / 2,
+                    scale=1,
+                )
+            ],
+            validate_args=validate_args,
+        )
+
+
+class LogUniform(dist.TransformedDistribution):
+    """
+    Sample from a log uniform distribution
+    """
+
+    def __init__(self, low: float, high: float, validate_args=None):
+        base_dist = dist.Uniform(
+            torch.as_tensor(low).log(),
+            torch.as_tensor(high).log(),
+            validate_args,
+        )
+        super().__init__(
+            base_dist,
+            [dist.ExpTransform()],
+            validate_args=validate_args,
+        )
+
+
+class LogNormal(dist.LogNormal):
+    def __init__(
+        self,
+        mean: float,
+        std: float,
+        low: Optional[float] = None,
+        validate_args=None,
+    ):
+        self.low = low
+        super().__init__(loc=mean, scale=std, validate_args=validate_args)
+
+    def support(self):
+        if self.low is not None:
+            return dist.constraints.greater_than(self.low)
+
+
+class PowerLaw(dist.TransformedDistribution):
+    """
+    Sample from a power law distribution,
+    .. math::
+        p(x) \approx x^{\alpha}.
+
+    Index alpha cannot be 0, since it is equivalent to a Uniform distribution.
+    This could be used, for example, as a universal distribution of
+    signal-to-noise ratios (SNRs) from uniformly volume distributed
+    sources
+    .. math::
+
+        p(\rho) = 3*\rho_0^3 / \rho^4
+
+    where :math:`\rho_0` is a representative minimum SNR
+    considered for detection. See, for example,
+    `Schutz (2011) <https://arxiv.org/abs/1102.5421>`_.
+    Or, for example, ``index=2`` for uniform in Euclidean volume.
+    """
+
+    support = dist.constraints.nonnegative
+
+    def __init__(
+        self, minimum: float, maximum: float, index: int, validate_args=None
+    ):
+        if index == 0:
+            raise RuntimeError("Index of 0 is the same as Uniform")
+        elif index == -1:
+            base_min = torch.as_tensor(minimum).log()
+            base_max = torch.as_tensor(maximum).log()
+            transforms = [dist.ExpTransform()]
+        else:
+            index_plus = index + 1
+            base_min = minimum**index_plus / index_plus
+            base_max = maximum**index_plus / index_plus
+            transforms = [
+                dist.AffineTransform(loc=0, scale=index_plus),
+                dist.PowerTransform(1 / index_plus),
+            ]
+        base_dist = dist.Uniform(base_min, base_max, validate_args=False)
+        super().__init__(
+            base_dist,
+            transforms,
+            validate_args=validate_args,
+        )
+
+
+class DeltaFunction(dist.Distribution):
+    arg_constraints = {}
+
+    def __init__(
+        self,
+        peak: float = 0.0,
+        validate_args=None,
+    ):
+        batch_shape = torch.Size()
+        super().__init__(batch_shape, validate_args=validate_args)
+        self.peak = torch.as_tensor(peak)
+
+    def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor:
+        return self.peak * torch.ones(
+            sample_shape, device=self.peak.device, dtype=torch.float32
+        )
```
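Relative to the callable samplers removed from 0.4.1 (`ml4gw-0.4.1/ml4gw/distributions.py` above), these classes now subclass `torch.distributions`, so they expose the standard `sample`/`log_prob` interface and compose with other torch distributions. A short sketch of the intended use; the parameter choices are illustrative:

```python
import torch
from ml4gw.distributions import Cosine, PowerLaw, Sine

# isotropic sky location: declination is cosine-distributed on
# [-pi/2, pi/2], inclination sine-distributed on [0, pi]
dec = Cosine()
inc = Sine()
print(dec.sample(torch.Size([1000])).shape)  # torch.Size([1000])

# SNRs of uniform-in-volume sources fall off as rho**-4 above a
# detection threshold (here 8), per the PowerLaw docstring
snr = PowerLaw(minimum=8, maximum=100, index=-4)
samples = snr.sample(torch.Size([1000]))
print(samples.min(), samples.max())  # bounded by [8, 100]
```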
{ml4gw-0.4.1 → ml4gw-0.5.0}/ml4gw/nn/norm.py

```diff
@@ -32,6 +32,12 @@ class GroupNorm1D(torch.nn.Module):
         self.bias = torch.nn.Parameter(torch.zeros(shape))
 
     def forward(self, x):
+        if len(x.shape) != 3:
+            raise ValueError(
+                "GroupNorm1D requires 3-dimensional input, "
+                f"received {len(x.shape)} dimensional input"
+            )
+
         keepdims = self.num_groups == self.num_channels
 
         # compute group variance via the E[x**2] - E**2[x] trick
```