torch-l1-snr 0.0.5__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {torch_l1snr → torch_l1_snr}/__init__.py +1 -1
- {torch_l1snr → torch_l1_snr}/l1snr.py +10 -25
- torch_l1_snr-0.1.1.dist-info/METADATA +265 -0
- torch_l1_snr-0.1.1.dist-info/RECORD +7 -0
- torch_l1_snr-0.1.1.dist-info/top_level.txt +1 -0
- torch_l1_snr-0.0.5.dist-info/METADATA +0 -212
- torch_l1_snr-0.0.5.dist-info/RECORD +0 -7
- torch_l1_snr-0.0.5.dist-info/top_level.txt +0 -1
- {torch_l1_snr-0.0.5.dist-info → torch_l1_snr-0.1.1.dist-info}/WHEEL +0 -0
- {torch_l1_snr-0.0.5.dist-info → torch_l1_snr-0.1.1.dist-info}/licenses/LICENSE +0 -0

```diff
--- torch_l1snr/l1snr.py
+++ torch_l1_snr/l1snr.py
@@ -16,6 +16,8 @@
 # Proceedings of the 25th International Society for Music Information Retrieval Conference, 2024
 # arXiv:2406.18747
 
+import warnings
+
 import torch
 import torch.nn as nn
 from torchaudio.transforms import Spectrogram
@@ -107,20 +109,6 @@ class L1SNRLoss(torch.nn.Module):
         scale_time = c * inv_mean
         l1_term = torch.mean(l1_error) * scale_time
 
-        if getattr(self, "balance_per_sample", False):
-            # per-sample w-independent scaling
-            bal = c / (l1_error.detach() + self.eps)
-            l1_term = torch.mean(l1_error * bal)
-
-        if getattr(self, "debug_balance", False):
-            g_d1 = (1.0 - w) * c * inv_mean
-            if getattr(self, "balance_per_sample", False):
-                g_l1 = w * torch.mean(c / (l1_error.detach() + self.eps))
-            else:
-                g_l1 = w * c * inv_mean
-            ratio = (g_l1 / (g_d1 + 1e-12)).item()
-            setattr(self, "last_balance_ratio", ratio)
-
         loss = (1.0 - w) * l1snr_loss + w * l1_term
         return loss * self.weight
@@ -464,11 +452,6 @@ class STFTL1SNRDBLoss(torch.nn.Module):
             scale_spec = 2.0 * c * inv_mean_comp
             l1_term = 0.5 * (torch.mean(err_re) + torch.mean(err_im)) * scale_spec
 
-            if getattr(self, "balance_per_sample", False):
-                bal_re = c / (err_re.detach() + self.l1snr_eps)
-                bal_im = c / (err_im.detach() + self.l1snr_eps)
-                l1_term = 0.5 * (torch.mean(err_re * bal_re) + torch.mean(err_im * bal_im))
-
             loss = (1.0 - w) * d1_sum + w * l1_term
             return loss
         elif w >= 1.0:
@@ -563,8 +546,10 @@ class STFTL1SNRDBLoss(torch.nn.Module):
                 est_spec = transform(est_source)
                 act_spec = transform(act_source)
             except RuntimeError as e:
-
-
+                warnings.warn(
+                    f"Error computing spectrogram for resolution {i}: {e}. "
+                    f"Parameters: n_fft={self.n_ffts[i]}, hop_length={self.hop_lengths[i]}, win_length={self.win_lengths[i]}"
+                )
                 continue
 
             # Ensure same (B, C, F, T); crop only (F, T) if needed
@@ -578,7 +563,7 @@ class STFTL1SNRDBLoss(torch.nn.Module):
             try:
                 spec_loss = self._compute_complex_spec_l1snr_loss(est_spec, act_spec)
             except RuntimeError as e:
-
+                warnings.warn(f"Error computing complex spectral loss for resolution {i}: {e}")
                 continue
 
             # Check for numerical issues
@@ -599,19 +584,19 @@ class STFTL1SNRDBLoss(torch.nn.Module):
                     # Accumulate regularization loss
                     total_spec_reg_loss += spec_reg_loss
                 except RuntimeError as e:
-
+                    warnings.warn(f"Error computing spectral level-matching for resolution {i}: {e}")
 
                 # Accumulate loss
                 total_spec_loss += spec_loss
                 valid_transforms += 1
 
             except RuntimeError as e:
-
+                warnings.warn(f"Runtime error in spectrogram transform {i}: {e}")
                 continue
 
         # If all transforms failed, return zero loss
         if valid_transforms == 0:
-
+            warnings.warn("All spectrogram transforms failed. Returning zero loss.")
             return torch.tensor(0.0, device=device)
 
         # Average losses across valid transforms
```
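
Practical note: these hunks replace silent `except` blocks with `warnings.warn`, so training loops that previously failed quietly on a bad STFT configuration will now emit `UserWarning`s. If desired, they can be filtered with the standard library (a sketch, not part of the package):

```python
import warnings

# Silence only the spectrogram-failure warnings added in 0.1.1,
# leaving all other warnings visible. The message pattern below matches
# the warning text shown in the diff above.
warnings.filterwarnings(
    "ignore",
    message=r"Error computing spectrogram for resolution .*",
)
```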

```diff
--- /dev/null
+++ torch_l1_snr-0.1.1.dist-info/METADATA
@@ -0,0 +1,265 @@
```

Metadata-Version: 2.4
Name: torch-l1-snr
Version: 0.1.1
Summary: L1-SNR loss functions for audio source separation in PyTorch
Home-page: https://github.com/crlandsc/torch-l1-snr
Author: Christopher Landschoot
Author-email: crlandschoot@gmail.com
License: MIT
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Operating System :: OS Independent
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Multimedia :: Sound/Audio :: Analysis
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch
Requires-Dist: torchaudio
Requires-Dist: numpy>=1.21.0
Dynamic: license-file

![torch-l1-snr](https://raw.githubusercontent.com/crlandsc/torch-l1-snr/main/images/torch-l1-snr-logo.png)

[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/crlandsc/torch-l1-snr/blob/main/LICENSE) [![GitHub Repo stars](https://img.shields.io/github/stars/crlandsc/torch-l1-snr?style=social)](https://github.com/crlandsc/torch-l1-snr/stargazers) [![PyPI](https://img.shields.io/pypi/v/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/) [![Python Versions](https://img.shields.io/pypi/pyversions/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/)

L1 Signal-to-Noise Ratio (SNR) loss functions for audio source separation in PyTorch. This package provides four loss functions that combine implementations from recent academic research with novel extensions, designed to integrate easily into any audio separation or enhancement training pipeline.

The core [`L1SNRLoss`](#l1snrloss-time-domain) is based on the loss function described in [[1]](https://arxiv.org/abs/2309.02539). [`L1SNRDBLoss`](#l1snrdbloss-time-domain-with-regularization) adds the adaptive level-matching regularization proposed in [[2]](https://arxiv.org/abs/2501.16171). [`STFTL1SNRDBLoss`](#stftl1snrdbloss-spectrogram-domain) provides a spectrogram-domain L1SNR-style loss (real/imag STFT components as in [[1]](https://arxiv.org/abs/2309.02539) / [[3]](https://arxiv.org/abs/2406.18747)). [`MultiL1SNRDBLoss`](#multil1snrdbloss-combined-time--spectrogram) combines time-domain and spectrogram-domain losses into a single loss function for convenience and flexibility. Optional novel algorithmic extensions are also included (multi-resolution STFT averaging, a spectrogram-domain adaptation of the level-matching regularizer from [[2]](https://arxiv.org/abs/2501.16171), time vs. spectrogram loss balancing, and blending with standard L1 loss), with the goal of increasing flexibility for improved performance depending on the specific task.

## Quick Start

```python
import torch
from torch_l1_snr import MultiL1SNRDBLoss

# Create combined time + spectrogram domain loss function with adaptive regularization
loss_fn = MultiL1SNRDBLoss(name="multi_l1_snr_db_loss")

# Calculate loss between model output and target
estimates = torch.randn(4, 32000)  # (batch, samples)
targets = torch.randn(4, 32000)
loss = loss_fn(estimates, targets)
loss.backward()
```

## Loss Functions

- [**Time-Domain L1SNR Loss**](#l1snrloss-time-domain): A basic, time-domain L1-SNR loss, based on [[1]](https://arxiv.org/abs/2309.02539).
- [**Regularized Time-Domain L1SNRDBLoss**](#l1snrdbloss-time-domain-with-regularization): An extension of the L1SNR loss with adaptive level-matching regularization from [[2]](https://arxiv.org/abs/2501.16171), plus an optional L1 loss component.
- [**Multi-Resolution STFT L1SNRDBLoss**](#stftl1snrdbloss-spectrogram-domain): A spectrogram-domain L1SNR-style loss (real/imag STFT components as in [[1]](https://arxiv.org/abs/2309.02539) / [[3]](https://arxiv.org/abs/2406.18747)), computed over multiple STFT resolutions, with optional spectrogram-domain level-matching regularization inspired by its time-domain counterpart in [[2]](https://arxiv.org/abs/2501.16171).
- [**Combined Multi-Domain Loss**](#multil1snrdbloss-combined-time--spectrogram): `MultiL1SNRDBLoss` combines time-domain and spectrogram-domain losses into a single, weighted objective function.

## Additional Features

- **L1 Loss Blending**: The `l1_weight` parameter allows mixing between L1SNR and standard L1 loss, softening the ["all-or-nothing" behavior](#all-or-nothing-behavior-and-l1_weight) of pure SNR losses for more nuanced separation.
- **Multi-Resolution STFT Averaging**: Extends the STFT-based loss to multiple resolutions, as is common in recent literature.
- **Spectrogram-Domain Adaptation of the Level-Matching Regularizer [[2]](https://arxiv.org/abs/2501.16171)**: Options to extend adaptive level-matching regularization to the spectrogram domain. Experimental and not used by default.
- **Time vs. Spectrogram Loss Balancing**: Allows fine-tuning the relative contribution of time-domain and spectrogram-domain losses in `MultiL1SNRDBLoss` via the `spec_weight` parameter.
- **Numerical Stability**: Robust handling of `NaN` and `inf` values during training.
- **Short Audio Fallback**: Graceful fallback to time-domain loss when audio is too short for STFT processing.

## Installation

### Install from PyPI

```bash
pip install torch-l1-snr
```

### Install from GitHub

```bash
pip install git+https://github.com/crlandsc/torch-l1-snr.git
```

Or, you can clone the repository and install it in editable mode for development:

```bash
git clone https://github.com/crlandsc/torch-l1-snr.git
cd torch-l1-snr
pip install -e .
```

## Dependencies

- [PyTorch](https://pytorch.org/)
- [torchaudio](https://pytorch.org/audio/stable/index.html)
- [NumPy](https://numpy.org/) (>=1.21.0)

## Supported Tensor Shapes

All loss functions in this package (`L1SNRLoss`, `L1SNRDBLoss`, `STFTL1SNRDBLoss`, and `MultiL1SNRDBLoss`) accept standard audio tensors of shape `(batch, samples)`, `(batch, channels, samples)`, or `(batch, num_sources, channels, samples)`. For the time-domain losses, any 3D/4D input is flattened across all non-batch dimensions (e.g., sources, channels, and samples) into a single vector per example before the loss is computed. For the spectrogram-domain loss, inputs are reshaped to `(batch, streams, samples)` by flattening all non-time dimensions into a "stream" dimension (e.g., `streams = channels` or `streams = num_sources * channels`), and a separate STFT is computed for each stream.
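
As an illustration of this convention, here is a minimal sketch (the helper name `flatten_to_streams` is mine, not part of the package's API):

```python
import torch

def flatten_to_streams(x: torch.Tensor) -> torch.Tensor:
    # Illustrative only: collapse all non-batch, non-time dims into "streams".
    if x.dim() == 2:  # (batch, samples) -> a single stream per example
        return x.unsqueeze(1)
    # (batch, channels, samples) or (batch, num_sources, channels, samples)
    return x.reshape(x.shape[0], -1, x.shape[-1])

x = torch.randn(4, 3, 2, 44100)     # (batch, num_sources, channels, samples)
print(flatten_to_streams(x).shape)  # torch.Size([4, 6, 44100]); streams = 3 * 2
```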

## Usage

The loss functions can be imported directly from the `torch_l1_snr` package.

### `L1SNRLoss` (Time Domain)

The simplest loss function: pure L1SNR without regularization.

```python
import torch
from torch_l1_snr import L1SNRLoss

# Create dummy audio signals
estimates = torch.randn(4, 2, 44100)  # Batch of 4, stereo, 44100 samples
actuals = torch.randn(4, 2, 44100)

# Basic L1SNR loss
loss_fn = L1SNRLoss(name="l1_snr_loss")

# Calculate loss
loss = loss_fn(estimates, actuals)
loss.backward()

print(f"L1SNRLoss: {loss.item()}")
```

### `L1SNRDBLoss` (Time Domain with Regularization)

Adds adaptive level-matching regularization to prevent silence collapse.

```python
import torch
from torch_l1_snr import L1SNRDBLoss

# Create dummy audio signals
estimates = torch.randn(4, 2, 44100)  # Batch of 4, stereo, 44100 samples
actuals = torch.randn(4, 2, 44100)

# Initialize the loss function with regularization enabled
# l1_weight=0.1 blends 90% L1SNR+Regularization with 10% L1 loss
loss_fn = L1SNRDBLoss(
    name="l1_snr_db_loss",
    use_regularization=True,  # Enable adaptive level-matching regularization
    l1_weight=0.1             # 10% L1 loss, 90% L1SNR + regularization
)

# Calculate loss
loss = loss_fn(estimates, actuals)
loss.backward()

print(f"L1SNRDBLoss: {loss.item()}")
```

### `STFTL1SNRDBLoss` (Spectrogram Domain)

Computes L1SNR loss across multiple STFT resolutions.

```python
import torch
from torch_l1_snr import STFTL1SNRDBLoss

# Create dummy audio signals
estimates = torch.randn(4, 2, 44100)  # Batch of 4, stereo, 44100 samples
actuals = torch.randn(4, 2, 44100)

# Initialize the loss function without regularization or traditional L1
# Uses multiple STFT resolutions by default: [512, 1024, 2048] FFT sizes
loss_fn = STFTL1SNRDBLoss(
    name="stft_l1_snr_db_loss",
    l1_weight=0.0  # Pure L1SNR (no regularization, no L1)
)

# Calculate loss
loss = loss_fn(estimates, actuals)
loss.backward()

print(f"STFTL1SNRDBLoss: {loss.item()}")
```

### `MultiL1SNRDBLoss` (Combined Time + Spectrogram)

Combines time-domain and spectrogram-domain losses into a single weighted objective.

```python
import torch
from torch_l1_snr import MultiL1SNRDBLoss

# Create dummy audio signals
estimates = torch.randn(4, 2, 44100)  # Batch of 4, stereo, 44100 samples
actuals = torch.randn(4, 2, 44100)

# Initialize the multi-domain loss function
loss_fn = MultiL1SNRDBLoss(
    name="multi_l1_snr_db_loss",
    weight=1.0,                    # Overall weight for this loss
    spec_weight=0.6,               # 60% spectrogram loss, 40% time-domain loss
    l1_weight=0.1,                 # Use 10% L1, 90% L1SNR+Reg in both domains
    use_time_regularization=True,  # Enable regularization in time domain
    use_spec_regularization=False  # Disable regularization in spec domain
)

# Calculate loss
loss = loss_fn(estimates, actuals)
print(f"Multi-domain Loss: {loss.item()}")
```

## Motivation

The goal of these loss functions is to provide a perceptually informed and robust alternative to common audio losses like L1, L2 (MSE), and SI-SDR for training audio source separation models.

- **Robustness**: The L1 norm is less sensitive to large outliers than the L2 norm, making it better suited to audio signals, which can contain sharp transients.
- **Perceptual Relevance**: The loss is scaled to decibels (dB), which more closely aligns with human perception of loudness.
- **Adaptive Regularization**: Prevents the model from collapsing to silent outputs by penalizing mismatches in the overall loudness (dBRMS) between the estimate and the target.

This package is motivated by, and largely follows, the objectives and regularizers described in the cited papers ([1–3]). Several novel algorithmic extensions have been included with the goal of increasing flexibility for improved performance depending on the specific task.

### Level-Matching Regularization

A key feature of `L1SNRDBLoss` is the adaptive regularization term described in [[2]](https://arxiv.org/abs/2501.16171). This component calculates the difference in decibel-scaled root-mean-square (dBRMS) levels between the estimated and actual signals. An adaptive weight (`lambda`) is applied to this difference, which increases when the model incorrectly silences a non-silent target. This encourages the model to learn the correct output level and specifically prevents the model from collapsing to a trivial silent solution when uncertain.
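
To make the dBRMS comparison concrete, here is a minimal, illustrative sketch of the level-difference term (the helper `db_rms` is mine; the package's internal names and its exact adaptive `lambda` schedule are not shown in this diff):

```python
import torch

def db_rms(x: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # Decibel-scaled RMS level per example (illustrative).
    rms = torch.sqrt(torch.mean(x ** 2, dim=-1) + eps)
    return 20.0 * torch.log10(rms + eps)

est = torch.randn(4, 44100) * 0.1  # quiet estimate
act = torch.randn(4, 44100)        # louder target

# Level mismatch in dB; an adaptive weight (lambda) scales this term,
# growing when the estimate is near-silent but the target is not.
level_diff = torch.abs(db_rms(est) - db_rms(act))
print(level_diff)  # roughly 20 dB of mismatch per example
```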

### Multi-Resolution Spectrogram Analysis

The `STFTL1SNRDBLoss` module applies the L1SNRDB loss across multiple time-frequency (spectrogram) resolutions. While not described in the cited papers, analyzing the signal with *multiple different* STFT window sizes and hop lengths lets the loss function capture a wider range of artifacts, from short transient errors to longer tonal discrepancies. This provides a more comprehensive error signal to the model during training. Using multiple resolutions for an STFT loss is common across many recent source separation works, such as the [Band-Split RoPE Transformer](https://arxiv.org/abs/2309.02612).
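
The general pattern looks roughly like the sketch below, where a plain L1 term on the real/imaginary STFT components stands in for the package's internal complex-spectrogram L1SNR computation (the resolutions and hop lengths here are assumptions, not the package's exact defaults):

```python
import torch
from torchaudio.transforms import Spectrogram

def multi_res_spec_loss(est, act, n_ffts=(512, 1024, 2048)):
    # Illustrative multi-resolution loop; the package's internals differ.
    total = 0.0
    for n_fft in n_ffts:
        # power=None returns the complex STFT (real + imaginary components)
        stft = Spectrogram(n_fft=n_fft, hop_length=n_fft // 4, power=None)
        err = stft(est) - stft(act)
        # Stand-in per-resolution term: L1 on the real and imaginary parts
        total = total + err.real.abs().mean() + err.imag.abs().mean()
    return total / len(n_ffts)

est, act = torch.randn(4, 2, 44100), torch.randn(4, 2, 44100)
print(multi_res_spec_loss(est, act))
```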

### "All-or-Nothing" Behavior and `l1_weight`

A characteristic of these SNR-style losses, which I observed across many training experiments, is that they encourage the model to make definitive, "all-or-nothing" separation decisions. This can be highly effective for well-defined sources (e.g. drums vs. vocals), as it pushes the model to be confident in its estimations. However, it can also lead to "confident errors," where the model completely removes a signal component it should have kept. This poses a tradeoff for sources that share greater similarities (e.g. speech vs. singing vocals).

While the Level-Matching Regularization prevents a *total collapse to silence*, it does not by itself solve this issue of overly confident, hard-boundary separation. To provide a tunable solution, this implementation introduces a novel `l1_weight` hyperparameter. It allows you to create a hybrid loss, blending the decisive L1SNR objective with a standard L1 loss to soften the "all-or-nothing" behavior and allow for more nuanced separation.

While this can potentially reduce the "cleanliness" of separations and slightly harm metrics like SDR, I found that re-introducing some standard L1 loss allows slightly more "smearing" of sound between sources, which masks large errors and can be more perceptually acceptable for sources with many similarities. I have no hard numbers to report on this yet, only my experience, so I recommend starting with no standard L1 mixed in (`l1_weight=0.0`) and slowly increasing from there based on your needs.

- `l1_weight=0.0` (Default): Pure L1SNR (+ regularization).
- `l1_weight=1.0`: Pure standard L1 loss.
- `0.0 < l1_weight < 1.0`: A weighted combination of the two.

The implementation is optimized for efficiency: if `l1_weight` is `0.0` or `1.0`, the unused loss component is not computed, saving computational resources.
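
Schematically, the blend and its endpoint shortcuts look like the following sketch, which mirrors the `loss = (1.0 - w) * l1snr_loss + w * l1_term` line visible in the diff above (`l1snr_loss_fn` and the crude `snr_like` stand-in are mine, not the package's API):

```python
import torch
import torch.nn.functional as F

def blended_loss(l1snr_loss_fn, est, act, l1_weight=0.1):
    # Mirrors `loss = (1.0 - w) * l1snr_loss + w * l1_term` from the diff above.
    w = l1_weight
    if w <= 0.0:  # pure L1SNR: the L1 term is never computed
        return l1snr_loss_fn(est, act)
    if w >= 1.0:  # pure L1: the L1SNR term is never computed
        return F.l1_loss(est, act)
    return (1.0 - w) * l1snr_loss_fn(est, act) + w * F.l1_loss(est, act)

# Crude L1SNR-like stand-in purely so the sketch runs end to end
snr_like = lambda e, a: -20.0 * torch.log10(
    a.abs().mean() / ((e - a).abs().mean() + 1e-8) + 1e-8
)
est, act = torch.randn(4, 44100), torch.randn(4, 44100)
print(blended_loss(snr_like, est, act, l1_weight=0.1))
```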

**Note on Gradient Balancing:** When blending losses (`0.0 < l1_weight < 1.0`), the implementation automatically scales the L1 component to approximately match the gradient magnitudes of the L1SNR component. This helps maintain stable training without manual tuning.
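
A rough intuition for why this works, consistent with the `scale_time = c * inv_mean` line in the diff above (the exact constant the package uses is not shown in this diff; `c = 20 / ln(10)` below is my assumption, taken from the derivative of the dB scale):

```python
import math
import torch

# d/du [20 * log10(u)] = (20 / ln 10) / u: the gradient of a dB-scaled error
# term with respect to the mean error behaves like c * inv_mean.
c = 20.0 / math.log(10.0)       # assumed dB constant, ~8.686

l1_error = torch.rand(4) + 0.1  # stand-in per-sample mean |error| values
inv_mean = 1.0 / l1_error.mean()
scale_time = c * inv_mean       # cf. `scale_time = c * inv_mean` in the diff
print(scale_time.item())        # multiplying the L1 term by this factor puts
                                # its gradients on the dB term's scale
```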

## Limitations

- The L1SNR loss is not scale-invariant. Unlike SI-SNR, it requires the model's output to be correctly scaled relative to the target.
- While the dB scaling and regularization are psychoacoustically motivated, the loss does not model more complex perceptual phenomena like auditory masking.

## Contributing

Contributions are welcome! Please open an issue or submit a pull request if you have any bug fixes, improvements, or new features to suggest.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Acknowledgments

The loss functions implemented here are largely based on the work of the authors of the referenced papers. Thank you for your research!

## References

[1] K. N. Watcharasupat, C.-W. Wu, Y. Ding, I. Orife, A. J. Hipple, P. A. Williams, S. Kramer, A. Lerch, and W. Wolcott, "A Generalized Bandsplit Neural Network for Cinematic Audio Source Separation," IEEE Open Journal of Signal Processing, 2023. [arXiv:2309.02539](https://arxiv.org/abs/2309.02539)

[2] K. N. Watcharasupat and A. Lerch, "Separate This, and All of these Things Around It: Music Source Separation via Hyperellipsoidal Queries," [arXiv:2501.16171](https://arxiv.org/abs/2501.16171).

[3] K. N. Watcharasupat and A. Lerch, "A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems," Proceedings of the 25th International Society for Music Information Retrieval Conference, 2024. [arXiv:2406.18747](https://arxiv.org/abs/2406.18747)

```diff
--- /dev/null
+++ torch_l1_snr-0.1.1.dist-info/RECORD
@@ -0,0 +1,7 @@
+torch_l1_snr/__init__.py,sha256=L3Cpdpnhz80gpfcTf6aNM1ROPdOIbdNoN8vO9LxcZEQ,244
+torch_l1_snr/l1snr.py,sha256=F1NF3VGodaLWFtHs9xco9MbxfEJ01ip_JSHFS2GgBkU,34520
+torch_l1_snr-0.1.1.dist-info/licenses/LICENSE,sha256=JdS2Pv6DDs3jvXHACGdcHYdiFMe9EO1XGeHkEHLTr8Y,1079
+torch_l1_snr-0.1.1.dist-info/METADATA,sha256=8zC2S_NgV8B4Wg59QJwoupNfF063TxD6nFEzXJdeZIw,15704
+torch_l1_snr-0.1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+torch_l1_snr-0.1.1.dist-info/top_level.txt,sha256=VUo0QlGvu7tOF8BKWWDoIiLlhcAcetYwR6c8Ldhhpco,13
+torch_l1_snr-0.1.1.dist-info/RECORD,,
```

```diff
--- /dev/null
+++ torch_l1_snr-0.1.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+torch_l1_snr
```

```diff
--- torch_l1_snr-0.0.5.dist-info/METADATA
+++ /dev/null
@@ -1,212 +0,0 @@
```

Metadata-Version: 2.4
Name: torch-l1-snr
Version: 0.0.5
Summary: L1-SNR loss functions for audio source separation in PyTorch
Home-page: https://github.com/crlandsc/torch-l1-snr
Author: Christopher Landscaping
Author-email: crlandschoot@gmail.com
License: MIT
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Operating System :: OS Independent
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Multimedia :: Sound/Audio :: Analysis
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch
Requires-Dist: torchaudio
Requires-Dist: numpy>=1.21.0
Dynamic: license-file

![torch-l1-snr](https://raw.githubusercontent.com/crlandsc/torch-l1snr/main/images/torch-l1-snr-logo.png)

# NOTE: Repo is currently a work-in-progress and not ready for installation & use.

[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://github.com/crlandsc/torch-l1snr/blob/main/LICENSE) [![GitHub Repo stars](https://img.shields.io/github/stars/crlandsc/torch-l1snr?style=social)](https://github.com/crlandsc/torch-l1snr/stargazers)

A PyTorch implementation of L1-based Signal-to-Noise Ratio (SNR) loss functions for audio source separation. This package provides implementations and novel extensions based on concepts from recent academic papers, offering flexible and robust loss functions that can be easily integrated into any PyTorch-based audio separation pipeline.

The core `L1SNRLoss` is based on the loss function described in [[1]](https://arxiv.org/abs/2309.02539), while `L1SNRDBLoss` and `STFTL1SNRDBLoss` are extensions of the adaptive level-matching regularization technique proposed in [[2]](https://arxiv.org/abs/2501.16171).

## Features

- **Time-Domain L1SNR Loss**: A basic, time-domain L1-SNR loss, based on [[1]](https://arxiv.org/abs/2309.02539).
- **Regularized Time-Domain L1SNRDBLoss**: An extension of the L1SNR loss with adaptive level-matching regularization from [[2]](https://arxiv.org/abs/2501.16171), plus an optional L1 loss component.
- **Multi-Resolution STFT L1SNRDBLoss**: A spectrogram-domain version of the loss from [[2]](https://arxiv.org/abs/2501.16171), calculated over multiple STFT resolutions.
- **Modular Stem-based Loss**: A wrapper that combines time and spectrogram domain losses and can be configured to run on specific stems.
- **Efficient & Robust**: Includes optimizations for pure L1 loss calculation and robust handling of `NaN`/`inf` values and short audio segments.

## Installation

[![PyPI](https://img.shields.io/pypi/v/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/) [![Python Versions](https://img.shields.io/pypi/pyversions/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/)

## Install from PyPI

```bash
pip install torch-l1-snr
```

## Install from GitHub

```bash
pip install git+https://github.com/crlandsc/torch-l1snr.git
```

Or, you can clone the repository and install it in editable mode for development:

```bash
git clone https://github.com/crlandsc/torch-l1snr.git
cd torch-l1snr
pip install -e .
```

## Dependencies

- [PyTorch](https://pytorch.org/)
- [torchaudio](https://pytorch.org/audio/stable/index.html)

## Supported Tensor Shapes

All loss functions in this package (`L1SNRLoss`, `L1SNRDBLoss`, `STFTL1SNRDBLoss`, and `MultiL1SNRDBLoss`) accept standard audio tensors of shape `(batch, samples)` or `(batch, channels, samples)`. For 3D tensors, the channel and sample dimensions are flattened before the time-domain losses are calculated. For the spectrogram-domain loss, a separate STFT is computed for each channel.

## Usage

The loss functions can be imported directly from the `torch_l1snr` package.

### Example: `L1SNRDBLoss` (Time Domain)

```python
import torch
from torch_l1snr import L1SNRDBLoss

# Create dummy audio signals
estimates = torch.randn(4, 32000)  # Batch of 4, 32000 samples
actuals = torch.randn(4, 32000)

# Initialize the loss function with regularization enabled
# l1_weight=0.1 blends L1SNR+Regularization with 10% L1 loss
loss_fn = L1SNRDBLoss(
    name="l1_snr_db_loss",
    use_regularization=True,  # Enable adaptive level-matching regularization
    l1_weight=0.1             # 10% L1 loss, 90% L1SNR + regularization
)

# Calculate loss
loss = loss_fn(estimates, actuals)
loss.backward()

print(f"L1SNRDBLoss: {loss.item()}")
```

### Example: `STFTL1SNRDBLoss` (Spectrogram Domain)

```python
import torch
from torch_l1snr import STFTL1SNRDBLoss

# Create dummy audio signals
estimates = torch.randn(4, 32000)
actuals = torch.randn(4, 32000)

# Initialize the loss function
# Uses multiple STFT resolutions by default: [512, 1024, 2048] FFT sizes
loss_fn = STFTL1SNRDBLoss(
    name="stft_l1_snr_db_loss",
    l1_weight=0.0  # Pure L1SNR (no regularization, no L1)
)

# Calculate loss
loss = loss_fn(estimates, actuals)
loss.backward()

print(f"STFTL1SNRDBLoss: {loss.item()}")
```

### Example: `MultiL1SNRDBLoss` for a Combined Time+Spectrogram Loss

This loss combines the time-domain and spectrogram-domain losses into a single, weighted objective function.

```python
import torch
from torch_l1snr import MultiL1SNRDBLoss

# Create dummy audio signals
# Shape: (batch, channels, samples)
estimates = torch.randn(2, 2, 44100)  # Batch of 2, stereo
actuals = torch.randn(2, 2, 44100)

# --- Configuration ---
loss_fn = MultiL1SNRDBLoss(
    name="multi_l1_snr_db_loss",
    weight=1.0,                    # Overall weight for this loss
    spec_weight=0.6,               # 60% spectrogram loss, 40% time-domain loss
    l1_weight=0.1,                 # Use 10% L1, 90% L1SNR+Reg in both domains
    use_time_regularization=True,  # Enable regularization in time domain
    use_spec_regularization=False  # Disable regularization in spec domain
)
loss = loss_fn(estimates, actuals)
print(f"Multi-domain Loss: {loss.item()}")
```

## Motivation

The goal of these loss functions is to provide a perceptually-informed and robust alternative to common audio losses like L1, L2 (MSE), and SI-SDR for training audio source separation models.

- **Robustness**: The L1 norm is less sensitive to large outliers than the L2 norm, making it more suitable for audio signals which can have sharp transients.
- **Perceptual Relevance**: The loss is scaled to decibels (dB), which more closely aligns with human perception of loudness.
- **Adaptive Regularization**: Prevents the model from collapsing to silent outputs by penalizing mismatches in the overall loudness (dBRMS) between the estimate and the target.

#### Level-Matching Regularization

A key feature of `L1SNRDBLoss` is the adaptive regularization term, as described in [[2]](https://arxiv.org/abs/2501.16171). This component calculates the difference in decibel-scaled root-mean-square (dBRMS) levels between the estimated and actual signals. An adaptive weight (`lambda`) is applied to this difference, which increases when the model incorrectly silences a non-silent target. This encourages the model to learn the correct output level and specifically avoids the model collapsing to a trivial silent solution when uncertain.

#### Multi-Resolution Spectrogram Analysis

The `STFTL1SNRDBLoss` module applies the L1SNRDB loss across multiple time-frequency resolutions. By analyzing the signal with different STFT window sizes and hop lengths, the loss function can capture a wider range of artifacts—from short, transient errors to longer, tonal discrepancies. This provides a more comprehensive error signal to the model during training.

#### "All-or-Nothing" Behavior and `l1_weight`

A characteristic of SNR-style losses is that they encourage the model to make definitive, "all-or-nothing" separation decisions. This can be highly effective for well-defined sources, as it pushes the model to be confident in its estimations. However, this can also lead to "confident errors," where the model completely removes a signal component it should have kept.

While the Level-Matching Regularization prevents a *total collapse to silence*, it does not by itself solve this issue of overly confident, hard-boundary separation. To provide a tunable solution, this implementation introduces a novel `l1_weight` hyperparameter. This allows you to create a hybrid loss, blending the decisive L1SNR objective with a standard L1 loss to soften its "all-or-nothing"-style behavior and allow for more nuanced separation.

- `l1_weight=0.0` (Default): Pure L1SNR (+ regularization).
- `l1_weight=1.0`: Pure L1 loss.
- `0.0 < l1_weight < 1.0`: A weighted combination of the two.

The implementation is optimized for efficiency: if `l1_weight` is `0.0` or `1.0`, the unused loss component is not computed, saving computational resources.

**Note on Gradient Balancing:** When blending losses (`0.0 < l1_weight < 1.0`), you may need to tune `l1_scale_time` and `l1_scale_spec`. This is to ensure the gradients of the L1 and L1SNR components are balanced, which is crucial for stable training. The default values provide a reasonable starting point, but monitoring the loss components is recommended to ensure they are scaled appropriately.

## Limitations

- The L1SNR loss is not scale-invariant. Unlike SI-SNR, it requires the model's output to be correctly scaled relative to the target.
- While the dB scaling and regularization are psychoacoustically motivated, the loss does not model more complex perceptual phenomena like auditory masking.

## Contributing

Contributions are welcome! Please open an issue or submit a pull request if you have any improvements or new features to suggest.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Acknowledgments

The loss functions implemented here are based on the work of the authors of the referenced papers.

## References

[1] K. N. Watcharasupat, C.-W. Wu, Y. Ding, I. Orife, A. J. Hipple, P. A. Williams, S. Kramer, A. Lerch, and W. Wolcott, "A Generalized Bandsplit Neural Network for Cinematic Audio Source Separation," IEEE Open Journal of Signal Processing, 2023. [arXiv:2309.02539](https://arxiv.org/abs/2309.02539)

[2] K. N. Watcharasupat and A. Lerch, "Separate This, and All of these Things Around It: Music Source Separation via Hyperellipsoidal Queries," [arXiv:2501.16171](https://arxiv.org/abs/2501.16171).

[3] K. N. Watcharasupat and A. Lerch, "A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems," Proceedings of the 25th International Society for Music Information Retrieval Conference, 2024. [arXiv:2406.18747](https://arxiv.org/abs/2406.18747)

```diff
--- torch_l1_snr-0.0.5.dist-info/RECORD
+++ /dev/null
@@ -1,7 +0,0 @@
-torch_l1_snr-0.0.5.dist-info/licenses/LICENSE,sha256=JdS2Pv6DDs3jvXHACGdcHYdiFMe9EO1XGeHkEHLTr8Y,1079
-torch_l1snr/__init__.py,sha256=aeVkPlJMPv78xSKINga6A01VaLAHU1JkoGdJRJgtyaM,244
-torch_l1snr/l1snr.py,sha256=aqmtNfT_8A0IRI9jiVGwNse3igBvelQGKnjfe23Xh7w,35304
-torch_l1_snr-0.0.5.dist-info/METADATA,sha256=lcqW5iAupekXdy6rWoi9oAnL087Gl8prdriy_t-TQOA,11143
-torch_l1_snr-0.0.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-torch_l1_snr-0.0.5.dist-info/top_level.txt,sha256=NfaRND6pcjZ7-035d4XAg8xJuz31EEU210Y9xWeFOxc,12
-torch_l1_snr-0.0.5.dist-info/RECORD,,
```

```diff
--- torch_l1_snr-0.0.5.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-torch_l1snr
```

{torch_l1_snr-0.0.5.dist-info → torch_l1_snr-0.1.1.dist-info}/WHEEL: file without changes
{torch_l1_snr-0.0.5.dist-info → torch_l1_snr-0.1.1.dist-info}/licenses/LICENSE: file without changes