torch-l1-snr 0.0.4__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
torch_l1snr/__init__.py → torch_l1_snr/__init__.py

@@ -12,4 +12,6 @@ __all__ = [
      "L1SNRDBLoss",
      "STFTL1SNRDBLoss",
      "MultiL1SNRDBLoss",
- ]
+ ]
+
+ __version__ = "0.1.0"
torch_l1snr/l1snr.py → torch_l1_snr/l1snr.py

@@ -16,6 +16,8 @@
  # Proceedings of the 25th International Society for Music Information Retrieval Conference, 2024
  # arXiv:2406.18747
 
+ import warnings
+
  import torch
  import torch.nn as nn
  from torchaudio.transforms import Spectrogram
@@ -107,20 +109,6 @@ class L1SNRLoss(torch.nn.Module):
  scale_time = c * inv_mean
  l1_term = torch.mean(l1_error) * scale_time
 
- if getattr(self, "balance_per_sample", False):
-     # per-sample w-independent scaling
-     bal = c / (l1_error.detach() + self.eps)
-     l1_term = torch.mean(l1_error * bal)
-
- if getattr(self, "debug_balance", False):
-     g_d1 = (1.0 - w) * c * inv_mean
-     if getattr(self, "balance_per_sample", False):
-         g_l1 = w * torch.mean(c / (l1_error.detach() + self.eps))
-     else:
-         g_l1 = w * c * inv_mean
-     ratio = (g_l1 / (g_d1 + 1e-12)).item()
-     setattr(self, "last_balance_ratio", ratio)
-
  loss = (1.0 - w) * l1snr_loss + w * l1_term
  return loss * self.weight
 
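Note: after this removal, the blending reduces to a single `w`-independent scale on the mean L1 error. Below is a minimal, self-contained sketch of that retained path; the tensors and the dB constant `c` are stand-ins chosen for illustration, and only the names `w`, `c`, `inv_mean`, `l1_error`, `scale_time`, and the final blend mirror the hunk above.

```python
import torch

# Stand-in values; in the package these come from the forward pass
w = 0.1                      # l1_weight: blend between the L1SNR and L1 terms
c = 8.6859                   # assumed dB constant (20 / ln 10); illustrative only
l1_error = torch.rand(4)     # per-sample L1 error (illustrative)
l1snr_loss = torch.rand(())  # stand-in for the dB-scaled L1SNR term
inv_mean = 1.0 / (l1_error.mean() + 1e-8)

# The retained path: one w-independent scale applied to the mean L1 error
scale_time = c * inv_mean
l1_term = torch.mean(l1_error) * scale_time
loss = (1.0 - w) * l1snr_loss + w * l1_term
```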
@@ -464,11 +452,6 @@ class STFTL1SNRDBLoss(torch.nn.Module):
  scale_spec = 2.0 * c * inv_mean_comp
  l1_term = 0.5 * (torch.mean(err_re) + torch.mean(err_im)) * scale_spec
 
- if getattr(self, "balance_per_sample", False):
-     bal_re = c / (err_re.detach() + self.l1snr_eps)
-     bal_im = c / (err_im.detach() + self.l1snr_eps)
-     l1_term = 0.5 * (torch.mean(err_re * bal_re) + torch.mean(err_im * bal_im))
-
  loss = (1.0 - w) * d1_sum + w * l1_term
  return loss
  elif w >= 1.0:
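The spectral branch keeps the analogous fixed scaling over the real and imaginary L1 errors. A schematic sketch with stand-in tensors follows; how `inv_mean_comp` is formed here is an assumption, and the `2.0` factor appears to offset the `0.5` averaging of the two components.

```python
import torch

err_re = torch.rand(4)  # stand-in per-sample L1 error, real part
err_im = torch.rand(4)  # stand-in per-sample L1 error, imaginary part
c = 8.6859              # assumed dB constant, as in the time-domain sketch
# assumed form: inverse of the mean error over both components
inv_mean_comp = 1.0 / (0.5 * (err_re.mean() + err_im.mean()) + 1e-8)

# 2.0 cancels the 0.5 averaging over the real/imaginary components
scale_spec = 2.0 * c * inv_mean_comp
l1_term = 0.5 * (torch.mean(err_re) + torch.mean(err_im)) * scale_spec
```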
@@ -563,8 +546,10 @@ class STFTL1SNRDBLoss(torch.nn.Module):
      est_spec = transform(est_source)
      act_spec = transform(act_source)
  except RuntimeError as e:
-     print(f"Error computing spectrogram for resolution {i}: {e}")
-     print(f"Parameters: n_fft={self.n_ffts[i]}, hop_length={self.hop_lengths[i]}, win_length={self.win_lengths[i]}")
+     warnings.warn(
+         f"Error computing spectrogram for resolution {i}: {e}. "
+         f"Parameters: n_fft={self.n_ffts[i]}, hop_length={self.hop_lengths[i]}, win_length={self.win_lengths[i]}"
+     )
      continue
 
  # Ensure same (B, C, F, T); crop only (F, T) if needed
@@ -578,7 +563,7 @@ class STFTL1SNRDBLoss(torch.nn.Module):
  try:
      spec_loss = self._compute_complex_spec_l1snr_loss(est_spec, act_spec)
  except RuntimeError as e:
-     print(f"Error computing complex spectral loss for resolution {i}: {e}")
+     warnings.warn(f"Error computing complex spectral loss for resolution {i}: {e}")
      continue
 
  # Check for numerical issues
@@ -599,19 +584,19 @@ class STFTL1SNRDBLoss(torch.nn.Module):
      # Accumulate regularization loss
      total_spec_reg_loss += spec_reg_loss
  except RuntimeError as e:
-     print(f"Error computing spectral level-matching for resolution {i}: {e}")
+     warnings.warn(f"Error computing spectral level-matching for resolution {i}: {e}")
 
  # Accumulate loss
  total_spec_loss += spec_loss
  valid_transforms += 1
 
  except RuntimeError as e:
-     print(f"Runtime error in spectrogram transform {i}: {e}")
+     warnings.warn(f"Runtime error in spectrogram transform {i}: {e}")
      continue
 
  # If all transforms failed, return zero loss
  if valid_transforms == 0:
-     print("Warning: All spectrogram transforms failed. Returning zero loss.")
+     warnings.warn("All spectrogram transforms failed. Returning zero loss.")
      return torch.tensor(0.0, device=device)
 
  # Average losses across valid transforms
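Note: since these diagnostics are now issued via `warnings.warn` instead of `print`, a training script can manage them with the standard library's warning filters rather than scraping stdout, for example:

```python
import warnings

# Show each STFT-related warning only once per unique message
warnings.filterwarnings("once", message="Error computing spectrogram")

# Or escalate to an exception while debugging STFT parameters
# warnings.filterwarnings("error", message="Error computing spectrogram")
```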
torch_l1_snr-0.0.4.dist-info/METADATA → torch_l1_snr-0.1.0.dist-info/METADATA

@@ -1,9 +1,9 @@
  Metadata-Version: 2.4
  Name: torch-l1-snr
- Version: 0.0.4
+ Version: 0.1.0
  Summary: L1-SNR loss functions for audio source separation in PyTorch
  Home-page: https://github.com/crlandsc/torch-l1-snr
- Author: Christopher Landscaping
+ Author: Christopher Landschoot
  Author-email: crlandschoot@gmail.com
  License: MIT
  Classifier: Intended Audience :: Developers
@@ -28,43 +28,59 @@ Dynamic: license-file
 
  ![torch-l1-snr-logo](https://raw.githubusercontent.com/crlandsc/torch-l1-snr/main/images/logo.png)
 
- # NOTE: Repo is currently a work-in-progress and not ready for installation & use.
+ [![LICENSE](https://img.shields.io/github/license/crlandsc/torch-l1-snr)](https://github.com/crlandsc/torch-l1-snr/blob/main/LICENSE) [![GitHub Repo stars](https://img.shields.io/github/stars/crlandsc/torch-l1-snr)](https://github.com/crlandsc/torch-l1-snr/stargazers)
 
- [![LICENSE](https://img.shields.io/github/license/crlandsc/torch-l1snr)](https://github.com/crlandsc/torch-l1snr/blob/main/LICENSE) [![GitHub Repo stars](https://img.shields.io/github/stars/crlandsc/torch-l1snr)](https://github.com/crlandsc/torch-l1snr/stargazers)
+ L1 Signal-to-Noise Ratio (SNR) loss functions for audio source separation in PyTorch. This package provides four loss functions that combine implementations from recent academic research with novel extensions, designed to integrate easily into any audio separation training pipeline.
 
- A PyTorch implementation of L1-based Signal-to-Noise Ratio (SNR) loss functions for audio source separation. This package provides implementations and novel extensions based on concepts from recent academic papers, offering flexible and robust loss functions that can be easily integrated into any PyTorch-based audio separation pipeline.
+ The core `L1SNRLoss` is based on the loss function described in [[1]](https://arxiv.org/abs/2309.02539), while `L1SNRDBLoss` and `STFTL1SNRDBLoss` are extensions of the adaptive level-matching regularization technique proposed in [[2]](https://arxiv.org/abs/2501.16171). `MultiL1SNRDBLoss` combines both time-domain and spectrogram-domain losses into a single loss function for convenience and flexibility.
 
- The core `L1SNRLoss` is based on the loss function described in [[1]](https://arxiv.org/abs/2309.02539), while `L1SNRDBLoss` and `STFTL1SNRDBLoss` are extensions of the adaptive level-matching regularization technique proposed in [[2]](https://arxiv.org/abs/2501.16171).
+ ## Quick Start
+
+ ```python
+ import torch
+ from torch_l1_snr import MultiL1SNRDBLoss
+
+ # Create combined time + spectrogram domain loss function with adaptive regularization
+ loss_fn = MultiL1SNRDBLoss(name="multi_l1_snr_db_loss")
+
+ # Calculate loss between model output and target
+ estimates = torch.randn(4, 32000) # (batch, samples)
+ targets = torch.randn(4, 32000)
+ loss = loss_fn(estimates, targets)
+ loss.backward()
+ ```
 
  ## Features
 
  - **Time-Domain L1SNR Loss**: A basic, time-domain L1-SNR loss, based on [[1]](https://arxiv.org/abs/2309.02539).
  - **Regularized Time-Domain L1SNRDBLoss**: An extension of the L1SNR loss with adaptive level-matching regularization from [[2]](https://arxiv.org/abs/2501.16171), plus an optional L1 loss component.
  - **Multi-Resolution STFT L1SNRDBLoss**: A spectrogram-domain version of the loss from [[2]](https://arxiv.org/abs/2501.16171), calculated over multiple STFT resolutions.
- - **Modular Stem-based Loss**: A wrapper that combines time and spectrogram domain losses and can be configured to run on specific stems.
- - **Efficient & Robust**: Includes optimizations for pure L1 loss calculation and robust handling of `NaN`/`inf` values and short audio segments.
+ - **Combined Multi-Domain Loss**: `MultiL1SNRDBLoss` combines time-domain and spectrogram-domain losses into a single, weighted objective function.
+ - **L1 Loss Blending**: The `l1_weight` parameter allows mixing between L1SNR and standard L1 loss, softening the ["all-or-nothing" behavior](#all-or-nothing-behavior-and-l1_weight) of pure SNR losses for more nuanced separation.
+ - **Numerical Stability**: Robust handling of `NaN` and `inf` values during training.
+ - **Short Audio Fallback**: Graceful fallback to time-domain loss when audio is too short for STFT processing.
 
  ## Installation
 
  [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/) [![PyPI - Version](https://img.shields.io/pypi/v/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/) [![Number of downloads from PyPI per month](https://img.shields.io/pypi/dm/torch-l1-snr)](https://pypi.org/project/torch-l1-snr/)
 
- ## Install from PyPI
+ ### Install from PyPI
 
  ```bash
  pip install torch-l1-snr
  ```
 
- ## Install from GitHub
+ ### Install from GitHub
 
  ```bash
- pip install git+https://github.com/crlandsc/torch-l1snr.git
+ pip install git+https://github.com/crlandsc/torch-l1-snr.git
  ```
 
  Or, you can clone the repository and install it in editable mode for development:
 
  ```bash
- git clone https://github.com/crlandsc/torch-l1snr.git
- cd torch-l1snr
+ git clone https://github.com/crlandsc/torch-l1-snr.git
+ cd torch-l1-snr
  pip install -e .
  ```
 
@@ -72,27 +88,52 @@ pip install -e .
 
  - [PyTorch](https://pytorch.org/)
  - [torchaudio](https://pytorch.org/audio/stable/index.html)
+ - [NumPy](https://numpy.org/) (>=1.21.0)
 
  ## Supported Tensor Shapes
 
- All loss functions in this package (`L1SNRLoss`, `L1SNRDBLoss`, `STFTL1SNRDBLoss`, and `MultiL1SNRDBLoss`) accept standard audio tensors of shape `(batch, samples)` or `(batch, channels, samples)`. For 3D tensors, the channel and sample dimensions are flattened before the time-domain losses are calculated. For the spectrogram-domain loss, a separate STFT is computed for each channel.
+ All loss functions in this package (`L1SNRLoss`, `L1SNRDBLoss`, `STFTL1SNRDBLoss`, and `MultiL1SNRDBLoss`) accept standard audio tensors of shape `(batch, samples)`, `(batch, channels, samples)`, or `(batch, num_sources, channels, samples)`. For 3D and 4D tensors, the channel and sample dimensions are flattened before the time-domain losses are calculated. For the spectrogram-domain loss, a separate STFT is computed for each channel.
 
  ## Usage
 
- The loss functions can be imported directly from the `torch_l1snr` package.
+ The loss functions can be imported directly from the `torch_l1_snr` package.
+
+ ### Example: `L1SNRLoss` (Time Domain)
+
+ The simplest loss function: pure L1SNR without regularization.
+
+ ```python
+ import torch
+ from torch_l1_snr import L1SNRLoss
+
+ # Create dummy audio signals
+ estimates = torch.randn(4, 2, 44100) # Batch of 4, stereo, 44100 samples
+ actuals = torch.randn(4, 2, 44100)
+
+ # Basic L1SNR loss
+ loss_fn = L1SNRLoss(name="l1_snr_loss")
 
- ### Example: `L1SNRDBLoss` (Time Domain)
+ # Calculate loss
+ loss = loss_fn(estimates, actuals)
+ loss.backward()
+
+ print(f"L1SNRLoss: {loss.item()}")
+ ```
+
+ ### Example: `L1SNRDBLoss` (Time Domain with Regularization)
+
+ Adds adaptive level-matching regularization to prevent silence collapse.
 
  ```python
  import torch
- from torch_l1snr import L1SNRDBLoss
+ from torch_l1_snr import L1SNRDBLoss
 
  # Create dummy audio signals
- estimates = torch.randn(4, 32000) # Batch of 4, 32000 samples
- actuals = torch.randn(4, 32000)
+ estimates = torch.randn(4, 2, 44100) # Batch of 4, stereo, 44100 samples
+ actuals = torch.randn(4, 2, 44100)
 
  # Initialize the loss function with regularization enabled
- # l1_weight=0.1 blends L1SNR+Regularization with 10% L1 loss
+ # l1_weight=0.1 blends 90% L1SNR+Regularization with 10% L1 loss
  loss_fn = L1SNRDBLoss(
      name="l1_snr_db_loss",
      use_regularization=True, # Enable adaptive level-matching regularization
@@ -108,15 +149,17 @@ print(f"L1SNRDBLoss: {loss.item()}")
 
  ### Example: `STFTL1SNRDBLoss` (Spectrogram Domain)
 
+ Computes L1SNR loss across multiple STFT resolutions.
+
  ```python
  import torch
- from torch_l1snr import STFTL1SNRDBLoss
+ from torch_l1_snr import STFTL1SNRDBLoss
 
  # Create dummy audio signals
- estimates = torch.randn(4, 32000)
- actuals = torch.randn(4, 32000)
+ estimates = torch.randn(4, 2, 44100) # Batch of 4, stereo, 44100 samples
+ actuals = torch.randn(4, 2, 44100)
 
- # Initialize the loss function
+ # Initialize the loss function without regularization or traditional L1
  # Uses multiple STFT resolutions by default: [512, 1024, 2048] FFT sizes
  loss_fn = STFTL1SNRDBLoss(
      name="stft_l1_snr_db_loss",
@@ -130,20 +173,19 @@ loss.backward()
  print(f"STFTL1SNRDBLoss: {loss.item()}")
  ```
 
- ### Example: `MultiL1SNRDBLoss` for a Combined Time+Spectrogram Loss
+ ### Example: `MultiL1SNRDBLoss` (Combined Time + Spectrogram)
 
- This loss combines the time-domain and spectrogram-domain losses into a single, weighted objective function.
+ Combines time-domain and spectrogram-domain losses into a single weighted objective.
 
  ```python
  import torch
- from torch_l1snr import MultiL1SNRDBLoss
+ from torch_l1_snr import MultiL1SNRDBLoss
 
  # Create dummy audio signals
- # Shape: (batch, channels, samples)
- estimates = torch.randn(2, 2, 44100) # Batch of 2, stereo
- actuals = torch.randn(2, 2, 44100)
+ estimates = torch.randn(4, 2, 44100) # Batch of 4, stereo, 44100 samples
+ actuals = torch.randn(4, 2, 44100)
 
- # --- Configuration ---
+ # Initialize the multi-domain loss function
  loss_fn = MultiL1SNRDBLoss(
      name="multi_l1_snr_db_loss",
      weight=1.0, # Overall weight for this loss
@@ -152,6 +194,8 @@ loss_fn = MultiL1SNRDBLoss(
      use_time_regularization=True, # Enable regularization in time domain
      use_spec_regularization=False # Disable regularization in spec domain
  )
+
+ # Calculate loss
  loss = loss_fn(estimates, actuals)
  print(f"Multi-domain Loss: {loss.item()}")
  ```
@@ -164,27 +208,29 @@ The goal of these loss functions is to provide a perceptually-informed and robus
  - **Perceptual Relevance**: The loss is scaled to decibels (dB), which more closely aligns with human perception of loudness.
  - **Adaptive Regularization**: Prevents the model from collapsing to silent outputs by penalizing mismatches in the overall loudness (dBRMS) between the estimate and the target.
 
- #### Level-Matching Regularization
+ ### Level-Matching Regularization
 
  A key feature of `L1SNRDBLoss` is the adaptive regularization term, as described in [[2]](https://arxiv.org/abs/2501.16171). This component calculates the difference in decibel-scaled root-mean-square (dBRMS) levels between the estimated and actual signals. An adaptive weight (`lambda`) is applied to this difference, which increases when the model incorrectly silences a non-silent target. This encourages the model to learn the correct output level and specifically avoids the model collapsing to a trivial silent solution when uncertain.
 
- #### Multi-Resolution Spectrogram Analysis
+ ### Multi-Resolution Spectrogram Analysis
 
- The `STFTL1SNRDBLoss` module applies the L1SNRDB loss across multiple time-frequency resolutions. By analyzing the signal with different STFT window sizes and hop lengths, the loss function can capture a wider range of artifacts—from short, transient errors to longer, tonal discrepancies. This provides a more comprehensive error signal to the model during training.
+ The `STFTL1SNRDBLoss` module applies the L1SNRDB loss across multiple time-frequency resolutions. By analyzing the signal with different STFT window sizes and hop lengths, the loss function can capture a wider range of artifacts—from short, transient errors to longer, tonal discrepancies. This provides a more comprehensive error signal to the model during training. Using multiple resolutions for an STFT loss is common in many recent source separation works.
 
- #### "All-or-Nothing" Behavior and `l1_weight`
+ ### "All-or-Nothing" Behavior and `l1_weight`
 
- A characteristic of SNR-style losses is that they encourage the model to make definitive, "all-or-nothing" separation decisions. This can be highly effective for well-defined sources, as it pushes the model to be confident in its estimations. However, this can also lead to "confident errors," where the model completely removes a signal component it should have kept.
+ A characteristic of SNR-style losses (one I observed across many training experiments) is that they encourage the model to make definitive, "all-or-nothing" separation decisions. This can be highly effective for well-defined sources, as it pushes the model to be confident in its estimations. However, this can also lead to "confident errors," where the model completely removes a signal component it should have kept.
 
  While the Level-Matching Regularization prevents a *total collapse to silence*, it does not by itself solve this issue of overly confident, hard-boundary separation. To provide a tunable solution, this implementation introduces a novel `l1_weight` hyperparameter. This allows you to create a hybrid loss, blending the decisive L1SNR objective with a standard L1 loss to soften its "all-or-nothing"-style behavior and allow for more nuanced separation.
 
+ While this can potentially reduce metrics like SDR, I found that re-introducing some standard L1 loss allows for slightly more "smearing" of sound between sources, which masks large errors and tends to be more perceptually acceptable. I have no hard numbers on this, just my experience, so I recommend starting with no standard L1 mixed in (`l1_weight=0.0`) and slowly increasing from there based on your needs.
+
  - `l1_weight=0.0` (Default): Pure L1SNR (+ regularization).
  - `l1_weight=1.0`: Pure L1 loss.
  - `0.0 < l1_weight < 1.0`: A weighted combination of the two.
 
  The implementation is optimized for efficiency: if `l1_weight` is `0.0` or `1.0`, the unused loss component is not computed, saving computational resources.
 
- **Note on Gradient Balancing:** When blending losses (`0.0 < l1_weight < 1.0`), you may need to tune `l1_scale_time` and `l1_scale_spec`. This is to ensure the gradients of the L1 and L1SNR components are balanced, which is crucial for stable training. The default values provide a reasonable starting point, but monitoring the loss components is recommended to ensure they are scaled appropriately.
+ **Note on Gradient Balancing:** When blending losses (`0.0 < l1_weight < 1.0`), the implementation automatically scales the L1 component to approximately match the gradient magnitudes of the L1SNR component. This helps maintain stable training without manual tuning.
 
  ## Limitations
 
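Note: to make the Level-Matching Regularization description in the hunk above concrete, here is a minimal sketch of a dBRMS comparison; `dbrms` and `eps` are illustrative helpers, not part of the package's API.

```python
import torch

def dbrms(x: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # dB-scaled RMS level per batch item (illustrative helper)
    rms = torch.sqrt(torch.mean(x ** 2, dim=-1) + eps)
    return 20.0 * torch.log10(rms + eps)

estimates = torch.randn(4, 44100)
targets = torch.randn(4, 44100)

# Level mismatch in dB; per the description above, an adaptive weight
# (lambda) grows when a non-silent target is estimated as near-silent.
penalty = torch.abs(dbrms(estimates) - dbrms(targets)).mean()
```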
@@ -201,7 +247,7 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 
  ## Acknowledgments
 
- The loss functions implemented here are based on the work of the authors of the referenced papers.
+ The loss functions implemented here are largely based on the work of the authors of the referenced papers. Thank you for your research!
 
  ## References
 
@@ -209,4 +255,4 @@ The loss functions implemented here are based on the
 
  [2] K. N. Watcharasupat and A. Lerch, "Separate This, and All of these Things Around It: Music Source Separation via Hyperellipsoidal Queries," [arXiv:2501.16171](https://arxiv.org/abs/2501.16171).
 
- [3] K. N. Watcharasupat and A. Lerch, "A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems," Proceedings of the 25th International Society for Music Information Retrieval Conference, 2024. [arXiv:2406.18747](https://arxiv.org/abs/2406.18747)
+ [3] K. N. Watcharasupat and A. Lerch, "A Stem-Agnostic Single-Decoder System for Music Source Separation Beyond Four Stems," Proceedings of the 25th International Society for Music Information Retrieval Conference, 2024. [arXiv:2406.18747](https://arxiv.org/abs/2406.18747)
torch_l1_snr-0.1.0.dist-info/RECORD (added)

@@ -0,0 +1,7 @@
+ torch_l1_snr/__init__.py,sha256=mT6WxYYlshOwabs79jbUvmoNTn2pG19UKHdSrGVdbYc,244
+ torch_l1_snr/l1snr.py,sha256=F1NF3VGodaLWFtHs9xco9MbxfEJ01ip_JSHFS2GgBkU,34520
+ torch_l1_snr-0.1.0.dist-info/licenses/LICENSE,sha256=JdS2Pv6DDs3jvXHACGdcHYdiFMe9EO1XGeHkEHLTr8Y,1079
+ torch_l1_snr-0.1.0.dist-info/METADATA,sha256=JQjYJCQgzf5Ogj3GJ8OAioGdBtI8ddCo6Tnjf1JfMxs,13112
+ torch_l1_snr-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ torch_l1_snr-0.1.0.dist-info/top_level.txt,sha256=VUo0QlGvu7tOF8BKWWDoIiLlhcAcetYwR6c8Ldhhpco,13
+ torch_l1_snr-0.1.0.dist-info/RECORD,,
torch_l1_snr-0.0.4.dist-info/WHEEL → torch_l1_snr-0.1.0.dist-info/WHEEL

@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.2)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
torch_l1_snr-0.1.0.dist-info/top_level.txt (added)

@@ -0,0 +1 @@
+ torch_l1_snr
torch_l1_snr-0.0.4.dist-info/RECORD (removed)

@@ -1,7 +0,0 @@
- torch_l1_snr-0.0.4.dist-info/licenses/LICENSE,sha256=JdS2Pv6DDs3jvXHACGdcHYdiFMe9EO1XGeHkEHLTr8Y,1079
- torch_l1snr/__init__.py,sha256=pR9jg3fjTKt_suZoVDC67tqB7EWRkbfaXaPP7pYQrlQ,220
- torch_l1snr/l1snr.py,sha256=aqmtNfT_8A0IRI9jiVGwNse3igBvelQGKnjfe23Xh7w,35304
- torch_l1_snr-0.0.4.dist-info/METADATA,sha256=pB7DvZ6BdvCshcDqOTkJNqekh97qXNaPc7tnNzBqJVk,11143
- torch_l1_snr-0.0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- torch_l1_snr-0.0.4.dist-info/top_level.txt,sha256=NfaRND6pcjZ7-035d4XAg8xJuz31EEU210Y9xWeFOxc,12
- torch_l1_snr-0.0.4.dist-info/RECORD,,
torch_l1_snr-0.0.4.dist-info/top_level.txt (removed)

@@ -1 +0,0 @@
- torch_l1snr