flamo 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
flamo/auxiliary/config/config.py CHANGED
@@ -33,7 +33,8 @@ class HomogeneousFDNConfig(BaseModel):
     delays_grad: bool = False
     mixing_matrix_grad: bool = True
     attenuation_grad: bool = True
-
+    is_delay_int: bool = True
+
     def __init__(self, **data):
         super().__init__(**data)
         if self.delays is None:
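The new is_delay_int field is forwarded to the delay lines as isint (see the flamo/auxiliary/reverb.py hunk below), so an FDN can now be built with fractional instead of integer delays. A minimal sketch of the option; the import path is assumed from the package layout and is not shown in this diff:

```python
# Hypothetical usage sketch; the module path is assumed, not confirmed by this diff.
from flamo.auxiliary.config.config import HomogeneousFDNConfig

cfg = HomogeneousFDNConfig(is_delay_int=False)  # request fractional (non-integer) delay lines
print(cfg.is_delay_int)  # False; the default stays True, preserving the 0.1.4 behaviour
```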
flamo/auxiliary/eq.py CHANGED
@@ -86,7 +86,8 @@ def geq(

     for band in range(num_bands):
         if band == 0:
-            b = torch.tensor([db2mag(gain_db[band]), 0, 0], device=device)
+            b = torch.zeros(3, device=device)
+            b[0] = db2mag(gain_db[band])
             a = torch.tensor([1, 0, 0], device=device)
         elif band == 1:
             b, a = shelving_filter(
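The geq change avoids building b with torch.tensor([...]) from a value that may carry gradients: torch.tensor copies the data and detaches it from the autograd graph, while writing into a pre-allocated zeros tensor keeps the graph intact. A standalone sketch of that PyTorch behaviour (not flamo-specific):

```python
import torch

g = torch.tensor(2.0, requires_grad=True)   # stand-in for db2mag(gain_db[band])

# Old pattern: the list constructor copies g's value and detaches it
# (PyTorch may also emit a copy-construct warning here).
b_old = torch.tensor([g, 0.0, 0.0])
print(b_old.requires_grad)   # False -> no gradient reaches g

# New pattern: allocate zeros, then assign in place; the graph is preserved.
b_new = torch.zeros(3)
b_new[0] = g
print(b_new.requires_grad)   # True
b_new.sum().backward()
print(g.grad)                # tensor(1.)
```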
flamo/auxiliary/reverb.py CHANGED
@@ -129,7 +129,7 @@ class HomogeneousFDN:
             size=(self.N,),
             max_len=delay_lines.max(),
             nfft=self.config_dict.nfft,
-            isint=True,
+            isint=self.config_dict.is_delay_int,
             requires_grad=self.config_dict.delays_grad,
             alias_decay_db=self.config_dict.alias_decay_db,
             device=self.config_dict.device,
flamo/auxiliary/scattering.py CHANGED
@@ -245,13 +245,11 @@ def get_random_shifts(N, sparsity_vect, pulse_size):

 def hadamard_matrix(N):
     """Generate a hadamard matrix of size N"""
-    X = np.array([[1]])
+    X = np.array([[1]])
     # Create a Hadamard matrix of the specified order
-    # TODO remove for loop becuase all matrices look the same
     while X.shape[0] < N:
         # Kronecker product to generate a larger Hadamard matrix
         X = np.kron(X, np.array([[1, 1], [1, -1]])) / np.sqrt(2)
-
     return X


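The hadamard_matrix helper keeps the 1/sqrt(2) normalization at every Kronecker step, so the result is orthogonal and therefore energy preserving when used as an FDN feedback matrix. A quick check of that property (same recursion as above, NumPy only):

```python
import numpy as np

def hadamard_matrix(N):
    # Kronecker recursion: each step doubles the size; the 1/sqrt(2) factor
    # keeps every row and column at unit norm.
    X = np.array([[1.0]])
    while X.shape[0] < N:
        X = np.kron(X, np.array([[1, 1], [1, -1]])) / np.sqrt(2)
    return X

H = hadamard_matrix(8)
print(np.allclose(H @ H.T, np.eye(8)))  # True: H is orthogonal
```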
flamo/auxiliary/velvet.py ADDED
@@ -0,0 +1,114 @@
+"""
+Velvet Noise implementations for FLAMO
+
+Velvet noise is a sparse pseudo-random noise used in artificial reverberation.
+It consists of sample values of +1, -1, and 0, with the non-zero samples
+occurring at pseudo-random locations.
+
+References:
+    Välimäki, V., & Prawda, K. (2021). Late-Reverberation Synthesis Using
+    Interleaved Velvet-Noise Sequences. IEEE/ACM Transactions on Audio,
+    Speech, and Language Processing, 29, 1149-1160.
+"""
+
+import torch
+import torch.nn as nn
+import math
+from typing import Optional
+from flamo.processor.dsp import Filter, parallelFilter
+
+
+class VelvetNoiseFilter(Filter):
+    """
+    TODO
+    Args:
+        size: Size of the filter parameters (length, output_channels, input_channels)
+        density: Number of impulses per second
+        delta: Scaling factor for impulse range (0 < delta <= 1)
+            When delta=0.25, impulses only appear in first 25% of each grid
+        sample_rate: Sample rate in Hz (default: 48000)
+        nfft: Number of FFT points required to compute the frequency response
+        requires_grad: Whether the filter parameters require gradients
+        alias_decay_db: The decaying factor in dB for the time anti-aliasing envelope
+        device: The device of the constructed tensors
+    """
+
+    def __init__(
+        self,
+        size: tuple = (1, 1, 1),
+        density: float = 1000.0,
+        delta: float = 1.0,
+        sample_rate: int = 48000,
+        nfft: int = 2**11,
+        requires_grad: bool = False,
+        alias_decay_db: float = 0.0,
+        device: Optional[str] = None,
+    ):
+        self.density = density
+        self.sample_rate = sample_rate
+        self.Td = sample_rate / density  # Average distance between impulses
+        if not 0 < delta <= 1:
+            raise ValueError("Delta must be in range (0, 1]")
+
+        self.delta = delta
+        # Create mapping function that generates velvet noise
+        map = lambda x: self._generate_velvet_impulse_response(x)
+        super().__init__(
+            size=size,
+            nfft=nfft,
+            map=map,
+            requires_grad=requires_grad,
+            alias_decay_db=alias_decay_db,
+            device=device,
+        )
+
+    def _generate_velvet_impulse_response(self, param: torch.Tensor) -> torch.Tensor:
+        """Generate velvet noise impulse response from parameters."""
+        # Calculate grid size (average distance between impulses)
+
+
+        result = torch.zeros_like(param)
+
+        for out_ch in range(self.param.shape[1]):
+            for in_ch in range(self.param.shape[2]):
+                # Extract parameters for this channel pair
+                result[:, out_ch, in_ch] = self._generate_velvet_sequence()
+
+        return result
+
+    def _generate_velvet_sequence(
+        self,
+    ) -> torch.Tensor:
+        """Generate a single velvet noise sequence."""
+
+        # Add random jitter to each position (uniform distribution)
+        jitter_factors = torch.rand(self.floor_impulses)
+        impulse_indices = torch.ceil(self.grid + self.delta * jitter_factors * (self.Td - 1)).long()
+
+        # first impulse is at position 0 and all indices are within bounds
+        impulse_indices[0] = 0
+        impulse_indices = torch.clamp(impulse_indices, max=self.param.shape[0] - 1)
+
+        # Generate random signs (+1 or -1)
+        signs = 2 * torch.randint(0, 2, (self.floor_impulses,)) - 1
+
+        # Construct sparse signal
+        sequence = torch.zeros(self.size[0], device=self.device)
+        sequence[impulse_indices] = signs.float()
+
+        return sequence
+
+    def initialize_class(self):
+        r"""
+        Initializes the Filter module.
+
+        This method checks the shape of the gain parameters, computes the frequency response of the filter,
+        and computes the frequency convolution function.
+        """
+        self.check_param_shape()
+        self.get_io()
+        num_impulses = self.param.shape[0] / self.Td
+        self.floor_impulses = math.floor(num_impulses)
+        self.grid = torch.arange(self.floor_impulses) * self.Td
+        self.get_freq_response()
+        self.get_freq_convolve()
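For reference, the sequence built by _generate_velvet_sequence follows the standard velvet-noise recipe: one impulse per grid period of Td = sample_rate / density samples, placed at a jittered position inside the period with a random sign. A self-contained sketch of the same idea, independent of the Filter API (the function name and defaults are illustrative only):

```python
import torch

def velvet_noise(length: int, density: float = 1000.0, fs: int = 48000) -> torch.Tensor:
    """Sparse +/-1 sequence: one impulse per grid period Td = fs / density samples."""
    Td = fs / density                              # average spacing between impulses
    num_impulses = int(length / Td)
    grid = torch.arange(num_impulses) * Td         # start of each grid period
    jitter = torch.rand(num_impulses) * (Td - 1)   # random offset inside the period
    idx = torch.clamp((grid + jitter).long(), max=length - 1)
    signs = 2 * torch.randint(0, 2, (num_impulses,)) - 1
    seq = torch.zeros(length)
    seq[idx] = signs.float()
    return seq

vn = velvet_noise(4800)          # 100 ms at 48 kHz -> roughly 100 impulses
print(int((vn != 0).sum()))      # close to 100 (index collisions are possible)
```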
flamo/optimize/loss.py CHANGED
@@ -77,6 +77,7 @@ class mse_loss(nn.Module):
         self.nfft = nfft
         self.device = device
         self.mse_loss = nn.MSELoss()
+        self.name = "MSE"

     def forward(self, y_pred, y_true):
         """
flamo/processor/dsp.py CHANGED
@@ -1,4 +1,5 @@
 import torch
+import math
 from typing import Optional
 import torch.nn as nn
 import torch.nn.functional as F
@@ -16,7 +17,8 @@ from flamo.auxiliary.eq import (
     geq,
     accurate_geq)
 from flamo.auxiliary.scattering import (
-    ScatteringMapping)
+    ScatteringMapping,
+    hadamard_matrix)
 # ============================= TRANSFORMS ================================


@@ -928,7 +930,7 @@ class parallelFilter(Filter):

 class ScatteringMatrix(Filter):
     r"""
-    A class representing a set of Scattering Filter matrix.
+    A class representing a Scattering Filter matrix.

     The :class:`ScatteringMatrix` was designed as filter feedback matrix of the
     Feedback Delay Network (FDN) reverberator structure.
@@ -1075,6 +1077,145 @@ class ScatteringMatrix(Filter):
         self.get_freq_response()
         self.get_freq_convolve()

+class VelvetNoiseMatrix(Filter):
+    r"""
+    A class representing a Velvet Noise Filter matrix.
+
+    The :class:`VelvetNoiseMatrix` was designed as filter feedback matrix of the
+    Feedback Delay Network (FDN) reverberator structure.
+    NOTE: It is not learnable.
+
+    The input tensor is expected to be a complex-valued tensor representing the
+    frequency response of the input signal. The input tensor is then convolved in
+    frequency domain with the filter frequency responses to produce the output tensor.
+
+    Shape:
+        - input: :math:`(B, M, N_{in}, ...)`
+        - param: :math:`(N_{taps}, N_{out}, N_{in})`
+        - output: :math:`(B, M, N_{out}, ...)`
+
+    where :math:`B` is the batch size, :math:`M` is the number of frequency bins,
+    :math:`N_{in}` is the number of input channels, :math:`N_{out}` is the number of output channels,
+    and :math:`N_{taps}` is the number of filter parameters per input-output channel pair. By default, :math:`N_{taps}`
+    correspond to the length of the FIR filters.
+    Ellipsis :math:`(...)` represents additional dimensions.
+
+    **Arguments / Attributes**:
+        - **size** (tuple): The size of the filter parameters. Default: (1, 1, 1).
+        - **nfft** (int): The number of FFT points required to compute the frequency response. Default: 2 ** 11.
+        - **density** (float): Average number of pulses per sample. Default: 0.03.
+        - **gain_per_sample** (float): The gain per sample. This is useful when ensuring homogeneous decay in FDNs Default: 0.9999.
+        - **m_L** (torch.tensor): The leftmost delay vector. Default: None.
+        - **m_R** (torch.tensor): The rightmost delay vector. Default: None.
+        - **requires_grad** (bool): Whether the filter parameters require gradients. Default: False.
+        - **alias_decay_db** (float): The decaying factor in dB for the time anti-aliasing envelope. The decay refers to the attenuation after nfft samples. Default: 0.
+        - **device** (str): The device of the constructed tensors. Default: None.
+
+    **Attributes**:
+        - **param** (nn.Parameter): The parameters of the Filter module.
+        - **map** (function): Mapping function to ensure orthogonality of :math:`\mathbf{U}_k`.
+        - **map_filter** (ScatteringMapping): Mapping function to generate the filter matrix.
+        - **fft** (function): The FFT function. Calls the torch.fft.rfft function.
+        - **ifft** (function): The Inverse FFT function. Calls the torch.fft.irfft.
+        - **gamma** (torch.Tensor): The gamma value used for time anti-aliasing envelope.
+        - **new_value** (int): Flag indicating if new values have been assigned.
+        - **freq_response** (torch.Tensor): The frequency response of the filter.
+        - **freq_convolve** (function): The frequency convolution function.
+        - **input_channels** (int): The number of input channels.
+        - **output_channels** (int): The number of output channels.
+
+    Refereces:
+        - Schlecht, S. J., & Habets, E. A. (2020). Scattering in feedback delay networks. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 28, 1915-1924.
+
+    """
+
+    def __init__(
+        self,
+        size: tuple = (1, 1, 1),
+        nfft: int = 2**11,
+        density: float = 0.03,
+        gain_per_sample: float = 0.9999,
+        m_L: torch.tensor = None,
+        m_R: torch.tensor = None,
+        alias_decay_db: float = 0.0,
+        device: Optional[str] = None,
+    ):
+        self.sparsity = 1/density
+        self.gain_per_sample = gain_per_sample
+        self.pulse_size = 1
+        self.m_L = m_L
+        self.m_R = m_R
+        map = lambda x: x
+        assert size[1] == size[2], "Matrix must be square"
+        assert (size[1] & (size[1] - 1)) == 0, "At the moment the Matrix must have dimensions which are powers of 2"
+        super().__init__(
+            size=size,
+            nfft=nfft,
+            map=map,
+            requires_grad=False,
+            alias_decay_db=alias_decay_db,
+            device=device,
+        )
+        self.assign_value(torch.tensor(hadamard_matrix(self.size[-1]), device=self.device).unsqueeze(0).repeat(self.size[0], 1, 1))
+
+    def get_freq_convolve(self):
+        r"""
+        Computes the frequency convolution function.
+
+        The frequency convolution is computed using the :func:`torch.einsum` function.
+
+        **Arguments**:
+            **x** (torch.Tensor): Input tensor.
+
+        **Returns**:
+            torch.Tensor: Output tensor after frequency convolution.
+        """
+        self.freq_convolve = lambda x, param: torch.einsum(
+            "fmn,bfn...->bfm...", self.freq_response(param), x
+        )
+
+    def get_freq_response(self):
+        r"""
+        Computes the frequency response of the filter.
+
+        The mapping function is applied to the filter parameters to obtain the filter impulse responses.
+        Then, the time anti-aliasing envelope is computed and applied to the impulse responses. Finally,
+        the frequency response is obtained by computing the FFT of the filter impulse responses.
+        """
+        L = (
+            (sum(self.map_filter.shifts).max() + 1).item()
+            + self.m_L.max().item()
+            + self.m_R.max().item()
+        )
+        self.freq_response = lambda param: self.fft(
+            self.map_filter(self.map(param))
+            * (self.gamma ** torch.arange(0, L, device=self.device)).view(
+                -1, *tuple([1 for i in self.size[1:]])
+            )
+        )
+
+    def initialize_class(self):
+        r"""
+        Initializes the ScatteringMatrix module.
+
+        This method creates the mapping to generate the filter matrix, checks the shape of the gain parameters, computes the frequency response of the filter,
+        and computes the frequency convolution function.
+        """
+        self.map_filter = ScatteringMapping(
+            self.size[-1],
+            n_stages=self.size[0] - 1,
+            sparsity=math.floor(self.sparsity),
+            gain_per_sample=self.gain_per_sample,
+            pulse_size=self.pulse_size,
+            m_L=self.m_L,
+            m_R=self.m_R,
+            device=self.device,
+        )
+        self.check_param_shape()
+        self.get_io()
+        self.get_freq_response()
+        self.get_freq_convolve()
+

 class Biquad(Filter):
     r"""
@@ -2410,7 +2551,7 @@ class AccurateGEQ(Filter):
     :math:`N_{in}` is the number of input channels, :math:`N_{out}` is the number of output channels,
     The :attr:'param' attribute represent the command gains of each band + shelving filters. The first dimension of the :attr:'param' tensor corresponds to the number of command gains/filters :math:`K`.
     Ellipsis :math:`(...)` represents additional dimensions (not tested).
-    NOTE I: It is not differentiable
+    NOTE I: It is not learnable
     NOTE II: To avoid NaN or Inf values in the frequency response, the operations
     performed in the frequency domain are done in double precision. The original
     data type is restored at the end of the computation.
@@ -2531,7 +2672,7 @@ class parallelAccurateGEQ(AccurateGEQ):
     r"""
     Parallel counterpart of the :class:`GEQ` class
     For information about **attributes** and **methods** see :class:`flamo.processor.dsp.GEQ`.
-    NOTE: It is not differentiable
+    NOTE: It is not learnable.

     Shape:
         - input: :math:`(B, M, N, ...)`
@@ -2743,7 +2884,7 @@ class Delay(DSP):
         """
         m = self.get_delays()
         if self.isint:
-            self.freq_response = lambda param: (self.gamma ** m(param)) * torch.exp(
+            self.freq_response = lambda param: (self.gamma ** m(param).round()) * torch.exp(
                 -1j
                 * torch.einsum(
                     "fo, omn -> fmn",
@@ -2880,14 +3021,24 @@ class parallelDelay(Delay):
         Computes the frequency response of the delay module.
         """
         m = self.get_delays()
-        self.freq_response = lambda param: (self.gamma ** m(param)) * torch.exp(
-            -1j
-            * torch.einsum(
-                "fo, on -> fn",
-                self.omega,
-                m(param).unsqueeze(0),
+        if self.isint:
+            self.freq_response = lambda param: (self.gamma ** m(param).round()) * torch.exp(
+                -1j
+                * torch.einsum(
+                    "fo, on -> fn",
+                    self.omega,
+                    m(param).round().unsqueeze(0),
+                )
+            )
+        else:
+            self.freq_response = lambda param: (self.gamma ** m(param)) * torch.exp(
+                -1j
+                * torch.einsum(
+                    "fo, on -> fn",
+                    self.omega,
+                    m(param).unsqueeze(0),
+                )
             )
-        )

     def get_io(self):
         r"""
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flamo
-Version: 0.1.4
+Version: 0.1.6
 Summary: An Open-Source Library for Frequency-Domain Differentiable Audio Processing
 Project-URL: Homepage, https://github.com/gdalsanto/flamo
 Project-URL: Issues, https://github.com/gdalsanto/flamo/issues
@@ -38,6 +38,7 @@ Requires-Dist: numpy
 Requires-Dist: pydantic
 Requires-Dist: pyfar
 Requires-Dist: pysoundfile
+Requires-Dist: pyyaml
 Requires-Dist: scipy
 Requires-Dist: torch
 Requires-Dist: torchaudio
@@ -70,7 +71,7 @@ Utilities, system designers, and optimization - in `flamo.processor.system`:
 - **Shell**: Container class for safe interaction between system, dataset, and loss functions

 Optimization - in `flamo.optimize`:
-- **Trianer** : Handling of the training and validation steps
+- **Trainer** : Handling of the training and validation steps
 - **Dataset** : Customizable dataset class and helper methods

 ---
@@ -2,22 +2,23 @@ flamo/__init__.py,sha256=ujezWOJfD7DUoj4q1meeMUnB97rOEtNR7mYw_PE9LMg,49
 flamo/functional.py,sha256=9wl6fHkc8KMB5IMvbd_K7-z8Z2Miw0qOsNxWPItliPU,35138
 flamo/utils.py,sha256=ypGKSABZMphgIrjCKgCH-zgR7BaupRbyzuUhsZFqAAM,3350
 flamo/auxiliary/__init__.py,sha256=7lVNh8OxHloZ4KPmp-iTUJnUbi8XbuRzGaQ3Z-NKXio,42
-flamo/auxiliary/eq.py,sha256=dkULcVlQrL3LKi4ejFnWb6VSWSmEb4PYSNLrOQMvGws,6767
+flamo/auxiliary/eq.py,sha256=eIWMIq0ggizXLhTdeWWbgBXWUFXCJyoEbkBH7Gzasao,6779
 flamo/auxiliary/filterbank.py,sha256=02w8dI8HoNDtKpdVhSJkIkd-h-KNXvZtivf3l4_ozzU,9866
 flamo/auxiliary/minimize.py,sha256=fMTAAAk9yD7Y4luKS4XA1-HTq44xo2opq_dRPRrhlIY,2474
-flamo/auxiliary/reverb.py,sha256=Rmv5oCW49MsfuJnM7ujZnJRQB6y1hQa1KAn1Hki2Bwk,31611
-flamo/auxiliary/scattering.py,sha256=ITPT0TTOAROy3G0_kpykffRSqjoA9dFJ2LnaLxtUMF4,9482
-flamo/auxiliary/config/config.py,sha256=7WYQsk3rfzb-OOY5JyRv6GzXPv8deLL_Viv1EbAUwu4,2859
+flamo/auxiliary/reverb.py,sha256=9iKSuyuqRiHGGvaj0eizqVpu2V7plsX13OWiB6o1whU,31636
+flamo/auxiliary/scattering.py,sha256=qlK8cynrpde56yLlbPuScC0Y1VmsPb0SFXl6Xisv6hA,9420
+flamo/auxiliary/velvet.py,sha256=B4pYEnhaQPkh02pxqiGdAhLRX2g-eWtHezphi0_h4Qs,4201
+flamo/auxiliary/config/config.py,sha256=CxXj-8sLq0_m9KyLg1a6NwLoK1UvTz3i0jZOLraq14I,2893
 flamo/optimize/__init__.py,sha256=grgxLmQ7m-c9MvRdIejmEAaaajfBwgeaZAv2qjHIvPw,65
 flamo/optimize/dataset.py,sha256=2mfzsnyX_bzavXouII9ee_pd6ti4lv215ieGJHscceI,5829
-flamo/optimize/loss.py,sha256=jD1n4r7olY-C1CNs8LZZr3BStFweJ1bqBeA4aJbVvS4,33440
+flamo/optimize/loss.py,sha256=h6EeqjdX5P1SqDBKBavSxV25VBgnYK8tuX91wk6lw_g,33466
 flamo/optimize/surface.py,sha256=uvsgxLFSvJ18s8kPcb22G3W1rgycXP1nNX0q48Pda2g,26135
 flamo/optimize/trainer.py,sha256=he4nUjLC-3RTlxxBIw33r5k8mQfgAGvN1wpPBAWCjVo,12045
 flamo/optimize/utils.py,sha256=R5-KoZagRho3eykY88pC3UB2mc5SsE4Yv9X-ogskXdA,1610
 flamo/processor/__init__.py,sha256=paGdxGVZgA2VAs0tBwRd0bobzGxeyK79DS7ZGO8drkI,41
-flamo/processor/dsp.py,sha256=r5C3UyamYTxd-SQgN_nNQMxB848nFAmO3iX-hLRx8D0,119609
+flamo/processor/dsp.py,sha256=n92YJPrES-ydwHgXmZ9RkFevIC3n-Wh4X8I1QNZqcV0,126378
 flamo/processor/system.py,sha256=9XwLtaGEVs9glVOFvyiPnQpsnR_Wjrv6k1i1qCs8D1Q,42516
-flamo-0.1.4.dist-info/METADATA,sha256=cXMr__3dyaarBscLyCbSEM3yBNHufxn-9NpTJNtPmhw,7803
-flamo-0.1.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-flamo-0.1.4.dist-info/licenses/LICENSE,sha256=smMocRH7xdPT5RvFNqSLtbSNzohXJM5G_rX1Qaej6vg,1120
-flamo-0.1.4.dist-info/RECORD,,
+flamo-0.1.6.dist-info/METADATA,sha256=n7uiBeQ1bqIP4Xwpmyx7Ah3FxCl2Xowucprz00lk2BM,7825
+flamo-0.1.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+flamo-0.1.6.dist-info/licenses/LICENSE,sha256=smMocRH7xdPT5RvFNqSLtbSNzohXJM5G_rX1Qaej6vg,1120
+flamo-0.1.6.dist-info/RECORD,,
File without changes