flamo 0.2.4__tar.gz → 0.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (257)
  1. {flamo-0.2.4 → flamo-0.2.6}/PKG-INFO +1 -1
  2. {flamo-0.2.4 → flamo-0.2.6}/examples/e0_siso.py +1 -1
  3. {flamo-0.2.4 → flamo-0.2.6}/examples/e1_mimo.py +1 -1
  4. {flamo-0.2.4 → flamo-0.2.6}/examples/e2_chaining_filters.py +1 -1
  5. {flamo-0.2.4 → flamo-0.2.6}/examples/e3_series_class.py +1 -1
  6. {flamo-0.2.4 → flamo-0.2.6}/examples/e4_recursion_nn.py +1 -1
  7. {flamo-0.2.4 → flamo-0.2.6}/examples/e7_biquad.py +5 -3
  8. {flamo-0.2.4 → flamo-0.2.6}/examples/e7_biquad_nn.py +1 -1
  9. {flamo-0.2.4 → flamo-0.2.6}/examples/e7_geq.py +2 -0
  10. {flamo-0.2.4 → flamo-0.2.6}/examples/e7_peq.py +2 -0
  11. {flamo-0.2.4 → flamo-0.2.6}/examples/e7_svf.py +2 -0
  12. {flamo-0.2.4 → flamo-0.2.6}/flamo/optimize/dataset.py +5 -1
  13. {flamo-0.2.4 → flamo-0.2.6}/flamo/optimize/trainer.py +8 -5
  14. {flamo-0.2.4 → flamo-0.2.6}/flamo/processor/dsp.py +415 -0
  15. {flamo-0.2.4 → flamo-0.2.6}/pyproject.toml +1 -1
  16. {flamo-0.2.4 → flamo-0.2.6}/.gitignore +0 -0
  17. {flamo-0.2.4 → flamo-0.2.6}/2025_FLAMO_ICASSP_DalSantoDeBortoli_poster.pdf +0 -0
  18. {flamo-0.2.4 → flamo-0.2.6}/LICENSE +0 -0
  19. {flamo-0.2.4 → flamo-0.2.6}/README.md +0 -0
  20. {flamo-0.2.4 → flamo-0.2.6}/examples/e1_vn.py +0 -0
  21. {flamo-0.2.4 → flamo-0.2.6}/examples/e4_recursion.py +0 -0
  22. {flamo-0.2.4 → flamo-0.2.6}/examples/e5_shell.py +0 -0
  23. {flamo-0.2.4 → flamo-0.2.6}/examples/e6_anti_aliasing.py +0 -0
  24. {flamo-0.2.4 → flamo-0.2.6}/examples/e8_active_acoustics.py +0 -0
  25. {flamo-0.2.4 → flamo-0.2.6}/examples/e8_colorless_fdn.py +0 -0
  26. {flamo-0.2.4 → flamo-0.2.6}/examples/e8_colorless_sfdn.py +0 -0
  27. {flamo-0.2.4 → flamo-0.2.6}/examples/e8_fdn.py +0 -0
  28. {flamo-0.2.4 → flamo-0.2.6}/examples/e9_loss_profile.py +0 -0
  29. {flamo-0.2.4 → flamo-0.2.6}/examples/run_all.py +0 -0
  30. {flamo-0.2.4 → flamo-0.2.6}/flamo/__init__.py +0 -0
  31. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/__init__.py +0 -0
  32. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/config/config.py +0 -0
  33. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/eq.py +0 -0
  34. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/filterbank.py +0 -0
  35. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/minimize.py +0 -0
  36. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/reverb.py +0 -0
  37. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/scattering.py +0 -0
  38. {flamo-0.2.4 → flamo-0.2.6}/flamo/auxiliary/velvet.py +0 -0
  39. {flamo-0.2.4 → flamo-0.2.6}/flamo/functional.py +0 -0
  40. {flamo-0.2.4 → flamo-0.2.6}/flamo/optimize/__init__.py +0 -0
  41. {flamo-0.2.4 → flamo-0.2.6}/flamo/optimize/loss.py +0 -0
  42. {flamo-0.2.4 → flamo-0.2.6}/flamo/optimize/surface.py +0 -0
  43. {flamo-0.2.4 → flamo-0.2.6}/flamo/optimize/utils.py +0 -0
  44. {flamo-0.2.4 → flamo-0.2.6}/flamo/processor/__init__.py +0 -0
  45. {flamo-0.2.4 → flamo-0.2.6}/flamo/processor/system.py +0 -0
  46. {flamo-0.2.4 → flamo-0.2.6}/flamo/utils.py +0 -0
  47. {flamo-0.2.4 → flamo-0.2.6}/notebooks/e7_biquad.ipynb +0 -0
  48. {flamo-0.2.4 → flamo-0.2.6}/notebooks/e8_colorless_fdn.ipynb +0 -0
  49. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker1.wav +0 -0
  50. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker10.wav +0 -0
  51. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker11.wav +0 -0
  52. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker12.wav +0 -0
  53. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker13.wav +0 -0
  54. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker14.wav +0 -0
  55. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker2.wav +0 -0
  56. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker3.wav +0 -0
  57. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker5.wav +0 -0
  58. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker6.wav +0 -0
  59. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker7.wav +0 -0
  60. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker8.wav +0 -0
  61. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic1_speaker9.wav +0 -0
  62. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker1.wav +0 -0
  63. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker10.wav +0 -0
  64. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker11.wav +0 -0
  65. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker12.wav +0 -0
  66. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker13.wav +0 -0
  67. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker14.wav +0 -0
  68. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker2.wav +0 -0
  69. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker3.wav +0 -0
  70. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker5.wav +0 -0
  71. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker6.wav +0 -0
  72. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker7.wav +0 -0
  73. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker8.wav +0 -0
  74. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic2_speaker9.wav +0 -0
  75. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker1.wav +0 -0
  76. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker10.wav +0 -0
  77. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker11.wav +0 -0
  78. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker12.wav +0 -0
  79. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker13.wav +0 -0
  80. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker14.wav +0 -0
  81. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker2.wav +0 -0
  82. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker3.wav +0 -0
  83. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker5.wav +0 -0
  84. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker6.wav +0 -0
  85. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker7.wav +0 -0
  86. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker8.wav +0 -0
  87. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic3_speaker9.wav +0 -0
  88. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker1.wav +0 -0
  89. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker10.wav +0 -0
  90. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker11.wav +0 -0
  91. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker12.wav +0 -0
  92. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker13.wav +0 -0
  93. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker14.wav +0 -0
  94. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker2.wav +0 -0
  95. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker3.wav +0 -0
  96. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker5.wav +0 -0
  97. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker6.wav +0 -0
  98. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker7.wav +0 -0
  99. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker8.wav +0 -0
  100. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic4_speaker9.wav +0 -0
  101. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker1.wav +0 -0
  102. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker10.wav +0 -0
  103. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker11.wav +0 -0
  104. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker12.wav +0 -0
  105. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker13.wav +0 -0
  106. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker14.wav +0 -0
  107. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker2.wav +0 -0
  108. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker3.wav +0 -0
  109. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker5.wav +0 -0
  110. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker6.wav +0 -0
  111. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker7.wav +0 -0
  112. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker8.wav +0 -0
  113. {flamo-0.2.4 → flamo-0.2.6}/rirs/Otala-2024.05.10/mic5_speaker9.wav +0 -0
  114. {flamo-0.2.4 → flamo-0.2.6}/rirs/arni_35_3541_4_2.wav +0 -0
  115. {flamo-0.2.4 → flamo-0.2.6}/rirs/s3_r4_o.wav +0 -0
  116. {flamo-0.2.4 → flamo-0.2.6}/sphinx/Makefile +0 -0
  117. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/auxiliary/eq.doctree +0 -0
  118. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/auxiliary/filterbank.doctree +0 -0
  119. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/auxiliary/minimize.doctree +0 -0
  120. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/auxiliary/reverb.doctree +0 -0
  121. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/auxiliary/scattering.doctree +0 -0
  122. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/environment.pickle +0 -0
  123. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/functional.doctree +0 -0
  124. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/index.doctree +0 -0
  125. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/optimize/dataset.doctree +0 -0
  126. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/optimize/loss.doctree +0 -0
  127. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/optimize/trainer.doctree +0 -0
  128. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/optimize/utils.doctree +0 -0
  129. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/processor/dsp.doctree +0 -0
  130. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/doctrees/processor/system.doctree +0 -0
  131. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/.buildinfo +0 -0
  132. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/.buildinfo.bak +0 -0
  133. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/.nojekyll +0 -0
  134. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/auxiliary/eq.rst.txt +0 -0
  135. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/auxiliary/filterbank.rst.txt +0 -0
  136. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/auxiliary/minimize.rst.txt +0 -0
  137. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/auxiliary/reverb.rst.txt +0 -0
  138. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/auxiliary/scattering.rst.txt +0 -0
  139. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/functional.rst.txt +0 -0
  140. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/index.rst.txt +0 -0
  141. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/optimize/dataset.rst.txt +0 -0
  142. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/optimize/loss.rst.txt +0 -0
  143. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/optimize/trainer.rst.txt +0 -0
  144. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/optimize/utils.rst.txt +0 -0
  145. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/processor/dsp.rst.txt +0 -0
  146. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_sources/processor/system.rst.txt +0 -0
  147. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/_sphinx_javascript_frameworks_compat.js +0 -0
  148. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/alabaster.css +0 -0
  149. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/basic.css +0 -0
  150. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/badge_only.css +0 -0
  151. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff +0 -0
  152. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 +0 -0
  153. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff +0 -0
  154. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 +0 -0
  155. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot +0 -0
  156. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.svg +0 -0
  157. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf +0 -0
  158. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff +0 -0
  159. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2 +0 -0
  160. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff +0 -0
  161. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2 +0 -0
  162. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-bold.woff +0 -0
  163. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-bold.woff2 +0 -0
  164. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff +0 -0
  165. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 +0 -0
  166. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-normal.woff +0 -0
  167. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 +0 -0
  168. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/css/theme.css +0 -0
  169. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/custom.css +0 -0
  170. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/doctools.js +0 -0
  171. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/documentation_options.js +0 -0
  172. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/file.png +0 -0
  173. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bold.eot +0 -0
  174. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bold.ttf +0 -0
  175. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bold.woff +0 -0
  176. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bold.woff2 +0 -0
  177. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.eot +0 -0
  178. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.ttf +0 -0
  179. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.woff +0 -0
  180. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 +0 -0
  181. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-italic.eot +0 -0
  182. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-italic.ttf +0 -0
  183. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-italic.woff +0 -0
  184. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-italic.woff2 +0 -0
  185. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-regular.eot +0 -0
  186. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-regular.ttf +0 -0
  187. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-regular.woff +0 -0
  188. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/Lato/lato-regular.woff2 +0 -0
  189. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot +0 -0
  190. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf +0 -0
  191. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff +0 -0
  192. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 +0 -0
  193. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot +0 -0
  194. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf +0 -0
  195. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff +0 -0
  196. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 +0 -0
  197. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/github-banner.svg +0 -0
  198. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/jquery.js +0 -0
  199. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/js/badge_only.js +0 -0
  200. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/js/theme.js +0 -0
  201. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/js/versions.js +0 -0
  202. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/language_data.js +0 -0
  203. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/minus.png +0 -0
  204. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/plus.png +0 -0
  205. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/pygments.css +0 -0
  206. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/bootstrap.js +0 -0
  207. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/bootstrap.js.LICENSE.txt +0 -0
  208. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/bootstrap.js.map +0 -0
  209. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/fontawesome.js +0 -0
  210. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/fontawesome.js.LICENSE.txt +0 -0
  211. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/fontawesome.js.map +0 -0
  212. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/pydata-sphinx-theme.js +0 -0
  213. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/scripts/pydata-sphinx-theme.js.map +0 -0
  214. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/searchtools.js +0 -0
  215. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/sphinx_highlight.js +0 -0
  216. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/styles/pydata-sphinx-theme.css +0 -0
  217. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/styles/pydata-sphinx-theme.css.map +0 -0
  218. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/styles/theme.css +0 -0
  219. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-brands-400.ttf +0 -0
  220. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-brands-400.woff2 +0 -0
  221. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-regular-400.ttf +0 -0
  222. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-regular-400.woff2 +0 -0
  223. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-solid-900.ttf +0 -0
  224. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-solid-900.woff2 +0 -0
  225. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/_static/webpack-macros.html +0 -0
  226. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/auxiliary/eq.html +0 -0
  227. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/auxiliary/filterbank.html +0 -0
  228. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/auxiliary/minimize.html +0 -0
  229. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/auxiliary/reverb.html +0 -0
  230. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/auxiliary/scattering.html +0 -0
  231. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/functional.html +0 -0
  232. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/genindex.html +0 -0
  233. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/index.html +0 -0
  234. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/objects.inv +0 -0
  235. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/optimize/dataset.html +0 -0
  236. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/optimize/loss.html +0 -0
  237. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/optimize/trainer.html +0 -0
  238. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/optimize/utils.html +0 -0
  239. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/processor/dsp.html +0 -0
  240. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/processor/system.html +0 -0
  241. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/search.html +0 -0
  242. {flamo-0.2.4 → flamo-0.2.6}/sphinx/_build/html/searchindex.js +0 -0
  243. {flamo-0.2.4 → flamo-0.2.6}/sphinx/auxiliary/eq.rst +0 -0
  244. {flamo-0.2.4 → flamo-0.2.6}/sphinx/auxiliary/filterbank.rst +0 -0
  245. {flamo-0.2.4 → flamo-0.2.6}/sphinx/auxiliary/minimize.rst +0 -0
  246. {flamo-0.2.4 → flamo-0.2.6}/sphinx/auxiliary/reverb.rst +0 -0
  247. {flamo-0.2.4 → flamo-0.2.6}/sphinx/auxiliary/scattering.rst +0 -0
  248. {flamo-0.2.4 → flamo-0.2.6}/sphinx/conf.py +0 -0
  249. {flamo-0.2.4 → flamo-0.2.6}/sphinx/functional.rst +0 -0
  250. {flamo-0.2.4 → flamo-0.2.6}/sphinx/index.rst +0 -0
  251. {flamo-0.2.4 → flamo-0.2.6}/sphinx/make.bat +0 -0
  252. {flamo-0.2.4 → flamo-0.2.6}/sphinx/optimize/dataset.rst +0 -0
  253. {flamo-0.2.4 → flamo-0.2.6}/sphinx/optimize/loss.rst +0 -0
  254. {flamo-0.2.4 → flamo-0.2.6}/sphinx/optimize/trainer.rst +0 -0
  255. {flamo-0.2.4 → flamo-0.2.6}/sphinx/optimize/utils.rst +0 -0
  256. {flamo-0.2.4 → flamo-0.2.6}/sphinx/processor/dsp.rst +0 -0
  257. {flamo-0.2.4 → flamo-0.2.6}/sphinx/processor/system.rst +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flamo
-Version: 0.2.4
+Version: 0.2.6
 Summary: An Open-Source Library for Frequency-Domain Differentiable Audio Processing
 Project-URL: Homepage, https://github.com/gdalsanto/flamo
 Project-URL: Issues, https://github.com/gdalsanto/flamo/issues
examples/e0_siso.py
@@ -204,7 +204,7 @@ def example_fir(args) -> None:
 
     # Dataset
     dataset = Dataset(
-        input=unit_imp, target=target, expand=args.num, device=args.device
+        input=unit_imp, target=target, expand=args.num, device=args.device, dtype=args.dtype
     )
     train_loader, valid_loader = load_dataset(
         dataset, batch_size=args.batch_size, split=args.split
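
Every example script now threads args.dtype through to Dataset. Where args.dtype comes from is not part of this diff, so the snippet below is only a hypothetical sketch of how such a flag could be parsed; the flag name and the string-to-dtype mapping are assumptions, not flamo code:

    import argparse
    import torch

    parser = argparse.ArgumentParser()
    # hypothetical flag; the examples' real argument setup is not shown in this diff
    parser.add_argument("--dtype", type=str, default="float32", choices=["float32", "float64"])
    args = parser.parse_args()
    args.dtype = getattr(torch, args.dtype)  # e.g. "float64" -> torch.float64
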
examples/e1_mimo.py
@@ -186,7 +186,7 @@ def example_biquads(args) -> None:
 
     # Dataset
     dataset = Dataset(
-        input=unit_imp, target=target, expand=args.num, device=args.device
+        input=unit_imp, target=target, expand=args.num, device=args.device, dtype=args.dtype
     )
     train_loader, valid_loader = load_dataset(
         dataset, batch_size=args.batch_size, split=args.split
examples/e2_chaining_filters.py
@@ -244,7 +244,7 @@ def example_requires_grad(args):
 
     # Dataset
     dataset = Dataset(
-        input=unit_imp, target=target.unsqueeze(0), expand=args.num, device=args.device
+        input=unit_imp, target=target.unsqueeze(0), expand=args.num, device=args.device, dtype=args.dtype
     )
     train_loader, valid_loader = load_dataset(
         dataset, batch_size=args.batch_size, split=args.split
examples/e3_series_class.py
@@ -266,7 +266,7 @@ def example_series_training(args):
 
     # Dataset
     dataset = Dataset(
-        input=unit_imp, target=target.unsqueeze(0), expand=args.num, device=args.device
+        input=unit_imp, target=target.unsqueeze(0), expand=args.num, device=args.device, dtype=args.dtype
    )
     train_loader, valid_loader = load_dataset(
         dataset, batch_size=args.batch_size, split=args.split
examples/e4_recursion_nn.py
@@ -278,7 +278,7 @@ def example_comb_nn(args):
     )
 
     # Create a dataset
-    dataset = Dataset(args, ch, args.num, n_sections, delay_lengths)
+    dataset = Dataset(args, ch, args.num, n_sections, delay_lengths, dtype=args.dtype)
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
     trainer = Trainer(
examples/e7_biquad.py
@@ -24,10 +24,10 @@ def example_biquad(args):
     n_sections = 2
     ## ---------------- TARGET ---------------- ##
     b, a = highpass_filter(
-        fc=torch.tensor(args.samplerate // 2)
-        * torch.rand(size=(n_sections, out_ch, in_ch)),
+        fc=torch.tensor(args.samplerate // 2, dtype=args.dtype)
+        * torch.rand(size=(n_sections, out_ch, in_ch), dtype=args.dtype),
         gain=torch.tensor(-1)
-        + (torch.tensor(2)) * torch.rand(size=(n_sections, out_ch, in_ch)),
+        + (torch.tensor(2)) * torch.rand(size=(n_sections, out_ch, in_ch), dtype=args.dtype),
         fs=args.samplerate,
     )
     B = torch.fft.rfft(b, args.nfft, dim=0)
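
Passing dtype to both factors keeps the product from silently following PyTorch's type-promotion rules when args.dtype is not the default. A self-contained illustration in plain PyTorch, independent of flamo:

    import torch

    # int64 scalar times float32 tensor promotes to float32 (the default)
    fc32 = torch.tensor(24000) * torch.rand(3)
    # with explicit dtypes the computation stays in float64 throughout
    fc64 = torch.tensor(24000, dtype=torch.float64) * torch.rand(3, dtype=torch.float64)
    print(fc32.dtype, fc64.dtype)  # torch.float32 torch.float64
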
@@ -65,6 +65,7 @@ def example_biquad(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
@@ -171,6 +172,7 @@ def example_parallel_biquad(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
examples/e7_biquad_nn.py
@@ -175,7 +175,7 @@ def example_biquad_nn(args):
     )
 
     # Create a dataset
-    dataset = Dataset(args, in_ch, out_ch, args.num, n_sections)
+    dataset = Dataset(args, in_ch, out_ch, args.num, n_sections, dtype=args.dtype)
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
     trainer = Trainer(
examples/e7_geq.py
@@ -73,6 +73,7 @@ def example_geq(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
@@ -181,6 +182,7 @@ def example_parallel_geq(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
examples/e7_peq.py
@@ -66,6 +66,7 @@ def example_peq(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
@@ -167,6 +168,7 @@ def example_parallel_peq(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
examples/e7_svf.py
@@ -67,6 +67,7 @@ def example_svf(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
@@ -175,6 +176,7 @@ def example_parallel_svf(args):
         target=torch.abs(target),
         expand=args.num,
         device=args.device,
+        dtype=args.dtype,
     )
     train_loader, valid_loader = load_dataset(dataset, batch_size=args.batch_size)
 
flamo/optimize/dataset.py
@@ -1,3 +1,4 @@
+from typing import Optional
 import torch
 import torch.utils.data as data
 from flamo.utils import get_device
@@ -21,6 +22,7 @@ class Dataset(torch.utils.data.Dataset):
         - **target** (torch.Tensor, optional): The target data tensor. Default: torch.randn(1, 1).
         - **expand** (int): The first shape dimension of the input and target tensor after expansion. Default: 1. This coincides with the length of the dataset.
         - **device** (torch.device, optional): The device to store the tensors on. Defaults to torch default device.
+        - **dtype** (torch.dtype, optional): The data type of the tensors. If None, the data type of the input tensor is used. Default: None.
     """
 
     def __init__(
@@ -29,8 +31,10 @@ class Dataset(torch.utils.data.Dataset):
         target: torch.Tensor = torch.randn(1, 1),
         expand: int = 1,
         device: torch.device = torch.get_default_device(),
-        dtype: torch.dtype = torch.float32,
+        dtype: Optional[torch.dtype] = None,
     ):
+        if dtype is None:
+            dtype = input.dtype
         self.input = input.to(device).to(dtype)
         self.target = target.to(device).to(dtype)
         self.expand = expand
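
Taken together, the dataset.py hunks make dtype optional: when omitted, Dataset now adopts the dtype of the input tensor instead of forcing float32. A minimal sketch of both behaviors (shapes are arbitrary placeholders):

    import torch
    from flamo.optimize.dataset import Dataset

    x = torch.randn(1, 2**11, 1, dtype=torch.float64)
    y = torch.randn(1, 2**11, 1, dtype=torch.float64)

    ds64 = Dataset(input=x, target=y, expand=16)                       # dtype inferred: float64
    ds32 = Dataset(input=x, target=y, expand=16, dtype=torch.float32)  # explicit override
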
flamo/optimize/trainer.py
@@ -54,11 +54,13 @@ class Trainer:
         patience_delta: float = 0.01,
         step_size: int = 50,
         step_factor: float = 0.1,
+        log: bool = True,
         train_dir: str = None,
         device: str = "cpu",
     ):
 
         self.device = device
+        self.log = log
         self.net = net.to(device)
         self.max_epochs = max_epochs
         self.lr = lr
@@ -67,10 +69,10 @@ class Trainer:
         self.min_val_loss = float("inf")
         self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr)
         self.n_loss = 0
-
-        assert os.path.isdir(
-            train_dir
-        ), "The directory specified in train_dir does not exist."
+        if self.log:
+            assert os.path.isdir(
+                train_dir
+            ), "The directory specified in train_dir does not exist."
         self.train_dir = train_dir
 
         self.criterion, self.alpha, self.requires_model = (
@@ -141,7 +143,8 @@ class Trainer:
             self.print_results(epoch, et_epoch - st_epoch)
 
             # save checkpoints
-            self.save_model(epoch)
+            if self.log:
+                self.save_model(epoch)
             if self.early_stop():
                 print("Early stopping at epoch: {}".format(epoch))
                 break
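
The net effect of the new log flag: with log=False the Trainer neither checks train_dir nor writes per-epoch checkpoints, which suits quick throwaway runs. A minimal sketch, where net stands for any trainable flamo model (hypothetical placeholder):

    from flamo.optimize.trainer import Trainer

    # no train_dir required when logging is disabled
    trainer = Trainer(net, max_epochs=10, lr=1e-3, log=False, device="cpu")

    # default behavior is unchanged: log=True still requires an existing train_dir
    trainer = Trainer(net, train_dir="./output", device="cpu")
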
flamo/processor/dsp.py
@@ -1663,6 +1663,270 @@ class parallelBiquad(Biquad):
         self.output_channels = self.size[-1]
 
 
+class SOSFilter(Filter):
+    r"""
+    A class representing cascaded second-order sections (SOS) specified directly by
+    numerator/denominator coefficients (b/a).
+
+    Each section k has coefficients [b0_k, b1_k, b2_k, a0_k, a1_k, a2_k]. Sections are
+    applied in series. The frequency response is computed from the time-domain
+    polynomial coefficients with an anti time-aliasing envelope applied to the 3 taps.
+
+    Shape:
+        - input: (B, M, N_in, ...)
+        - param: (K, 6, N_out, N_in) with ordering [b0, b1, b2, a0, a1, a2]
+        - freq_response: (M, N_out, N_in)
+        - output: (B, M, N_out, ...)
+
+    where B is the batch size, M is the number of frequency bins, N_in is the number of
+    input channels, N_out is the number of output channels, and K is the number of SOS
+    sections (cascaded in series).
+
+    **Arguments / Attributes**:
+        - size (tuple, optional): (N_out, N_in). Default: (1, 1).
+        - n_sections (int, optional): Number of SOS sections (K). Default: 1.
+        - nfft (int, optional): Number of FFT points. Default: 2**11.
+        - fs (int, optional): Sampling frequency. Default: 48000.
+        - alias_decay_db (float, optional): Anti time-aliasing envelope decay in dB after nfft samples. Default: 0.0.
+        - device (str, optional): Device for constructed tensors. Default: None.
+        - dtype (torch.dtype, optional): Data type for tensors. Default: torch.float32.
+        - normalize_a0 (bool, optional): Normalize each section by a0 so a0=1. Default: True.
+
+    **Attributes**:
+        - param (nn.Parameter): Raw SOS coefficients with shape (K, 6, N_out, N_in).
+        - alias_envelope_dcy (torch.Tensor): Length-3 envelope for time anti-aliasing.
+        - freq_response (callable): Maps parameters to frequency response.
+        - input_channels (int): Number of input channels.
+        - output_channels (int): Number of output channels.
+    """
+
+    def __init__(
+        self,
+        size: tuple = (1, 1),
+        n_sections: int = 1,
+        nfft: int = 2**11,
+        fs: int = 48000,
+        alias_decay_db: float = 0.0,
+        device: Optional[str] = None,
+        dtype: torch.dtype = torch.float32,
+        normalize_a0: bool = True,
+    ):
+        self.n_sections = n_sections
+        self.fs = fs
+        self.device = device
+        self.dtype = dtype
+        self.normalize_a0 = normalize_a0
+        # 3-tap envelope for [b0, b1, b2] and [a0, a1, a2]
+        gamma = 10 ** (
+            -torch.abs(torch.tensor(alias_decay_db, device=self.device, dtype=self.dtype)) / (nfft) / 20
+        )
+        self.alias_envelope_dcy = gamma ** torch.arange(0, 3, 1, device=self.device)
+        self.get_map()
+        super().__init__(
+            size=(n_sections, *self.get_size(), *size),
+            nfft=nfft,
+            map=self.map,
+            requires_grad=False,
+            alias_decay_db=alias_decay_db,
+            device=device,
+            dtype=dtype,
+        )
+
+    def get_size(self):
+        r"""
+        Leading dimensions for SOS parameters.
+
+        - 6 for [b0, b1, b2, a0, a1, a2]
+        """
+        return (6,)
+
+    def get_map(self):
+        r"""
+        Mapping for raw SOS coefficients. Optionally normalizes each section so a0=1.
+        """
+
+        def _map(x: torch.Tensor) -> torch.Tensor:
+            if not self.normalize_a0:
+                return x
+            # x: (K, 6, N_out, N_in)
+            a0 = x[:, 3, ...]
+            eps = torch.finfo(x.dtype).eps
+            a0_safe = torch.where(torch.abs(a0) > eps, a0, eps * torch.ones_like(a0))
+            y = x.clone()
+            # divide all coeffs by a0; set a0 to 1
+            for idx in [0, 1, 2, 4, 5]:
+                y[:, idx, ...] = y[:, idx, ...] / a0_safe
+            y[:, 3, ...] = torch.ones_like(a0)
+            return y
+
+        self.map = _map
+
+    def init_param(self):
+        r"""
+        Initialize parameters to identity sections: b=[1,0,0], a=[1,0,0].
+        """
+        with torch.no_grad():
+            self.param.zero_()
+            # b0 = 1, a0 = 1
+            self.param[:, 0, ...] = 1.0
+            self.param[:, 3, ...] = 1.0
+
+    def check_param_shape(self):
+        r"""
+        Checks if the shape of the SOS parameters is valid.
+        """
+        assert (
+            len(self.size) == 4
+        ), "Parameter size must be 4D, expected (K, 6, N_out, N_in)."
+        assert (
+            self.size[1] == 6
+        ), "Second dimension must be 6: [b0,b1,b2,a0,a1,a2]."
+
+    def initialize_class(self):
+        r"""
+        Initialize the SOSFilter class.
+        """
+        self.check_param_shape()
+        self.get_io()
+        self.get_freq_response()
+        self.get_freq_convolve()
+
+    def get_freq_response(self):
+        r"""
+        Compute the frequency response of the cascaded SOS.
+        """
+        self.freq_response = lambda param: self.get_poly_coeff(self.map(param))[0]
+
+    def get_poly_coeff(self, param: torch.Tensor):
+        r"""
+        Split mapped parameters into b and a polynomials, apply the anti-aliasing
+        envelope, and compute the frequency response.
+
+        **Arguments**:
+            - param (torch.Tensor): (K, 6, N_out, N_in)
+
+        **Returns**:
+            - H (torch.Tensor): (M, N_out, N_in)
+            - B (torch.Tensor): (M, K, N_out, N_in)
+            - A (torch.Tensor): (M, K, N_out, N_in)
+        """
+        # Arrange to (3, K, N_out, N_in)
+        b = torch.stack((param[:, 0, ...], param[:, 1, ...], param[:, 2, ...]), dim=0)
+        a = torch.stack((param[:, 3, ...], param[:, 4, ...], param[:, 5, ...]), dim=0)
+
+        b_aa = torch.einsum(
+            "p, pomn -> pomn", self.alias_envelope_dcy, b)
+        a_aa = torch.einsum(
+            "p, pomn -> pomn", self.alias_envelope_dcy, a)
+        B = torch.fft.rfft(b_aa, self.nfft, dim=0)
+        A = torch.fft.rfft(a_aa, self.nfft, dim=0)
+        H_temp = torch.prod(B, dim=1) / (torch.prod(A, dim=1))
+        denom = torch.abs(torch.prod(A, dim=1))
+        H = torch.where(
+            denom != 0, H_temp, torch.finfo(H_temp.dtype).eps * torch.ones_like(H_temp)
+        )
+        return H, B, A
+
+    def get_freq_convolve(self):
+        r"""
+        Frequency-domain matrix product with the input.
+        """
+        self.freq_convolve = lambda x, param: torch.einsum(
+            "fmn,bfn...->bfm...", self.freq_response(param), x
+        )
+
+    def get_io(self):
+        r"""
+        Computes the number of input and output channels based on the size parameter.
+        """
+        self.input_channels = self.size[-1]
+        self.output_channels = self.size[-2]
+
+
+class parallelSOSFilter(SOSFilter):
+    r"""
+    Parallel counterpart of the SOSFilter class.
+
+    Accepts direct SOS coefficients for N parallel channels. Parameter shape is
+    (K, 6, N), with ordering [b0, b1, b2, a0, a1, a2] per section.
+
+    Shape:
+        - input: (B, M, N, ...)
+        - param: (K, 6, N)
+        - freq_response: (M, N)
+        - output: (B, M, N, ...)
+    """
+
+    def __init__(
+        self,
+        size: tuple = (1,),
+        n_sections: int = 1,
+        nfft: int = 2**11,
+        fs: int = 48000,
+        alias_decay_db: float = 0.0,
+        device: Optional[str] = None,
+        dtype: torch.dtype = torch.float32,
+        normalize_a0: bool = True,
+    ):
+        super().__init__(
+            size=size,
+            n_sections=n_sections,
+            nfft=nfft,
+            fs=fs,
+            alias_decay_db=alias_decay_db,
+            device=device,
+            dtype=dtype,
+            normalize_a0=normalize_a0,
+        )
+
+    def check_param_shape(self):
+        r"""
+        Checks if the shape of the SOS parameters is valid.
+        """
+        assert (
+            len(self.size) == 3
+        ), "Parameter size must be 3D, expected (K, 6, N)."
+        assert self.size[1] == 6, "Second dimension must be 6: [b0,b1,b2,a0,a1,a2]."
+
+    def get_freq_response(self):
+        r"""Compute the frequency response of the cascaded SOS."""
+        self.freq_response = lambda param: self.get_poly_coeff(self.map(param))[0]
+
+    def get_poly_coeff(self, param: torch.Tensor):
+        r"""
+        Split mapped parameters into b and a polynomials (parallel case), apply the
+        anti-aliasing envelope, and compute the frequency response.
+
+        **Arguments**:
+            - param (torch.Tensor): (K, 6, N)
+
+        **Returns**:
+            - H (torch.Tensor): (M, N)
+            - B (torch.Tensor): (M, K, N)
+            - A (torch.Tensor): (M, K, N)
+        """
+        b = torch.stack((param[:, 0, :], param[:, 1, :], param[:, 2, :]), dim=0)
+        a = torch.stack((param[:, 3, :], param[:, 4, :], param[:, 5, :]), dim=0)
+
+        b_aa = torch.einsum("p, pon -> pon", self.alias_envelope_dcy, b)
+        a_aa = torch.einsum("p, pon -> pon", self.alias_envelope_dcy, a)
+        B = torch.fft.rfft(b_aa, self.nfft, dim=0)
+        A = torch.fft.rfft(a_aa, self.nfft, dim=0)
+        H_temp = torch.prod(B, dim=1) / (torch.prod(A, dim=1))
+        H = torch.where(torch.abs(torch.prod(A, dim=1)) != 0, H_temp, torch.finfo(H_temp.dtype).eps * torch.ones_like(H_temp))
+        return H, B, A
+
+    def get_freq_convolve(self):
+        self.freq_convolve = lambda x, param: torch.einsum(
+            "fn,bfn...->bfn...", self.freq_response(param), x
+        )
+
+    def get_io(self):
+        r"""Computes the number of input and output channels based on the size parameter."""
+        self.input_channels = self.size[-1]
+        self.output_channels = self.size[-1]
+
+
 class SVF(Filter):
     r"""
     A class for IIR filters as serially cascaded state variable filters (SVFs).
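
SOSFilter subclasses Filter, so it consumes complex input spectra like the other frequency-domain modules: shape (B, M, N_in) with M = nfft // 2 + 1 bins. A minimal usage sketch based on the shapes in the docstring above; because init_param builds identity sections (b = a = [1, 0, 0]), the filter applies unit-gain sections until real coefficients are assigned:

    import torch
    from flamo.processor.dsp import SOSFilter

    nfft = 2**11
    sos = SOSFilter(size=(2, 3), n_sections=4, nfft=nfft, fs=48000)

    x = torch.randn(1, nfft // 2 + 1, 3, dtype=torch.complex64)  # (B, M, N_in)
    y = sos(x)                                                   # (B, M, N_out) = (1, 1025, 2)
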
@@ -3105,3 +3369,154 @@ class parallelDelay(Delay):
         """
         self.input_channels = self.size[-1]
         self.output_channels = self.size[-1]
+
+
+class GainDelay(DSP):
+    r"""
+    A class implementing a combined MIMO gain and delay stage operating in the frequency domain.
+
+    This class computes the frequency response of a gain matrix followed by per-channel delays
+    without constructing intermediate expanded tensors of size :math:`N_{out} \times N_{in}`.
+
+    Shape:
+        - input: :math:`(B, M, N_{in}, ...)`
+        - param: :math:`(2, N_{out}, N_{in})`
+        - output: :math:`(B, M, N_{out}, ...)`
+
+    where :math:`B` is the batch size, :math:`M` is the number of frequency bins,
+    :math:`N_{in}` is the number of input channels, and :math:`N_{out}` is the number of output channels.
+    Ellipsis :math:`(...)` represents additional dimensions.
+
+    **Arguments / Attributes**:
+        - **size** (tuple, optional): Size of the gain-delay stage as ``(N_{out}, N_{in})``. Default: (1, 1).
+        - **max_len** (int, optional): Maximum delay length expressed in samples. Default: 2000.
+        - **isint** (bool, optional): If ``True``, delays are rounded to the nearest integer sample. Default: False.
+        - **unit** (int, optional): Unit scaling factor for converting seconds to samples. Default: 100.
+        - **nfft** (int, optional): Number of FFT points. Default: 2 ** 11.
+        - **fs** (int, optional): Sampling rate. Default: 48000.
+        - **map_gain** (callable, optional): Mapping applied to raw gain parameters. Default: ``lambda x: x``.
+        - **map_delay** (callable, optional): Mapping applied to raw delay parameters (in seconds). Default: ``lambda x: x``.
+        - **requires_grad** (bool, optional): Whether parameters require gradients. Default: False.
+        - **alias_decay_db** (float, optional): Decay in dB applied by the anti aliasing envelope. Default: 0.0.
+        - **device** (str, optional): Device of the constructed tensors. Default: None.
+        - **dtype** (torch.dtype, optional): Data type for tensors. Default: torch.float32.
+    """
+
+    def __init__(
+        self,
+        size: tuple = (1, 1),
+        max_len: int = 2000,
+        isint: bool = False,
+        unit: int = 100,
+        nfft: int = 2**11,
+        fs: int = 48000,
+        map_gain: Optional[callable] = None,
+        map_delay: Optional[callable] = None,
+        requires_grad: bool = False,
+        alias_decay_db: float = 0.0,
+        device: Optional[str] = None,
+        dtype: torch.dtype = torch.float32,
+    ):
+        self.fs = fs
+        self.max_len = max_len
+        self.unit = unit
+        self.isint = isint
+        self._custom_gain_map = map_gain is not None
+        self._custom_delay_map = map_delay is not None
+        self.map_gain = map_gain if map_gain is not None else (lambda x: x)
+        self.map_delay = map_delay if map_delay is not None else (lambda x: x)
+        super().__init__(
+            size=(2, *size),
+            nfft=nfft,
+            requires_grad=requires_grad,
+            alias_decay_db=alias_decay_db,
+            device=device,
+            dtype=dtype,
+        )
+        self.initialize_class()
+
+    def forward(self, x, ext_param=None):
+        self.check_input_shape(x)
+        if ext_param is None:
+            return self.freq_convolve(x, self.param)
+        with torch.no_grad():
+            self.assign_value(ext_param)
+        return self.freq_convolve(x, ext_param)
+
+    def init_param(self):
+        gain_shape = self.size[1:]
+        with torch.no_grad():
+            nn.init.ones_(self.param[0])
+            if self.isint:
+                delay_samples = torch.randint(
+                    1, self.max_len, gain_shape, device=self.device, dtype=torch.int64
+                ).to(self.param.dtype)
+            else:
+                delay_samples = torch.rand(gain_shape, device=self.device, dtype=self.dtype) * self.max_len
+            delay_seconds = self.sample2s(delay_samples)
+            self.param[1].copy_(delay_seconds)
+        max_delay = torch.ceil(delay_samples).max().item()
+        self.order = int(max_delay) + 1
+
+    def s2sample(self, delay: torch.Tensor):
+        return delay * self.fs / self.unit
+
+    def sample2s(self, delay: torch.Tensor):
+        return delay / self.fs * self.unit
+
+    def check_input_shape(self, x):
+        if (int(self.nfft / 2 + 1), self.input_channels) != (x.shape[1], x.shape[2]):
+            raise ValueError(
+                f"parameter shape = {self.param.shape} not compatible with input signal of shape = ({x.shape})."
+            )
+
+    def check_param_shape(self):
+        assert (
+            len(self.size) == 3 and self.size[0] == 2
+        ), "GainDelay parameters must have shape (2, N_out, N_in)."
+
+    def get_gains(self):
+        return lambda param: to_complex(self.map_gain(param[0]))
+
+    def get_delays(self):
+        return lambda param: self.s2sample(self.map_delay(param[1]))
+
+    def get_freq_response(self):
+        gains = self.get_gains()
+        delays = self.get_delays()
+        if self.isint:
+            self.freq_response = lambda param: self._combine_gain_delay(
+                gains(param), delays(param).round()
+            )
+        else:
+            self.freq_response = lambda param: self._combine_gain_delay(
+                gains(param), delays(param)
+            )
+
+    def get_freq_convolve(self):
+        self.freq_convolve = lambda x, param: torch.einsum(
+            "fmn,bfn...->bfm...", self.freq_response(param), x
+        )
+
+    def initialize_class(self):
+        self.check_param_shape()
+        self.get_io()
+        if self.requires_grad and not self._custom_delay_map:
+            self.map_delay = lambda x: F.softplus(x)
+        self.omega = (
+            2
+            * torch.pi
+            * torch.arange(0, self.nfft // 2 + 1, device=self.device, dtype=self.dtype)
+            / self.nfft
+        ).unsqueeze(1)
+        self.get_freq_response()
+        self.get_freq_convolve()
+
+    def get_io(self):
+        self.input_channels = self.size[-1]
+        self.output_channels = self.size[-2]
+
+    def _combine_gain_delay(self, gain: torch.Tensor, delay_samples: torch.Tensor):
+        delay_samples = delay_samples.to(gain.real.dtype)
+        phase = torch.einsum("fo, omn -> fmn", self.omega, delay_samples.unsqueeze(0))
+        return gain.unsqueeze(0) * (self.gamma ** delay_samples) * torch.exp(-1j * phase)
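
GainDelay folds a gain matrix and a per-entry delay into a single response, gain * gamma**d * exp(-1j * omega * d) as in _combine_gain_delay above (gamma is the anti time-aliasing base inherited from the DSP parent), so no intermediate (N_out, N_in) delay-line tensor is ever materialized. A minimal usage sketch with shapes taken from the docstring:

    import torch
    from flamo.processor.dsp import GainDelay

    nfft = 2**11
    gd = GainDelay(size=(4, 2), max_len=500, isint=True, nfft=nfft, fs=48000)

    x = torch.randn(1, nfft // 2 + 1, 2, dtype=torch.complex64)  # (B, M, N_in)
    y = gd(x)                                                    # (B, M, N_out) = (1, 1025, 4)
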
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "flamo"
-version = "0.2.4"
+version = "0.2.6"
 authors = [
   { name="Gloria Dal Santo", email="gloria.dalsanto@aalto.fi" },
   { name="Gian Marco De Bortoli", email="gian.debortoli@aalto.fi"},