flamo 0.1.4.tar.gz → 0.1.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (269)
  1. {flamo-0.1.4 → flamo-0.1.5}/PKG-INFO +2 -1
  2. {flamo-0.1.4 → flamo-0.1.5}/examples/e8_colorless_fdn.py +7 -74
  3. {flamo-0.1.4 → flamo-0.1.5}/examples/e8_fdn.py +4 -4
  4. {flamo-0.1.4 → flamo-0.1.5}/examples/e9_loss_profile.py +4 -4
  5. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/config/config.py +2 -1
  6. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/eq.py +2 -1
  7. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/reverb.py +1 -1
  8. {flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/loss.py +1 -0
  9. {flamo-0.1.4 → flamo-0.1.5}/flamo/processor/dsp.py +18 -8
  10. flamo-0.1.5/notebooks/e8_colorless_fdn.ipynb +333 -0
  11. {flamo-0.1.4 → flamo-0.1.5}/pyproject.toml +2 -1
  12. flamo-0.1.4/losses.py +0 -721
  13. flamo-0.1.4/notebooks/e8_colorless_fdn.ipynb +0 -550
  14. flamo-0.1.4/rirs/lrf/fdn_target_ir.wav +0 -0
  15. flamo-0.1.4/rirs/lrf/target_ir.wav +0 -0
  16. flamo-0.1.4/run.py +0 -387
  17. flamo-0.1.4/test_fdn.py +0 -437
  18. flamo-0.1.4/test_fdn_2.py +0 -466
  19. flamo-0.1.4/test_fdn_3.py +0 -399
  20. flamo-0.1.4/test_fdn_4.py +0 -465
  21. flamo-0.1.4/test_fdn_5.py +0 -407
  22. flamo-0.1.4/test_fdn_6.py +0 -423
  23. flamo-0.1.4/test_fdn_6_wgn.py +0 -424
  24. flamo-0.1.4/test_peq.py +0 -219
  25. flamo-0.1.4/test_prop_peak.py +0 -56
  26. {flamo-0.1.4 → flamo-0.1.5}/.gitignore +0 -0
  27. {flamo-0.1.4 → flamo-0.1.5}/2025_FLAMO_ICASSP_DalSantoDeBortoli_poster.pdf +0 -0
  28. {flamo-0.1.4 → flamo-0.1.5}/LICENSE +0 -0
  29. {flamo-0.1.4 → flamo-0.1.5}/README.md +0 -0
  30. {flamo-0.1.4 → flamo-0.1.5}/examples/e0_siso.py +0 -0
  31. {flamo-0.1.4 → flamo-0.1.5}/examples/e1_mimo.py +0 -0
  32. {flamo-0.1.4 → flamo-0.1.5}/examples/e2_chaining_filters.py +0 -0
  33. {flamo-0.1.4 → flamo-0.1.5}/examples/e3_series_class.py +0 -0
  34. {flamo-0.1.4 → flamo-0.1.5}/examples/e4_recursion.py +0 -0
  35. {flamo-0.1.4 → flamo-0.1.5}/examples/e4_recursion_nn.py +0 -0
  36. {flamo-0.1.4 → flamo-0.1.5}/examples/e5_shell.py +0 -0
  37. {flamo-0.1.4 → flamo-0.1.5}/examples/e6_anti_aliasing.py +0 -0
  38. {flamo-0.1.4 → flamo-0.1.5}/examples/e7_biquad.py +0 -0
  39. {flamo-0.1.4 → flamo-0.1.5}/examples/e7_biquad_nn.py +0 -0
  40. {flamo-0.1.4 → flamo-0.1.5}/examples/e7_geq.py +0 -0
  41. {flamo-0.1.4 → flamo-0.1.5}/examples/e7_peq.py +0 -0
  42. {flamo-0.1.4 → flamo-0.1.5}/examples/e7_svf.py +0 -0
  43. {flamo-0.1.4 → flamo-0.1.5}/examples/e8_active_acoustics.py +0 -0
  44. {flamo-0.1.4 → flamo-0.1.5}/examples/e8_colorless_sfdn.py +0 -0
  45. {flamo-0.1.4 → flamo-0.1.5}/examples/run_all.py +0 -0
  46. {flamo-0.1.4 → flamo-0.1.5}/flamo/__init__.py +0 -0
  47. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/__init__.py +0 -0
  48. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/filterbank.py +0 -0
  49. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/minimize.py +0 -0
  50. {flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/scattering.py +0 -0
  51. {flamo-0.1.4 → flamo-0.1.5}/flamo/functional.py +0 -0
  52. {flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/__init__.py +0 -0
  53. {flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/dataset.py +0 -0
  54. {flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/surface.py +0 -0
  55. {flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/trainer.py +0 -0
  56. {flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/utils.py +0 -0
  57. {flamo-0.1.4 → flamo-0.1.5}/flamo/processor/__init__.py +0 -0
  58. {flamo-0.1.4 → flamo-0.1.5}/flamo/processor/system.py +0 -0
  59. {flamo-0.1.4 → flamo-0.1.5}/flamo/utils.py +0 -0
  60. {flamo-0.1.4 → flamo-0.1.5}/notebooks/e7_biquad.ipynb +0 -0
  61. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker1.wav +0 -0
  62. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker10.wav +0 -0
  63. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker11.wav +0 -0
  64. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker12.wav +0 -0
  65. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker13.wav +0 -0
  66. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker14.wav +0 -0
  67. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker2.wav +0 -0
  68. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker3.wav +0 -0
  69. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker5.wav +0 -0
  70. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker6.wav +0 -0
  71. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker7.wav +0 -0
  72. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker8.wav +0 -0
  73. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic1_speaker9.wav +0 -0
  74. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker1.wav +0 -0
  75. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker10.wav +0 -0
  76. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker11.wav +0 -0
  77. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker12.wav +0 -0
  78. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker13.wav +0 -0
  79. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker14.wav +0 -0
  80. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker2.wav +0 -0
  81. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker3.wav +0 -0
  82. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker5.wav +0 -0
  83. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker6.wav +0 -0
  84. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker7.wav +0 -0
  85. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker8.wav +0 -0
  86. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic2_speaker9.wav +0 -0
  87. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker1.wav +0 -0
  88. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker10.wav +0 -0
  89. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker11.wav +0 -0
  90. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker12.wav +0 -0
  91. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker13.wav +0 -0
  92. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker14.wav +0 -0
  93. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker2.wav +0 -0
  94. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker3.wav +0 -0
  95. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker5.wav +0 -0
  96. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker6.wav +0 -0
  97. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker7.wav +0 -0
  98. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker8.wav +0 -0
  99. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic3_speaker9.wav +0 -0
  100. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker1.wav +0 -0
  101. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker10.wav +0 -0
  102. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker11.wav +0 -0
  103. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker12.wav +0 -0
  104. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker13.wav +0 -0
  105. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker14.wav +0 -0
  106. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker2.wav +0 -0
  107. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker3.wav +0 -0
  108. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker5.wav +0 -0
  109. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker6.wav +0 -0
  110. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker7.wav +0 -0
  111. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker8.wav +0 -0
  112. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic4_speaker9.wav +0 -0
  113. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker1.wav +0 -0
  114. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker10.wav +0 -0
  115. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker11.wav +0 -0
  116. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker12.wav +0 -0
  117. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker13.wav +0 -0
  118. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker14.wav +0 -0
  119. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker2.wav +0 -0
  120. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker3.wav +0 -0
  121. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker5.wav +0 -0
  122. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker6.wav +0 -0
  123. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker7.wav +0 -0
  124. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker8.wav +0 -0
  125. {flamo-0.1.4 → flamo-0.1.5}/rirs/Otala-2024.05.10/mic5_speaker9.wav +0 -0
  126. {flamo-0.1.4 → flamo-0.1.5}/rirs/arni_35_3541_4_2.wav +0 -0
  127. {flamo-0.1.4 → flamo-0.1.5}/rirs/s3_r4_o.wav +0 -0
  128. {flamo-0.1.4 → flamo-0.1.5}/sphinx/Makefile +0 -0
  129. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/auxiliary/eq.doctree +0 -0
  130. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/auxiliary/filterbank.doctree +0 -0
  131. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/auxiliary/minimize.doctree +0 -0
  132. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/auxiliary/reverb.doctree +0 -0
  133. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/auxiliary/scattering.doctree +0 -0
  134. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/environment.pickle +0 -0
  135. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/functional.doctree +0 -0
  136. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/index.doctree +0 -0
  137. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/optimize/dataset.doctree +0 -0
  138. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/optimize/loss.doctree +0 -0
  139. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/optimize/trainer.doctree +0 -0
  140. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/optimize/utils.doctree +0 -0
  141. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/processor/dsp.doctree +0 -0
  142. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/doctrees/processor/system.doctree +0 -0
  143. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/.buildinfo +0 -0
  144. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/.buildinfo.bak +0 -0
  145. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/.nojekyll +0 -0
  146. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/auxiliary/eq.rst.txt +0 -0
  147. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/auxiliary/filterbank.rst.txt +0 -0
  148. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/auxiliary/minimize.rst.txt +0 -0
  149. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/auxiliary/reverb.rst.txt +0 -0
  150. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/auxiliary/scattering.rst.txt +0 -0
  151. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/functional.rst.txt +0 -0
  152. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/index.rst.txt +0 -0
  153. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/optimize/dataset.rst.txt +0 -0
  154. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/optimize/loss.rst.txt +0 -0
  155. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/optimize/trainer.rst.txt +0 -0
  156. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/optimize/utils.rst.txt +0 -0
  157. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/processor/dsp.rst.txt +0 -0
  158. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_sources/processor/system.rst.txt +0 -0
  159. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/_sphinx_javascript_frameworks_compat.js +0 -0
  160. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/alabaster.css +0 -0
  161. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/basic.css +0 -0
  162. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/badge_only.css +0 -0
  163. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff +0 -0
  164. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 +0 -0
  165. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff +0 -0
  166. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 +0 -0
  167. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot +0 -0
  168. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.svg +0 -0
  169. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf +0 -0
  170. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff +0 -0
  171. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2 +0 -0
  172. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff +0 -0
  173. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2 +0 -0
  174. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-bold.woff +0 -0
  175. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-bold.woff2 +0 -0
  176. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff +0 -0
  177. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 +0 -0
  178. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-normal.woff +0 -0
  179. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 +0 -0
  180. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/css/theme.css +0 -0
  181. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/custom.css +0 -0
  182. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/doctools.js +0 -0
  183. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/documentation_options.js +0 -0
  184. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/file.png +0 -0
  185. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bold.eot +0 -0
  186. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bold.ttf +0 -0
  187. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bold.woff +0 -0
  188. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bold.woff2 +0 -0
  189. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.eot +0 -0
  190. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.ttf +0 -0
  191. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.woff +0 -0
  192. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 +0 -0
  193. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-italic.eot +0 -0
  194. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-italic.ttf +0 -0
  195. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-italic.woff +0 -0
  196. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-italic.woff2 +0 -0
  197. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-regular.eot +0 -0
  198. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-regular.ttf +0 -0
  199. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-regular.woff +0 -0
  200. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/Lato/lato-regular.woff2 +0 -0
  201. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot +0 -0
  202. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf +0 -0
  203. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff +0 -0
  204. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 +0 -0
  205. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot +0 -0
  206. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf +0 -0
  207. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff +0 -0
  208. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 +0 -0
  209. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/github-banner.svg +0 -0
  210. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/jquery.js +0 -0
  211. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/js/badge_only.js +0 -0
  212. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/js/theme.js +0 -0
  213. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/js/versions.js +0 -0
  214. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/language_data.js +0 -0
  215. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/minus.png +0 -0
  216. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/plus.png +0 -0
  217. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/pygments.css +0 -0
  218. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/bootstrap.js +0 -0
  219. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/bootstrap.js.LICENSE.txt +0 -0
  220. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/bootstrap.js.map +0 -0
  221. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/fontawesome.js +0 -0
  222. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/fontawesome.js.LICENSE.txt +0 -0
  223. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/fontawesome.js.map +0 -0
  224. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/pydata-sphinx-theme.js +0 -0
  225. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/scripts/pydata-sphinx-theme.js.map +0 -0
  226. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/searchtools.js +0 -0
  227. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/sphinx_highlight.js +0 -0
  228. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/styles/pydata-sphinx-theme.css +0 -0
  229. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/styles/pydata-sphinx-theme.css.map +0 -0
  230. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/styles/theme.css +0 -0
  231. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-brands-400.ttf +0 -0
  232. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-brands-400.woff2 +0 -0
  233. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-regular-400.ttf +0 -0
  234. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-regular-400.woff2 +0 -0
  235. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-solid-900.ttf +0 -0
  236. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-solid-900.woff2 +0 -0
  237. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/_static/webpack-macros.html +0 -0
  238. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/auxiliary/eq.html +0 -0
  239. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/auxiliary/filterbank.html +0 -0
  240. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/auxiliary/minimize.html +0 -0
  241. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/auxiliary/reverb.html +0 -0
  242. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/auxiliary/scattering.html +0 -0
  243. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/functional.html +0 -0
  244. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/genindex.html +0 -0
  245. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/index.html +0 -0
  246. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/objects.inv +0 -0
  247. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/optimize/dataset.html +0 -0
  248. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/optimize/loss.html +0 -0
  249. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/optimize/trainer.html +0 -0
  250. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/optimize/utils.html +0 -0
  251. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/processor/dsp.html +0 -0
  252. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/processor/system.html +0 -0
  253. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/search.html +0 -0
  254. {flamo-0.1.4 → flamo-0.1.5}/sphinx/_build/html/searchindex.js +0 -0
  255. {flamo-0.1.4 → flamo-0.1.5}/sphinx/auxiliary/eq.rst +0 -0
  256. {flamo-0.1.4 → flamo-0.1.5}/sphinx/auxiliary/filterbank.rst +0 -0
  257. {flamo-0.1.4 → flamo-0.1.5}/sphinx/auxiliary/minimize.rst +0 -0
  258. {flamo-0.1.4 → flamo-0.1.5}/sphinx/auxiliary/reverb.rst +0 -0
  259. {flamo-0.1.4 → flamo-0.1.5}/sphinx/auxiliary/scattering.rst +0 -0
  260. {flamo-0.1.4 → flamo-0.1.5}/sphinx/conf.py +0 -0
  261. {flamo-0.1.4 → flamo-0.1.5}/sphinx/functional.rst +0 -0
  262. {flamo-0.1.4 → flamo-0.1.5}/sphinx/index.rst +0 -0
  263. {flamo-0.1.4 → flamo-0.1.5}/sphinx/make.bat +0 -0
  264. {flamo-0.1.4 → flamo-0.1.5}/sphinx/optimize/dataset.rst +0 -0
  265. {flamo-0.1.4 → flamo-0.1.5}/sphinx/optimize/loss.rst +0 -0
  266. {flamo-0.1.4 → flamo-0.1.5}/sphinx/optimize/trainer.rst +0 -0
  267. {flamo-0.1.4 → flamo-0.1.5}/sphinx/optimize/utils.rst +0 -0
  268. {flamo-0.1.4 → flamo-0.1.5}/sphinx/processor/dsp.rst +0 -0
  269. {flamo-0.1.4 → flamo-0.1.5}/sphinx/processor/system.rst +0 -0
{flamo-0.1.4 → flamo-0.1.5}/PKG-INFO +2 -1

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: flamo
- Version: 0.1.4
+ Version: 0.1.5
  Summary: An Open-Source Library for Frequency-Domain Differentiable Audio Processing
  Project-URL: Homepage, https://github.com/gdalsanto/flamo
  Project-URL: Issues, https://github.com/gdalsanto/flamo/issues
@@ -38,6 +38,7 @@ Requires-Dist: numpy
  Requires-Dist: pydantic
  Requires-Dist: pyfar
  Requires-Dist: pysoundfile
+ Requires-Dist: pyyaml
  Requires-Dist: scipy
  Requires-Dist: torch
  Requires-Dist: torchaudio
{flamo-0.1.4 → flamo-0.1.5}/examples/e8_colorless_fdn.py +7 -74

@@ -26,8 +26,8 @@ def example_fdn(args):

  # FDN parameters
  N = 6 # number of delays
- alias_decay_db = 0 # alias decay in dB
- delay_lengths = torch.tensor([997, 1153, 1327, 1559, 1801, 2099])
+ alias_decay_db = 30 # alias decay in dB
+ delay_lengths = torch.tensor([887, 911, 941, 1699, 1951, 2053])

  ## ---------------- CONSTRUCT FDN ---------------- ##

@@ -35,19 +35,17 @@ def example_fdn(args):
  input_gain = dsp.Gain(
  size=(N, 1),
  nfft=args.nfft,
- requires_grad=False,
+ requires_grad=True,
  alias_decay_db=alias_decay_db,
  device=args.device,
  )
- input_gain.assign_value(torch.ones((N, 1), device=args.device) / torch.tensor(N))
  output_gain = dsp.Gain(
  size=(1, N),
  nfft=args.nfft,
- requires_grad=False,
+ requires_grad=True,
  alias_decay_db=alias_decay_db,
  device=args.device,
  )
- output_gain.assign_value(torch.ones((1, N), device=args.device) )
  # Feedback loop with delays
  delays = dsp.parallelDelay(
  size=(N,),
@@ -60,7 +58,7 @@
  )
  delays.assign_value(delays.sample2s(delay_lengths))
  # Feedback path with orthogonal matrix
- mixing_matrix = dsp.Matrix(
+ feedback = dsp.Matrix(
  size=(N, N),
  nfft=args.nfft,
  matrix_type="orthogonal",
@@ -68,16 +66,6 @@
  alias_decay_db=alias_decay_db,
  device=args.device,
  )
- attenuation = dsp.parallelGain(
- size=(N,),
- nfft=args.nfft,
- requires_grad=False,
- alias_decay_db=alias_decay_db,
- device=args.device,
- )
- attenuation.map = map_gamma(delay_lengths)
- gamma = 6
- attenuation.assign_value(gamma * torch.ones((N,), device=args.device,))

  # # Feedback path with scattering matrix
  # m_L = torch.randint(low=1, high=int(torch.floor(min(delay_lengths)/2)), size=[N])
@@ -93,14 +81,6 @@
  # requires_grad=True,
  # device=args.device,
  # )
- feedback = system.Series(
- OrderedDict(
- {
- "attenuation": attenuation,
- "mixing_matrix": mixing_matrix,
- }
- )
- )

  # Recursion
  feedback_loop = system.Recursion(fF=delays, fB=feedback)
@@ -118,42 +98,9 @@

  # Create the model with Shell
  input_layer = dsp.FFT(args.nfft)
- output_layer = dsp.iFFT(args.nfft)
+ output_layer = dsp.Transform(transform=lambda x: torch.abs(x))
  model = system.Shell(core=FDN, input_layer=input_layer, output_layer=output_layer)

- ir_init = model.get_time_response(identity=False, fs=args.samplerate)
-
- time_axis = torch.arange(0, args.nfft) / args.samplerate
-
- out = torch.flip(ir_init, dims=[1])
- out = torch.cumsum(out**2, dim=1)
- out = torch.flip(out, dims=[1])
-
- # Normalize to 1
- norm_vals = torch.max(out, dim=1, keepdim=True)[0] # per channel
-
-
- edc = out / norm_vals
-
- core = model.get_core()
-
- # generate a decaying sinusoid
- sine = torch.sin(
- 2 * torch.pi * 1000 * time_axis
- ) * torch.exp(-time_axis / 0.01) # 1000 Hz, decay time of 0.1 seconds
- sine = sine.unsqueeze(0).unsqueeze(-1) # add batch and channel dimensions
-
- out = torch.flip(model(sine), dims=[1])
- out = torch.cumsum(out**2, dim=1)
- out = torch.flip(out, dims=[1])
-
- # Normalize to 1
- norm_vals = torch.max(out, dim=1, keepdim=True)[0] # per channel
-
-
- edc_sine = out / norm_vals
-
-
  # Get initial impulse response
  with torch.no_grad():
  ir_init = model.get_time_response(identity=False, fs=args.samplerate).squeeze()
@@ -218,7 +165,7 @@ def save_fdn_params(net, filename="parameters"):

  core = net.get_core()
  param = {}
- param["A"] = core.feedback_loop.feedback.mixing_matrix.param.squeeze().detach().cpu().numpy()
+ param["A"] = core.feedback_loop.feedback.param.squeeze().detach().cpu().numpy()
  param["B"] = core.input_gain.param.squeeze().detach().cpu().numpy()
  param["C"] = core.output_gain.param.squeeze().detach().cpu().numpy()
  param["m"] = (
@@ -236,20 +183,6 @@ def save_fdn_params(net, filename="parameters"):
  return param


- class map_gamma(torch.nn.Module):
-
- def __init__(self, delays):
- super().__init__()
- self.delays = delays.double()
- self.g_min = torch.tensor(0.99, dtype=torch.double, device=delays.device)
- self.g_max = torch.tensor(1, dtype=torch.double, device=delays.device)
-
- def forward(self, x):
- return (
- ((1 / (1 + torch.exp(-x[0]))) * (self.g_max - self.g_min) + self.g_min)
- ** self.delays
- ).type_as(x)
-
  if __name__ == "__main__":

  parser = argparse.ArgumentParser()
{flamo-0.1.4 → flamo-0.1.5}/examples/e8_fdn.py +4 -4

@@ -490,8 +490,8 @@ if __name__ == "__main__":

  parser = argparse.ArgumentParser()

- parser.add_argument("--nfft", type=int, default=44100*2, help="FFT size")
- parser.add_argument("--samplerate", type=int, default=44100, help="sampling rate")
+ parser.add_argument("--nfft", type=int, default=96000, help="FFT size")
+ parser.add_argument("--samplerate", type=int, default=48000, help="sampling rate")
  parser.add_argument("--num", type=int, default=100, help="dataset size")
  parser.add_argument(
  "--device", type=str, default="cuda", help="device to use for computation"
@@ -541,6 +541,6 @@ if __name__ == "__main__":
  )
  )

- example_fdn(args)
+ # example_fdn(args)
  # example_fdn_accurate_geq(args)
- # example_fdn_direct(args)
+ example_fdn_direct(args)
{flamo-0.1.4 → flamo-0.1.5}/examples/e9_loss_profile.py +4 -4

@@ -6,7 +6,7 @@ import argparse
  import yaml
  from flamo.auxiliary.reverb import HomogeneousFDN, map_gamma, inverse_map_gamma
  from flamo.auxiliary.config.config import HomogeneousFDNConfig
- from flamo.optimize.loss import mse_loss
+ from flamo.optimize.loss import mse_loss, mel_mss_loss
  from flamo.optimize.surface import LossProfile, LossConfig, ParameterConfig, LossSurface
  from flamo.functional import signal_gallery, get_magnitude

@@ -59,7 +59,7 @@ def example_loss_profile(args):

  loss_profile = LossProfile(FDN.model, loss_config)
  loss = loss_profile.compute_loss(input_signal, target_signal)
- loss_profile.plot_loss(loss, criterion_name=["MSE"])
+ loss_profile.plot_loss(loss)


  def example_loss_surface(args):
@@ -103,7 +103,7 @@

  # define config structures
  loss_config = LossConfig(
- criteria=[mse_loss(), torch.nn.L1Loss()],
+ criteria=[mse_loss(), mel_mss_loss()],
  param_config=[attenuation_config, input_gain_config],
  # perturb_param=None,#"output_gain",
  perturb_map=lambda x: x,
@@ -124,7 +124,7 @@

  loss_profile = LossSurface(FDN.model, loss_config)
  loss = loss_profile.compute_loss(input_signal, target_signal)
- loss_profile.plot_loss(loss, criterion_name=["MSE", "MAE"])
+ loss_profile.plot_loss(loss)

  if __name__ == "__main__":

{flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/config/config.py +2 -1

@@ -33,7 +33,8 @@ class HomogeneousFDNConfig(BaseModel):
  delays_grad: bool = False
  mixing_matrix_grad: bool = True
  attenuation_grad: bool = True
-
+ is_delay_int: bool = True
+
  def __init__(self, **data):
  super().__init__(**data)
  if self.delays is None:
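The new is_delay_int field is threaded through to the delay module's isint argument (see the flamo/auxiliary/reverb.py hunk below), so a config can now request fractional, differentiable delay lengths instead of integer ones. A minimal sketch of how this might be used, assuming the remaining HomogeneousFDNConfig fields keep their defaults and that HomogeneousFDN is built from the config object as in the shipped examples:

from flamo.auxiliary.config.config import HomogeneousFDNConfig
from flamo.auxiliary.reverb import HomogeneousFDN

# Hypothetical usage: keep the default delays and gradient flags, but let the
# delay lengths stay fractional (isint=False) rather than rounded to samples.
config = HomogeneousFDNConfig(is_delay_int=False)
fdn = HomogeneousFDN(config)  # assumed constructor; imported as in examples/e9_loss_profile.py
model = fdn.model             # the wrapped model passed to LossProfile / LossSurface in that example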
{flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/eq.py +2 -1

@@ -86,7 +86,8 @@ def geq(

  for band in range(num_bands):
  if band == 0:
- b = torch.tensor([db2mag(gain_db[band]), 0, 0], device=device)
+ b = torch.zeros(3, device=device)
+ b[0] = db2mag(gain_db[band])
  a = torch.tensor([1, 0, 0], device=device)
  elif band == 1:
  b, a = shelving_filter(
{flamo-0.1.4 → flamo-0.1.5}/flamo/auxiliary/reverb.py +1 -1

@@ -129,7 +129,7 @@ class HomogeneousFDN:
  size=(self.N,),
  max_len=delay_lines.max(),
  nfft=self.config_dict.nfft,
- isint=True,
+ isint=self.config_dict.is_delay_int,
  requires_grad=self.config_dict.delays_grad,
  alias_decay_db=self.config_dict.alias_decay_db,
  device=self.config_dict.device,
{flamo-0.1.4 → flamo-0.1.5}/flamo/optimize/loss.py +1 -0

@@ -77,6 +77,7 @@ class mse_loss(nn.Module):
  self.nfft = nfft
  self.device = device
  self.mse_loss = nn.MSELoss()
+ self.name = "MSE"

  def forward(self, y_pred, y_true):
  """
{flamo-0.1.4 → flamo-0.1.5}/flamo/processor/dsp.py +18 -8

@@ -2743,7 +2743,7 @@ class Delay(DSP):
  """
  m = self.get_delays()
  if self.isint:
- self.freq_response = lambda param: (self.gamma ** m(param)) * torch.exp(
+ self.freq_response = lambda param: (self.gamma ** m(param).round()) * torch.exp(
  -1j
  * torch.einsum(
  "fo, omn -> fmn",
@@ -2880,14 +2880,24 @@ class parallelDelay(Delay):
  Computes the frequency response of the delay module.
  """
  m = self.get_delays()
- self.freq_response = lambda param: (self.gamma ** m(param)) * torch.exp(
- -1j
- * torch.einsum(
- "fo, on -> fn",
- self.omega,
- m(param).unsqueeze(0),
+ if self.isint:
+ self.freq_response = lambda param: (self.gamma ** m(param).round()) * torch.exp(
+ -1j
+ * torch.einsum(
+ "fo, on -> fn",
+ self.omega,
+ m(param).round().unsqueeze(0),
+ )
+ )
+ else:
+ self.freq_response = lambda param: (self.gamma ** m(param)) * torch.exp(
+ -1j
+ * torch.einsum(
+ "fo, on -> fn",
+ self.omega,
+ m(param).unsqueeze(0),
+ )
  )
- )

  def get_io(self):
  r"""
flamo-0.1.5/notebooks/e8_colorless_fdn.ipynb +333 -0

@@ -0,0 +1,333 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Training of a lossless FDN to improve colorlessness\n",
+ "Tune the parameters of an homogeneous and lossless FDN to reduce coloration\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Start from importing all the neccessary packages and flamo modules "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "import os\n",
+ "from collections import OrderedDict\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# from flamo \n",
+ "from flamo.optimize.dataset import DatasetColorless, load_dataset\n",
+ "from flamo.optimize.trainer import Trainer\n",
+ "from flamo.processor import dsp, system\n",
+ "from flamo.optimize.loss import mse_loss, sparsity_loss\n",
+ "\n",
+ "torch.manual_seed(130798)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Construct the Feedback Delay Network\n",
+ "The FDN is created as an istance of the `flamo.system.Series`class which is used to cascade multiple DSP modules in series, similarly to `nn.Sequential`. This class serves as a container and ensures that all included modules share the same values for the `nfft` and `alias_decay_db` attributes. \n",
+ "\n",
+ "Note that the created FDN is lossless, so the `alias_decay_db` must be nonzero to reduce time-aliasing "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# FDN parameters\n",
+ "delay_lengths = torch.tensor([887, 911, 941, 1699, 1951, 2053])\n",
+ "N = len(delay_lengths) # number of delays\n",
+ "\n",
+ "# training parameters\n",
+ "nfft = 2**16 # number of FFT points\n",
+ "alias_decay_db = 30 # decay in dB of the anti time-aliasing envelope\n",
+ "device = 'cpu' # 'cuda' or 'cpu'\n",
+ "fs = 48000 # sample rate\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Input gains \n",
+ "input_gain = dsp.Gain(\n",
+ " size=(N, 1), \n",
+ " nfft=nfft, \n",
+ " requires_grad=True, \n",
+ " alias_decay_db=alias_decay_db, \n",
+ " device=device\n",
+ ")\n",
+ "\n",
+ "# Output gains\n",
+ "output_gain = dsp.Gain(\n",
+ " size=(1, N), \n",
+ " nfft=nfft, \n",
+ " requires_grad=True, \n",
+ " alias_decay_db=alias_decay_db, \n",
+ " device=device\n",
+ ")\n",
+ "\n",
+ "# FEEDBACK LOOP\n",
+ "\n",
+ "# feedforward path with delays\n",
+ "delays = dsp.parallelDelay(\n",
+ " size=(N,),\n",
+ " max_len=delay_lengths.max(),\n",
+ " nfft=nfft,\n",
+ " isint=True,\n",
+ " requires_grad=False,\n",
+ " alias_decay_db=alias_decay_db,\n",
+ " device=device,\n",
+ ")\n",
+ "delays.assign_value(delays.sample2s(delay_lengths))\n",
+ "\n",
+ "# Feedback path with orthogonal matrix\n",
+ "feedback = dsp.Matrix(\n",
+ " size=(N, N),\n",
+ " nfft=nfft,\n",
+ " matrix_type=\"orthogonal\",\n",
+ " requires_grad=True,\n",
+ " alias_decay_db=alias_decay_db,\n",
+ " device=device,\n",
+ ")\n",
+ "# Create recursion\n",
+ "feedback_loop = system.Recursion(fF=delays, fB=feedback)\n",
+ "\n",
+ "# Contruct the FDN\n",
+ "FDN = system.Series(OrderedDict({\n",
+ " 'input_gain': input_gain,\n",
+ " 'feedback_loop': feedback_loop,\n",
+ " 'output_gain': output_gain\n",
+ "}))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "flamo provides a `Shell` class where the differentiable system, in this case `FDN`, is connected to the input and output layers. \n",
+ "- The input will be an impulse in time domain, thus the input layer needs to transform it to frequency domain \n",
+ "- The target is the desired magnitude response, thus the input layers is the absolute value operation "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_layer = dsp.FFT(nfft) \n",
+ "output_layer = dsp.Transform(transform=lambda x : torch.abs(x))\n",
+ "# wrap the FDN in the Shell\n",
+ "model = system.Shell(\n",
+ " core=FDN, \n",
+ " input_layer=input_layer, \n",
+ " output_layer=output_layer)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To speed up training is good practice to make sure that the energy of the system is comparable to that of the target. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "H = model.get_freq_response(identity=False)\n",
+ "energy_H = torch.mean(torch.pow(torch.abs(H),2))\n",
+ "target_energy = 1\n",
+ "# apply energy normalization on input and output gains only\n",
+ "with torch.no_grad():\n",
+ " core = model.get_core()\n",
+ " core.input_gain.assign_value(torch.div(core.input_gain.param, torch.pow( energy_H / target_energy, 1/4)))\n",
+ " core.output_gain.assign_value(torch.div(core.output_gain.param, torch.pow( energy_H / target_energy, 1/4)))\n",
+ " model.set_core(core)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Log impulse response and the magnitude response at initialization"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with torch.no_grad():\n",
+ " ir_init = model.get_time_response(identity=False, fs=fs).squeeze() \n",
+ " mag_init = model.get_freq_response(identity=False, fs=fs).squeeze() \n",
+ " mag_init = 20 * torch.log10(mag_init)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Set up training\n",
+ "Set training parameters values and construct dataset and trainer. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# training set up parameters \n",
+ "batch_size = 1\n",
+ "num = 256 # number of samples\n",
+ "max_epochs = 20 # maximum number of epochs \n",
+ "lr = 1e-3 # learning rate\n",
+ "step_size = 5 # step size for the learning rate scheduler\n",
+ "train_dir = 'output/ex_fdn'\n",
+ "# create the output directory\n",
+ "os.makedirs(train_dir, exist_ok=True)\n",
+ "\n",
+ "# create the dataset and data loaders \n",
+ "dataset = DatasetColorless(\n",
+ " input_shape=(1, nfft // 2 + 1, 1), # impulse \n",
+ " target_shape=(1, nfft // 2 + 1, 1), # flat spectrum as target \n",
+ " expand=num,\n",
+ " device=device,\n",
+ ")\n",
+ "train_loader, valid_loader = load_dataset(dataset, batch_size=batch_size)\n",
+ "\n",
+ "# Initialize training process\n",
+ "trainer = Trainer(\n",
+ " model, \n",
+ " max_epochs=max_epochs, \n",
+ " lr=lr, \n",
+ " train_dir=train_dir, \n",
+ " device=device)\n",
+ "\n",
+ "# Register the loss functions with their relative weights\n",
+ "trainer.register_criterion(mse_loss(), 1)\n",
+ "trainer.register_criterion(sparsity_loss(), 1, requires_model=True)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Train the model! \n",
+ "For each epoch the trainer launch both training and validation "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "trainer.train(train_loader, valid_loader)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "# Get optimized impulse response\n",
+ "with torch.no_grad():\n",
+ " ir_optim = model.get_time_response(identity=False, fs=fs).squeeze()\n",
+ " mag_optim = model.get_freq_response(identity=False, fs=fs).squeeze() \n",
+ " mag_optim = 20 * torch.log10(mag_optim)\n",
+ "\n",
+ "time_axis = torch.linspace(0, nfft/fs, nfft)\n",
+ "freq_axis = torch.linspace(0, fs/2, nfft//2+1)\n",
+ "\n",
+ "# plot impulse response\n",
+ "plt.figure(figsize=(12, 6))\n",
+ "plt.subplot(2, 1, 1)\n",
+ "plt.plot(time_axis, ir_init.numpy(), label='Initial')\n",
+ "plt.plot(time_axis, ir_optim.numpy(), label='Optimized', alpha=0.7)\n",
+ "plt.xlim(0, 0.5)\n",
+ "plt.legend()\n",
+ "plt.title('Impulse Response')\n",
+ "plt.xlabel('Samples')\n",
+ "plt.ylabel('Amplitude')\n",
+ "\n",
+ "# plot magnitude response\n",
+ "plt.subplot(2, 1, 2)\n",
+ "plt.plot(freq_axis, mag_init.numpy(), label='Initial')\n",
+ "plt.plot(freq_axis, mag_optim.numpy(), label='Optimized', alpha=0.7)\n",
+ "plt.xlim(100, 500)\n",
+ "plt.legend()\n",
+ "plt.title('Magnitude Response')\n",
+ "plt.xlabel('Frequency (Hz)')\n",
+ "plt.ylabel('Magnitude')\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from IPython.display import Audio\n",
+ "\n",
+ "# Play the initial impulse response\n",
+ "print(\"Initial Impulse Response:\")\n",
+ "display(Audio(ir_init.numpy(), rate=fs))\n",
+ "\n",
+ "# Play the optimized impulse response\n",
+ "print(\"Optimized Impulse Response:\")\n",
+ "display(Audio(ir_optim.numpy(), rate=fs))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".flamo-env",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
{flamo-0.1.4 → flamo-0.1.5}/pyproject.toml +2 -1

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "flamo"
- version = "0.1.4"
+ version = "0.1.5"
  authors = [
  { name="Gloria Dal Santo", email="gloria.dalsanto@aalto.fi" },
  { name="Gian Marco De Bortoli", email="gian.debortoli@aalto.fi"},
@@ -34,6 +34,7 @@ dependencies = [
  "pydantic",
  "nnAudio",
  "pyfar",
+ "pyyaml"
  ]

  [project.urls]