flamo 0.1.13__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (257)
  1. {flamo-0.1.13 → flamo-0.2.0}/PKG-INFO +1 -1
  2. {flamo-0.1.13 → flamo-0.2.0}/examples/e0_siso.py +20 -10
  3. {flamo-0.1.13 → flamo-0.2.0}/examples/e1_mimo.py +18 -8
  4. {flamo-0.1.13 → flamo-0.2.0}/examples/e1_vn.py +7 -3
  5. {flamo-0.1.13 → flamo-0.2.0}/examples/e2_chaining_filters.py +19 -8
  6. {flamo-0.1.13 → flamo-0.2.0}/examples/e3_series_class.py +24 -12
  7. {flamo-0.1.13 → flamo-0.2.0}/examples/e4_recursion.py +10 -3
  8. {flamo-0.1.13 → flamo-0.2.0}/examples/e4_recursion_nn.py +6 -2
  9. {flamo-0.1.13 → flamo-0.2.0}/examples/e5_shell.py +35 -15
  10. {flamo-0.1.13 → flamo-0.2.0}/examples/e6_anti_aliasing.py +4 -0
  11. {flamo-0.1.13 → flamo-0.2.0}/examples/e7_biquad.py +12 -6
  12. {flamo-0.1.13 → flamo-0.2.0}/examples/e7_biquad_nn.py +13 -7
  13. {flamo-0.1.13 → flamo-0.2.0}/examples/e7_geq.py +22 -18
  14. {flamo-0.1.13 → flamo-0.2.0}/examples/e7_peq.py +18 -14
  15. {flamo-0.1.13 → flamo-0.2.0}/examples/e7_svf.py +13 -7
  16. {flamo-0.1.13 → flamo-0.2.0}/examples/e8_active_acoustics.py +19 -5
  17. {flamo-0.1.13 → flamo-0.2.0}/examples/e8_colorless_fdn.py +12 -3
  18. {flamo-0.1.13 → flamo-0.2.0}/examples/e8_colorless_sfdn.py +15 -4
  19. {flamo-0.1.13 → flamo-0.2.0}/examples/e8_fdn.py +34 -10
  20. {flamo-0.1.13 → flamo-0.2.0}/examples/e9_loss_profile.py +4 -4
  21. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/config/config.py +3 -1
  22. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/eq.py +22 -18
  23. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/reverb.py +28 -17
  24. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/scattering.py +21 -21
  25. {flamo-0.1.13 → flamo-0.2.0}/flamo/functional.py +74 -52
  26. {flamo-0.1.13 → flamo-0.2.0}/flamo/optimize/dataset.py +7 -5
  27. {flamo-0.1.13 → flamo-0.2.0}/flamo/optimize/surface.py +15 -12
  28. {flamo-0.1.13 → flamo-0.2.0}/flamo/processor/dsp.py +158 -99
  29. {flamo-0.1.13 → flamo-0.2.0}/flamo/processor/system.py +15 -10
  30. {flamo-0.1.13 → flamo-0.2.0}/flamo/utils.py +2 -2
  31. {flamo-0.1.13 → flamo-0.2.0}/pyproject.toml +1 -1
  32. {flamo-0.1.13 → flamo-0.2.0}/.gitignore +0 -0
  33. {flamo-0.1.13 → flamo-0.2.0}/2025_FLAMO_ICASSP_DalSantoDeBortoli_poster.pdf +0 -0
  34. {flamo-0.1.13 → flamo-0.2.0}/LICENSE +0 -0
  35. {flamo-0.1.13 → flamo-0.2.0}/README.md +0 -0
  36. {flamo-0.1.13 → flamo-0.2.0}/examples/run_all.py +0 -0
  37. {flamo-0.1.13 → flamo-0.2.0}/flamo/__init__.py +0 -0
  38. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/__init__.py +0 -0
  39. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/filterbank.py +0 -0
  40. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/minimize.py +0 -0
  41. {flamo-0.1.13 → flamo-0.2.0}/flamo/auxiliary/velvet.py +0 -0
  42. {flamo-0.1.13 → flamo-0.2.0}/flamo/optimize/__init__.py +0 -0
  43. {flamo-0.1.13 → flamo-0.2.0}/flamo/optimize/loss.py +0 -0
  44. {flamo-0.1.13 → flamo-0.2.0}/flamo/optimize/trainer.py +0 -0
  45. {flamo-0.1.13 → flamo-0.2.0}/flamo/optimize/utils.py +0 -0
  46. {flamo-0.1.13 → flamo-0.2.0}/flamo/processor/__init__.py +0 -0
  47. {flamo-0.1.13 → flamo-0.2.0}/notebooks/e7_biquad.ipynb +0 -0
  48. {flamo-0.1.13 → flamo-0.2.0}/notebooks/e8_colorless_fdn.ipynb +0 -0
  49. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker1.wav +0 -0
  50. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker10.wav +0 -0
  51. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker11.wav +0 -0
  52. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker12.wav +0 -0
  53. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker13.wav +0 -0
  54. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker14.wav +0 -0
  55. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker2.wav +0 -0
  56. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker3.wav +0 -0
  57. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker5.wav +0 -0
  58. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker6.wav +0 -0
  59. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker7.wav +0 -0
  60. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker8.wav +0 -0
  61. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic1_speaker9.wav +0 -0
  62. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker1.wav +0 -0
  63. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker10.wav +0 -0
  64. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker11.wav +0 -0
  65. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker12.wav +0 -0
  66. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker13.wav +0 -0
  67. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker14.wav +0 -0
  68. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker2.wav +0 -0
  69. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker3.wav +0 -0
  70. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker5.wav +0 -0
  71. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker6.wav +0 -0
  72. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker7.wav +0 -0
  73. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker8.wav +0 -0
  74. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic2_speaker9.wav +0 -0
  75. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker1.wav +0 -0
  76. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker10.wav +0 -0
  77. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker11.wav +0 -0
  78. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker12.wav +0 -0
  79. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker13.wav +0 -0
  80. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker14.wav +0 -0
  81. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker2.wav +0 -0
  82. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker3.wav +0 -0
  83. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker5.wav +0 -0
  84. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker6.wav +0 -0
  85. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker7.wav +0 -0
  86. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker8.wav +0 -0
  87. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic3_speaker9.wav +0 -0
  88. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker1.wav +0 -0
  89. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker10.wav +0 -0
  90. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker11.wav +0 -0
  91. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker12.wav +0 -0
  92. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker13.wav +0 -0
  93. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker14.wav +0 -0
  94. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker2.wav +0 -0
  95. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker3.wav +0 -0
  96. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker5.wav +0 -0
  97. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker6.wav +0 -0
  98. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker7.wav +0 -0
  99. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker8.wav +0 -0
  100. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic4_speaker9.wav +0 -0
  101. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker1.wav +0 -0
  102. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker10.wav +0 -0
  103. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker11.wav +0 -0
  104. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker12.wav +0 -0
  105. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker13.wav +0 -0
  106. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker14.wav +0 -0
  107. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker2.wav +0 -0
  108. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker3.wav +0 -0
  109. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker5.wav +0 -0
  110. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker6.wav +0 -0
  111. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker7.wav +0 -0
  112. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker8.wav +0 -0
  113. {flamo-0.1.13 → flamo-0.2.0}/rirs/Otala-2024.05.10/mic5_speaker9.wav +0 -0
  114. {flamo-0.1.13 → flamo-0.2.0}/rirs/arni_35_3541_4_2.wav +0 -0
  115. {flamo-0.1.13 → flamo-0.2.0}/rirs/s3_r4_o.wav +0 -0
  116. {flamo-0.1.13 → flamo-0.2.0}/sphinx/Makefile +0 -0
  117. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/auxiliary/eq.doctree +0 -0
  118. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/auxiliary/filterbank.doctree +0 -0
  119. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/auxiliary/minimize.doctree +0 -0
  120. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/auxiliary/reverb.doctree +0 -0
  121. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/auxiliary/scattering.doctree +0 -0
  122. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/environment.pickle +0 -0
  123. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/functional.doctree +0 -0
  124. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/index.doctree +0 -0
  125. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/optimize/dataset.doctree +0 -0
  126. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/optimize/loss.doctree +0 -0
  127. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/optimize/trainer.doctree +0 -0
  128. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/optimize/utils.doctree +0 -0
  129. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/processor/dsp.doctree +0 -0
  130. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/doctrees/processor/system.doctree +0 -0
  131. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/.buildinfo +0 -0
  132. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/.buildinfo.bak +0 -0
  133. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/.nojekyll +0 -0
  134. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/auxiliary/eq.rst.txt +0 -0
  135. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/auxiliary/filterbank.rst.txt +0 -0
  136. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/auxiliary/minimize.rst.txt +0 -0
  137. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/auxiliary/reverb.rst.txt +0 -0
  138. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/auxiliary/scattering.rst.txt +0 -0
  139. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/functional.rst.txt +0 -0
  140. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/index.rst.txt +0 -0
  141. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/optimize/dataset.rst.txt +0 -0
  142. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/optimize/loss.rst.txt +0 -0
  143. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/optimize/trainer.rst.txt +0 -0
  144. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/optimize/utils.rst.txt +0 -0
  145. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/processor/dsp.rst.txt +0 -0
  146. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_sources/processor/system.rst.txt +0 -0
  147. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/_sphinx_javascript_frameworks_compat.js +0 -0
  148. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/alabaster.css +0 -0
  149. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/basic.css +0 -0
  150. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/badge_only.css +0 -0
  151. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff +0 -0
  152. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 +0 -0
  153. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff +0 -0
  154. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 +0 -0
  155. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.eot +0 -0
  156. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.svg +0 -0
  157. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.ttf +0 -0
  158. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff +0 -0
  159. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/fontawesome-webfont.woff2 +0 -0
  160. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff +0 -0
  161. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-bold-italic.woff2 +0 -0
  162. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-bold.woff +0 -0
  163. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-bold.woff2 +0 -0
  164. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff +0 -0
  165. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-normal-italic.woff2 +0 -0
  166. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-normal.woff +0 -0
  167. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/fonts/lato-normal.woff2 +0 -0
  168. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/css/theme.css +0 -0
  169. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/custom.css +0 -0
  170. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/doctools.js +0 -0
  171. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/documentation_options.js +0 -0
  172. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/file.png +0 -0
  173. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bold.eot +0 -0
  174. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bold.ttf +0 -0
  175. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bold.woff +0 -0
  176. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bold.woff2 +0 -0
  177. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.eot +0 -0
  178. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.ttf +0 -0
  179. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.woff +0 -0
  180. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 +0 -0
  181. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-italic.eot +0 -0
  182. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-italic.ttf +0 -0
  183. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-italic.woff +0 -0
  184. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-italic.woff2 +0 -0
  185. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-regular.eot +0 -0
  186. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-regular.ttf +0 -0
  187. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-regular.woff +0 -0
  188. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/Lato/lato-regular.woff2 +0 -0
  189. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot +0 -0
  190. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf +0 -0
  191. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff +0 -0
  192. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 +0 -0
  193. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot +0 -0
  194. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf +0 -0
  195. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff +0 -0
  196. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 +0 -0
  197. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/github-banner.svg +0 -0
  198. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/jquery.js +0 -0
  199. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/js/badge_only.js +0 -0
  200. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/js/theme.js +0 -0
  201. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/js/versions.js +0 -0
  202. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/language_data.js +0 -0
  203. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/minus.png +0 -0
  204. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/plus.png +0 -0
  205. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/pygments.css +0 -0
  206. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/bootstrap.js +0 -0
  207. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/bootstrap.js.LICENSE.txt +0 -0
  208. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/bootstrap.js.map +0 -0
  209. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/fontawesome.js +0 -0
  210. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/fontawesome.js.LICENSE.txt +0 -0
  211. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/fontawesome.js.map +0 -0
  212. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/pydata-sphinx-theme.js +0 -0
  213. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/scripts/pydata-sphinx-theme.js.map +0 -0
  214. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/searchtools.js +0 -0
  215. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/sphinx_highlight.js +0 -0
  216. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/styles/pydata-sphinx-theme.css +0 -0
  217. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/styles/pydata-sphinx-theme.css.map +0 -0
  218. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/styles/theme.css +0 -0
  219. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-brands-400.ttf +0 -0
  220. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-brands-400.woff2 +0 -0
  221. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-regular-400.ttf +0 -0
  222. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-regular-400.woff2 +0 -0
  223. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-solid-900.ttf +0 -0
  224. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/vendor/fontawesome/webfonts/fa-solid-900.woff2 +0 -0
  225. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/_static/webpack-macros.html +0 -0
  226. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/auxiliary/eq.html +0 -0
  227. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/auxiliary/filterbank.html +0 -0
  228. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/auxiliary/minimize.html +0 -0
  229. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/auxiliary/reverb.html +0 -0
  230. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/auxiliary/scattering.html +0 -0
  231. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/functional.html +0 -0
  232. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/genindex.html +0 -0
  233. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/index.html +0 -0
  234. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/objects.inv +0 -0
  235. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/optimize/dataset.html +0 -0
  236. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/optimize/loss.html +0 -0
  237. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/optimize/trainer.html +0 -0
  238. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/optimize/utils.html +0 -0
  239. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/processor/dsp.html +0 -0
  240. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/processor/system.html +0 -0
  241. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/search.html +0 -0
  242. {flamo-0.1.13 → flamo-0.2.0}/sphinx/_build/html/searchindex.js +0 -0
  243. {flamo-0.1.13 → flamo-0.2.0}/sphinx/auxiliary/eq.rst +0 -0
  244. {flamo-0.1.13 → flamo-0.2.0}/sphinx/auxiliary/filterbank.rst +0 -0
  245. {flamo-0.1.13 → flamo-0.2.0}/sphinx/auxiliary/minimize.rst +0 -0
  246. {flamo-0.1.13 → flamo-0.2.0}/sphinx/auxiliary/reverb.rst +0 -0
  247. {flamo-0.1.13 → flamo-0.2.0}/sphinx/auxiliary/scattering.rst +0 -0
  248. {flamo-0.1.13 → flamo-0.2.0}/sphinx/conf.py +0 -0
  249. {flamo-0.1.13 → flamo-0.2.0}/sphinx/functional.rst +0 -0
  250. {flamo-0.1.13 → flamo-0.2.0}/sphinx/index.rst +0 -0
  251. {flamo-0.1.13 → flamo-0.2.0}/sphinx/make.bat +0 -0
  252. {flamo-0.1.13 → flamo-0.2.0}/sphinx/optimize/dataset.rst +0 -0
  253. {flamo-0.1.13 → flamo-0.2.0}/sphinx/optimize/loss.rst +0 -0
  254. {flamo-0.1.13 → flamo-0.2.0}/sphinx/optimize/trainer.rst +0 -0
  255. {flamo-0.1.13 → flamo-0.2.0}/sphinx/optimize/utils.rst +0 -0
  256. {flamo-0.1.13 → flamo-0.2.0}/sphinx/processor/dsp.rst +0 -0
  257. {flamo-0.1.13 → flamo-0.2.0}/sphinx/processor/system.rst +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: flamo
- Version: 0.1.13
+ Version: 0.2.0
  Summary: An Open-Source Library for Frequency-Domain Differentiable Audio Processing
  Project-URL: Homepage, https://github.com/gdalsanto/flamo
  Project-URL: Issues, https://github.com/gdalsanto/flamo/issues
@@ -20,8 +20,8 @@ def example_fft(args) -> None:
  Use of FFT and iFFT modules.
  """
  # ------------------ Module Definition ------------------
- fft = dsp.FFT(nfft=args.nfft)
- ifft = dsp.iFFT(nfft=args.nfft)
+ fft = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ ifft = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  # ------------------ Signal Definition ------------------
  x = signal_gallery(
@@ -31,6 +31,7 @@ def example_fft(args) -> None:
  n=1,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # ------------------ Apply FFT and iFFT -----------------
@@ -58,9 +59,9 @@ def example_gains(args) -> None:

  # ------------------- DSP Definition --------------------
  channels = 1
- filter = dsp.parallelGain(size=(channels,), nfft=args.nfft, device=args.device)
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ filter = dsp.parallelGain(size=(channels,), nfft=args.nfft, device=args.device, dtype=args.dtype)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, filter, output_layer)

@@ -74,6 +75,7 @@ def example_gains(args) -> None:
  n=channels,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter
@@ -104,9 +106,9 @@ def example_gains_2(args) -> None:
  # ------------------- DSP Definition -------------------
  in_ch = 1
  out_ch = 1
- filter = dsp.Gain(size=(out_ch, in_ch), nfft=args.nfft, device=args.device)
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ filter = dsp.Gain(size=(out_ch, in_ch), nfft=args.nfft, device=args.device, dtype=args.dtype)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, filter, output_layer)

@@ -120,6 +122,7 @@ def example_gains_2(args) -> None:
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter before changes
@@ -168,9 +171,10 @@ def example_fir(args) -> None:
  nfft=args.nfft,
  requires_grad=True,
  device=args.device,
+ dtype=args.dtype,
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  model = nn.Sequential(input_layer, filter, output_layer)

@@ -183,6 +187,7 @@ def example_fir(args) -> None:
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Target impulse response
@@ -194,6 +199,7 @@ def example_fir(args) -> None:
  rate=2,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Dataset
@@ -252,6 +258,7 @@ if __name__ == "__main__":
  # ---------------------- Processing -------------------
  parser.add_argument("--nfft", type=int, default=96000, help="FFT size")
  parser.add_argument("--samplerate", type=int, default=48000, help="sampling rate")
+ parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"], help="data type for tensors")
  # ----------------------- Dataset ----------------------
  parser.add_argument(
  "--batch_size", type=int, default=1, help="batch size for training"
@@ -288,6 +295,9 @@ if __name__ == "__main__":
  if args.device == "cuda" and not torch.cuda.is_available():
  args.device = "cpu"

+ # convert dtype string to torch dtype
+ args.dtype = torch.float32 if args.dtype == "float32" else torch.float64
+
  # make output directory
  if args.train_dir is not None:
  if not os.path.isdir(args.train_dir):
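The recurring change across these example hunks is that every processor and signal constructor now takes an explicit `dtype` keyword, exposed on the command line as `--dtype` and converted to a `torch` dtype before the modules are built. Below is a minimal sketch of that pattern, assuming the examples' usual `from flamo.processor import dsp` import and a `(batch, samples, channels)` signal layout (neither is spelled out in these hunks):

```python
# Minimal sketch of the 0.2.0 dtype pattern shown in the hunks above.
# Assumptions: the processor module is imported as `from flamo.processor import dsp`
# and the time-domain input layout is (batch, samples, channels).
import argparse

import torch
import torch.nn as nn

from flamo.processor import dsp

parser = argparse.ArgumentParser()
parser.add_argument("--nfft", type=int, default=96000)
parser.add_argument("--device", type=str, default="cpu")
parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"])
args = parser.parse_args()

# convert dtype string to torch dtype, exactly as the updated examples do
args.dtype = torch.float32 if args.dtype == "float32" else torch.float64

# every module in the chain receives the same dtype
my_dsp = nn.Sequential(
    dsp.FFT(nfft=args.nfft, dtype=args.dtype),
    dsp.parallelGain(size=(1,), nfft=args.nfft, device=args.device, dtype=args.dtype),
    dsp.iFFT(nfft=args.nfft, dtype=args.dtype),
)

# a unit impulse created directly with torch, in the matching dtype
x = torch.zeros(1, args.nfft, 1, dtype=args.dtype, device=args.device)
x[:, 0, :] = 1.0
y = my_dsp(x)
print(y.shape, y.dtype)
```

Switching to single precision is then just `--dtype float32`; the same dtype is handed to every stage of the chain.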
@@ -36,6 +36,7 @@ def example_matrix(args) -> None:
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Test different matrix types
@@ -54,10 +55,11 @@ def example_matrix(args) -> None:
  matrix_type=matrix_type,
  nfft=args.nfft,
  device=args.device,
+ dtype=args.dtype,
  )

- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, matrix_filter, output_layer)

@@ -94,9 +96,10 @@ def example_delays(args) -> None:
  nfft=args.nfft,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, filter, output_layer)

@@ -110,6 +113,7 @@ def example_delays(args) -> None:
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter
@@ -147,9 +151,10 @@ def example_biquads(args) -> None:
  fs=args.samplerate,
  requires_grad=True,
  device=args.device,
+ dtype=args.dtype,
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.Transform(lambda x: torch.abs(x))
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.Transform(lambda x: torch.abs(x), dtype=args.dtype)

  model = nn.Sequential(input_layer, filter, output_layer)

@@ -163,17 +168,18 @@ def example_biquads(args) -> None:
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Target frequency responses
  f_cut_1 = 500 # Cut-off frequency for the first lowpass filter
  g_1 = 10 # Bandpass gain for the first lowpass filter
- b_lp_1, a_lp_1 = lowpass_filter(f_cut_1, g_1, args.samplerate, device=args.device)
+ b_lp_1, a_lp_1 = lowpass_filter(f_cut_1, g_1, args.samplerate, device=args.device, dtype=args.dtype)
  H_lp_1 = biquad2tf(b=b_lp_1, a=a_lp_1, nfft=args.nfft)

  f_cut_2 = 5000 # Cut-off frequency for the second lowpass filter
  g_2 = 0.7 # Bandpass gain for the second lowpass filter
- b_lp_2, a_lp_2 = lowpass_filter(f_cut_2, g_2, args.samplerate, device=args.device)
+ b_lp_2, a_lp_2 = lowpass_filter(f_cut_2, g_2, args.samplerate, device=args.device, dtype=args.dtype)
  H_lp_2 = biquad2tf(b=b_lp_2, a=a_lp_2, nfft=args.nfft)

  target = torch.stack([torch.abs(H_lp_1), torch.abs(H_lp_2)], dim=1).unsqueeze(0)
@@ -242,6 +248,7 @@ if __name__ == "__main__":
  # ---------------------- Processing -------------------
  parser.add_argument("--nfft", type=int, default=96000, help="FFT size")
  parser.add_argument("--samplerate", type=int, default=48000, help="sampling rate")
+ parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"], help="data type for tensors")
  # ----------------------- Dataset ----------------------
  parser.add_argument(
  "--batch_size", type=int, default=1, help="batch size for training"
@@ -278,6 +285,9 @@ if __name__ == "__main__":
  if args.device == "cuda" and not torch.cuda.is_available():
  args.device = "cpu"

+ # convert dtype string to torch dtype
+ args.dtype = torch.float32 if args.dtype == "float32" else torch.float64
+
  # make output directory
  if args.train_dir is not None:
  if not os.path.isdir(args.train_dir):
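The matrix and biquad hunks above also show the other end of the chain changing: instead of an `iFFT`, a `dsp.Transform` output layer returns magnitudes directly, and it too now takes `dtype`. A short sketch of that chain, again assuming the `from flamo.processor import dsp` import path and the `(batch, samples, channels)` layout:

```python
# Sketch of a frequency-domain processor between a dtype-aware FFT input layer
# and a magnitude Transform output layer, as in the hunks above.
# Assumption: the module is imported as `from flamo.processor import dsp`.
import torch
import torch.nn as nn

from flamo.processor import dsp

nfft = 96000
dtype = torch.float64
device = "cpu"

# an orthogonal mixing matrix between 2 inputs and 2 outputs
matrix_filter = dsp.Matrix(
    size=(2, 2),
    matrix_type="orthogonal",
    nfft=nfft,
    device=device,
    dtype=dtype,
)

model = nn.Sequential(
    dsp.FFT(nfft=nfft, dtype=dtype),
    matrix_filter,
    # return the magnitude response instead of going back to the time domain
    dsp.Transform(lambda x: torch.abs(x), dtype=dtype),
)

x = torch.zeros(1, nfft, 2, dtype=dtype, device=device)
x[:, 0, :] = 1.0
mag = model(x)  # magnitude of the FFT bins, one column per channel
```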
@@ -5,6 +5,7 @@ This example shows how to use the new Filter-based velvet noise classes
  that are differentiable and can be used within the FLAMO processing chain.
  """

+ import torch
  import torch.nn as nn

  import matplotlib.pyplot as plt
@@ -21,6 +22,7 @@ def example_velvet_noise_filter():
  in_ch, out_ch = 1, 1
  nfft = 2048 # FFT size for the filter
  length = nfft # Length of the filter
+ dtype = torch.float64 # Can be changed to torch.float32 if needed

  # Create a velvet noise filter
  # size = (length, output_channels, input_channels)
@@ -28,11 +30,12 @@ def example_velvet_noise_filter():
  size=(length, out_ch, in_ch), # 1024 samples, 1x1 matrix
  density=1000.0, # 1000 impulses per second
  sample_rate=48000,
- requires_grad=True # Make it differentiable
+ requires_grad=True, # Make it differentiable
+ dtype=dtype,
  )

- input_layer = dsp.FFT(nfft=nfft)
- output_layer = dsp.iFFT(nfft=nfft)
+ input_layer = dsp.FFT(nfft=nfft, dtype=dtype)
+ output_layer = dsp.iFFT(nfft=nfft, dtype=dtype)

  my_dsp = nn.Sequential(input_layer, velvet_filter, output_layer)

@@ -46,6 +49,7 @@ def example_velvet_noise_filter():
  n=in_ch,
  fs=48000,
  device="cpu",
+ dtype=dtype,
  )

  output_vn = my_dsp(input_sig)
@@ -33,8 +33,8 @@ def example_mimo(args):
  fs=args.samplerate,
  device=args.device,
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, filter1, filter2, output_layer)

@@ -48,6 +48,7 @@ def example_mimo(args):
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter
@@ -87,9 +88,10 @@ def example_siso(args):
  nfft=args.nfft,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, filter1, filter2, output_layer)

@@ -103,6 +105,7 @@ def example_siso(args):
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter
@@ -143,9 +146,10 @@ def example_assign_new_values(args):
  nfft=args.nfft,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = nn.Sequential(input_layer, filter1, filter2, output_layer)

@@ -171,6 +175,7 @@ def example_assign_new_values(args):
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter
@@ -209,9 +214,10 @@ def example_requires_grad(args):
  nfft=args.nfft,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  model = nn.Sequential(input_layer, filter1, filter2, output_layer)

@@ -225,6 +231,7 @@ def example_requires_grad(args):
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Target
@@ -298,6 +305,7 @@ if __name__ == "__main__":
  # ---------------------- Processing -------------------
  parser.add_argument("--nfft", type=int, default=96000, help="FFT size")
  parser.add_argument("--samplerate", type=int, default=48000, help="sampling rate")
+ parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"], help="data type for tensors")
  # ----------------------- Dataset ----------------------
  parser.add_argument(
  "--batch_size", type=int, default=1, help="batch size for training"
@@ -334,6 +342,9 @@ if __name__ == "__main__":
  if args.device == "cuda" and not torch.cuda.is_available():
  args.device = "cpu"

+ # convert dtype string to torch dtype
+ args.dtype = torch.float32 if args.dtype == "float32" else torch.float64
+
  # make output directory
  if args.train_dir is not None:
  if not os.path.isdir(args.train_dir):
@@ -50,8 +50,8 @@ def example_series(args):
  size=(out_ch, btw_ch), nfft=args.nfft, requires_grad=False, device=args.device
  )
  # Input and output layers
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  # Series class
  my_dsp = system.Series(input_layer, filter1, filter2, filter3, output_layer)
@@ -93,8 +93,8 @@ def example_series_with_error(args):
  device=args.device,
  )
  # Input and output layers
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  # Series class
  my_dsp = system.Series(input_layer, filter1, filter2, filter3, output_layer)
@@ -131,8 +131,8 @@ def example_series_OrderedDict(args):
  size=(out_ch, btw_ch), nfft=args.nfft, requires_grad=False, device=args.device
  )
  # Input and output layers
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  # Series class
  my_dsp = system.Series(
@@ -182,8 +182,8 @@ def example_series_nesting(args):
  size=(out_ch, btw_ch), nfft=args.nfft, requires_grad=False, device=args.device
  )
  # Input and output layers
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  # Series class
  filters = OrderedDict({"Gains": filter1, "Delays": filter2, "Eqs": filter3})
@@ -221,9 +221,10 @@ def example_series_training(args):
  nfft=args.nfft,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  # Series of filters
  filters = OrderedDict(
@@ -252,6 +253,7 @@ def example_series_training(args):
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Target
@@ -336,6 +338,7 @@ def example_series_utils(args):
  fs=args.samplerate,
  device=args.device,
  alias_decay_db=alias_decay_db,
+ dtype=args.dtype
  )
  feedback = dsp.Matrix(
  size=(channel_n, channel_n),
@@ -343,6 +346,7 @@ def example_series_utils(args):
  matrix_type="orthogonal",
  device=args.device,
  alias_decay_db=alias_decay_db,
+ dtype=args.dtype
  )
  feedback_loop = system.Recursion(fF=feedforward, fB=feedback)

@@ -350,13 +354,14 @@ def example_series_utils(args):
  my_dsp = system.Series(OrderedDict({"Recursion": feedback_loop}))

  # New filters to add an the beginning
- input_layer = dsp.FFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
  input_gains = dsp.parallelGain(
  size=(channel_n,),
  nfft=args.nfft,
  requires_grad=False,
  device=args.device,
  alias_decay_db=alias_decay_db,
+ dtype=args.dtype
  )
  # New filter to add in the middle
  output_gains = dsp.Gain(
@@ -365,6 +370,7 @@ def example_series_utils(args):
  requires_grad=False,
  device=args.device,
  alias_decay_db=alias_decay_db,
+ dtype=args.dtype
  )
  # New filters to add at the end
  equalization = dsp.GEQ(
@@ -373,8 +379,9 @@ def example_series_utils(args):
  requires_grad=False,
  device=args.device,
  alias_decay_db=alias_decay_db,
+ dtype=args.dtype
  )
- output_layer = dsp.iFFTAntiAlias(nfft=args.nfft, alias_decay_db=alias_decay_db)
+ output_layer = dsp.iFFTAntiAlias(nfft=args.nfft, alias_decay_db=alias_decay_db, dtype=args.dtype)

  # DSP so far
  print(my_dsp)
@@ -409,6 +416,7 @@ def example_series_utils(args):
  n=channel_n,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )
  y = my_dsp(input_signal)

@@ -436,6 +444,7 @@ if __name__ == "__main__":
  # ---------------------- Processing -------------------
  parser.add_argument("--nfft", type=int, default=96000, help="FFT size")
  parser.add_argument("--samplerate", type=int, default=48000, help="sampling rate")
+ parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"], help="data type for tensors")
  # ----------------------- Dataset ----------------------
  parser.add_argument(
  "--batch_size", type=int, default=1, help="batch size for training"
@@ -472,6 +481,9 @@ if __name__ == "__main__":
  if args.device == "cuda" and not torch.cuda.is_available():
  args.device = "cpu"

+ # convert dtype string to torch dtype
+ args.dtype = torch.float32 if args.dtype == "float32" else torch.float64
+
  # make output directory
  if args.train_dir is not None:
  if not os.path.isdir(args.train_dir):
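`example_series_utils` in the hunks above threads both `alias_decay_db` and the new `dtype` through every stage of a feedback loop and closes the chain with `dsp.iFFTAntiAlias`. A condensed sketch of that structure, assuming `from flamo.processor import dsp, system` and substituting a plain `parallelGain` for the feedforward path used in the full example:

```python
# Sketch of the feedback-loop pattern from example_series_utils above, with
# alias_decay_db and dtype passed to every stage. Assumptions: the modules are
# imported as `from flamo.processor import dsp, system`; a parallelGain stands
# in for the feedforward path of the original example.
from collections import OrderedDict

import torch

from flamo.processor import dsp, system

nfft = 96000
channel_n = 4
alias_decay_db = 30
dtype = torch.float64

feedforward = dsp.parallelGain(
    size=(channel_n,), nfft=nfft, alias_decay_db=alias_decay_db, dtype=dtype
)
feedback = dsp.Matrix(
    size=(channel_n, channel_n),
    nfft=nfft,
    matrix_type="orthogonal",
    alias_decay_db=alias_decay_db,
    dtype=dtype,
)
feedback_loop = system.Recursion(fF=feedforward, fB=feedback)

my_dsp = system.Series(
    OrderedDict(
        {
            "input_layer": dsp.FFT(nfft=nfft, dtype=dtype),
            "Recursion": feedback_loop,
            # anti-aliased inverse FFT matching the decay applied inside the loop
            "output_layer": dsp.iFFTAntiAlias(
                nfft=nfft, alias_decay_db=alias_decay_db, dtype=dtype
            ),
        }
    )
)
print(my_dsp)
```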
@@ -30,8 +30,9 @@ def example_recursion(args):
  nfft=args.nfft,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )
- attenuation = dsp.parallelGain(size=(out_ch,), nfft=args.nfft, device=args.device)
+ attenuation = dsp.parallelGain(size=(out_ch,), nfft=args.nfft, device=args.device, dtype=args.dtype)
  rand_vector = torch.rand(attenuation.param.shape)
  attenuation.assign_value(0.3 * rand_vector / torch.norm(rand_vector, p=2))
  feedforward_path = OrderedDict({"delays": delays, "attenuation": attenuation})
@@ -42,6 +43,7 @@ def example_recursion(args):
  matrix_type="orthogonal",
  nfft=args.nfft,
  device=args.device,
+ dtype=args.dtype,
  )

  feedback_path = OrderedDict({"feedback_matrix": feedback_matrix})
@@ -50,8 +52,8 @@ def example_recursion(args):
  recursion = system.Recursion(fF=feedforward_path, fB=feedback_path)

  # Input and output layers
- input_layer = dsp.FFT(nfft=args.nfft)
- output_layer = dsp.iFFT(nfft=args.nfft)
+ input_layer = dsp.FFT(nfft=args.nfft, dtype=args.dtype)
+ output_layer = dsp.iFFT(nfft=args.nfft, dtype=args.dtype)

  my_dsp = system.Series(
  OrderedDict(
@@ -73,6 +75,7 @@ def example_recursion(args):
  n=in_ch,
  fs=args.samplerate,
  device=args.device,
+ dtype=args.dtype,
  )

  # Apply filter
@@ -104,6 +107,7 @@ if __name__ == "__main__":
  # ---------------------- Processing -------------------
  parser.add_argument("--nfft", type=int, default=96000, help="FFT size")
  parser.add_argument("--samplerate", type=int, default=48000, help="sampling rate")
+ parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"], help="data type for tensors")
  # ----------------------- Dataset ----------------------
  parser.add_argument(
  "--batch_size", type=int, default=1, help="batch size for training"
@@ -141,6 +145,9 @@ if __name__ == "__main__":
  if args.device == "cuda" and not torch.cuda.is_available():
  args.device = "cpu"

+ # convert dtype string to torch dtype
+ args.dtype = torch.float32 if args.dtype == "float32" else torch.float64
+
  # make output directory
  if args.train_dir is not None:
  if not os.path.isdir(args.train_dir):
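The recursion hunks also show the parameter-assignment API (`attenuation.param.shape`, `attenuation.assign_value(...)`) working alongside the new dtype. A small sketch of just that piece, assuming the `from flamo.processor import dsp` import path:

```python
# Sketch of re-assigning parameter values on a dtype-aware module, following
# the attenuation gains in example_recursion above. Assumption: the module is
# imported as `from flamo.processor import dsp`.
import torch

from flamo.processor import dsp

nfft = 96000
dtype = torch.float64

attenuation = dsp.parallelGain(size=(4,), nfft=nfft, dtype=dtype)

# draw random gains in the same dtype as the module and normalise their energy
rand_vector = torch.rand(attenuation.param.shape, dtype=dtype)
attenuation.assign_value(0.3 * rand_vector / torch.norm(rand_vector, p=2))

print(attenuation.param.shape, attenuation.param.dtype)
```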
@@ -214,8 +214,8 @@ class nnComb(nn.Module):
  comb = system.Recursion(fF=delays, fB=filt)

  # Create the model with Shell
- input_layer = dsp.FFT(args.nfft)
- output_layer = dsp.Transform(transform=lambda x: torch.abs(x))
+ input_layer = dsp.FFT(args.nfft, dtype=args.dtype)
+ output_layer = dsp.Transform(transform=lambda x: torch.abs(x), dtype=args.dtype)
  self.comb = system.Shell(
  core=comb, input_layer=input_layer, output_layer=output_layer
  )
@@ -344,6 +344,7 @@ if __name__ == "__main__":

  parser.add_argument("--nfft", type=int, default=32000, help="FFT size")
  parser.add_argument("--samplerate", type=int, default=16000, help="sampling rate")
+ parser.add_argument("--dtype", type=str, default="float64", choices=["float32", "float64"], help="data type for tensors")
  parser.add_argument("--num", type=int, default=2**10, help="dataset size")
  parser.add_argument(
  "--device", type=str, default="cuda", help="device to use for computation"
@@ -373,6 +374,9 @@ if __name__ == "__main__":
  if args.device == "cuda" and not torch.cuda.is_available():
  args.device = "cpu"

+ # convert dtype string to torch dtype
+ args.dtype = torch.float32 if args.dtype == "float32" else torch.float64
+
  # Make output directory
  if args.train_dir is not None:
  if not os.path.isdir(args.train_dir):
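Finally, the `nnComb` hunk wraps a `system.Recursion` core in a `system.Shell` with a dtype-aware `FFT` input layer and a magnitude `Transform` output layer. A rough sketch of that wrapper, assuming `from flamo.processor import dsp, system` and using `parallelGain` stand-ins for the comb's delay and filter (the real example builds those separately, outside these hunks):

```python
# Sketch of the Shell wrapper used by nnComb above. Assumptions: modules are
# imported as `from flamo.processor import dsp, system`; parallelGain modules
# stand in for the delay-plus-filter comb of the original class; input layout
# is (batch, samples, channels).
import torch

from flamo.processor import dsp, system

nfft = 32000
dtype = torch.float64

# stand-in feedback loop (the original uses delays in fF and a filter in fB)
core = system.Recursion(
    fF=dsp.parallelGain(size=(1,), nfft=nfft, dtype=dtype),
    fB=dsp.parallelGain(size=(1,), nfft=nfft, dtype=dtype),
)

comb = system.Shell(
    core=core,
    input_layer=dsp.FFT(nfft, dtype=dtype),
    output_layer=dsp.Transform(transform=lambda x: torch.abs(x), dtype=dtype),
)

x = torch.zeros(1, nfft, 1, dtype=dtype)
x[:, 0, :] = 1.0
magnitude = comb(x)  # magnitude response of the feedback loop
```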