spacr 0.3.22.tar.gz → 0.3.30.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. {spacr-0.3.22/spacr.egg-info → spacr-0.3.30}/PKG-INFO +1 -1
  2. {spacr-0.3.22 → spacr-0.3.30}/setup.py +1 -1
  3. {spacr-0.3.22 → spacr-0.3.30}/spacr/deep_spacr.py +131 -227
  4. {spacr-0.3.22 → spacr-0.3.30}/spacr/gui.py +1 -0
  5. {spacr-0.3.22 → spacr-0.3.30}/spacr/gui_core.py +13 -4
  6. {spacr-0.3.22 → spacr-0.3.30}/spacr/gui_utils.py +29 -1
  7. {spacr-0.3.22 → spacr-0.3.30}/spacr/io.py +4 -4
  8. {spacr-0.3.22 → spacr-0.3.30}/spacr/measure.py +1 -38
  9. {spacr-0.3.22 → spacr-0.3.30}/spacr/settings.py +49 -5
  10. {spacr-0.3.22 → spacr-0.3.30}/spacr/utils.py +383 -28
  11. {spacr-0.3.22 → spacr-0.3.30/spacr.egg-info}/PKG-INFO +1 -1
  12. {spacr-0.3.22 → spacr-0.3.30}/.readthedocs.yaml +0 -0
  13. {spacr-0.3.22 → spacr-0.3.30}/LICENSE +0 -0
  14. {spacr-0.3.22 → spacr-0.3.30}/MANIFEST.in +0 -0
  15. {spacr-0.3.22 → spacr-0.3.30}/README.rst +0 -0
  16. {spacr-0.3.22 → spacr-0.3.30}/deploy_docs.sh +0 -0
  17. {spacr-0.3.22 → spacr-0.3.30}/docs/requirements.txt +0 -0
  18. {spacr-0.3.22 → spacr-0.3.30}/docs/source/Makefile +0 -0
  19. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/doctrees/environment.pickle +0 -0
  20. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/index.html +0 -0
  21. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_annotate.html +0 -0
  22. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_classify.html +0 -0
  23. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_make_masks.html +0 -0
  24. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_mask.html +0 -0
  25. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_measure.html +0 -0
  26. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_sequencing.html +0 -0
  27. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/app_umap.html +0 -0
  28. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/core.html +0 -0
  29. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/deep_spacr.html +0 -0
  30. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/graph_learning.html +0 -0
  31. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/gui.html +0 -0
  32. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/gui_core.html +0 -0
  33. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/gui_elements.html +0 -0
  34. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/gui_utils.html +0 -0
  35. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/io.html +0 -0
  36. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/logger.html +0 -0
  37. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/measure.html +0 -0
  38. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/plot.html +0 -0
  39. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/sequencing.html +0 -0
  40. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/settings.html +0 -0
  41. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/sim.html +0 -0
  42. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/timelapse.html +0 -0
  43. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_modules/spacr/utils.html +0 -0
  44. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_sources/index.rst.txt +0 -0
  45. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_sources/modules.rst.txt +0 -0
  46. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_sources/spacr.rst.txt +0 -0
  47. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/_sphinx_javascript_frameworks_compat.js +0 -0
  48. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/basic.css +0 -0
  49. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/badge_only.css +0 -0
  50. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff +0 -0
  51. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 +0 -0
  52. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff +0 -0
  53. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 +0 -0
  54. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.eot +0 -0
  55. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.svg +0 -0
  56. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.ttf +0 -0
  57. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.woff +0 -0
  58. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.woff2 +0 -0
  59. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-bold-italic.woff +0 -0
  60. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-bold-italic.woff2 +0 -0
  61. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-bold.woff +0 -0
  62. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-bold.woff2 +0 -0
  63. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-normal-italic.woff +0 -0
  64. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-normal-italic.woff2 +0 -0
  65. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-normal.woff +0 -0
  66. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/fonts/lato-normal.woff2 +0 -0
  67. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/css/theme.css +0 -0
  68. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/doctools.js +0 -0
  69. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/documentation_options.js +0 -0
  70. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/file.png +0 -0
  71. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/jquery.js +0 -0
  72. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/js/badge_only.js +0 -0
  73. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/js/html5shiv-printshiv.min.js +0 -0
  74. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/js/html5shiv.min.js +0 -0
  75. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/js/theme.js +0 -0
  76. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/language_data.js +0 -0
  77. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/minus.png +0 -0
  78. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/plus.png +0 -0
  79. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/pygments.css +0 -0
  80. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/searchtools.js +0 -0
  81. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/_static/sphinx_highlight.js +0 -0
  82. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/genindex.html +0 -0
  83. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/index.html +0 -0
  84. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/modules.html +0 -0
  85. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/objects.inv +0 -0
  86. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/py-modindex.html +0 -0
  87. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/search.html +0 -0
  88. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/searchindex.js +0 -0
  89. {spacr-0.3.22 → spacr-0.3.30}/docs/source/_build/html/spacr.html +0 -0
  90. {spacr-0.3.22 → spacr-0.3.30}/docs/source/conf.py +0 -0
  91. {spacr-0.3.22 → spacr-0.3.30}/docs/source/index.rst +0 -0
  92. {spacr-0.3.22 → spacr-0.3.30}/docs/source/make.bat +0 -0
  93. {spacr-0.3.22 → spacr-0.3.30}/docs/source/modules.rst +0 -0
  94. {spacr-0.3.22 → spacr-0.3.30}/docs/source/spacr.rst +0 -0
  95. {spacr-0.3.22 → spacr-0.3.30}/environment.yaml +0 -0
  96. {spacr-0.3.22 → spacr-0.3.30}/fonts/OpenSans-Regular.ttf +0 -0
  97. {spacr-0.3.22 → spacr-0.3.30}/notebooks/cv_scoring_nb.ipynb +0 -0
  98. {spacr-0.3.22 → spacr-0.3.30}/notebooks/deep_learning_spacr.ipynb +0 -0
  99. {spacr-0.3.22 → spacr-0.3.30}/notebooks/machine_learning_spacr_nb.ipynb +0 -0
  100. {spacr-0.3.22 → spacr-0.3.30}/notebooks/spacr_0.1_all_settings_git.ipynb +0 -0
  101. {spacr-0.3.22 → spacr-0.3.30}/notebooks/spacr_0.1_minimal.ipynb +0 -0
  102. {spacr-0.3.22 → spacr-0.3.30}/path/home/carruthers/datasets/plate1/measurements/measurements.db +0 -0
  103. {spacr-0.3.22 → spacr-0.3.30}/path/home/carruthers/datasets/plate1/settings/measure_crop_settings.csv +0 -0
  104. {spacr-0.3.22 → spacr-0.3.30}/path/settings/preprocess_generate_masks_settings.csv +0 -0
  105. {spacr-0.3.22 → spacr-0.3.30}/requirements.txt +0 -0
  106. {spacr-0.3.22 → spacr-0.3.30}/settings/measure_crop_settings.csv +0 -0
  107. {spacr-0.3.22 → spacr-0.3.30}/setup.cfg +0 -0
  108. {spacr-0.3.22 → spacr-0.3.30}/setup_docs.sh +0 -0
  109. {spacr-0.3.22 → spacr-0.3.30}/source/conf.py +0 -0
  110. {spacr-0.3.22 → spacr-0.3.30}/source/index.rst +0 -0
  111. {spacr-0.3.22 → spacr-0.3.30}/source/modules.rst +0 -0
  112. {spacr-0.3.22 → spacr-0.3.30}/source/setup.rst +0 -0
  113. {spacr-0.3.22 → spacr-0.3.30}/source/spacr.rst +0 -0
  114. {spacr-0.3.22 → spacr-0.3.30}/spacr/__init__.py +0 -0
  115. {spacr-0.3.22 → spacr-0.3.30}/spacr/__main__.py +0 -0
  116. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_annotate.py +0 -0
  117. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_classify.py +0 -0
  118. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_make_masks.py +0 -0
  119. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_mask.py +0 -0
  120. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_measure.py +0 -0
  121. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_sequencing.py +0 -0
  122. {spacr-0.3.22 → spacr-0.3.30}/spacr/app_umap.py +0 -0
  123. {spacr-0.3.22 → spacr-0.3.30}/spacr/cellpose.py +0 -0
  124. {spacr-0.3.22 → spacr-0.3.30}/spacr/core.py +0 -0
  125. {spacr-0.3.22 → spacr-0.3.30}/spacr/gui_elements.py +0 -0
  126. {spacr-0.3.22 → spacr-0.3.30}/spacr/logger.py +0 -0
  127. {spacr-0.3.22 → spacr-0.3.30}/spacr/mediar.py +0 -0
  128. {spacr-0.3.22 → spacr-0.3.30}/spacr/ml.py +0 -0
  129. {spacr-0.3.22 → spacr-0.3.30}/spacr/openai.py +0 -0
  130. {spacr-0.3.22 → spacr-0.3.30}/spacr/plot.py +0 -0
  131. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/.gitignore +0 -0
  132. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/LICENSE +0 -0
  133. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/README.md +0 -0
  134. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/SetupDict.py +0 -0
  135. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/baseline.json +0 -0
  136. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/mediar_example.json +0 -0
  137. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -0
  138. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -0
  139. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -0
  140. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -0
  141. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -0
  142. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -0
  143. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -0
  144. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/BasePredictor.py +0 -0
  145. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/BaseTrainer.py +0 -0
  146. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -0
  147. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -0
  148. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -0
  149. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/Baseline/utils.py +0 -0
  150. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -0
  151. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -0
  152. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -0
  153. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -0
  154. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -0
  155. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/__init__.py +0 -0
  156. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/core/utils.py +0 -0
  157. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/evaluate.py +0 -0
  158. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/generate_mapping.py +0 -0
  159. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
  160. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
  161. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/image/failure_cases.png +0 -0
  162. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
  163. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
  164. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/image/mediar_results.png +0 -0
  165. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/main.py +0 -0
  166. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/predict.py +0 -0
  167. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/requirements.txt +0 -0
  168. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/__init__.py +0 -0
  169. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -0
  170. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -0
  171. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -0
  172. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -0
  173. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -0
  174. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
  175. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -0
  176. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -0
  177. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -0
  178. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/measures.py +0 -0
  179. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -0
  180. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -0
  181. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/MEDIAR/train_tools/utils.py +0 -0
  182. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/data/lopit.csv +0 -0
  183. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/data/toxoplasma_metadata.csv +0 -0
  184. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/OFL.txt +0 -0
  185. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/OpenSans-Italic-VariableFont_wdth,wght.ttf +0 -0
  186. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/OpenSans-VariableFont_wdth,wght.ttf +0 -0
  187. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/README.txt +0 -0
  188. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-Bold.ttf +0 -0
  189. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-BoldItalic.ttf +0 -0
  190. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-ExtraBold.ttf +0 -0
  191. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-ExtraBoldItalic.ttf +0 -0
  192. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-Italic.ttf +0 -0
  193. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-Light.ttf +0 -0
  194. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-LightItalic.ttf +0 -0
  195. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-Medium.ttf +0 -0
  196. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-MediumItalic.ttf +0 -0
  197. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-Regular.ttf +0 -0
  198. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-SemiBold.ttf +0 -0
  199. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans-SemiBoldItalic.ttf +0 -0
  200. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Bold.ttf +0 -0
  201. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-BoldItalic.ttf +0 -0
  202. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBold.ttf +0 -0
  203. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBoldItalic.ttf +0 -0
  204. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Italic.ttf +0 -0
  205. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Light.ttf +0 -0
  206. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-LightItalic.ttf +0 -0
  207. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Medium.ttf +0 -0
  208. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-MediumItalic.ttf +0 -0
  209. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Regular.ttf +0 -0
  210. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBold.ttf +0 -0
  211. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBoldItalic.ttf +0 -0
  212. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Bold.ttf +0 -0
  213. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-BoldItalic.ttf +0 -0
  214. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBold.ttf +0 -0
  215. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBoldItalic.ttf +0 -0
  216. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Italic.ttf +0 -0
  217. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Light.ttf +0 -0
  218. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-LightItalic.ttf +0 -0
  219. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Medium.ttf +0 -0
  220. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-MediumItalic.ttf +0 -0
  221. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Regular.ttf +0 -0
  222. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBold.ttf +0 -0
  223. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBoldItalic.ttf +0 -0
  224. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/abort.png +0 -0
  225. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/annotate.png +0 -0
  226. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/cellpose_all.png +0 -0
  227. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/cellpose_masks.png +0 -0
  228. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/classify.png +0 -0
  229. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/convert.png +0 -0
  230. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/default.png +0 -0
  231. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/dna_matrix.mp4 +0 -0
  232. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/download.png +0 -0
  233. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/logo.pdf +0 -0
  234. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/logo_spacr.png +0 -0
  235. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/logo_spacr_1.png +0 -0
  236. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/make_masks.png +0 -0
  237. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/map_barcodes.png +0 -0
  238. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/mask.png +0 -0
  239. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/measure.png +0 -0
  240. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/ml_analyze.png +0 -0
  241. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/plaque.png +0 -0
  242. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/recruitment.png +0 -0
  243. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/regression.png +0 -0
  244. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/run.png +0 -0
  245. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/sequencing.png +0 -0
  246. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/settings.png +0 -0
  247. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/train_cellpose.png +0 -0
  248. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/icons/umap.png +0 -0
  249. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif +0 -0
  250. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif +0 -0
  251. {spacr-0.3.22 → spacr-0.3.30}/spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif +0 -0
  252. {spacr-0.3.22 → spacr-0.3.30}/spacr/sequencing.py +0 -0
  253. {spacr-0.3.22 → spacr-0.3.30}/spacr/sim.py +0 -0
  254. {spacr-0.3.22 → spacr-0.3.30}/spacr/submodules.py +0 -0
  255. {spacr-0.3.22 → spacr-0.3.30}/spacr/timelapse.py +0 -0
  256. {spacr-0.3.22 → spacr-0.3.30}/spacr/toxo.py +0 -0
  257. {spacr-0.3.22 → spacr-0.3.30}/spacr/version.py +0 -0
  258. {spacr-0.3.22 → spacr-0.3.30}/spacr.egg-info/SOURCES.txt +0 -0
  259. {spacr-0.3.22 → spacr-0.3.30}/spacr.egg-info/dependency_links.txt +0 -0
  260. {spacr-0.3.22 → spacr-0.3.30}/spacr.egg-info/entry_points.txt +0 -0
  261. {spacr-0.3.22 → spacr-0.3.30}/spacr.egg-info/requires.txt +0 -0
  262. {spacr-0.3.22 → spacr-0.3.30}/spacr.egg-info/top_level.txt +0 -0
  263. {spacr-0.3.22 → spacr-0.3.30}/tests/test_annotate_app.py +0 -0
  264. {spacr-0.3.22 → spacr-0.3.30}/tests/test_core.py +0 -0
  265. {spacr-0.3.22 → spacr-0.3.30}/tests/test_gui_classify_app.py +0 -0
  266. {spacr-0.3.22 → spacr-0.3.30}/tests/test_gui_mask_app.py +0 -0
  267. {spacr-0.3.22 → spacr-0.3.30}/tests/test_gui_measure_app.py +0 -0
  268. {spacr-0.3.22 → spacr-0.3.30}/tests/test_gui_sim_app.py +0 -0
  269. {spacr-0.3.22 → spacr-0.3.30}/tests/test_gui_utils.py +0 -0
  270. {spacr-0.3.22 → spacr-0.3.30}/tests/test_io.py +0 -0
  271. {spacr-0.3.22 → spacr-0.3.30}/tests/test_mask_app.py +0 -0
  272. {spacr-0.3.22 → spacr-0.3.30}/tests/test_measure.py +0 -0
  273. {spacr-0.3.22 → spacr-0.3.30}/tests/test_plot.py +0 -0
  274. {spacr-0.3.22 → spacr-0.3.30}/tests/test_sim.py +0 -0
  275. {spacr-0.3.22 → spacr-0.3.30}/tests/test_timelapse.py +0 -0
  276. {spacr-0.3.22 → spacr-0.3.30}/tests/test_train.py +0 -0
  277. {spacr-0.3.22 → spacr-0.3.30}/tests/test_umap.py +0 -0
  278. {spacr-0.3.22 → spacr-0.3.30}/tests/test_utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: spacr
- Version: 0.3.22
+ Version: 0.3.30
  Summary: Spatial phenotype analysis of crisp screens (SpaCr)
  Home-page: https://github.com/EinarOlafsson/spacr
  Author: Einar Birnir Olafsson
setup.py
@@ -67,7 +67,7 @@ dependencies = [
 
  setup(
  name="spacr",
- version="0.3.22",
+ version="0.3.30",
  author="Einar Birnir Olafsson",
  author_email="olafsson@med.umich.com",
  description="Spatial phenotype analysis of crisp screens (SpaCr)",
spacr/deep_spacr.py
@@ -610,264 +610,168 @@ def train_model(dst, model_type, train_loaders, epochs=100, learning_rate=0.0001
 
  return model, model_path
 
- def visualize_saliency_map(settings):
- from spacr.utils import SaliencyMapGenerator, print_progress
- from spacr.io import TarImageDataset # Assuming you have a dataset class
- from torchvision.utils import make_grid
-
+ def generate_activation_map(settings):
+
+ from .utils import SaliencyMapGenerator, GradCAMGenerator, SelectChannels, activation_maps_to_database, activation_correlations_to_database
+ from .utils import print_progress, save_settings, calculate_activation_correlations
+ from .io import TarImageDataset
+ from .settings import get_default_generate_activation_map_settings
+
+ torch.cuda.empty_cache()
+ gc.collect()
+
+ plt.clf()
  use_cuda = torch.cuda.is_available()
  device = torch.device("cuda" if use_cuda else "cpu")
-
+
+ source_folder = os.path.dirname(os.path.dirname(settings['dataset']))
+ settings['src'] = source_folder
+ settings = get_default_generate_activation_map_settings(settings)
+ save_settings(settings, name=f"{settings['cam_type']}_settings", show=False)
+
+ if settings['model_type'] == 'maxvit' and settings['target_layer'] == None:
+ settings['target_layer'] = 'base_model.blocks.3.layers.1.layers.MBconv.layers.conv_b'
+ if settings['cam_type'] in ['saliency_image', 'saliency_channel']:
+ settings['target_layer'] = None
+
  # Set number of jobs for loading
- if settings['n_jobs'] is None:
+ n_jobs = settings['n_jobs']
+ if n_jobs is None:
  n_jobs = max(1, cpu_count() - 4)
- else:
- n_jobs = settings['n_jobs']
 
  # Set transforms for images
- if settings['normalize']:
- transform = transforms.Compose([
- transforms.ToTensor(),
- transforms.CenterCrop(size=(settings['image_size'], settings['image_size'])),
- transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
- else:
- transform = transforms.Compose([
- transforms.ToTensor(),
- transforms.CenterCrop(size=(settings['image_size'], settings['image_size']))])
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.CenterCrop(size=(settings['image_size'], settings['image_size'])),
+ transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) if settings['normalize_input'] else None,
+ SelectChannels(settings['channels'])
+ ])
 
  # Handle dataset path
- if os.path.exists(settings['dataset']):
- tar_path = settings['dataset']
- else:
+ if not os.path.exists(settings['dataset']):
  print(f"Dataset not found at {settings['dataset']}")
  return
-
- if settings.get('save', False):
- if settings['dtype'] not in ['uint8', 'uint16']:
- print("Invalid dtype in settings. Please use 'uint8' or 'uint16'.")
- return
 
  # Load the model
  model = torch.load(settings['model_path'])
  model.to(device)
- model.eval() # Ensure the model is in evaluation mode
+ model.eval()
 
- # Create directory for saving saliency maps if it does not exist
- if settings.get('save', False):
- dataset_dir = os.path.dirname(tar_path)
- dataset_name = os.path.splitext(os.path.basename(tar_path))[0]
- save_dir = os.path.join(dataset_dir, dataset_name, 'saliency_maps')
+ # Create directory for saving activation maps if it does not exist
+ dataset_dir = os.path.dirname(settings['dataset'])
+ dataset_name = os.path.splitext(os.path.basename(settings['dataset']))[0]
+ save_dir = os.path.join(dataset_dir, dataset_name, settings['cam_type'])
+ batch_grid_fldr = os.path.join(save_dir, 'batch_grids')
+
+ if settings['save']:
  os.makedirs(save_dir, exist_ok=True)
- print(f"Saliency maps will be saved in: {save_dir}")
-
+ print(f"Activation maps will be saved in: {save_dir}")
+
+ if settings['plot']:
+ os.makedirs(batch_grid_fldr, exist_ok=True)
+ print(f"Batch grid maps will be saved in: {batch_grid_fldr}")
+
  # Load dataset
- dataset = TarImageDataset(tar_path, transform=transform)
- data_loader = DataLoader(dataset, batch_size=settings['batch_size'], shuffle=True, num_workers=n_jobs, pin_memory=True)
-
- # Initialize SaliencyMapGenerator
- cam_generator = SaliencyMapGenerator(model)
+ dataset = TarImageDataset(settings['dataset'], transform=transform)
+ data_loader = DataLoader(dataset, batch_size=settings['batch_size'], shuffle=settings['shuffle'], num_workers=n_jobs, pin_memory=True)
+
+ # Initialize generator based on cam_type
+ if settings['cam_type'] in ['gradcam', 'gradcam_pp']:
+ cam_generator = GradCAMGenerator(model, target_layer=settings['target_layer'], cam_type=settings['cam_type'])
+ elif settings['cam_type'] in ['saliency_image', 'saliency_channel']:
+ cam_generator = SaliencyMapGenerator(model)
+
  time_ls = []
-
  for batch_idx, (inputs, filenames) in enumerate(data_loader):
  start = time.time()
+ img_paths = []
  inputs = inputs.to(device)
-
- saliency_maps, predicted_classes = cam_generator.compute_saliency_and_predictions(inputs)
-
- if settings['saliency_mode'] not in ['mean', 'sum']:
- print("To generate channel average or sum saliency maps set saliency_mode to 'mean' or 'sum', respectively.")
-
- if settings['saliency_mode'] == 'mean':
- saliency_maps = saliency_maps.mean(dim=1, keepdim=True)
-
- elif settings['saliency_mode'] == 'sum':
- saliency_maps = saliency_maps.sum(dim=1, keepdim=True)
-
- # Example usage with the class
- if settings.get('plot', False):
- if settings['plot_mode'] not in ['mean', 'channel', '3-channel']:
- print("Invalid plot_mode in settings. Please use 'mean', 'channel', or '3-channel'.")
- return
- else:
- cam_generator.plot_saliency_grid(inputs, saliency_maps, predicted_classes, mode=settings['plot_mode'])
-
- if settings.get('save', False):
- for i in range(inputs.size(0)):
- saliency_map = saliency_maps[i].detach().cpu().numpy()
-
- # Check dtype in settings and normalize accordingly
- if settings['dtype'] == 'uint16':
- saliency_map = np.clip(saliency_map, 0, 1) * 65535
- saliency_map = saliency_map.astype(np.uint16)
- mode = 'I;16'
- elif settings['dtype'] == 'uint8':
- saliency_map = np.clip(saliency_map, 0, 1) * 255
- saliency_map = saliency_map.astype(np.uint8)
- mode = 'L' # Grayscale mode for uint8
-
- # Get the class prediction (0 or 1)
- class_pred = predicted_classes[i].item()
-
- save_class_dir = os.path.join(save_dir, f'class_{class_pred}')
- os.makedirs(save_class_dir, exist_ok=True)
- save_path = os.path.join(save_class_dir, filenames[i])
-
- # Handle different cases based on saliency_map dimensions
- if saliency_map.ndim == 3: # Multi-channel case (C, H, W)
- if saliency_map.shape[0] == 3: # RGB-like saliency map
- saliency_image = Image.fromarray(np.moveaxis(saliency_map, 0, -1), mode="RGB") # Convert (C, H, W) to (H, W, C)
- elif saliency_map.shape[0] == 1: # Single-channel case (1, H, W)
- saliency_map = np.squeeze(saliency_map) # Remove the extra channel dimension
- saliency_image = Image.fromarray(saliency_map, mode=mode) # Use grayscale mode for single-channel
- else:
- raise ValueError(f"Unexpected number of channels: {saliency_map.shape[0]}")
-
- elif saliency_map.ndim == 2: # Single-channel case (H, W)
- saliency_image = Image.fromarray(saliency_map, mode=mode) # Keep single channel (H, W)
-
- else:
- raise ValueError(f"Unexpected number of dimensions: {saliency_map.ndim}")
-
- # Save the image
- saliency_image.save(save_path)
 
+ # Compute activation maps and predictions
+ if settings['cam_type'] in ['gradcam', 'gradcam_pp']:
+ activation_maps, predicted_classes = cam_generator.compute_gradcam_and_predictions(inputs)
+ elif settings['cam_type'] in ['saliency_image', 'saliency_channel']:
+ activation_maps, predicted_classes = cam_generator.compute_saliency_and_predictions(inputs)
+
+ # Move activation maps to CPU
+ activation_maps = activation_maps.cpu()
+
+ # Sum saliency maps for 'saliency_image' type
+ if settings['cam_type'] == 'saliency_image':
+ summed_activation_maps = []
+ for i in range(activation_maps.size(0)):
+ activation_map = activation_maps[i]
+ #print(f"1: {activation_map.shape}")
+ activation_map_sum = activation_map.sum(dim=0, keepdim=False)
+ #print(f"2: {activation_map.shape}")
+ activation_map_sum = np.squeeze(activation_map_sum, axis=0)
+ #print(f"3: {activation_map_sum.shape}")
+ summed_activation_maps.append(activation_map_sum)
+ activation_maps = torch.stack(summed_activation_maps)
+
+ # For plotting
+ if settings['plot']:
+ fig = cam_generator.plot_activation_grid(inputs, activation_maps, predicted_classes, overlay=settings['overlay'], normalize=settings['normalize'])
+ pdf_save_path = os.path.join(batch_grid_fldr,f"batch_{batch_idx}_grid.pdf")
+ fig.savefig(pdf_save_path, format='pdf')
+ print(f"Saved batch grid to {pdf_save_path}")
+ #plt.show()
+ display(fig)
+
+ for i in range(inputs.size(0)):
+ activation_map = activation_maps[i].detach().numpy()
+
+ if settings['cam_type'] in ['saliency_image', 'gradcam', 'gradcam_pp']:
+ #activation_map = activation_map.sum(axis=0)
+ activation_map = (activation_map - activation_map.min()) / (activation_map.max() - activation_map.min())
+ activation_map = (activation_map * 255).astype(np.uint8)
+ activation_image = Image.fromarray(activation_map, mode='L')
+
+ elif settings['cam_type'] == 'saliency_channel':
+ # Handle each channel separately and save as RGB
+ rgb_activation_map = np.zeros((activation_map.shape[1], activation_map.shape[2], 3), dtype=np.uint8)
+ for c in range(min(activation_map.shape[0], 3)): # Limit to 3 channels for RGB
+ channel_map = activation_map[c]
+ channel_map = (channel_map - channel_map.min()) / (channel_map.max() - channel_map.min())
+ rgb_activation_map[:, :, c] = (channel_map * 255).astype(np.uint8)
+ activation_image = Image.fromarray(rgb_activation_map, mode='RGB')
+
+ # Save activation maps
+ class_pred = predicted_classes[i].item()
+ parts = filenames[i].split('_')
+ plate = parts[0]
+ well = parts[1]
+ save_class_dir = os.path.join(save_dir, f'class_{class_pred}', str(plate), str(well))
+ os.makedirs(save_class_dir, exist_ok=True)
+ save_path = os.path.join(save_class_dir, f'{filenames[i]}')
+ if settings['save']:
+ activation_image.save(save_path)
+ img_paths.append(save_path)
+
+ if settings['save']:
+ activation_maps_to_database(img_paths, source_folder, settings)
+
+ if settings['correlation']:
+ df = calculate_activation_correlations(inputs, activation_maps, filenames, manders_thresholds=settings['manders_thresholds'])
+ if settings['plot']:
+ display(df)
+ if settings['save']:
+ activation_correlations_to_database(df, img_paths, source_folder, settings)
 
  stop = time.time()
  duration = stop - start
  time_ls.append(duration)
  files_processed = batch_idx * settings['batch_size']
- files_to_process = len(data_loader)
- print_progress(files_processed, files_to_process, n_jobs=n_jobs, time_ls=time_ls, batch_size=settings['batch_size'], operation_type="Generating Saliency Maps")
-
- print("Saliency map generation complete.")
-
- def visualize_saliency_map_v1(src, model_type='maxvit', model_path='', image_size=224, channels=[1,2,3], normalize=True, class_names=None, save_saliency=False, save_dir='saliency_maps'):
+ files_to_process = len(data_loader) * settings['batch_size']
+ print_progress(files_processed, files_to_process, n_jobs=n_jobs, time_ls=time_ls, batch_size=settings['batch_size'], operation_type="Generating Activation Maps")
 
- from spacr.utils import SaliencyMapGenerator, preprocess_image
-
- use_cuda = torch.cuda.is_available()
- device = torch.device("cuda" if use_cuda else "cpu")
-
- # Load the entire model object
- model = torch.load(model_path)
- model.to(device)
-
- # Create directory for saving saliency maps if it does not exist
- if save_saliency and not os.path.exists(save_dir):
- os.makedirs(save_dir)
-
- # Collect all images and their tensors
- images = []
- input_tensors = []
- filenames = []
- for file in os.listdir(src):
- if not file.endswith('.png'):
- continue
- image_path = os.path.join(src, file)
- image, input_tensor = preprocess_image(image_path, normalize=normalize, image_size=image_size, channels=channels)
- images.append(image)
- input_tensors.append(input_tensor)
- filenames.append(file)
-
- input_tensors = torch.cat(input_tensors).to(device)
- class_labels = torch.zeros(input_tensors.size(0), dtype=torch.long).to(device) # Replace with actual class labels if available
-
- # Generate saliency maps
- cam_generator = SaliencyMapGenerator(model)
- saliency_maps = cam_generator.compute_saliency_maps(input_tensors, class_labels)
-
- # Convert saliency maps to numpy arrays
- saliency_maps = saliency_maps.cpu().numpy()
-
- N = len(images)
-
- dst = os.path.join(src, 'saliency_maps')
-
- for i in range(N):
- fig, axes = plt.subplots(1, 3, figsize=(20, 5))
-
- # Original image
- axes[0].imshow(images[i])
- axes[0].axis('off')
- if class_names:
- axes[0].set_title(f"Class: {class_names[class_labels[i].item()]}")
-
- # Saliency Map
- axes[1].imshow(saliency_maps[i, 0], cmap='hot')
- axes[1].axis('off')
- axes[1].set_title("Saliency Map")
-
- # Overlay
- overlay = np.array(images[i])
- overlay = overlay / overlay.max()
- saliency_map_rgb = np.stack([saliency_maps[i, 0]] * 3, axis=-1) # Convert saliency map to RGB
- overlay = (overlay * 0.5 + saliency_map_rgb * 0.5).clip(0, 1)
- axes[2].imshow(overlay)
- axes[2].axis('off')
- axes[2].set_title("Overlay")
-
- plt.tight_layout()
- plt.show()
-
- # Save the saliency map if required
- if save_saliency:
- os.makedirs(dst, exist_ok=True)
- saliency_image = Image.fromarray((saliency_maps[i, 0] * 255).astype(np.uint8))
- saliency_image.save(os.path.join(dst, f'saliency_{filenames[i]}'))
-
- def visualize_grad_cam(src, model_path, target_layers=None, image_size=224, channels=[1, 2, 3], normalize=True, class_names=None, save_cam=False, save_dir='grad_cam'):
-
- from spacr.utils import GradCAM, preprocess_image, show_cam_on_image, recommend_target_layers
-
- use_cuda = torch.cuda.is_available()
- device = torch.device("cuda" if use_cuda else "cpu")
-
- model = torch.load(model_path)
- model.to(device)
-
- # If no target layers provided, recommend a target layer
- if target_layers is None:
- target_layers, all_layers = recommend_target_layers(model)
- print(f"No target layer provided. Using recommended layer: {target_layers[0]}")
- print("All possible target layers:")
- for layer in all_layers:
- print(layer)
-
- grad_cam = GradCAM(model=model, target_layers=target_layers, use_cuda=use_cuda)
-
- if save_cam and not os.path.exists(save_dir):
- os.makedirs(save_dir)
-
- images = []
- filenames = []
- for file in os.listdir(src):
- if not file.endswith('.png'):
- continue
- image_path = os.path.join(src, file)
- image, input_tensor = preprocess_image(image_path, normalize=normalize, image_size=image_size, channels=channels)
- images.append(image)
- filenames.append(file)
-
- input_tensor = input_tensor.to(device)
- cam = grad_cam(input_tensor)
- cam_image = show_cam_on_image(np.array(image) / 255.0, cam)
-
- fig, ax = plt.subplots(1, 2, figsize=(10, 5))
- ax[0].imshow(image)
- ax[0].axis('off')
- ax[0].set_title("Original Image")
- ax[1].imshow(cam_image)
- ax[1].axis('off')
- ax[1].set_title("Grad-CAM")
- plt.show()
-
- if save_cam:
- cam_pil = Image.fromarray(cam_image)
- cam_pil.save(os.path.join(save_dir, f'grad_cam_{file}'))
+ torch.cuda.empty_cache()
+ gc.collect()
+ print("Activation map generation complete.")
 
  def visualize_classes(model, dtype, class_names, **kwargs):
 
- from spacr.utils import class_visualization
+ from .utils import class_visualization
 
  for target_y in range(2): # Assuming binary classification
  print(f"Visualizing class: {class_names[target_y]}")
spacr/gui.py
@@ -57,6 +57,7 @@ class MainApp(tk.Tk):
  "Map Barcodes": (lambda frame: initiate_root(self, 'map_barcodes'), "Map barcodes to data."),
  "Regression": (lambda frame: initiate_root(self, 'regression'), "Perform regression analysis."),
  "Recruitment": (lambda frame: initiate_root(self, 'recruitment'), "Analyze recruitment data."),
+ "Activation": (lambda frame: initiate_root(self, 'activation'), "Generate activation maps of computer vision models and measure channel-activation correlation."),
  "Plaque": (lambda frame: initiate_root(self, 'analyze_plaques'), "Analyze plaque data.")
  }
 
spacr/gui_core.py
@@ -379,10 +379,13 @@ def set_globals(thread_control_var, q_var, console_output_var, parent_frame_var,
  index_control = index_control_var
 
  def import_settings(settings_type='mask'):
- from .gui_utils import convert_settings_dict_for_gui, hide_all_settings
  global vars_dict, scrollable_frame, button_scrollable_frame
- from .settings import generate_fields, set_default_settings_preprocess_generate_masks, get_measure_crop_settings, set_default_train_test_model, set_default_generate_barecode_mapping, set_default_umap_image_settings, get_analyze_recruitment_default_settings
 
+ from .gui_utils import convert_settings_dict_for_gui, hide_all_settings
+ from .settings import generate_fields, set_default_settings_preprocess_generate_masks, get_measure_crop_settings, set_default_train_test_model
+ from .settings import set_default_generate_barecode_mapping, set_default_umap_image_settings, get_analyze_recruitment_default_settings
+ from .settings import get_default_generate_activation_map_settings
+ #activation
  def read_settings_from_csv(csv_file_path):
  settings = {}
  with open(csv_file_path, newline='') as csvfile:
@@ -422,6 +425,8 @@ def import_settings(settings_type='mask'):
  settings = set_default_umap_image_settings(settings={})
  elif settings_type == 'recruitment':
  settings = get_analyze_recruitment_default_settings(settings={})
+ elif settings_type == 'activation':
+ settings = get_default_generate_activation_map_settings(settings={})
  elif settings_type == 'analyze_plaques':
  settings = {}
  elif settings_type == 'convert':
@@ -436,8 +441,10 @@ def import_settings(settings_type='mask'):
 
  def setup_settings_panel(vertical_container, settings_type='mask'):
  global vars_dict, scrollable_frame
- from .settings import get_identify_masks_finetune_default_settings, set_default_analyze_screen, set_default_settings_preprocess_generate_masks, get_measure_crop_settings, deep_spacr_defaults, set_default_generate_barecode_mapping, set_default_umap_image_settings
- from .settings import get_map_barcodes_default_settings, get_analyze_recruitment_default_settings, get_check_cellpose_models_default_settings, generate_fields, get_perform_regression_default_settings, get_train_cellpose_default_settings
+ from .settings import get_identify_masks_finetune_default_settings, set_default_analyze_screen, set_default_settings_preprocess_generate_masks
+ from .settings import get_measure_crop_settings, deep_spacr_defaults, set_default_generate_barecode_mapping, set_default_umap_image_settings
+ from .settings import get_map_barcodes_default_settings, get_analyze_recruitment_default_settings, get_check_cellpose_models_default_settings
+ from .settings import generate_fields, get_perform_regression_default_settings, get_train_cellpose_default_settings, get_default_generate_activation_map_settings
  from .gui_utils import convert_settings_dict_for_gui
  from .gui_elements import set_element_size
 
@@ -480,6 +487,8 @@ def setup_settings_panel(vertical_container, settings_type='mask'):
  settings = get_perform_regression_default_settings(settings={})
  elif settings_type == 'recruitment':
  settings = get_analyze_recruitment_default_settings(settings={})
+ elif settings_type == 'activation':
+ settings = get_default_generate_activation_map_settings(settings={})
  elif settings_type == 'analyze_plaques':
  settings = {'src':'path to images'}
  elif settings_type == 'convert':
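Note: the gui_core.py hunks above wire a new 'activation' settings type into the GUI, backed by get_default_generate_activation_map_settings from spacr.settings (its body is part of the settings.py change, which is not shown in this dump). A small sketch for inspecting those defaults outside the GUI; it assumes only what the diff shows, namely that the function accepts settings={} and returns a dict.

# Sketch: print the default settings behind the new 'activation' panel.
from spacr.settings import get_default_generate_activation_map_settings

defaults = get_default_generate_activation_map_settings(settings={})
for key, value in sorted(defaults.items()):
    print(f"{key}: {value}")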
spacr/gui_utils.py
@@ -77,7 +77,7 @@ def load_app(root, app_name, app_func):
  else:
  proceed_with_app(root, app_name, app_func)
 
- def parse_list(value):
+ def parse_list_v1(value):
  """
  Parses a string representation of a list and returns the parsed list.
 
@@ -98,6 +98,34 @@ def parse_list(value):
  return parsed_value
  elif all(isinstance(item, str) for item in parsed_value):
  return parsed_value
+ elif all(isinstance(item, float) for item in parsed_value):
+ return parsed_value
+ else:
+ raise ValueError("List contains mixed types or unsupported types")
+ else:
+ raise ValueError(f"Expected a list but got {type(parsed_value).__name__}")
+ except (ValueError, SyntaxError) as e:
+ raise ValueError(f"Invalid format for list: {value}. Error: {e}")
+
+ def parse_list(value):
+ """
+ Parses a string representation of a list and returns the parsed list.
+
+ Args:
+ value (str): The string representation of the list.
+
+ Returns:
+ list: The parsed list, which can contain integers, floats, or strings.
+
+ Raises:
+ ValueError: If the input value is not a valid list format or contains mixed types or unsupported types.
+ """
+ try:
+ parsed_value = ast.literal_eval(value)
+ if isinstance(parsed_value, list):
+ # Check if all elements are homogeneous (either all int, float, or str)
+ if all(isinstance(item, (int, float, str)) for item in parsed_value):
+ return parsed_value
  else:
  raise ValueError("List contains mixed types or unsupported types")
  else:
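Note: the practical effect of this gui_utils.py change is that the old validator is kept as parse_list_v1 (now also accepting all-float lists), while the new parse_list accepts any list whose elements are int, float, or str, including mixed ones. Illustrative calls (values are made up):

from spacr.gui_utils import parse_list

print(parse_list("[1, 2, 3]"))           # [1, 2, 3]
print(parse_list("[0.5, 0.75, 0.95]"))   # all-float lists parse
print(parse_list("['cell', 2, 0.5]"))    # mixed int/float/str now passes validation
parse_list("[None, 1]")                  # raises ValueError: unsupported type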
spacr/io.py
@@ -2861,10 +2861,10 @@ def generate_dataset(settings={}):
  date_name = datetime.date.today().strftime('%y%m%d')
  if len(settings['src']) > 1:
  date_name = f"{date_name}_combined"
- if not settings['file_metadata'] is None:
- tar_name = f"{date_name}_{settings['experiment']}_{settings['file_metadata']}.tar"
- else:
- tar_name = f"{date_name}_{settings['experiment']}.tar"
+ #if not settings['file_metadata'] is None:
+ # tar_name = f"{date_name}_{settings['experiment']}_{settings['file_metadata']}.tar"
+ #else:
+ tar_name = f"{date_name}_{settings['experiment']}.tar"
  tar_name = os.path.join(dst, tar_name)
  if os.path.exists(tar_name):
  number = random.randint(1, 100)
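Note: after this io.py change, file_metadata no longer affects the archive name; generate_dataset always names the tar from the date and experiment. Illustrative result (experiment name and destination are made up):

import datetime
import os

date_name = datetime.date.today().strftime('%y%m%d')
experiment = 'screen1'                      # hypothetical experiment name
tar_name = f"{date_name}_{experiment}.tar"  # file_metadata is no longer appended
print(os.path.join('/data/plate1/datasets', tar_name))  # e.g. .../241105_screen1.tar under a made-up dst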
spacr/measure.py
@@ -652,43 +652,6 @@ def img_list_to_grid(grid, titles=None):
  plt.tight_layout(pad=0.1)
  return fig
 
- def filepaths_to_database(img_paths, settings, source_folder, crop_mode):
- from. utils import _map_wells_png
- png_df = pd.DataFrame(img_paths, columns=['png_path'])
-
- png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
-
- parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=settings['timelapse'])))
-
- columns = ['plate', 'row', 'col', 'field']
-
- if settings['timelapse']:
- columns = columns + ['time_id']
-
- columns = columns + ['prcfo']
-
- if crop_mode == 'cell':
- columns = columns + ['cell_id']
-
- if crop_mode == 'nucleus':
- columns = columns + ['nucleus_id']
-
- if crop_mode == 'pathogen':
- columns = columns + ['pathogen_id']
-
- if crop_mode == 'cytoplasm':
- columns = columns + ['cytoplasm_id']
-
- png_df[columns] = parts
-
- try:
- conn = sqlite3.connect(f'{source_folder}/measurements/measurements.db', timeout=5)
- png_df.to_sql('png_list', conn, if_exists='append', index=False)
- conn.commit()
- except sqlite3.OperationalError as e:
- print(f"SQLite error: {e}", flush=True)
- traceback.print_exc()
-
  #@log_function_call
  def _measure_crop_core(index, time_ls, file, settings):
 
@@ -711,7 +674,7 @@ def _measure_crop_core(index, time_ls, file, settings):
  """
 
  from .plot import _plot_cropped_arrays
- from .utils import _merge_overlapping_objects, _filter_object, _relabel_parent_with_child_labels, _exclude_objects, normalize_to_dtype
+ from .utils import _merge_overlapping_objects, _filter_object, _relabel_parent_with_child_labels, _exclude_objects, normalize_to_dtype, filepaths_to_database
  from .utils import _merge_and_save_to_database, _crop_center, _find_bounding_box, _generate_names, _get_percentiles
 
  figs = {}
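Note: filepaths_to_database is deleted from spacr.measure and now comes from spacr.utils (see the updated import in _measure_crop_core above). A hedged call sketch, assuming the utils version kept the signature of the removed definition; all argument values are placeholders.

# Assumption: same signature as the removed measure.py definition shown above.
from spacr.utils import filepaths_to_database

filepaths_to_database(
    img_paths=['/data/plate1/data/plate1_A01_1_cell_0.png'],  # placeholder PNG paths
    settings={'timelapse': False},   # the removed code only read settings['timelapse']
    source_folder='/data/plate1',    # rows are appended to <source_folder>/measurements/measurements.db
    crop_mode='cell',                # adds a 'cell_id' column to the png_list table
)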