spacr 0.3.22__tar.gz → 0.3.31__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. {spacr-0.3.22/spacr.egg-info → spacr-0.3.31}/PKG-INFO +1 -1
  2. {spacr-0.3.22 → spacr-0.3.31}/setup.py +1 -1
  3. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_annotate.py +1 -2
  4. {spacr-0.3.22 → spacr-0.3.31}/spacr/deep_spacr.py +131 -227
  5. {spacr-0.3.22 → spacr-0.3.31}/spacr/gui.py +1 -0
  6. {spacr-0.3.22 → spacr-0.3.31}/spacr/gui_core.py +13 -4
  7. {spacr-0.3.22 → spacr-0.3.31}/spacr/gui_elements.py +72 -49
  8. {spacr-0.3.22 → spacr-0.3.31}/spacr/gui_utils.py +33 -44
  9. {spacr-0.3.22 → spacr-0.3.31}/spacr/io.py +4 -4
  10. {spacr-0.3.22 → spacr-0.3.31}/spacr/measure.py +1 -38
  11. {spacr-0.3.22 → spacr-0.3.31}/spacr/plot.py +0 -2
  12. {spacr-0.3.22 → spacr-0.3.31}/spacr/settings.py +50 -5
  13. {spacr-0.3.22 → spacr-0.3.31}/spacr/utils.py +383 -28
  14. {spacr-0.3.22 → spacr-0.3.31/spacr.egg-info}/PKG-INFO +1 -1
  15. {spacr-0.3.22 → spacr-0.3.31}/.readthedocs.yaml +0 -0
  16. {spacr-0.3.22 → spacr-0.3.31}/LICENSE +0 -0
  17. {spacr-0.3.22 → spacr-0.3.31}/MANIFEST.in +0 -0
  18. {spacr-0.3.22 → spacr-0.3.31}/README.rst +0 -0
  19. {spacr-0.3.22 → spacr-0.3.31}/deploy_docs.sh +0 -0
  20. {spacr-0.3.22 → spacr-0.3.31}/docs/requirements.txt +0 -0
  21. {spacr-0.3.22 → spacr-0.3.31}/docs/source/Makefile +0 -0
  22. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/doctrees/environment.pickle +0 -0
  23. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/index.html +0 -0
  24. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_annotate.html +0 -0
  25. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_classify.html +0 -0
  26. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_make_masks.html +0 -0
  27. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_mask.html +0 -0
  28. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_measure.html +0 -0
  29. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_sequencing.html +0 -0
  30. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/app_umap.html +0 -0
  31. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/core.html +0 -0
  32. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/deep_spacr.html +0 -0
  33. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/graph_learning.html +0 -0
  34. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/gui.html +0 -0
  35. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/gui_core.html +0 -0
  36. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/gui_elements.html +0 -0
  37. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/gui_utils.html +0 -0
  38. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/io.html +0 -0
  39. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/logger.html +0 -0
  40. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/measure.html +0 -0
  41. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/plot.html +0 -0
  42. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/sequencing.html +0 -0
  43. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/settings.html +0 -0
  44. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/sim.html +0 -0
  45. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/timelapse.html +0 -0
  46. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_modules/spacr/utils.html +0 -0
  47. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_sources/index.rst.txt +0 -0
  48. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_sources/modules.rst.txt +0 -0
  49. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_sources/spacr.rst.txt +0 -0
  50. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/_sphinx_javascript_frameworks_compat.js +0 -0
  51. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/basic.css +0 -0
  52. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/badge_only.css +0 -0
  53. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff +0 -0
  54. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 +0 -0
  55. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff +0 -0
  56. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 +0 -0
  57. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.eot +0 -0
  58. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.svg +0 -0
  59. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.ttf +0 -0
  60. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.woff +0 -0
  61. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/fontawesome-webfont.woff2 +0 -0
  62. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-bold-italic.woff +0 -0
  63. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-bold-italic.woff2 +0 -0
  64. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-bold.woff +0 -0
  65. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-bold.woff2 +0 -0
  66. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-normal-italic.woff +0 -0
  67. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-normal-italic.woff2 +0 -0
  68. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-normal.woff +0 -0
  69. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/fonts/lato-normal.woff2 +0 -0
  70. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/css/theme.css +0 -0
  71. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/doctools.js +0 -0
  72. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/documentation_options.js +0 -0
  73. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/file.png +0 -0
  74. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/jquery.js +0 -0
  75. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/js/badge_only.js +0 -0
  76. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/js/html5shiv-printshiv.min.js +0 -0
  77. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/js/html5shiv.min.js +0 -0
  78. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/js/theme.js +0 -0
  79. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/language_data.js +0 -0
  80. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/minus.png +0 -0
  81. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/plus.png +0 -0
  82. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/pygments.css +0 -0
  83. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/searchtools.js +0 -0
  84. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/_static/sphinx_highlight.js +0 -0
  85. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/genindex.html +0 -0
  86. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/index.html +0 -0
  87. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/modules.html +0 -0
  88. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/objects.inv +0 -0
  89. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/py-modindex.html +0 -0
  90. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/search.html +0 -0
  91. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/searchindex.js +0 -0
  92. {spacr-0.3.22 → spacr-0.3.31}/docs/source/_build/html/spacr.html +0 -0
  93. {spacr-0.3.22 → spacr-0.3.31}/docs/source/conf.py +0 -0
  94. {spacr-0.3.22 → spacr-0.3.31}/docs/source/index.rst +0 -0
  95. {spacr-0.3.22 → spacr-0.3.31}/docs/source/make.bat +0 -0
  96. {spacr-0.3.22 → spacr-0.3.31}/docs/source/modules.rst +0 -0
  97. {spacr-0.3.22 → spacr-0.3.31}/docs/source/spacr.rst +0 -0
  98. {spacr-0.3.22 → spacr-0.3.31}/environment.yaml +0 -0
  99. {spacr-0.3.22 → spacr-0.3.31}/fonts/OpenSans-Regular.ttf +0 -0
  100. {spacr-0.3.22 → spacr-0.3.31}/notebooks/cv_scoring_nb.ipynb +0 -0
  101. {spacr-0.3.22 → spacr-0.3.31}/notebooks/deep_learning_spacr.ipynb +0 -0
  102. {spacr-0.3.22 → spacr-0.3.31}/notebooks/machine_learning_spacr_nb.ipynb +0 -0
  103. {spacr-0.3.22 → spacr-0.3.31}/notebooks/spacr_0.1_all_settings_git.ipynb +0 -0
  104. {spacr-0.3.22 → spacr-0.3.31}/notebooks/spacr_0.1_minimal.ipynb +0 -0
  105. {spacr-0.3.22 → spacr-0.3.31}/path/home/carruthers/datasets/plate1/measurements/measurements.db +0 -0
  106. {spacr-0.3.22 → spacr-0.3.31}/path/home/carruthers/datasets/plate1/settings/measure_crop_settings.csv +0 -0
  107. {spacr-0.3.22 → spacr-0.3.31}/path/settings/preprocess_generate_masks_settings.csv +0 -0
  108. {spacr-0.3.22 → spacr-0.3.31}/requirements.txt +0 -0
  109. {spacr-0.3.22 → spacr-0.3.31}/settings/measure_crop_settings.csv +0 -0
  110. {spacr-0.3.22 → spacr-0.3.31}/setup.cfg +0 -0
  111. {spacr-0.3.22 → spacr-0.3.31}/setup_docs.sh +0 -0
  112. {spacr-0.3.22 → spacr-0.3.31}/source/conf.py +0 -0
  113. {spacr-0.3.22 → spacr-0.3.31}/source/index.rst +0 -0
  114. {spacr-0.3.22 → spacr-0.3.31}/source/modules.rst +0 -0
  115. {spacr-0.3.22 → spacr-0.3.31}/source/setup.rst +0 -0
  116. {spacr-0.3.22 → spacr-0.3.31}/source/spacr.rst +0 -0
  117. {spacr-0.3.22 → spacr-0.3.31}/spacr/__init__.py +0 -0
  118. {spacr-0.3.22 → spacr-0.3.31}/spacr/__main__.py +0 -0
  119. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_classify.py +0 -0
  120. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_make_masks.py +0 -0
  121. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_mask.py +0 -0
  122. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_measure.py +0 -0
  123. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_sequencing.py +0 -0
  124. {spacr-0.3.22 → spacr-0.3.31}/spacr/app_umap.py +0 -0
  125. {spacr-0.3.22 → spacr-0.3.31}/spacr/cellpose.py +0 -0
  126. {spacr-0.3.22 → spacr-0.3.31}/spacr/core.py +0 -0
  127. {spacr-0.3.22 → spacr-0.3.31}/spacr/logger.py +0 -0
  128. {spacr-0.3.22 → spacr-0.3.31}/spacr/mediar.py +0 -0
  129. {spacr-0.3.22 → spacr-0.3.31}/spacr/ml.py +0 -0
  130. {spacr-0.3.22 → spacr-0.3.31}/spacr/openai.py +0 -0
  131. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/.gitignore +0 -0
  132. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/LICENSE +0 -0
  133. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/README.md +0 -0
  134. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/SetupDict.py +0 -0
  135. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/baseline.json +0 -0
  136. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/mediar_example.json +0 -0
  137. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -0
  138. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -0
  139. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -0
  140. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -0
  141. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -0
  142. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -0
  143. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -0
  144. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/BasePredictor.py +0 -0
  145. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/BaseTrainer.py +0 -0
  146. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -0
  147. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -0
  148. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -0
  149. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/Baseline/utils.py +0 -0
  150. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -0
  151. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -0
  152. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -0
  153. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -0
  154. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -0
  155. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/__init__.py +0 -0
  156. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/core/utils.py +0 -0
  157. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/evaluate.py +0 -0
  158. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/generate_mapping.py +0 -0
  159. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
  160. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
  161. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/image/failure_cases.png +0 -0
  162. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
  163. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
  164. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/image/mediar_results.png +0 -0
  165. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/main.py +0 -0
  166. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/predict.py +0 -0
  167. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/requirements.txt +0 -0
  168. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/__init__.py +0 -0
  169. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -0
  170. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -0
  171. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -0
  172. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -0
  173. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -0
  174. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
  175. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -0
  176. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -0
  177. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -0
  178. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/measures.py +0 -0
  179. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -0
  180. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -0
  181. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/MEDIAR/train_tools/utils.py +0 -0
  182. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/data/lopit.csv +0 -0
  183. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/data/toxoplasma_metadata.csv +0 -0
  184. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/OFL.txt +0 -0
  185. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/OpenSans-Italic-VariableFont_wdth,wght.ttf +0 -0
  186. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/OpenSans-VariableFont_wdth,wght.ttf +0 -0
  187. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/README.txt +0 -0
  188. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-Bold.ttf +0 -0
  189. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-BoldItalic.ttf +0 -0
  190. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-ExtraBold.ttf +0 -0
  191. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-ExtraBoldItalic.ttf +0 -0
  192. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-Italic.ttf +0 -0
  193. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-Light.ttf +0 -0
  194. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-LightItalic.ttf +0 -0
  195. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-Medium.ttf +0 -0
  196. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-MediumItalic.ttf +0 -0
  197. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-Regular.ttf +0 -0
  198. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-SemiBold.ttf +0 -0
  199. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans-SemiBoldItalic.ttf +0 -0
  200. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Bold.ttf +0 -0
  201. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-BoldItalic.ttf +0 -0
  202. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBold.ttf +0 -0
  203. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-ExtraBoldItalic.ttf +0 -0
  204. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Italic.ttf +0 -0
  205. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Light.ttf +0 -0
  206. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-LightItalic.ttf +0 -0
  207. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Medium.ttf +0 -0
  208. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-MediumItalic.ttf +0 -0
  209. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-Regular.ttf +0 -0
  210. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBold.ttf +0 -0
  211. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_Condensed-SemiBoldItalic.ttf +0 -0
  212. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Bold.ttf +0 -0
  213. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-BoldItalic.ttf +0 -0
  214. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBold.ttf +0 -0
  215. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-ExtraBoldItalic.ttf +0 -0
  216. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Italic.ttf +0 -0
  217. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Light.ttf +0 -0
  218. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-LightItalic.ttf +0 -0
  219. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Medium.ttf +0 -0
  220. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-MediumItalic.ttf +0 -0
  221. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-Regular.ttf +0 -0
  222. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBold.ttf +0 -0
  223. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/font/open_sans/static/OpenSans_SemiCondensed-SemiBoldItalic.ttf +0 -0
  224. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/abort.png +0 -0
  225. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/annotate.png +0 -0
  226. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/cellpose_all.png +0 -0
  227. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/cellpose_masks.png +0 -0
  228. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/classify.png +0 -0
  229. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/convert.png +0 -0
  230. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/default.png +0 -0
  231. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/dna_matrix.mp4 +0 -0
  232. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/download.png +0 -0
  233. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/logo.pdf +0 -0
  234. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/logo_spacr.png +0 -0
  235. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/logo_spacr_1.png +0 -0
  236. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/make_masks.png +0 -0
  237. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/map_barcodes.png +0 -0
  238. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/mask.png +0 -0
  239. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/measure.png +0 -0
  240. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/ml_analyze.png +0 -0
  241. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/plaque.png +0 -0
  242. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/recruitment.png +0 -0
  243. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/regression.png +0 -0
  244. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/run.png +0 -0
  245. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/sequencing.png +0 -0
  246. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/settings.png +0 -0
  247. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/train_cellpose.png +0 -0
  248. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/icons/umap.png +0 -0
  249. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif +0 -0
  250. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif +0 -0
  251. {spacr-0.3.22 → spacr-0.3.31}/spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif +0 -0
  252. {spacr-0.3.22 → spacr-0.3.31}/spacr/sequencing.py +0 -0
  253. {spacr-0.3.22 → spacr-0.3.31}/spacr/sim.py +0 -0
  254. {spacr-0.3.22 → spacr-0.3.31}/spacr/submodules.py +0 -0
  255. {spacr-0.3.22 → spacr-0.3.31}/spacr/timelapse.py +0 -0
  256. {spacr-0.3.22 → spacr-0.3.31}/spacr/toxo.py +0 -0
  257. {spacr-0.3.22 → spacr-0.3.31}/spacr/version.py +0 -0
  258. {spacr-0.3.22 → spacr-0.3.31}/spacr.egg-info/SOURCES.txt +0 -0
  259. {spacr-0.3.22 → spacr-0.3.31}/spacr.egg-info/dependency_links.txt +0 -0
  260. {spacr-0.3.22 → spacr-0.3.31}/spacr.egg-info/entry_points.txt +0 -0
  261. {spacr-0.3.22 → spacr-0.3.31}/spacr.egg-info/requires.txt +0 -0
  262. {spacr-0.3.22 → spacr-0.3.31}/spacr.egg-info/top_level.txt +0 -0
  263. {spacr-0.3.22 → spacr-0.3.31}/tests/test_annotate_app.py +0 -0
  264. {spacr-0.3.22 → spacr-0.3.31}/tests/test_core.py +0 -0
  265. {spacr-0.3.22 → spacr-0.3.31}/tests/test_gui_classify_app.py +0 -0
  266. {spacr-0.3.22 → spacr-0.3.31}/tests/test_gui_mask_app.py +0 -0
  267. {spacr-0.3.22 → spacr-0.3.31}/tests/test_gui_measure_app.py +0 -0
  268. {spacr-0.3.22 → spacr-0.3.31}/tests/test_gui_sim_app.py +0 -0
  269. {spacr-0.3.22 → spacr-0.3.31}/tests/test_gui_utils.py +0 -0
  270. {spacr-0.3.22 → spacr-0.3.31}/tests/test_io.py +0 -0
  271. {spacr-0.3.22 → spacr-0.3.31}/tests/test_mask_app.py +0 -0
  272. {spacr-0.3.22 → spacr-0.3.31}/tests/test_measure.py +0 -0
  273. {spacr-0.3.22 → spacr-0.3.31}/tests/test_plot.py +0 -0
  274. {spacr-0.3.22 → spacr-0.3.31}/tests/test_sim.py +0 -0
  275. {spacr-0.3.22 → spacr-0.3.31}/tests/test_timelapse.py +0 -0
  276. {spacr-0.3.22 → spacr-0.3.31}/tests/test_train.py +0 -0
  277. {spacr-0.3.22 → spacr-0.3.31}/tests/test_umap.py +0 -0
  278. {spacr-0.3.22 → spacr-0.3.31}/tests/test_utils.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: spacr
- Version: 0.3.22
+ Version: 0.3.31
  Summary: Spatial phenotype analysis of crisp screens (SpaCr)
  Home-page: https://github.com/EinarOlafsson/spacr
  Author: Einar Birnir Olafsson
@@ -67,7 +67,7 @@ dependencies = [

  setup(
  name="spacr",
- version="0.3.22",
+ version="0.3.31",
  author="Einar Birnir Olafsson",
  author_email="olafsson@med.umich.com",
  description="Spatial phenotype analysis of crisp screens (SpaCr)",
@@ -20,7 +20,7 @@ def initiate_annotation_app(parent_frame):
  settings['img_size'] = list(map(int, settings['img_size'].split(','))) # Convert string to list of integers
  settings['percentiles'] = list(map(int, settings['percentiles'].split(','))) # Convert string to list of integers
  settings['normalize'] = settings['normalize'].lower() == 'true'
-
+ settings['normalize_channels'] = settings['normalize_channels'].split(',')
  try:
  settings['measurement'] = settings['measurement'].split(',') if settings['measurement'] else None
  settings['threshold'] = None if settings['threshold'].lower() == 'none' else int(settings['threshold'])
@@ -38,7 +38,6 @@ def initiate_annotation_app(parent_frame):
  settings[key] = None

  settings_window.destroy()
-
  annotate_app(parent_frame, settings)

  start_button = spacrButton(settings_window, text="annotate", command=start_annotation_app, show_text=False)
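Note: the new normalize_channels value reaches the annotation launcher as a comma-separated string from the GUI and is split into a list alongside the other conversions shown above. A minimal sketch of those conversions (the dict shows only the affected keys, and the values are illustrative, not defaults taken from the package):

    settings = {'img_size': '200,200', 'percentiles': '1,99',
                'normalize': 'True', 'normalize_channels': 'r,g'}

    settings['img_size'] = list(map(int, settings['img_size'].split(',')))        # [200, 200]
    settings['percentiles'] = list(map(int, settings['percentiles'].split(',')))  # [1, 99]
    settings['normalize'] = settings['normalize'].lower() == 'true'               # True
    settings['normalize_channels'] = settings['normalize_channels'].split(',')    # ['r', 'g']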
@@ -610,264 +610,168 @@ def train_model(dst, model_type, train_loaders, epochs=100, learning_rate=0.0001

  return model, model_path

- def visualize_saliency_map(settings):
- from spacr.utils import SaliencyMapGenerator, print_progress
- from spacr.io import TarImageDataset # Assuming you have a dataset class
- from torchvision.utils import make_grid
-
+ def generate_activation_map(settings):
+
+ from .utils import SaliencyMapGenerator, GradCAMGenerator, SelectChannels, activation_maps_to_database, activation_correlations_to_database
+ from .utils import print_progress, save_settings, calculate_activation_correlations
+ from .io import TarImageDataset
+ from .settings import get_default_generate_activation_map_settings
+
+ torch.cuda.empty_cache()
+ gc.collect()
+
+ plt.clf()
  use_cuda = torch.cuda.is_available()
  device = torch.device("cuda" if use_cuda else "cpu")
-
+
+ source_folder = os.path.dirname(os.path.dirname(settings['dataset']))
+ settings['src'] = source_folder
+ settings = get_default_generate_activation_map_settings(settings)
+ save_settings(settings, name=f"{settings['cam_type']}_settings", show=False)
+
+ if settings['model_type'] == 'maxvit' and settings['target_layer'] == None:
+ settings['target_layer'] = 'base_model.blocks.3.layers.1.layers.MBconv.layers.conv_b'
+ if settings['cam_type'] in ['saliency_image', 'saliency_channel']:
+ settings['target_layer'] = None
+
  # Set number of jobs for loading
- if settings['n_jobs'] is None:
+ n_jobs = settings['n_jobs']
+ if n_jobs is None:
  n_jobs = max(1, cpu_count() - 4)
- else:
- n_jobs = settings['n_jobs']

  # Set transforms for images
- if settings['normalize']:
- transform = transforms.Compose([
- transforms.ToTensor(),
- transforms.CenterCrop(size=(settings['image_size'], settings['image_size'])),
- transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
- else:
- transform = transforms.Compose([
- transforms.ToTensor(),
- transforms.CenterCrop(size=(settings['image_size'], settings['image_size']))])
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.CenterCrop(size=(settings['image_size'], settings['image_size'])),
+ transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) if settings['normalize_input'] else None,
+ SelectChannels(settings['channels'])
+ ])

  # Handle dataset path
- if os.path.exists(settings['dataset']):
- tar_path = settings['dataset']
- else:
+ if not os.path.exists(settings['dataset']):
  print(f"Dataset not found at {settings['dataset']}")
  return
-
- if settings.get('save', False):
- if settings['dtype'] not in ['uint8', 'uint16']:
- print("Invalid dtype in settings. Please use 'uint8' or 'uint16'.")
- return

  # Load the model
  model = torch.load(settings['model_path'])
  model.to(device)
- model.eval() # Ensure the model is in evaluation mode
+ model.eval()

- # Create directory for saving saliency maps if it does not exist
- if settings.get('save', False):
- dataset_dir = os.path.dirname(tar_path)
- dataset_name = os.path.splitext(os.path.basename(tar_path))[0]
- save_dir = os.path.join(dataset_dir, dataset_name, 'saliency_maps')
+ # Create directory for saving activation maps if it does not exist
+ dataset_dir = os.path.dirname(settings['dataset'])
+ dataset_name = os.path.splitext(os.path.basename(settings['dataset']))[0]
+ save_dir = os.path.join(dataset_dir, dataset_name, settings['cam_type'])
+ batch_grid_fldr = os.path.join(save_dir, 'batch_grids')
+
+ if settings['save']:
  os.makedirs(save_dir, exist_ok=True)
- print(f"Saliency maps will be saved in: {save_dir}")
-
+ print(f"Activation maps will be saved in: {save_dir}")
+
+ if settings['plot']:
+ os.makedirs(batch_grid_fldr, exist_ok=True)
+ print(f"Batch grid maps will be saved in: {batch_grid_fldr}")
+
  # Load dataset
- dataset = TarImageDataset(tar_path, transform=transform)
- data_loader = DataLoader(dataset, batch_size=settings['batch_size'], shuffle=True, num_workers=n_jobs, pin_memory=True)
-
- # Initialize SaliencyMapGenerator
- cam_generator = SaliencyMapGenerator(model)
+ dataset = TarImageDataset(settings['dataset'], transform=transform)
+ data_loader = DataLoader(dataset, batch_size=settings['batch_size'], shuffle=settings['shuffle'], num_workers=n_jobs, pin_memory=True)
+
+ # Initialize generator based on cam_type
+ if settings['cam_type'] in ['gradcam', 'gradcam_pp']:
+ cam_generator = GradCAMGenerator(model, target_layer=settings['target_layer'], cam_type=settings['cam_type'])
+ elif settings['cam_type'] in ['saliency_image', 'saliency_channel']:
+ cam_generator = SaliencyMapGenerator(model)
+
  time_ls = []
-
  for batch_idx, (inputs, filenames) in enumerate(data_loader):
  start = time.time()
+ img_paths = []
  inputs = inputs.to(device)
-
- saliency_maps, predicted_classes = cam_generator.compute_saliency_and_predictions(inputs)
-
- if settings['saliency_mode'] not in ['mean', 'sum']:
- print("To generate channel average or sum saliency maps set saliency_mode to 'mean' or 'sum', respectively.")
-
- if settings['saliency_mode'] == 'mean':
- saliency_maps = saliency_maps.mean(dim=1, keepdim=True)
-
- elif settings['saliency_mode'] == 'sum':
- saliency_maps = saliency_maps.sum(dim=1, keepdim=True)
-
- # Example usage with the class
- if settings.get('plot', False):
- if settings['plot_mode'] not in ['mean', 'channel', '3-channel']:
- print("Invalid plot_mode in settings. Please use 'mean', 'channel', or '3-channel'.")
- return
- else:
- cam_generator.plot_saliency_grid(inputs, saliency_maps, predicted_classes, mode=settings['plot_mode'])
-
- if settings.get('save', False):
- for i in range(inputs.size(0)):
- saliency_map = saliency_maps[i].detach().cpu().numpy()
-
- # Check dtype in settings and normalize accordingly
- if settings['dtype'] == 'uint16':
- saliency_map = np.clip(saliency_map, 0, 1) * 65535
- saliency_map = saliency_map.astype(np.uint16)
- mode = 'I;16'
- elif settings['dtype'] == 'uint8':
- saliency_map = np.clip(saliency_map, 0, 1) * 255
- saliency_map = saliency_map.astype(np.uint8)
- mode = 'L' # Grayscale mode for uint8
-
- # Get the class prediction (0 or 1)
- class_pred = predicted_classes[i].item()
-
- save_class_dir = os.path.join(save_dir, f'class_{class_pred}')
- os.makedirs(save_class_dir, exist_ok=True)
- save_path = os.path.join(save_class_dir, filenames[i])
-
- # Handle different cases based on saliency_map dimensions
- if saliency_map.ndim == 3: # Multi-channel case (C, H, W)
- if saliency_map.shape[0] == 3: # RGB-like saliency map
- saliency_image = Image.fromarray(np.moveaxis(saliency_map, 0, -1), mode="RGB") # Convert (C, H, W) to (H, W, C)
- elif saliency_map.shape[0] == 1: # Single-channel case (1, H, W)
- saliency_map = np.squeeze(saliency_map) # Remove the extra channel dimension
- saliency_image = Image.fromarray(saliency_map, mode=mode) # Use grayscale mode for single-channel
- else:
- raise ValueError(f"Unexpected number of channels: {saliency_map.shape[0]}")
-
- elif saliency_map.ndim == 2: # Single-channel case (H, W)
- saliency_image = Image.fromarray(saliency_map, mode=mode) # Keep single channel (H, W)
-
- else:
- raise ValueError(f"Unexpected number of dimensions: {saliency_map.ndim}")
-
- # Save the image
- saliency_image.save(save_path)

+ # Compute activation maps and predictions
+ if settings['cam_type'] in ['gradcam', 'gradcam_pp']:
+ activation_maps, predicted_classes = cam_generator.compute_gradcam_and_predictions(inputs)
+ elif settings['cam_type'] in ['saliency_image', 'saliency_channel']:
+ activation_maps, predicted_classes = cam_generator.compute_saliency_and_predictions(inputs)
+
+ # Move activation maps to CPU
+ activation_maps = activation_maps.cpu()
+
+ # Sum saliency maps for 'saliency_image' type
+ if settings['cam_type'] == 'saliency_image':
+ summed_activation_maps = []
+ for i in range(activation_maps.size(0)):
+ activation_map = activation_maps[i]
+ #print(f"1: {activation_map.shape}")
+ activation_map_sum = activation_map.sum(dim=0, keepdim=False)
+ #print(f"2: {activation_map.shape}")
+ activation_map_sum = np.squeeze(activation_map_sum, axis=0)
+ #print(f"3: {activation_map_sum.shape}")
+ summed_activation_maps.append(activation_map_sum)
+ activation_maps = torch.stack(summed_activation_maps)
+
+ # For plotting
+ if settings['plot']:
+ fig = cam_generator.plot_activation_grid(inputs, activation_maps, predicted_classes, overlay=settings['overlay'], normalize=settings['normalize'])
+ pdf_save_path = os.path.join(batch_grid_fldr,f"batch_{batch_idx}_grid.pdf")
+ fig.savefig(pdf_save_path, format='pdf')
+ print(f"Saved batch grid to {pdf_save_path}")
+ #plt.show()
+ display(fig)
+
+ for i in range(inputs.size(0)):
+ activation_map = activation_maps[i].detach().numpy()
+
+ if settings['cam_type'] in ['saliency_image', 'gradcam', 'gradcam_pp']:
+ #activation_map = activation_map.sum(axis=0)
+ activation_map = (activation_map - activation_map.min()) / (activation_map.max() - activation_map.min())
+ activation_map = (activation_map * 255).astype(np.uint8)
+ activation_image = Image.fromarray(activation_map, mode='L')
+
+ elif settings['cam_type'] == 'saliency_channel':
+ # Handle each channel separately and save as RGB
+ rgb_activation_map = np.zeros((activation_map.shape[1], activation_map.shape[2], 3), dtype=np.uint8)
+ for c in range(min(activation_map.shape[0], 3)): # Limit to 3 channels for RGB
+ channel_map = activation_map[c]
+ channel_map = (channel_map - channel_map.min()) / (channel_map.max() - channel_map.min())
+ rgb_activation_map[:, :, c] = (channel_map * 255).astype(np.uint8)
+ activation_image = Image.fromarray(rgb_activation_map, mode='RGB')
+
+ # Save activation maps
+ class_pred = predicted_classes[i].item()
+ parts = filenames[i].split('_')
+ plate = parts[0]
+ well = parts[1]
+ save_class_dir = os.path.join(save_dir, f'class_{class_pred}', str(plate), str(well))
+ os.makedirs(save_class_dir, exist_ok=True)
+ save_path = os.path.join(save_class_dir, f'{filenames[i]}')
+ if settings['save']:
+ activation_image.save(save_path)
+ img_paths.append(save_path)
+
+ if settings['save']:
+ activation_maps_to_database(img_paths, source_folder, settings)
+
+ if settings['correlation']:
+ df = calculate_activation_correlations(inputs, activation_maps, filenames, manders_thresholds=settings['manders_thresholds'])
+ if settings['plot']:
+ display(df)
+ if settings['save']:
+ activation_correlations_to_database(df, img_paths, source_folder, settings)

  stop = time.time()
  duration = stop - start
  time_ls.append(duration)
  files_processed = batch_idx * settings['batch_size']
- files_to_process = len(data_loader)
- print_progress(files_processed, files_to_process, n_jobs=n_jobs, time_ls=time_ls, batch_size=settings['batch_size'], operation_type="Generating Saliency Maps")
-
- print("Saliency map generation complete.")
-
- def visualize_saliency_map_v1(src, model_type='maxvit', model_path='', image_size=224, channels=[1,2,3], normalize=True, class_names=None, save_saliency=False, save_dir='saliency_maps'):
+ files_to_process = len(data_loader) * settings['batch_size']
+ print_progress(files_processed, files_to_process, n_jobs=n_jobs, time_ls=time_ls, batch_size=settings['batch_size'], operation_type="Generating Activation Maps")

- from spacr.utils import SaliencyMapGenerator, preprocess_image
-
- use_cuda = torch.cuda.is_available()
- device = torch.device("cuda" if use_cuda else "cpu")
-
- # Load the entire model object
- model = torch.load(model_path)
- model.to(device)
-
- # Create directory for saving saliency maps if it does not exist
- if save_saliency and not os.path.exists(save_dir):
- os.makedirs(save_dir)
-
- # Collect all images and their tensors
- images = []
- input_tensors = []
- filenames = []
- for file in os.listdir(src):
- if not file.endswith('.png'):
- continue
- image_path = os.path.join(src, file)
- image, input_tensor = preprocess_image(image_path, normalize=normalize, image_size=image_size, channels=channels)
- images.append(image)
- input_tensors.append(input_tensor)
- filenames.append(file)
-
- input_tensors = torch.cat(input_tensors).to(device)
- class_labels = torch.zeros(input_tensors.size(0), dtype=torch.long).to(device) # Replace with actual class labels if available
-
- # Generate saliency maps
- cam_generator = SaliencyMapGenerator(model)
- saliency_maps = cam_generator.compute_saliency_maps(input_tensors, class_labels)
-
- # Convert saliency maps to numpy arrays
- saliency_maps = saliency_maps.cpu().numpy()
-
- N = len(images)
-
- dst = os.path.join(src, 'saliency_maps')
-
- for i in range(N):
- fig, axes = plt.subplots(1, 3, figsize=(20, 5))
-
- # Original image
- axes[0].imshow(images[i])
- axes[0].axis('off')
- if class_names:
- axes[0].set_title(f"Class: {class_names[class_labels[i].item()]}")
-
- # Saliency Map
- axes[1].imshow(saliency_maps[i, 0], cmap='hot')
- axes[1].axis('off')
- axes[1].set_title("Saliency Map")
-
- # Overlay
- overlay = np.array(images[i])
- overlay = overlay / overlay.max()
- saliency_map_rgb = np.stack([saliency_maps[i, 0]] * 3, axis=-1) # Convert saliency map to RGB
- overlay = (overlay * 0.5 + saliency_map_rgb * 0.5).clip(0, 1)
- axes[2].imshow(overlay)
- axes[2].axis('off')
- axes[2].set_title("Overlay")
-
- plt.tight_layout()
- plt.show()
-
- # Save the saliency map if required
- if save_saliency:
- os.makedirs(dst, exist_ok=True)
- saliency_image = Image.fromarray((saliency_maps[i, 0] * 255).astype(np.uint8))
- saliency_image.save(os.path.join(dst, f'saliency_{filenames[i]}'))
-
- def visualize_grad_cam(src, model_path, target_layers=None, image_size=224, channels=[1, 2, 3], normalize=True, class_names=None, save_cam=False, save_dir='grad_cam'):
-
- from spacr.utils import GradCAM, preprocess_image, show_cam_on_image, recommend_target_layers
-
- use_cuda = torch.cuda.is_available()
- device = torch.device("cuda" if use_cuda else "cpu")
-
- model = torch.load(model_path)
- model.to(device)
-
- # If no target layers provided, recommend a target layer
- if target_layers is None:
- target_layers, all_layers = recommend_target_layers(model)
- print(f"No target layer provided. Using recommended layer: {target_layers[0]}")
- print("All possible target layers:")
- for layer in all_layers:
- print(layer)
-
- grad_cam = GradCAM(model=model, target_layers=target_layers, use_cuda=use_cuda)
-
- if save_cam and not os.path.exists(save_dir):
- os.makedirs(save_dir)
-
- images = []
- filenames = []
- for file in os.listdir(src):
- if not file.endswith('.png'):
- continue
- image_path = os.path.join(src, file)
- image, input_tensor = preprocess_image(image_path, normalize=normalize, image_size=image_size, channels=channels)
- images.append(image)
- filenames.append(file)
-
- input_tensor = input_tensor.to(device)
- cam = grad_cam(input_tensor)
- cam_image = show_cam_on_image(np.array(image) / 255.0, cam)
-
- fig, ax = plt.subplots(1, 2, figsize=(10, 5))
- ax[0].imshow(image)
- ax[0].axis('off')
- ax[0].set_title("Original Image")
- ax[1].imshow(cam_image)
- ax[1].axis('off')
- ax[1].set_title("Grad-CAM")
- plt.show()
-
- if save_cam:
- cam_pil = Image.fromarray(cam_image)
- cam_pil.save(os.path.join(save_dir, f'grad_cam_{file}'))
+ torch.cuda.empty_cache()
+ gc.collect()
+ print("Activation map generation complete.")

  def visualize_classes(model, dtype, class_names, **kwargs):

- from spacr.utils import class_visualization
+ from .utils import class_visualization

  for target_y in range(2): # Assuming binary classification
  print(f"Visualizing class: {class_names[target_y]}")
@@ -57,6 +57,7 @@ class MainApp(tk.Tk):
  "Map Barcodes": (lambda frame: initiate_root(self, 'map_barcodes'), "Map barcodes to data."),
  "Regression": (lambda frame: initiate_root(self, 'regression'), "Perform regression analysis."),
  "Recruitment": (lambda frame: initiate_root(self, 'recruitment'), "Analyze recruitment data."),
+ "Activation": (lambda frame: initiate_root(self, 'activation'), "Generate activation maps of computer vision models and measure channel-activation correlation."),
  "Plaque": (lambda frame: initiate_root(self, 'analyze_plaques'), "Analyze plaque data.")
  }

@@ -379,10 +379,13 @@ def set_globals(thread_control_var, q_var, console_output_var, parent_frame_var,
  index_control = index_control_var

  def import_settings(settings_type='mask'):
- from .gui_utils import convert_settings_dict_for_gui, hide_all_settings
  global vars_dict, scrollable_frame, button_scrollable_frame
- from .settings import generate_fields, set_default_settings_preprocess_generate_masks, get_measure_crop_settings, set_default_train_test_model, set_default_generate_barecode_mapping, set_default_umap_image_settings, get_analyze_recruitment_default_settings

+ from .gui_utils import convert_settings_dict_for_gui, hide_all_settings
+ from .settings import generate_fields, set_default_settings_preprocess_generate_masks, get_measure_crop_settings, set_default_train_test_model
+ from .settings import set_default_generate_barecode_mapping, set_default_umap_image_settings, get_analyze_recruitment_default_settings
+ from .settings import get_default_generate_activation_map_settings
+ #activation
  def read_settings_from_csv(csv_file_path):
  settings = {}
  with open(csv_file_path, newline='') as csvfile:
@@ -422,6 +425,8 @@ def import_settings(settings_type='mask'):
  settings = set_default_umap_image_settings(settings={})
  elif settings_type == 'recruitment':
  settings = get_analyze_recruitment_default_settings(settings={})
+ elif settings_type == 'activation':
+ settings = get_default_generate_activation_map_settings(settings={})
  elif settings_type == 'analyze_plaques':
  settings = {}
  elif settings_type == 'convert':
@@ -436,8 +441,10 @@ def import_settings(settings_type='mask'):

  def setup_settings_panel(vertical_container, settings_type='mask'):
  global vars_dict, scrollable_frame
- from .settings import get_identify_masks_finetune_default_settings, set_default_analyze_screen, set_default_settings_preprocess_generate_masks, get_measure_crop_settings, deep_spacr_defaults, set_default_generate_barecode_mapping, set_default_umap_image_settings
- from .settings import get_map_barcodes_default_settings, get_analyze_recruitment_default_settings, get_check_cellpose_models_default_settings, generate_fields, get_perform_regression_default_settings, get_train_cellpose_default_settings
+ from .settings import get_identify_masks_finetune_default_settings, set_default_analyze_screen, set_default_settings_preprocess_generate_masks
+ from .settings import get_measure_crop_settings, deep_spacr_defaults, set_default_generate_barecode_mapping, set_default_umap_image_settings
+ from .settings import get_map_barcodes_default_settings, get_analyze_recruitment_default_settings, get_check_cellpose_models_default_settings
+ from .settings import generate_fields, get_perform_regression_default_settings, get_train_cellpose_default_settings, get_default_generate_activation_map_settings
  from .gui_utils import convert_settings_dict_for_gui
  from .gui_elements import set_element_size

@@ -480,6 +487,8 @@ def setup_settings_panel(vertical_container, settings_type='mask'):
  settings = get_perform_regression_default_settings(settings={})
  elif settings_type == 'recruitment':
  settings = get_analyze_recruitment_default_settings(settings={})
+ elif settings_type == 'activation':
+ settings = get_default_generate_activation_map_settings(settings={})
  elif settings_type == 'analyze_plaques':
  settings = {'src':'path to images'}
  elif settings_type == 'convert':
@@ -20,6 +20,47 @@ from tkinter import ttk, scrolledtext

  fig = None

+ def create_menu_bar(root):
+ from .gui import initiate_root
+ gui_apps = {
+ "Mask": lambda: initiate_root(root, settings_type='mask'),
+ "Measure": lambda: initiate_root(root, settings_type='measure'),
+ "Annotate": lambda: initiate_root(root, settings_type='annotate'),
+ "Make Masks": lambda: initiate_root(root, settings_type='make_masks'),
+ "Classify": lambda: initiate_root(root, settings_type='classify'),
+ "Umap": lambda: initiate_root(root, settings_type='umap'),
+ "Train Cellpose": lambda: initiate_root(root, settings_type='train_cellpose'),
+ "ML Analyze": lambda: initiate_root(root, settings_type='ml_analyze'),
+ "Cellpose Masks": lambda: initiate_root(root, settings_type='cellpose_masks'),
+ "Cellpose All": lambda: initiate_root(root, settings_type='cellpose_all'),
+ "Map Barcodes": lambda: initiate_root(root, settings_type='map_barcodes'),
+ "Regression": lambda: initiate_root(root, settings_type='regression'),
+ "Activation": lambda: initiate_root(root, settings_type='activation'),
+ "Recruitment": lambda: initiate_root(root, settings_type='recruitment')
+ }
+
+ # Create the menu bar
+ menu_bar = tk.Menu(root, bg="#008080", fg="white")
+
+ # Create a "SpaCr Applications" menu
+ app_menu = tk.Menu(menu_bar, tearoff=0, bg="#008080", fg="white")
+ menu_bar.add_cascade(label="SpaCr Applications", menu=app_menu)
+
+ # Add options to the "SpaCr Applications" menu
+ for app_name, app_func in gui_apps.items():
+ app_menu.add_command(
+ label=app_name,
+ command=app_func
+ )
+
+ # Add a separator and an exit option
+ app_menu.add_separator()
+ app_menu.add_command(label="Help", command=lambda: webbrowser.open("https://spacr.readthedocs.io/en/latest/?badge=latest"))
+ app_menu.add_command(label="Exit", command=root.quit)
+
+ # Configure the menu for the root window
+ root.config(menu=menu_bar)
+
  def set_element_size():

  screen_width, screen_height = pyautogui.size()
@@ -2122,7 +2163,7 @@ class ModifyMaskApp:
  self.update_display()

  class AnnotateApp:
- def __init__(self, root, db_path, src, image_type=None, channels=None, image_size=200, annotation_column='annotate', normalize=False, percentiles=(1, 99), measurement=None, threshold=None):
+ def __init__(self, root, db_path, src, image_type=None, channels=None, image_size=200, annotation_column='annotate', normalize=False, percentiles=(1, 99), measurement=None, threshold=None, normalize_channels=None):
  self.root = root
  self.db_path = db_path
  self.src = src
@@ -2148,7 +2189,8 @@ class AnnotateApp:
  self.update_queue = Queue()
  self.measurement = measurement
  self.threshold = threshold
-
+ self.normalize_channels = normalize_channels
+ print('self.normalize_channels',self.normalize_channels)
  style_out = set_dark_style(ttk.Style())
  self.font_loader = style_out['font_loader']
  self.font_size = style_out['font_size']
@@ -2157,7 +2199,6 @@ class AnnotateApp:
  self.active_color = style_out['active_color']
  self.inactive_color = style_out['inactive_color']

-
  if self.font_loader:
  self.font_style = self.font_loader.get_font(size=self.font_size)
  else:
@@ -2356,14 +2397,26 @@ class AnnotateApp:
  def load_single_image(self, path_annotation_tuple):
  path, annotation = path_annotation_tuple
  img = Image.open(path)
- img = self.normalize_image(img, self.normalize, self.percentiles)
+ img = self.normalize_image(img, self.normalize, self.percentiles, self.normalize_channels)
  img = img.convert('RGB')
  img = self.filter_channels(img)
  img = img.resize(self.image_size)
  return img, annotation
-
+
  @staticmethod
- def normalize_image(img, normalize=False, percentiles=(1, 99)):
+ def normalize_image(img, normalize=False, percentiles=(1, 99), normalize_channels=None):
+ """
+ Normalize an image based on specific channels (R, G, B).
+
+ Args:
+ img (PIL.Image or np.array): Input image.
+ normalize (bool): Whether to normalize the image or not.
+ percentiles (tuple): Percentiles to use for intensity rescaling.
+ normalize_channels (list): List of channels to normalize. E.g., ['r', 'g', 'b'], ['r'], ['g'], etc.
+
+ Returns:
+ PIL.Image: Normalized image.
+ """
  img_array = np.array(img)

  if normalize:
@@ -2371,13 +2424,23 @@ class AnnotateApp:
  p2, p98 = np.percentile(img_array, percentiles)
  img_array = rescale_intensity(img_array, in_range=(p2, p98), out_range=(0, 255))
  else: # Color image or multi-channel image
- for channel in range(img_array.shape[2]):
- p2, p98 = np.percentile(img_array[:, :, channel], percentiles)
- img_array[:, :, channel] = rescale_intensity(img_array[:, :, channel], in_range=(p2, p98), out_range=(0, 255))
+ # Create a map for the color channels
+ channel_map = {'r': 0, 'g': 1, 'b': 2}
+
+ # If normalize_channels is not specified, normalize all channels
+ if normalize_channels is None:
+ normalize_channels = ['r', 'g', 'b']
+
+ for channel_name in normalize_channels:
+ if channel_name in channel_map:
+ channel_idx = channel_map[channel_name]
+ p2, p98 = np.percentile(img_array[:, :, channel_idx], percentiles)
+ img_array[:, :, channel_idx] = rescale_intensity(img_array[:, :, channel_idx], in_range=(p2, p98), out_range=(0, 255))

  img_array = np.clip(img_array, 0, 255).astype('uint8')

  return Image.fromarray(img_array)
+

  def add_colored_border(self, img, border_width, border_color):
  top_border = Image.new('RGB', (img.width, border_width), color=border_color)
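Because normalize_image is a @staticmethod, the per-channel behaviour added above can be exercised without constructing the annotation app. A minimal sketch (the file name and the choice of rescaling only the red channel are illustrative; importing gui_elements does pull in the GUI dependencies such as tkinter):

    from PIL import Image
    from spacr.gui_elements import AnnotateApp

    img = Image.open('example.png').convert('RGB')           # hypothetical RGB input
    out = AnnotateApp.normalize_image(img, normalize=True,
                                      percentiles=(1, 99),
                                      normalize_channels=['r'])  # rescale only the red channel
    out.save('example_normalized.png')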
@@ -2505,46 +2568,6 @@ class AnnotateApp:
  else:
  print('Waiting for pending updates to finish before quitting')

- def create_menu_bar(root):
- from .gui import initiate_root
- gui_apps = {
- "Mask": lambda: initiate_root(root, settings_type='mask'),
- "Measure": lambda: initiate_root(root, settings_type='measure'),
- "Annotate": lambda: initiate_root(root, settings_type='annotate'),
- "Make Masks": lambda: initiate_root(root, settings_type='make_masks'),
- "Classify": lambda: initiate_root(root, settings_type='classify'),
- "Umap": lambda: initiate_root(root, settings_type='umap'),
- "Train Cellpose": lambda: initiate_root(root, settings_type='train_cellpose'),
- "ML Analyze": lambda: initiate_root(root, settings_type='ml_analyze'),
- "Cellpose Masks": lambda: initiate_root(root, settings_type='cellpose_masks'),
- "Cellpose All": lambda: initiate_root(root, settings_type='cellpose_all'),
- "Map Barcodes": lambda: initiate_root(root, settings_type='map_barcodes'),
- "Regression": lambda: initiate_root(root, settings_type='regression'),
- "Recruitment": lambda: initiate_root(root, settings_type='recruitment')
- }
-
- # Create the menu bar
- menu_bar = tk.Menu(root, bg="#008080", fg="white")
-
- # Create a "SpaCr Applications" menu
- app_menu = tk.Menu(menu_bar, tearoff=0, bg="#008080", fg="white")
- menu_bar.add_cascade(label="SpaCr Applications", menu=app_menu)
-
- # Add options to the "SpaCr Applications" menu
- for app_name, app_func in gui_apps.items():
- app_menu.add_command(
- label=app_name,
- command=app_func
- )
-
- # Add a separator and an exit option
- app_menu.add_separator()
- app_menu.add_command(label="Help", command=lambda: webbrowser.open("https://spacr.readthedocs.io/en/latest/?badge=latest"))
- app_menu.add_command(label="Exit", command=root.quit)
-
- # Configure the menu for the root window
- root.config(menu=menu_bar)
-
  def standardize_figure(fig):
  from .gui_elements import set_dark_style
  from matplotlib.font_manager import FontProperties