melage-0.0.65-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (501)
  1. melage/__init__.py +16 -0
  2. melage/cli.py +4 -0
  3. melage/graphics/GLGraphicsItem.py +286 -0
  4. melage/graphics/GLViewWidget.py +595 -0
  5. melage/graphics/Transform3D.py +55 -0
  6. melage/graphics/__init__.py +8 -0
  7. melage/graphics/functions.py +101 -0
  8. melage/graphics/items/GLAxisItem.py +149 -0
  9. melage/graphics/items/GLGridItem.py +178 -0
  10. melage/graphics/items/GLPolygonItem.py +77 -0
  11. melage/graphics/items/GLScatterPlotItem.py +135 -0
  12. melage/graphics/items/GLVolumeItem.py +280 -0
  13. melage/graphics/items/GLVolumeItem_b.py +237 -0
  14. melage/graphics/items/__init__.py +0 -0
  15. melage/graphics/shaders.py +202 -0
  16. melage/main.py +270 -0
  17. melage/requirements22.txt +25 -0
  18. melage/requirements_old.txt +28 -0
  19. melage/resource/0circle.png +0 -0
  20. melage/resource/0circle_faded.png +0 -0
  21. melage/resource/3d.png +0 -0
  22. melage/resource/3d.psd +0 -0
  23. melage/resource/3dFaded.png +0 -0
  24. melage/resource/Eraser.png +0 -0
  25. melage/resource/EraserFaded.png +0 -0
  26. melage/resource/EraserX.png +0 -0
  27. melage/resource/EraserXFaded.png +0 -0
  28. melage/resource/Eraser_icon.svg +79 -0
  29. melage/resource/Hand.png +0 -0
  30. melage/resource/HandIcons_0.png +0 -0
  31. melage/resource/Hand_IX.png +0 -0
  32. melage/resource/Hand_IXFaded.png +0 -0
  33. melage/resource/Handsqueezed.png +0 -0
  34. melage/resource/Handwriting (copy).png +0 -0
  35. melage/resource/Handwriting.png +0 -0
  36. melage/resource/HandwritingMinus.png +0 -0
  37. melage/resource/HandwritingMinusX.png +0 -0
  38. melage/resource/HandwritingPlus.png +0 -0
  39. melage/resource/HandwritingPlusX.png +0 -0
  40. melage/resource/Move_icon.svg +8 -0
  41. melage/resource/PngItem_2422924.png +0 -0
  42. melage/resource/about.png +0 -0
  43. melage/resource/about_logo.png +0 -0
  44. melage/resource/about_logo0.png +0 -0
  45. melage/resource/action_check.png +0 -0
  46. melage/resource/action_check_OFF.png +0 -0
  47. melage/resource/arrow).png +0 -0
  48. melage/resource/arrow.png +0 -0
  49. melage/resource/arrowFaded.png +0 -0
  50. melage/resource/arrow_org.png +0 -0
  51. melage/resource/arrow_org.png.png +0 -0
  52. melage/resource/arrows.png +0 -0
  53. melage/resource/authors.mp4 +0 -0
  54. melage/resource/box.png +0 -0
  55. melage/resource/check-image-icon-0.jpg +0 -0
  56. melage/resource/circle.png +0 -0
  57. melage/resource/circle_faded.png +0 -0
  58. melage/resource/circle_or.png +0 -0
  59. melage/resource/close.png +0 -0
  60. melage/resource/close_bg.png +0 -0
  61. melage/resource/color/Simple.txt +18 -0
  62. melage/resource/color/Tissue.txt +24 -0
  63. melage/resource/color/Tissue12.txt +27 -0
  64. melage/resource/color/albert_LUT.txt +102 -0
  65. melage/resource/color/mcrib_LUT.txt +102 -0
  66. melage/resource/color/pediatric1.txt +29 -0
  67. melage/resource/color/pediatric1_old.txt +27 -0
  68. melage/resource/color/pediatric2.txt +87 -0
  69. melage/resource/color/pediatric3.txt +29 -0
  70. melage/resource/color/pediatrics (copy).csv +103 -0
  71. melage/resource/color/tissue_seg.txt +4 -0
  72. melage/resource/contour.png +0 -0
  73. melage/resource/contour.svg +2 -0
  74. melage/resource/contourFaded.png +0 -0
  75. melage/resource/contourX.png +0 -0
  76. melage/resource/contourXFaded.png +0 -0
  77. melage/resource/dti.png +0 -0
  78. melage/resource/dti0.png +0 -0
  79. melage/resource/dti222.png +0 -0
  80. melage/resource/dti_or.png +0 -0
  81. melage/resource/eco.png +0 -0
  82. melage/resource/eco22.png +0 -0
  83. melage/resource/eco_old.png +0 -0
  84. melage/resource/eco_or.png +0 -0
  85. melage/resource/eco_or2.png +0 -0
  86. melage/resource/eco_seg.png +0 -0
  87. melage/resource/eco_seg_old.png +0 -0
  88. melage/resource/export.png +0 -0
  89. melage/resource/hand-grab-icon-10.jpg +0 -0
  90. melage/resource/hand-grab-icon-25.jpg +0 -0
  91. melage/resource/info.png +0 -0
  92. melage/resource/line.png +0 -0
  93. melage/resource/linefaded.png +0 -0
  94. melage/resource/load.png +0 -0
  95. melage/resource/main.ico +0 -0
  96. melage/resource/manual_images/3D_rightc.png +0 -0
  97. melage/resource/manual_images/3D_rightc_goto.png +0 -0
  98. melage/resource/manual_images/3D_rightc_paint.png +0 -0
  99. melage/resource/manual_images/3D_rightc_paint_draw1.png +0 -0
  100. melage/resource/manual_images/3D_rightc_paint_draw2.png +0 -0
  101. melage/resource/manual_images/3D_rightc_paint_render.png +0 -0
  102. melage/resource/manual_images/3D_rightc_paint_render2.png +0 -0
  103. melage/resource/manual_images/3D_rightc_paint_render3.png +0 -0
  104. melage/resource/manual_images/3D_rightc_paint_render4.png +0 -0
  105. melage/resource/manual_images/3D_rightc_paint_render5.png +0 -0
  106. melage/resource/manual_images/3D_rightc_paint_render6.png +0 -0
  107. melage/resource/manual_images/3D_rightc_seg.png +0 -0
  108. melage/resource/manual_images/exit_toolbar.png +0 -0
  109. melage/resource/manual_images/load_image_file.png +0 -0
  110. melage/resource/manual_images/load_image_file_openp.png +0 -0
  111. melage/resource/manual_images/main_page.png +0 -0
  112. melage/resource/manual_images/menu_file.png +0 -0
  113. melage/resource/manual_images/menu_file_export.png +0 -0
  114. melage/resource/manual_images/menu_file_import.png +0 -0
  115. melage/resource/manual_images/menu_file_settings.png +0 -0
  116. melage/resource/manual_images/menu_file_ss.png +0 -0
  117. melage/resource/manual_images/open_save_load.png +0 -0
  118. melage/resource/manual_images/panning_toolbar.png +0 -0
  119. melage/resource/manual_images/segmentation_toolbar.png +0 -0
  120. melage/resource/manual_images/tab_mri.png +0 -0
  121. melage/resource/manual_images/tab_us.png +0 -0
  122. melage/resource/manual_images/tabs.png +0 -0
  123. melage/resource/manual_images/toolbar_tools.png +0 -0
  124. melage/resource/manual_images/tools_basic.png +0 -0
  125. melage/resource/manual_images/tools_bet.png +0 -0
  126. melage/resource/manual_images/tools_cs.png +0 -0
  127. melage/resource/manual_images/tools_deepbet.png +0 -0
  128. melage/resource/manual_images/tools_imageinfo.png +0 -0
  129. melage/resource/manual_images/tools_maskO.png +0 -0
  130. melage/resource/manual_images/tools_masking.png +0 -0
  131. melage/resource/manual_images/tools_n4b.png +0 -0
  132. melage/resource/manual_images/tools_resize.png +0 -0
  133. melage/resource/manual_images/tools_ruler.png +0 -0
  134. melage/resource/manual_images/tools_seg.png +0 -0
  135. melage/resource/manual_images/tools_threshold.png +0 -0
  136. melage/resource/manual_images/tools_tools.png +0 -0
  137. melage/resource/manual_images/widget_color.png +0 -0
  138. melage/resource/manual_images/widget_color_add.png +0 -0
  139. melage/resource/manual_images/widget_color_add2.png +0 -0
  140. melage/resource/manual_images/widget_color_additional.png +0 -0
  141. melage/resource/manual_images/widget_images.png +0 -0
  142. melage/resource/manual_images/widget_images2.png +0 -0
  143. melage/resource/manual_images/widget_images3.png +0 -0
  144. melage/resource/manual_images/widget_marker.png +0 -0
  145. melage/resource/manual_images/widget_mri.png +0 -0
  146. melage/resource/manual_images/widget_mri2.png +0 -0
  147. melage/resource/manual_images/widget_segintensity.png +0 -0
  148. melage/resource/manual_images/widget_tab_mutualview.png +0 -0
  149. melage/resource/manual_images/widget_tab_mutualview2.png +0 -0
  150. melage/resource/manual_images/widget_table.png +0 -0
  151. melage/resource/manual_images/widget_table2.png +0 -0
  152. melage/resource/manual_images/widget_us.png +0 -0
  153. melage/resource/melage_top.ico +0 -0
  154. melage/resource/melage_top.png +0 -0
  155. melage/resource/melage_top0.png +0 -0
  156. melage/resource/melage_top1.png +0 -0
  157. melage/resource/melage_top4.png +0 -0
  158. melage/resource/mri (copy).png +0 -0
  159. melage/resource/mri.png +0 -0
  160. melage/resource/mri0.png +0 -0
  161. melage/resource/mri000.png +0 -0
  162. melage/resource/mri22.png +0 -0
  163. melage/resource/mri_big.png +0 -0
  164. melage/resource/mri_old.png +0 -0
  165. melage/resource/mri_seg.png +0 -0
  166. melage/resource/mri_seg_old.png +0 -0
  167. melage/resource/new.png +0 -0
  168. melage/resource/open.png +0 -0
  169. melage/resource/open2.png +0 -0
  170. melage/resource/pan.png +0 -0
  171. melage/resource/pencil.png +0 -0
  172. melage/resource/pencilFaded.png +0 -0
  173. melage/resource/points.png +0 -0
  174. melage/resource/pointsFaded.png +0 -0
  175. melage/resource/rotate.png +0 -0
  176. melage/resource/ruler.png +0 -0
  177. melage/resource/rulerFaded.png +0 -0
  178. melage/resource/s.png +0 -0
  179. melage/resource/s.psd +0 -0
  180. melage/resource/save.png +0 -0
  181. melage/resource/saveas.png +0 -0
  182. melage/resource/seg_mri.png +0 -0
  183. melage/resource/seg_mri2.png +0 -0
  184. melage/resource/settings.png +0 -0
  185. melage/resource/synch.png +0 -0
  186. melage/resource/synchFaded.png +0 -0
  187. melage/resource/theme/rc/.keep +1 -0
  188. melage/resource/theme/rc/arrow_down.png +0 -0
  189. melage/resource/theme/rc/arrow_down@2x.png +0 -0
  190. melage/resource/theme/rc/arrow_down_disabled.png +0 -0
  191. melage/resource/theme/rc/arrow_down_disabled@2x.png +0 -0
  192. melage/resource/theme/rc/arrow_down_focus.png +0 -0
  193. melage/resource/theme/rc/arrow_down_focus@2x.png +0 -0
  194. melage/resource/theme/rc/arrow_down_pressed.png +0 -0
  195. melage/resource/theme/rc/arrow_down_pressed@2x.png +0 -0
  196. melage/resource/theme/rc/arrow_left.png +0 -0
  197. melage/resource/theme/rc/arrow_left@2x.png +0 -0
  198. melage/resource/theme/rc/arrow_left_disabled.png +0 -0
  199. melage/resource/theme/rc/arrow_left_disabled@2x.png +0 -0
  200. melage/resource/theme/rc/arrow_left_focus.png +0 -0
  201. melage/resource/theme/rc/arrow_left_focus@2x.png +0 -0
  202. melage/resource/theme/rc/arrow_left_pressed.png +0 -0
  203. melage/resource/theme/rc/arrow_left_pressed@2x.png +0 -0
  204. melage/resource/theme/rc/arrow_right.png +0 -0
  205. melage/resource/theme/rc/arrow_right@2x.png +0 -0
  206. melage/resource/theme/rc/arrow_right_disabled.png +0 -0
  207. melage/resource/theme/rc/arrow_right_disabled@2x.png +0 -0
  208. melage/resource/theme/rc/arrow_right_focus.png +0 -0
  209. melage/resource/theme/rc/arrow_right_focus@2x.png +0 -0
  210. melage/resource/theme/rc/arrow_right_pressed.png +0 -0
  211. melage/resource/theme/rc/arrow_right_pressed@2x.png +0 -0
  212. melage/resource/theme/rc/arrow_up.png +0 -0
  213. melage/resource/theme/rc/arrow_up@2x.png +0 -0
  214. melage/resource/theme/rc/arrow_up_disabled.png +0 -0
  215. melage/resource/theme/rc/arrow_up_disabled@2x.png +0 -0
  216. melage/resource/theme/rc/arrow_up_focus.png +0 -0
  217. melage/resource/theme/rc/arrow_up_focus@2x.png +0 -0
  218. melage/resource/theme/rc/arrow_up_pressed.png +0 -0
  219. melage/resource/theme/rc/arrow_up_pressed@2x.png +0 -0
  220. melage/resource/theme/rc/base_icon.png +0 -0
  221. melage/resource/theme/rc/base_icon@2x.png +0 -0
  222. melage/resource/theme/rc/base_icon_disabled.png +0 -0
  223. melage/resource/theme/rc/base_icon_disabled@2x.png +0 -0
  224. melage/resource/theme/rc/base_icon_focus.png +0 -0
  225. melage/resource/theme/rc/base_icon_focus@2x.png +0 -0
  226. melage/resource/theme/rc/base_icon_pressed.png +0 -0
  227. melage/resource/theme/rc/base_icon_pressed@2x.png +0 -0
  228. melage/resource/theme/rc/branch_closed.png +0 -0
  229. melage/resource/theme/rc/branch_closed@2x.png +0 -0
  230. melage/resource/theme/rc/branch_closed_disabled.png +0 -0
  231. melage/resource/theme/rc/branch_closed_disabled@2x.png +0 -0
  232. melage/resource/theme/rc/branch_closed_focus.png +0 -0
  233. melage/resource/theme/rc/branch_closed_focus@2x.png +0 -0
  234. melage/resource/theme/rc/branch_closed_pressed.png +0 -0
  235. melage/resource/theme/rc/branch_closed_pressed@2x.png +0 -0
  236. melage/resource/theme/rc/branch_end.png +0 -0
  237. melage/resource/theme/rc/branch_end@2x.png +0 -0
  238. melage/resource/theme/rc/branch_end_disabled.png +0 -0
  239. melage/resource/theme/rc/branch_end_disabled@2x.png +0 -0
  240. melage/resource/theme/rc/branch_end_focus.png +0 -0
  241. melage/resource/theme/rc/branch_end_focus@2x.png +0 -0
  242. melage/resource/theme/rc/branch_end_pressed.png +0 -0
  243. melage/resource/theme/rc/branch_end_pressed@2x.png +0 -0
  244. melage/resource/theme/rc/branch_line.png +0 -0
  245. melage/resource/theme/rc/branch_line@2x.png +0 -0
  246. melage/resource/theme/rc/branch_line_disabled.png +0 -0
  247. melage/resource/theme/rc/branch_line_disabled@2x.png +0 -0
  248. melage/resource/theme/rc/branch_line_focus.png +0 -0
  249. melage/resource/theme/rc/branch_line_focus@2x.png +0 -0
  250. melage/resource/theme/rc/branch_line_pressed.png +0 -0
  251. melage/resource/theme/rc/branch_line_pressed@2x.png +0 -0
  252. melage/resource/theme/rc/branch_more.png +0 -0
  253. melage/resource/theme/rc/branch_more@2x.png +0 -0
  254. melage/resource/theme/rc/branch_more_disabled.png +0 -0
  255. melage/resource/theme/rc/branch_more_disabled@2x.png +0 -0
  256. melage/resource/theme/rc/branch_more_focus.png +0 -0
  257. melage/resource/theme/rc/branch_more_focus@2x.png +0 -0
  258. melage/resource/theme/rc/branch_more_pressed.png +0 -0
  259. melage/resource/theme/rc/branch_more_pressed@2x.png +0 -0
  260. melage/resource/theme/rc/branch_open.png +0 -0
  261. melage/resource/theme/rc/branch_open@2x.png +0 -0
  262. melage/resource/theme/rc/branch_open_disabled.png +0 -0
  263. melage/resource/theme/rc/branch_open_disabled@2x.png +0 -0
  264. melage/resource/theme/rc/branch_open_focus.png +0 -0
  265. melage/resource/theme/rc/branch_open_focus@2x.png +0 -0
  266. melage/resource/theme/rc/branch_open_pressed.png +0 -0
  267. melage/resource/theme/rc/branch_open_pressed@2x.png +0 -0
  268. melage/resource/theme/rc/checkbox_checked.png +0 -0
  269. melage/resource/theme/rc/checkbox_checked0.png +0 -0
  270. melage/resource/theme/rc/checkbox_checked@2x.png +0 -0
  271. melage/resource/theme/rc/checkbox_checked@2x0.png +0 -0
  272. melage/resource/theme/rc/checkbox_checked@2x000.png.png +0 -0
  273. melage/resource/theme/rc/checkbox_checked_disabled.png +0 -0
  274. melage/resource/theme/rc/checkbox_checked_disabled0.png +0 -0
  275. melage/resource/theme/rc/checkbox_checked_disabled@2x.png +0 -0
  276. melage/resource/theme/rc/checkbox_checked_disabled@2x0.png +0 -0
  277. melage/resource/theme/rc/checkbox_checked_focus.png +0 -0
  278. melage/resource/theme/rc/checkbox_checked_focus0.png +0 -0
  279. melage/resource/theme/rc/checkbox_checked_focus@2x.png +0 -0
  280. melage/resource/theme/rc/checkbox_checked_focus@2x0.png +0 -0
  281. melage/resource/theme/rc/checkbox_checked_pressed.png +0 -0
  282. melage/resource/theme/rc/checkbox_checked_pressed0.png +0 -0
  283. melage/resource/theme/rc/checkbox_checked_pressed@2x.png +0 -0
  284. melage/resource/theme/rc/checkbox_checked_pressed@2x0.png +0 -0
  285. melage/resource/theme/rc/checkbox_indeterminate.png +0 -0
  286. melage/resource/theme/rc/checkbox_indeterminate@2x.png +0 -0
  287. melage/resource/theme/rc/checkbox_indeterminate_disabled.png +0 -0
  288. melage/resource/theme/rc/checkbox_indeterminate_disabled@2x.png +0 -0
  289. melage/resource/theme/rc/checkbox_indeterminate_focus.png +0 -0
  290. melage/resource/theme/rc/checkbox_indeterminate_focus@2x.png +0 -0
  291. melage/resource/theme/rc/checkbox_indeterminate_pressed.png +0 -0
  292. melage/resource/theme/rc/checkbox_indeterminate_pressed@2x.png +0 -0
  293. melage/resource/theme/rc/checkbox_unchecked.png +0 -0
  294. melage/resource/theme/rc/checkbox_unchecked0.png +0 -0
  295. melage/resource/theme/rc/checkbox_unchecked00.png +0 -0
  296. melage/resource/theme/rc/checkbox_unchecked@2x.png +0 -0
  297. melage/resource/theme/rc/checkbox_unchecked@2x0.png +0 -0
  298. melage/resource/theme/rc/checkbox_unchecked@2x00.png +0 -0
  299. melage/resource/theme/rc/checkbox_unchecked_disabled.png +0 -0
  300. melage/resource/theme/rc/checkbox_unchecked_disabled0.png +0 -0
  301. melage/resource/theme/rc/checkbox_unchecked_disabled00.png +0 -0
  302. melage/resource/theme/rc/checkbox_unchecked_disabled@2x.png +0 -0
  303. melage/resource/theme/rc/checkbox_unchecked_disabled@2x0.png +0 -0
  304. melage/resource/theme/rc/checkbox_unchecked_disabled@2x00.png +0 -0
  305. melage/resource/theme/rc/checkbox_unchecked_focus.png +0 -0
  306. melage/resource/theme/rc/checkbox_unchecked_focus0.png +0 -0
  307. melage/resource/theme/rc/checkbox_unchecked_focus00.png +0 -0
  308. melage/resource/theme/rc/checkbox_unchecked_focus@2x.png +0 -0
  309. melage/resource/theme/rc/checkbox_unchecked_focus@2x0.png +0 -0
  310. melage/resource/theme/rc/checkbox_unchecked_focus@2x00.png +0 -0
  311. melage/resource/theme/rc/checkbox_unchecked_pressed.png +0 -0
  312. melage/resource/theme/rc/checkbox_unchecked_pressed0.png +0 -0
  313. melage/resource/theme/rc/checkbox_unchecked_pressed00.png +0 -0
  314. melage/resource/theme/rc/checkbox_unchecked_pressed@2x.png +0 -0
  315. melage/resource/theme/rc/checkbox_unchecked_pressed@2x0.png +0 -0
  316. melage/resource/theme/rc/checkbox_unchecked_pressed@2x00.png +0 -0
  317. melage/resource/theme/rc/line_horizontal.png +0 -0
  318. melage/resource/theme/rc/line_horizontal@2x.png +0 -0
  319. melage/resource/theme/rc/line_horizontal_disabled.png +0 -0
  320. melage/resource/theme/rc/line_horizontal_disabled@2x.png +0 -0
  321. melage/resource/theme/rc/line_horizontal_focus.png +0 -0
  322. melage/resource/theme/rc/line_horizontal_focus@2x.png +0 -0
  323. melage/resource/theme/rc/line_horizontal_pressed.png +0 -0
  324. melage/resource/theme/rc/line_horizontal_pressed@2x.png +0 -0
  325. melage/resource/theme/rc/line_vertical.png +0 -0
  326. melage/resource/theme/rc/line_vertical@2x.png +0 -0
  327. melage/resource/theme/rc/line_vertical_disabled.png +0 -0
  328. melage/resource/theme/rc/line_vertical_disabled@2x.png +0 -0
  329. melage/resource/theme/rc/line_vertical_focus.png +0 -0
  330. melage/resource/theme/rc/line_vertical_focus@2x.png +0 -0
  331. melage/resource/theme/rc/line_vertical_pressed.png +0 -0
  332. melage/resource/theme/rc/line_vertical_pressed@2x.png +0 -0
  333. melage/resource/theme/rc/radio_checked.png +0 -0
  334. melage/resource/theme/rc/radio_checked@2x.png +0 -0
  335. melage/resource/theme/rc/radio_checked_disabled.png +0 -0
  336. melage/resource/theme/rc/radio_checked_disabled@2x.png +0 -0
  337. melage/resource/theme/rc/radio_checked_focus.png +0 -0
  338. melage/resource/theme/rc/radio_checked_focus@2x.png +0 -0
  339. melage/resource/theme/rc/radio_checked_pressed.png +0 -0
  340. melage/resource/theme/rc/radio_checked_pressed@2x.png +0 -0
  341. melage/resource/theme/rc/radio_unchecked.png +0 -0
  342. melage/resource/theme/rc/radio_unchecked@2x.png +0 -0
  343. melage/resource/theme/rc/radio_unchecked_disabled.png +0 -0
  344. melage/resource/theme/rc/radio_unchecked_disabled@2x.png +0 -0
  345. melage/resource/theme/rc/radio_unchecked_focus.png +0 -0
  346. melage/resource/theme/rc/radio_unchecked_focus@2x.png +0 -0
  347. melage/resource/theme/rc/radio_unchecked_pressed.png +0 -0
  348. melage/resource/theme/rc/radio_unchecked_pressed@2x.png +0 -0
  349. melage/resource/theme/rc/toolbar_move_horizontal.png +0 -0
  350. melage/resource/theme/rc/toolbar_move_horizontal@2x.png +0 -0
  351. melage/resource/theme/rc/toolbar_move_horizontal_disabled.png +0 -0
  352. melage/resource/theme/rc/toolbar_move_horizontal_disabled@2x.png +0 -0
  353. melage/resource/theme/rc/toolbar_move_horizontal_focus.png +0 -0
  354. melage/resource/theme/rc/toolbar_move_horizontal_focus@2x.png +0 -0
  355. melage/resource/theme/rc/toolbar_move_horizontal_pressed.png +0 -0
  356. melage/resource/theme/rc/toolbar_move_horizontal_pressed@2x.png +0 -0
  357. melage/resource/theme/rc/toolbar_move_vertical.png +0 -0
  358. melage/resource/theme/rc/toolbar_move_vertical@2x.png +0 -0
  359. melage/resource/theme/rc/toolbar_move_vertical_disabled.png +0 -0
  360. melage/resource/theme/rc/toolbar_move_vertical_disabled@2x.png +0 -0
  361. melage/resource/theme/rc/toolbar_move_vertical_focus.png +0 -0
  362. melage/resource/theme/rc/toolbar_move_vertical_focus@2x.png +0 -0
  363. melage/resource/theme/rc/toolbar_move_vertical_pressed.png +0 -0
  364. melage/resource/theme/rc/toolbar_move_vertical_pressed@2x.png +0 -0
  365. melage/resource/theme/rc/toolbar_separator_horizontal.png +0 -0
  366. melage/resource/theme/rc/toolbar_separator_horizontal@2x.png +0 -0
  367. melage/resource/theme/rc/toolbar_separator_horizontal_disabled.png +0 -0
  368. melage/resource/theme/rc/toolbar_separator_horizontal_disabled@2x.png +0 -0
  369. melage/resource/theme/rc/toolbar_separator_horizontal_focus.png +0 -0
  370. melage/resource/theme/rc/toolbar_separator_horizontal_focus@2x.png +0 -0
  371. melage/resource/theme/rc/toolbar_separator_horizontal_pressed.png +0 -0
  372. melage/resource/theme/rc/toolbar_separator_horizontal_pressed@2x.png +0 -0
  373. melage/resource/theme/rc/toolbar_separator_vertical.png +0 -0
  374. melage/resource/theme/rc/toolbar_separator_vertical@2x.png +0 -0
  375. melage/resource/theme/rc/toolbar_separator_vertical_disabled.png +0 -0
  376. melage/resource/theme/rc/toolbar_separator_vertical_disabled@2x.png +0 -0
  377. melage/resource/theme/rc/toolbar_separator_vertical_focus.png +0 -0
  378. melage/resource/theme/rc/toolbar_separator_vertical_focus@2x.png +0 -0
  379. melage/resource/theme/rc/toolbar_separator_vertical_pressed.png +0 -0
  380. melage/resource/theme/rc/toolbar_separator_vertical_pressed@2x.png +0 -0
  381. melage/resource/theme/rc/transparent.png +0 -0
  382. melage/resource/theme/rc/transparent@2x.png +0 -0
  383. melage/resource/theme/rc/transparent_disabled.png +0 -0
  384. melage/resource/theme/rc/transparent_disabled@2x.png +0 -0
  385. melage/resource/theme/rc/transparent_focus.png +0 -0
  386. melage/resource/theme/rc/transparent_focus@2x.png +0 -0
  387. melage/resource/theme/rc/transparent_pressed.png +0 -0
  388. melage/resource/theme/rc/transparent_pressed@2x.png +0 -0
  389. melage/resource/theme/rc/window_close.png +0 -0
  390. melage/resource/theme/rc/window_close@2x.png +0 -0
  391. melage/resource/theme/rc/window_close_disabled.png +0 -0
  392. melage/resource/theme/rc/window_close_disabled@2x.png +0 -0
  393. melage/resource/theme/rc/window_close_focus.png +0 -0
  394. melage/resource/theme/rc/window_close_focus@2x.png +0 -0
  395. melage/resource/theme/rc/window_close_pressed.png +0 -0
  396. melage/resource/theme/rc/window_close_pressed@2x.png +0 -0
  397. melage/resource/theme/rc/window_grip.png +0 -0
  398. melage/resource/theme/rc/window_grip@2x.png +0 -0
  399. melage/resource/theme/rc/window_grip_disabled.png +0 -0
  400. melage/resource/theme/rc/window_grip_disabled@2x.png +0 -0
  401. melage/resource/theme/rc/window_grip_focus.png +0 -0
  402. melage/resource/theme/rc/window_grip_focus@2x.png +0 -0
  403. melage/resource/theme/rc/window_grip_pressed.png +0 -0
  404. melage/resource/theme/rc/window_grip_pressed@2x.png +0 -0
  405. melage/resource/theme/rc/window_minimize.png +0 -0
  406. melage/resource/theme/rc/window_minimize@2x.png +0 -0
  407. melage/resource/theme/rc/window_minimize_disabled.png +0 -0
  408. melage/resource/theme/rc/window_minimize_disabled@2x.png +0 -0
  409. melage/resource/theme/rc/window_minimize_focus.png +0 -0
  410. melage/resource/theme/rc/window_minimize_focus@2x.png +0 -0
  411. melage/resource/theme/rc/window_minimize_pressed.png +0 -0
  412. melage/resource/theme/rc/window_minimize_pressed@2x.png +0 -0
  413. melage/resource/theme/rc/window_undock.png +0 -0
  414. melage/resource/theme/rc/window_undock@2x.png +0 -0
  415. melage/resource/theme/rc/window_undock_disabled.png +0 -0
  416. melage/resource/theme/rc/window_undock_disabled@2x.png +0 -0
  417. melage/resource/theme/rc/window_undock_focus.png +0 -0
  418. melage/resource/theme/rc/window_undock_focus@2x.png +0 -0
  419. melage/resource/theme/rc/window_undock_pressed.png +0 -0
  420. melage/resource/theme/rc/window_undock_pressed@2x.png +0 -0
  421. melage/resource/theme/style.qss +2223 -0
  422. melage/resource/tract.png +0 -0
  423. melage/resource/view1.png +0 -0
  424. melage/resource/view1_eco.png +0 -0
  425. melage/resource/view1_mri.png +0 -0
  426. melage/resource/view1_seg.png +0 -0
  427. melage/resource/view2.png +0 -0
  428. melage/resource/view2_seg.png +0 -0
  429. melage/resource/w.png +0 -0
  430. melage/resource/zoom_in.png +0 -0
  431. melage/resource/zoom_inFaded.png +0 -0
  432. melage/resource/zoom_out.png +0 -0
  433. melage/resource/zoom_outFaded.png +0 -0
  434. melage/some_notes.txt +3 -0
  435. melage/utils/DispalyIm.py +2788 -0
  436. melage/utils/GMM.py +720 -0
  437. melage/utils/Shaders_120.py +257 -0
  438. melage/utils/Shaders_330.py +314 -0
  439. melage/utils/Shaders_bu.py +314 -0
  440. melage/utils/__init__0.py +7 -0
  441. melage/utils/brain_extraction_helper.py +234 -0
  442. melage/utils/custom_QScrollBar.py +61 -0
  443. melage/utils/glScientific.py +1554 -0
  444. melage/utils/glScientific_bc.py +1585 -0
  445. melage/utils/readData.py +1061 -0
  446. melage/utils/registration.py +512 -0
  447. melage/utils/source_folder.py +18 -0
  448. melage/utils/utils.py +3808 -0
  449. melage/version.txt +1 -0
  450. melage/widgets/ApplyMask.py +212 -0
  451. melage/widgets/ChangeSystem.py +152 -0
  452. melage/widgets/DeepLModels/InfantSegment/Unet.py +464 -0
  453. melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py +149 -0
  454. melage/widgets/DeepLModels/NPP/models/checkpoints/npp_v1.pth.py +0 -0
  455. melage/widgets/DeepLModels/NPP/models/losses.py +146 -0
  456. melage/widgets/DeepLModels/NPP/models/model.py +272 -0
  457. melage/widgets/DeepLModels/NPP/models/utils.py +303 -0
  458. melage/widgets/DeepLModels/NPP/npp.py +116 -0
  459. melage/widgets/DeepLModels/NPP/requirements.txt +8 -0
  460. melage/widgets/DeepLModels/NPP/train/train.py +116 -0
  461. melage/widgets/DeepLModels/Unet3DAtt.py +657 -0
  462. melage/widgets/DeepLModels/Unet3D_basic.py +648 -0
  463. melage/widgets/DeepLModels/new_unet.py +652 -0
  464. melage/widgets/DeepLModels/new_unet_old.py +639 -0
  465. melage/widgets/DeepLModels/new_unet_old2.py +658 -0
  466. melage/widgets/HistImage.py +153 -0
  467. melage/widgets/ImageThresholding.py +222 -0
  468. melage/widgets/MaskOperations.py +147 -0
  469. melage/widgets/N4Dialog.py +241 -0
  470. melage/widgets/Segmentation/FCM.py +1553 -0
  471. melage/widgets/Segmentation/__init__.py +588 -0
  472. melage/widgets/Segmentation/utils.py +417 -0
  473. melage/widgets/SemiAutoSeg.py +666 -0
  474. melage/widgets/Synthstrip.py +141 -0
  475. melage/widgets/__init__0.py +5 -0
  476. melage/widgets/about.py +246 -0
  477. melage/widgets/activation.py +437 -0
  478. melage/widgets/activator.py +147 -0
  479. melage/widgets/be_dl.py +409 -0
  480. melage/widgets/be_dl_unet3d.py +441 -0
  481. melage/widgets/brain_extraction.py +855 -0
  482. melage/widgets/brain_extraction_dl.py +887 -0
  483. melage/widgets/brain_extraction_dl_bu.py +869 -0
  484. melage/widgets/colorwidget.py +100 -0
  485. melage/widgets/dockWidgets.py +2005 -0
  486. melage/widgets/enhanceImWidget.py +109 -0
  487. melage/widgets/fileDialog_widget.py +275 -0
  488. melage/widgets/iminfo.py +346 -0
  489. melage/widgets/mainwindow_widget.py +6775 -0
  490. melage/widgets/melageAbout.py +123 -0
  491. melage/widgets/openglWidgets.py +556 -0
  492. melage/widgets/registrationWidget.py +342 -0
  493. melage/widgets/repeat_widget.py +74 -0
  494. melage/widgets/screenshot_widget.py +138 -0
  495. melage/widgets/settings_widget.py +77 -0
  496. melage/widgets/tranformationWidget.py +275 -0
  497. melage-0.0.65.dist-info/METADATA +742 -0
  498. melage-0.0.65.dist-info/RECORD +501 -0
  499. melage-0.0.65.dist-info/WHEEL +5 -0
  500. melage-0.0.65.dist-info/entry_points.txt +2 -0
  501. melage-0.0.65.dist-info/top_level.txt +1 -0
melage/widgets/DeepLModels/new_unet_old2.py
@@ -0,0 +1,658 @@
+ from functools import partial
+
+ import numpy as np
+ import torch.nn as nn
+ import torch
+ import math
+ #from .model_utils import *
+ import math
+ import torch
+ from functools import partial
+ import torch.nn as nn
+ from einops import repeat, rearrange
+ #from einops import reduce, rearrange
+ #from einops.layers.torch import Rearrange
+ #from torch.optim import lr_scheduler
+ #import torch.nn.functional as F
+ def sigmoid_beta_schedule(timesteps, start = -3, end = 3, tau = 1, clamp_min = 1e-5):
+     """
+     sigmoid schedule
+     proposed in https://arxiv.org/abs/2212.11972 - Figure 8
+     better for images > 64x64, when used during training
+     """
+     steps = timesteps + 1
+     t = torch.linspace(0, timesteps, steps, dtype = torch.float64) / timesteps
+     v_start = torch.tensor(start / tau).sigmoid()
+     v_end = torch.tensor(end / tau).sigmoid()
+     alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
+     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
+     betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
+     return torch.clip(betas, 0, 0.999)
+
+ class BlockLayer(nn.Module):
+     def __init__(self, num_blcks, block_layer, planes_in, planes_out, kernel_size=3, first_layer=False,
+                  input_size=None, time_emb_dim=None, norm_type='layer'):
+         super(BlockLayer, self).__init__()
+
+         self.blocks = nn.ModuleList()
+         for i in range(num_blcks):
+             if i == 0:
+                 self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=first_layer,
+                                                input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             else:
+                 self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=False,
+                                                input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             planes_in = planes_out
+
+
+     def forward(self, x, t=None):
+         for i, block in enumerate(self.blocks):
+             x = block(x, t)
+         return x
+
+
+
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, planes_in, planes_out, time_emb_dim = None, kernel_size=3, first_layer=False, input_size=128, norm_type='layer'):
+         super(ResidualBlock, self).__init__()
+         if time_emb_dim is not None:
+             if planes_in>planes_out:
+                 dim = planes_in*2
+             else:
+                 dim = planes_in*2
+             self.mlp = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(time_emb_dim, dim)
+             )
+
+         self.conv1 = ConvolutionalBlock(planes_in=planes_in, planes_out=planes_out, first_layer=first_layer,
+                                         kernel_size=kernel_size, dilation=1,
+                                         activation=nn.ReLU, input_size=input_size, norm_type= norm_type)
+         self.conv2 = ConvolutionalBlock(planes_in=planes_out, planes_out=planes_out, first_layer=False,
+                                         kernel_size=1,
+                                         dilation=1, activation=nn.ReLU, input_size=input_size, norm_type=norm_type)
+         if planes_in != planes_out:
+             self.sample = nn.Conv3d(planes_in, planes_out, (1, 1, 1), stride=(1, 1, 1), dilation=(1, 1, 1),
+                                     bias=True) #
+         else:
+             self.sample = None
+
+     def forward(self, x, time_emb= None):
+         identity = x.clone()
+         scale_shift = None
+         if time_emb is not None:
+             time_emb = self.mlp(time_emb)
+             time_emb = time_emb.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
+             scale_shift = time_emb.chunk(2, dim=1)
+         x = self.conv1(x, scale_shift= scale_shift)
+         x = self.conv2(x, scale_shift=None)
+
+
+         if self.sample is not None:
+             identity = self.sample(identity)
+
+
+         x += identity
+
+         return x
+
+
+ class UnetEncoder(nn.Module):
+     def __init__(self, in_channel, base_inc_channel=8, layer=BlockLayer, block=None,layer_blocks=None,
+                  downsampling_stride=None,feature_dilation=1.5, layer_widths=None, kernel_size=3,
+                  time_emb_dim=None, norm_type='layer'):
+         super(UnetEncoder, self).__init__()
+
+         self.layers = nn.ModuleList()
+         self.downsampling_convolutions = nn.ModuleList()
+         self.attention_modules = nn.ModuleList()
+         self.downsampling_zarib = []
+         in_channel_layer = in_channel
+         input_size = 192
+         self._layers_with = []
+         self._layers_with.append(base_inc_channel)
+         for i, num_blcks in enumerate(layer_blocks):
+             if layer_widths is not None:
+                 out_channel_layer = layer_widths[i]
+             else:
+                 out_channel_layer = base_inc_channel * int(feature_dilation ** (i+1))//2
+
+             if i == 0:
+                 first_layer = True
+             else:
+                 first_layer = False
+             self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                      planes_in=in_channel_layer, planes_out=out_channel_layer,
+                                      kernel_size=kernel_size,
+                                      first_layer=first_layer, input_size=input_size,
+                                      time_emb_dim=time_emb_dim, norm_type=norm_type))
+             #self.attention_modules.append(Attention(out_channel_layer))
+             if i != len(layer_blocks) - 1:
+
+                 padding = kernel_size // 2 # constant size
+                 #downsampling_conv = nn.Conv3d(out_channel_layer, out_channel_layer, (kernel_size, kernel_size, kernel_size), padding=padding,
+                 #                              stride=(downsampling_stride,downsampling_stride,downsampling_stride),
+                 #                              bias=True)
+                 downsampling_conv = nn.MaxPool3d(kernel_size=2, stride=2)
+
+                 self.downsampling_convolutions.append(downsampling_conv)
+
+                 input_size = input_size // 2
+             print("Encoder {}:".format(i), in_channel_layer, out_channel_layer)
+             self._layers_with.append(out_channel_layer)
+             in_channel_layer = out_channel_layer
+         self.out_channel_layer = in_channel_layer
+         self.output_size = input_size
+
+     def forward(self, x, time=None):
+         outputs = list()
+         #outputs.insert(0, x)
+         for layer, downsampling in zip(self.layers[:-1], self.downsampling_convolutions):
+             x = layer(x, time)
+
+             outputs.insert(0, x)
+
+             x = downsampling(x)
+         x = self.layers[-1](x, time)
+         outputs.insert(0, x) #bottle neck layer
+         return outputs
+
+ class ConvolutionalBlock(nn.Module):
+     def __init__(self, planes_in, planes_out, first_layer=False, kernel_size=3, dilation=1, activation=None,
+                  input_size=None, norm_type='layer'):
+         super(ConvolutionalBlock, self).__init__()
+         if dilation == 1:
+             padding = kernel_size // 2 # constant size
+         else:
+             # (In + 2*padding - dilation * (kernel_size - 1) - 1)/stride + 1
+             if kernel_size == 3:
+                 if dilation == 2:
+                     padding = 2
+                 elif dilation == 4:
+                     padding = 4
+                 elif dilation == 3:
+                     padding = 3
+                 else:
+                     padding = None
+             elif kernel_size == 1:
+                 padding = 0
+         self.activation = None
+         self.norm = None
+         if first_layer:
+             self.norm = nn.InstanceNorm3d(planes_in)
+             self.activation = activation()
+             self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                   padding=padding, bias=True,
+                                   dilation=(dilation, dilation, dilation))
+         else:
+             if activation is not None:
+                 if norm_type.lower()=='layer':
+                     self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                 elif norm_type.lower()=='group':
+                     valid_num_groups = np.array([16, 8, 4, 2])
+                     valid_num_groups = valid_num_groups[valid_num_groups<planes_in]
+                     num_groups = None
+                     for num_groups in valid_num_groups:
+                         if planes_in % num_groups != 0:
+                             break
+                     if num_groups is None:
+                         raise exit('Num groups can not be determined')
+                     self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=planes_in)
+                 elif norm_type.lower()=='batch':
+                     self.norm = nn.BatchNorm3d(planes_in)
+                 elif norm_type.lower() == 'instance':
+                     self.norm = nn.InstanceNorm3d(planes_in)
+                 else:
+                     self.norm= None
+
+                 self.activation = activation()
+                 self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                       padding=padding, bias=True,
+                                       dilation=(dilation, dilation, dilation))
+
+             else:
+                 if norm_type.lower()=='layer':
+                     if input_size<120:
+                         self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                     else:
+                         self.norm = nn.InstanceNorm3d(planes_in)
+                 elif norm_type.lower()=='group':
+                     valid_num_groups = [16, 8, 4, 2]
+                     valid_num_groups = valid_num_groups[valid_num_groups < planes_in]
+                     num_groups = None
+                     for num_groups in valid_num_groups:
+                         if planes_in % num_groups != 0:
+                             break
+                     if num_groups is None:
+                         raise exit('Num groups can not be determined')
+                     self.norm = nn.GroupNorm(num_groups=planes_in, num_channels=planes_in)
+                 elif norm_type.lower() == 'batch':
+                     self.norm = nn.BatchNorm3d(planes_in)
+                 elif norm_type.lower() == 'instance':
+                     self.norm = nn.InstanceNorm3d(planes_in)
+                 else:
+                     self.norm = None
+
+                 self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                       padding=padding, bias=True,
+                                       dilation=(dilation, dilation, dilation))
+
+
+     def forward(self, x, scale_shift=None):
+         if self.norm is not None:
+             x = self.norm(x)
+
+         if scale_shift is not None:
+             scale, shift = scale_shift
+             x = x * (scale + 1) + shift
+
+         if self.activation is not None:
+             x = self.activation(x)
+
+         x = self.conv(x)
+
+         return x
+ class SinusoidalPosEmb(nn.Module):
+     def __init__(self, dim):
+         super().__init__()
+         self.dim = dim
+
+     def forward(self, x):
+         device = x.device
+         half_dim = self.dim // 2
+         emb = math.log(10000) / (half_dim - 1)
+         emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+         emb = x[...,None]*emb[None,:]
+         emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+         if len(emb.shape)==3:
+             emb = emb.view(emb.shape[0], emb.shape[1] * emb.shape[2])
+         return emb
+
+
+
+ class UnetDecoder(nn.Module):
+     def __init__(self, in_channel, base_inc_channel=64, layer=BlockLayer, block=None,layer_blocks=[1,1,1,1],
+                  feature_dilation=2, upsampling_stride=2, layer_widths=None, kernel_size=3,
+                  upsampling_mode="trilinear", align_corners=False, use_transposed_convolutions=False, last_cov_channels=256,
+                  time_emb_dim=None, norm_type='layer'
+                  ):
+         super(UnetDecoder, self).__init__()
+         self.layers = nn.ModuleList()
+
+         self.upsampling_blocks = nn.ModuleList()
+
+         self.attention_modules = nn.ModuleList()
+         in_channel_layer = in_channel
+         #input_size = 24
+         input_size = 16
+
+
+         for i, num_blcks in enumerate(layer_blocks):
+             if layer_widths is not None:
+                 out_channel_layer = layer_widths[i]
+             else:
+                 out_channel_layer = base_inc_channel // (feature_dilation ** (i))
+
+             if i == 0:
+                 first_layer = True
+                 self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                          planes_in=last_cov_channels, planes_out=out_channel_layer,
+                                          kernel_size=kernel_size,
+                                          first_layer=first_layer, input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             else:
+                 first_layer = False
+
+                 self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                          planes_in=in_channel_layer+layer_widths[i-1], planes_out=out_channel_layer,
+                                          kernel_size=kernel_size,
+                                          first_layer=first_layer, input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+
+             #self.upsampling_blocks.append(nn.ConvTranspose3d(out_channel_layer, out_channel_layer, kernel_size=2,
+             #                                                 stride=upsampling_stride, padding=0))
+             self.upsampling_blocks.append(nn.Upsample(scale_factor=2, mode='nearest'))
+
+             input_size = input_size *2
+             last_cov_channels = in_channel_layer#last_cov_channels//2
+             print("Decoder {}:".format(i), in_channel_layer, out_channel_layer)
+             in_channel_layer = out_channel_layer
+         self.out_channel_layer = in_channel_layer
+     def forward(self, x, t):
+         i = 0
+         outputs = list()
+         y = x[0]
+         for up, lay in zip(self.upsampling_blocks, self.layers[:-1]):
+             if i == 0:
+                 y = lay(y, t)
+             else:
+                 y = lay(y,t)
+             outputs.insert(0, y)
+             y = up(y)
+             #y = att(y)
+             y = torch.cat([y, x[i + 1]],1)
+             i += 1
+         y = self.layers[-1](y,t)
+         outputs.insert(0, y)
+         return y, outputs
+
+
+
+ class Attention(nn.Module):
+     def __init__(self, dim, heads = 4, dim_head = 16):
+         super().__init__()
+         self.scale = dim_head ** -0.5
+         self.heads = heads
+         hidden_dim = dim_head * heads
+
+         self.to_qkv = nn.Conv3d(dim, hidden_dim * 3, 1, bias = False)
+         self.to_out = nn.Conv3d(hidden_dim, dim, 1)
+
+     def forward(self, x, mask=None):
+         b, c, h, w, z = x.shape
+         qkv = self.to_qkv(x).chunk(3, dim = 1)
+         q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h = self.heads), qkv)
+
+         scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
+         attention = torch.softmax(scaled_dot_prod, dim=-1)
+         v = v / (h * w* z)
+         atv = torch.einsum('... i j , ... j d -> ... i d', attention, v)
+         out = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
+         return self.to_out(out)
+
+
+
+
+ class CrossConv3d(nn.Conv3d):
+
+     """
+     https://github.com/JJGO/UniverSeg/blob/main/universeg/nn/cross_conv.py
+     Compute pairwise convolution between all element of x and all elements of y.
+     x, y are tensors of size B,_,C,H,W where _ could be different number of elements in x and y
+     essentially, we do a meshgrid of the elements to get B,Sx,Sy,C,H,W tensors, and then
+     pairwise conv.
+     Args:
+         x (tensor): B,Sx,Cx,H,W
+         y (tensor): B,Sy,Cy,H,W
+     Returns:
+         tensor: B,Sx,Sy,Cout,H,W
+     """
+     """
+     CrossConv2d is a convolutional layer that performs pairwise convolutions between elements of two input tensors.
+
+     Parameters
+     ----------
+     in_channels : int or tuple of ints
+         Number of channels in the input tensor(s).
+         If the tensors have different number of channels, in_channels must be a tuple
+     out_channels : int
+         Number of output channels.
+     kernel_size : int or tuple of ints
+         Size of the convolutional kernel.
+     stride : int or tuple of ints, optional
+         Stride of the convolution. Default is 1.
+     padding : int or tuple of ints, optional
+         Zero-padding added to both sides of the input. Default is 0.
+     dilation : int or tuple of ints, optional
+         Spacing between kernel elements. Default is 1.
+     groups : int, optional
+         Number of blocked connections from input channels to output channels. Default is 1.
+     bias : bool, optional
+         If True, adds a learnable bias to the output. Default is True.
+     padding_mode : str, optional
+         Padding mode. Default is "zeros".
+     device : str, optional
+         Device on which to allocate the tensor. Default is None.
+     dtype : torch.dtype, optional
+         Data type assigned to the tensor. Default is None.
+
+     Returns
+     -------
+     torch.Tensor
+         Tensor resulting from the pairwise convolution between the elements of x and y.
+
+     Notes
+     -----
+     x and y are tensors of size (B, Sx, Cx, H, W) and (B, Sy, Cy, H, W), respectively,
+     The function does the cartesian product of the elements of x and y to obtain a tensor
+     of size (B, Sx, Sy, Cx + Cy, H, W), and then performs the same convolution for all
+     (B, Sx, Sy) in the batch dimension. Runtime and memory are O(Sx * Sy).
+
+     Examples
+     --------
+     >>> x = torch.randn(2, 3, 4, 32, 32)
+     >>> y = torch.randn(2, 5, 6, 32, 32)
+     >>> conv = CrossConv2d(in_channels=(4, 6), out_channels=7, kernel_size=3, padding=1)
+     >>> output = conv(x, y)
+     >>> output.shape #(2, 3, 5, 7, 32, 32)
+     """
+
+
+     def __init__(
+         self,
+         in_channels,
+         out_channels: int,
+         kernel_size,
+         stride = 1,
+         padding = 0,
+         dilation= 1,
+         groups: int = 1,
+         bias: bool = True,
+         padding_mode: str = "zeros",
+         device=None,
+         dtype=None,
+     ) -> None:
+
+         if isinstance(in_channels, (list, tuple)):
+             concat_channels = sum(in_channels)
+         else:
+             concat_channels = 2 * in_channels
+
+         super().__init__(
+             in_channels=concat_channels,
+             out_channels=out_channels,
+             kernel_size=kernel_size,
+             stride=stride,
+             padding=padding,
+             dilation=dilation,
+             groups=groups,
+             bias=bias,
+             padding_mode=padding_mode,
+             device=device,
+             dtype=dtype,
+         )
+
+     def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+         """
+         Compute pairwise convolution between all elements of x and all elements of y.
+
+         Parameters
+         ----------
+         x : torch.Tensor
+             Input tensor of size (B, Sx, Cx, H, W).
+         y : torch.Tensor
+             Input tensor of size (B, Sy, Cy, H, W).
+
+         Returns
+         -------
+         torch.Tensor
+             Tensor resulting from the cross-convolution between the elements of x and y.
+             Has size (B, Sx, Sy, Co, H, W), where Co is the number of output channels.
+         """
+         B, Sx, *_ = x.shape
+         _, Sy, *_ = y.shape
+
+         xs = repeat(x, "B Sx Cx H W Y -> B Sx Sy Cx H W Y", Sy=Sy)
+         ys = repeat(y, "B Sy Cy H W Y-> B Sx Sy Cy H W Y", Sx=Sx)
+
+         xy = torch.cat([xs, ys], dim=3,)
+
+         batched_xy = rearrange(xy, "B Sx Sy C2 H W Y -> (B Sx Sy) C2 H W Y")
+         batched_output = super().forward(batched_xy)
+
+         output = rearrange(
+             batched_output, "(B Sx Sy) Co H W Y-> B Sx Sy Co H W Y", B=B, Sx=Sx, Sy=Sy
+         )
+         return output
+
+ class UnetGen(nn.Module):
+     def __init__(self, base_inc_channel=8,
+                  feature_dilation=2, downsampling_stride=2,
+                  encoder_class=UnetEncoder, layer_widths=None, block=None,
+                  kernel_size=3, interpolation_mode ="trilinear",decoder_class=None,
+                  use_transposed_convolutions=True, time_embed = False, norm_type='layer'):
+         super(UnetGen, self).__init__()
+         time_embed = self.time_embed
+         use_transposed_convolutions = self.use_tr_conv
+         inblock = 16
+         base_inc_channel = inblock
+         self.base_inc_channel = base_inc_channel
+
+         sinu_pos_emb = SinusoidalPosEmb(inblock)
+         fourier_dim = inblock
+         #if self.spacing_embed:
+         #    fourier_dim*=4
+
+         # time embeddings
+
+         time_dim = inblock * 4
+         if time_embed:
+             self.time_mlp = nn.Sequential(
+                 sinu_pos_emb,
+                 nn.Linear(fourier_dim, time_dim),
+                 nn.GELU(),
+                 nn.Linear(time_dim, time_dim)
+             )
+         else:
+             time_dim = None
+
+         #encoder_blocks = [1, 1, 1, 1, 1, 1]
+
+         #decoder_blocks = [1,1,1,1, 1, 1]
+         encoder_blocks = [1, 1, 1]
+
+         decoder_blocks = [1, 1, 1]
+
+         padding = kernel_size // 2 # constant size
+         self.before_encoder = nn.Conv3d(1, inblock, kernel_size=(3, 3, 3),
+                                         stride=(1, 1, 1), padding=3//2,
+                                         bias=True)
+
+
+         self.encoder = encoder_class(in_channel=inblock, base_inc_channel=base_inc_channel, layer_blocks=encoder_blocks,
+                                      block=block,
+                                      feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
+                                      layer_widths=layer_widths, kernel_size=kernel_size,
+                                      time_emb_dim=time_dim, norm_type=norm_type)
+
+         layer_widths = self.encoder._layers_with
+         in_channel = layer_widths[-1]
+         self.BottleNeck = BlockLayer(num_blcks=1, block_layer=block,
+                                      planes_in=in_channel, planes_out=in_channel,
+                                      kernel_size=kernel_size,
+                                      first_layer=False, input_size=self.encoder.output_size, time_emb_dim=time_dim, norm_type=norm_type)
+
+         self.BottleNeck_att = Attention(in_channel)
+
+         layer_widths = layer_widths[::-1][1:]
+
+
+         self.decoder = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
+                                      block=block, last_cov_channels = self.encoder.out_channel_layer,
+                                      upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                      use_transposed_convolutions=use_transposed_convolutions,
+                                      kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
+                                      )
+         self.decoder_mask = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
+                                           block=block, last_cov_channels = self.encoder.out_channel_layer,
+                                           upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                           use_transposed_convolutions=use_transposed_convolutions,
+                                           kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
+                                           )
+
+         kernel_size = 3
+
+         self.last_convolution = BlockLayer(num_blcks=1, block_layer=block,
+                                            planes_in=inblock*2, planes_out=inblock//2,
+                                            kernel_size=kernel_size,
+                                            first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)
+
+         self.last_convolution_rec = BlockLayer(num_blcks=1, block_layer=block,
+                                                planes_in=inblock*2, planes_out=inblock//2,
+                                                kernel_size=kernel_size,
+                                                first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)
+
+         self.final_convolution = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                            stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+         self.final_convolution_rec = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                                stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+         self.activation = nn.Softmax(dim=1)
+         self.sigmoid = nn.Sigmoid()
+
+
+
+
+
+     def forward(self, y, time, t=0, noise = None):
+
+
+         y = self.before_encoder(y)
+
+         if self.time_embed:
+             if len(time.shape)==1:
+                 t = self.time_mlp(time)
+             else:
+                 t = self.time_mlp(time)
+         else:
+             t = None
+
+         x = self.encoder(y, t)
+         x[0] = self.BottleNeck(x[0], t)
+         x[0] = self.BottleNeck_att(x[0])
+
+         mask,_ = self.decoder_mask(x,t)
+         x, _ = self.decoder(x, t)
+
+         ###############################Attention########################################
+         dim_head = 16
+         self.heads =4
+         self.scale = dim_head ** -0.5
+
+         b, c, h, w, z = x.shape
+         #qkv = self.to_qkv(x).chunk(3, dim = 1)
+         q = rearrange(x, 'b (h c) x y z -> b h c (x y z)', h = self.heads)
+         k = rearrange(mask, 'b (h c) x y z -> b h c (x y z)', h = self.heads)
+         #q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h = self.heads), qkv)
+
+         scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
+         attention = torch.softmax(scaled_dot_prod, dim=-1)
+
+         k = k / (h * w* z)
+         atv = torch.einsum('... i j , ... j d -> ... i d', attention, k)
+         x = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
+         ###############################Attention########################################
+         mask = torch.cat([mask, y], 1)
+         mask = self.last_convolution(mask)
+         mask = self.final_convolution(mask)
+
+
+         x = torch.cat([x, y], 1)
+         #x = (x * mask)
+         z = self.last_convolution_rec(x)
+         z = self.final_convolution_rec(z)
+
+         return [mask,z]
+
+
+ class Unet3D(UnetGen):
+     def __init__(self,time_embed=False, channels=1, *args, encoder_class=UnetEncoder, **kwargs):
+         self.time_embed = time_embed
+         self.use_tr_conv = False
+
+         norm_type = "instance"
+         super().__init__(*args, encoder_class=encoder_class, decoder_class=UnetDecoder,
+                          block=ResidualBlock, norm_type=norm_type, **kwargs)
+
+         self.channels = channels
+         self.netName = 'Unet3D'
+     def name(self):
+         return 'unet3d'
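
For orientation, the short sketch below exercises the two main pieces added in this module: the sigmoid_beta_schedule helper and the Unet3D model. It is illustrative only and not shipped with the package; the import path, the 32x32x32 single-channel input volume, and the 1000-step schedule are assumptions chosen to keep the snippet small.

# Hypothetical smoke test (not part of melage); assumes the module shown in the
# diff above is importable, e.g. something like:
#   from melage.widgets.DeepLModels.new_unet_old2 import Unet3D, sigmoid_beta_schedule
import torch

# Unet3D hard-codes a single input channel and an internal width of 16, and its
# encoder downsamples twice with MaxPool3d, so any cubic volume whose side is
# divisible by 4 works; 32 is an arbitrary choice to keep memory low.
model = Unet3D(time_embed=False, channels=1)
volume = torch.randn(1, 1, 32, 32, 32)                    # (batch, channel, D, H, W)
mask_logits, reconstruction = model(volume, time=None)    # forward returns [mask, z]
print(mask_logits.shape, reconstruction.shape)            # both (1, 1, 32, 32, 32)

# Diffusion-style beta schedule defined at the top of the file; 1000 steps is an assumption.
betas = sigmoid_beta_schedule(1000)
print(betas.shape, float(betas.min()), float(betas.max()))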