melage-0.0.65-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (501)
  1. melage/__init__.py +16 -0
  2. melage/cli.py +4 -0
  3. melage/graphics/GLGraphicsItem.py +286 -0
  4. melage/graphics/GLViewWidget.py +595 -0
  5. melage/graphics/Transform3D.py +55 -0
  6. melage/graphics/__init__.py +8 -0
  7. melage/graphics/functions.py +101 -0
  8. melage/graphics/items/GLAxisItem.py +149 -0
  9. melage/graphics/items/GLGridItem.py +178 -0
  10. melage/graphics/items/GLPolygonItem.py +77 -0
  11. melage/graphics/items/GLScatterPlotItem.py +135 -0
  12. melage/graphics/items/GLVolumeItem.py +280 -0
  13. melage/graphics/items/GLVolumeItem_b.py +237 -0
  14. melage/graphics/items/__init__.py +0 -0
  15. melage/graphics/shaders.py +202 -0
  16. melage/main.py +270 -0
  17. melage/requirements22.txt +25 -0
  18. melage/requirements_old.txt +28 -0
  19. melage/resource/0circle.png +0 -0
  20. melage/resource/0circle_faded.png +0 -0
  21. melage/resource/3d.png +0 -0
  22. melage/resource/3d.psd +0 -0
  23. melage/resource/3dFaded.png +0 -0
  24. melage/resource/Eraser.png +0 -0
  25. melage/resource/EraserFaded.png +0 -0
  26. melage/resource/EraserX.png +0 -0
  27. melage/resource/EraserXFaded.png +0 -0
  28. melage/resource/Eraser_icon.svg +79 -0
  29. melage/resource/Hand.png +0 -0
  30. melage/resource/HandIcons_0.png +0 -0
  31. melage/resource/Hand_IX.png +0 -0
  32. melage/resource/Hand_IXFaded.png +0 -0
  33. melage/resource/Handsqueezed.png +0 -0
  34. melage/resource/Handwriting (copy).png +0 -0
  35. melage/resource/Handwriting.png +0 -0
  36. melage/resource/HandwritingMinus.png +0 -0
  37. melage/resource/HandwritingMinusX.png +0 -0
  38. melage/resource/HandwritingPlus.png +0 -0
  39. melage/resource/HandwritingPlusX.png +0 -0
  40. melage/resource/Move_icon.svg +8 -0
  41. melage/resource/PngItem_2422924.png +0 -0
  42. melage/resource/about.png +0 -0
  43. melage/resource/about_logo.png +0 -0
  44. melage/resource/about_logo0.png +0 -0
  45. melage/resource/action_check.png +0 -0
  46. melage/resource/action_check_OFF.png +0 -0
  47. melage/resource/arrow).png +0 -0
  48. melage/resource/arrow.png +0 -0
  49. melage/resource/arrowFaded.png +0 -0
  50. melage/resource/arrow_org.png +0 -0
  51. melage/resource/arrow_org.png.png +0 -0
  52. melage/resource/arrows.png +0 -0
  53. melage/resource/authors.mp4 +0 -0
  54. melage/resource/box.png +0 -0
  55. melage/resource/check-image-icon-0.jpg +0 -0
  56. melage/resource/circle.png +0 -0
  57. melage/resource/circle_faded.png +0 -0
  58. melage/resource/circle_or.png +0 -0
  59. melage/resource/close.png +0 -0
  60. melage/resource/close_bg.png +0 -0
  61. melage/resource/color/Simple.txt +18 -0
  62. melage/resource/color/Tissue.txt +24 -0
  63. melage/resource/color/Tissue12.txt +27 -0
  64. melage/resource/color/albert_LUT.txt +102 -0
  65. melage/resource/color/mcrib_LUT.txt +102 -0
  66. melage/resource/color/pediatric1.txt +29 -0
  67. melage/resource/color/pediatric1_old.txt +27 -0
  68. melage/resource/color/pediatric2.txt +87 -0
  69. melage/resource/color/pediatric3.txt +29 -0
  70. melage/resource/color/pediatrics (copy).csv +103 -0
  71. melage/resource/color/tissue_seg.txt +4 -0
  72. melage/resource/contour.png +0 -0
  73. melage/resource/contour.svg +2 -0
  74. melage/resource/contourFaded.png +0 -0
  75. melage/resource/contourX.png +0 -0
  76. melage/resource/contourXFaded.png +0 -0
  77. melage/resource/dti.png +0 -0
  78. melage/resource/dti0.png +0 -0
  79. melage/resource/dti222.png +0 -0
  80. melage/resource/dti_or.png +0 -0
  81. melage/resource/eco.png +0 -0
  82. melage/resource/eco22.png +0 -0
  83. melage/resource/eco_old.png +0 -0
  84. melage/resource/eco_or.png +0 -0
  85. melage/resource/eco_or2.png +0 -0
  86. melage/resource/eco_seg.png +0 -0
  87. melage/resource/eco_seg_old.png +0 -0
  88. melage/resource/export.png +0 -0
  89. melage/resource/hand-grab-icon-10.jpg +0 -0
  90. melage/resource/hand-grab-icon-25.jpg +0 -0
  91. melage/resource/info.png +0 -0
  92. melage/resource/line.png +0 -0
  93. melage/resource/linefaded.png +0 -0
  94. melage/resource/load.png +0 -0
  95. melage/resource/main.ico +0 -0
  96. melage/resource/manual_images/3D_rightc.png +0 -0
  97. melage/resource/manual_images/3D_rightc_goto.png +0 -0
  98. melage/resource/manual_images/3D_rightc_paint.png +0 -0
  99. melage/resource/manual_images/3D_rightc_paint_draw1.png +0 -0
  100. melage/resource/manual_images/3D_rightc_paint_draw2.png +0 -0
  101. melage/resource/manual_images/3D_rightc_paint_render.png +0 -0
  102. melage/resource/manual_images/3D_rightc_paint_render2.png +0 -0
  103. melage/resource/manual_images/3D_rightc_paint_render3.png +0 -0
  104. melage/resource/manual_images/3D_rightc_paint_render4.png +0 -0
  105. melage/resource/manual_images/3D_rightc_paint_render5.png +0 -0
  106. melage/resource/manual_images/3D_rightc_paint_render6.png +0 -0
  107. melage/resource/manual_images/3D_rightc_seg.png +0 -0
  108. melage/resource/manual_images/exit_toolbar.png +0 -0
  109. melage/resource/manual_images/load_image_file.png +0 -0
  110. melage/resource/manual_images/load_image_file_openp.png +0 -0
  111. melage/resource/manual_images/main_page.png +0 -0
  112. melage/resource/manual_images/menu_file.png +0 -0
  113. melage/resource/manual_images/menu_file_export.png +0 -0
  114. melage/resource/manual_images/menu_file_import.png +0 -0
  115. melage/resource/manual_images/menu_file_settings.png +0 -0
  116. melage/resource/manual_images/menu_file_ss.png +0 -0
  117. melage/resource/manual_images/open_save_load.png +0 -0
  118. melage/resource/manual_images/panning_toolbar.png +0 -0
  119. melage/resource/manual_images/segmentation_toolbar.png +0 -0
  120. melage/resource/manual_images/tab_mri.png +0 -0
  121. melage/resource/manual_images/tab_us.png +0 -0
  122. melage/resource/manual_images/tabs.png +0 -0
  123. melage/resource/manual_images/toolbar_tools.png +0 -0
  124. melage/resource/manual_images/tools_basic.png +0 -0
  125. melage/resource/manual_images/tools_bet.png +0 -0
  126. melage/resource/manual_images/tools_cs.png +0 -0
  127. melage/resource/manual_images/tools_deepbet.png +0 -0
  128. melage/resource/manual_images/tools_imageinfo.png +0 -0
  129. melage/resource/manual_images/tools_maskO.png +0 -0
  130. melage/resource/manual_images/tools_masking.png +0 -0
  131. melage/resource/manual_images/tools_n4b.png +0 -0
  132. melage/resource/manual_images/tools_resize.png +0 -0
  133. melage/resource/manual_images/tools_ruler.png +0 -0
  134. melage/resource/manual_images/tools_seg.png +0 -0
  135. melage/resource/manual_images/tools_threshold.png +0 -0
  136. melage/resource/manual_images/tools_tools.png +0 -0
  137. melage/resource/manual_images/widget_color.png +0 -0
  138. melage/resource/manual_images/widget_color_add.png +0 -0
  139. melage/resource/manual_images/widget_color_add2.png +0 -0
  140. melage/resource/manual_images/widget_color_additional.png +0 -0
  141. melage/resource/manual_images/widget_images.png +0 -0
  142. melage/resource/manual_images/widget_images2.png +0 -0
  143. melage/resource/manual_images/widget_images3.png +0 -0
  144. melage/resource/manual_images/widget_marker.png +0 -0
  145. melage/resource/manual_images/widget_mri.png +0 -0
  146. melage/resource/manual_images/widget_mri2.png +0 -0
  147. melage/resource/manual_images/widget_segintensity.png +0 -0
  148. melage/resource/manual_images/widget_tab_mutualview.png +0 -0
  149. melage/resource/manual_images/widget_tab_mutualview2.png +0 -0
  150. melage/resource/manual_images/widget_table.png +0 -0
  151. melage/resource/manual_images/widget_table2.png +0 -0
  152. melage/resource/manual_images/widget_us.png +0 -0
  153. melage/resource/melage_top.ico +0 -0
  154. melage/resource/melage_top.png +0 -0
  155. melage/resource/melage_top0.png +0 -0
  156. melage/resource/melage_top1.png +0 -0
  157. melage/resource/melage_top4.png +0 -0
  158. melage/resource/mri (copy).png +0 -0
  159. melage/resource/mri.png +0 -0
  160. melage/resource/mri0.png +0 -0
  161. melage/resource/mri000.png +0 -0
  162. melage/resource/mri22.png +0 -0
  163. melage/resource/mri_big.png +0 -0
  164. melage/resource/mri_old.png +0 -0
  165. melage/resource/mri_seg.png +0 -0
  166. melage/resource/mri_seg_old.png +0 -0
  167. melage/resource/new.png +0 -0
  168. melage/resource/open.png +0 -0
  169. melage/resource/open2.png +0 -0
  170. melage/resource/pan.png +0 -0
  171. melage/resource/pencil.png +0 -0
  172. melage/resource/pencilFaded.png +0 -0
  173. melage/resource/points.png +0 -0
  174. melage/resource/pointsFaded.png +0 -0
  175. melage/resource/rotate.png +0 -0
  176. melage/resource/ruler.png +0 -0
  177. melage/resource/rulerFaded.png +0 -0
  178. melage/resource/s.png +0 -0
  179. melage/resource/s.psd +0 -0
  180. melage/resource/save.png +0 -0
  181. melage/resource/saveas.png +0 -0
  182. melage/resource/seg_mri.png +0 -0
  183. melage/resource/seg_mri2.png +0 -0
  184. melage/resource/settings.png +0 -0
  185. melage/resource/synch.png +0 -0
  186. melage/resource/synchFaded.png +0 -0
  187. melage/resource/theme/rc/.keep +1 -0
  188. melage/resource/theme/rc/arrow_down.png +0 -0
  189. melage/resource/theme/rc/arrow_down@2x.png +0 -0
  190. melage/resource/theme/rc/arrow_down_disabled.png +0 -0
  191. melage/resource/theme/rc/arrow_down_disabled@2x.png +0 -0
  192. melage/resource/theme/rc/arrow_down_focus.png +0 -0
  193. melage/resource/theme/rc/arrow_down_focus@2x.png +0 -0
  194. melage/resource/theme/rc/arrow_down_pressed.png +0 -0
  195. melage/resource/theme/rc/arrow_down_pressed@2x.png +0 -0
  196. melage/resource/theme/rc/arrow_left.png +0 -0
  197. melage/resource/theme/rc/arrow_left@2x.png +0 -0
  198. melage/resource/theme/rc/arrow_left_disabled.png +0 -0
  199. melage/resource/theme/rc/arrow_left_disabled@2x.png +0 -0
  200. melage/resource/theme/rc/arrow_left_focus.png +0 -0
  201. melage/resource/theme/rc/arrow_left_focus@2x.png +0 -0
  202. melage/resource/theme/rc/arrow_left_pressed.png +0 -0
  203. melage/resource/theme/rc/arrow_left_pressed@2x.png +0 -0
  204. melage/resource/theme/rc/arrow_right.png +0 -0
  205. melage/resource/theme/rc/arrow_right@2x.png +0 -0
  206. melage/resource/theme/rc/arrow_right_disabled.png +0 -0
  207. melage/resource/theme/rc/arrow_right_disabled@2x.png +0 -0
  208. melage/resource/theme/rc/arrow_right_focus.png +0 -0
  209. melage/resource/theme/rc/arrow_right_focus@2x.png +0 -0
  210. melage/resource/theme/rc/arrow_right_pressed.png +0 -0
  211. melage/resource/theme/rc/arrow_right_pressed@2x.png +0 -0
  212. melage/resource/theme/rc/arrow_up.png +0 -0
  213. melage/resource/theme/rc/arrow_up@2x.png +0 -0
  214. melage/resource/theme/rc/arrow_up_disabled.png +0 -0
  215. melage/resource/theme/rc/arrow_up_disabled@2x.png +0 -0
  216. melage/resource/theme/rc/arrow_up_focus.png +0 -0
  217. melage/resource/theme/rc/arrow_up_focus@2x.png +0 -0
  218. melage/resource/theme/rc/arrow_up_pressed.png +0 -0
  219. melage/resource/theme/rc/arrow_up_pressed@2x.png +0 -0
  220. melage/resource/theme/rc/base_icon.png +0 -0
  221. melage/resource/theme/rc/base_icon@2x.png +0 -0
  222. melage/resource/theme/rc/base_icon_disabled.png +0 -0
  223. melage/resource/theme/rc/base_icon_disabled@2x.png +0 -0
  224. melage/resource/theme/rc/base_icon_focus.png +0 -0
  225. melage/resource/theme/rc/base_icon_focus@2x.png +0 -0
  226. melage/resource/theme/rc/base_icon_pressed.png +0 -0
  227. melage/resource/theme/rc/base_icon_pressed@2x.png +0 -0
  228. melage/resource/theme/rc/branch_closed.png +0 -0
  229. melage/resource/theme/rc/branch_closed@2x.png +0 -0
  230. melage/resource/theme/rc/branch_closed_disabled.png +0 -0
  231. melage/resource/theme/rc/branch_closed_disabled@2x.png +0 -0
  232. melage/resource/theme/rc/branch_closed_focus.png +0 -0
  233. melage/resource/theme/rc/branch_closed_focus@2x.png +0 -0
  234. melage/resource/theme/rc/branch_closed_pressed.png +0 -0
  235. melage/resource/theme/rc/branch_closed_pressed@2x.png +0 -0
  236. melage/resource/theme/rc/branch_end.png +0 -0
  237. melage/resource/theme/rc/branch_end@2x.png +0 -0
  238. melage/resource/theme/rc/branch_end_disabled.png +0 -0
  239. melage/resource/theme/rc/branch_end_disabled@2x.png +0 -0
  240. melage/resource/theme/rc/branch_end_focus.png +0 -0
  241. melage/resource/theme/rc/branch_end_focus@2x.png +0 -0
  242. melage/resource/theme/rc/branch_end_pressed.png +0 -0
  243. melage/resource/theme/rc/branch_end_pressed@2x.png +0 -0
  244. melage/resource/theme/rc/branch_line.png +0 -0
  245. melage/resource/theme/rc/branch_line@2x.png +0 -0
  246. melage/resource/theme/rc/branch_line_disabled.png +0 -0
  247. melage/resource/theme/rc/branch_line_disabled@2x.png +0 -0
  248. melage/resource/theme/rc/branch_line_focus.png +0 -0
  249. melage/resource/theme/rc/branch_line_focus@2x.png +0 -0
  250. melage/resource/theme/rc/branch_line_pressed.png +0 -0
  251. melage/resource/theme/rc/branch_line_pressed@2x.png +0 -0
  252. melage/resource/theme/rc/branch_more.png +0 -0
  253. melage/resource/theme/rc/branch_more@2x.png +0 -0
  254. melage/resource/theme/rc/branch_more_disabled.png +0 -0
  255. melage/resource/theme/rc/branch_more_disabled@2x.png +0 -0
  256. melage/resource/theme/rc/branch_more_focus.png +0 -0
  257. melage/resource/theme/rc/branch_more_focus@2x.png +0 -0
  258. melage/resource/theme/rc/branch_more_pressed.png +0 -0
  259. melage/resource/theme/rc/branch_more_pressed@2x.png +0 -0
  260. melage/resource/theme/rc/branch_open.png +0 -0
  261. melage/resource/theme/rc/branch_open@2x.png +0 -0
  262. melage/resource/theme/rc/branch_open_disabled.png +0 -0
  263. melage/resource/theme/rc/branch_open_disabled@2x.png +0 -0
  264. melage/resource/theme/rc/branch_open_focus.png +0 -0
  265. melage/resource/theme/rc/branch_open_focus@2x.png +0 -0
  266. melage/resource/theme/rc/branch_open_pressed.png +0 -0
  267. melage/resource/theme/rc/branch_open_pressed@2x.png +0 -0
  268. melage/resource/theme/rc/checkbox_checked.png +0 -0
  269. melage/resource/theme/rc/checkbox_checked0.png +0 -0
  270. melage/resource/theme/rc/checkbox_checked@2x.png +0 -0
  271. melage/resource/theme/rc/checkbox_checked@2x0.png +0 -0
  272. melage/resource/theme/rc/checkbox_checked@2x000.png.png +0 -0
  273. melage/resource/theme/rc/checkbox_checked_disabled.png +0 -0
  274. melage/resource/theme/rc/checkbox_checked_disabled0.png +0 -0
  275. melage/resource/theme/rc/checkbox_checked_disabled@2x.png +0 -0
  276. melage/resource/theme/rc/checkbox_checked_disabled@2x0.png +0 -0
  277. melage/resource/theme/rc/checkbox_checked_focus.png +0 -0
  278. melage/resource/theme/rc/checkbox_checked_focus0.png +0 -0
  279. melage/resource/theme/rc/checkbox_checked_focus@2x.png +0 -0
  280. melage/resource/theme/rc/checkbox_checked_focus@2x0.png +0 -0
  281. melage/resource/theme/rc/checkbox_checked_pressed.png +0 -0
  282. melage/resource/theme/rc/checkbox_checked_pressed0.png +0 -0
  283. melage/resource/theme/rc/checkbox_checked_pressed@2x.png +0 -0
  284. melage/resource/theme/rc/checkbox_checked_pressed@2x0.png +0 -0
  285. melage/resource/theme/rc/checkbox_indeterminate.png +0 -0
  286. melage/resource/theme/rc/checkbox_indeterminate@2x.png +0 -0
  287. melage/resource/theme/rc/checkbox_indeterminate_disabled.png +0 -0
  288. melage/resource/theme/rc/checkbox_indeterminate_disabled@2x.png +0 -0
  289. melage/resource/theme/rc/checkbox_indeterminate_focus.png +0 -0
  290. melage/resource/theme/rc/checkbox_indeterminate_focus@2x.png +0 -0
  291. melage/resource/theme/rc/checkbox_indeterminate_pressed.png +0 -0
  292. melage/resource/theme/rc/checkbox_indeterminate_pressed@2x.png +0 -0
  293. melage/resource/theme/rc/checkbox_unchecked.png +0 -0
  294. melage/resource/theme/rc/checkbox_unchecked0.png +0 -0
  295. melage/resource/theme/rc/checkbox_unchecked00.png +0 -0
  296. melage/resource/theme/rc/checkbox_unchecked@2x.png +0 -0
  297. melage/resource/theme/rc/checkbox_unchecked@2x0.png +0 -0
  298. melage/resource/theme/rc/checkbox_unchecked@2x00.png +0 -0
  299. melage/resource/theme/rc/checkbox_unchecked_disabled.png +0 -0
  300. melage/resource/theme/rc/checkbox_unchecked_disabled0.png +0 -0
  301. melage/resource/theme/rc/checkbox_unchecked_disabled00.png +0 -0
  302. melage/resource/theme/rc/checkbox_unchecked_disabled@2x.png +0 -0
  303. melage/resource/theme/rc/checkbox_unchecked_disabled@2x0.png +0 -0
  304. melage/resource/theme/rc/checkbox_unchecked_disabled@2x00.png +0 -0
  305. melage/resource/theme/rc/checkbox_unchecked_focus.png +0 -0
  306. melage/resource/theme/rc/checkbox_unchecked_focus0.png +0 -0
  307. melage/resource/theme/rc/checkbox_unchecked_focus00.png +0 -0
  308. melage/resource/theme/rc/checkbox_unchecked_focus@2x.png +0 -0
  309. melage/resource/theme/rc/checkbox_unchecked_focus@2x0.png +0 -0
  310. melage/resource/theme/rc/checkbox_unchecked_focus@2x00.png +0 -0
  311. melage/resource/theme/rc/checkbox_unchecked_pressed.png +0 -0
  312. melage/resource/theme/rc/checkbox_unchecked_pressed0.png +0 -0
  313. melage/resource/theme/rc/checkbox_unchecked_pressed00.png +0 -0
  314. melage/resource/theme/rc/checkbox_unchecked_pressed@2x.png +0 -0
  315. melage/resource/theme/rc/checkbox_unchecked_pressed@2x0.png +0 -0
  316. melage/resource/theme/rc/checkbox_unchecked_pressed@2x00.png +0 -0
  317. melage/resource/theme/rc/line_horizontal.png +0 -0
  318. melage/resource/theme/rc/line_horizontal@2x.png +0 -0
  319. melage/resource/theme/rc/line_horizontal_disabled.png +0 -0
  320. melage/resource/theme/rc/line_horizontal_disabled@2x.png +0 -0
  321. melage/resource/theme/rc/line_horizontal_focus.png +0 -0
  322. melage/resource/theme/rc/line_horizontal_focus@2x.png +0 -0
  323. melage/resource/theme/rc/line_horizontal_pressed.png +0 -0
  324. melage/resource/theme/rc/line_horizontal_pressed@2x.png +0 -0
  325. melage/resource/theme/rc/line_vertical.png +0 -0
  326. melage/resource/theme/rc/line_vertical@2x.png +0 -0
  327. melage/resource/theme/rc/line_vertical_disabled.png +0 -0
  328. melage/resource/theme/rc/line_vertical_disabled@2x.png +0 -0
  329. melage/resource/theme/rc/line_vertical_focus.png +0 -0
  330. melage/resource/theme/rc/line_vertical_focus@2x.png +0 -0
  331. melage/resource/theme/rc/line_vertical_pressed.png +0 -0
  332. melage/resource/theme/rc/line_vertical_pressed@2x.png +0 -0
  333. melage/resource/theme/rc/radio_checked.png +0 -0
  334. melage/resource/theme/rc/radio_checked@2x.png +0 -0
  335. melage/resource/theme/rc/radio_checked_disabled.png +0 -0
  336. melage/resource/theme/rc/radio_checked_disabled@2x.png +0 -0
  337. melage/resource/theme/rc/radio_checked_focus.png +0 -0
  338. melage/resource/theme/rc/radio_checked_focus@2x.png +0 -0
  339. melage/resource/theme/rc/radio_checked_pressed.png +0 -0
  340. melage/resource/theme/rc/radio_checked_pressed@2x.png +0 -0
  341. melage/resource/theme/rc/radio_unchecked.png +0 -0
  342. melage/resource/theme/rc/radio_unchecked@2x.png +0 -0
  343. melage/resource/theme/rc/radio_unchecked_disabled.png +0 -0
  344. melage/resource/theme/rc/radio_unchecked_disabled@2x.png +0 -0
  345. melage/resource/theme/rc/radio_unchecked_focus.png +0 -0
  346. melage/resource/theme/rc/radio_unchecked_focus@2x.png +0 -0
  347. melage/resource/theme/rc/radio_unchecked_pressed.png +0 -0
  348. melage/resource/theme/rc/radio_unchecked_pressed@2x.png +0 -0
  349. melage/resource/theme/rc/toolbar_move_horizontal.png +0 -0
  350. melage/resource/theme/rc/toolbar_move_horizontal@2x.png +0 -0
  351. melage/resource/theme/rc/toolbar_move_horizontal_disabled.png +0 -0
  352. melage/resource/theme/rc/toolbar_move_horizontal_disabled@2x.png +0 -0
  353. melage/resource/theme/rc/toolbar_move_horizontal_focus.png +0 -0
  354. melage/resource/theme/rc/toolbar_move_horizontal_focus@2x.png +0 -0
  355. melage/resource/theme/rc/toolbar_move_horizontal_pressed.png +0 -0
  356. melage/resource/theme/rc/toolbar_move_horizontal_pressed@2x.png +0 -0
  357. melage/resource/theme/rc/toolbar_move_vertical.png +0 -0
  358. melage/resource/theme/rc/toolbar_move_vertical@2x.png +0 -0
  359. melage/resource/theme/rc/toolbar_move_vertical_disabled.png +0 -0
  360. melage/resource/theme/rc/toolbar_move_vertical_disabled@2x.png +0 -0
  361. melage/resource/theme/rc/toolbar_move_vertical_focus.png +0 -0
  362. melage/resource/theme/rc/toolbar_move_vertical_focus@2x.png +0 -0
  363. melage/resource/theme/rc/toolbar_move_vertical_pressed.png +0 -0
  364. melage/resource/theme/rc/toolbar_move_vertical_pressed@2x.png +0 -0
  365. melage/resource/theme/rc/toolbar_separator_horizontal.png +0 -0
  366. melage/resource/theme/rc/toolbar_separator_horizontal@2x.png +0 -0
  367. melage/resource/theme/rc/toolbar_separator_horizontal_disabled.png +0 -0
  368. melage/resource/theme/rc/toolbar_separator_horizontal_disabled@2x.png +0 -0
  369. melage/resource/theme/rc/toolbar_separator_horizontal_focus.png +0 -0
  370. melage/resource/theme/rc/toolbar_separator_horizontal_focus@2x.png +0 -0
  371. melage/resource/theme/rc/toolbar_separator_horizontal_pressed.png +0 -0
  372. melage/resource/theme/rc/toolbar_separator_horizontal_pressed@2x.png +0 -0
  373. melage/resource/theme/rc/toolbar_separator_vertical.png +0 -0
  374. melage/resource/theme/rc/toolbar_separator_vertical@2x.png +0 -0
  375. melage/resource/theme/rc/toolbar_separator_vertical_disabled.png +0 -0
  376. melage/resource/theme/rc/toolbar_separator_vertical_disabled@2x.png +0 -0
  377. melage/resource/theme/rc/toolbar_separator_vertical_focus.png +0 -0
  378. melage/resource/theme/rc/toolbar_separator_vertical_focus@2x.png +0 -0
  379. melage/resource/theme/rc/toolbar_separator_vertical_pressed.png +0 -0
  380. melage/resource/theme/rc/toolbar_separator_vertical_pressed@2x.png +0 -0
  381. melage/resource/theme/rc/transparent.png +0 -0
  382. melage/resource/theme/rc/transparent@2x.png +0 -0
  383. melage/resource/theme/rc/transparent_disabled.png +0 -0
  384. melage/resource/theme/rc/transparent_disabled@2x.png +0 -0
  385. melage/resource/theme/rc/transparent_focus.png +0 -0
  386. melage/resource/theme/rc/transparent_focus@2x.png +0 -0
  387. melage/resource/theme/rc/transparent_pressed.png +0 -0
  388. melage/resource/theme/rc/transparent_pressed@2x.png +0 -0
  389. melage/resource/theme/rc/window_close.png +0 -0
  390. melage/resource/theme/rc/window_close@2x.png +0 -0
  391. melage/resource/theme/rc/window_close_disabled.png +0 -0
  392. melage/resource/theme/rc/window_close_disabled@2x.png +0 -0
  393. melage/resource/theme/rc/window_close_focus.png +0 -0
  394. melage/resource/theme/rc/window_close_focus@2x.png +0 -0
  395. melage/resource/theme/rc/window_close_pressed.png +0 -0
  396. melage/resource/theme/rc/window_close_pressed@2x.png +0 -0
  397. melage/resource/theme/rc/window_grip.png +0 -0
  398. melage/resource/theme/rc/window_grip@2x.png +0 -0
  399. melage/resource/theme/rc/window_grip_disabled.png +0 -0
  400. melage/resource/theme/rc/window_grip_disabled@2x.png +0 -0
  401. melage/resource/theme/rc/window_grip_focus.png +0 -0
  402. melage/resource/theme/rc/window_grip_focus@2x.png +0 -0
  403. melage/resource/theme/rc/window_grip_pressed.png +0 -0
  404. melage/resource/theme/rc/window_grip_pressed@2x.png +0 -0
  405. melage/resource/theme/rc/window_minimize.png +0 -0
  406. melage/resource/theme/rc/window_minimize@2x.png +0 -0
  407. melage/resource/theme/rc/window_minimize_disabled.png +0 -0
  408. melage/resource/theme/rc/window_minimize_disabled@2x.png +0 -0
  409. melage/resource/theme/rc/window_minimize_focus.png +0 -0
  410. melage/resource/theme/rc/window_minimize_focus@2x.png +0 -0
  411. melage/resource/theme/rc/window_minimize_pressed.png +0 -0
  412. melage/resource/theme/rc/window_minimize_pressed@2x.png +0 -0
  413. melage/resource/theme/rc/window_undock.png +0 -0
  414. melage/resource/theme/rc/window_undock@2x.png +0 -0
  415. melage/resource/theme/rc/window_undock_disabled.png +0 -0
  416. melage/resource/theme/rc/window_undock_disabled@2x.png +0 -0
  417. melage/resource/theme/rc/window_undock_focus.png +0 -0
  418. melage/resource/theme/rc/window_undock_focus@2x.png +0 -0
  419. melage/resource/theme/rc/window_undock_pressed.png +0 -0
  420. melage/resource/theme/rc/window_undock_pressed@2x.png +0 -0
  421. melage/resource/theme/style.qss +2223 -0
  422. melage/resource/tract.png +0 -0
  423. melage/resource/view1.png +0 -0
  424. melage/resource/view1_eco.png +0 -0
  425. melage/resource/view1_mri.png +0 -0
  426. melage/resource/view1_seg.png +0 -0
  427. melage/resource/view2.png +0 -0
  428. melage/resource/view2_seg.png +0 -0
  429. melage/resource/w.png +0 -0
  430. melage/resource/zoom_in.png +0 -0
  431. melage/resource/zoom_inFaded.png +0 -0
  432. melage/resource/zoom_out.png +0 -0
  433. melage/resource/zoom_outFaded.png +0 -0
  434. melage/some_notes.txt +3 -0
  435. melage/utils/DispalyIm.py +2788 -0
  436. melage/utils/GMM.py +720 -0
  437. melage/utils/Shaders_120.py +257 -0
  438. melage/utils/Shaders_330.py +314 -0
  439. melage/utils/Shaders_bu.py +314 -0
  440. melage/utils/__init__0.py +7 -0
  441. melage/utils/brain_extraction_helper.py +234 -0
  442. melage/utils/custom_QScrollBar.py +61 -0
  443. melage/utils/glScientific.py +1554 -0
  444. melage/utils/glScientific_bc.py +1585 -0
  445. melage/utils/readData.py +1061 -0
  446. melage/utils/registration.py +512 -0
  447. melage/utils/source_folder.py +18 -0
  448. melage/utils/utils.py +3808 -0
  449. melage/version.txt +1 -0
  450. melage/widgets/ApplyMask.py +212 -0
  451. melage/widgets/ChangeSystem.py +152 -0
  452. melage/widgets/DeepLModels/InfantSegment/Unet.py +464 -0
  453. melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py +149 -0
  454. melage/widgets/DeepLModels/NPP/models/checkpoints/npp_v1.pth.py +0 -0
  455. melage/widgets/DeepLModels/NPP/models/losses.py +146 -0
  456. melage/widgets/DeepLModels/NPP/models/model.py +272 -0
  457. melage/widgets/DeepLModels/NPP/models/utils.py +303 -0
  458. melage/widgets/DeepLModels/NPP/npp.py +116 -0
  459. melage/widgets/DeepLModels/NPP/requirements.txt +8 -0
  460. melage/widgets/DeepLModels/NPP/train/train.py +116 -0
  461. melage/widgets/DeepLModels/Unet3DAtt.py +657 -0
  462. melage/widgets/DeepLModels/Unet3D_basic.py +648 -0
  463. melage/widgets/DeepLModels/new_unet.py +652 -0
  464. melage/widgets/DeepLModels/new_unet_old.py +639 -0
  465. melage/widgets/DeepLModels/new_unet_old2.py +658 -0
  466. melage/widgets/HistImage.py +153 -0
  467. melage/widgets/ImageThresholding.py +222 -0
  468. melage/widgets/MaskOperations.py +147 -0
  469. melage/widgets/N4Dialog.py +241 -0
  470. melage/widgets/Segmentation/FCM.py +1553 -0
  471. melage/widgets/Segmentation/__init__.py +588 -0
  472. melage/widgets/Segmentation/utils.py +417 -0
  473. melage/widgets/SemiAutoSeg.py +666 -0
  474. melage/widgets/Synthstrip.py +141 -0
  475. melage/widgets/__init__0.py +5 -0
  476. melage/widgets/about.py +246 -0
  477. melage/widgets/activation.py +437 -0
  478. melage/widgets/activator.py +147 -0
  479. melage/widgets/be_dl.py +409 -0
  480. melage/widgets/be_dl_unet3d.py +441 -0
  481. melage/widgets/brain_extraction.py +855 -0
  482. melage/widgets/brain_extraction_dl.py +887 -0
  483. melage/widgets/brain_extraction_dl_bu.py +869 -0
  484. melage/widgets/colorwidget.py +100 -0
  485. melage/widgets/dockWidgets.py +2005 -0
  486. melage/widgets/enhanceImWidget.py +109 -0
  487. melage/widgets/fileDialog_widget.py +275 -0
  488. melage/widgets/iminfo.py +346 -0
  489. melage/widgets/mainwindow_widget.py +6775 -0
  490. melage/widgets/melageAbout.py +123 -0
  491. melage/widgets/openglWidgets.py +556 -0
  492. melage/widgets/registrationWidget.py +342 -0
  493. melage/widgets/repeat_widget.py +74 -0
  494. melage/widgets/screenshot_widget.py +138 -0
  495. melage/widgets/settings_widget.py +77 -0
  496. melage/widgets/tranformationWidget.py +275 -0
  497. melage-0.0.65.dist-info/METADATA +742 -0
  498. melage-0.0.65.dist-info/RECORD +501 -0
  499. melage-0.0.65.dist-info/WHEEL +5 -0
  500. melage-0.0.65.dist-info/entry_points.txt +2 -0
  501. melage-0.0.65.dist-info/top_level.txt +1 -0
melage/widgets/DeepLModels/new_unet.py
@@ -0,0 +1,652 @@
+from functools import partial
+
+import numpy as np
+import torch.nn as nn
+import torch
+import math
+#from .model_utils import *
+import math
+import torch
+from functools import partial
+import torch.nn as nn
+from einops import repeat, rearrange
+#from einops import reduce, rearrange
+#from einops.layers.torch import Rearrange
+#from torch.optim import lr_scheduler
+#import torch.nn.functional as F
+
+class BlockLayer(nn.Module):
+    def __init__(self, num_blcks, block_layer, planes_in, planes_out, kernel_size=3, first_layer=False,
+                 input_size=None, time_emb_dim=None, norm_type='layer'):
+        super(BlockLayer, self).__init__()
+
+        self.blocks = nn.ModuleList()
+        for i in range(num_blcks):
+            if i == 0:
+                self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=first_layer,
+                                               input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+            else:
+                self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=False,
+                                               input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+            planes_in = planes_out
+
+
+    def forward(self, x, t=None):
+        for i, block in enumerate(self.blocks):
+            x = block(x, t)
+        return x
+
+
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, planes_in, planes_out, time_emb_dim=None, kernel_size=3, first_layer=False, input_size=128, norm_type='layer'):
+        super(ResidualBlock, self).__init__()
+        if time_emb_dim is not None:
+            if planes_in > planes_out:
+                dim = planes_in*2
+            else:
+                dim = planes_in*2
+            self.mlp = nn.Sequential(
+                nn.SiLU(),
+                nn.Linear(time_emb_dim, dim)
+            )
+
+        self.conv1 = ConvolutionalBlock(planes_in=planes_in, planes_out=planes_out, first_layer=first_layer,
+                                        kernel_size=kernel_size, dilation=1,
+                                        activation=nn.ReLU, input_size=input_size, norm_type=norm_type)
+        self.conv2 = ConvolutionalBlock(planes_in=planes_out, planes_out=planes_out, first_layer=False,
+                                        kernel_size=1,
+                                        dilation=1, activation=nn.ReLU, input_size=input_size, norm_type=norm_type)
+        if planes_in != planes_out:
+            self.sample = nn.Conv3d(planes_in, planes_out, (1, 1, 1), stride=(1, 1, 1), dilation=(1, 1, 1),
+                                    bias=True)  #
+        else:
+            self.sample = None
+
+    def forward(self, x, time_emb=None):
+        identity = x.clone()
+        scale_shift = None
+        if time_emb is not None:
+            time_emb = self.mlp(time_emb)
+            time_emb = time_emb.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
+            scale_shift = time_emb.chunk(2, dim=1)
+        x = self.conv1(x, scale_shift=scale_shift)
+        x = self.conv2(x, scale_shift=None)
+
+
+        if self.sample is not None:
+            identity = self.sample(identity)
+
+
+        x += identity
+
+        return x
+
+
+class UnetEncoder(nn.Module):
+    def __init__(self, in_channel, base_inc_channel=8, layer=BlockLayer, block=None, layer_blocks=None,
+                 downsampling_stride=None, feature_dilation=1.5, layer_widths=None, kernel_size=3,
+                 time_emb_dim=None, norm_type='layer'):
+        super(UnetEncoder, self).__init__()
+
+        self.layers = nn.ModuleList()
+        self.downsampling_convolutions = nn.ModuleList()
+        self.attention_modules = nn.ModuleList()
+        self.downsampling_zarib = []
+        in_channel_layer = in_channel
+        input_size = 192
+        self._layers_with = []
+        #self._layers_with.append(base_inc_channel)
+        for i, num_blcks in enumerate(layer_blocks):
+            if layer_widths is not None:
+                out_channel_layer = layer_widths[i]
+            else:
+                out_channel_layer = base_inc_channel * int(feature_dilation ** (i+1))//2
+
+            if i == 0:
+                first_layer = True
+            else:
+                first_layer = False
+            self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                     planes_in=in_channel_layer, planes_out=out_channel_layer,
+                                     kernel_size=kernel_size,
+                                     first_layer=first_layer, input_size=input_size,
+                                     time_emb_dim=time_emb_dim, norm_type=norm_type))
+            #self.attention_modules.append(Attention(out_channel_layer))
+            if i != len(layer_blocks) - 1:
+
+                #padding = kernel_size // 2  # constant size
+                downsampling_conv = nn.Conv3d(out_channel_layer, out_channel_layer, (1, 1, 1), padding=1//2,
+                                              stride=(downsampling_stride, downsampling_stride, downsampling_stride),
+                                              bias=True)
+                #downsampling_conv = nn.MaxPool3d(kernel_size=2, stride=2)
+
+                self.downsampling_convolutions.append(downsampling_conv)
+
+                input_size = input_size // 2
+            print("Encoder {}:".format(i), in_channel_layer, out_channel_layer)
+            self._layers_with.append(out_channel_layer)
+            in_channel_layer = out_channel_layer
+        self.out_channel_layer = in_channel_layer//2
+        self.last_downsampling_conv = nn.Conv3d(out_channel_layer, out_channel_layer, (1, 1, 1),
+                                                padding=1 // 2,
+                                                stride=(downsampling_stride, downsampling_stride, downsampling_stride),
+                                                bias=True)
+        self.output_size = input_size
+
+    def forward(self, x, time=None):
+        outputs = list()
+        #outputs.insert(0, x)
+        for layer, downsampling in zip(self.layers[:-1], self.downsampling_convolutions):
+            x = layer(x, time)
+
+            outputs.insert(0, x)
+
+            x = downsampling(x)
+            outputs.insert(0, x)
+        x = self.layers[-1](x, time)
+        x = self.last_downsampling_conv(x)
+        outputs.insert(0, x)  # bottleneck layer
+        return outputs
+
+class ConvolutionalBlock(nn.Module):
+    def __init__(self, planes_in, planes_out, first_layer=False, kernel_size=3, dilation=1, activation=None,
+                 input_size=None, norm_type='layer'):
+        super(ConvolutionalBlock, self).__init__()
+        if dilation == 1:
+            padding = kernel_size // 2  # constant size
+        else:
+            # (In + 2*padding - dilation * (kernel_size - 1) - 1)/stride + 1
+            if kernel_size == 3:
+                if dilation == 2:
+                    padding = 2
+                elif dilation == 4:
+                    padding = 4
+                elif dilation == 3:
+                    padding = 3
+                else:
+                    padding = None
+            elif kernel_size == 1:
+                padding = 0
+        self.activation = None
+        self.norm = None
+        if first_layer:
+            self.norm = nn.InstanceNorm3d(planes_in)
+            self.activation = activation()
+            self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                  padding=padding, bias=True,
+                                  dilation=(dilation, dilation, dilation))
+        else:
+            if activation is not None:
+                if norm_type.lower() == 'layer':
+                    self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                elif norm_type.lower() == 'group':
+                    valid_num_groups = np.array([16, 8, 4, 2])
+                    valid_num_groups = valid_num_groups[valid_num_groups < planes_in]
+                    num_groups = None
+                    for num_groups in valid_num_groups:
+                        if planes_in % num_groups != 0:
+                            break
+                    if num_groups is None:
+                        raise ValueError('Num groups can not be determined')
+                    self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=planes_in)
+                elif norm_type.lower() == 'batch':
+                    self.norm = nn.BatchNorm3d(planes_in)
+                elif norm_type.lower() == 'instance':
+                    self.norm = nn.InstanceNorm3d(planes_in)
+                else:
+                    self.norm = None
+
+                self.activation = activation()
+                self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                      padding=padding, bias=True,
+                                      dilation=(dilation, dilation, dilation))
+
+            else:
+                if norm_type.lower() == 'layer':
+                    if input_size < 120:
+                        self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                    else:
+                        self.norm = nn.InstanceNorm3d(planes_in)
+                elif norm_type.lower() == 'group':
+                    valid_num_groups = np.array([16, 8, 4, 2])
+                    valid_num_groups = valid_num_groups[valid_num_groups < planes_in]
+                    num_groups = None
+                    for num_groups in valid_num_groups:
+                        if planes_in % num_groups != 0:
+                            break
+                    if num_groups is None:
+                        raise ValueError('Num groups can not be determined')
+                    self.norm = nn.GroupNorm(num_groups=planes_in, num_channels=planes_in)
+                elif norm_type.lower() == 'batch':
+                    self.norm = nn.BatchNorm3d(planes_in)
+                elif norm_type.lower() == 'instance':
+                    self.norm = nn.InstanceNorm3d(planes_in)
+                else:
+                    self.norm = None
+
+                self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                      padding=padding, bias=True,
+                                      dilation=(dilation, dilation, dilation))
+
+
+    def forward(self, x, scale_shift=None):
+        if self.norm is not None:
+            x = self.norm(x)
+
+        if scale_shift is not None:
+            scale, shift = scale_shift
+            x = x * (scale + 1) + shift
+
+        if self.activation is not None:
+            x = self.activation(x)
+
+        x = self.conv(x)
+
+        return x
+class SinusoidalPosEmb(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x):
+        device = x.device
+        half_dim = self.dim // 2
+        emb = math.log(10000) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+        emb = x[..., None] * emb[None, :]
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        if len(emb.shape) == 3:
+            emb = emb.view(emb.shape[0], emb.shape[1] * emb.shape[2])
+        return emb
+
+
+
+class UnetDecoder(nn.Module):
+    def __init__(self, in_channel, base_inc_channel=64, layer=BlockLayer, block=None, layer_blocks=[1, 1, 1, 1],
+                 feature_dilation=2, upsampling_stride=2, layer_widths=None, kernel_size=3,
+                 upsampling_mode="trilinear", align_corners=False, use_transposed_convolutions=False, last_cov_channels=256,
+                 time_emb_dim=None, norm_type='layer'
+                 ):
+        super(UnetDecoder, self).__init__()
+        self.layers = nn.ModuleList()
+
+        self.upsampling_blocks = nn.ModuleList()
+
+        self.attention_modules = nn.ModuleList()
+        in_channel_layer = in_channel
+        #input_size = 24
+        input_size = 16
+
+
+        for i, num_blcks in enumerate(layer_blocks):
+            if layer_widths is not None:
+                out_channel_layer = layer_widths[i]
+            else:
+                out_channel_layer = base_inc_channel // (feature_dilation ** (i))
+
+            if i == 0:
+                first_layer = True
+                self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                         planes_in=last_cov_channels, planes_out=out_channel_layer,
+                                         kernel_size=kernel_size,
+                                         first_layer=first_layer, input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+            else:
+                first_layer = False
+
+                self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                         planes_in=in_channel_layer+layer_widths[i-1], planes_out=out_channel_layer,
+                                         kernel_size=kernel_size,
+                                         first_layer=first_layer, input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+
+            #self.upsampling_blocks.append(nn.ConvTranspose3d(out_channel_layer, out_channel_layer, kernel_size=2,
+            #                                                 stride=upsampling_stride, padding=0))
+            self.upsampling_blocks.append(nn.Upsample(scale_factor=2, mode='nearest'))
+
+            input_size = input_size * 2
+            last_cov_channels = in_channel_layer  #last_cov_channels//2
+            print("Decoder {}:".format(i), in_channel_layer, out_channel_layer)
+            in_channel_layer = out_channel_layer
+        self.out_channel_layer = in_channel_layer
+    def forward(self, x, t):
+        i = 0
+        outputs = list()
+        y = x[0]
+        for up, lay in zip(self.upsampling_blocks, self.layers[:-1]):
+            if i == 0:
+                y = lay(y, t)
+            else:
+                y = lay(y, t)
+            outputs.insert(0, y)
+            y = torch.cat([y, x[i]], 1)
+            y = up(y)
+            #y = att(y)
+            #y = torch.cat([y, x[i + 1]], 1)
+            i += 1
+        outputs.insert(0, y)
+        y = self.layers[-1](y, t)
+        y = up(y)
+        outputs.insert(0, y)
+        return y, outputs
+
+class Attention(nn.Module):
+    def __init__(self, dim, heads=4, dim_head=16):
+        super().__init__()
+        self.scale = dim_head ** -0.5
+        self.heads = heads
+        hidden_dim = dim_head * heads
+
+        self.to_qkv = nn.Conv3d(dim, hidden_dim * 3, 1, bias=False)
+        self.to_out = nn.Conv3d(hidden_dim, dim//2, 1)
+
+    def forward(self, x, mask=None):
+        b, c, h, w, z = x.shape
+        qkv = self.to_qkv(x).chunk(3, dim=1)
+        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h=self.heads), qkv)
+
+        scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
+        attention = torch.softmax(scaled_dot_prod, dim=-1)
+        v = v / (h * w * z)
+        atv = torch.einsum('... i j , ... j d -> ... i d', attention, v)
+        out = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
+        return self.to_out(out)
+
+
+
+
+class CrossConv3d(nn.Conv3d):
+
+    """
+    https://github.com/JJGO/UniverSeg/blob/main/universeg/nn/cross_conv.py
+    Compute pairwise convolution between all element of x and all elements of y.
+    x, y are tensors of size B,_,C,H,W where _ could be different number of elements in x and y
+    essentially, we do a meshgrid of the elements to get B,Sx,Sy,C,H,W tensors, and then
+    pairwise conv.
+    Args:
+        x (tensor): B,Sx,Cx,H,W
+        y (tensor): B,Sy,Cy,H,W
+    Returns:
+        tensor: B,Sx,Sy,Cout,H,W
+    """
+    """
+    CrossConv2d is a convolutional layer that performs pairwise convolutions between elements of two input tensors.
+
+    Parameters
+    ----------
+    in_channels : int or tuple of ints
+        Number of channels in the input tensor(s).
+        If the tensors have different number of channels, in_channels must be a tuple
+    out_channels : int
+        Number of output channels.
+    kernel_size : int or tuple of ints
+        Size of the convolutional kernel.
+    stride : int or tuple of ints, optional
+        Stride of the convolution. Default is 1.
+    padding : int or tuple of ints, optional
+        Zero-padding added to both sides of the input. Default is 0.
+    dilation : int or tuple of ints, optional
+        Spacing between kernel elements. Default is 1.
+    groups : int, optional
+        Number of blocked connections from input channels to output channels. Default is 1.
+    bias : bool, optional
+        If True, adds a learnable bias to the output. Default is True.
+    padding_mode : str, optional
+        Padding mode. Default is "zeros".
+    device : str, optional
+        Device on which to allocate the tensor. Default is None.
+    dtype : torch.dtype, optional
+        Data type assigned to the tensor. Default is None.
+
+    Returns
+    -------
+    torch.Tensor
+        Tensor resulting from the pairwise convolution between the elements of x and y.
+
+    Notes
+    -----
+    x and y are tensors of size (B, Sx, Cx, H, W) and (B, Sy, Cy, H, W), respectively,
+    The function does the cartesian product of the elements of x and y to obtain a tensor
+    of size (B, Sx, Sy, Cx + Cy, H, W), and then performs the same convolution for all
+    (B, Sx, Sy) in the batch dimension. Runtime and memory are O(Sx * Sy).
+
+    Examples
+    --------
+    >>> x = torch.randn(2, 3, 4, 32, 32)
+    >>> y = torch.randn(2, 5, 6, 32, 32)
+    >>> conv = CrossConv2d(in_channels=(4, 6), out_channels=7, kernel_size=3, padding=1)
+    >>> output = conv(x, y)
+    >>> output.shape #(2, 3, 5, 7, 32, 32)
+    """
+
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels: int,
+        kernel_size,
+        stride=1,
+        padding=0,
+        dilation=1,
+        groups: int = 1,
+        bias: bool = True,
+        padding_mode: str = "zeros",
+        device=None,
+        dtype=None,
+    ) -> None:
+
+        if isinstance(in_channels, (list, tuple)):
+            concat_channels = sum(in_channels)
+        else:
+            concat_channels = 2 * in_channels
+
+        super().__init__(
+            in_channels=concat_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=groups,
+            bias=bias,
+            padding_mode=padding_mode,
+            device=device,
+            dtype=dtype,
+        )
+
+    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+        """
+        Compute pairwise convolution between all elements of x and all elements of y.
+
+        Parameters
+        ----------
+        x : torch.Tensor
+            Input tensor of size (B, Sx, Cx, H, W).
+        y : torch.Tensor
+            Input tensor of size (B, Sy, Cy, H, W).
+
+        Returns
+        -------
+        torch.Tensor
+            Tensor resulting from the cross-convolution between the elements of x and y.
+            Has size (B, Sx, Sy, Co, H, W), where Co is the number of output channels.
+        """
+        B, Sx, *_ = x.shape
+        _, Sy, *_ = y.shape
+
+        xs = repeat(x, "B Sx Cx H W Y -> B Sx Sy Cx H W Y", Sy=Sy)
+        ys = repeat(y, "B Sy Cy H W Y -> B Sx Sy Cy H W Y", Sx=Sx)
+
+        xy = torch.cat([xs, ys], dim=3)
+
+        batched_xy = rearrange(xy, "B Sx Sy C2 H W Y -> (B Sx Sy) C2 H W Y")
+        batched_output = super().forward(batched_xy)
+
+        output = rearrange(
+            batched_output, "(B Sx Sy) Co H W Y -> B Sx Sy Co H W Y", B=B, Sx=Sx, Sy=Sy
+        )
+        return output
+
+class UnetGen(nn.Module):
+    def __init__(self, base_inc_channel=8,
+                 feature_dilation=2, downsampling_stride=2,
+                 encoder_class=UnetEncoder, layer_widths=None, block=None,
+                 kernel_size=3, interpolation_mode="trilinear", decoder_class=None,
+                 use_transposed_convolutions=True, time_embed=False, norm_type='layer'):
+        super(UnetGen, self).__init__()
+        time_embed = self.time_embed
+        use_transposed_convolutions = self.use_tr_conv
+        inblock = 16
+        base_inc_channel = inblock
+        self.base_inc_channel = base_inc_channel
+
+        sinu_pos_emb = SinusoidalPosEmb(inblock)
+        fourier_dim = inblock
+        #if self.spacing_embed:
+        #    fourier_dim *= 4
+
+        # time embeddings
+
+        time_dim = inblock * 4
+        if time_embed:
+            self.time_mlp = nn.Sequential(
+                sinu_pos_emb,
+                nn.Linear(fourier_dim, time_dim),
+                nn.GELU(),
+                nn.Linear(time_dim, time_dim)
+            )
+        else:
+            time_dim = None
+
+        #encoder_blocks = [1, 1, 1, 1, 1, 1]
+
+        #decoder_blocks = [1, 1, 1, 1, 1, 1]
+        encoder_blocks = [1, 1, 1]
+
+        decoder_blocks = [1, 1, 1]
+
+        padding = kernel_size // 2  # constant size
+        self.before_encoder = nn.Conv3d(1, inblock, kernel_size=(3, 3, 3),
+                                        stride=(1, 1, 1), padding=3//2,
+                                        bias=True)
+
+
+        self.encoder = encoder_class(in_channel=inblock, base_inc_channel=base_inc_channel, layer_blocks=encoder_blocks,
+                                     block=block,
+                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
+                                     layer_widths=layer_widths, kernel_size=kernel_size,
+                                     time_emb_dim=time_dim, norm_type=norm_type)
+
+        layer_widths = self.encoder._layers_with
+        in_channel = layer_widths[-1]
+        self.BottleNeck = BlockLayer(num_blcks=1, block_layer=block,
+                                     planes_in=in_channel, planes_out=in_channel,
+                                     kernel_size=kernel_size,
+                                     first_layer=False, input_size=self.encoder.output_size, time_emb_dim=time_dim, norm_type=norm_type)
+
+        self.BottleNeck_att = Attention(in_channel)
+
+        layer_widths = layer_widths[::-1]  #[1:]
+        layer_widths[0] = layer_widths[0]//2
+
+        in_channel = in_channel//2
+        self.decoder = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
+                                     block=block, last_cov_channels=self.encoder.out_channel_layer,
+                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                     use_transposed_convolutions=use_transposed_convolutions,
+                                     kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
+                                     )
+        self.decoder_mask = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
+                                          block=block, last_cov_channels=self.encoder.out_channel_layer,
+                                          upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                          use_transposed_convolutions=use_transposed_convolutions,
+                                          kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
+                                          )
+
+        kernel_size = 3
+
+        self.last_convolution = BlockLayer(num_blcks=1, block_layer=block,
+                                           planes_in=inblock*2, planes_out=inblock//2,
+                                           kernel_size=kernel_size,
+                                           first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)
+
+        self.last_convolution_rec = BlockLayer(num_blcks=1, block_layer=block,
+                                               planes_in=inblock*2, planes_out=inblock//2,
+                                               kernel_size=kernel_size,
+                                               first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)
+
+        self.final_convolution = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                           stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+        self.final_convolution_rec = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                               stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+        self.activation = nn.Softmax(dim=1)
+        self.sigmoid = nn.Sigmoid()
+
+
+
+
+
+    def forward(self, y, time, t=0, noise=None):
+
+
+        y = self.before_encoder(y)
+
+        if self.time_embed:
+            if len(time.shape) == 1:
+                t = self.time_mlp(time)
+            else:
+                t = self.time_mlp(time)
+        else:
+            t = None
+
+        x = self.encoder(y, t)
+        x[0] = self.BottleNeck(x[0], t)
+        x[0] = self.BottleNeck_att(x[0])
+
+        mask, _ = self.decoder_mask(x, t)
+        x, _ = self.decoder(x, t)
+
+        ###############################Attention########################################
+        dim_head = 16
+        self.heads = 4
+        self.scale = dim_head ** -0.5
+
+        b, c, h, w, z = x.shape
+        #qkv = self.to_qkv(x).chunk(3, dim=1)
+        q = rearrange(x, 'b (h c) x y z -> b h c (x y z)', h=self.heads)
+        k = rearrange(mask, 'b (h c) x y z -> b h c (x y z)', h=self.heads)
+        #q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h=self.heads), qkv)
+
+        scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
+        attention = torch.softmax(scaled_dot_prod, dim=-1)
+
+        k = k / (h * w * z)
+        atv = torch.einsum('... i j , ... j d -> ... i d', attention, k)
+        x = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
+        ###############################Attention########################################
+        mask = torch.cat([mask, y], 1)
+        mask = self.last_convolution(mask)
+        mask = self.final_convolution(mask)
+
+
+        x = torch.cat([x, y], 1)
+        #x = (x * mask)
+        z = self.last_convolution_rec(x)
+        z = self.final_convolution_rec(z)
+
+
+        return [mask, z]
+
+class Unet3D(UnetGen):
+    def __init__(self, time_embed=False, channels=1, *args, encoder_class=UnetEncoder, **kwargs):
+        self.time_embed = time_embed
+        self.use_tr_conv = False
+
+        norm_type = "instance"
+        super().__init__(*args, encoder_class=encoder_class, decoder_class=UnetDecoder,
+                         block=ResidualBlock, norm_type=norm_type, **kwargs)
+
+        self.channels = channels
+        self.netName = 'Unet3D'
+    def name(self):
+        return 'unet3d'
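
As a quick orientation to the model added above, the following is a minimal usage sketch of the Unet3D class, based only on the code shown in this diff rather than on melage documentation. The import path, the 64-voxel cube, and the batch size are illustrative assumptions; the spatial size just needs to be divisible by 8 so the three stride-2 downsampling steps in the encoder and the x2 nearest-neighbour upsampling stages in the decoder line up, and with the default time_embed=False the time argument is ignored.

# Hedged usage sketch (assumes torch and einops are installed; the module path
# below is inferred from the file list and may differ in practice).
import torch

from melage.widgets.DeepLModels.new_unet import Unet3D  # assumed import path

model = Unet3D(time_embed=False, channels=1)  # prints the encoder/decoder channel layout
model.eval()

# One single-channel volume; spatial dims divisible by 8 keep the skip connections aligned.
volume = torch.randn(1, 1, 64, 64, 64)

with torch.no_grad():
    # forward(y, time, ...) returns [mask, reconstruction]; time is unused here.
    mask, recon = model(volume, None)

print(mask.shape, recon.shape)  # expected: torch.Size([1, 1, 64, 64, 64]) for both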