melage-0.0.65-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (501)
  1. melage/__init__.py +16 -0
  2. melage/cli.py +4 -0
  3. melage/graphics/GLGraphicsItem.py +286 -0
  4. melage/graphics/GLViewWidget.py +595 -0
  5. melage/graphics/Transform3D.py +55 -0
  6. melage/graphics/__init__.py +8 -0
  7. melage/graphics/functions.py +101 -0
  8. melage/graphics/items/GLAxisItem.py +149 -0
  9. melage/graphics/items/GLGridItem.py +178 -0
  10. melage/graphics/items/GLPolygonItem.py +77 -0
  11. melage/graphics/items/GLScatterPlotItem.py +135 -0
  12. melage/graphics/items/GLVolumeItem.py +280 -0
  13. melage/graphics/items/GLVolumeItem_b.py +237 -0
  14. melage/graphics/items/__init__.py +0 -0
  15. melage/graphics/shaders.py +202 -0
  16. melage/main.py +270 -0
  17. melage/requirements22.txt +25 -0
  18. melage/requirements_old.txt +28 -0
  19. melage/resource/0circle.png +0 -0
  20. melage/resource/0circle_faded.png +0 -0
  21. melage/resource/3d.png +0 -0
  22. melage/resource/3d.psd +0 -0
  23. melage/resource/3dFaded.png +0 -0
  24. melage/resource/Eraser.png +0 -0
  25. melage/resource/EraserFaded.png +0 -0
  26. melage/resource/EraserX.png +0 -0
  27. melage/resource/EraserXFaded.png +0 -0
  28. melage/resource/Eraser_icon.svg +79 -0
  29. melage/resource/Hand.png +0 -0
  30. melage/resource/HandIcons_0.png +0 -0
  31. melage/resource/Hand_IX.png +0 -0
  32. melage/resource/Hand_IXFaded.png +0 -0
  33. melage/resource/Handsqueezed.png +0 -0
  34. melage/resource/Handwriting (copy).png +0 -0
  35. melage/resource/Handwriting.png +0 -0
  36. melage/resource/HandwritingMinus.png +0 -0
  37. melage/resource/HandwritingMinusX.png +0 -0
  38. melage/resource/HandwritingPlus.png +0 -0
  39. melage/resource/HandwritingPlusX.png +0 -0
  40. melage/resource/Move_icon.svg +8 -0
  41. melage/resource/PngItem_2422924.png +0 -0
  42. melage/resource/about.png +0 -0
  43. melage/resource/about_logo.png +0 -0
  44. melage/resource/about_logo0.png +0 -0
  45. melage/resource/action_check.png +0 -0
  46. melage/resource/action_check_OFF.png +0 -0
  47. melage/resource/arrow).png +0 -0
  48. melage/resource/arrow.png +0 -0
  49. melage/resource/arrowFaded.png +0 -0
  50. melage/resource/arrow_org.png +0 -0
  51. melage/resource/arrow_org.png.png +0 -0
  52. melage/resource/arrows.png +0 -0
  53. melage/resource/authors.mp4 +0 -0
  54. melage/resource/box.png +0 -0
  55. melage/resource/check-image-icon-0.jpg +0 -0
  56. melage/resource/circle.png +0 -0
  57. melage/resource/circle_faded.png +0 -0
  58. melage/resource/circle_or.png +0 -0
  59. melage/resource/close.png +0 -0
  60. melage/resource/close_bg.png +0 -0
  61. melage/resource/color/Simple.txt +18 -0
  62. melage/resource/color/Tissue.txt +24 -0
  63. melage/resource/color/Tissue12.txt +27 -0
  64. melage/resource/color/albert_LUT.txt +102 -0
  65. melage/resource/color/mcrib_LUT.txt +102 -0
  66. melage/resource/color/pediatric1.txt +29 -0
  67. melage/resource/color/pediatric1_old.txt +27 -0
  68. melage/resource/color/pediatric2.txt +87 -0
  69. melage/resource/color/pediatric3.txt +29 -0
  70. melage/resource/color/pediatrics (copy).csv +103 -0
  71. melage/resource/color/tissue_seg.txt +4 -0
  72. melage/resource/contour.png +0 -0
  73. melage/resource/contour.svg +2 -0
  74. melage/resource/contourFaded.png +0 -0
  75. melage/resource/contourX.png +0 -0
  76. melage/resource/contourXFaded.png +0 -0
  77. melage/resource/dti.png +0 -0
  78. melage/resource/dti0.png +0 -0
  79. melage/resource/dti222.png +0 -0
  80. melage/resource/dti_or.png +0 -0
  81. melage/resource/eco.png +0 -0
  82. melage/resource/eco22.png +0 -0
  83. melage/resource/eco_old.png +0 -0
  84. melage/resource/eco_or.png +0 -0
  85. melage/resource/eco_or2.png +0 -0
  86. melage/resource/eco_seg.png +0 -0
  87. melage/resource/eco_seg_old.png +0 -0
  88. melage/resource/export.png +0 -0
  89. melage/resource/hand-grab-icon-10.jpg +0 -0
  90. melage/resource/hand-grab-icon-25.jpg +0 -0
  91. melage/resource/info.png +0 -0
  92. melage/resource/line.png +0 -0
  93. melage/resource/linefaded.png +0 -0
  94. melage/resource/load.png +0 -0
  95. melage/resource/main.ico +0 -0
  96. melage/resource/manual_images/3D_rightc.png +0 -0
  97. melage/resource/manual_images/3D_rightc_goto.png +0 -0
  98. melage/resource/manual_images/3D_rightc_paint.png +0 -0
  99. melage/resource/manual_images/3D_rightc_paint_draw1.png +0 -0
  100. melage/resource/manual_images/3D_rightc_paint_draw2.png +0 -0
  101. melage/resource/manual_images/3D_rightc_paint_render.png +0 -0
  102. melage/resource/manual_images/3D_rightc_paint_render2.png +0 -0
  103. melage/resource/manual_images/3D_rightc_paint_render3.png +0 -0
  104. melage/resource/manual_images/3D_rightc_paint_render4.png +0 -0
  105. melage/resource/manual_images/3D_rightc_paint_render5.png +0 -0
  106. melage/resource/manual_images/3D_rightc_paint_render6.png +0 -0
  107. melage/resource/manual_images/3D_rightc_seg.png +0 -0
  108. melage/resource/manual_images/exit_toolbar.png +0 -0
  109. melage/resource/manual_images/load_image_file.png +0 -0
  110. melage/resource/manual_images/load_image_file_openp.png +0 -0
  111. melage/resource/manual_images/main_page.png +0 -0
  112. melage/resource/manual_images/menu_file.png +0 -0
  113. melage/resource/manual_images/menu_file_export.png +0 -0
  114. melage/resource/manual_images/menu_file_import.png +0 -0
  115. melage/resource/manual_images/menu_file_settings.png +0 -0
  116. melage/resource/manual_images/menu_file_ss.png +0 -0
  117. melage/resource/manual_images/open_save_load.png +0 -0
  118. melage/resource/manual_images/panning_toolbar.png +0 -0
  119. melage/resource/manual_images/segmentation_toolbar.png +0 -0
  120. melage/resource/manual_images/tab_mri.png +0 -0
  121. melage/resource/manual_images/tab_us.png +0 -0
  122. melage/resource/manual_images/tabs.png +0 -0
  123. melage/resource/manual_images/toolbar_tools.png +0 -0
  124. melage/resource/manual_images/tools_basic.png +0 -0
  125. melage/resource/manual_images/tools_bet.png +0 -0
  126. melage/resource/manual_images/tools_cs.png +0 -0
  127. melage/resource/manual_images/tools_deepbet.png +0 -0
  128. melage/resource/manual_images/tools_imageinfo.png +0 -0
  129. melage/resource/manual_images/tools_maskO.png +0 -0
  130. melage/resource/manual_images/tools_masking.png +0 -0
  131. melage/resource/manual_images/tools_n4b.png +0 -0
  132. melage/resource/manual_images/tools_resize.png +0 -0
  133. melage/resource/manual_images/tools_ruler.png +0 -0
  134. melage/resource/manual_images/tools_seg.png +0 -0
  135. melage/resource/manual_images/tools_threshold.png +0 -0
  136. melage/resource/manual_images/tools_tools.png +0 -0
  137. melage/resource/manual_images/widget_color.png +0 -0
  138. melage/resource/manual_images/widget_color_add.png +0 -0
  139. melage/resource/manual_images/widget_color_add2.png +0 -0
  140. melage/resource/manual_images/widget_color_additional.png +0 -0
  141. melage/resource/manual_images/widget_images.png +0 -0
  142. melage/resource/manual_images/widget_images2.png +0 -0
  143. melage/resource/manual_images/widget_images3.png +0 -0
  144. melage/resource/manual_images/widget_marker.png +0 -0
  145. melage/resource/manual_images/widget_mri.png +0 -0
  146. melage/resource/manual_images/widget_mri2.png +0 -0
  147. melage/resource/manual_images/widget_segintensity.png +0 -0
  148. melage/resource/manual_images/widget_tab_mutualview.png +0 -0
  149. melage/resource/manual_images/widget_tab_mutualview2.png +0 -0
  150. melage/resource/manual_images/widget_table.png +0 -0
  151. melage/resource/manual_images/widget_table2.png +0 -0
  152. melage/resource/manual_images/widget_us.png +0 -0
  153. melage/resource/melage_top.ico +0 -0
  154. melage/resource/melage_top.png +0 -0
  155. melage/resource/melage_top0.png +0 -0
  156. melage/resource/melage_top1.png +0 -0
  157. melage/resource/melage_top4.png +0 -0
  158. melage/resource/mri (copy).png +0 -0
  159. melage/resource/mri.png +0 -0
  160. melage/resource/mri0.png +0 -0
  161. melage/resource/mri000.png +0 -0
  162. melage/resource/mri22.png +0 -0
  163. melage/resource/mri_big.png +0 -0
  164. melage/resource/mri_old.png +0 -0
  165. melage/resource/mri_seg.png +0 -0
  166. melage/resource/mri_seg_old.png +0 -0
  167. melage/resource/new.png +0 -0
  168. melage/resource/open.png +0 -0
  169. melage/resource/open2.png +0 -0
  170. melage/resource/pan.png +0 -0
  171. melage/resource/pencil.png +0 -0
  172. melage/resource/pencilFaded.png +0 -0
  173. melage/resource/points.png +0 -0
  174. melage/resource/pointsFaded.png +0 -0
  175. melage/resource/rotate.png +0 -0
  176. melage/resource/ruler.png +0 -0
  177. melage/resource/rulerFaded.png +0 -0
  178. melage/resource/s.png +0 -0
  179. melage/resource/s.psd +0 -0
  180. melage/resource/save.png +0 -0
  181. melage/resource/saveas.png +0 -0
  182. melage/resource/seg_mri.png +0 -0
  183. melage/resource/seg_mri2.png +0 -0
  184. melage/resource/settings.png +0 -0
  185. melage/resource/synch.png +0 -0
  186. melage/resource/synchFaded.png +0 -0
  187. melage/resource/theme/rc/.keep +1 -0
  188. melage/resource/theme/rc/arrow_down.png +0 -0
  189. melage/resource/theme/rc/arrow_down@2x.png +0 -0
  190. melage/resource/theme/rc/arrow_down_disabled.png +0 -0
  191. melage/resource/theme/rc/arrow_down_disabled@2x.png +0 -0
  192. melage/resource/theme/rc/arrow_down_focus.png +0 -0
  193. melage/resource/theme/rc/arrow_down_focus@2x.png +0 -0
  194. melage/resource/theme/rc/arrow_down_pressed.png +0 -0
  195. melage/resource/theme/rc/arrow_down_pressed@2x.png +0 -0
  196. melage/resource/theme/rc/arrow_left.png +0 -0
  197. melage/resource/theme/rc/arrow_left@2x.png +0 -0
  198. melage/resource/theme/rc/arrow_left_disabled.png +0 -0
  199. melage/resource/theme/rc/arrow_left_disabled@2x.png +0 -0
  200. melage/resource/theme/rc/arrow_left_focus.png +0 -0
  201. melage/resource/theme/rc/arrow_left_focus@2x.png +0 -0
  202. melage/resource/theme/rc/arrow_left_pressed.png +0 -0
  203. melage/resource/theme/rc/arrow_left_pressed@2x.png +0 -0
  204. melage/resource/theme/rc/arrow_right.png +0 -0
  205. melage/resource/theme/rc/arrow_right@2x.png +0 -0
  206. melage/resource/theme/rc/arrow_right_disabled.png +0 -0
  207. melage/resource/theme/rc/arrow_right_disabled@2x.png +0 -0
  208. melage/resource/theme/rc/arrow_right_focus.png +0 -0
  209. melage/resource/theme/rc/arrow_right_focus@2x.png +0 -0
  210. melage/resource/theme/rc/arrow_right_pressed.png +0 -0
  211. melage/resource/theme/rc/arrow_right_pressed@2x.png +0 -0
  212. melage/resource/theme/rc/arrow_up.png +0 -0
  213. melage/resource/theme/rc/arrow_up@2x.png +0 -0
  214. melage/resource/theme/rc/arrow_up_disabled.png +0 -0
  215. melage/resource/theme/rc/arrow_up_disabled@2x.png +0 -0
  216. melage/resource/theme/rc/arrow_up_focus.png +0 -0
  217. melage/resource/theme/rc/arrow_up_focus@2x.png +0 -0
  218. melage/resource/theme/rc/arrow_up_pressed.png +0 -0
  219. melage/resource/theme/rc/arrow_up_pressed@2x.png +0 -0
  220. melage/resource/theme/rc/base_icon.png +0 -0
  221. melage/resource/theme/rc/base_icon@2x.png +0 -0
  222. melage/resource/theme/rc/base_icon_disabled.png +0 -0
  223. melage/resource/theme/rc/base_icon_disabled@2x.png +0 -0
  224. melage/resource/theme/rc/base_icon_focus.png +0 -0
  225. melage/resource/theme/rc/base_icon_focus@2x.png +0 -0
  226. melage/resource/theme/rc/base_icon_pressed.png +0 -0
  227. melage/resource/theme/rc/base_icon_pressed@2x.png +0 -0
  228. melage/resource/theme/rc/branch_closed.png +0 -0
  229. melage/resource/theme/rc/branch_closed@2x.png +0 -0
  230. melage/resource/theme/rc/branch_closed_disabled.png +0 -0
  231. melage/resource/theme/rc/branch_closed_disabled@2x.png +0 -0
  232. melage/resource/theme/rc/branch_closed_focus.png +0 -0
  233. melage/resource/theme/rc/branch_closed_focus@2x.png +0 -0
  234. melage/resource/theme/rc/branch_closed_pressed.png +0 -0
  235. melage/resource/theme/rc/branch_closed_pressed@2x.png +0 -0
  236. melage/resource/theme/rc/branch_end.png +0 -0
  237. melage/resource/theme/rc/branch_end@2x.png +0 -0
  238. melage/resource/theme/rc/branch_end_disabled.png +0 -0
  239. melage/resource/theme/rc/branch_end_disabled@2x.png +0 -0
  240. melage/resource/theme/rc/branch_end_focus.png +0 -0
  241. melage/resource/theme/rc/branch_end_focus@2x.png +0 -0
  242. melage/resource/theme/rc/branch_end_pressed.png +0 -0
  243. melage/resource/theme/rc/branch_end_pressed@2x.png +0 -0
  244. melage/resource/theme/rc/branch_line.png +0 -0
  245. melage/resource/theme/rc/branch_line@2x.png +0 -0
  246. melage/resource/theme/rc/branch_line_disabled.png +0 -0
  247. melage/resource/theme/rc/branch_line_disabled@2x.png +0 -0
  248. melage/resource/theme/rc/branch_line_focus.png +0 -0
  249. melage/resource/theme/rc/branch_line_focus@2x.png +0 -0
  250. melage/resource/theme/rc/branch_line_pressed.png +0 -0
  251. melage/resource/theme/rc/branch_line_pressed@2x.png +0 -0
  252. melage/resource/theme/rc/branch_more.png +0 -0
  253. melage/resource/theme/rc/branch_more@2x.png +0 -0
  254. melage/resource/theme/rc/branch_more_disabled.png +0 -0
  255. melage/resource/theme/rc/branch_more_disabled@2x.png +0 -0
  256. melage/resource/theme/rc/branch_more_focus.png +0 -0
  257. melage/resource/theme/rc/branch_more_focus@2x.png +0 -0
  258. melage/resource/theme/rc/branch_more_pressed.png +0 -0
  259. melage/resource/theme/rc/branch_more_pressed@2x.png +0 -0
  260. melage/resource/theme/rc/branch_open.png +0 -0
  261. melage/resource/theme/rc/branch_open@2x.png +0 -0
  262. melage/resource/theme/rc/branch_open_disabled.png +0 -0
  263. melage/resource/theme/rc/branch_open_disabled@2x.png +0 -0
  264. melage/resource/theme/rc/branch_open_focus.png +0 -0
  265. melage/resource/theme/rc/branch_open_focus@2x.png +0 -0
  266. melage/resource/theme/rc/branch_open_pressed.png +0 -0
  267. melage/resource/theme/rc/branch_open_pressed@2x.png +0 -0
  268. melage/resource/theme/rc/checkbox_checked.png +0 -0
  269. melage/resource/theme/rc/checkbox_checked0.png +0 -0
  270. melage/resource/theme/rc/checkbox_checked@2x.png +0 -0
  271. melage/resource/theme/rc/checkbox_checked@2x0.png +0 -0
  272. melage/resource/theme/rc/checkbox_checked@2x000.png.png +0 -0
  273. melage/resource/theme/rc/checkbox_checked_disabled.png +0 -0
  274. melage/resource/theme/rc/checkbox_checked_disabled0.png +0 -0
  275. melage/resource/theme/rc/checkbox_checked_disabled@2x.png +0 -0
  276. melage/resource/theme/rc/checkbox_checked_disabled@2x0.png +0 -0
  277. melage/resource/theme/rc/checkbox_checked_focus.png +0 -0
  278. melage/resource/theme/rc/checkbox_checked_focus0.png +0 -0
  279. melage/resource/theme/rc/checkbox_checked_focus@2x.png +0 -0
  280. melage/resource/theme/rc/checkbox_checked_focus@2x0.png +0 -0
  281. melage/resource/theme/rc/checkbox_checked_pressed.png +0 -0
  282. melage/resource/theme/rc/checkbox_checked_pressed0.png +0 -0
  283. melage/resource/theme/rc/checkbox_checked_pressed@2x.png +0 -0
  284. melage/resource/theme/rc/checkbox_checked_pressed@2x0.png +0 -0
  285. melage/resource/theme/rc/checkbox_indeterminate.png +0 -0
  286. melage/resource/theme/rc/checkbox_indeterminate@2x.png +0 -0
  287. melage/resource/theme/rc/checkbox_indeterminate_disabled.png +0 -0
  288. melage/resource/theme/rc/checkbox_indeterminate_disabled@2x.png +0 -0
  289. melage/resource/theme/rc/checkbox_indeterminate_focus.png +0 -0
  290. melage/resource/theme/rc/checkbox_indeterminate_focus@2x.png +0 -0
  291. melage/resource/theme/rc/checkbox_indeterminate_pressed.png +0 -0
  292. melage/resource/theme/rc/checkbox_indeterminate_pressed@2x.png +0 -0
  293. melage/resource/theme/rc/checkbox_unchecked.png +0 -0
  294. melage/resource/theme/rc/checkbox_unchecked0.png +0 -0
  295. melage/resource/theme/rc/checkbox_unchecked00.png +0 -0
  296. melage/resource/theme/rc/checkbox_unchecked@2x.png +0 -0
  297. melage/resource/theme/rc/checkbox_unchecked@2x0.png +0 -0
  298. melage/resource/theme/rc/checkbox_unchecked@2x00.png +0 -0
  299. melage/resource/theme/rc/checkbox_unchecked_disabled.png +0 -0
  300. melage/resource/theme/rc/checkbox_unchecked_disabled0.png +0 -0
  301. melage/resource/theme/rc/checkbox_unchecked_disabled00.png +0 -0
  302. melage/resource/theme/rc/checkbox_unchecked_disabled@2x.png +0 -0
  303. melage/resource/theme/rc/checkbox_unchecked_disabled@2x0.png +0 -0
  304. melage/resource/theme/rc/checkbox_unchecked_disabled@2x00.png +0 -0
  305. melage/resource/theme/rc/checkbox_unchecked_focus.png +0 -0
  306. melage/resource/theme/rc/checkbox_unchecked_focus0.png +0 -0
  307. melage/resource/theme/rc/checkbox_unchecked_focus00.png +0 -0
  308. melage/resource/theme/rc/checkbox_unchecked_focus@2x.png +0 -0
  309. melage/resource/theme/rc/checkbox_unchecked_focus@2x0.png +0 -0
  310. melage/resource/theme/rc/checkbox_unchecked_focus@2x00.png +0 -0
  311. melage/resource/theme/rc/checkbox_unchecked_pressed.png +0 -0
  312. melage/resource/theme/rc/checkbox_unchecked_pressed0.png +0 -0
  313. melage/resource/theme/rc/checkbox_unchecked_pressed00.png +0 -0
  314. melage/resource/theme/rc/checkbox_unchecked_pressed@2x.png +0 -0
  315. melage/resource/theme/rc/checkbox_unchecked_pressed@2x0.png +0 -0
  316. melage/resource/theme/rc/checkbox_unchecked_pressed@2x00.png +0 -0
  317. melage/resource/theme/rc/line_horizontal.png +0 -0
  318. melage/resource/theme/rc/line_horizontal@2x.png +0 -0
  319. melage/resource/theme/rc/line_horizontal_disabled.png +0 -0
  320. melage/resource/theme/rc/line_horizontal_disabled@2x.png +0 -0
  321. melage/resource/theme/rc/line_horizontal_focus.png +0 -0
  322. melage/resource/theme/rc/line_horizontal_focus@2x.png +0 -0
  323. melage/resource/theme/rc/line_horizontal_pressed.png +0 -0
  324. melage/resource/theme/rc/line_horizontal_pressed@2x.png +0 -0
  325. melage/resource/theme/rc/line_vertical.png +0 -0
  326. melage/resource/theme/rc/line_vertical@2x.png +0 -0
  327. melage/resource/theme/rc/line_vertical_disabled.png +0 -0
  328. melage/resource/theme/rc/line_vertical_disabled@2x.png +0 -0
  329. melage/resource/theme/rc/line_vertical_focus.png +0 -0
  330. melage/resource/theme/rc/line_vertical_focus@2x.png +0 -0
  331. melage/resource/theme/rc/line_vertical_pressed.png +0 -0
  332. melage/resource/theme/rc/line_vertical_pressed@2x.png +0 -0
  333. melage/resource/theme/rc/radio_checked.png +0 -0
  334. melage/resource/theme/rc/radio_checked@2x.png +0 -0
  335. melage/resource/theme/rc/radio_checked_disabled.png +0 -0
  336. melage/resource/theme/rc/radio_checked_disabled@2x.png +0 -0
  337. melage/resource/theme/rc/radio_checked_focus.png +0 -0
  338. melage/resource/theme/rc/radio_checked_focus@2x.png +0 -0
  339. melage/resource/theme/rc/radio_checked_pressed.png +0 -0
  340. melage/resource/theme/rc/radio_checked_pressed@2x.png +0 -0
  341. melage/resource/theme/rc/radio_unchecked.png +0 -0
  342. melage/resource/theme/rc/radio_unchecked@2x.png +0 -0
  343. melage/resource/theme/rc/radio_unchecked_disabled.png +0 -0
  344. melage/resource/theme/rc/radio_unchecked_disabled@2x.png +0 -0
  345. melage/resource/theme/rc/radio_unchecked_focus.png +0 -0
  346. melage/resource/theme/rc/radio_unchecked_focus@2x.png +0 -0
  347. melage/resource/theme/rc/radio_unchecked_pressed.png +0 -0
  348. melage/resource/theme/rc/radio_unchecked_pressed@2x.png +0 -0
  349. melage/resource/theme/rc/toolbar_move_horizontal.png +0 -0
  350. melage/resource/theme/rc/toolbar_move_horizontal@2x.png +0 -0
  351. melage/resource/theme/rc/toolbar_move_horizontal_disabled.png +0 -0
  352. melage/resource/theme/rc/toolbar_move_horizontal_disabled@2x.png +0 -0
  353. melage/resource/theme/rc/toolbar_move_horizontal_focus.png +0 -0
  354. melage/resource/theme/rc/toolbar_move_horizontal_focus@2x.png +0 -0
  355. melage/resource/theme/rc/toolbar_move_horizontal_pressed.png +0 -0
  356. melage/resource/theme/rc/toolbar_move_horizontal_pressed@2x.png +0 -0
  357. melage/resource/theme/rc/toolbar_move_vertical.png +0 -0
  358. melage/resource/theme/rc/toolbar_move_vertical@2x.png +0 -0
  359. melage/resource/theme/rc/toolbar_move_vertical_disabled.png +0 -0
  360. melage/resource/theme/rc/toolbar_move_vertical_disabled@2x.png +0 -0
  361. melage/resource/theme/rc/toolbar_move_vertical_focus.png +0 -0
  362. melage/resource/theme/rc/toolbar_move_vertical_focus@2x.png +0 -0
  363. melage/resource/theme/rc/toolbar_move_vertical_pressed.png +0 -0
  364. melage/resource/theme/rc/toolbar_move_vertical_pressed@2x.png +0 -0
  365. melage/resource/theme/rc/toolbar_separator_horizontal.png +0 -0
  366. melage/resource/theme/rc/toolbar_separator_horizontal@2x.png +0 -0
  367. melage/resource/theme/rc/toolbar_separator_horizontal_disabled.png +0 -0
  368. melage/resource/theme/rc/toolbar_separator_horizontal_disabled@2x.png +0 -0
  369. melage/resource/theme/rc/toolbar_separator_horizontal_focus.png +0 -0
  370. melage/resource/theme/rc/toolbar_separator_horizontal_focus@2x.png +0 -0
  371. melage/resource/theme/rc/toolbar_separator_horizontal_pressed.png +0 -0
  372. melage/resource/theme/rc/toolbar_separator_horizontal_pressed@2x.png +0 -0
  373. melage/resource/theme/rc/toolbar_separator_vertical.png +0 -0
  374. melage/resource/theme/rc/toolbar_separator_vertical@2x.png +0 -0
  375. melage/resource/theme/rc/toolbar_separator_vertical_disabled.png +0 -0
  376. melage/resource/theme/rc/toolbar_separator_vertical_disabled@2x.png +0 -0
  377. melage/resource/theme/rc/toolbar_separator_vertical_focus.png +0 -0
  378. melage/resource/theme/rc/toolbar_separator_vertical_focus@2x.png +0 -0
  379. melage/resource/theme/rc/toolbar_separator_vertical_pressed.png +0 -0
  380. melage/resource/theme/rc/toolbar_separator_vertical_pressed@2x.png +0 -0
  381. melage/resource/theme/rc/transparent.png +0 -0
  382. melage/resource/theme/rc/transparent@2x.png +0 -0
  383. melage/resource/theme/rc/transparent_disabled.png +0 -0
  384. melage/resource/theme/rc/transparent_disabled@2x.png +0 -0
  385. melage/resource/theme/rc/transparent_focus.png +0 -0
  386. melage/resource/theme/rc/transparent_focus@2x.png +0 -0
  387. melage/resource/theme/rc/transparent_pressed.png +0 -0
  388. melage/resource/theme/rc/transparent_pressed@2x.png +0 -0
  389. melage/resource/theme/rc/window_close.png +0 -0
  390. melage/resource/theme/rc/window_close@2x.png +0 -0
  391. melage/resource/theme/rc/window_close_disabled.png +0 -0
  392. melage/resource/theme/rc/window_close_disabled@2x.png +0 -0
  393. melage/resource/theme/rc/window_close_focus.png +0 -0
  394. melage/resource/theme/rc/window_close_focus@2x.png +0 -0
  395. melage/resource/theme/rc/window_close_pressed.png +0 -0
  396. melage/resource/theme/rc/window_close_pressed@2x.png +0 -0
  397. melage/resource/theme/rc/window_grip.png +0 -0
  398. melage/resource/theme/rc/window_grip@2x.png +0 -0
  399. melage/resource/theme/rc/window_grip_disabled.png +0 -0
  400. melage/resource/theme/rc/window_grip_disabled@2x.png +0 -0
  401. melage/resource/theme/rc/window_grip_focus.png +0 -0
  402. melage/resource/theme/rc/window_grip_focus@2x.png +0 -0
  403. melage/resource/theme/rc/window_grip_pressed.png +0 -0
  404. melage/resource/theme/rc/window_grip_pressed@2x.png +0 -0
  405. melage/resource/theme/rc/window_minimize.png +0 -0
  406. melage/resource/theme/rc/window_minimize@2x.png +0 -0
  407. melage/resource/theme/rc/window_minimize_disabled.png +0 -0
  408. melage/resource/theme/rc/window_minimize_disabled@2x.png +0 -0
  409. melage/resource/theme/rc/window_minimize_focus.png +0 -0
  410. melage/resource/theme/rc/window_minimize_focus@2x.png +0 -0
  411. melage/resource/theme/rc/window_minimize_pressed.png +0 -0
  412. melage/resource/theme/rc/window_minimize_pressed@2x.png +0 -0
  413. melage/resource/theme/rc/window_undock.png +0 -0
  414. melage/resource/theme/rc/window_undock@2x.png +0 -0
  415. melage/resource/theme/rc/window_undock_disabled.png +0 -0
  416. melage/resource/theme/rc/window_undock_disabled@2x.png +0 -0
  417. melage/resource/theme/rc/window_undock_focus.png +0 -0
  418. melage/resource/theme/rc/window_undock_focus@2x.png +0 -0
  419. melage/resource/theme/rc/window_undock_pressed.png +0 -0
  420. melage/resource/theme/rc/window_undock_pressed@2x.png +0 -0
  421. melage/resource/theme/style.qss +2223 -0
  422. melage/resource/tract.png +0 -0
  423. melage/resource/view1.png +0 -0
  424. melage/resource/view1_eco.png +0 -0
  425. melage/resource/view1_mri.png +0 -0
  426. melage/resource/view1_seg.png +0 -0
  427. melage/resource/view2.png +0 -0
  428. melage/resource/view2_seg.png +0 -0
  429. melage/resource/w.png +0 -0
  430. melage/resource/zoom_in.png +0 -0
  431. melage/resource/zoom_inFaded.png +0 -0
  432. melage/resource/zoom_out.png +0 -0
  433. melage/resource/zoom_outFaded.png +0 -0
  434. melage/some_notes.txt +3 -0
  435. melage/utils/DispalyIm.py +2788 -0
  436. melage/utils/GMM.py +720 -0
  437. melage/utils/Shaders_120.py +257 -0
  438. melage/utils/Shaders_330.py +314 -0
  439. melage/utils/Shaders_bu.py +314 -0
  440. melage/utils/__init__0.py +7 -0
  441. melage/utils/brain_extraction_helper.py +234 -0
  442. melage/utils/custom_QScrollBar.py +61 -0
  443. melage/utils/glScientific.py +1554 -0
  444. melage/utils/glScientific_bc.py +1585 -0
  445. melage/utils/readData.py +1061 -0
  446. melage/utils/registration.py +512 -0
  447. melage/utils/source_folder.py +18 -0
  448. melage/utils/utils.py +3808 -0
  449. melage/version.txt +1 -0
  450. melage/widgets/ApplyMask.py +212 -0
  451. melage/widgets/ChangeSystem.py +152 -0
  452. melage/widgets/DeepLModels/InfantSegment/Unet.py +464 -0
  453. melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py +149 -0
  454. melage/widgets/DeepLModels/NPP/models/checkpoints/npp_v1.pth.py +0 -0
  455. melage/widgets/DeepLModels/NPP/models/losses.py +146 -0
  456. melage/widgets/DeepLModels/NPP/models/model.py +272 -0
  457. melage/widgets/DeepLModels/NPP/models/utils.py +303 -0
  458. melage/widgets/DeepLModels/NPP/npp.py +116 -0
  459. melage/widgets/DeepLModels/NPP/requirements.txt +8 -0
  460. melage/widgets/DeepLModels/NPP/train/train.py +116 -0
  461. melage/widgets/DeepLModels/Unet3DAtt.py +657 -0
  462. melage/widgets/DeepLModels/Unet3D_basic.py +648 -0
  463. melage/widgets/DeepLModels/new_unet.py +652 -0
  464. melage/widgets/DeepLModels/new_unet_old.py +639 -0
  465. melage/widgets/DeepLModels/new_unet_old2.py +658 -0
  466. melage/widgets/HistImage.py +153 -0
  467. melage/widgets/ImageThresholding.py +222 -0
  468. melage/widgets/MaskOperations.py +147 -0
  469. melage/widgets/N4Dialog.py +241 -0
  470. melage/widgets/Segmentation/FCM.py +1553 -0
  471. melage/widgets/Segmentation/__init__.py +588 -0
  472. melage/widgets/Segmentation/utils.py +417 -0
  473. melage/widgets/SemiAutoSeg.py +666 -0
  474. melage/widgets/Synthstrip.py +141 -0
  475. melage/widgets/__init__0.py +5 -0
  476. melage/widgets/about.py +246 -0
  477. melage/widgets/activation.py +437 -0
  478. melage/widgets/activator.py +147 -0
  479. melage/widgets/be_dl.py +409 -0
  480. melage/widgets/be_dl_unet3d.py +441 -0
  481. melage/widgets/brain_extraction.py +855 -0
  482. melage/widgets/brain_extraction_dl.py +887 -0
  483. melage/widgets/brain_extraction_dl_bu.py +869 -0
  484. melage/widgets/colorwidget.py +100 -0
  485. melage/widgets/dockWidgets.py +2005 -0
  486. melage/widgets/enhanceImWidget.py +109 -0
  487. melage/widgets/fileDialog_widget.py +275 -0
  488. melage/widgets/iminfo.py +346 -0
  489. melage/widgets/mainwindow_widget.py +6775 -0
  490. melage/widgets/melageAbout.py +123 -0
  491. melage/widgets/openglWidgets.py +556 -0
  492. melage/widgets/registrationWidget.py +342 -0
  493. melage/widgets/repeat_widget.py +74 -0
  494. melage/widgets/screenshot_widget.py +138 -0
  495. melage/widgets/settings_widget.py +77 -0
  496. melage/widgets/tranformationWidget.py +275 -0
  497. melage-0.0.65.dist-info/METADATA +742 -0
  498. melage-0.0.65.dist-info/RECORD +501 -0
  499. melage-0.0.65.dist-info/WHEEL +5 -0
  500. melage-0.0.65.dist-info/entry_points.txt +2 -0
  501. melage-0.0.65.dist-info/top_level.txt +1 -0
melage/widgets/DeepLModels/Unet3DAtt.py
@@ -0,0 +1,657 @@
+ from functools import partial
+ import torch.nn as nn
+ import torch
+ import math
+ #from .model_utils import *
+ import math
+ import torch
+ from functools import partial
+ import torch.nn as nn
+ from einops import repeat, rearrange
+ def sigmoid_beta_schedule(timesteps, start=-3, end=3, tau=1, clamp_min=1e-5):
+     """
+     sigmoid schedule
+     proposed in https://arxiv.org/abs/2212.11972 - Figure 8
+     better for images > 64x64, when used during training
+     """
+     steps = timesteps + 1
+     t = torch.linspace(0, timesteps, steps, dtype=torch.float64) / timesteps
+     v_start = torch.tensor(start / tau).sigmoid()
+     v_end = torch.tensor(end / tau).sigmoid()
+     alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
+     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
+     betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
+     return torch.clip(betas, 0, 0.999)
+
+ class BlockLayer(nn.Module):
+     def __init__(self, num_blcks, block_layer, planes_in, planes_out, kernel_size=3, first_layer=False,
+                  input_size=None, time_emb_dim=None, norm_type='layer'):
+         super(BlockLayer, self).__init__()
+
+         self.blocks = nn.ModuleList()
+         for i in range(num_blcks):
+             if i == 0:
+                 self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=first_layer,
+                                                input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             else:
+                 self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=False,
+                                                input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             planes_in = planes_out
+
+
+     def forward(self, x, t=None):
+         for i, block in enumerate(self.blocks):
+             x = block(x, t)
+         return x
+
+
+
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, planes_in, planes_out, time_emb_dim=None, kernel_size=3, first_layer=False, input_size=128, norm_type='layer'):
+         super(ResidualBlock, self).__init__()
+         if time_emb_dim is not None:
+             if planes_in > planes_out:
+                 dim = planes_in * 2
+             else:
+                 dim = planes_in * 2
+             self.mlp = nn.Sequential(
+                 nn.SiLU(),
+                 nn.Linear(time_emb_dim, dim)
+             )
+
+         self.conv1 = ConvolutionalBlock(planes_in=planes_in, planes_out=planes_out, first_layer=first_layer,
+                                         kernel_size=kernel_size, dilation=1,
+                                         activation=nn.ReLU, input_size=input_size, norm_type=norm_type)
+         self.conv2 = ConvolutionalBlock(planes_in=planes_out, planes_out=planes_out, first_layer=False,
+                                         kernel_size=1,
+                                         dilation=1, activation=nn.ReLU, input_size=input_size, norm_type=norm_type)
+         if planes_in != planes_out:
+             self.sample = nn.Conv3d(planes_in, planes_out, (1, 1, 1), stride=(1, 1, 1), dilation=(1, 1, 1),
+                                     bias=True)  #
+         else:
+             self.sample = None
+
+     def forward(self, x, time_emb=None):
+         identity = x.clone()
+         scale_shift = None
+         if time_emb is not None:
+             time_emb = self.mlp(time_emb)
+             #time_emb = rearrange(time_emb, 'b c -> b c 1 1 1')
+             time_emb = time_emb.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
+             #scale_shift = time_emb#.chunk(2, dim = 1)
+             scale_shift = time_emb.chunk(2, dim=1)
+         x = self.conv1(x, scale_shift=scale_shift)
+         x = self.conv2(x, scale_shift=None)
+
+
+         if self.sample is not None:
+             identity = self.sample(identity)
+
+
+         x += identity
+
+         return x
+
+
+ class UnetEncoder(nn.Module):
+     def __init__(self, in_channel, base_inc_channel=8, layer=BlockLayer, block=None, layer_blocks=None,
+                  downsampling_stride=None, feature_dilation=1.5, layer_widths=None, kernel_size=3,
+                  time_emb_dim=None, norm_type='layer'):
+         super(UnetEncoder, self).__init__()
+
+         self.layers = nn.ModuleList()
+         self.downsampling_convolutions = nn.ModuleList()
+         self.attention_modules = nn.ModuleList()
+         self.downsampling_zarib = []
+         in_channel_layer = in_channel
+         input_size = 192
+         self._layers_with = []
+         self._layers_with.append(base_inc_channel)
+         for i, num_blcks in enumerate(layer_blocks):
+             if layer_widths is not None:
+                 out_channel_layer = layer_widths[i]
+             else:
+                 out_channel_layer = base_inc_channel * int(feature_dilation ** (i + 1)) // 2
+                 #if out_channel_layer>128:
+                 #    out_channel_layer = 128 + out_channel_layer//int(2*feature_dilation)
+
+             if i == 0:
+                 first_layer = True
+             else:
+                 first_layer = False
+             self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                      planes_in=in_channel_layer, planes_out=out_channel_layer,
+                                      kernel_size=kernel_size,
+                                      first_layer=first_layer, input_size=input_size,
+                                      time_emb_dim=time_emb_dim, norm_type=norm_type))
+             #self.attention_modules.append(Attention(out_channel_layer))
+             if i != len(layer_blocks) - 1:
+
+                 padding = kernel_size // 2  # constant size
+                 #downsampling_conv = nn.Conv3d(out_channel_layer, out_channel_layer, (kernel_size, kernel_size, kernel_size), padding=padding,
+                 #                              stride=(downsampling_stride, downsampling_stride, downsampling_stride),
+                 #                              bias=True)
+                 downsampling_conv = nn.MaxPool3d(kernel_size=2, stride=2)
+
+                 self.downsampling_convolutions.append(downsampling_conv)
+
+                 input_size = input_size // 2
+             print("Encoder {}:".format(i), in_channel_layer, out_channel_layer)
+             self._layers_with.append(out_channel_layer)
+             in_channel_layer = out_channel_layer
+         self.out_channel_layer = in_channel_layer
+         self.output_size = input_size
+
+     def forward(self, x, time=None):
+         outputs = list()
+         #outputs.insert(0, x)
+         for layer, downsampling in zip(self.layers[:-1], self.downsampling_convolutions):
+             x = layer(x, time)
+
+             outputs.insert(0, x)
+
+             x = downsampling(x)
+         x = self.layers[-1](x, time)
+         outputs.insert(0, x)  # bottleneck layer
+         return outputs
+
+ class ConvolutionalBlock(nn.Module):
+     def __init__(self, planes_in, planes_out, first_layer=False, kernel_size=3, dilation=1, activation=None,
+                  input_size=None, norm_type='layer'):
+         super(ConvolutionalBlock, self).__init__()
+         if dilation == 1:
+             padding = kernel_size // 2  # constant size
+         else:
+             # (In + 2*padding - dilation * (kernel_size - 1) - 1)/stride + 1
+             if kernel_size == 3:
+                 if dilation == 2:
+                     padding = 2
+                 elif dilation == 4:
+                     padding = 4
+                 elif dilation == 3:
+                     padding = 3
+                 else:
+                     padding = None
+             elif kernel_size == 1:
+                 padding = 0
+         self.activation = None
+         self.norm = None
+         if first_layer:
+             self.norm = nn.InstanceNorm3d(planes_in)
+             self.activation = activation()
+             self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                   padding=padding, bias=True,
+                                   dilation=(dilation, dilation, dilation))
+         else:
+             if activation is not None:
+                 if norm_type.lower() == 'layer':
+                     self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                 elif norm_type.lower() == 'group':
+                     valid_num_groups = [16, 8, 4, 2]
+                     num_groups = None
+                     for num_groups in valid_num_groups:
+                         if planes_in % num_groups != 0:
+                             break
+                     if num_groups is None:
+                         raise ValueError('Num groups can not be determined')
+                     self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=planes_in)
+                 elif norm_type.lower() == 'batch':
+                     self.norm = nn.BatchNorm3d(planes_in)
+                 elif norm_type.lower() == 'instance':
+                     self.norm = nn.InstanceNorm3d(planes_in)
+                 else:
+                     self.norm = None
+
+                 self.activation = activation()
+                 self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                       padding=padding, bias=True,
+                                       dilation=(dilation, dilation, dilation))
+
+             else:
+                 if norm_type.lower() == 'layer':
+                     if input_size < 120:
+                         self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                     else:
+                         self.norm = nn.InstanceNorm3d(planes_in)
+                 elif norm_type.lower() == 'group':
+                     valid_num_groups = [16, 8, 4, 2]
+                     num_groups = None
+                     for num_groups in valid_num_groups:
+                         if planes_in % num_groups != 0:
+                             break
+                     if num_groups is None:
+                         raise ValueError('Num groups can not be determined')
+                     self.norm = nn.GroupNorm(num_groups=planes_in, num_channels=planes_in)
+                 elif norm_type.lower() == 'batch':
+                     self.norm = nn.BatchNorm3d(planes_in)
+                 elif norm_type.lower() == 'instance':
+                     self.norm = nn.InstanceNorm3d(planes_in)
+                 else:
+                     self.norm = None
+
+                 self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                       padding=padding, bias=True,
+                                       dilation=(dilation, dilation, dilation))
+
+
+     def forward(self, x, scale_shift=None):
+         if self.norm is not None:
+             x = self.norm(x)
+
+         if scale_shift is not None:
+             scale, shift = scale_shift
+             #scale1, scale2 = scale.chunk(2, dim=0)
+             #shift1, shift2 = scale.chunk(2, dim=0)
+             #x = x * (scale1 + 1) + shift1 + x * (scale2 + 1) + shift2
+             x = x * (scale + 1) + shift
+
+         if self.activation is not None:
+             x = self.activation(x)
+
+         x = self.conv(x)
+
+         return x
+ class SinusoidalPosEmb(nn.Module):
+     def __init__(self, dim):
+         super().__init__()
+         self.dim = dim
+
+     def forward(self, x):
+         device = x.device
+         half_dim = self.dim // 2
+         emb = math.log(10000) / (half_dim - 1)
+         emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+         emb = x[..., None] * emb[None, :]
+         emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+         if len(emb.shape) == 3:
+             emb = emb.view(emb.shape[0], emb.shape[1] * emb.shape[2])
+         return emb
+
+
+
+ class UnetDecoder(nn.Module):
+     def __init__(self, in_channel, base_inc_channel=64, layer=BlockLayer, block=None, layer_blocks=[1, 1, 1, 1],
+                  feature_dilation=2, upsampling_stride=2, layer_widths=None, kernel_size=3,
+                  upsampling_mode="trilinear", align_corners=False, use_transposed_convolutions=False, last_cov_channels=256,
+                  time_emb_dim=None, norm_type='layer'
+                  ):
+         super(UnetDecoder, self).__init__()
+         self.layers = nn.ModuleList()
+         if use_transposed_convolutions:
+             self.upsampling_blocks = nn.ModuleList()
+         else:
+             self.upsampling_blocks = list()
+         self.attention_modules = nn.ModuleList()
+         in_channel_layer = in_channel
+         input_size = 24
+
+
+         for i, num_blcks in enumerate(layer_blocks):
+             if layer_widths is not None:
+                 out_channel_layer = layer_widths[i]
+             else:
+                 out_channel_layer = base_inc_channel // (feature_dilation ** (i))
+
+             if i == 0:
+                 first_layer = True
+                 self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                          planes_in=last_cov_channels, planes_out=out_channel_layer,
+                                          kernel_size=kernel_size,
+                                          first_layer=first_layer, input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             else:
+                 first_layer = False
+
+                 self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                          planes_in=in_channel_layer + layer_widths[i - 1], planes_out=out_channel_layer,
+                                          kernel_size=kernel_size,
+                                          first_layer=first_layer, input_size=input_size, time_emb_dim=time_emb_dim, norm_type=norm_type))
+             #self.attention_modules.append(Attention(out_channel_layer))
+             if 2 > 1:  #i != len(layer_blocks) - 1:
+
+                 if use_transposed_convolutions:
+
+                     self.upsampling_blocks.append(nn.ConvTranspose3d(out_channel_layer, out_channel_layer, kernel_size=2,
+                                                                      stride=upsampling_stride, padding=0))
+                 else:
+
+                     #self.upsampling_blocks.append(partial(nn.functional.interpolate, scale_factor=upsampling_stride,
+                     #                                      mode=upsampling_mode, align_corners=align_corners))
+                     self.upsampling_blocks.append(nn.Upsample(scale_factor=2, mode='nearest'))
+
+
+             input_size = input_size * 2
+             last_cov_channels = in_channel_layer  #last_cov_channels//2
+             print("Decoder {}:".format(i), in_channel_layer, out_channel_layer)
+             in_channel_layer = out_channel_layer
+         self.out_channel_layer = in_channel_layer
+     def forward(self, x, t):
+         i = 0
+
+         y = x[0]
+         for up, lay in zip(self.upsampling_blocks, self.layers[:-1]):
+             if i == 0:
+                 y = lay(y, t)
+             else:
+                 y = lay(y, t)
+             y = up(y)
+             #y = att(y)
+             y = torch.cat([y, x[i + 1]], 1)
+             i += 1
+         y = self.layers[-1](y, t)
+         return y
+ #from .model_utils import *
+
+ class Attention(nn.Module):
+     def __init__(self, dim, heads=4, dim_head=16):
+         super().__init__()
+         self.scale = dim_head ** -0.5
+         self.heads = heads
+         hidden_dim = dim_head * heads
+
+         self.to_qkv = nn.Conv3d(dim, hidden_dim * 3, 1, bias=False)
+         self.to_out = nn.Conv3d(hidden_dim, dim, 1)
+
+     def forward(self, x, mask=None):
+         b, c, h, w, z = x.shape
+         qkv = self.to_qkv(x).chunk(3, dim=1)
+         q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h=self.heads), qkv)
+
+         scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
+         attention = torch.softmax(scaled_dot_prod, dim=-1)
+         v = v / (h * w * z)
+         atv = torch.einsum('... i j , ... j d -> ... i d', attention, v)
+         out = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
+         return self.to_out(out)
+
+
+
+
+ class CrossConv3d(nn.Conv3d):
+
+     """
+     https://github.com/JJGO/UniverSeg/blob/main/universeg/nn/cross_conv.py
+     Compute pairwise convolution between all element of x and all elements of y.
+     x, y are tensors of size B,_,C,H,W where _ could be different number of elements in x and y
+     essentially, we do a meshgrid of the elements to get B,Sx,Sy,C,H,W tensors, and then
+     pairwise conv.
+     Args:
+         x (tensor): B,Sx,Cx,H,W
+         y (tensor): B,Sy,Cy,H,W
+     Returns:
+         tensor: B,Sx,Sy,Cout,H,W
+     """
+     """
+     CrossConv2d is a convolutional layer that performs pairwise convolutions between elements of two input tensors.
+
+     Parameters
+     ----------
+     in_channels : int or tuple of ints
+         Number of channels in the input tensor(s).
+         If the tensors have different number of channels, in_channels must be a tuple
+     out_channels : int
+         Number of output channels.
+     kernel_size : int or tuple of ints
+         Size of the convolutional kernel.
+     stride : int or tuple of ints, optional
+         Stride of the convolution. Default is 1.
+     padding : int or tuple of ints, optional
+         Zero-padding added to both sides of the input. Default is 0.
+     dilation : int or tuple of ints, optional
+         Spacing between kernel elements. Default is 1.
+     groups : int, optional
+         Number of blocked connections from input channels to output channels. Default is 1.
+     bias : bool, optional
+         If True, adds a learnable bias to the output. Default is True.
+     padding_mode : str, optional
+         Padding mode. Default is "zeros".
+     device : str, optional
+         Device on which to allocate the tensor. Default is None.
+     dtype : torch.dtype, optional
+         Data type assigned to the tensor. Default is None.
+
+     Returns
+     -------
+     torch.Tensor
+         Tensor resulting from the pairwise convolution between the elements of x and y.
+
+     Notes
+     -----
+     x and y are tensors of size (B, Sx, Cx, H, W) and (B, Sy, Cy, H, W), respectively,
+     The function does the cartesian product of the elements of x and y to obtain a tensor
+     of size (B, Sx, Sy, Cx + Cy, H, W), and then performs the same convolution for all
+     (B, Sx, Sy) in the batch dimension. Runtime and memory are O(Sx * Sy).
+
+     Examples
+     --------
+     >>> x = torch.randn(2, 3, 4, 32, 32)
+     >>> y = torch.randn(2, 5, 6, 32, 32)
+     >>> conv = CrossConv2d(in_channels=(4, 6), out_channels=7, kernel_size=3, padding=1)
+     >>> output = conv(x, y)
+     >>> output.shape #(2, 3, 5, 7, 32, 32)
+     """
+
+
+     def __init__(
+         self,
+         in_channels,
+         out_channels: int,
+         kernel_size,
+         stride=1,
+         padding=0,
+         dilation=1,
+         groups: int = 1,
+         bias: bool = True,
+         padding_mode: str = "zeros",
+         device=None,
+         dtype=None,
+     ) -> None:
+
+         if isinstance(in_channels, (list, tuple)):
+             concat_channels = sum(in_channels)
+         else:
+             concat_channels = 2 * in_channels
+
+         super().__init__(
+             in_channels=concat_channels,
+             out_channels=out_channels,
+             kernel_size=kernel_size,
+             stride=stride,
+             padding=padding,
+             dilation=dilation,
+             groups=groups,
+             bias=bias,
+             padding_mode=padding_mode,
+             device=device,
+             dtype=dtype,
+         )
+
+     def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+         """
+         Compute pairwise convolution between all elements of x and all elements of y.
+
+         Parameters
+         ----------
+         x : torch.Tensor
+             Input tensor of size (B, Sx, Cx, H, W).
+         y : torch.Tensor
+             Input tensor of size (B, Sy, Cy, H, W).
+
+         Returns
+         -------
+         torch.Tensor
+             Tensor resulting from the cross-convolution between the elements of x and y.
+             Has size (B, Sx, Sy, Co, H, W), where Co is the number of output channels.
+         """
+         B, Sx, *_ = x.shape
+         _, Sy, *_ = y.shape
+
+         xs = repeat(x, "B Sx Cx H W Y -> B Sx Sy Cx H W Y", Sy=Sy)
+         ys = repeat(y, "B Sy Cy H W Y -> B Sx Sy Cy H W Y", Sx=Sx)
+
+         xy = torch.cat([xs, ys], dim=3)
+
+         batched_xy = rearrange(xy, "B Sx Sy C2 H W Y -> (B Sx Sy) C2 H W Y")
+         batched_output = super().forward(batched_xy)
+
+         output = rearrange(
+             batched_output, "(B Sx Sy) Co H W Y -> B Sx Sy Co H W Y", B=B, Sx=Sx, Sy=Sy
+         )
+         return output
+
+ class UnetGen(nn.Module):
+     def __init__(self, base_inc_channel=8,
+                  feature_dilation=2, downsampling_stride=2,
+                  encoder_class=UnetEncoder, layer_widths=None, block=None,
+                  kernel_size=3, interpolation_mode="trilinear", decoder_class=None,
+                  use_transposed_convolutions=True, time_embed=False, norm_type='layer'):
+         super(UnetGen, self).__init__()
+         time_embed = self.time_embed
+         use_transposed_convolutions = self.use_tr_conv
+         inblock = 16
+         base_inc_channel = inblock
+         self.base_inc_channel = base_inc_channel
+
+         sinu_pos_emb = SinusoidalPosEmb(inblock)
+         fourier_dim = inblock
+         #if self.spacing_embed:
+         #    fourier_dim *= 4
+
+         # time embeddings
+
+         time_dim = inblock * 4
+         if time_embed:
+             self.time_mlp = nn.Sequential(
+                 sinu_pos_emb,
+                 nn.Linear(fourier_dim, time_dim),
+                 nn.GELU(),
+                 nn.Linear(time_dim, time_dim)
+             )
+         else:
+             time_dim = None
+
+         #encoder_blocks = [1, 1, 1, 1, 1, 1]
+
+         #decoder_blocks = [1, 1, 1, 1, 1, 1]
+         encoder_blocks = [1, 1, 1]
+
+         decoder_blocks = [1, 1, 1]
+
+         padding = kernel_size // 2  # constant size
+         self.before_encoder = nn.Conv3d(1, inblock, kernel_size=(3, 3, 3),
+                                         stride=(1, 1, 1), padding=3 // 2,
+                                         bias=True)
+
+
+
+
+
+         #self.before_encoder = nn.Sequential(*[cv])
+
+         self.encoder = encoder_class(in_channel=inblock, base_inc_channel=base_inc_channel, layer_blocks=encoder_blocks,
+                                      block=block,
+                                      feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
+                                      layer_widths=layer_widths, kernel_size=kernel_size,
+                                      time_emb_dim=time_dim, norm_type=norm_type)
+
+         layer_widths = self.encoder._layers_with
+         in_channel = layer_widths[-1]
+         self.BottleNeck = BlockLayer(num_blcks=1, block_layer=block,
+                                      planes_in=in_channel, planes_out=in_channel,
+                                      kernel_size=kernel_size,
+                                      first_layer=False, input_size=self.encoder.output_size, time_emb_dim=time_dim, norm_type=norm_type)
+
+         self.BottleNeck_att = Attention(in_channel)
+
+         layer_widths = layer_widths[::-1][1:]
+
+
+         self.decoder = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel * 8, layer_blocks=decoder_blocks,
+                                      block=block, last_cov_channels=self.encoder.out_channel_layer,
+                                      upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                      use_transposed_convolutions=use_transposed_convolutions,
+                                      kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
+                                      )
+
+         kernel_size = 3
+
+         self.last_convolution = BlockLayer(num_blcks=1, block_layer=block,
+                                            planes_in=inblock * 2, planes_out=inblock // 2,
+                                            kernel_size=kernel_size,
+                                            first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)
+
+         self.last_convolution_rec = BlockLayer(num_blcks=1, block_layer=block,
+                                                planes_in=inblock * 2, planes_out=inblock // 2,
+                                                kernel_size=kernel_size,
+                                                first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)
+
+         self.final_convolution = nn.Conv3d(inblock // 2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                            stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+         self.final_convolution_rec = nn.Conv3d(inblock // 2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                                stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+         self.activation = nn.Softmax(dim=1)
+         self.sigmoid = nn.Sigmoid()
+     def extract(self, a, t, x_shape):
+         b, *_ = t.shape
+         out = a.gather(-1, t.long())
+         return out.reshape(b, *((1,) * (len(x_shape) - 1)))
+
+
+
+     def forward(self, y, time, t=0, noise=None):
+         #if t>0 and noise is not None and torch.rand(1)[0]<0.8:
+         #    v = torch.randn_like(y)
+         #    #biasf = self.add_bias(y, bias_ratio=0.05)
+         #    y = self.extract(self.sqrt_alphas_cumprod, t, y.shape) * y + \
+         #        self.extract(self.sqrt_one_minus_alphas_cumprod, t, y.shape) * 0.5 * (noise.detach() + v / 2)
+
+
+         y = self.before_encoder(y)
+
+         if self.time_embed:
+             if len(time.shape) == 1:
+                 t = self.time_mlp(time)
+             else:
+                 t = self.time_mlp(time)
+         else:
+             t = None
+
+         x = self.encoder(y, t)
+         x[0] = self.BottleNeck(x[0], t)
+         x[0] = self.BottleNeck_att(x[0])
+         a = x[0]
+         x = self.decoder(x, t)
+         x = torch.cat([x, y], 1)
+         #x = self.activation(x)
+         y = self.last_convolution(x)
+         y = self.final_convolution(y)
+         z = self.last_convolution_rec(x)
+         z = self.final_convolution_rec(z)
+         return [y, z]
+
+
+ class Unet3DAtt(UnetGen):
+     def __init__(self, time_embed=False, spacing_embed=1, channels=1, *args, encoder_class=UnetEncoder, **kwargs):
+         self.time_embed = time_embed
+         self.spacing_embed = spacing_embed
+         self.use_tr_conv = False
+
+
+
+
+         norm_type = "instance"
+         super().__init__(*args, encoder_class=encoder_class, decoder_class=UnetDecoder,
+                          block=ResidualBlock, norm_type=norm_type, **kwargs)
+
+         betas = sigmoid_beta_schedule(timesteps=100, **dict())
+
+         alphas = 1. - betas
+         alphas_cumprod = torch.cumprod(alphas, dim=0)
+         register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
+         register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
+         register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
+
+         self.channels = channels
+         self.netName = 'Unet3D'
+     def name(self):
+         return 'unet3d'
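
For readers who want to exercise this module outside the MELAGE GUI, here is a minimal smoke test of my own, not part of the package. It instantiates Unet3DAtt as defined in the diff above and pushes a dummy volume through it. The import path is inferred from the file listing (melage/widgets/DeepLModels/Unet3DAtt.py) and may need adjusting depending on how the package exposes its modules; the 32-cube input size is an arbitrary choice, and any spatial size divisible by 4 should survive the two pooling and two upsampling stages. The second returned tensor comes from the "_rec" head, which appears to be a reconstruction branch.

import torch
# Path assumed from the file listing above; adjust if the module is packaged differently.
from melage.widgets.DeepLModels.Unet3DAtt import Unet3DAtt

model = Unet3DAtt(time_embed=False)        # subclass attributes are set before UnetGen.__init__ reads them
model.eval()
volume = torch.randn(1, 1, 32, 32, 32)     # (batch, channel, D, H, W); hypothetical size
timesteps = torch.zeros(1)                 # unused when time_embed=False, but forward() still requires it
with torch.no_grad():
    out, out_rec = model(volume, timesteps)  # forward returns a two-element list [y, z]
print(out.shape, out_rec.shape)            # expected: torch.Size([1, 1, 32, 32, 32]) for both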