melage-0.0.65-py3-none-any.whl
This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- melage/__init__.py +16 -0
- melage/cli.py +4 -0
- melage/graphics/GLGraphicsItem.py +286 -0
- melage/graphics/GLViewWidget.py +595 -0
- melage/graphics/Transform3D.py +55 -0
- melage/graphics/__init__.py +8 -0
- melage/graphics/functions.py +101 -0
- melage/graphics/items/GLAxisItem.py +149 -0
- melage/graphics/items/GLGridItem.py +178 -0
- melage/graphics/items/GLPolygonItem.py +77 -0
- melage/graphics/items/GLScatterPlotItem.py +135 -0
- melage/graphics/items/GLVolumeItem.py +280 -0
- melage/graphics/items/GLVolumeItem_b.py +237 -0
- melage/graphics/items/__init__.py +0 -0
- melage/graphics/shaders.py +202 -0
- melage/main.py +270 -0
- melage/requirements22.txt +25 -0
- melage/requirements_old.txt +28 -0
- melage/resource/0circle.png +0 -0
- melage/resource/0circle_faded.png +0 -0
- melage/resource/3d.png +0 -0
- melage/resource/3d.psd +0 -0
- melage/resource/3dFaded.png +0 -0
- melage/resource/Eraser.png +0 -0
- melage/resource/EraserFaded.png +0 -0
- melage/resource/EraserX.png +0 -0
- melage/resource/EraserXFaded.png +0 -0
- melage/resource/Eraser_icon.svg +79 -0
- melage/resource/Hand.png +0 -0
- melage/resource/HandIcons_0.png +0 -0
- melage/resource/Hand_IX.png +0 -0
- melage/resource/Hand_IXFaded.png +0 -0
- melage/resource/Handsqueezed.png +0 -0
- melage/resource/Handwriting (copy).png +0 -0
- melage/resource/Handwriting.png +0 -0
- melage/resource/HandwritingMinus.png +0 -0
- melage/resource/HandwritingMinusX.png +0 -0
- melage/resource/HandwritingPlus.png +0 -0
- melage/resource/HandwritingPlusX.png +0 -0
- melage/resource/Move_icon.svg +8 -0
- melage/resource/PngItem_2422924.png +0 -0
- melage/resource/about.png +0 -0
- melage/resource/about_logo.png +0 -0
- melage/resource/about_logo0.png +0 -0
- melage/resource/action_check.png +0 -0
- melage/resource/action_check_OFF.png +0 -0
- melage/resource/arrow).png +0 -0
- melage/resource/arrow.png +0 -0
- melage/resource/arrowFaded.png +0 -0
- melage/resource/arrow_org.png +0 -0
- melage/resource/arrow_org.png.png +0 -0
- melage/resource/arrows.png +0 -0
- melage/resource/authors.mp4 +0 -0
- melage/resource/box.png +0 -0
- melage/resource/check-image-icon-0.jpg +0 -0
- melage/resource/circle.png +0 -0
- melage/resource/circle_faded.png +0 -0
- melage/resource/circle_or.png +0 -0
- melage/resource/close.png +0 -0
- melage/resource/close_bg.png +0 -0
- melage/resource/color/Simple.txt +18 -0
- melage/resource/color/Tissue.txt +24 -0
- melage/resource/color/Tissue12.txt +27 -0
- melage/resource/color/albert_LUT.txt +102 -0
- melage/resource/color/mcrib_LUT.txt +102 -0
- melage/resource/color/pediatric1.txt +29 -0
- melage/resource/color/pediatric1_old.txt +27 -0
- melage/resource/color/pediatric2.txt +87 -0
- melage/resource/color/pediatric3.txt +29 -0
- melage/resource/color/pediatrics (copy).csv +103 -0
- melage/resource/color/tissue_seg.txt +4 -0
- melage/resource/contour.png +0 -0
- melage/resource/contour.svg +2 -0
- melage/resource/contourFaded.png +0 -0
- melage/resource/contourX.png +0 -0
- melage/resource/contourXFaded.png +0 -0
- melage/resource/dti.png +0 -0
- melage/resource/dti0.png +0 -0
- melage/resource/dti222.png +0 -0
- melage/resource/dti_or.png +0 -0
- melage/resource/eco.png +0 -0
- melage/resource/eco22.png +0 -0
- melage/resource/eco_old.png +0 -0
- melage/resource/eco_or.png +0 -0
- melage/resource/eco_or2.png +0 -0
- melage/resource/eco_seg.png +0 -0
- melage/resource/eco_seg_old.png +0 -0
- melage/resource/export.png +0 -0
- melage/resource/hand-grab-icon-10.jpg +0 -0
- melage/resource/hand-grab-icon-25.jpg +0 -0
- melage/resource/info.png +0 -0
- melage/resource/line.png +0 -0
- melage/resource/linefaded.png +0 -0
- melage/resource/load.png +0 -0
- melage/resource/main.ico +0 -0
- melage/resource/manual_images/3D_rightc.png +0 -0
- melage/resource/manual_images/3D_rightc_goto.png +0 -0
- melage/resource/manual_images/3D_rightc_paint.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_draw1.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_draw2.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render2.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render3.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render4.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render5.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render6.png +0 -0
- melage/resource/manual_images/3D_rightc_seg.png +0 -0
- melage/resource/manual_images/exit_toolbar.png +0 -0
- melage/resource/manual_images/load_image_file.png +0 -0
- melage/resource/manual_images/load_image_file_openp.png +0 -0
- melage/resource/manual_images/main_page.png +0 -0
- melage/resource/manual_images/menu_file.png +0 -0
- melage/resource/manual_images/menu_file_export.png +0 -0
- melage/resource/manual_images/menu_file_import.png +0 -0
- melage/resource/manual_images/menu_file_settings.png +0 -0
- melage/resource/manual_images/menu_file_ss.png +0 -0
- melage/resource/manual_images/open_save_load.png +0 -0
- melage/resource/manual_images/panning_toolbar.png +0 -0
- melage/resource/manual_images/segmentation_toolbar.png +0 -0
- melage/resource/manual_images/tab_mri.png +0 -0
- melage/resource/manual_images/tab_us.png +0 -0
- melage/resource/manual_images/tabs.png +0 -0
- melage/resource/manual_images/toolbar_tools.png +0 -0
- melage/resource/manual_images/tools_basic.png +0 -0
- melage/resource/manual_images/tools_bet.png +0 -0
- melage/resource/manual_images/tools_cs.png +0 -0
- melage/resource/manual_images/tools_deepbet.png +0 -0
- melage/resource/manual_images/tools_imageinfo.png +0 -0
- melage/resource/manual_images/tools_maskO.png +0 -0
- melage/resource/manual_images/tools_masking.png +0 -0
- melage/resource/manual_images/tools_n4b.png +0 -0
- melage/resource/manual_images/tools_resize.png +0 -0
- melage/resource/manual_images/tools_ruler.png +0 -0
- melage/resource/manual_images/tools_seg.png +0 -0
- melage/resource/manual_images/tools_threshold.png +0 -0
- melage/resource/manual_images/tools_tools.png +0 -0
- melage/resource/manual_images/widget_color.png +0 -0
- melage/resource/manual_images/widget_color_add.png +0 -0
- melage/resource/manual_images/widget_color_add2.png +0 -0
- melage/resource/manual_images/widget_color_additional.png +0 -0
- melage/resource/manual_images/widget_images.png +0 -0
- melage/resource/manual_images/widget_images2.png +0 -0
- melage/resource/manual_images/widget_images3.png +0 -0
- melage/resource/manual_images/widget_marker.png +0 -0
- melage/resource/manual_images/widget_mri.png +0 -0
- melage/resource/manual_images/widget_mri2.png +0 -0
- melage/resource/manual_images/widget_segintensity.png +0 -0
- melage/resource/manual_images/widget_tab_mutualview.png +0 -0
- melage/resource/manual_images/widget_tab_mutualview2.png +0 -0
- melage/resource/manual_images/widget_table.png +0 -0
- melage/resource/manual_images/widget_table2.png +0 -0
- melage/resource/manual_images/widget_us.png +0 -0
- melage/resource/melage_top.ico +0 -0
- melage/resource/melage_top.png +0 -0
- melage/resource/melage_top0.png +0 -0
- melage/resource/melage_top1.png +0 -0
- melage/resource/melage_top4.png +0 -0
- melage/resource/mri (copy).png +0 -0
- melage/resource/mri.png +0 -0
- melage/resource/mri0.png +0 -0
- melage/resource/mri000.png +0 -0
- melage/resource/mri22.png +0 -0
- melage/resource/mri_big.png +0 -0
- melage/resource/mri_old.png +0 -0
- melage/resource/mri_seg.png +0 -0
- melage/resource/mri_seg_old.png +0 -0
- melage/resource/new.png +0 -0
- melage/resource/open.png +0 -0
- melage/resource/open2.png +0 -0
- melage/resource/pan.png +0 -0
- melage/resource/pencil.png +0 -0
- melage/resource/pencilFaded.png +0 -0
- melage/resource/points.png +0 -0
- melage/resource/pointsFaded.png +0 -0
- melage/resource/rotate.png +0 -0
- melage/resource/ruler.png +0 -0
- melage/resource/rulerFaded.png +0 -0
- melage/resource/s.png +0 -0
- melage/resource/s.psd +0 -0
- melage/resource/save.png +0 -0
- melage/resource/saveas.png +0 -0
- melage/resource/seg_mri.png +0 -0
- melage/resource/seg_mri2.png +0 -0
- melage/resource/settings.png +0 -0
- melage/resource/synch.png +0 -0
- melage/resource/synchFaded.png +0 -0
- melage/resource/theme/rc/.keep +1 -0
- melage/resource/theme/rc/arrow_down.png +0 -0
- melage/resource/theme/rc/arrow_down@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_disabled.png +0 -0
- melage/resource/theme/rc/arrow_down_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_focus.png +0 -0
- melage/resource/theme/rc/arrow_down_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_pressed.png +0 -0
- melage/resource/theme/rc/arrow_down_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_left.png +0 -0
- melage/resource/theme/rc/arrow_left@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_disabled.png +0 -0
- melage/resource/theme/rc/arrow_left_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_focus.png +0 -0
- melage/resource/theme/rc/arrow_left_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_pressed.png +0 -0
- melage/resource/theme/rc/arrow_left_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_right.png +0 -0
- melage/resource/theme/rc/arrow_right@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_disabled.png +0 -0
- melage/resource/theme/rc/arrow_right_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_focus.png +0 -0
- melage/resource/theme/rc/arrow_right_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_pressed.png +0 -0
- melage/resource/theme/rc/arrow_right_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_up.png +0 -0
- melage/resource/theme/rc/arrow_up@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_disabled.png +0 -0
- melage/resource/theme/rc/arrow_up_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_focus.png +0 -0
- melage/resource/theme/rc/arrow_up_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_pressed.png +0 -0
- melage/resource/theme/rc/arrow_up_pressed@2x.png +0 -0
- melage/resource/theme/rc/base_icon.png +0 -0
- melage/resource/theme/rc/base_icon@2x.png +0 -0
- melage/resource/theme/rc/base_icon_disabled.png +0 -0
- melage/resource/theme/rc/base_icon_disabled@2x.png +0 -0
- melage/resource/theme/rc/base_icon_focus.png +0 -0
- melage/resource/theme/rc/base_icon_focus@2x.png +0 -0
- melage/resource/theme/rc/base_icon_pressed.png +0 -0
- melage/resource/theme/rc/base_icon_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_closed.png +0 -0
- melage/resource/theme/rc/branch_closed@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_disabled.png +0 -0
- melage/resource/theme/rc/branch_closed_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_focus.png +0 -0
- melage/resource/theme/rc/branch_closed_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_pressed.png +0 -0
- melage/resource/theme/rc/branch_closed_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_end.png +0 -0
- melage/resource/theme/rc/branch_end@2x.png +0 -0
- melage/resource/theme/rc/branch_end_disabled.png +0 -0
- melage/resource/theme/rc/branch_end_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_end_focus.png +0 -0
- melage/resource/theme/rc/branch_end_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_end_pressed.png +0 -0
- melage/resource/theme/rc/branch_end_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_line.png +0 -0
- melage/resource/theme/rc/branch_line@2x.png +0 -0
- melage/resource/theme/rc/branch_line_disabled.png +0 -0
- melage/resource/theme/rc/branch_line_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_line_focus.png +0 -0
- melage/resource/theme/rc/branch_line_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_line_pressed.png +0 -0
- melage/resource/theme/rc/branch_line_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_more.png +0 -0
- melage/resource/theme/rc/branch_more@2x.png +0 -0
- melage/resource/theme/rc/branch_more_disabled.png +0 -0
- melage/resource/theme/rc/branch_more_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_more_focus.png +0 -0
- melage/resource/theme/rc/branch_more_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_more_pressed.png +0 -0
- melage/resource/theme/rc/branch_more_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_open.png +0 -0
- melage/resource/theme/rc/branch_open@2x.png +0 -0
- melage/resource/theme/rc/branch_open_disabled.png +0 -0
- melage/resource/theme/rc/branch_open_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_open_focus.png +0 -0
- melage/resource/theme/rc/branch_open_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_open_pressed.png +0 -0
- melage/resource/theme/rc/branch_open_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked.png +0 -0
- melage/resource/theme/rc/checkbox_checked0.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x000.png.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_focus.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x00.png +0 -0
- melage/resource/theme/rc/line_horizontal.png +0 -0
- melage/resource/theme/rc/line_horizontal@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/line_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_focus.png +0 -0
- melage/resource/theme/rc/line_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/line_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/line_vertical.png +0 -0
- melage/resource/theme/rc/line_vertical@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_disabled.png +0 -0
- melage/resource/theme/rc/line_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_focus.png +0 -0
- melage/resource/theme/rc/line_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_pressed.png +0 -0
- melage/resource/theme/rc/line_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/radio_checked.png +0 -0
- melage/resource/theme/rc/radio_checked@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_disabled.png +0 -0
- melage/resource/theme/rc/radio_checked_disabled@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_focus.png +0 -0
- melage/resource/theme/rc/radio_checked_focus@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_pressed.png +0 -0
- melage/resource/theme/rc/radio_checked_pressed@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked.png +0 -0
- melage/resource/theme/rc/radio_unchecked@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_disabled.png +0 -0
- melage/resource/theme/rc/radio_unchecked_disabled@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_focus.png +0 -0
- melage/resource/theme/rc/radio_unchecked_focus@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_pressed.png +0 -0
- melage/resource/theme/rc/radio_unchecked_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_focus.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_focus.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_focus.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_focus.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/transparent.png +0 -0
- melage/resource/theme/rc/transparent@2x.png +0 -0
- melage/resource/theme/rc/transparent_disabled.png +0 -0
- melage/resource/theme/rc/transparent_disabled@2x.png +0 -0
- melage/resource/theme/rc/transparent_focus.png +0 -0
- melage/resource/theme/rc/transparent_focus@2x.png +0 -0
- melage/resource/theme/rc/transparent_pressed.png +0 -0
- melage/resource/theme/rc/transparent_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_close.png +0 -0
- melage/resource/theme/rc/window_close@2x.png +0 -0
- melage/resource/theme/rc/window_close_disabled.png +0 -0
- melage/resource/theme/rc/window_close_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_close_focus.png +0 -0
- melage/resource/theme/rc/window_close_focus@2x.png +0 -0
- melage/resource/theme/rc/window_close_pressed.png +0 -0
- melage/resource/theme/rc/window_close_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_grip.png +0 -0
- melage/resource/theme/rc/window_grip@2x.png +0 -0
- melage/resource/theme/rc/window_grip_disabled.png +0 -0
- melage/resource/theme/rc/window_grip_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_grip_focus.png +0 -0
- melage/resource/theme/rc/window_grip_focus@2x.png +0 -0
- melage/resource/theme/rc/window_grip_pressed.png +0 -0
- melage/resource/theme/rc/window_grip_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_minimize.png +0 -0
- melage/resource/theme/rc/window_minimize@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_disabled.png +0 -0
- melage/resource/theme/rc/window_minimize_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_focus.png +0 -0
- melage/resource/theme/rc/window_minimize_focus@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_pressed.png +0 -0
- melage/resource/theme/rc/window_minimize_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_undock.png +0 -0
- melage/resource/theme/rc/window_undock@2x.png +0 -0
- melage/resource/theme/rc/window_undock_disabled.png +0 -0
- melage/resource/theme/rc/window_undock_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_undock_focus.png +0 -0
- melage/resource/theme/rc/window_undock_focus@2x.png +0 -0
- melage/resource/theme/rc/window_undock_pressed.png +0 -0
- melage/resource/theme/rc/window_undock_pressed@2x.png +0 -0
- melage/resource/theme/style.qss +2223 -0
- melage/resource/tract.png +0 -0
- melage/resource/view1.png +0 -0
- melage/resource/view1_eco.png +0 -0
- melage/resource/view1_mri.png +0 -0
- melage/resource/view1_seg.png +0 -0
- melage/resource/view2.png +0 -0
- melage/resource/view2_seg.png +0 -0
- melage/resource/w.png +0 -0
- melage/resource/zoom_in.png +0 -0
- melage/resource/zoom_inFaded.png +0 -0
- melage/resource/zoom_out.png +0 -0
- melage/resource/zoom_outFaded.png +0 -0
- melage/some_notes.txt +3 -0
- melage/utils/DispalyIm.py +2788 -0
- melage/utils/GMM.py +720 -0
- melage/utils/Shaders_120.py +257 -0
- melage/utils/Shaders_330.py +314 -0
- melage/utils/Shaders_bu.py +314 -0
- melage/utils/__init__0.py +7 -0
- melage/utils/brain_extraction_helper.py +234 -0
- melage/utils/custom_QScrollBar.py +61 -0
- melage/utils/glScientific.py +1554 -0
- melage/utils/glScientific_bc.py +1585 -0
- melage/utils/readData.py +1061 -0
- melage/utils/registration.py +512 -0
- melage/utils/source_folder.py +18 -0
- melage/utils/utils.py +3808 -0
- melage/version.txt +1 -0
- melage/widgets/ApplyMask.py +212 -0
- melage/widgets/ChangeSystem.py +152 -0
- melage/widgets/DeepLModels/InfantSegment/Unet.py +464 -0
- melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py +149 -0
- melage/widgets/DeepLModels/NPP/models/checkpoints/npp_v1.pth.py +0 -0
- melage/widgets/DeepLModels/NPP/models/losses.py +146 -0
- melage/widgets/DeepLModels/NPP/models/model.py +272 -0
- melage/widgets/DeepLModels/NPP/models/utils.py +303 -0
- melage/widgets/DeepLModels/NPP/npp.py +116 -0
- melage/widgets/DeepLModels/NPP/requirements.txt +8 -0
- melage/widgets/DeepLModels/NPP/train/train.py +116 -0
- melage/widgets/DeepLModels/Unet3DAtt.py +657 -0
- melage/widgets/DeepLModels/Unet3D_basic.py +648 -0
- melage/widgets/DeepLModels/new_unet.py +652 -0
- melage/widgets/DeepLModels/new_unet_old.py +639 -0
- melage/widgets/DeepLModels/new_unet_old2.py +658 -0
- melage/widgets/HistImage.py +153 -0
- melage/widgets/ImageThresholding.py +222 -0
- melage/widgets/MaskOperations.py +147 -0
- melage/widgets/N4Dialog.py +241 -0
- melage/widgets/Segmentation/FCM.py +1553 -0
- melage/widgets/Segmentation/__init__.py +588 -0
- melage/widgets/Segmentation/utils.py +417 -0
- melage/widgets/SemiAutoSeg.py +666 -0
- melage/widgets/Synthstrip.py +141 -0
- melage/widgets/__init__0.py +5 -0
- melage/widgets/about.py +246 -0
- melage/widgets/activation.py +437 -0
- melage/widgets/activator.py +147 -0
- melage/widgets/be_dl.py +409 -0
- melage/widgets/be_dl_unet3d.py +441 -0
- melage/widgets/brain_extraction.py +855 -0
- melage/widgets/brain_extraction_dl.py +887 -0
- melage/widgets/brain_extraction_dl_bu.py +869 -0
- melage/widgets/colorwidget.py +100 -0
- melage/widgets/dockWidgets.py +2005 -0
- melage/widgets/enhanceImWidget.py +109 -0
- melage/widgets/fileDialog_widget.py +275 -0
- melage/widgets/iminfo.py +346 -0
- melage/widgets/mainwindow_widget.py +6775 -0
- melage/widgets/melageAbout.py +123 -0
- melage/widgets/openglWidgets.py +556 -0
- melage/widgets/registrationWidget.py +342 -0
- melage/widgets/repeat_widget.py +74 -0
- melage/widgets/screenshot_widget.py +138 -0
- melage/widgets/settings_widget.py +77 -0
- melage/widgets/tranformationWidget.py +275 -0
- melage-0.0.65.dist-info/METADATA +742 -0
- melage-0.0.65.dist-info/RECORD +501 -0
- melage-0.0.65.dist-info/WHEEL +5 -0
- melage-0.0.65.dist-info/entry_points.txt +2 -0
- melage-0.0.65.dist-info/top_level.txt +1 -0
@@ -0,0 +1,464 @@
+from functools import partial
+
+import numpy as np
+import torch.nn as nn
+import torch
+import math
+# from .model_utils import *
+import math
+import torch
+from functools import partial
+import torch.nn as nn
+from einops import repeat, rearrange
+
+
+
+class BlockLayer(nn.Module):
+    def __init__(self, num_blcks, block_layer, planes_in, planes_out, kernel_size=3, first_layer=False,
+                 input_size=None, norm_type='layer'):
+        super(BlockLayer, self).__init__()
+
+        self.blocks = nn.ModuleList()
+        for i in range(num_blcks):
+            if i == 0:
+                self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=first_layer,
+                                               input_size=input_size, norm_type=norm_type))
+            else:
+                self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size, first_layer=False,
+                                               input_size=input_size, norm_type=norm_type))
+            planes_in = planes_out
+
+    def forward(self, x):
+        for i, block in enumerate(self.blocks):
+            x = block(x)
+        return x
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, planes_in, planes_out, kernel_size=3, first_layer=False, input_size=128, norm_type='layer'):
+        super(ResidualBlock, self).__init__()
+
+        self.conv1 = ConvolutionalBlock(planes_in=planes_in, planes_out=planes_out, first_layer=first_layer,
+                                        kernel_size=kernel_size, dilation=1,
+                                        activation=nn.ELU, input_size=input_size, norm_type=norm_type)
+        self.conv2 = ConvolutionalBlock(planes_in=planes_out, planes_out=planes_out, first_layer=False,
+                                        kernel_size=1,
+                                        dilation=1, activation=nn.ELU, input_size=input_size, norm_type=norm_type)
+        if planes_in != planes_out:
+            self.sample = nn.Conv3d(planes_in, planes_out, (1, 1, 1), stride=(1, 1, 1), dilation=(1, 1, 1),
+                                    bias=True)  #
+        else:
+            self.sample = None
+
+    def forward(self, x):
+        identity = x.clone()
+
+        x = self.conv1(x)
+        x = self.conv2(x)
+
+        if self.sample is not None:
+            identity = self.sample(identity)
+
+        x += identity
+
+        return x
+
+
+class UnetEncoder(nn.Module):
+    def __init__(self, in_channel, base_inc_channel=8, layer=BlockLayer, block=None, layer_blocks=None,
+                 downsampling_stride=None, feature_dilation=1.5, layer_widths=None, kernel_size=3,
+                 norm_type='layer'):
+        super(UnetEncoder, self).__init__()
+
+        self.layers = nn.ModuleList()
+        self.downsampling_convolutions = nn.ModuleList()
+        self.attention_modules = nn.ModuleList()
+        self.downsampling_zarib = []
+        in_channel_layer = in_channel
+        input_size = 192
+        self._layers_with = []
+        # self._layers_with.append(base_inc_channel)
+        for i, num_blcks in enumerate(layer_blocks):
+            if layer_widths is not None:
+                out_channel_layer = layer_widths[i]
+            else:
+                out_channel_layer = base_inc_channel * int(feature_dilation ** (i + 1)) // 2
+
+            if i == 0:
+                first_layer = True
+            else:
+                first_layer = False
+            self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                     planes_in=in_channel_layer, planes_out=out_channel_layer,
+                                     kernel_size=kernel_size,
+                                     first_layer=first_layer, input_size=input_size,
+                                     norm_type=norm_type))
+            # self.attention_modules.append(Attention(out_channel_layer))
+            if i != len(layer_blocks) - 1:
+                # padding = kernel_size // 2  # constant size
+                downsampling_conv = nn.Conv3d(out_channel_layer, out_channel_layer, (3, 3, 3), padding=3 // 2,
+                                              stride=(downsampling_stride, downsampling_stride, downsampling_stride),
+                                              bias=True)
+                # downsampling_conv = nn.MaxPool3d(kernel_size=2, stride=2)
+
+                self.downsampling_convolutions.append(downsampling_conv)
+
+                input_size = input_size // 2
+            print("Encoder {}:".format(i), in_channel_layer, out_channel_layer)
+            self._layers_with.append(out_channel_layer)
+            in_channel_layer = out_channel_layer
+        self.out_channel_layer = in_channel_layer // 2
+        self.last_downsampling_conv = nn.Conv3d(out_channel_layer, out_channel_layer, (3, 3, 3),
+                                                padding=3 // 2,
+                                                stride=(downsampling_stride, downsampling_stride, downsampling_stride),
+                                                bias=True)
+        self.output_size = input_size
+
+    def forward(self, x):
+        outputs = list()
+        # outputs.insert(0, x)
+        for layer, downsampling in zip(self.layers[:-1], self.downsampling_convolutions):
+            x = layer(x)
+
+            outputs.insert(0, x)
+
+            x = downsampling(x)
+        outputs.insert(0, x)
+        x = self.layers[-1](x)
+        x = self.last_downsampling_conv(x)
+        # outputs.insert(0, x)  # bottle neck layer
+        return x, outputs
+
+
+class ConvolutionalBlock(nn.Module):
+    def __init__(self, planes_in, planes_out, first_layer=False, kernel_size=3, dilation=1, activation=None,
+                 input_size=None, norm_type='layer'):
+        super(ConvolutionalBlock, self).__init__()
+        if dilation == 1:
+            padding = kernel_size // 2  # constant size
+        else:
+            # (In + 2*padding - dilation * (kernel_size - 1) - 1)/stride + 1
+            if kernel_size == 3:
+                if dilation == 2:
+                    padding = 2
+                elif dilation == 4:
+                    padding = 4
+                elif dilation == 3:
+                    padding = 3
+                else:
+                    padding = None
+            elif kernel_size == 1:
+                padding = 0
+        self.activation = None
+        self.norm = None
+        if first_layer:
+            self.norm = nn.InstanceNorm3d(planes_in)
+            self.activation = activation()
+            self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                  padding=padding, bias=True,
+                                  dilation=(dilation, dilation, dilation))
+        else:
+            if activation is not None:
+                if norm_type.lower() == 'layer':
+                    self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                elif norm_type.lower() == 'group':
+                    valid_num_groups = np.array([16, 8, 4, 2])
+                    valid_num_groups = valid_num_groups[valid_num_groups < planes_in]
+                    num_groups = None
+                    for num_groups in valid_num_groups:
+                        if planes_in % num_groups != 0:
+                            break
+                    if num_groups is None:
+                        raise exit('Num groups can not be determined')
+                    self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=planes_in)
+                elif norm_type.lower() == 'batch':
+                    self.norm = nn.BatchNorm3d(planes_in)
+                elif norm_type.lower() == 'instance':
+                    self.norm = nn.InstanceNorm3d(planes_in)
+                else:
+                    self.norm = None
+
+                self.activation = activation()
+                self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                      padding=padding, bias=True,
+                                      dilation=(dilation, dilation, dilation))
+
+            else:
+                if norm_type.lower() == 'layer':
+                    if input_size < 120:
+                        self.norm = nn.LayerNorm([input_size, input_size, input_size])
+                    else:
+                        self.norm = nn.InstanceNorm3d(planes_in)
+                elif norm_type.lower() == 'group':
+                    valid_num_groups = [16, 8, 4, 2]
+                    valid_num_groups = valid_num_groups[valid_num_groups < planes_in]
+                    num_groups = None
+                    for num_groups in valid_num_groups:
+                        if planes_in % num_groups != 0:
+                            break
+                    if num_groups is None:
+                        raise exit('Num groups can not be determined')
+                    self.norm = nn.GroupNorm(num_groups=planes_in, num_channels=planes_in)
+                elif norm_type.lower() == 'batch':
+                    self.norm = nn.BatchNorm3d(planes_in)
+                elif norm_type.lower() == 'instance':
+                    self.norm = nn.InstanceNorm3d(planes_in)
+                else:
+                    self.norm = None
+
+                self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
+                                      padding=padding, bias=True,
+                                      dilation=(dilation, dilation, dilation))
+
+    def forward(self, x):
+        if self.norm is not None:
+            x = self.norm(x)
+
+        if self.activation is not None:
+            x = self.activation(x)
+
+        x = self.conv(x)
+
+        return x
+
+
+class UnetDecoder(nn.Module):
+    def __init__(self, in_channel, base_inc_channel=64, layer=BlockLayer, block=None, layer_blocks=[1, 1, 1, 1],
+                 feature_dilation=2, upsampling_stride=2, layer_widths=None, kernel_size=3,
+                 upsampling_mode="trilinear", align_corners=False, use_transposed_convolutions=False,
+                 last_cov_channels=256,
+                 norm_type='layer'
+                 ):
+        super(UnetDecoder, self).__init__()
+        self.layers = nn.ModuleList()
+
+        self.upsampling_blocks = nn.ModuleList()
+
+        self.attention_modules = nn.ModuleList()
+        in_channel_layer = in_channel
+        # input_size = 24
+        input_size = 16
+
+        for i, num_blcks in enumerate(layer_blocks):
+            if layer_widths is not None:
+                out_channel_layer = layer_widths[i]
+            else:
+                out_channel_layer = base_inc_channel // (feature_dilation ** (i))
+
+            if i == 0:
+                first_layer = True
+                self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                         planes_in=last_cov_channels, planes_out=out_channel_layer,
+                                         kernel_size=kernel_size,
+                                         first_layer=first_layer, input_size=input_size, norm_type=norm_type))
+            else:
+                first_layer = False
+
+                self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
+                                         planes_in=in_channel_layer + layer_widths[i - 1], planes_out=out_channel_layer,
+                                         kernel_size=kernel_size,
+                                         first_layer=first_layer, input_size=input_size, norm_type=norm_type))
+
+
+            # self.upsampling_blocks.append(nn.ConvTranspose3d(out_channel_layer, out_channel_layer, kernel_size=2,
+            #                                                  stride=upsampling_stride, padding=0))
+            self.upsampling_blocks.append(nn.Upsample(scale_factor=2, mode='trilinear'))
+
+            input_size = input_size * 2
+            last_cov_channels = in_channel_layer  # last_cov_channels//2
+            print("Decoder {}:".format(i), in_channel_layer, out_channel_layer)
+            in_channel_layer = out_channel_layer
+        self.out_channel_layer = in_channel_layer
+
+    def forward(self, y, x):
+        i = 0
+        outputs = list()
+        # y = x[0]
+        for up, lay in zip(self.upsampling_blocks, self.layers[:-1]):
+            if i == 0:
+                y = lay(y)
+            else:
+                y = lay(y)
+            outputs.insert(0, y)
+            y = up(y)
+            y = torch.cat([y, x[i]], 1)
+
+            # y = att(y)
+            # y = torch.cat([y, x[i + 1]], 1)
+            i += 1
+        outputs.insert(0, y)
+        y = self.layers[-1](y)
+        y = up(y)
+        outputs.insert(0, y)
+        return y, outputs
+
+
+class Attention(nn.Module):
+    def __init__(self, dim, heads=4, dim_head=16):
+        super().__init__()
+        self.scale = dim_head ** -0.5
+        self.heads = heads
+        hidden_dim = dim_head * heads
+
+        self.to_qkv = nn.Conv3d(dim, hidden_dim * 3, 1, bias=False)
+        self.to_out = nn.Conv3d(hidden_dim, dim // 2, 1)
+
+    def forward(self, x, mask=None):
+        b, c, h, w, z = x.shape
+        qkv = self.to_qkv(x).chunk(3, dim=1)
+        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h=self.heads), qkv)
+
+        scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
+        attention = torch.softmax(scaled_dot_prod, dim=-1)
+        v = v / (h * w * z)
+        atv = torch.einsum('... i j , ... j d -> ... i d', attention, v)
+        out = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
+        return self.to_out(out)
+
+
+class UnetGen(nn.Module):
+    def __init__(self, base_inc_channel=8,
+                 feature_dilation=2, downsampling_stride=2,
+                 encoder_class=UnetEncoder, layer_widths=None, block=None,
+                 kernel_size=3, interpolation_mode="trilinear", decoder_class=None,
+                 use_transposed_convolutions=True, norm_type='layer', outChannels=3):
+        super(UnetGen, self).__init__()
+
+        use_transposed_convolutions = self.use_tr_conv
+        inblock = 16
+        base_inc_channel = inblock
+        self.base_inc_channel = base_inc_channel
+
+        # encoder_blocks = [1, 1, 1, 1, 1, 1]
+
+        # decoder_blocks = [1, 1, 1, 1, 1, 1]
+        # encoder_blocks = [1, 1, 1]
+
+        # decoder_blocks = [1, 1, 1]
+
+        encoder_blocks = [1, 1, 1, 1, 1]
+
+        decoder_blocks = [1, 1, 1, 1, 1]
+
+        padding = kernel_size // 2  # constant size
+        self.before_encoder = nn.Conv3d(1, inblock, kernel_size=(3, 3, 3),
+                                        stride=(1, 1, 1), padding=3 // 2,
+                                        bias=True)
+
+        self.encoder = encoder_class(in_channel=inblock, base_inc_channel=base_inc_channel, layer_blocks=encoder_blocks,
+                                     block=block,
+                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
+                                     layer_widths=layer_widths, kernel_size=kernel_size,
+                                     norm_type=norm_type)
+
+        layer_widths = self.encoder._layers_with
+        in_channel = layer_widths[-1]
+        self.BottleNeck = BlockLayer(num_blcks=1, block_layer=block,
+                                     planes_in=in_channel, planes_out=in_channel // 2,
+                                     kernel_size=kernel_size,
+                                     first_layer=False, input_size=self.encoder.output_size, norm_type=norm_type)
+
+        # self.BottleNeck_att = Attention(in_channel)
+
+        layer_widths = layer_widths[::-1]  # [1:]
+        layer_widths[0] = layer_widths[0] // 2
+        layer_widths[-1] = layer_widths[-2]
+
+        in_channel = in_channel // 2
+        self.decoder = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel * 8,
+                                     layer_blocks=decoder_blocks,
+                                     block=block, last_cov_channels=self.encoder.out_channel_layer,
+                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                     use_transposed_convolutions=use_transposed_convolutions,
+                                     kernel_size=kernel_size, norm_type=norm_type,
+                                     )
+
+        kernel_size = 3
+
+        # self.last_convolution_rec = BlockLayer(num_blcks=1, block_layer=block,
+        #                                        planes_in=inblock * 2, planes_out=inblock * 4,
+        #                                        kernel_size=kernel_size,
+        #                                        first_layer=False, input_size=192, norm_type=norm_type)
+
+        """
+
+        self.decoder2 = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
+                                      block=block, last_cov_channels = self.encoder.out_channel_layer,
+                                      upsampling_mode=interpolation_mode, layer_widths=layer_widths,
+                                      use_transposed_convolutions=use_transposed_convolutions,
+                                      kernel_size=kernel_size, norm_type=norm_type,
+                                      )
+        self.last_convolution_rec2 = BlockLayer(num_blcks=1, block_layer=block,
+                                                planes_in=inblock*2, planes_out=inblock//2,
+                                                kernel_size=kernel_size,
+                                                first_layer=False, input_size=192, norm_type=norm_type)
+        self.final_convolution_rec2 = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                                stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+        """
+        kernel_size = 1
+        self.final_convolution_rec = nn.Conv3d(inblock * 3, outChannels, kernel_size=(kernel_size, kernel_size, kernel_size),
+                                               stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+        # self.seg_layer = nn.Conv3d(inblock * 3, outChannels, kernel_size=(kernel_size, kernel_size, kernel_size),
+        #                            stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
+        self.activation = nn.Softmax(dim=1)
+        self.sigmoid = nn.Sigmoid()
+
+    def forward(self, y):
+        y = self.before_encoder(y)
+
+        y1, x = self.encoder(y)
+        y1 = self.BottleNeck(y1)
+        # x[0] = self.BottleNeck_att(x[0])
+
+        x1, _ = self.decoder(y1, x)
+
+        x1 = torch.cat([x1, y], 1)
+        # x = (x * mask)
+        # z1 = self.last_convolution_rec(x1)
+        z1 = self.final_convolution_rec(x1)
+        z2 = 0  # self.seg_layer(x1)
+
+        """
+
+        x2, _ = self.decoder2(x)
+
+        x2 = torch.cat([x2, y], 1)
+        # x = (x * mask)
+        z2 = self.last_convolution_rec2(x2)
+        z2 = self.final_convolution_rec2(z2)
+        """
+        return z1, z2
+
+
+class MGA_NET(UnetGen):
+    def __init__(self, channels=3, *args, encoder_class=UnetEncoder, **kwargs):
+        self.use_tr_conv = False
+
+        norm_type = "instance"
+        super().__init__(encoder_class=encoder_class, decoder_class=UnetDecoder,
+                         block=ResidualBlock, outChannels=channels, norm_type=norm_type, **kwargs)
+
+        self.channels = channels
+        self.netName = 'MGA_NET'
+
+    def name(self):
+        return 'MGA_NET'
+
+
+class Segmentor(nn.Module):
+    """
+    Segmentor
+    """
+
+    def __init__(self,
+                 channels=30):
+        """
+
+        """
+        super().__init__()
+        self.seg_model = MGA_NET(channels=channels)
+
+    def forward(self, source, shape=None):
+        pred_logits, pred_logits_seg = self.seg_model(source)
+
+        return pred_logits, pred_logits_seg
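
Judging by its +464 line count in the file list above, this hunk appears to correspond to melage/widgets/DeepLModels/InfantSegment/Unet.py, which defines the MGA_NET 3D U-Net (residual convolutional blocks, five stride-2 downsampling stages, trilinear upsampling) wrapped by the Segmentor class. The following is a minimal smoke-test sketch, not part of the package; it assumes the wheel and its deep-learning dependencies (torch, numpy, einops) are installed and that the module is importable under the path shown in the file list. The 64-voxel cube is an arbitrary choice; any cube whose side is divisible by 32 survives the five downsampling/upsampling stages and the skip-connection concatenations.

    # Hypothetical smoke test for the Segmentor defined in the hunk above.
    import torch
    from melage.widgets.DeepLModels.InfantSegment.Unet import Segmentor  # assumed import path

    model = Segmentor(channels=30).eval()      # wraps MGA_NET; prints per-layer channel sizes on construction
    volume = torch.rand(1, 1, 64, 64, 64)      # (batch, channel, D, H, W); side divisible by 32
    with torch.no_grad():
        logits, placeholder = model(volume)    # second output is the constant 0 in this version
    print(logits.shape)                        # expected: torch.Size([1, 30, 64, 64, 64])

The output keeps the input resolution because the decoder upsamples exactly as many times as the encoder downsamples and the final 1x1x1 convolution maps the concatenated features to the requested number of channels.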
@@ -0,0 +1,149 @@
+import os.path
+from torch.utils.data import Dataset
+import nibabel as nib
+from multiprocessing import Manager
+import random
+import torchio as tio
+import pickle
+import torch
+
+error_list = [5298, 5894, 393, 66, 68, 6576, 76, 6653, 347, 6218, 804, 844,
+              5445, 751, 6075, 5501, 368, 236, 269, 5472, 494, 6711, 5571, 6543,
+              5837, 5586, 6663, 791, 6113, 318, 299, 688, 323, 26, 6496, 5746]
+def Generate_dataset():
+
+    # This if else sentence is used to decide whether enables dataset cache funciton
+    if False:
+        cache = DatasetCache(None, use_cache=False)
+        manager2 = Manager()
+        cache2 = DatasetCache(manager2, use_cache=True)
+    else:
+        manager = None
+        cache = DatasetCache(manager, use_cache=False)
+        manager2 = None
+        cache2 = DatasetCache(manager2, use_cache=False)
+
+    # Loading all availible files
+    if os.path.exists('dataset/pretrain_files_list.pkl'):
+        with open('dataset/pretrain_files_list.pkl', 'rb') as file:
+            files_list = pickle.load(file)
+
+    # splitting the training and testing datasets. The Oasis will be treated as testing while the rest of them will be trated as training dataset.
+    testing_set = files_list[0:1]
+
+    training_sets = files_list
+    del training_sets[0]
+
+    training_sets[2] = [i.replace('GSP', 'GSP/FS_4p5') for i in training_sets[2]]
+    training_sets_path = sum(training_sets, [])
+    for index in sorted(error_list, reverse=True):
+        del training_sets_path[index]
+    training_sets_path = ['/scratch/datasets/xh278/orig' + i for i in training_sets_path]
+    testing_set_path = sum(testing_set, [])
+
+    train_dataset = VoxelDataset(training_sets_path, cache=cache, train=True)
+    val_dataset = VoxelDataset(testing_set_path, cache=cache2, train=False)
+
+    return train_dataset, val_dataset
+
+class DatasetCache(object):
+    def __init__(self, manager, use_cache=True):
+        self.use_cache = use_cache
+        self.manager = manager
+        if self.manager is not None:
+            self._dict = manager.dict()
+
+    def is_cached(self, key):
+        if not self.use_cache:
+            return False
+        return str(key) in self._dict
+
+    def reset(self):
+        self._dict.clear()
+
+    def get(self, key):
+        if not self.use_cache:
+            raise AttributeError('Data caching is disabled and get funciton is unavailable! Check your config.')
+        return self._dict[str(key)]
+
+    def cache(self, key, img, lbl):
+        # only store if full data in memory is enabled
+        if not self.use_cache:
+            return
+        # only store if not already cached
+        if str(key) in self._dict:
+            return
+        self._dict[str(key)] = (img, lbl)
+
+
+class VoxelDataset(Dataset):
+    def __init__(self, norm_file_path, rescale_sdf=True, cache=None, train=True):
+        # note that input image paths are for processed images rather than unprocessed
+        self.norm_file_path = [i.replace('proc/ForAdrian_Talairach', 'orig').replace('talairach/', '') for i in norm_file_path]
+        self.orig_file_path = [i.replace('norm.mgz', 'orig.mgz').replace('talairach/', '') for i in self.norm_file_path]
+
+        self.intensity_spatial_norm_file_path = [i.replace('orig.mgz', 'talairach/norm.mgz') for i in self.orig_file_path]
+        self.intensity_norm_file_path = [i.replace('orig.mgz', 'norm.mgz') for i in self.orig_file_path]
+        self.seg_path = [i.replace('orig.mgz', 'talairach/aseg.mgz') for i in self.orig_file_path]
+
+        removal = []
+        for i in range(len(self.seg_path)):
+            if not os.path.exists(self.seg_path[i]):
+                removal.append(i)
+        removal = []
+
+        for i in reversed(removal):
+            self.seg_path.pop(i)
+            self.intensity_spatial_norm_file_path.pop(i)
+            self.orig_file_path.pop(i)
+        self.rescale_sdf = rescale_sdf
+        self.cache = cache
+        self.train = train
+        self.transform = tio.Compose([
+            tio.RandomGamma(log_gamma=(-0.3, 0.3), p=0.3),
+        ]
+        )
+    def __len__(self):
+        return len(self.orig_file_path)
+
+    def __getitem__(self, index):
+        image_resolution = 256
+        normalization = 255
+        output_index = [index]
+        for cur_index in output_index:
+            array = nib.load(self.orig_file_path[cur_index])
+            input = array.get_fdata()
+            input = torch.FloatTensor(input)
+            input = input.unsqueeze(0).unsqueeze(0)
+            if image_resolution != 256:
+                input = torch.nn.functional.interpolate(input, size=[image_resolution, image_resolution, image_resolution], mode='trilinear', align_corners=False)[0]
+            else:
+                input = torch.Tensor(input)[0]
+
+            array = nib.load(self.intensity_spatial_norm_file_path[cur_index])
+            intensity_spatial_norm = array.get_fdata()
+            intensity_spatial_norm = torch.FloatTensor(intensity_spatial_norm)
+            intensity_spatial_norm = intensity_spatial_norm.unsqueeze(0).unsqueeze(0)
+            if image_resolution != 256:
+                intensity_spatial_norm = torch.nn.functional.interpolate(intensity_spatial_norm, size=[image_resolution, image_resolution, image_resolution], mode='trilinear', align_corners=False)[0]
+            else:
+                intensity_spatial_norm = torch.Tensor(intensity_spatial_norm)[0]
+
+            seg = intensity_spatial_norm.unsqueeze(0)
+            if image_resolution != 256:
+                seg = torch.nn.functional.interpolate(seg, size=[image_resolution, image_resolution, image_resolution], mode='nearest')[0]
+            else:
+                seg = torch.Tensor(seg)[0]
+            if self.train:
+                subject = tio.Subject(image=tio.ScalarImage(tensor=input))
+                transformed = self.transform(subject)
+                input = transformed['image'].data
+                input = input.clip(0, normalization) / normalization
+                intensity_spatial_norm = intensity_spatial_norm.clip(0, normalization) / normalization
+            else:
+                input = input.clip(0, normalization) / normalization
+                intensity_spatial_norm = intensity_spatial_norm.clip(0, normalization) / normalization
+            return input, intensity_spatial_norm, seg, cur_index
+
+
+
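
This second hunk's +149 line count matches melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py in the file list, so it appears to be that training-dataset module. VoxelDataset itself is not reproducible outside the authors' environment: it rewrites the supplied norm.mgz paths into a FreeSurfer-style tree (orig.mgz, talairach/norm.mgz, talairach/aseg.mgz) and Generate_dataset hard-codes a /scratch/datasets/xh278 prefix. The DatasetCache helper, however, can be exercised on its own; the sketch below is a hypothetical check, assuming the module is importable under the inferred path and that its imports (torch, torchio, nibabel) are installed.

    # Hypothetical check of the shared-memory DatasetCache from the hunk above.
    from multiprocessing import Manager
    from melage.widgets.DeepLModels.NPP.dataset.mri_dataset_affine import DatasetCache  # assumed import path

    if __name__ == '__main__':                          # guard needed because Manager() spawns a helper process
        cache = DatasetCache(Manager(), use_cache=True)  # Manager().dict() can be shared across DataLoader workers
        cache.cache('sub-001', img='image placeholder', lbl='label placeholder')
        print(cache.is_cached('sub-001'))                # True
        print(cache.get('sub-001'))                      # ('image placeholder', 'label placeholder')

Keys are stringified before lookup and storage, so any hashable index (an integer sample index, a subject ID) works; with use_cache=False, cache() becomes a no-op and get() raises, which is the configuration Generate_dataset actually selects in this version.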