melage 0.0.65__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- melage/__init__.py +16 -0
- melage/cli.py +4 -0
- melage/graphics/GLGraphicsItem.py +286 -0
- melage/graphics/GLViewWidget.py +595 -0
- melage/graphics/Transform3D.py +55 -0
- melage/graphics/__init__.py +8 -0
- melage/graphics/functions.py +101 -0
- melage/graphics/items/GLAxisItem.py +149 -0
- melage/graphics/items/GLGridItem.py +178 -0
- melage/graphics/items/GLPolygonItem.py +77 -0
- melage/graphics/items/GLScatterPlotItem.py +135 -0
- melage/graphics/items/GLVolumeItem.py +280 -0
- melage/graphics/items/GLVolumeItem_b.py +237 -0
- melage/graphics/items/__init__.py +0 -0
- melage/graphics/shaders.py +202 -0
- melage/main.py +270 -0
- melage/requirements22.txt +25 -0
- melage/requirements_old.txt +28 -0
- melage/resource/0circle.png +0 -0
- melage/resource/0circle_faded.png +0 -0
- melage/resource/3d.png +0 -0
- melage/resource/3d.psd +0 -0
- melage/resource/3dFaded.png +0 -0
- melage/resource/Eraser.png +0 -0
- melage/resource/EraserFaded.png +0 -0
- melage/resource/EraserX.png +0 -0
- melage/resource/EraserXFaded.png +0 -0
- melage/resource/Eraser_icon.svg +79 -0
- melage/resource/Hand.png +0 -0
- melage/resource/HandIcons_0.png +0 -0
- melage/resource/Hand_IX.png +0 -0
- melage/resource/Hand_IXFaded.png +0 -0
- melage/resource/Handsqueezed.png +0 -0
- melage/resource/Handwriting (copy).png +0 -0
- melage/resource/Handwriting.png +0 -0
- melage/resource/HandwritingMinus.png +0 -0
- melage/resource/HandwritingMinusX.png +0 -0
- melage/resource/HandwritingPlus.png +0 -0
- melage/resource/HandwritingPlusX.png +0 -0
- melage/resource/Move_icon.svg +8 -0
- melage/resource/PngItem_2422924.png +0 -0
- melage/resource/about.png +0 -0
- melage/resource/about_logo.png +0 -0
- melage/resource/about_logo0.png +0 -0
- melage/resource/action_check.png +0 -0
- melage/resource/action_check_OFF.png +0 -0
- melage/resource/arrow).png +0 -0
- melage/resource/arrow.png +0 -0
- melage/resource/arrowFaded.png +0 -0
- melage/resource/arrow_org.png +0 -0
- melage/resource/arrow_org.png.png +0 -0
- melage/resource/arrows.png +0 -0
- melage/resource/authors.mp4 +0 -0
- melage/resource/box.png +0 -0
- melage/resource/check-image-icon-0.jpg +0 -0
- melage/resource/circle.png +0 -0
- melage/resource/circle_faded.png +0 -0
- melage/resource/circle_or.png +0 -0
- melage/resource/close.png +0 -0
- melage/resource/close_bg.png +0 -0
- melage/resource/color/Simple.txt +18 -0
- melage/resource/color/Tissue.txt +24 -0
- melage/resource/color/Tissue12.txt +27 -0
- melage/resource/color/albert_LUT.txt +102 -0
- melage/resource/color/mcrib_LUT.txt +102 -0
- melage/resource/color/pediatric1.txt +29 -0
- melage/resource/color/pediatric1_old.txt +27 -0
- melage/resource/color/pediatric2.txt +87 -0
- melage/resource/color/pediatric3.txt +29 -0
- melage/resource/color/pediatrics (copy).csv +103 -0
- melage/resource/color/tissue_seg.txt +4 -0
- melage/resource/contour.png +0 -0
- melage/resource/contour.svg +2 -0
- melage/resource/contourFaded.png +0 -0
- melage/resource/contourX.png +0 -0
- melage/resource/contourXFaded.png +0 -0
- melage/resource/dti.png +0 -0
- melage/resource/dti0.png +0 -0
- melage/resource/dti222.png +0 -0
- melage/resource/dti_or.png +0 -0
- melage/resource/eco.png +0 -0
- melage/resource/eco22.png +0 -0
- melage/resource/eco_old.png +0 -0
- melage/resource/eco_or.png +0 -0
- melage/resource/eco_or2.png +0 -0
- melage/resource/eco_seg.png +0 -0
- melage/resource/eco_seg_old.png +0 -0
- melage/resource/export.png +0 -0
- melage/resource/hand-grab-icon-10.jpg +0 -0
- melage/resource/hand-grab-icon-25.jpg +0 -0
- melage/resource/info.png +0 -0
- melage/resource/line.png +0 -0
- melage/resource/linefaded.png +0 -0
- melage/resource/load.png +0 -0
- melage/resource/main.ico +0 -0
- melage/resource/manual_images/3D_rightc.png +0 -0
- melage/resource/manual_images/3D_rightc_goto.png +0 -0
- melage/resource/manual_images/3D_rightc_paint.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_draw1.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_draw2.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render2.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render3.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render4.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render5.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render6.png +0 -0
- melage/resource/manual_images/3D_rightc_seg.png +0 -0
- melage/resource/manual_images/exit_toolbar.png +0 -0
- melage/resource/manual_images/load_image_file.png +0 -0
- melage/resource/manual_images/load_image_file_openp.png +0 -0
- melage/resource/manual_images/main_page.png +0 -0
- melage/resource/manual_images/menu_file.png +0 -0
- melage/resource/manual_images/menu_file_export.png +0 -0
- melage/resource/manual_images/menu_file_import.png +0 -0
- melage/resource/manual_images/menu_file_settings.png +0 -0
- melage/resource/manual_images/menu_file_ss.png +0 -0
- melage/resource/manual_images/open_save_load.png +0 -0
- melage/resource/manual_images/panning_toolbar.png +0 -0
- melage/resource/manual_images/segmentation_toolbar.png +0 -0
- melage/resource/manual_images/tab_mri.png +0 -0
- melage/resource/manual_images/tab_us.png +0 -0
- melage/resource/manual_images/tabs.png +0 -0
- melage/resource/manual_images/toolbar_tools.png +0 -0
- melage/resource/manual_images/tools_basic.png +0 -0
- melage/resource/manual_images/tools_bet.png +0 -0
- melage/resource/manual_images/tools_cs.png +0 -0
- melage/resource/manual_images/tools_deepbet.png +0 -0
- melage/resource/manual_images/tools_imageinfo.png +0 -0
- melage/resource/manual_images/tools_maskO.png +0 -0
- melage/resource/manual_images/tools_masking.png +0 -0
- melage/resource/manual_images/tools_n4b.png +0 -0
- melage/resource/manual_images/tools_resize.png +0 -0
- melage/resource/manual_images/tools_ruler.png +0 -0
- melage/resource/manual_images/tools_seg.png +0 -0
- melage/resource/manual_images/tools_threshold.png +0 -0
- melage/resource/manual_images/tools_tools.png +0 -0
- melage/resource/manual_images/widget_color.png +0 -0
- melage/resource/manual_images/widget_color_add.png +0 -0
- melage/resource/manual_images/widget_color_add2.png +0 -0
- melage/resource/manual_images/widget_color_additional.png +0 -0
- melage/resource/manual_images/widget_images.png +0 -0
- melage/resource/manual_images/widget_images2.png +0 -0
- melage/resource/manual_images/widget_images3.png +0 -0
- melage/resource/manual_images/widget_marker.png +0 -0
- melage/resource/manual_images/widget_mri.png +0 -0
- melage/resource/manual_images/widget_mri2.png +0 -0
- melage/resource/manual_images/widget_segintensity.png +0 -0
- melage/resource/manual_images/widget_tab_mutualview.png +0 -0
- melage/resource/manual_images/widget_tab_mutualview2.png +0 -0
- melage/resource/manual_images/widget_table.png +0 -0
- melage/resource/manual_images/widget_table2.png +0 -0
- melage/resource/manual_images/widget_us.png +0 -0
- melage/resource/melage_top.ico +0 -0
- melage/resource/melage_top.png +0 -0
- melage/resource/melage_top0.png +0 -0
- melage/resource/melage_top1.png +0 -0
- melage/resource/melage_top4.png +0 -0
- melage/resource/mri (copy).png +0 -0
- melage/resource/mri.png +0 -0
- melage/resource/mri0.png +0 -0
- melage/resource/mri000.png +0 -0
- melage/resource/mri22.png +0 -0
- melage/resource/mri_big.png +0 -0
- melage/resource/mri_old.png +0 -0
- melage/resource/mri_seg.png +0 -0
- melage/resource/mri_seg_old.png +0 -0
- melage/resource/new.png +0 -0
- melage/resource/open.png +0 -0
- melage/resource/open2.png +0 -0
- melage/resource/pan.png +0 -0
- melage/resource/pencil.png +0 -0
- melage/resource/pencilFaded.png +0 -0
- melage/resource/points.png +0 -0
- melage/resource/pointsFaded.png +0 -0
- melage/resource/rotate.png +0 -0
- melage/resource/ruler.png +0 -0
- melage/resource/rulerFaded.png +0 -0
- melage/resource/s.png +0 -0
- melage/resource/s.psd +0 -0
- melage/resource/save.png +0 -0
- melage/resource/saveas.png +0 -0
- melage/resource/seg_mri.png +0 -0
- melage/resource/seg_mri2.png +0 -0
- melage/resource/settings.png +0 -0
- melage/resource/synch.png +0 -0
- melage/resource/synchFaded.png +0 -0
- melage/resource/theme/rc/.keep +1 -0
- melage/resource/theme/rc/arrow_down.png +0 -0
- melage/resource/theme/rc/arrow_down@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_disabled.png +0 -0
- melage/resource/theme/rc/arrow_down_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_focus.png +0 -0
- melage/resource/theme/rc/arrow_down_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_pressed.png +0 -0
- melage/resource/theme/rc/arrow_down_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_left.png +0 -0
- melage/resource/theme/rc/arrow_left@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_disabled.png +0 -0
- melage/resource/theme/rc/arrow_left_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_focus.png +0 -0
- melage/resource/theme/rc/arrow_left_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_pressed.png +0 -0
- melage/resource/theme/rc/arrow_left_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_right.png +0 -0
- melage/resource/theme/rc/arrow_right@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_disabled.png +0 -0
- melage/resource/theme/rc/arrow_right_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_focus.png +0 -0
- melage/resource/theme/rc/arrow_right_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_pressed.png +0 -0
- melage/resource/theme/rc/arrow_right_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_up.png +0 -0
- melage/resource/theme/rc/arrow_up@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_disabled.png +0 -0
- melage/resource/theme/rc/arrow_up_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_focus.png +0 -0
- melage/resource/theme/rc/arrow_up_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_pressed.png +0 -0
- melage/resource/theme/rc/arrow_up_pressed@2x.png +0 -0
- melage/resource/theme/rc/base_icon.png +0 -0
- melage/resource/theme/rc/base_icon@2x.png +0 -0
- melage/resource/theme/rc/base_icon_disabled.png +0 -0
- melage/resource/theme/rc/base_icon_disabled@2x.png +0 -0
- melage/resource/theme/rc/base_icon_focus.png +0 -0
- melage/resource/theme/rc/base_icon_focus@2x.png +0 -0
- melage/resource/theme/rc/base_icon_pressed.png +0 -0
- melage/resource/theme/rc/base_icon_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_closed.png +0 -0
- melage/resource/theme/rc/branch_closed@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_disabled.png +0 -0
- melage/resource/theme/rc/branch_closed_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_focus.png +0 -0
- melage/resource/theme/rc/branch_closed_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_pressed.png +0 -0
- melage/resource/theme/rc/branch_closed_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_end.png +0 -0
- melage/resource/theme/rc/branch_end@2x.png +0 -0
- melage/resource/theme/rc/branch_end_disabled.png +0 -0
- melage/resource/theme/rc/branch_end_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_end_focus.png +0 -0
- melage/resource/theme/rc/branch_end_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_end_pressed.png +0 -0
- melage/resource/theme/rc/branch_end_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_line.png +0 -0
- melage/resource/theme/rc/branch_line@2x.png +0 -0
- melage/resource/theme/rc/branch_line_disabled.png +0 -0
- melage/resource/theme/rc/branch_line_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_line_focus.png +0 -0
- melage/resource/theme/rc/branch_line_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_line_pressed.png +0 -0
- melage/resource/theme/rc/branch_line_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_more.png +0 -0
- melage/resource/theme/rc/branch_more@2x.png +0 -0
- melage/resource/theme/rc/branch_more_disabled.png +0 -0
- melage/resource/theme/rc/branch_more_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_more_focus.png +0 -0
- melage/resource/theme/rc/branch_more_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_more_pressed.png +0 -0
- melage/resource/theme/rc/branch_more_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_open.png +0 -0
- melage/resource/theme/rc/branch_open@2x.png +0 -0
- melage/resource/theme/rc/branch_open_disabled.png +0 -0
- melage/resource/theme/rc/branch_open_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_open_focus.png +0 -0
- melage/resource/theme/rc/branch_open_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_open_pressed.png +0 -0
- melage/resource/theme/rc/branch_open_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked.png +0 -0
- melage/resource/theme/rc/checkbox_checked0.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x000.png.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_focus.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x00.png +0 -0
- melage/resource/theme/rc/line_horizontal.png +0 -0
- melage/resource/theme/rc/line_horizontal@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/line_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_focus.png +0 -0
- melage/resource/theme/rc/line_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/line_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/line_vertical.png +0 -0
- melage/resource/theme/rc/line_vertical@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_disabled.png +0 -0
- melage/resource/theme/rc/line_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_focus.png +0 -0
- melage/resource/theme/rc/line_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_pressed.png +0 -0
- melage/resource/theme/rc/line_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/radio_checked.png +0 -0
- melage/resource/theme/rc/radio_checked@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_disabled.png +0 -0
- melage/resource/theme/rc/radio_checked_disabled@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_focus.png +0 -0
- melage/resource/theme/rc/radio_checked_focus@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_pressed.png +0 -0
- melage/resource/theme/rc/radio_checked_pressed@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked.png +0 -0
- melage/resource/theme/rc/radio_unchecked@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_disabled.png +0 -0
- melage/resource/theme/rc/radio_unchecked_disabled@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_focus.png +0 -0
- melage/resource/theme/rc/radio_unchecked_focus@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_pressed.png +0 -0
- melage/resource/theme/rc/radio_unchecked_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_focus.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_focus.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_focus.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_focus.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/transparent.png +0 -0
- melage/resource/theme/rc/transparent@2x.png +0 -0
- melage/resource/theme/rc/transparent_disabled.png +0 -0
- melage/resource/theme/rc/transparent_disabled@2x.png +0 -0
- melage/resource/theme/rc/transparent_focus.png +0 -0
- melage/resource/theme/rc/transparent_focus@2x.png +0 -0
- melage/resource/theme/rc/transparent_pressed.png +0 -0
- melage/resource/theme/rc/transparent_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_close.png +0 -0
- melage/resource/theme/rc/window_close@2x.png +0 -0
- melage/resource/theme/rc/window_close_disabled.png +0 -0
- melage/resource/theme/rc/window_close_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_close_focus.png +0 -0
- melage/resource/theme/rc/window_close_focus@2x.png +0 -0
- melage/resource/theme/rc/window_close_pressed.png +0 -0
- melage/resource/theme/rc/window_close_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_grip.png +0 -0
- melage/resource/theme/rc/window_grip@2x.png +0 -0
- melage/resource/theme/rc/window_grip_disabled.png +0 -0
- melage/resource/theme/rc/window_grip_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_grip_focus.png +0 -0
- melage/resource/theme/rc/window_grip_focus@2x.png +0 -0
- melage/resource/theme/rc/window_grip_pressed.png +0 -0
- melage/resource/theme/rc/window_grip_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_minimize.png +0 -0
- melage/resource/theme/rc/window_minimize@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_disabled.png +0 -0
- melage/resource/theme/rc/window_minimize_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_focus.png +0 -0
- melage/resource/theme/rc/window_minimize_focus@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_pressed.png +0 -0
- melage/resource/theme/rc/window_minimize_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_undock.png +0 -0
- melage/resource/theme/rc/window_undock@2x.png +0 -0
- melage/resource/theme/rc/window_undock_disabled.png +0 -0
- melage/resource/theme/rc/window_undock_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_undock_focus.png +0 -0
- melage/resource/theme/rc/window_undock_focus@2x.png +0 -0
- melage/resource/theme/rc/window_undock_pressed.png +0 -0
- melage/resource/theme/rc/window_undock_pressed@2x.png +0 -0
- melage/resource/theme/style.qss +2223 -0
- melage/resource/tract.png +0 -0
- melage/resource/view1.png +0 -0
- melage/resource/view1_eco.png +0 -0
- melage/resource/view1_mri.png +0 -0
- melage/resource/view1_seg.png +0 -0
- melage/resource/view2.png +0 -0
- melage/resource/view2_seg.png +0 -0
- melage/resource/w.png +0 -0
- melage/resource/zoom_in.png +0 -0
- melage/resource/zoom_inFaded.png +0 -0
- melage/resource/zoom_out.png +0 -0
- melage/resource/zoom_outFaded.png +0 -0
- melage/some_notes.txt +3 -0
- melage/utils/DispalyIm.py +2788 -0
- melage/utils/GMM.py +720 -0
- melage/utils/Shaders_120.py +257 -0
- melage/utils/Shaders_330.py +314 -0
- melage/utils/Shaders_bu.py +314 -0
- melage/utils/__init__0.py +7 -0
- melage/utils/brain_extraction_helper.py +234 -0
- melage/utils/custom_QScrollBar.py +61 -0
- melage/utils/glScientific.py +1554 -0
- melage/utils/glScientific_bc.py +1585 -0
- melage/utils/readData.py +1061 -0
- melage/utils/registration.py +512 -0
- melage/utils/source_folder.py +18 -0
- melage/utils/utils.py +3808 -0
- melage/version.txt +1 -0
- melage/widgets/ApplyMask.py +212 -0
- melage/widgets/ChangeSystem.py +152 -0
- melage/widgets/DeepLModels/InfantSegment/Unet.py +464 -0
- melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py +149 -0
- melage/widgets/DeepLModels/NPP/models/checkpoints/npp_v1.pth.py +0 -0
- melage/widgets/DeepLModels/NPP/models/losses.py +146 -0
- melage/widgets/DeepLModels/NPP/models/model.py +272 -0
- melage/widgets/DeepLModels/NPP/models/utils.py +303 -0
- melage/widgets/DeepLModels/NPP/npp.py +116 -0
- melage/widgets/DeepLModels/NPP/requirements.txt +8 -0
- melage/widgets/DeepLModels/NPP/train/train.py +116 -0
- melage/widgets/DeepLModels/Unet3DAtt.py +657 -0
- melage/widgets/DeepLModels/Unet3D_basic.py +648 -0
- melage/widgets/DeepLModels/new_unet.py +652 -0
- melage/widgets/DeepLModels/new_unet_old.py +639 -0
- melage/widgets/DeepLModels/new_unet_old2.py +658 -0
- melage/widgets/HistImage.py +153 -0
- melage/widgets/ImageThresholding.py +222 -0
- melage/widgets/MaskOperations.py +147 -0
- melage/widgets/N4Dialog.py +241 -0
- melage/widgets/Segmentation/FCM.py +1553 -0
- melage/widgets/Segmentation/__init__.py +588 -0
- melage/widgets/Segmentation/utils.py +417 -0
- melage/widgets/SemiAutoSeg.py +666 -0
- melage/widgets/Synthstrip.py +141 -0
- melage/widgets/__init__0.py +5 -0
- melage/widgets/about.py +246 -0
- melage/widgets/activation.py +437 -0
- melage/widgets/activator.py +147 -0
- melage/widgets/be_dl.py +409 -0
- melage/widgets/be_dl_unet3d.py +441 -0
- melage/widgets/brain_extraction.py +855 -0
- melage/widgets/brain_extraction_dl.py +887 -0
- melage/widgets/brain_extraction_dl_bu.py +869 -0
- melage/widgets/colorwidget.py +100 -0
- melage/widgets/dockWidgets.py +2005 -0
- melage/widgets/enhanceImWidget.py +109 -0
- melage/widgets/fileDialog_widget.py +275 -0
- melage/widgets/iminfo.py +346 -0
- melage/widgets/mainwindow_widget.py +6775 -0
- melage/widgets/melageAbout.py +123 -0
- melage/widgets/openglWidgets.py +556 -0
- melage/widgets/registrationWidget.py +342 -0
- melage/widgets/repeat_widget.py +74 -0
- melage/widgets/screenshot_widget.py +138 -0
- melage/widgets/settings_widget.py +77 -0
- melage/widgets/tranformationWidget.py +275 -0
- melage-0.0.65.dist-info/METADATA +742 -0
- melage-0.0.65.dist-info/RECORD +501 -0
- melage-0.0.65.dist-info/WHEEL +5 -0
- melage-0.0.65.dist-info/entry_points.txt +2 -0
- melage-0.0.65.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,639 @@
|
|
|
1
|
+
from functools import partial
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import torch.nn as nn
|
|
5
|
+
import torch
|
|
6
|
+
import math
|
|
7
|
+
#from .model_utils import *
|
|
8
|
+
import math
|
|
9
|
+
import torch
|
|
10
|
+
from functools import partial
|
|
11
|
+
import torch.nn as nn
|
|
12
|
+
from einops import repeat, rearrange
|
|
13
|
+
#from einops import reduce, rearrange
|
|
14
|
+
#from einops.layers.torch import Rearrange
|
|
15
|
+
#from torch.optim import lr_scheduler
|
|
16
|
+
#import torch.nn.functional as F
|
|
17
|
+
def sigmoid_beta_schedule(timesteps, start = -3, end = 3, tau = 1, clamp_min = 1e-5):
    """Build a sigmoid noise (beta) schedule for diffusion training.

    Proposed in https://arxiv.org/abs/2212.11972 (Figure 8); works better
    than linear schedules for images larger than 64x64 when used during
    training.

    NOTE: ``clamp_min`` is accepted for API compatibility but is not used
    by this implementation.
    """
    # Normalized time grid in [0, 1] with timesteps + 1 samples.
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype=torch.float64) / timesteps
    sig_lo = torch.tensor(start / tau).sigmoid()
    sig_hi = torch.tensor(end / tau).sigmoid()
    # Rescale the sigmoid so cumulative alphas run from 1 down to 0.
    cumprod = (sig_hi - ((grid * (end - start) + start) / tau).sigmoid()) / (sig_hi - sig_lo)
    cumprod = cumprod / cumprod[0]
    # Per-step betas from the ratio of successive cumulative alphas.
    betas = 1 - cumprod[1:] / cumprod[:-1]
    return torch.clip(betas, 0, 0.999)
|
|
31
|
+
|
|
32
|
+
class BlockLayer(nn.Module):
    """Sequentially stack ``num_blcks`` instances of ``block_layer``.

    The first stacked block maps ``planes_in`` -> ``planes_out`` and receives
    the caller's ``first_layer`` flag; every subsequent block maps
    ``planes_out`` -> ``planes_out`` with ``first_layer=False``.
    """

    def __init__(self, num_blcks, block_layer, planes_in, planes_out, kernel_size=3, first_layer=False,
                 input_size=None, time_emb_dim=None, norm_type='layer'):
        super(BlockLayer, self).__init__()

        self.blocks = nn.ModuleList()
        for i in range(num_blcks):
            # Only the very first block may carry the caller's first_layer flag;
            # the original's duplicated if/else collapses to this conditional.
            self.blocks.append(block_layer(planes_in, planes_out, kernel_size=kernel_size,
                                           first_layer=first_layer if i == 0 else False,
                                           input_size=input_size, time_emb_dim=time_emb_dim,
                                           norm_type=norm_type))
            planes_in = planes_out

    def forward(self, x, t=None):
        """Apply each stacked block in order, threading the optional time embedding ``t``."""
        for block in self.blocks:
            x = block(x, t)
        return x
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class ResidualBlock(nn.Module):
    """3D residual block: two convolutional sub-blocks plus an identity skip.

    When ``time_emb_dim`` is given, a small SiLU+Linear MLP projects the time
    embedding to ``2 * planes_in`` channels, which are chunked into a
    (scale, shift) pair and passed to the first convolution.
    """

    def __init__(self, planes_in, planes_out, time_emb_dim = None, kernel_size=3, first_layer=False, input_size=128, norm_type='layer'):
        super(ResidualBlock, self).__init__()
        if time_emb_dim is not None:
            # The original branched on planes_in > planes_out, but both branches
            # produced planes_in * 2, so the dead conditional is removed.
            dim = planes_in * 2
            self.mlp = nn.Sequential(
                nn.SiLU(),
                nn.Linear(time_emb_dim, dim)
            )

        self.conv1 = ConvolutionalBlock(planes_in=planes_in, planes_out=planes_out, first_layer=first_layer,
                                        kernel_size=kernel_size, dilation=1,
                                        activation=nn.ReLU, input_size=input_size, norm_type= norm_type)
        self.conv2 = ConvolutionalBlock(planes_in=planes_out, planes_out=planes_out, first_layer=False,
                                        kernel_size=1,
                                        dilation=1, activation=nn.ReLU, input_size=input_size, norm_type=norm_type)
        # 1x1x1 projection so the identity path matches the output channel count.
        if planes_in != planes_out:
            self.sample = nn.Conv3d(planes_in, planes_out, (1, 1, 1), stride=(1, 1, 1), dilation=(1, 1, 1),
                                    bias=True)
        else:
            self.sample = None

    def forward(self, x, time_emb= None):
        """Apply conv1 (with optional time conditioning) then conv2, and add the skip."""
        identity = x.clone()
        scale_shift = None
        if time_emb is not None:
            time_emb = self.mlp(time_emb)
            # Broadcast the embedding over the three spatial dimensions.
            time_emb = time_emb.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
            # NOTE(review): each chunk has planes_in channels while conv1 outputs
            # planes_out — confirm ConvolutionalBlock applies scale_shift where
            # the channel counts actually match.
            scale_shift = time_emb.chunk(2, dim=1)
        x = self.conv1(x, scale_shift= scale_shift)
        x = self.conv2(x, scale_shift=None)

        if self.sample is not None:
            identity = self.sample(identity)

        x += identity

        return x
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
class UnetEncoder(nn.Module):
    """Downsampling half of a 3D U-Net.

    Builds one ``layer`` (a stack of ``block`` modules) per entry in
    ``layer_blocks``, with MaxPool3d downsampling between stages. Channel
    widths come from ``layer_widths`` when given, otherwise grow geometrically
    from ``base_inc_channel`` via ``feature_dilation``.
    ``forward`` returns the per-stage feature maps, bottleneck first, for the
    decoder's skip connections.
    """

    def __init__(self, in_channel, base_inc_channel=8, layer=BlockLayer, block=None, layer_blocks=None,
                 downsampling_stride=None, feature_dilation=1.5, layer_widths=None, kernel_size=3,
                 time_emb_dim=None, norm_type='layer'):
        super(UnetEncoder, self).__init__()

        self.layers = nn.ModuleList()
        self.downsampling_convolutions = nn.ModuleList()
        self.attention_modules = nn.ModuleList()  # currently unused; kept for attribute compatibility
        self.downsampling_zarib = []              # currently unused; kept for attribute compatibility
        in_channel_layer = in_channel
        input_size = 192  # assumed spatial size of the input volume — TODO confirm
        self._layers_with = []
        self._layers_with.append(base_inc_channel)
        for i, num_blcks in enumerate(layer_blocks):
            if layer_widths is not None:
                out_channel_layer = layer_widths[i]
            else:
                # Geometric channel growth, truncated to int then halved.
                out_channel_layer = base_inc_channel * int(feature_dilation ** (i+1))//2

            self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
                                     planes_in=in_channel_layer, planes_out=out_channel_layer,
                                     kernel_size=kernel_size,
                                     first_layer=(i == 0), input_size=input_size,
                                     time_emb_dim=time_emb_dim, norm_type=norm_type))
            #self.attention_modules.append(Attention(out_channel_layer))
            if i != len(layer_blocks) - 1:
                # Halve the spatial resolution between stages (max pooling,
                # rather than the strided conv kept in the commented-out code).
                downsampling_conv = nn.MaxPool3d(kernel_size=2, stride=2)
                self.downsampling_convolutions.append(downsampling_conv)
                input_size = input_size // 2
            print("Encoder {}:".format(i), in_channel_layer, out_channel_layer)
            self._layers_with.append(out_channel_layer)
            in_channel_layer = out_channel_layer
        self.out_channel_layer = in_channel_layer
        self.output_size = input_size

    def forward(self, x, time=None):
        """Run the encoder and collect per-stage outputs, bottleneck first."""
        outputs = list()
        for layer, downsampling in zip(self.layers[:-1], self.downsampling_convolutions):
            x = layer(x, time)
            # Stash the pre-pooling feature map for the decoder's skip connections.
            outputs.insert(0, x)
            x = downsampling(x)
        x = self.layers[-1](x, time)
        outputs.insert(0, x)  # bottleneck layer
        return outputs
|
|
160
|
+
|
|
161
|
+
class ConvolutionalBlock(nn.Module):
    """Pre-activation conv block: norm -> optional FiLM scale/shift -> activation -> Conv3d.

    Fixes relative to the previous revision:
      * GroupNorm group selection previously *broke* on the first candidate that
        did NOT divide ``planes_in`` (backwards), and one branch compared a plain
        list against an int (TypeError) and passed ``num_groups=planes_in``.
        Group count is now the largest candidate in (16, 8, 4, 2) that is
        smaller than and divides ``planes_in``.
      * ``raise exit(...)`` replaced with a proper ``ValueError``.
      * ``padding`` could be ``None`` (or unbound) for unsupported
        kernel/dilation combos; it is now computed for any odd kernel.

    Parameters
    ----------
    planes_in, planes_out : int
        Input / output channel counts.
    first_layer : bool
        If True, always use InstanceNorm3d on the raw input.
    kernel_size, dilation : int
        Convolution geometry; padding keeps the spatial size constant (stride 1).
    activation : callable or None
        Activation *class* (e.g. ``nn.ReLU``); instantiated here.  ``None``
        disables the activation (and relaxes LayerNorm at large input sizes).
    input_size : int or None
        Cubic spatial size, required for ``norm_type='layer'``.
    norm_type : str
        One of 'layer', 'group', 'batch', 'instance'; anything else -> no norm.
    """

    def __init__(self, planes_in, planes_out, first_layer=False, kernel_size=3, dilation=1, activation=None,
                 input_size=None, norm_type='layer'):
        super(ConvolutionalBlock, self).__init__()
        if dilation == 1:
            padding = kernel_size // 2  # constant size
        else:
            # (In + 2p - d*(k-1) - 1)/stride + 1 == In  with stride 1  =>  p = d*(k-1)/2
            padding = dilation * (kernel_size - 1) // 2
        self.activation = None
        self.norm = None
        if first_layer:
            self.norm = nn.InstanceNorm3d(planes_in)
            self.activation = activation()
        elif activation is not None:
            self.norm = self._make_norm(norm_type, planes_in, input_size, instance_fallback=False)
            self.activation = activation()
        else:
            # No activation: at large spatial sizes LayerNorm is replaced by
            # InstanceNorm (full-resolution LayerNorm over >=120^3 voxels is
            # prohibitively expensive).
            self.norm = self._make_norm(norm_type, planes_in, input_size, instance_fallback=True)
        self.conv = nn.Conv3d(planes_in, planes_out, (kernel_size, kernel_size, kernel_size),
                              padding=padding, bias=True,
                              dilation=(dilation, dilation, dilation))

    @staticmethod
    def _num_groups(planes_in):
        """Largest candidate group count (< planes_in) that divides planes_in."""
        for candidate in (16, 8, 4, 2):
            if candidate < planes_in and planes_in % candidate == 0:
                return candidate
        raise ValueError('Num groups can not be determined for {} channels'.format(planes_in))

    @classmethod
    def _make_norm(cls, norm_type, planes_in, input_size, instance_fallback):
        """Build the normalisation module for ``norm_type`` (None if unknown)."""
        kind = norm_type.lower()
        if kind == 'layer':
            if instance_fallback and input_size >= 120:
                return nn.InstanceNorm3d(planes_in)
            return nn.LayerNorm([input_size, input_size, input_size])
        if kind == 'group':
            return nn.GroupNorm(num_groups=cls._num_groups(planes_in), num_channels=planes_in)
        if kind == 'batch':
            return nn.BatchNorm3d(planes_in)
        if kind == 'instance':
            return nn.InstanceNorm3d(planes_in)
        return None

    def forward(self, x, scale_shift=None):
        """Apply norm, optional FiLM (scale, shift) conditioning, activation, conv."""
        if self.norm is not None:
            x = self.norm(x)

        if scale_shift is not None:
            scale, shift = scale_shift
            x = x * (scale + 1) + shift

        if self.activation is not None:
            x = self.activation(x)

        return self.conv(x)
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal position/time embedding.

    Maps scalar positions to ``dim``-dimensional features by concatenating
    sine and cosine components at geometrically spaced frequencies.  If the
    input carries an extra trailing axis, it is flattened into the feature
    dimension of the result.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        # Frequencies decay geometrically from 1 down to 1/10000.
        step = math.log(10000) / (half - 1)
        freqs = torch.exp(-step * torch.arange(half, device=x.device))
        angles = x[..., None] * freqs[None, :]
        out = torch.cat((angles.sin(), angles.cos()), dim=-1)
        if out.dim() == 3:
            # Fold the extra input axis into the feature dimension.
            out = out.view(out.shape[0], out.shape[1] * out.shape[2])
        return out
class UnetDecoder(nn.Module):
    """Upsampling half of the U-Net.

    Mirrors UnetEncoder: each level runs a block layer, 2x nearest-neighbour
    upsampling, then channel-concatenation with the matching encoder skip
    feature.  ``forward`` takes the encoder's deepest-first feature list and
    returns ``(final_feature, per_level_outputs)``.

    Fixes relative to the previous revision: removed a dead ``if i == 0`` /
    ``else`` conditional in ``forward`` whose branches were identical, replaced
    the manual loop counter with ``enumerate``, replaced the mutable default
    ``layer_blocks=[1, 1, 1, 1]`` with a tuple, and folded the two duplicated
    layer-construction calls into one.

    Notes
    -----
    * ``upsampling_stride``, ``upsampling_mode``, ``align_corners`` and
      ``use_transposed_convolutions`` are accepted for interface compatibility
      but unused: upsampling is fixed to ``nn.Upsample(scale_factor=2,
      mode='nearest')``.
    * For levels after the first, ``layer_widths`` must be provided (skip
      widths are read from ``layer_widths[i - 1]``).
    """

    def __init__(self, in_channel, base_inc_channel=64, layer=BlockLayer, block=None, layer_blocks=(1, 1, 1, 1),
                 feature_dilation=2, upsampling_stride=2, layer_widths=None, kernel_size=3,
                 upsampling_mode="trilinear", align_corners=False, use_transposed_convolutions=False,
                 last_cov_channels=256, time_emb_dim=None, norm_type='layer'):
        super(UnetDecoder, self).__init__()
        self.layers = nn.ModuleList()
        self.upsampling_blocks = nn.ModuleList()
        self.attention_modules = nn.ModuleList()  # kept for compatibility; unused
        in_channel_layer = in_channel
        # NOTE(review): assumes a 16^3 bottleneck — confirm against the encoder's output_size.
        input_size = 16

        for i, num_blcks in enumerate(layer_blocks):
            if layer_widths is not None:
                out_channel_layer = layer_widths[i]
            else:
                out_channel_layer = base_inc_channel // (feature_dilation ** (i))

            if i == 0:
                # First level consumes the bottleneck features directly.
                first_layer = True
                planes_in = last_cov_channels
            else:
                # Later levels consume previous output concatenated with the
                # encoder skip of width layer_widths[i - 1].
                first_layer = False
                planes_in = in_channel_layer + layer_widths[i - 1]
            self.layers.append(layer(num_blcks=num_blcks, block_layer=block,
                                     planes_in=planes_in, planes_out=out_channel_layer,
                                     kernel_size=kernel_size,
                                     first_layer=first_layer, input_size=input_size,
                                     time_emb_dim=time_emb_dim, norm_type=norm_type))

            # self.upsampling_blocks.append(nn.ConvTranspose3d(out_channel_layer, out_channel_layer,
            #                                                  kernel_size=2, stride=upsampling_stride, padding=0))
            self.upsampling_blocks.append(nn.Upsample(scale_factor=2, mode='nearest'))

            input_size = input_size * 2
            last_cov_channels = in_channel_layer  # last_cov_channels // 2
            print("Decoder {}:".format(i), in_channel_layer, out_channel_layer)
            in_channel_layer = out_channel_layer
        self.out_channel_layer = in_channel_layer

    def forward(self, x, t):
        """Decode the deepest-first feature list ``x`` with time embedding ``t``."""
        outputs = list()
        y = x[0]
        for i, (up, lay) in enumerate(zip(self.upsampling_blocks, self.layers[:-1])):
            y = lay(y, t)
            outputs.insert(0, y)
            y = up(y)
            # Concatenate the encoder skip for the next (shallower) level.
            y = torch.cat([y, x[i + 1]], 1)
        y = self.layers[-1](y, t)
        outputs.insert(0, y)
        return y, outputs
class Attention(nn.Module):
    """Multi-head attention over 3-D feature maps using 1x1x1 conv projections.

    The q/k/v tensors are reshaped to ``(b, heads, dim_head, voxels)`` and the
    einsum reduces over the flattened spatial axis, so attention weights are
    formed between per-head feature dimensions rather than between spatial
    positions (an efficient, linear-attention-like variant).
    """

    def __init__(self, dim, heads = 4, dim_head = 16):
        super().__init__()
        self.scale = dim_head ** -0.5  # 1/sqrt(d) dot-product scaling
        self.heads = heads
        hidden_dim = dim_head * heads
        # Single projection producing q, k and v; split in forward().
        self.to_qkv = nn.Conv3d(dim, hidden_dim * 3, 1, bias = False)
        self.to_out = nn.Conv3d(hidden_dim, dim, 1)

    def forward(self, x, mask=None):
        # NOTE(review): `mask` is accepted but never applied.
        b, c, h, w, z = x.shape
        qkv = self.to_qkv(x).chunk(3, dim = 1)
        # b (heads c) x y z -> b heads c (x y z): split heads, flatten space.
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y z -> b h c (x y z)', h = self.heads), qkv)

        scaled_dot_prod = torch.einsum('... i d , ... j d -> ... i j', q, k) * self.scale
        attention = torch.softmax(scaled_dot_prod, dim=-1)
        # Scale values by the voxel count to keep output magnitudes bounded
        # (presumably compensating for the sum over the spatial axis — verify).
        v = v / (h * w* z)
        atv = torch.einsum('... i j , ... j d -> ... i d', attention, v)
        out = rearrange(atv, "b h c (x y z) -> b (h c) x y z", h=self.heads, x=h, y=w, z=z)
        return self.to_out(out)
class CrossConv3d(nn.Conv3d):
    """Pairwise ("cross") 3-D convolution between two sets of feature volumes.

    Adapted for 3-D from
    https://github.com/JJGO/UniverSeg/blob/main/universeg/nn/cross_conv.py

    Given ``x`` of shape (B, Sx, Cx, H, W, D) and ``y`` of shape
    (B, Sy, Cy, H, W, D), every element of ``x`` is paired with every element
    of ``y`` (a mesh grid over the set dimensions), each pair is concatenated
    on the channel axis, and one shared Conv3d is applied to all pairs,
    yielding a tensor of shape (B, Sx, Sy, Co, H, W, D).  Runtime and memory
    are O(Sx * Sy).

    Parameters
    ----------
    in_channels : int or tuple of ints
        Channels of the two inputs.  Pass a tuple ``(Cx, Cy)`` when they
        differ; a single int assumes both inputs have that many channels.
    out_channels : int
        Number of output channels ``Co``.
    kernel_size, stride, padding, dilation, groups, bias, padding_mode,
    device, dtype :
        Forwarded unchanged to ``nn.Conv3d``.

    Examples
    --------
    >>> x = torch.randn(2, 3, 4, 32, 32, 32)
    >>> y = torch.randn(2, 5, 6, 32, 32, 32)
    >>> conv = CrossConv3d(in_channels=(4, 6), out_channels=7, kernel_size=3, padding=1)
    >>> conv(x, y).shape  # (2, 3, 5, 7, 32, 32, 32)
    """

    def __init__(
            self,
            in_channels,
            out_channels: int,
            kernel_size,
            stride = 1,
            padding = 0,
            dilation= 1,
            groups: int = 1,
            bias: bool = True,
            padding_mode: str = "zeros",
            device=None,
            dtype=None,
    ) -> None:
        # The cross-conv operates on the channel-concatenated pair, so the
        # underlying Conv3d is built with Cx + Cy input channels.
        if isinstance(in_channels, (list, tuple)):
            concat_channels = sum(in_channels)
        else:
            concat_channels = 2 * in_channels

        super().__init__(
            in_channels=concat_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            padding_mode=padding_mode,
            device=device,
            dtype=dtype,
        )

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Cross-convolve every element of ``x`` with every element of ``y``.

        Parameters
        ----------
        x : torch.Tensor
            Shape (B, Sx, Cx, H, W, D).
        y : torch.Tensor
            Shape (B, Sy, Cy, H, W, D).

        Returns
        -------
        torch.Tensor
            Shape (B, Sx, Sy, Co, H, W, D).
        """
        B, Sx, *_ = x.shape
        _, Sy, *_ = y.shape

        # Broadcast both sets over the full (Sx, Sy) grid (einops.repeat).
        xs = repeat(x, "B Sx Cx H W Y -> B Sx Sy Cx H W Y", Sy=Sy)
        ys = repeat(y, "B Sy Cy H W Y-> B Sx Sy Cy H W Y", Sx=Sx)

        xy = torch.cat([xs, ys], dim=3,)

        # Fold the pair grid into the batch axis so a single Conv3d call
        # processes all Sx*Sy pairs at once.
        batched_xy = rearrange(xy, "B Sx Sy C2 H W Y -> (B Sx Sy) C2 H W Y")
        batched_output = super().forward(batched_xy)

        output = rearrange(
            batched_output, "(B Sx Sy) Co H W Y-> B Sx Sy Co H W Y", B=B, Sx=Sx, Sy=Sy
        )
        return output
class UnetGen(nn.Module):
    """Dual-decoder 3-D U-Net generator producing ``[mask, reconstruction]``.

    A shared encoder (plus an attention bottleneck) feeds two decoders:
    ``decoder_mask`` predicts a gating volume, while ``decoder`` predicts
    features that are multiplied by that volume before the reconstruction
    head.

    NOTE(review): this class expects a subclass to set ``self.time_embed`` and
    ``self.use_tr_conv`` *before* ``super().__init__()`` runs (see Unet3D);
    the ``time_embed`` / ``use_transposed_convolutions`` arguments are
    immediately overwritten by those attributes, and ``base_inc_channel`` is
    overridden by the hard-coded ``inblock`` width.
    """

    def __init__(self, base_inc_channel=8,
                 feature_dilation=2, downsampling_stride=2,
                 encoder_class=UnetEncoder, layer_widths=None, block=None,
                 kernel_size=3, interpolation_mode="trilinear", decoder_class=None,
                 use_transposed_convolutions=True, time_embed=False, norm_type='layer'):
        super(UnetGen, self).__init__()
        # Arguments superseded by subclass-provided attributes (see class note).
        time_embed = self.time_embed
        use_transposed_convolutions = self.use_tr_conv
        inblock = 16  # base channel width used throughout
        base_inc_channel = inblock
        self.base_inc_channel = base_inc_channel

        sinu_pos_emb = SinusoidalPosEmb(inblock)
        fourier_dim = inblock
        # if self.spacing_embed:
        #     fourier_dim *= 4

        # time embeddings

        time_dim = inblock * 4
        if time_embed:
            # Sinusoidal embedding followed by a small MLP (DDPM-style conditioning).
            self.time_mlp = nn.Sequential(
                sinu_pos_emb,
                nn.Linear(fourier_dim, time_dim),
                nn.GELU(),
                nn.Linear(time_dim, time_dim)
            )
        else:
            time_dim = None

        # encoder_blocks = [1, 1, 1, 1, 1, 1]
        # decoder_blocks = [1, 1, 1, 1, 1, 1]
        encoder_blocks = [1, 1, 1]  # three resolution levels
        decoder_blocks = [1, 1, 1]

        padding = kernel_size // 2  # constant size
        # Stem: lift the single-channel input volume to `inblock` channels.
        self.before_encoder = nn.Conv3d(1, inblock, kernel_size=(3, 3, 3),
                                        stride=(1, 1, 1), padding=3//2,
                                        bias=True)

        self.encoder = encoder_class(in_channel=inblock, base_inc_channel=base_inc_channel, layer_blocks=encoder_blocks,
                                     block=block,
                                     feature_dilation=feature_dilation, downsampling_stride=downsampling_stride,
                                     layer_widths=layer_widths, kernel_size=kernel_size,
                                     time_emb_dim=time_dim, norm_type=norm_type)

        # Per-level channel widths recorded by the encoder.
        layer_widths = self.encoder._layers_with
        in_channel = layer_widths[-1]
        self.BottleNeck = BlockLayer(num_blcks=1, block_layer=block,
                                     planes_in=in_channel, planes_out=in_channel,
                                     kernel_size=kernel_size,
                                     first_layer=False, input_size=self.encoder.output_size, time_emb_dim=time_dim, norm_type=norm_type)

        self.BottleNeck_att = Attention(in_channel)

        # Reverse and drop the deepest width to obtain decoder skip widths.
        layer_widths = layer_widths[::-1][1:]

        self.decoder = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
                                     block=block, last_cov_channels = self.encoder.out_channel_layer,
                                     upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                     use_transposed_convolutions=use_transposed_convolutions,
                                     kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
                                     )
        # Second decoder with identical architecture, producing the gating mask.
        self.decoder_mask = decoder_class(in_channel=in_channel, base_inc_channel=base_inc_channel*8, layer_blocks=decoder_blocks,
                                          block=block, last_cov_channels = self.encoder.out_channel_layer,
                                          upsampling_mode=interpolation_mode, layer_widths=layer_widths,
                                          use_transposed_convolutions=use_transposed_convolutions,
                                          kernel_size=kernel_size, time_emb_dim=time_dim, norm_type=norm_type,
                                          )

        kernel_size = 3

        # Heads: concat(decoder output, stem features) -> BlockLayer -> 1-channel conv.
        self.last_convolution = BlockLayer(num_blcks=1, block_layer=block,
                                           planes_in=inblock*2, planes_out=inblock//2,
                                           kernel_size=kernel_size,
                                           first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)

        self.last_convolution_rec = BlockLayer(num_blcks=1, block_layer=block,
                                               planes_in=inblock*2, planes_out=inblock//2,
                                               kernel_size=kernel_size,
                                               first_layer=False, input_size=192, time_emb_dim=time_dim, norm_type=norm_type)

        self.final_convolution = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
                                           stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
        self.final_convolution_rec = nn.Conv3d(inblock//2, 1, kernel_size=(kernel_size, kernel_size, kernel_size),
                                               stride=(1, 1, 1), bias=True, padding=kernel_size // 2)
        # Kept for external use; neither is applied inside forward().
        self.activation = nn.Softmax(dim=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, y, time, t=0, noise = None):
        """Encode ``y``, then decode a mask and a mask-gated reconstruction.

        ``t`` is overwritten internally and ``noise`` is unused; returns
        ``[mask, z]`` where ``z`` is the reconstruction volume.
        """
        y = self.before_encoder(y)

        if self.time_embed:
            # NOTE(review): both branches are identical; the shape test is vestigial.
            if len(time.shape)==1:
                t = self.time_mlp(time)
            else:
                t = self.time_mlp(time)
        else:
            t = None

        x = self.encoder(y, t)  # deepest-first feature list
        x[0] = self.BottleNeck(x[0], t)
        x[0] = self.BottleNeck_att(x[0])

        # Mask head.
        mask,_ = self.decoder_mask(x,t)
        mask = torch.cat([mask, y], 1)
        mask = self.last_convolution(mask)
        mask = self.final_convolution(mask)

        # Reconstruction head, gated by the (un-normalised) mask.
        x, _=self.decoder(x, t)
        x = torch.cat([x, y], 1)
        x = (x * mask)
        z = self.last_convolution_rec(x)
        z = self.final_convolution_rec(z)

        return [mask,z]
class Unet3D(UnetGen):
    """3-D U-Net: UnetGen wired with residual blocks, instance normalisation,
    and the standard encoder/decoder pair.
    """

    def __init__(self, time_embed=False, channels=1, *args, encoder_class=UnetEncoder, **kwargs):
        # UnetGen.__init__ reads these attributes, so they must exist before
        # super().__init__() is invoked.
        self.time_embed = time_embed
        self.use_tr_conv = False
        super().__init__(
            *args,
            encoder_class=encoder_class,
            decoder_class=UnetDecoder,
            block=ResidualBlock,
            norm_type="instance",
            **kwargs
        )
        self.channels = channels
        self.netName = 'Unet3D'

    def name(self):
        return 'unet3d'