melage-0.0.65-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- melage/__init__.py +16 -0
- melage/cli.py +4 -0
- melage/graphics/GLGraphicsItem.py +286 -0
- melage/graphics/GLViewWidget.py +595 -0
- melage/graphics/Transform3D.py +55 -0
- melage/graphics/__init__.py +8 -0
- melage/graphics/functions.py +101 -0
- melage/graphics/items/GLAxisItem.py +149 -0
- melage/graphics/items/GLGridItem.py +178 -0
- melage/graphics/items/GLPolygonItem.py +77 -0
- melage/graphics/items/GLScatterPlotItem.py +135 -0
- melage/graphics/items/GLVolumeItem.py +280 -0
- melage/graphics/items/GLVolumeItem_b.py +237 -0
- melage/graphics/items/__init__.py +0 -0
- melage/graphics/shaders.py +202 -0
- melage/main.py +270 -0
- melage/requirements22.txt +25 -0
- melage/requirements_old.txt +28 -0
- melage/resource/0circle.png +0 -0
- melage/resource/0circle_faded.png +0 -0
- melage/resource/3d.png +0 -0
- melage/resource/3d.psd +0 -0
- melage/resource/3dFaded.png +0 -0
- melage/resource/Eraser.png +0 -0
- melage/resource/EraserFaded.png +0 -0
- melage/resource/EraserX.png +0 -0
- melage/resource/EraserXFaded.png +0 -0
- melage/resource/Eraser_icon.svg +79 -0
- melage/resource/Hand.png +0 -0
- melage/resource/HandIcons_0.png +0 -0
- melage/resource/Hand_IX.png +0 -0
- melage/resource/Hand_IXFaded.png +0 -0
- melage/resource/Handsqueezed.png +0 -0
- melage/resource/Handwriting (copy).png +0 -0
- melage/resource/Handwriting.png +0 -0
- melage/resource/HandwritingMinus.png +0 -0
- melage/resource/HandwritingMinusX.png +0 -0
- melage/resource/HandwritingPlus.png +0 -0
- melage/resource/HandwritingPlusX.png +0 -0
- melage/resource/Move_icon.svg +8 -0
- melage/resource/PngItem_2422924.png +0 -0
- melage/resource/about.png +0 -0
- melage/resource/about_logo.png +0 -0
- melage/resource/about_logo0.png +0 -0
- melage/resource/action_check.png +0 -0
- melage/resource/action_check_OFF.png +0 -0
- melage/resource/arrow).png +0 -0
- melage/resource/arrow.png +0 -0
- melage/resource/arrowFaded.png +0 -0
- melage/resource/arrow_org.png +0 -0
- melage/resource/arrow_org.png.png +0 -0
- melage/resource/arrows.png +0 -0
- melage/resource/authors.mp4 +0 -0
- melage/resource/box.png +0 -0
- melage/resource/check-image-icon-0.jpg +0 -0
- melage/resource/circle.png +0 -0
- melage/resource/circle_faded.png +0 -0
- melage/resource/circle_or.png +0 -0
- melage/resource/close.png +0 -0
- melage/resource/close_bg.png +0 -0
- melage/resource/color/Simple.txt +18 -0
- melage/resource/color/Tissue.txt +24 -0
- melage/resource/color/Tissue12.txt +27 -0
- melage/resource/color/albert_LUT.txt +102 -0
- melage/resource/color/mcrib_LUT.txt +102 -0
- melage/resource/color/pediatric1.txt +29 -0
- melage/resource/color/pediatric1_old.txt +27 -0
- melage/resource/color/pediatric2.txt +87 -0
- melage/resource/color/pediatric3.txt +29 -0
- melage/resource/color/pediatrics (copy).csv +103 -0
- melage/resource/color/tissue_seg.txt +4 -0
- melage/resource/contour.png +0 -0
- melage/resource/contour.svg +2 -0
- melage/resource/contourFaded.png +0 -0
- melage/resource/contourX.png +0 -0
- melage/resource/contourXFaded.png +0 -0
- melage/resource/dti.png +0 -0
- melage/resource/dti0.png +0 -0
- melage/resource/dti222.png +0 -0
- melage/resource/dti_or.png +0 -0
- melage/resource/eco.png +0 -0
- melage/resource/eco22.png +0 -0
- melage/resource/eco_old.png +0 -0
- melage/resource/eco_or.png +0 -0
- melage/resource/eco_or2.png +0 -0
- melage/resource/eco_seg.png +0 -0
- melage/resource/eco_seg_old.png +0 -0
- melage/resource/export.png +0 -0
- melage/resource/hand-grab-icon-10.jpg +0 -0
- melage/resource/hand-grab-icon-25.jpg +0 -0
- melage/resource/info.png +0 -0
- melage/resource/line.png +0 -0
- melage/resource/linefaded.png +0 -0
- melage/resource/load.png +0 -0
- melage/resource/main.ico +0 -0
- melage/resource/manual_images/3D_rightc.png +0 -0
- melage/resource/manual_images/3D_rightc_goto.png +0 -0
- melage/resource/manual_images/3D_rightc_paint.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_draw1.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_draw2.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render2.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render3.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render4.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render5.png +0 -0
- melage/resource/manual_images/3D_rightc_paint_render6.png +0 -0
- melage/resource/manual_images/3D_rightc_seg.png +0 -0
- melage/resource/manual_images/exit_toolbar.png +0 -0
- melage/resource/manual_images/load_image_file.png +0 -0
- melage/resource/manual_images/load_image_file_openp.png +0 -0
- melage/resource/manual_images/main_page.png +0 -0
- melage/resource/manual_images/menu_file.png +0 -0
- melage/resource/manual_images/menu_file_export.png +0 -0
- melage/resource/manual_images/menu_file_import.png +0 -0
- melage/resource/manual_images/menu_file_settings.png +0 -0
- melage/resource/manual_images/menu_file_ss.png +0 -0
- melage/resource/manual_images/open_save_load.png +0 -0
- melage/resource/manual_images/panning_toolbar.png +0 -0
- melage/resource/manual_images/segmentation_toolbar.png +0 -0
- melage/resource/manual_images/tab_mri.png +0 -0
- melage/resource/manual_images/tab_us.png +0 -0
- melage/resource/manual_images/tabs.png +0 -0
- melage/resource/manual_images/toolbar_tools.png +0 -0
- melage/resource/manual_images/tools_basic.png +0 -0
- melage/resource/manual_images/tools_bet.png +0 -0
- melage/resource/manual_images/tools_cs.png +0 -0
- melage/resource/manual_images/tools_deepbet.png +0 -0
- melage/resource/manual_images/tools_imageinfo.png +0 -0
- melage/resource/manual_images/tools_maskO.png +0 -0
- melage/resource/manual_images/tools_masking.png +0 -0
- melage/resource/manual_images/tools_n4b.png +0 -0
- melage/resource/manual_images/tools_resize.png +0 -0
- melage/resource/manual_images/tools_ruler.png +0 -0
- melage/resource/manual_images/tools_seg.png +0 -0
- melage/resource/manual_images/tools_threshold.png +0 -0
- melage/resource/manual_images/tools_tools.png +0 -0
- melage/resource/manual_images/widget_color.png +0 -0
- melage/resource/manual_images/widget_color_add.png +0 -0
- melage/resource/manual_images/widget_color_add2.png +0 -0
- melage/resource/manual_images/widget_color_additional.png +0 -0
- melage/resource/manual_images/widget_images.png +0 -0
- melage/resource/manual_images/widget_images2.png +0 -0
- melage/resource/manual_images/widget_images3.png +0 -0
- melage/resource/manual_images/widget_marker.png +0 -0
- melage/resource/manual_images/widget_mri.png +0 -0
- melage/resource/manual_images/widget_mri2.png +0 -0
- melage/resource/manual_images/widget_segintensity.png +0 -0
- melage/resource/manual_images/widget_tab_mutualview.png +0 -0
- melage/resource/manual_images/widget_tab_mutualview2.png +0 -0
- melage/resource/manual_images/widget_table.png +0 -0
- melage/resource/manual_images/widget_table2.png +0 -0
- melage/resource/manual_images/widget_us.png +0 -0
- melage/resource/melage_top.ico +0 -0
- melage/resource/melage_top.png +0 -0
- melage/resource/melage_top0.png +0 -0
- melage/resource/melage_top1.png +0 -0
- melage/resource/melage_top4.png +0 -0
- melage/resource/mri (copy).png +0 -0
- melage/resource/mri.png +0 -0
- melage/resource/mri0.png +0 -0
- melage/resource/mri000.png +0 -0
- melage/resource/mri22.png +0 -0
- melage/resource/mri_big.png +0 -0
- melage/resource/mri_old.png +0 -0
- melage/resource/mri_seg.png +0 -0
- melage/resource/mri_seg_old.png +0 -0
- melage/resource/new.png +0 -0
- melage/resource/open.png +0 -0
- melage/resource/open2.png +0 -0
- melage/resource/pan.png +0 -0
- melage/resource/pencil.png +0 -0
- melage/resource/pencilFaded.png +0 -0
- melage/resource/points.png +0 -0
- melage/resource/pointsFaded.png +0 -0
- melage/resource/rotate.png +0 -0
- melage/resource/ruler.png +0 -0
- melage/resource/rulerFaded.png +0 -0
- melage/resource/s.png +0 -0
- melage/resource/s.psd +0 -0
- melage/resource/save.png +0 -0
- melage/resource/saveas.png +0 -0
- melage/resource/seg_mri.png +0 -0
- melage/resource/seg_mri2.png +0 -0
- melage/resource/settings.png +0 -0
- melage/resource/synch.png +0 -0
- melage/resource/synchFaded.png +0 -0
- melage/resource/theme/rc/.keep +1 -0
- melage/resource/theme/rc/arrow_down.png +0 -0
- melage/resource/theme/rc/arrow_down@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_disabled.png +0 -0
- melage/resource/theme/rc/arrow_down_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_focus.png +0 -0
- melage/resource/theme/rc/arrow_down_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_down_pressed.png +0 -0
- melage/resource/theme/rc/arrow_down_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_left.png +0 -0
- melage/resource/theme/rc/arrow_left@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_disabled.png +0 -0
- melage/resource/theme/rc/arrow_left_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_focus.png +0 -0
- melage/resource/theme/rc/arrow_left_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_left_pressed.png +0 -0
- melage/resource/theme/rc/arrow_left_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_right.png +0 -0
- melage/resource/theme/rc/arrow_right@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_disabled.png +0 -0
- melage/resource/theme/rc/arrow_right_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_focus.png +0 -0
- melage/resource/theme/rc/arrow_right_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_right_pressed.png +0 -0
- melage/resource/theme/rc/arrow_right_pressed@2x.png +0 -0
- melage/resource/theme/rc/arrow_up.png +0 -0
- melage/resource/theme/rc/arrow_up@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_disabled.png +0 -0
- melage/resource/theme/rc/arrow_up_disabled@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_focus.png +0 -0
- melage/resource/theme/rc/arrow_up_focus@2x.png +0 -0
- melage/resource/theme/rc/arrow_up_pressed.png +0 -0
- melage/resource/theme/rc/arrow_up_pressed@2x.png +0 -0
- melage/resource/theme/rc/base_icon.png +0 -0
- melage/resource/theme/rc/base_icon@2x.png +0 -0
- melage/resource/theme/rc/base_icon_disabled.png +0 -0
- melage/resource/theme/rc/base_icon_disabled@2x.png +0 -0
- melage/resource/theme/rc/base_icon_focus.png +0 -0
- melage/resource/theme/rc/base_icon_focus@2x.png +0 -0
- melage/resource/theme/rc/base_icon_pressed.png +0 -0
- melage/resource/theme/rc/base_icon_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_closed.png +0 -0
- melage/resource/theme/rc/branch_closed@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_disabled.png +0 -0
- melage/resource/theme/rc/branch_closed_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_focus.png +0 -0
- melage/resource/theme/rc/branch_closed_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_closed_pressed.png +0 -0
- melage/resource/theme/rc/branch_closed_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_end.png +0 -0
- melage/resource/theme/rc/branch_end@2x.png +0 -0
- melage/resource/theme/rc/branch_end_disabled.png +0 -0
- melage/resource/theme/rc/branch_end_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_end_focus.png +0 -0
- melage/resource/theme/rc/branch_end_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_end_pressed.png +0 -0
- melage/resource/theme/rc/branch_end_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_line.png +0 -0
- melage/resource/theme/rc/branch_line@2x.png +0 -0
- melage/resource/theme/rc/branch_line_disabled.png +0 -0
- melage/resource/theme/rc/branch_line_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_line_focus.png +0 -0
- melage/resource/theme/rc/branch_line_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_line_pressed.png +0 -0
- melage/resource/theme/rc/branch_line_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_more.png +0 -0
- melage/resource/theme/rc/branch_more@2x.png +0 -0
- melage/resource/theme/rc/branch_more_disabled.png +0 -0
- melage/resource/theme/rc/branch_more_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_more_focus.png +0 -0
- melage/resource/theme/rc/branch_more_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_more_pressed.png +0 -0
- melage/resource/theme/rc/branch_more_pressed@2x.png +0 -0
- melage/resource/theme/rc/branch_open.png +0 -0
- melage/resource/theme/rc/branch_open@2x.png +0 -0
- melage/resource/theme/rc/branch_open_disabled.png +0 -0
- melage/resource/theme/rc/branch_open_disabled@2x.png +0 -0
- melage/resource/theme/rc/branch_open_focus.png +0 -0
- melage/resource/theme/rc/branch_open_focus@2x.png +0 -0
- melage/resource/theme/rc/branch_open_pressed.png +0 -0
- melage/resource/theme/rc/branch_open_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked.png +0 -0
- melage/resource/theme/rc/checkbox_checked0.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked@2x000.png.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_disabled@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_focus@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed0.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_checked_pressed@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_focus.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_indeterminate_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_disabled@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_focus@2x00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed00.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x0.png +0 -0
- melage/resource/theme/rc/checkbox_unchecked_pressed@2x00.png +0 -0
- melage/resource/theme/rc/line_horizontal.png +0 -0
- melage/resource/theme/rc/line_horizontal@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/line_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_focus.png +0 -0
- melage/resource/theme/rc/line_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/line_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/line_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/line_vertical.png +0 -0
- melage/resource/theme/rc/line_vertical@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_disabled.png +0 -0
- melage/resource/theme/rc/line_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_focus.png +0 -0
- melage/resource/theme/rc/line_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/line_vertical_pressed.png +0 -0
- melage/resource/theme/rc/line_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/radio_checked.png +0 -0
- melage/resource/theme/rc/radio_checked@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_disabled.png +0 -0
- melage/resource/theme/rc/radio_checked_disabled@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_focus.png +0 -0
- melage/resource/theme/rc/radio_checked_focus@2x.png +0 -0
- melage/resource/theme/rc/radio_checked_pressed.png +0 -0
- melage/resource/theme/rc/radio_checked_pressed@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked.png +0 -0
- melage/resource/theme/rc/radio_unchecked@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_disabled.png +0 -0
- melage/resource/theme/rc/radio_unchecked_disabled@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_focus.png +0 -0
- melage/resource/theme/rc/radio_unchecked_focus@2x.png +0 -0
- melage/resource/theme/rc/radio_unchecked_pressed.png +0 -0
- melage/resource/theme/rc/radio_unchecked_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_focus.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_move_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_focus.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_move_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_focus.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_separator_horizontal_pressed@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_disabled.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_disabled@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_focus.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_focus@2x.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_pressed.png +0 -0
- melage/resource/theme/rc/toolbar_separator_vertical_pressed@2x.png +0 -0
- melage/resource/theme/rc/transparent.png +0 -0
- melage/resource/theme/rc/transparent@2x.png +0 -0
- melage/resource/theme/rc/transparent_disabled.png +0 -0
- melage/resource/theme/rc/transparent_disabled@2x.png +0 -0
- melage/resource/theme/rc/transparent_focus.png +0 -0
- melage/resource/theme/rc/transparent_focus@2x.png +0 -0
- melage/resource/theme/rc/transparent_pressed.png +0 -0
- melage/resource/theme/rc/transparent_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_close.png +0 -0
- melage/resource/theme/rc/window_close@2x.png +0 -0
- melage/resource/theme/rc/window_close_disabled.png +0 -0
- melage/resource/theme/rc/window_close_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_close_focus.png +0 -0
- melage/resource/theme/rc/window_close_focus@2x.png +0 -0
- melage/resource/theme/rc/window_close_pressed.png +0 -0
- melage/resource/theme/rc/window_close_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_grip.png +0 -0
- melage/resource/theme/rc/window_grip@2x.png +0 -0
- melage/resource/theme/rc/window_grip_disabled.png +0 -0
- melage/resource/theme/rc/window_grip_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_grip_focus.png +0 -0
- melage/resource/theme/rc/window_grip_focus@2x.png +0 -0
- melage/resource/theme/rc/window_grip_pressed.png +0 -0
- melage/resource/theme/rc/window_grip_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_minimize.png +0 -0
- melage/resource/theme/rc/window_minimize@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_disabled.png +0 -0
- melage/resource/theme/rc/window_minimize_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_focus.png +0 -0
- melage/resource/theme/rc/window_minimize_focus@2x.png +0 -0
- melage/resource/theme/rc/window_minimize_pressed.png +0 -0
- melage/resource/theme/rc/window_minimize_pressed@2x.png +0 -0
- melage/resource/theme/rc/window_undock.png +0 -0
- melage/resource/theme/rc/window_undock@2x.png +0 -0
- melage/resource/theme/rc/window_undock_disabled.png +0 -0
- melage/resource/theme/rc/window_undock_disabled@2x.png +0 -0
- melage/resource/theme/rc/window_undock_focus.png +0 -0
- melage/resource/theme/rc/window_undock_focus@2x.png +0 -0
- melage/resource/theme/rc/window_undock_pressed.png +0 -0
- melage/resource/theme/rc/window_undock_pressed@2x.png +0 -0
- melage/resource/theme/style.qss +2223 -0
- melage/resource/tract.png +0 -0
- melage/resource/view1.png +0 -0
- melage/resource/view1_eco.png +0 -0
- melage/resource/view1_mri.png +0 -0
- melage/resource/view1_seg.png +0 -0
- melage/resource/view2.png +0 -0
- melage/resource/view2_seg.png +0 -0
- melage/resource/w.png +0 -0
- melage/resource/zoom_in.png +0 -0
- melage/resource/zoom_inFaded.png +0 -0
- melage/resource/zoom_out.png +0 -0
- melage/resource/zoom_outFaded.png +0 -0
- melage/some_notes.txt +3 -0
- melage/utils/DispalyIm.py +2788 -0
- melage/utils/GMM.py +720 -0
- melage/utils/Shaders_120.py +257 -0
- melage/utils/Shaders_330.py +314 -0
- melage/utils/Shaders_bu.py +314 -0
- melage/utils/__init__0.py +7 -0
- melage/utils/brain_extraction_helper.py +234 -0
- melage/utils/custom_QScrollBar.py +61 -0
- melage/utils/glScientific.py +1554 -0
- melage/utils/glScientific_bc.py +1585 -0
- melage/utils/readData.py +1061 -0
- melage/utils/registration.py +512 -0
- melage/utils/source_folder.py +18 -0
- melage/utils/utils.py +3808 -0
- melage/version.txt +1 -0
- melage/widgets/ApplyMask.py +212 -0
- melage/widgets/ChangeSystem.py +152 -0
- melage/widgets/DeepLModels/InfantSegment/Unet.py +464 -0
- melage/widgets/DeepLModels/NPP/dataset/mri_dataset_affine.py +149 -0
- melage/widgets/DeepLModels/NPP/models/checkpoints/npp_v1.pth.py +0 -0
- melage/widgets/DeepLModels/NPP/models/losses.py +146 -0
- melage/widgets/DeepLModels/NPP/models/model.py +272 -0
- melage/widgets/DeepLModels/NPP/models/utils.py +303 -0
- melage/widgets/DeepLModels/NPP/npp.py +116 -0
- melage/widgets/DeepLModels/NPP/requirements.txt +8 -0
- melage/widgets/DeepLModels/NPP/train/train.py +116 -0
- melage/widgets/DeepLModels/Unet3DAtt.py +657 -0
- melage/widgets/DeepLModels/Unet3D_basic.py +648 -0
- melage/widgets/DeepLModels/new_unet.py +652 -0
- melage/widgets/DeepLModels/new_unet_old.py +639 -0
- melage/widgets/DeepLModels/new_unet_old2.py +658 -0
- melage/widgets/HistImage.py +153 -0
- melage/widgets/ImageThresholding.py +222 -0
- melage/widgets/MaskOperations.py +147 -0
- melage/widgets/N4Dialog.py +241 -0
- melage/widgets/Segmentation/FCM.py +1553 -0
- melage/widgets/Segmentation/__init__.py +588 -0
- melage/widgets/Segmentation/utils.py +417 -0
- melage/widgets/SemiAutoSeg.py +666 -0
- melage/widgets/Synthstrip.py +141 -0
- melage/widgets/__init__0.py +5 -0
- melage/widgets/about.py +246 -0
- melage/widgets/activation.py +437 -0
- melage/widgets/activator.py +147 -0
- melage/widgets/be_dl.py +409 -0
- melage/widgets/be_dl_unet3d.py +441 -0
- melage/widgets/brain_extraction.py +855 -0
- melage/widgets/brain_extraction_dl.py +887 -0
- melage/widgets/brain_extraction_dl_bu.py +869 -0
- melage/widgets/colorwidget.py +100 -0
- melage/widgets/dockWidgets.py +2005 -0
- melage/widgets/enhanceImWidget.py +109 -0
- melage/widgets/fileDialog_widget.py +275 -0
- melage/widgets/iminfo.py +346 -0
- melage/widgets/mainwindow_widget.py +6775 -0
- melage/widgets/melageAbout.py +123 -0
- melage/widgets/openglWidgets.py +556 -0
- melage/widgets/registrationWidget.py +342 -0
- melage/widgets/repeat_widget.py +74 -0
- melage/widgets/screenshot_widget.py +138 -0
- melage/widgets/settings_widget.py +77 -0
- melage/widgets/tranformationWidget.py +275 -0
- melage-0.0.65.dist-info/METADATA +742 -0
- melage-0.0.65.dist-info/RECORD +501 -0
- melage-0.0.65.dist-info/WHEEL +5 -0
- melage-0.0.65.dist-info/entry_points.txt +2 -0
- melage-0.0.65.dist-info/top_level.txt +1 -0
melage/utils/utils.py
ADDED
|
@@ -0,0 +1,3808 @@
|
|
|
1
|
+
|
|
2
|
+
|
|
3
|
+
__AUTHOR__ = 'Bahram Jafrasteh'
|
|
4
|
+
|
|
5
|
+
import sys
|
|
6
|
+
from operator import index
|
|
7
|
+
|
|
8
|
+
sys.path.append("../../")
|
|
9
|
+
import numpy as np
|
|
10
|
+
import struct
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
from PyQt5 import QtWidgets, QtCore, QtGui
|
|
13
|
+
import json
|
|
14
|
+
import math
|
|
15
|
+
import os
|
|
16
|
+
import SimpleITK as sitk
|
|
17
|
+
from shapely.geometry import LineString, Polygon, Point
|
|
18
|
+
from PyQt5.QtCore import Qt
|
|
19
|
+
import cv2
|
|
20
|
+
import numpy as np
|
|
21
|
+
from skimage.transform import resize as image_resize_skimage
|
|
22
|
+
from skimage.transform import rotate as image_rotate_skimage
|
|
23
|
+
from collections import defaultdict
|
|
24
|
+
from qtwidgets import AnimatedToggle
|
|
25
|
+
from io import BytesIO
|
|
26
|
+
try:
|
|
27
|
+
import nibabel as nib
|
|
28
|
+
except:
|
|
29
|
+
None
|
|
30
|
+
try:
|
|
31
|
+
from melage.utils.source_folder import source_folder
|
|
32
|
+
except:
|
|
33
|
+
pass
|
|
34
|
+
# Direction of medical image Left, Right, Posterior Anterior, Inferior, Superior
|
|
35
|
+
code_direction = (('L', 'R'), ('P', 'A'), ('I', 'S'))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
###################### Item class for read kretz ######################
|
|
39
|
+
@dataclass
|
|
40
|
+
class Item:
|
|
41
|
+
tagcl: bytes
|
|
42
|
+
tagel: bytes
|
|
43
|
+
size: bytes
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def guess_num_image_index(nifti_input):
|
|
47
|
+
"""
|
|
48
|
+
Guess which axis of the image shape represents the 'num_images' (volume/time/channel) dimension.
|
|
49
|
+
|
|
50
|
+
Parameters:
|
|
51
|
+
nifti_input: str or nib.Nifti1Image or nib.Nifti1Header
|
|
52
|
+
|
|
53
|
+
Returns:
|
|
54
|
+
index (int or None): The axis index that likely corresponds to num_images, or None if 3D.
|
|
55
|
+
"""
|
|
56
|
+
if isinstance(nifti_input, nib.Nifti1Image):
|
|
57
|
+
hdr = nifti_input.header
|
|
58
|
+
else:
|
|
59
|
+
return None, 1
|
|
60
|
+
|
|
61
|
+
ndim = hdr['dim'][0]
|
|
62
|
+
shape = hdr['dim'][1:4]
|
|
63
|
+
if ndim == 4:
|
|
64
|
+
ind_num_image = 3
|
|
65
|
+
else:
|
|
66
|
+
ind_num_image = 3
|
|
67
|
+
num_image = hdr['dim'][4]
|
|
68
|
+
return ind_num_image, num_image
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def get_size_minDim(img):
|
|
73
|
+
"""
|
|
74
|
+
Efficiently split a 4D nibabel image along the smallest dimension.
|
|
75
|
+
Mimics nibabel's speed by using dataobj (memmap) without copying.
|
|
76
|
+
|
|
77
|
+
Parameters
|
|
78
|
+
----------
|
|
79
|
+
img : nibabel image
|
|
80
|
+
4D nibabel image with `.dataobj`, `.header`, and `.affine`.
|
|
81
|
+
|
|
82
|
+
Returns
|
|
83
|
+
-------
|
|
84
|
+
imgs : list of nibabel images
|
|
85
|
+
3D images sliced along the smallest dimension.
|
|
86
|
+
"""
|
|
87
|
+
arr = img.dataobj # usually a memmap, avoids full load
|
|
88
|
+
shape = arr.shape
|
|
89
|
+
if len(shape) != 4:
|
|
90
|
+
raise ValueError("Image must be 4D.")
|
|
91
|
+
|
|
92
|
+
min_dim = np.argmin(shape)
|
|
93
|
+
return min_dim, shape[min_dim]
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def fast_split_min_dim(img, min_dim, desired_index=0):
|
|
98
|
+
"""
|
|
99
|
+
Efficiently split a 4D nibabel image along the smallest dimension.
|
|
100
|
+
Mimics nibabel's speed by using dataobj (memmap) without copying.
|
|
101
|
+
|
|
102
|
+
Parameters
|
|
103
|
+
----------
|
|
104
|
+
img : nibabel image
|
|
105
|
+
4D nibabel image with `.dataobj`, `.header`, and `.affine`.
|
|
106
|
+
|
|
107
|
+
Returns
|
|
108
|
+
-------
|
|
109
|
+
imgs : list of nibabel images
|
|
110
|
+
3D images sliced along the smallest dimension.
|
|
111
|
+
"""
|
|
112
|
+
arr = img.dataobj # usually a memmap, avoids full load
|
|
113
|
+
shape = arr.shape
|
|
114
|
+
if len(shape) != 4:
|
|
115
|
+
raise ValueError("Image must be 4D.")
|
|
116
|
+
|
|
117
|
+
image_maker = img.__class__
|
|
118
|
+
header = img.header.copy()
|
|
119
|
+
affine = img.affine
|
|
120
|
+
|
|
121
|
+
num_dims = shape[min_dim]
|
|
122
|
+
for i in range(shape[min_dim]):
|
|
123
|
+
if i!=desired_index:
|
|
124
|
+
continue
|
|
125
|
+
slicer = [slice(None)] * 4
|
|
126
|
+
slicer[min_dim] = i
|
|
127
|
+
sliced_data = arr[tuple(slicer)]
|
|
128
|
+
return image_maker(sliced_data, affine, header)
|
|
129
|
+
|
|
130
|
+
###################### Compare the to data elements ######################
|
|
131
|
+
def Item_equal(Item, tag):
|
|
132
|
+
"""
|
|
133
|
+
Args:
|
|
134
|
+
Item:
|
|
135
|
+
tag: to tags in the item
|
|
136
|
+
|
|
137
|
+
Returns: boolean true or false
|
|
138
|
+
|
|
139
|
+
"""
|
|
140
|
+
a = struct.pack('H', tag[0])
|
|
141
|
+
b = struct.pack('H', tag[1])
|
|
142
|
+
if a == Item.tagcl and b == Item.tagel:
|
|
143
|
+
return True
|
|
144
|
+
return False
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
###################### convert to string with a precission ######################
|
|
148
|
+
def str_conv(a):
|
|
149
|
+
return f"{a:.1f}"
|
|
150
|
+
|
|
151
|
+
###################### save VTK image to nifti ######################
|
|
152
|
+
def save_as_nifti(VtkImage, meta_data, pathfile):
|
|
153
|
+
"""
|
|
154
|
+
Args:
|
|
155
|
+
VtkImage: vtk image data
|
|
156
|
+
meta_data: meta data of the image
|
|
157
|
+
pathfile: output path
|
|
158
|
+
Returns: write the file with info into the path specified
|
|
159
|
+
"""
|
|
160
|
+
import vtk
|
|
161
|
+
nifit_writer = vtk.vtkNIFTIImageWriter()
|
|
162
|
+
nifit_writer.SetInputData(VtkImage)
|
|
163
|
+
nifit_writer.SetFileName(pathfile+'.nii.gz')
|
|
164
|
+
nifit_writer.Write()
|
|
165
|
+
with open(pathfile+'.json', 'w') as fp:
|
|
166
|
+
json.dump(meta_data, fp)
|
|
167
|
+
|
|
168
|
+
###################### Reading files with desired coordinate system ######################
|
|
169
|
+
def read_file_with_cs(atlas_file, expected_source_system='RAS'):
|
|
170
|
+
# Read NIFTI images with desired coordinate system
|
|
171
|
+
from nibabel.orientations import aff2axcodes, axcodes2ornt, apply_orientation, ornt_transform
|
|
172
|
+
import nibabel as nib
|
|
173
|
+
im = nib.load(atlas_file)
|
|
174
|
+
orig_orient = nib.io_orientation(im.affine)
|
|
175
|
+
code_direction = (('L', 'R'), ('P', 'A'), ('I', 'S'))
|
|
176
|
+
source_system = ''.join(list(aff2axcodes(im.affine, code_direction)))
|
|
177
|
+
if source_system != expected_source_system:
|
|
178
|
+
print('converted to RAS')
|
|
179
|
+
target_orient = axcodes2ornt('RAS', code_direction)
|
|
180
|
+
transform = ornt_transform(orig_orient, target_orient)
|
|
181
|
+
im = im.as_reoriented(transform)
|
|
182
|
+
|
|
183
|
+
return im
|
|
184
|
+
|
|
185
|
+
###################### Read SITK image as nib ######################
|
|
186
|
+
def read_sitk_as_nib(sitk_im):
|
|
187
|
+
return nib.Nifti1Image(sitk.GetArrayFromImage(sitk_im).transpose(),
|
|
188
|
+
make_affine(sitk_im), None)
|
|
189
|
+
|
|
190
|
+
###################### Convert NIBABEL image to SITK ######################
|
|
191
|
+
def read_nib_as_sitk(image_nib, dtype=None):
|
|
192
|
+
# From https://github.com/gift-surg/PySiTK/blob/master/pysitk/simple_itk_helper.py
|
|
193
|
+
if dtype is None:
|
|
194
|
+
dtype = np.float32#image_nib.header["bitpix"].dtype
|
|
195
|
+
nda_nib = image_nib.get_fdata().astype(dtype)
|
|
196
|
+
nda_nib_shape = nda_nib.shape
|
|
197
|
+
nda = np.zeros((nda_nib_shape[2],
|
|
198
|
+
nda_nib_shape[1],
|
|
199
|
+
nda_nib_shape[0]),
|
|
200
|
+
dtype=dtype)
|
|
201
|
+
|
|
202
|
+
# Convert to (Simple)ITK data array format, i.e. reorder to
|
|
203
|
+
# z-y-x-components shape
|
|
204
|
+
for i in range(0, nda_nib_shape[2]):
|
|
205
|
+
for k in range(0, nda_nib_shape[0]):
|
|
206
|
+
nda[i, :, k] = nda_nib[k, :, i]
|
|
207
|
+
# Get SimpleITK image
|
|
208
|
+
vector_image_sitk = sitk.GetImageFromArray(nda)
|
|
209
|
+
# Update header from nibabel information
|
|
210
|
+
# (may introduce some header inaccuracies?)
|
|
211
|
+
R = np.array([
|
|
212
|
+
[-1, 0, 0],
|
|
213
|
+
[0, -1, 0],
|
|
214
|
+
[0, 0, 1]])
|
|
215
|
+
affine_nib = image_nib.affine.astype(np.float64)
|
|
216
|
+
R_nib = affine_nib[0:-1, 0:-1]
|
|
217
|
+
|
|
218
|
+
spacing_sitk = np.array(image_nib.header.get_zooms(), dtype=np.float64)
|
|
219
|
+
spacing_sitk = spacing_sitk[0:R_nib.shape[0]]
|
|
220
|
+
S_nib_inv = np.diag(1. / spacing_sitk)
|
|
221
|
+
|
|
222
|
+
direction_sitk = R.dot(R_nib).dot(S_nib_inv).flatten()
|
|
223
|
+
|
|
224
|
+
t_nib = affine_nib[0:-1, 3]
|
|
225
|
+
origin_sitk = R.dot(t_nib)
|
|
226
|
+
|
|
227
|
+
vector_image_sitk.SetSpacing(np.array(spacing_sitk).astype('double'))
|
|
228
|
+
vector_image_sitk.SetDirection(direction_sitk)
|
|
229
|
+
vector_image_sitk.SetOrigin(origin_sitk)
|
|
230
|
+
return vector_image_sitk
|
|
231
|
+
|
|
232
|
+
###################### Save modified file to NIFTI ######################
|
|
233
|
+
def save_modified_nifti(reader, source, filename):
|
|
234
|
+
"""
|
|
235
|
+
Args:
|
|
236
|
+
reader: a reader with image information
|
|
237
|
+
source: folder to write new image
|
|
238
|
+
filename: new file name
|
|
239
|
+
|
|
240
|
+
Returns:
|
|
241
|
+
|
|
242
|
+
"""
|
|
243
|
+
print ('not implemented yet! save_modified')
|
|
244
|
+
import shutil
|
|
245
|
+
|
|
246
|
+
_imRotate = sitk.GetImageFromArray(reader.npImage)
|
|
247
|
+
for key in reader.im.GetMetaDataKeys():
|
|
248
|
+
_imRotate.SetMetaData(key, reader.im.GetMetaData(key))
|
|
249
|
+
_imRotate.SetSpacing(reader.ImSpacing)
|
|
250
|
+
_imRotate.SetOrigin(reader.ImOrigin)
|
|
251
|
+
_imRotate.SetDirection(reader.im.GetDirection())
|
|
252
|
+
im = sitk.Flip(_imRotate, [False, True, False])
|
|
253
|
+
sitk.WriteImage(im, '_tmp.nii.gz')
|
|
254
|
+
try:
|
|
255
|
+
fl, ext = os.path.splitext(filename)
|
|
256
|
+
if ext == '.gz':
|
|
257
|
+
fl, ext = os.path.splitext(fl)
|
|
258
|
+
|
|
259
|
+
eco_im = nib.load('_tmp.nii.gz')
|
|
260
|
+
d = eco_im.get_fdata()
|
|
261
|
+
newd = d.astype(np.uint8)
|
|
262
|
+
new_eco_im = nib.Nifti1Image(newd, eco_im.affine, header=eco_im.header)
|
|
263
|
+
path_file = os.path.join(source, fl+ '_modified_.nii.gz')
|
|
264
|
+
shutil.copy(os.path.join(source, fl+'.json'), os.path.join(source, fl+'_modified_.json'))
|
|
265
|
+
new_eco_im.to_filename(path_file)
|
|
266
|
+
except Exception as e:
|
|
267
|
+
print('Error Saving File')
|
|
268
|
+
print(e)
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
###################### Write VTK image as dicom ######################
|
|
273
|
+
def save_as_dicom(VtkImage, meta_data, pathfile):
|
|
274
|
+
"""
|
|
275
|
+
Write VTK Image to dicom image
|
|
276
|
+
Args:
|
|
277
|
+
VtkImage: vtk image data
|
|
278
|
+
meta_data: meta data information
|
|
279
|
+
pathfile: output path
|
|
280
|
+
|
|
281
|
+
Returns:
|
|
282
|
+
|
|
283
|
+
"""
|
|
284
|
+
save_as_nifti(VtkImage, meta_data, '.temp/tmp_1')
|
|
285
|
+
nifti_itk = sitk.ReadImage('.temp/tmp_1.nii.gz')
|
|
286
|
+
castFilter = sitk.CastImageFilter()
|
|
287
|
+
castFilter.SetOutputPixelType(sitk.sitkUInt16)
|
|
288
|
+
|
|
289
|
+
# Convert floating type image (imgSmooth) to int type (imgFiltered)
|
|
290
|
+
nifti_itk = castFilter.Execute(nifti_itk)
|
|
291
|
+
sitk.WriteImage(nifti_itk, pathfile+'.dcm')
|
|
292
|
+
###################### save vtk as nrrd ######################
|
|
293
|
+
def save_as_nrrd(VtkImage, meta_data, pathfile):
|
|
294
|
+
"""
|
|
295
|
+
Write vtk image to nrrd file
|
|
296
|
+
Args:
|
|
297
|
+
VtkImage:
|
|
298
|
+
meta_data:
|
|
299
|
+
pathfile:
|
|
300
|
+
|
|
301
|
+
Returns:
|
|
302
|
+
|
|
303
|
+
"""
|
|
304
|
+
save_as_nifti(VtkImage, meta_data, '.temp/tmp_1')
|
|
305
|
+
nifti_itk = sitk.ReadImage('.temp/tmp_1.nii.gz')
|
|
306
|
+
sitk.WriteImage(nifti_itk, pathfile+'.nrrd')
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
def resample_to_size(im, new_size, scale_factor=None,method='linear'):
|
|
311
|
+
original_image = read_nib_as_sitk(im)
|
|
312
|
+
# Get the current size of the image
|
|
313
|
+
size = original_image.GetSize()
|
|
314
|
+
|
|
315
|
+
# Calculate the scale factor for resizing
|
|
316
|
+
if scale_factor is None:
|
|
317
|
+
scale_factor = [(float(sz)/new_sz)*spc for sz, new_sz, spc in zip(size, new_size, original_image.GetSpacing())]
|
|
318
|
+
|
|
319
|
+
# Resample the image using the scale factor
|
|
320
|
+
resampler = sitk.ResampleImageFilter()
|
|
321
|
+
resampler.SetReferenceImage(original_image)
|
|
322
|
+
resampler.SetOutputSpacing(scale_factor)
|
|
323
|
+
resampler.SetSize([int(el) for el in new_size])
|
|
324
|
+
|
|
325
|
+
resampler.SetOutputOrigin(original_image.GetOrigin())
|
|
326
|
+
resampler.SetOutputDirection(original_image.GetDirection())
|
|
327
|
+
|
|
328
|
+
if method.lower() == 'linear':
|
|
329
|
+
resampler.SetInterpolator(sitk.sitkLinear) # You can choose different interpolators
|
|
330
|
+
else:
|
|
331
|
+
resampler.SetInterpolator(sitk.sitkBSpline) # You can choose different interpolators
|
|
332
|
+
# Perform resampling
|
|
333
|
+
resized_image = resampler.Execute(original_image)
|
|
334
|
+
#inverse_scale_factor = [s1/float(sz) for sz, s1 in zip(*[resized_image.GetSize(), size])]
|
|
335
|
+
return read_sitk_as_nib(resized_image)#, inverse_scale_factor, list(size)
|
|
336
|
+
|
|
337
|
+
def extract_patches(image, patch_size, overlap):
|
|
338
|
+
|
|
339
|
+
patches = []
|
|
340
|
+
strides = [p-o for p, o in zip(patch_size, overlap)]
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
range_x = image.shape[0] + 1
|
|
344
|
+
range_y = image.shape[1] + 1
|
|
345
|
+
range_z = image.shape[2] + 1
|
|
346
|
+
|
|
347
|
+
for x in range(0, range_x, strides[0]):
|
|
348
|
+
for y in range(0, range_y, strides[1]):
|
|
349
|
+
for z in range(0, range_z, strides[2]):
|
|
350
|
+
patch = image[x:x+patch_size[0], y:y+patch_size[1], z:z+patch_size[2]]
|
|
351
|
+
patches.append(patch)
|
|
352
|
+
return patches
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def reconstruct_image(patches, original_size, overlap, patch_size=None):
|
|
356
|
+
reconstructed_image = np.zeros(original_size)
|
|
357
|
+
if patch_size is None:
|
|
358
|
+
patch_size = patches[0].shape
|
|
359
|
+
strides = [p-o for p, o in zip(patch_size, overlap)]
|
|
360
|
+
counts = np.zeros(original_size, dtype=np.int32)
|
|
361
|
+
|
|
362
|
+
idx = 0
|
|
363
|
+
for x in range(0, original_size[0] + 1, strides[0]):
|
|
364
|
+
for y in range(0, original_size[1] + 1, strides[1]):
|
|
365
|
+
for z in range(0, original_size[2] + 1, strides[2]):
|
|
366
|
+
reconstructed_image[x:x + patch_size[0], y:y + patch_size[1], z:z + patch_size[2]] += patches[idx]
|
|
367
|
+
counts[x:x + patch_size[0], y:y + patch_size[1], z:z + patch_size[2]] += 1
|
|
368
|
+
idx += 1
|
|
369
|
+
|
|
370
|
+
# Average the overlapping regions
|
|
371
|
+
reconstructed_image /= counts.astype(float)
|
|
372
|
+
|
|
373
|
+
return reconstructed_image
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
###################### Resample images to desired spacing ######################
|
|
377
|
+
def resample_to_spacing(im, newSpacing, method='spline'):
|
|
378
|
+
try:
|
|
379
|
+
original_image = read_nib_as_sitk(im)
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
# Define the new spacing (voxel size) for resampling
|
|
383
|
+
new_spacing = (newSpacing, newSpacing, newSpacing)
|
|
384
|
+
|
|
385
|
+
# Calculate the new size based on the original size and spacing
|
|
386
|
+
new_size = [int(sz * spc / new_spc + 0.5) for sz, spc, new_spc in
|
|
387
|
+
zip(original_image.GetSize(), original_image.GetSpacing(), new_spacing)]
|
|
388
|
+
|
|
389
|
+
# Set up the resampling filter
|
|
390
|
+
resampler = sitk.ResampleImageFilter()
|
|
391
|
+
resampler.SetSize(new_size)
|
|
392
|
+
resampler.SetOutputSpacing(new_spacing)
|
|
393
|
+
resampler.SetOutputOrigin(original_image.GetOrigin())
|
|
394
|
+
resampler.SetOutputDirection(original_image.GetDirection())
|
|
395
|
+
if method.lower()=='linear':
|
|
396
|
+
resampler.SetInterpolator(sitk.sitkLinear) # You can choose different interpolators
|
|
397
|
+
else:
|
|
398
|
+
resampler.SetInterpolator(sitk.sitkBSpline) # You can choose different interpolators
|
|
399
|
+
|
|
400
|
+
# Perform resampling
|
|
401
|
+
resampled_image = resampler.Execute(original_image)
|
|
402
|
+
|
|
403
|
+
return read_sitk_as_nib(resampled_image)
|
|
404
|
+
except:
|
|
405
|
+
from nibabel.processing import resample_to_output
|
|
406
|
+
return resample_to_output(im, [newSpacing, newSpacing, newSpacing])
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
###################### Help dialogue to open new image ######################
|
|
410
|
+
def help_dialogue_open_image(path):
|
|
411
|
+
try:
|
|
412
|
+
im = nib.load(path).dataobj
|
|
413
|
+
except:
|
|
414
|
+
from pydicom.filereader import dcmread
|
|
415
|
+
im = dcmread(path).pixel_array
|
|
416
|
+
if len(im.shape)==4:
|
|
417
|
+
d = im[...,0]
|
|
418
|
+
s1 = d[im.shape[0] // 2, :, :]
|
|
419
|
+
s2 = d[:, im.shape[1] // 2, :]
|
|
420
|
+
s3 = d[:, :, im.shape[2] // 2]
|
|
421
|
+
elif len(im.shape)==3:
|
|
422
|
+
d = im
|
|
423
|
+
s1 = d[im.shape[0] // 2, :, :]
|
|
424
|
+
s2 = d[:, im.shape[1] // 2, :]
|
|
425
|
+
s3 = d[:, :, im.shape[2] // 2]
|
|
426
|
+
elif len(im.shape)==2:
|
|
427
|
+
d = im
|
|
428
|
+
s1 = d
|
|
429
|
+
s2 = s1
|
|
430
|
+
s3 = s1
|
|
431
|
+
else:
|
|
432
|
+
return
|
|
433
|
+
|
|
434
|
+
size = 200
|
|
435
|
+
s1 = image_resize_skimage(s1, [size, size])[::-1]
|
|
436
|
+
s2 = image_resize_skimage(s2, [size, size])
|
|
437
|
+
s3 = image_resize_skimage(s3, [size, size])
|
|
438
|
+
s0 = np.zeros((size * 2, size * 2))
|
|
439
|
+
s0[:size, :size] = s1
|
|
440
|
+
s0[:size, size:] = s2
|
|
441
|
+
s0[size:, size - size // 2:size + size // 2] = s3
|
|
442
|
+
s = image_rotate_skimage(s0, 90)
|
|
443
|
+
s = s / s.max()
|
|
444
|
+
# pixmap = QPixmap(fileName+'.jpg')
|
|
445
|
+
s *= 255
|
|
446
|
+
s = np.expand_dims(s, -1)
|
|
447
|
+
s = s.astype(np.uint8)
|
|
448
|
+
return s
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
|
|
452
|
+
def calculate_snr(image_array):
|
|
453
|
+
from scipy.ndimage import gaussian_filter
|
|
454
|
+
# Calculate mean signal intensity
|
|
455
|
+
mean_signal = np.mean(image_array)
|
|
456
|
+
|
|
457
|
+
# Calculate standard deviation of the noise
|
|
458
|
+
# For simplicity, consider the noise as the difference between the original image and a smoothed version
|
|
459
|
+
|
|
460
|
+
smoothed_image = gaussian_filter(image_array, sigma=2.0)
|
|
461
|
+
noise_array = np.abs(image_array - (smoothed_image))
|
|
462
|
+
std_noise = np.std(noise_array)
|
|
463
|
+
|
|
464
|
+
# Calculate SNR
|
|
465
|
+
snr = mean_signal / std_noise
|
|
466
|
+
|
|
467
|
+
return snr
|
|
468
|
+
|
|
469
|
+
###################### A class to resize image######################
|
|
470
|
+
|
|
471
|
+
class resize_window(QtWidgets.QDialog):
|
|
472
|
+
from PyQt5.QtCore import pyqtSignal
|
|
473
|
+
closeSig = pyqtSignal()
|
|
474
|
+
resizeim = pyqtSignal(object)
|
|
475
|
+
comboboxCh = pyqtSignal(object, object)
|
|
476
|
+
"""
|
|
477
|
+
A dialog for combo box created for reading 4d images
|
|
478
|
+
"""
|
|
479
|
+
def __init__(self, parent=None, use_combobox=False):
|
|
480
|
+
QtWidgets.QDialog.__init__(self, parent)
|
|
481
|
+
self.setWindowTitle("Child Window!")
|
|
482
|
+
Dialog = self.window()
|
|
483
|
+
self.use_combobox = use_combobox
|
|
484
|
+
self.setupUi(Dialog)
|
|
485
|
+
self.check_box.setCheckState(Qt.Checked)
|
|
486
|
+
self._status = False
|
|
487
|
+
|
|
488
|
+
def setupUi(self, Dialog):
|
|
489
|
+
|
|
490
|
+
Dialog.setObjectName("Dialog")
|
|
491
|
+
Dialog.resize(500, 112)
|
|
492
|
+
self.grid_main = QtWidgets.QGridLayout(self)
|
|
493
|
+
self.grid_main.setContentsMargins(10,10,10,10)
|
|
494
|
+
self.grid_main.setObjectName("gridLayout")
|
|
495
|
+
|
|
496
|
+
self.hbox_0 = QtWidgets.QHBoxLayout()
|
|
497
|
+
self.label_warning = QtWidgets.QLabel()
|
|
498
|
+
self.label_warning.setText('The pixels are not isotropic. Do you want to resize image (isotropic)?')
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
self.hbox_0.addWidget(self.label_warning)
|
|
502
|
+
|
|
503
|
+
_translate = QtCore.QCoreApplication.translate
|
|
504
|
+
self.pushbutton = QtWidgets.QDialogButtonBox()
|
|
505
|
+
self.pushbutton_cancel = QtWidgets.QPushButton()
|
|
506
|
+
self.pushbutton.setStandardButtons(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
|
|
507
|
+
self.pushbutton.accepted.connect(self.accept_it)
|
|
508
|
+
self.pushbutton.rejected.connect(self.reject_it)
|
|
509
|
+
|
|
510
|
+
#self.hbox_0.addWidget(self.pushbutton)
|
|
511
|
+
|
|
512
|
+
if self.use_combobox:
|
|
513
|
+
self.comboBox_image = QtWidgets.QComboBox()
|
|
514
|
+
self.comboBox_image.setObjectName("comboBox_image")
|
|
515
|
+
self.comboBox_image.addItem("")
|
|
516
|
+
self.comboBox_image.addItem("")
|
|
517
|
+
self.comboBox_image.currentIndexChanged.connect(self.comboBOX_changed)
|
|
518
|
+
#
|
|
519
|
+
self.hbox_0.addWidget(self.comboBox_image)
|
|
520
|
+
self.grid_main.addLayout(self.hbox_0, 0, 0, 1, 1)
|
|
521
|
+
else:
|
|
522
|
+
self.grid_main.addLayout(self.hbox_0, 0, 0, 1, 1)
|
|
523
|
+
|
|
524
|
+
self.grid_main.addWidget(self.pushbutton, 3, 0, 1, 1)
|
|
525
|
+
self.check_box = QtWidgets.QCheckBox()
|
|
526
|
+
|
|
527
|
+
self.check_box.stateChanged.connect(self.changeAllSpacing)
|
|
528
|
+
|
|
529
|
+
|
|
530
|
+
self.label_current_spc0 = QtWidgets.QLabel()
|
|
531
|
+
self.label_current_spc0.setText('Current Spacing')
|
|
532
|
+
|
|
533
|
+
self.label_current_spc = QtWidgets.QLabel()
|
|
534
|
+
#self.label_current_spc.setReadOnly(True)
|
|
535
|
+
self.label_current_spc.setText('0,0,0')
|
|
536
|
+
|
|
537
|
+
|
|
538
|
+
self.radioButton_1 = QtWidgets.QCheckBox()
|
|
539
|
+
|
|
540
|
+
self.radioButton_1.setObjectName("radioButton_1")
|
|
541
|
+
self.radioButton_1.setChecked(False)
|
|
542
|
+
|
|
543
|
+
self.hbox = QtWidgets.QHBoxLayout()
|
|
544
|
+
self.hbox.addWidget(self.check_box)
|
|
545
|
+
self.hbox.addWidget(self.radioButton_1)
|
|
546
|
+
self.hbox.addWidget(self.label_current_spc0)
|
|
547
|
+
self.hbox.addWidget(self.label_current_spc)
|
|
548
|
+
|
|
549
|
+
|
|
550
|
+
self.hbox2 = QtWidgets.QHBoxLayout()
|
|
551
|
+
self.label_new_spc0 = QtWidgets.QLabel()
|
|
552
|
+
self.label_new_spc0.setText('New Spacing')
|
|
553
|
+
self.label_new_spc0.setStyleSheet('color: Red')
|
|
554
|
+
|
|
555
|
+
self.label_new_spc = QtWidgets.QDoubleSpinBox(self)
|
|
556
|
+
self.label_new_spc_y = QtWidgets.QDoubleSpinBox(self)
|
|
557
|
+
self.label_new_spc_z = QtWidgets.QDoubleSpinBox(self)
|
|
558
|
+
|
|
559
|
+
for spin_box in (self.label_new_spc, self.label_new_spc_y, self.label_new_spc_z):
|
|
560
|
+
spin_box.setMinimum(0.01)
|
|
561
|
+
spin_box.setMaximum(20)
|
|
562
|
+
spin_box.setSingleStep(0.1)
|
|
563
|
+
spin_box.setValue(1.0)
|
|
564
|
+
spin_box.setDecimals(2)
|
|
565
|
+
|
|
566
|
+
self.hbox2.addWidget(self.label_new_spc0)
|
|
567
|
+
self.hbox2.addWidget(self.label_new_spc)
|
|
568
|
+
self.hbox2.addWidget(self.label_new_spc_y)
|
|
569
|
+
self.hbox2.addWidget(self.label_new_spc_z)
|
|
570
|
+
|
|
571
|
+
|
|
572
|
+
#self.pushbutton.setText(_translate("Dialog", "OK"))
|
|
573
|
+
#self.grid_main.addWidget(self.label_warning, 0,0,1,1)
|
|
574
|
+
self.grid_main.addLayout(self.hbox, 1,0,1,1)
|
|
575
|
+
self.grid_main.addLayout(self.hbox2,2,0,1,1)
|
|
576
|
+
|
|
577
|
+
#self.hbox3 = QtWidgets.QHBoxLayout()
|
|
578
|
+
|
|
579
|
+
|
|
580
|
+
#self.grid_main.addLayout(self.hbox_0, 3, 0, 1, 1)
|
|
581
|
+
|
|
582
|
+
|
|
583
|
+
self.retranslateUi(Dialog)
|
|
584
|
+
|
|
585
|
+
QtCore.QMetaObject.connectSlotsByName(Dialog)
|
|
586
|
+
|
|
587
|
+
def changeAllSpacing(self, state):
|
|
588
|
+
if state == QtCore.Qt.Checked:
|
|
589
|
+
# Connect the change signal to synchronize function
|
|
590
|
+
self.label_new_spc.valueChanged.connect(self.synchronizeSpacings)
|
|
591
|
+
self.label_new_spc_y.valueChanged.connect(self.synchronizeSpacings)
|
|
592
|
+
self.label_new_spc_z.valueChanged.connect(self.synchronizeSpacings)
|
|
593
|
+
else:
|
|
594
|
+
# Disconnect the synchronize function to allow independent changes
|
|
595
|
+
self.label_new_spc.valueChanged.disconnect(self.synchronizeSpacings)
|
|
596
|
+
self.label_new_spc_y.valueChanged.disconnect(self.synchronizeSpacings)
|
|
597
|
+
self.label_new_spc_z.valueChanged.disconnect(self.synchronizeSpacings)
|
|
598
|
+
|
|
599
|
+
def synchronizeSpacings(self, value):
|
|
600
|
+
# Set the value of all spin boxes to the changed value
|
|
601
|
+
self.label_new_spc.blockSignals(True)
|
|
602
|
+
self.label_new_spc_y.blockSignals(True)
|
|
603
|
+
self.label_new_spc_z.blockSignals(True)
|
|
604
|
+
self.label_new_spc.setValue(value)
|
|
605
|
+
self.label_new_spc_y.setValue(value)
|
|
606
|
+
self.label_new_spc_z.setValue(value)
|
|
607
|
+
self.label_new_spc.blockSignals(False)
|
|
608
|
+
self.label_new_spc_y.blockSignals(False)
|
|
609
|
+
self.label_new_spc_z.blockSignals(False)
|
|
610
|
+
|
|
611
|
+
def comboBOX_changed(self):
|
|
612
|
+
ind = self.comboBox_image.currentIndex()
|
|
613
|
+
self.comboboxCh.emit(None, ind)
|
|
614
|
+
|
|
615
|
+
def retranslateUi(self, Dialog):
|
|
616
|
+
_translate = QtCore.QCoreApplication.translate
|
|
617
|
+
Dialog.setWindowTitle(_translate("Dialog", "Resize..."))
|
|
618
|
+
|
|
619
|
+
_translate = QtCore.QCoreApplication.translate
|
|
620
|
+
self.radioButton_1.setText(_translate("Main", "Linear"))
|
|
621
|
+
self.check_box.setText(_translate("Main", "Isotropic"))
|
|
622
|
+
if self.use_combobox:
|
|
623
|
+
self.comboBox_image.setItemText(0, _translate("Form", " Top Image "))
|
|
624
|
+
self.comboBox_image.setItemText(1, _translate("Form", " Bottom Image "))
|
|
625
|
+
def closeEvent(self, a0: QtGui.QCloseEvent) -> None:
|
|
626
|
+
self.closeSig.emit()
|
|
627
|
+
super(resize_window, self).closeEvent(a0)
|
|
628
|
+
def accept_it(self):
|
|
629
|
+
self._status = True
|
|
630
|
+
if self.use_combobox:
|
|
631
|
+
index = self.comboBox_image.currentIndex()
|
|
632
|
+
self.resizeim.emit(index)
|
|
633
|
+
self.accept()
|
|
634
|
+
|
|
635
|
+
def reject_it(self):
|
|
636
|
+
self._status = False
|
|
637
|
+
self.reject()
|
|
638
|
+
|
|
639
|
+
########### COMBO BOX To read 4D images #####################
|
|
640
|
+
class ComboBox_Dialog(QtWidgets.QDialog):
|
|
641
|
+
"""
|
|
642
|
+
A dialog for combo box created for reading 4d images
|
|
643
|
+
"""
|
|
644
|
+
selectedInd = QtCore.pyqtSignal(object)
|
|
645
|
+
def __init__(self, parent=None):
|
|
646
|
+
QtWidgets.QDialog.__init__(self, parent)
|
|
647
|
+
self.setWindowTitle("Child Window!")
|
|
648
|
+
Dialog = self.window()
|
|
649
|
+
self.setupUi(Dialog)
|
|
650
|
+
|
|
651
|
+
def accepted_emit(self):
|
|
652
|
+
ind = self.comboBox.currentIndex()
|
|
653
|
+
self.selectedInd = ind
|
|
654
|
+
self.accept()
|
|
655
|
+
def reject_emit(self):
|
|
656
|
+
self.selectedInd = None
|
|
657
|
+
self.reject()
|
|
658
|
+
def setupUi(self, Dialog):
|
|
659
|
+
Dialog.setObjectName("Dialog")
|
|
660
|
+
Dialog.resize(500, 112)
|
|
661
|
+
self.splitter = QtWidgets.QSplitter(Dialog)
|
|
662
|
+
self.splitter.setGeometry(QtCore.QRect(10, 10, 480, 91))
|
|
663
|
+
self.splitter.setOrientation(QtCore.Qt.Vertical)
|
|
664
|
+
self.splitter.setObjectName("splitter")
|
|
665
|
+
self.comboBox = QtWidgets.QComboBox(self.splitter)
|
|
666
|
+
self.comboBox.setObjectName("comboBox")
|
|
667
|
+
cbstyle = """
|
|
668
|
+
QComboBox QAbstractItemView {border: 1px solid grey;
|
|
669
|
+
background: #03211c;
|
|
670
|
+
selection-background-color: #03211c;}
|
|
671
|
+
QComboBox {background: #03211c;margin-right: 1px;}
|
|
672
|
+
QComboBox::drop-down {
|
|
673
|
+
subcontrol-origin: margin;}
|
|
674
|
+
"""
|
|
675
|
+
self.comboBox.setStyleSheet(cbstyle)
|
|
676
|
+
self.buttonBox = QtWidgets.QDialogButtonBox(self.splitter)
|
|
677
|
+
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
|
|
678
|
+
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
|
|
679
|
+
self.buttonBox.setObjectName("buttonBox")
|
|
680
|
+
|
|
681
|
+
self.retranslateUi(Dialog)
|
|
682
|
+
self.buttonBox.accepted.connect(self.accepted_emit)
|
|
683
|
+
self.buttonBox.rejected.connect(self.reject_emit)
|
|
684
|
+
QtCore.QMetaObject.connectSlotsByName(Dialog)
|
|
685
|
+
|
|
686
|
+
def retranslateUi(self, Dialog):
|
|
687
|
+
_translate = QtCore.QCoreApplication.translate
|
|
688
|
+
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
|
|
689
|
+
|
|
690
|
+
########### normalizing DWI images #####################
|
|
691
|
+
def norm_dti(vec):
|
|
692
|
+
normc = np.linalg.norm(vec)
|
|
693
|
+
if normc>1e-4:
|
|
694
|
+
vec/= normc
|
|
695
|
+
return vec
|
|
696
|
+
|
|
697
|
+
|
|
698
|
+
########### combo box to read images #####################
|
|
699
|
+
def create_combo_box_new(seridsc_total, sizes):
|
|
700
|
+
"""
|
|
701
|
+
|
|
702
|
+
:param seridsc_total:
|
|
703
|
+
:param sizes:
|
|
704
|
+
:return:
|
|
705
|
+
"""
|
|
706
|
+
combo = ComboBox_Dialog()
|
|
707
|
+
r = 0
|
|
708
|
+
for seridsc, size in zip(seridsc_total, sizes):
|
|
709
|
+
combo.comboBox.addItem("{} {} shape: {}".format(seridsc, r, size))
|
|
710
|
+
r += 1
|
|
711
|
+
return combo
|
|
712
|
+
|
|
713
|
+
def show_message_box(text = 'There is no file to read'):
|
|
714
|
+
"""
|
|
715
|
+
Display a message box
|
|
716
|
+
Args:
|
|
717
|
+
text: the text content of a message box
|
|
718
|
+
Returns:
|
|
719
|
+
|
|
720
|
+
"""
|
|
721
|
+
MessageBox = QtWidgets.QMessageBox()
|
|
722
|
+
MessageBox.setText(text)
|
|
723
|
+
MessageBox.setWindowTitle('Warning')
|
|
724
|
+
MessageBox.show()
|
|
725
|
+
|
|
726
|
+
###################### change image system ####################
|
|
727
|
+
def convert_to_ras(affine, target = "RAS"):
|
|
728
|
+
"""
|
|
729
|
+
Args:
|
|
730
|
+
affine: affine matrix
|
|
731
|
+
target: target system
|
|
732
|
+
|
|
733
|
+
Returns:
|
|
734
|
+
|
|
735
|
+
"""
|
|
736
|
+
from nibabel.orientations import aff2axcodes, axcodes2ornt, ornt_transform
|
|
737
|
+
orig_orient = nib.io_orientation(affine)
|
|
738
|
+
source_system = ''.join(list(aff2axcodes(affine, code_direction)))# get direction
|
|
739
|
+
target_orient = axcodes2ornt(target, code_direction)
|
|
740
|
+
transform = ornt_transform(orig_orient, target_orient)
|
|
741
|
+
|
|
742
|
+
return transform, source_system
|
|
743
|
+
|
|
744
|
+
###################### identify current coordinate system ####################
def getCurrentCoordSystem(affine):
    from nibabel.orientations import aff2axcodes
    orig_orient = nib.io_orientation(affine)
    source_system = ''.join(list(aff2axcodes(affine, code_direction)))  # get direction
    return source_system

def is_valid_format(file, type_f='nifti'):
    #if type_f == 'nifti':
    valid_exts = [".nia", ".nii", ".nii.gz", ".hdr", ".img", ".img.gz", ".mgz"]
    status = any(file.endswith(ext) for ext in valid_exts)
    if status:
        return "nifti", True
    valid_exts = [".nhdr", ".nrrd"]
    status = any(file.endswith(ext) for ext in valid_exts)
    if status:
        return "nrrd", True
    valid_exts = [".dcm"]
    status = any(file.endswith(ext) for ext in valid_exts)
    if status:
        return "dicom", True
    else:
        return "none", False

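# Illustrative sketch, not part of the original module: what is_valid_format returns for a
# couple of hypothetical file names.
def _example_is_valid_format():
    # -> ('nifti', True), ('none', False)
    return is_valid_format('brain_scan.nii.gz'), is_valid_format('notes.txt')
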
def dicom_series_to_nib(dicom_file):
    # Read the DICOM series
    dicom_dir = os.path.dirname(dicom_file)
    reader = sitk.ImageSeriesReader()
    series_IDs = reader.GetGDCMSeriesIDs(dicom_dir)

    if not series_IDs:
        return None

    series_file_names = reader.GetGDCMSeriesFileNames(dicom_dir, series_IDs[0])
    reader.SetFileNames(series_file_names)
    sitk_image = reader.Execute()

    # Convert SimpleITK image to numpy array and affine
    affine = make_affine(sitk_image)  # shape: (slices, height, width)

    nib_im = nib.Nifti1Image(sitk.GetArrayFromImage(sitk_image).transpose(), affine)
    return nib_im

###################### Read Segmentation file ##################
def read_segmentation_file(self, file, reader, update_color_s=True):
    """
    Read a segmentation file
    Args:
        self:
        file: path of the segmentation file
        reader: image reader whose segmentation is being updated
        update_color_s: whether to update the color scheme

    Returns: image data and state
    """
    #from nibabel.orientations import apply_orientation
    type_found, val_stat = is_valid_format(file, type_f='nifti')
    if not val_stat:
        return 0, False, True
    if type_found == 'nifti':
        im = nib.load(file)  # read image
        affine = im.affine
    elif type_found == 'nrrd':
        import nrrd
        data, header = nrrd.read(file)

        img = sitk.ReadImage(file)
        if 'space directions' in header and 'space origin' in header:
            directions = np.array(header['space directions'])
            origin = np.array(header['space origin'])
            affine = np.eye(4)
            affine[:3, :3] = directions
            affine[:3, 3] = origin
        else:
            affine = np.eye(4)  # fallback
        im = nib.Nifti1Image(data, affine)
    elif type_found == 'dicom':
        im = dicom_series_to_nib(file)
        if im is None:  # check before touching im.affine
            return 0, False, True
        affine = im.affine

    """
    if abs(reader.affine-affine).max()>0.01:
        from nibabel.processing import resample_from_to
        from scipy.ndimage import map_coordinates

        img_affine = np.round(reader.im.affine,1)
        img_shape = reader.im.shape

        seg_data = im.get_fdata()
        seg_affine = np.round(affine,1)

        coords = np.meshgrid(
            np.arange(img_shape[0]),
            np.arange(img_shape[1]),
            np.arange(img_shape[2]),
            indexing='ij'
        )
        coords = np.vstack([c.reshape(-1) for c in coords]) # shape (3, N)

        # Convert image voxel coords -> world coords
        world_coords = img_affine[:3, :3] @ coords + img_affine[:3, 3:4]

        world_coords2 = seg_affine[:3, :3] @ coords + seg_affine[:3, 3:4]

        # Convert world coords -> segmentation voxel coords
        seg_vox_coords = np.linalg.inv(seg_affine[:3, :3]) @ (world_coords - seg_affine[:3, 3:4])

        # Resample segmentation using nearest neighbor
        seg_resampled_flat = map_coordinates(seg_data.astype('float'), seg_vox_coords, order=0, mode='nearest')
        seg_resampled = seg_resampled_flat.reshape(img_shape)
        im = nib.Nifti1Image(seg_resampled, reader.im.affine)
    """
    im.get_data_dtype()
    if im.ndim == 4:
        from nibabel.funcs import four_to_three
        im = four_to_three(im)[0]  # select the first image

    transform, _ = convert_to_ras(im.affine, target=reader.target_system)
    im = im.as_reoriented(transform)

    data = im.get_fdata()
    data = data.transpose(2, 1, 0)[::-1, ::-1, ::-1]  # new march 17, 2025 #BJ
    if not np.issubdtype(data.dtype, np.integer):
        data = np.round(data).astype('int')
    if not all([i == j for i, j in zip(reader.npImage.shape, data.shape)]):
        return data, True, False
    data_add = None
    if rhasattr(self, 'readImECO.npSeg'):
        if reader != self.readImECO:
            data_add = self.readImECO.npSeg
    if rhasattr(self, 'readImMRI.npSeg'):
        if reader != self.readImMRI:
            data_add = self.readImMRI.npSeg
    uq = np.unique(data)
    if uq.shape[0] > 255:
        if data.max() > 80:
            ind_g = data > 50
            data[ind_g] = 1
            data[~ind_g] = 0
        elif data.max() < 1:
            ind_g = data == 0
            data[~ind_g] = 1
    #if len(uq)==2:
    #    ind = data==0
    #    data[~ind]=1
    if update_color_s:
        data, state = update_color_scheme(self, data, data_add=data_add)

        return data, state, True
    else:
        return data, True, True

###################### Manually check items in color tree ##################
def manually_check_tree_item(self, txt='9876'):
    """
    Check the tree items whose text matches the given id
    Args:
        self: self
        txt: item id

    Returns: indices of the rows that were checked
    """
    root = self.tree_colors.model().sourceModel().invisibleRootItem()
    num_rows = root.rowCount()
    ls = [i for i in range(num_rows) if
          root.child(i).text() == txt]
    for l in ls:
        root.child(l).setCheckState(Qt.Checked)
    return ls

###################### Updating current color scheme ##################
def update_color_scheme(self, data, data_add=None, dialog=True, update_widget=True):
    """
    Update the current color scheme
    Args:
        self:
        data: segmentation labels
        data_add: labels from the other viewer, if any
        dialog: ask the user for a color file
        update_widget: propagate the scheme to the GL widgets
    Returns: (data, state)
    """
    if data_add is not None:
        uq1 = [l for l in np.unique(data_add) if l > 0]
    else:
        uq1 = []
    if data is None:
        data = np.array([0, 1])
    uq = [l for l in np.unique(data).astype('int') if l > 0]
    if len(uq) > 255:
        return data, False
    uq = list(set(uq) | set(uq1))
    list_dif = list(set(uq) - set(self.color_index_rgb[:, 0]))
    if len(list_dif) < 1:
        return data, True
    else:
        if dialog:
            from PyQt5.QtWidgets import QFileDialog
            filters = "TXT(*.txt)"
            opts = QFileDialog.DontUseNativeDialog
            fileObj = QFileDialog.getOpenFileName(self, "Open COLOR File", self.source_dir, filters, options=opts)
            filen = fileObj[0]
        else:
            filen = ''
        from_one = False

        if filen == '':
            print('automatic files')
            len_u = len(np.unique(data))
            if len_u <= 2:
                filen = source_folder + '/color/Simple.txt'
            elif len_u <= 9:
                filen = source_folder + '/color/Tissue.txt'
            elif len_u < 90:
                filen = source_folder + '/color/albert_LUT.txt'
            else:
                filen = source_folder + '/color/mcrib_LUT.txt'
            from_one = False
        try:
            possible_color_name, possible_color_index_rgb, _ = read_txt_color(filen, from_one=from_one, mode='albert')
        except:
            try:
                possible_color_name, possible_color_index_rgb, _ = read_txt_color(filen, from_one=from_one)
            except:
                return data, False
        #uq = np.unique(data)

        set_not_in_new_list = set(uq) - (set(possible_color_index_rgb[:, 0].astype('int')))
        set_kept_new_list = set_not_in_new_list - (set_not_in_new_list - set(self.color_index_rgb[:, 0].astype('int')))
        set_create_new_list = set_not_in_new_list - set_kept_new_list
        for element in list(set_kept_new_list):
            new_color_rgb = self.color_index_rgb[self.color_index_rgb[:, 0] == element, :]
            possible_color_index_rgb = np.vstack((possible_color_index_rgb, new_color_rgb))
            try:
                new_colr_name = [l for l in self.color_name if l.split('_')[0] == str(element)][0]
            except:
                r, l = [[r, l] for r, l in enumerate(self.color_name) if l.split('_')[0] == str(float(element))][0]
                l2 = str(int(float(l.split('_fre')[0]))) + '_' + '_'.join(l.split('_')[1:])
                self.color_name[r] = l2
                new_colr_name = [l for l in self.color_name if l.split('_')[0] == str(element)][0]
            possible_color_name.append(new_colr_name)

        for element in set_create_new_list:
            new_colr_name = '{}_structure_unknown'.format(element)
            possible_color_name.append(new_colr_name)
            new_color_rgb = [element, np.random.rand(), np.random.rand(), np.random.rand(), 1]
            possible_color_index_rgb = np.vstack((possible_color_index_rgb, np.array(new_color_rgb)))
        if 9876 not in possible_color_index_rgb[:, 0]:
            new_colr_name = '9876_Combined'
            new_color_rgb = [9876, 1, 0, 0, 1]
            possible_color_name.append(new_colr_name)
            possible_color_index_rgb = np.vstack((possible_color_index_rgb, np.array(new_color_rgb)))

        #self.color_index_rgb, self.color_name, self.colorsCombinations = combinedIndex(self.colorsCombinations, possible_color_index_rgb, possible_color_name, np.unique(data), uq1)
        self.color_index_rgb, self.color_name, self.colorsCombinations = generate_color_scheme_info(possible_color_index_rgb, possible_color_name)
        try:
            #self.dw2_cb.currentTextChanged.disconnect(self.changeColorPen)
            self.tree_colors.itemChanged.disconnect(self.changeColorPen)
        except:
            pass

        set_new_color_scheme(self)
        try:
            #self.dw2_cb.currentTextChanged.connect(self.changeColorPen)
            self.tree_colors.itemChanged.connect(self.changeColorPen)
        except:
            pass

        if update_widget:
            update_widget_color_scheme(self)
        return data, True

###################### Add ultimate color ##################
def addLastColor(self, last_color):
    """
    Add ultimate color if it does not exist
    Args:
        self:
        last_color:

    Returns:

    """
    if last_color not in self.color_name:
        rm = int(float(last_color.split('_')[0]))
        self.color_name.append(last_color)
        self.colorsCombinations[rm] = [1, 0, 0, 1]
        if rm == 9876:
            clr = [rm, 1, 0, 0, 1]
        else:
            clr = [rm, np.random.rand(), np.random.rand(), np.random.rand(), 1]
        self.color_index_rgb = np.vstack((self.color_index_rgb, np.array(clr)))

###################### Add new tree widget ##################
def add_new_tree_widget(self, newindex, newText, color_rgb):
    """
    Adding new tree widget
    Args:
        self:
        newindex:
        newText:
        color_rgb:

    Returns:

    """
    int_index = int(float(newindex))
    self.colorsCombinations[int_index] = color_rgb
    parent = self.tree_colors.model().sourceModel().invisibleRootItem()
    addTreeRoot(parent, newindex, newText, color_rgb)
    new_color_name = newindex + '_' + newText
    if new_color_name not in self.color_name:
        self.color_name.append(new_color_name)
    if int_index not in self.color_index_rgb[:, 0]:
        clr = color_rgb.copy()
        clr.insert(0, int_index)
        self.color_index_rgb = np.vstack((self.color_index_rgb, np.array(clr)))

    manually_check_tree_item(self, newindex)

###################### Adapt to previous version ##################
def adapt_previous_versions(self):
    """
    Adapt to the previous version of MELAGE
    Args:
        self:

    Returns:

    """
    rm = 9876
    if rm not in self.colorsCombinations:
        last_color = '9876_Combined'
        self.colorsCombinations[rm] = [1, 0, 0, 1]
        if last_color not in self.color_name:
            self.color_name.append(last_color)
        if rm not in self.color_index_rgb[:, 0]:
            clr = [rm, 1, 0, 0, 1]
            self.color_index_rgb = np.vstack((self.color_index_rgb, np.array(clr)))

###################### Updating widget colors ##################
def update_widget_color_scheme(self):
    widgets_num = [0, 1, 2, 3, 4, 5, 10, 11, 13, 23]
    for num in widgets_num:
        name = 'openGLWidget_' + str(num + 1)
        widget = getattr(self, name)
        if hasattr(widget, 'colorsCombinations'):
            widget.colorsCombinations = self.colorsCombinations
        if hasattr(widget, 'color_name'):
            widget.color_name = self.color_name

###################### Make segmentation visible ##################
def make_all_seg_visibl(self):
    """
    This function makes all segmentations visible
    Args:
        self:

    Returns:

    """
    widgets = find_avail_widgets(self)
    prefix = 'openGLWidget_'
    ind = 9876  #self.dw2Text.index('X_Combined')+1#len(self.colorsCombinations)
    #self.dw2_cb.setCurrentIndex(ind)
    #self.dw2_cb.setCurrentText('9876_Combined')
    manually_check_tree_item(self, '9876')
    colorPen = [1, 0, 0, 1]  #self.colorsCombinations[ind]
    for k in widgets:
        name = prefix + str(k)
        widget = getattr(self, name)
        widget.colorInd = ind
        if k in [14]:
            widget.paint(self.readImECO.npSeg,
                         self.readImECO.npImage, None)
        elif k in [24]:
            widget.paint(self.readImMRI.npSeg,
                         self.readImMRI.npImage, None)
        else:
            widget.colorObject = colorPen
            #widget.colorInd = len(self.colorsCombinations)
            widget.makeObject()
            widget.update()

###################### Compute volume according to the selected region ##################
def compute_volume(reader, filename, inds, in_txt=None, ind_screen=0):
    """
    Compute total volume of visible structures
    Args:
        reader: image reader holding the segmentation
        filename: file name shown in the status text
        inds: indices of the visible structures
        in_txt: previous status text, if any
        ind_screen: 0 for the first view, 1 for the second (MRI) view
    Returns: updated status text
    """
    if 9876 in inds:
        vol = (reader.npSeg > 0).sum() * reader.ImSpacing[0] ** 3 / 1000
    else:
        vol = 0
        for ind in inds:
            vol += (reader.npSeg == ind).sum()
        vol *= reader.ImSpacing[0] ** 3 / 1000
    #txt = 'File: {}, '.format(filename)
    if in_txt is None:  # guard before calling find on it
        in_txt = ';'
    division_ind = in_txt.find(' ; ')
    if division_ind == -1:
        in_txt = ';'
    if ind_screen == 1:  #'MRI'
        if division_ind != 0:
            kept_part = in_txt[:division_ind]
        else:
            kept_part = ''
    else:
        kept_part = in_txt[division_ind + 2:]
    if len(filename) > 10:
        txt = '{}..., '.format(filename[:10])
    else:
        txt = '{}, '.format(filename)

    if ind_screen == 1:  #'MRI'
        txt += 'Vol : {0:0.2f} cm\u00b3'.format((vol))
        #if len(kept_part)>0:
        out_txt = kept_part.replace(' ', '') + ' ; ' + txt
        #else:
        #    out_txt = txt
    else:
        txt += 'Vol : {0:0.2f} cm\u00b3'.format((vol))
        #if len(kept_part)>0:
        out_txt = txt + ' ; ' + kept_part.replace(' ', '')
        #else:
        #    out_txt = txt
    return out_txt

###################### Unique of an array ##################
def getUnique(mat):
    return np.unique(mat)

###################### Generate info for color schemes ##################
def generate_color_scheme_info(color_index_rgb, color_name):
    """
    Generate color scheme information
    Args:
        color_index_rgb:
        color_name:

    Returns:

    """
    new_colorsCombinations = defaultdict(list)
    for color_index in color_index_rgb[:, 0]:
        ind_l = color_index_rgb[:, 0] == color_index
        new_colorsCombinations[color_index] = [color_index_rgb[ind_l, 1][0], color_index_rgb[ind_l, 2][0],
                                               color_index_rgb[ind_l, 3][0], 1]

    return color_index_rgb, color_name, new_colorsCombinations

################# SET COLOR SCHEME ###########################
def set_new_color_scheme(self):
    """
    SET new color scheme
    Args:
        self:

    Returns:

    """
    #from widgets.tree_widget import TreeWidgetItem
    if self.color_index_rgb is None:
        import matplotlib
        import matplotlib.cm
        normalized = matplotlib.colors.Normalize(vmin=0, vmax=len(self.color_name))
        colors = []
        for i in range(len(self.color_name)):
            color = list(matplotlib.cm.tab20c(normalized(i)))
            color.insert(0, i + 1)
            colors.append(color)
        self.color_index_rgb = np.array(colors)

    ######################################
    if hasattr(self, 'tree_colors'):
        self.tree_colors.model().sourceModel().clear()
        self.tree_colors.model().sourceModel().setColumnCount(2)
        self.tree_colors.model().sourceModel().setHorizontalHeaderLabels(['Index', 'Name'])
        parent = self.tree_colors.model().sourceModel().invisibleRootItem()
        for i in range(len(self.color_name)):
            cln = self.color_name[i]
            indc, descp = cln.split('_')[0], '_'.join(cln.split('_')[1:])
            try:
                clrvalue = self.color_index_rgb[self.color_index_rgb[:, 0] == int(float(indc)), :][0]
            except:
                continue  # skip entries with no matching color; clrvalue would otherwise be undefined
            addTreeRoot(parent, indc, descp, clrvalue[1:-1])

        check_nul = [l for l in self.color_name if '9876' in l.split('_')]
        colr = [1, 0, 0, 1]
        if len(check_nul) == 0:
            parent = self.tree_colors.model().sourceModel().invisibleRootItem()
            addTreeRoot(parent, '9876', 'Combined', colr)

################# SET IMAGE SCHEME ###########################
def update_image_sch(self, info=None, color=[1, 1, 0], loaded=False):
    """
    SET new color scheme to read multiple images
    Args:
        self:
        info: [fileObj, index, index_view] describing the imported image
        color: RGB color (0-1) used for the tree entry
        loaded: whether the image is already loaded (checked)
    Returns:

    """

    ######################################
    if hasattr(self, 'tree_images'):

        parent = self.tree_images.model().sourceModel().invisibleRootItem()

        [fileObj, index, index_view] = info
        if index_view == 0:
            indc = 'View 1'
        elif index_view == 1:
            indc = 'View 2'
        if index >= 3:
            if index_view == 0:
                indc = 'View 1 (seg)'
            elif index_view == 1:
                indc = 'View 2 (seg)'
        """
        indc='Unknow'
        if index==0:
            indc='View 1'
        elif index==1:
            indc='View 1 (fetal)'
        elif index==2:
            indc='View 2'
        elif index==3:
            indc='View 1 (Seg)'
        elif index==4:
            indc='View 1 (fetal, seg)'
        elif index == 5:
            indc = 'View 2 (seg)'
        """
        color = [int(c * 255) for c in color]
        for file in fileObj[0]:
            if '*' in file:
                continue
            descp = os.path.basename(file)
            if info is not None:
                existence_indices_view = [[file in f[0][0][0], f[0][0][2]] for f in self.imported_images]
                #if np.sum([el[0] for el in existence_indices_view])==0:
                if np.sum([f[0][0][2] == index_view for f in self.imported_images if file in f[0][0][0]]) == 0:
                    self.imported_images.append([[[file, fileObj[1], index_view], index], color, loaded, indc])
                else:
                    return

            node1 = QtGui.QStandardItem(indc)

            node1.setForeground(QtGui.QBrush(QtGui.QColor(color[0], color[1], color[2], 255)))

            node1.setData(index_view)

            node1.setFlags(
                node1.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
            if loaded:
                node1.setCheckState(Qt.Checked)
            else:
                node1.setCheckState(Qt.Unchecked)
            node2 = QtGui.QStandardItem(descp)
            node2.setForeground(QtGui.QBrush(QtGui.QColor(color[0], color[1], color[2], 255)))
            node2.setFlags(node2.flags() | QtCore.Qt.ItemIsTristate)
            # node2.setCheckState(0)
            parent.appendRow([node1, node2])


def get_back_data(im, shape_initial, pad_zero, border_value):
    im_fill = np.ones(shape_initial) * border_value
    im_fill[pad_zero[0][0]:pad_zero[0][1] + 1, pad_zero[1][0]:pad_zero[1][1] + 1,
            pad_zero[2][0]:pad_zero[2][1] + 1] = im
    return im_fill


def magic_selection(im, initial_point, connectivity=4, tol=60):
    #(int(realy), int(realx))
    h, w = im.shape[:2]

    tolerance = (tol,) * 3

    segmented_area = np.zeros((h + 2, w + 2), dtype=np.uint8)

    segmented_area[:] = 0
    try:
        cv2.floodFill(im.astype(np.float32), segmented_area, initial_point, 0,
                      (tol,), (tol,), (
                          connectivity | cv2.FLOODFILL_FIXED_RANGE | cv2.FLOODFILL_MASK_ONLY | 255 >> 8
                      ))
        magic_mask = segmented_area[1:-1, 1:-1].copy()
        magic_mask[im == 0] = 0
        segmented_area = magic_mask > 0
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        segmented_area = cv2.morphologyEx(segmented_area.astype(np.float32), cv2.MORPH_CLOSE, kernel)

        return segmented_area
    except:
        return None

################# Second way to clean parent image ###########################
def clean_parent_image2(self, filename, indc, index_view):

    parent = self.tree_images.model().sourceModel().invisibleRootItem()
    fn = os.path.basename(filename)
    index_row = None
    for i in range(parent.rowCount()):
        signal1 = parent.child(i, 0).text()
        signal2 = parent.child(i, 1).text()
        if signal2 == fn and signal1 in indc:
            [info, _, _, _] = self.imported_images[i]
            if indc in info[0][1]:
                continue
            else:
                index_row = i
                signal = parent.child(i)
                signal.setCheckState(Qt.Checked)
                break
    if index_row is None:
        return
    indices = []
    for i in range(parent.rowCount()):
        signal = parent.child(i)
        if i == index_row or signal.data() != index_view:
            continue
        if signal.checkState() == Qt.Checked:
            if signal.text() in indc:
                try:
                    self.tree_images.model().sourceModel().itemChanged.disconnect(self.changeImage)
                except:
                    pass
                signal.setCheckState(Qt.Unchecked)
                [info, _, _, _] = self.imported_images[i]
                #if indc in info[0][1]:
                indices.append(i)
                try:
                    self.tree_images.model().sourceModel().itemChanged.connect(self.changeImage)
                except:
                    pass
    for ind in indices:
        try:
            self.imported_images.pop(ind)
            parent = self.tree_images.model().sourceModel().invisibleRootItem()
            parent.removeRow(ind)
        except Exception as e:
            print(e)

################# first way to clean parent image ###########################
def clean_parent_image(self, index_row, indc, index_view):
    parent = self.tree_images.model().sourceModel().invisibleRootItem()
    for i in range(parent.rowCount()):
        signal = parent.child(i)
        if signal.data() != index_view or i == index_row:
            continue

        if signal.checkState() == Qt.Checked:
            if signal.text() in indc:
                try:
                    self.tree_images.model().sourceModel().itemChanged.disconnect(self.changeImage)
                except:
                    pass
                signal.setCheckState(Qt.Unchecked)
                try:
                    self.tree_images.model().sourceModel().itemChanged.connect(self.changeImage)
                except:
                    pass

################# read color information from text files ###########################
def read_txt_color(file, mode='lut', from_one=False):
    """
    read color information from text files
    Args:
        file: name of the file
        mode: mode of reading
        from_one:

    Returns:

    """
    import re

    def is_number(vl):
        return bool(re.match(r'^-?\d+(\.\d+)?$', vl))

    inital_col = []
    if mode == 'lut':
        with open(file, 'r') as fp:
            lines = fp.readlines()
        num_colors = len(lines) // 2
        color_name = []
        color_info = []
        r = 0
        for n, l in enumerate(lines):
            if n % 2 == 0:
                if from_one:
                    color_name.append('{}_'.format(r + 1) + l.rstrip('\n'))
                    r += 1
                else:
                    color_name.append(lines[n + 1].split(' ')[0] + '_' + l.rstrip('\n'))
            else:
                color_info.append([int(i) for i in l.rstrip("\n").split(' ')])

        #color_info = [[int(i) for i in l.rstrip("\n").split(' ')] for l in lines if not l[4].isalpha()]
        color_index_rgb = np.array(color_info).astype('float')
        color_index_rgb[:, [1, 2, 3, 4]] = color_index_rgb[:, [1, 2, 3, 4]] / 255.0
        inital_col = color_index_rgb[:, 0].copy()
        if from_one:
            color_index_rgb[:, 0] = np.arange(color_index_rgb.shape[0]) + 1
        #color_name = [l.rstrip('\n') for l in lines if l[0].isalpha()]
    else:
        # itk
        with open(file, 'r') as fp:
            lines = fp.readlines()
        #color_name = [l.split('\n')[0].split(',')[0]+'_'+l.split('\n')[0].split(',')[-1] for l in lines]
        for id, l in enumerate(lines):
            if l[0] == '#':
                continue
            try:
                spl = [r.replace('"', '') for r in re.sub(r'\s+', ' ', l[:-1]).split() if r != '']
                spl_1 = spl[1:]
                if len(spl_1) > 3:
                    indices_3 = [
                        i for i in range(len(spl_1) - 2)  # Ensure there are at least three elements to check
                        if all(is_number(spl_1[i + j]) for j in range(3))  # Check the next three elements
                    ]
                    indices_4 = [
                        i for i in range(len(spl_1) - 3)  # Ensure there are at least four elements to check
                        if all(is_number(spl_1[i + j]) for j in range(4))  # Check the next four elements
                    ]
                    indices_6 = [
                        i for i in range(len(spl_1) - 5)  # Ensure there are at least six elements to check
                        if all(is_number(spl_1[i + j]) for j in range(6))  # Check the next six elements
                    ]
                    if len(indices_3) == 1:  # RGB
                        index_colr_start = indices_3[0] + 1
                        index_colr_end = index_colr_start + 3
                    elif len(indices_4) == 1:  # RGBA
                        index_colr_start = indices_4[0] + 1
                        index_colr_end = index_colr_start + 4
                    elif len(indices_6) == 1:  # RGB (CCC)
                        index_colr_start = indices_6[0] + 1
                        index_colr_end = index_colr_start + 6
                    indices_non_numeric = [r for r, s in enumerate(spl_1) if not is_number(s)]
                    indices_non_numeric = indices_non_numeric[np.argmax([len(spl_1[el]) for el in indices_non_numeric])] + 1
                    break
                """
                #spl = [r for r in re.split(r'[ ,|;"]+', l[:-1]) if r != '']
                spl = [r.replace('"', '') for r in re.sub(r'\s+', ' ', l[:-1]).split() if r != '']
                int(spl[0])
                if [r for r, s in enumerate(spl) if not s.isnumeric()][0]==7:
                    #itk mode
                    last_before_name = 7
                    index_colr = 4
                elif [r for r, s in enumerate(spl) if not s.isnumeric()][0]==4:
                    last_before_name=4
                    index_colr=4
                else:
                    last_before_name = 7
                    index_colr = 4
                break
                """
            except:
                continue

        #color_name = [l.split('\t')[0]+"_"+l.split('\t')[-1][1:-2] for l in lines[id:]]
        #color_name = [[r for r in re.sub(r'\s+', ' ', l).split() if r != '' and r!='\n'][0] + "_"+' '.join([r.replace('"', '') for r in re.sub(r'\s+', ' ', l[:-1]).split() if r != '' and r!='\n'][indices_non_numeric]) for l in lines[id:] if l[0]!='#']
        color_name = [[r for r in re.sub(r'\s+', ' ', l).split() if r != '' and r != '\n'][0] + "_" + "".join([r.replace('"', '') for r in re.sub(r'\s+', ' ', l[:-1]).split() if r != '' and r != '\n'][indices_non_numeric]) for l in lines[id:] if l[0] != '#']
        indices_el = [int([r for r in re.sub(r'\s+', ' ', l).split() if r != '' and r != '\n'][0]) for l in lines[id:] if l[0] != '#']
        color_index_rgb = np.array([[int(float(s)) for s in [r for r in re.sub(r'\s+', ' ', l).split() if r != '' and r != '\n'][index_colr_start:index_colr_end]] for l in
                                    lines[id:] if l[0] != '#']).astype('float')
        #color_index_rgb = np.array([[int(l.split('\t')[0]), int(l.split('\t')[1]), int(l.split('\t')[2]), int(l.split('\t')[3]), 1] for l in
        #                            lines[id:]]).astype('float')
        color_index_rgb = color_index_rgb[..., :3]

        #color_index_rgb = np.hstack( (color_index_rgb, np.ones((color_index_rgb.shape[0],1))))
        #color_index_rgb[:, [1, 2, 3]] = color_index_rgb[:, [1, 2, 3]] / 255.0
        color_index_rgb[:, [0, 1, 2]] = color_index_rgb[:, [0, 1, 2]] / 255.0
        color_index_rgb = np.hstack((np.array(indices_el).reshape(-1, 1), color_index_rgb, np.ones((color_index_rgb.shape[0], 1))))
        #if from_one:
        #    color_index_rgb[:, 0] = np.arange(color_index_rgb.shape[0])+1
        inds = color_index_rgb[:, [1, 2, 3]].sum(1) != 0
        color_index_rgb = color_index_rgb[inds, :]
        color_name = list(np.array(color_name)[inds])

    return color_name, color_index_rgb, inital_col

###################### Load Tractography file ######################
def load_trk(file):
    try:
        stk = nib.streamlines.load(file)
        success = True
    except:
        stk = None
        success = False
    return stk, success

###################### related to Tractography file ######################
def divide_track_to_prinicipals(trk):
    """
    Bin a track along its first coordinate and return the mean point of each bin
    Args:
        trk:

    Returns:

    """
    rng = np.linspace(np.floor(np.min((trk[:, 0]))), np.ceil(np.max((trk[:, 0]))), int(np.ceil(np.max((trk[:, 0]))) - np.floor(np.min((trk[:, 0])))) * 2)
    trks = []
    for r, rn in enumerate(rng):
        if r < len(rng) - 1:
            ind_rng = (trk[:, 0] >= rng[r]) * (trk[:, 0] <= rng[r + 1])
            a = trk[ind_rng, :]

            if a.shape[0] > 0:
                trks.append(a.mean(0))
    return np.array(trks)

###################### Return voxel coordinates for reference x, y, z and vice versa ######################
def apply_affine(coord, affine):
    """ Return voxel coordinates for reference x, y, z and vice versa"""
    if coord.shape[1] != 4:
        c = np.zeros((coord.shape[0], 4))
        c[:, :-1] = coord
        c[:, -1] = np.ones(coord.shape[0])
        coord = c.T
    return np.matmul(affine, coord).T

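# Illustrative sketch, not part of the original module: mapping voxel indices to world
# coordinates with apply_affine. The affine and voxel coordinates are made up.
def _example_apply_affine():
    affine = np.eye(4)
    affine[:3, 3] = [10.0, -5.0, 2.0]               # pure translation for the example
    vox = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])  # two voxel coordinates (N x 3)
    world = apply_affine(vox, affine)                # homogeneous multiply, returns N x 4
    return world[:, :3]                              # translated points
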
###################### Generate colors for tractography files ######################
def generate_colors_track(streamls):
    """
    Generate colors for tractography according to stream lines
    Args:
        streamls:

    Returns:

    """
    def rgbcolor(line):
        if line.ndim == 1:
            norml = np.linalg.norm(line)
            color = np.abs(np.divide(line, norml, where=norml != 0))

            return color

    colors = [rgbcolor(strl[-1] - strl[0])
              for strl in streamls]
    return np.vstack(colors)

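# Illustrative sketch, not part of the original module: the direction-based coloring above
# applied to two hypothetical streamlines, one along x and one along z.
def _example_track_colors():
    streamls = [np.array([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0], [10.0, 0.0, 0.0]]),
                np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 8.0]])]
    return generate_colors_track(streamls)   # rows close to [1, 0, 0] and [0, 0, 1]
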
###################### Get real world coordinates from tractography information ######################
def get_world_from_trk(streamlines, affine, inverse=False, color_based_on_length=False):
    """
    Get real world coordinates from tractography information
    Args:
        streamlines: stream lines
        affine: affine matrix
        inverse: inverse of affine matrix
        color_based_on_length: boolean to select color according to the length of segments

    Returns:

    """
    length = streamlines._lengths
    ind_large = length > 1

    if color_based_on_length:
        import matplotlib
        normalized = matplotlib.colors.Normalize(vmin=np.quantile(length[ind_large], 0.2), vmax=np.quantile(length[ind_large], 0.7))
        colors = np.array([matplotlib.cm.jet(normalized(len(strl)))[:3]
                           for strl in streamlines])  # keep RGB only and use an array so it can be masked below

    else:

        colors = generate_colors_track(streamlines)

    if inverse:
        affine0 = np.linalg.inv(affine)
    else:
        affine0 = affine.copy()
    str_world = []
    #colors = []
    r = 0
    for ln, clr in zip(streamlines[ind_large], colors[ind_large]):
        a = apply_affine(ln, affine0)
        #a = np.round(a).astype('int')
        #a[:,-1] = r
        a = np.unique(a, axis=0)
        color = clr

        b = np.zeros((a.shape[0], 8))
        b[:, :3] = a[:, :-1]
        b[:, 3:6] = color
        b[:, -1] = r
        b[:, -2] = a.shape[0]
        r += 1
        str_world.append(b)

    return np.concatenate(str_world)

def vox2ref(affine, ref):
    """ Return X, Y, Z coordinates for i, j, k """
    return np.matmul(affine, ref)[:3]

###################### Cursors ######################

def cursorOpenHand():
    bitmap = QtGui.QPixmap(source_folder + "/Hand.png")
    return QtGui.QCursor(bitmap)

def cursorClosedHand():
    bitmap = QtGui.QPixmap(source_folder + "/Handsqueezed.png")
    return QtGui.QCursor(bitmap)

def cursorZoomIn():
    bitmap = QtGui.QPixmap(source_folder + "/zoom_in.png")
    return QtGui.QCursor(bitmap)

def cursorZoomOut():
    bitmap = QtGui.QPixmap(source_folder + "/zoom_out.png")
    return QtGui.QCursor(bitmap)

def cursorRotate():
    bitmap = QtGui.QPixmap(source_folder + "/rotate.png")
    return QtGui.QCursor(bitmap)

def cursorArrow():
    #bitmap = QtGui.QPixmap(source_folder+"/arrow.png")
    return QtGui.QCursor(Qt.ArrowCursor)

def cursorPaint():
    bitmap = QtGui.QPixmap(source_folder + "/HandwritingPlus.png")
    return QtGui.QCursor(bitmap)

def cursorPaintX():
    bitmap = QtGui.QPixmap(source_folder + "/HandwritingPlusX.png")
    return QtGui.QCursor(bitmap)

def cursorCircle(size=50):

    def pil_image_to_qpixmap(pil_img):
        buffer = BytesIO()
        pil_img.save(buffer, format='PNG')       # Save image to BytesIO buffer
        pixmap = QtGui.QPixmap()
        pixmap.loadFromData(buffer.getvalue())   # Load data into QPixmap
        return pixmap

    from PIL import Image, ImageDraw

    size_im = max(200, size)
    image = Image.new('RGBA', (size_im, size_im))
    draw = ImageDraw.Draw(image)
    # Size of Bounding Box for ellipse

    x, y = size_im, size_im
    eX, eY = size / 2, size / 2
    bbox = (x / 2 - eX / 2, y / 2 - eY / 2, x / 2 + eX / 2, y / 2 + eY / 2)

    draw.ellipse(bbox, fill=None, outline='blue', width=2)

    eX, eY = size / 50, size / 50
    bbox2 = (x / 2 - eX / 2, y / 2 - eY / 2, x / 2 + eX / 2, y / 2 + eY / 2)
    draw.ellipse(bbox2, fill='blue', outline='blue', width=2)

    #return QtGui.QCursor(image.toqpixmap())
    return QtGui.QCursor(pil_image_to_qpixmap(image))

def cursorErase():
    bitmap = QtGui.QPixmap(source_folder + "/HandwritingMinus.png")
    return QtGui.QCursor(bitmap)

def cursorEraseX():
    bitmap = QtGui.QPixmap(source_folder + "/HandwritingMinusX.png")
    return QtGui.QCursor(bitmap)

###################### locate proper widgets ######################
def find_avail_widgets(self):
    """
    Find available active widgets
    Args:
        self:

    Returns:

    """
    prefix = 'openGLWidget_'
    widgets = [1, 2, 3, 4, 5, 6, 11, 12]
    widgets_mri = [4, 5, 6, 12, 24]
    widgets_eco = [11, 1, 2, 3, 14]
    _eco = False
    _mri = False

    for k in widgets:
        name = prefix + str(k)
        widget = getattr(self, name)
        if widget.isVisible():
            if k in widgets_eco:
                _eco = True
            elif k in widgets_mri:
                _mri = True
    if _eco and _mri:
        widgets = widgets
    elif _eco:
        widgets = widgets_eco
    elif _mri:
        widgets = widgets_mri
    return widgets

###################### set cursors ######################
def setCursorWidget(widget, code, reptime, rad_circle=50):
    """
    Set Cursor Widgets
    Args:
        widget: widget
        code: integer code of the widget
        reptime: repetition time
        rad_circle: radius of the circle cursor
    Returns:

    """
    try_disconnect(widget)
    widget.enabledPan = False  # Panning
    widget.enabledRotate = False  # Rotating
    widget.enabledPen = False  # Polygon Drawing
    widget.enabledMagicTool = False  # FreeHand Drawing
    widget.enabledErase = False  # Erasing the points
    widget.enabledZoom = False  # ZOOM DISABLED
    widget.enabledPointSelection = False  # Point Selection
    widget.enabledRuler = False
    widget.enabledCircle = False
    #widget.enabledGoTo = False
    widget.enabledLine = False
    widget._magic_slice = None  # for magic coloring
    widget.setMouseTracking(False)
    widget.makeObject()
    widget.update()
    if reptime <= 1:
        if code == 0:
            widget.updateEvents()
            widget.setCursor(Qt.ArrowCursor)

        elif code == 1:  # ImFreeHand
            widget.updateEvents()
            widget.enabledMagicTool = True
            widget.setMouseTracking(True)
            widget.setCursor(cursorPaint())

            #from segment_anything import SamPredictor, sam_model_registry

            # Choose the model variant to download
            #model_type = "vit_h"  # Options: "vit_h", "vit_l", "vit_b"
            #checkp = os.path.join(os.path.dirname(os.path.dirname(__file__)),
            #                      'widgets/DeepLModels/sam_vit_h_4b8939.pth')
            # Download and load the model
            #sam = sam_model_registry[model_type](checkpoint=checkp)
            #widget._sam_predictor = SamPredictor(sam)

            try:
                widget.customContextMenuRequested.connect(widget.ShowContextMenu)
            except Exception as e:
                print('Cursor Widget Error')
                print(e)
        elif code == 2:  # Panning
            widget.updateEvents()
            widget.enabledPan = True
            widget.setCursor(cursorOpenHand())

        elif code == 3:  # Erasing
            #widget.setCursor(cursorOpenHand())
            widget.updateEvents()
            widget.enabledErase = True
            widget.setCursor(cursorErase())

        elif code == 4:  # ImPaint Contour
            widget.updateEvents()
            widget.enabledPen = True

            widget.setCursor(cursorPaint())
            try:
                widget.customContextMenuRequested.connect(widget.ShowContextMenu_contour)
            except Exception as e:
                print('Cursor Widget Error')
                print(e)
        elif code == 5:  # point locator
            widget.updateEvents()
            widget.enabledPointSelection = True
            widget.setCursor(Qt.CrossCursor)
        elif code == 6:  # ruler
            widget.updateEvents()
            widget.enabledRuler = True
            widget.setCursor(Qt.CrossCursor)
            try:
                widget.customContextMenuRequested.connect(widget.ShowContextMenu_ruler)
            except Exception as e:
                print('Cursor Widget Error')
                print(e)
        elif code == 7:  # goto
            widget.updateEvents()
            widget.enabledGoTo = True
            widget.setCursor(Qt.CrossCursor)
        elif code == 8:  # line
            widget.updateEvents()
            widget.enabledLine = True
            widget.setCursor(cursorPaint())
            try:
                widget.customContextMenuRequested.connect(widget.ShowContextMenu_gen)
            except Exception as e:
                print('Cursor Widget Error')
                print(e)
        elif code == 9:  # circle
            #widget.updateEvents()
            widget.setMouseTracking(True)
            widget.enabledCircle = True
            widget.setCursor(cursorCircle(rad_circle))

    else:
        if code == 4:  # ImPaint
            widget.updateEvents()
            widget.enabledPen = True
            widget.setCursor(cursorPaintX())
        elif code == 3:  # Erasing
            widget.updateEvents()
            widget.enabledErase = True
            widget.setCursor(cursorEraseX())

###################### try disconnect widgets ######################
def try_disconnect(widget):
    """
    Try to disconnect connected widgets
    Args:
        widget:

    Returns:

    """
    funcs = [widget.ShowContextMenu, widget.ShowContextMenu_ruler, widget.ShowContextMenu_contour, widget.ShowContextMenu_gen]
    for f in funcs:
        try:
            while True:
                widget.customContextMenuRequested.disconnect(f)
        except Exception as e:
            pass

###################### find zone of a point ######################
def zonePoint(x1, y1, xc, yc):
    """
    Find the zone (quadrant) of a point relative to another point
    Args:
        x1:
        y1:
        xc:
        yc:

    Returns:

    """
    difx = x1 - xc
    dify = y1 - yc
    zone = 0
    if difx > 0 and dify > 0:
        zone = 4
    elif difx > 0 and dify < 0:
        zone = 1
    elif difx < 0 and dify < 0:
        zone = 2
    elif difx < 0 and dify > 0:
        zone = 3
    return zone

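# Illustrative sketch, not part of the original module: zonePoint on two made-up points,
# showing the quadrant numbering used above (relative to the center xc, yc).
def _example_zone_point():
    return zonePoint(5, 7, 0, 0), zonePoint(-3, 2, 0, 0)   # -> (4, 3)
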
###################### Convert points to polygons ######################
def ConvertPToPolygons(points, ignoredInd=0):
    """
    Convert points to polygons
    Args:
        points:
        ignoredInd:

    Returns:

    """
    from shapely.ops import polygonize, unary_union

    ls = LineString(points)
    polys = []
    # closed, non-simple
    lr = LineString(ls.coords[:] + ls.coords[0:1])  # line strings
    if not lr.is_simple:  # self-intersecting, not suitable for a LinearRing
        mls = unary_union(lr)
        for polygon in polygonize(mls):
            polys.append(polygon.buffer(0))
    else:
        polys.append(Polygon(points))
    return polys

###################### Convert points to polygons with a defined buffer size ######################
def ConvertPointsToPolygons(points, width=0):
    """
    Convert Points to polygons
    Args:
        points:
        width:

    Returns:

    """
    if width <= 0:
        return Polygon(points)
    ls = LineString(points)
    d = list(ls.buffer(width).exterior.coords)
    return Polygon(np.hstack((np.array(d), np.ones((len(d), 1)) * points[0][-1])))

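# Illustrative sketch, not part of the original module: turning a free-hand stroke into a
# filled polygon with a 2-pixel buffer. The stroke coordinates (x, y, slice) are made up.
def _example_buffer_stroke():
    stroke = [(10.0, 10.0, 5.0), (20.0, 12.0, 5.0), (30.0, 9.0, 5.0)]  # open polyline on slice 5
    poly = ConvertPointsToPolygons(stroke, width=2)   # buffered outline, slice index re-attached
    return poly.area
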
###################### Convert multipolygons to one polygon ######################
def ConvertMPolyToPolygons(mPoly):
    """
    Convert multipolygon to one polygon
    Args:
        mPoly:

    Returns:

    """
    if mPoly.type == 'MultiPolygon':
        maxArea = 0
        for poly in mPoly:
            if poly.area > maxArea:
                polygon = poly
                maxArea = poly.area
    else:
        polygon = mPoly

    return polygon

###################### fill inside the polygon (To fill pixels) ######################
def fillInsidePol(poly):
    """
    Fill inside polygons
    Args:
        poly:

    Returns:

    """
    from matplotlib.path import Path

    try:
        coords = np.array(poly.exterior.coords)
        sliceNo = coords[0, 2]
        p = Path(coords[:, :2])
        xmin, ymin, xmax, ymax = poly.bounds
        x = np.arange(np.floor(xmin), np.ceil(xmax), 1)
        y = np.arange(np.floor(ymin), np.ceil(ymax), 1)
        points = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
        ind_points = p.contains_points(points)
        selected_points = points[ind_points]
        # add slice number
        total_points = np.hstack([selected_points, np.ones([selected_points.shape[0], 1]) * sliceNo])

        return total_points, coords
    except:
        print('Line 397 utils')

###################### fill index of white voxels ######################
def findIndexWhiteVoxels(poly, segmentShowWindowName, is_pixel=False, bool_permute_axis=True):
    """
    Find the index of white voxels in the segmentation
    Args:
        poly: polygon
        segmentShowWindowName: string name
        is_pixel: is pixel
        bool_permute_axis: permute axis or not

    Returns:

    """
    try:
        if is_pixel:
            whiteVoxels = np.array(poly).astype("int")
            edges = whiteVoxels.copy()
            if bool_permute_axis:
                whiteVoxels, edges = permute_axis(whiteVoxels, edges, segmentShowWindowName)
        else:
            pixels, edges = fillInsidePol(poly)
            if len(pixels) <= 0:
                return None
            elif len(pixels) > 100000:
                print('')

            whiteVoxels = np.array(pixels).astype("int")
            edges = np.array(edges).astype("int")
            if bool_permute_axis:
                whiteVoxels, edges = permute_axis(whiteVoxels, edges, segmentShowWindowName)
        return whiteVoxels, edges
    except:
        print('line 424 utils')

###################### permute axis of white voxels for painting ######################
def permute_axis(whiteVoxels, edges, segmentShowWindowName):
    """
    Permute axis according to plane
    Args:
        whiteVoxels:
        edges:
        segmentShowWindowName:

    Returns:

    """
    if segmentShowWindowName == 'sagittal':
        whiteVoxels = whiteVoxels[:, [1, 0, 2]]
        edges = edges[:, [1, 0, 2]]
    elif segmentShowWindowName == 'coronal':
        whiteVoxels = whiteVoxels[:, [1, 2, 0]]
        edges = edges[:, [1, 2, 0]]
    elif segmentShowWindowName == 'axial':
        whiteVoxels = whiteVoxels[:, [2, 1, 0]]
        edges = edges[:, [2, 1, 0]]
    return whiteVoxels, edges

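# Illustrative sketch, not part of the original module: the axis permutation applied to a
# single made-up point for the 'axial' view (columns are reversed to [2, 1, 0]).
def _example_permute_axis():
    pts = np.array([[1, 2, 3]])
    vox, edges = permute_axis(pts.copy(), pts.copy(), 'axial')
    return vox   # -> [[3, 2, 1]]
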
###################### find painted voxels ######################
def findWhiteVoxels(totalPs, segmentShowWindowName, seg=None):
    """
    Find white voxels
    Args:
        totalPs: total points
        segmentShowWindowName: view name ('sagittal', 'coronal' or 'axial')
        seg: segmentation
    Returns: new segmentation with the painted voxels set to 255
    """
    pixels = []
    for sliceN in totalPs.keys():
        for key in totalPs[sliceN].keys():
            poly, _ = totalPs[sliceN][key]
            ones, edges = fillInsidePol(poly)
            pixels += ones
    whiteVoxels = np.array(pixels).astype("int")

    if segmentShowWindowName == 'sagittal':
        whiteVoxels = whiteVoxels[:, [1, 0, 2]]
    elif segmentShowWindowName == 'coronal':
        whiteVoxels = whiteVoxels[:, [1, 2, 0]]
    elif segmentShowWindowName == 'axial':
        whiteVoxels = whiteVoxels[:, [2, 1, 0]]
    newSeg = np.zeros_like(seg)
    newSeg[tuple(zip(*whiteVoxels))] = 255.0
    return newSeg

###################### Permute to proper axis #################
def PermuteProperAxis(whiteVoxels, segmentShowWindowName, axis=None):
    """
    Permute to proper axis
    Args:
        whiteVoxels:
        segmentShowWindowName:
        axis:

    Returns:

    """
    if axis is None:
        if segmentShowWindowName == 'sagittal':
            axis = [1, 0, 2]
        elif segmentShowWindowName == 'coronal':
            axis = [1, 2, 0]
        elif segmentShowWindowName == 'axial':
            axis = [2, 1, 0]
        whiteVoxels = whiteVoxels[:, axis]
    else:
        whiteVoxels = whiteVoxels[:, axis]
    return whiteVoxels

###################### 3D rotation of images #################
|
|
2145
|
+
def rotation3d(image, theta_axial, theta_coronal, theta_sagittal, remove_zeros=False):
|
|
2146
|
+
|
|
2147
|
+
"""
|
|
2148
|
+
|
|
2149
|
+
[0,0,1] : axial
|
|
2150
|
+
[0,1,0]: coronal
|
|
2151
|
+
rotate an image around proper axis with angle theta
|
|
2152
|
+
:param image: A nibabel image
|
|
2153
|
+
:param axis: rotation axis [0,0,1] is around z
|
|
2154
|
+
:param theta: Rotation angle
|
|
2155
|
+
:return: The rotated image
|
|
2156
|
+
"""
|
|
2157
|
+
def get_offset(f_data):
|
|
2158
|
+
xs, ys, zs = np.where(f_data > 2) #find zero values
|
|
2159
|
+
return np.array([np.mean(xs), np.mean(ys), np.mean(zs)])
|
|
2160
|
+
|
|
2161
|
+
def x_affine(theta):
|
|
2162
|
+
#https://nipy.org/nibabel/coordinate_systems.html#rotate-axis-0
|
|
2163
|
+
""" Rotation aroud x axis
|
|
2164
|
+
"""
|
|
2165
|
+
cosine = np.cos(theta)
|
|
2166
|
+
sinus = np.sin(theta)
|
|
2167
|
+
return np.array([[1, 0, 0, 0],
|
|
2168
|
+
[0, cosine, -sinus, 0],
|
|
2169
|
+
[0, sinus, cosine,0],
|
|
2170
|
+
[0, 0, 0, 1]])
|
|
2171
|
+
def y_affine(theta):
|
|
2172
|
+
#https://nipy.org/nibabel/coordinate_systems.html#rotate-axis-0
|
|
2173
|
+
""" Rotation aroud y axis
|
|
2174
|
+
|
|
2175
|
+
"""
|
|
2176
|
+
cosine = np.cos(theta)
|
|
2177
|
+
sinus = np.sin(theta)
|
|
2178
|
+
return np.array([[cosine, 0, sinus, 0],
|
|
2179
|
+
[0, 1, 0, 0],
|
|
2180
|
+
[-sinus, 0, cosine, 0],
|
|
2181
|
+
[0, 0, 0, 1]])
|
|
2182
|
+
|
|
2183
|
+
def z_affine(theta):
|
|
2184
|
+
#https://nipy.org/nibabel/coordinate_systems.html#rotate-axis-0
|
|
2185
|
+
""" Rotation aroud z axis
|
|
2186
|
+
"""
|
|
2187
|
+
cosine = np.cos(theta)
|
|
2188
|
+
sinus = np.sin(theta)
|
|
2189
|
+
return np.array([[cosine, -sinus, 0, 0],
|
|
2190
|
+
[sinus, cosine, 0, 0],
|
|
2191
|
+
[0, 0, 1, 0],
|
|
2192
|
+
[0, 0, 0, 1]])
|
|
2193
|
+
if theta_axial==0 and theta_sagittal==0 and theta_coronal==0:
|
|
2194
|
+
return image.get_fdata(), image.affine
|
|
2195
|
+
|
|
2196
|
+
|
|
2197
|
+
theta_axial *= np.pi/180
|
|
2198
|
+
theta_sagittal *= np.pi / 180
|
|
2199
|
+
theta_coronal *= np.pi / 180
|
|
2200
|
+
|
|
2201
|
+
M = x_affine(theta_sagittal).dot(y_affine(theta_coronal)).dot(z_affine(theta_axial))
|
|
2202
|
+
|
|
2203
|
+
offset = (np.array(image.shape)-M[:-1, :-1].dot(np.array(image.shape))/2.0)
|
|
2204
|
+
|
|
2205
|
+
f_data = resample_itk(image, M[:-1, :-1])#np.array(image.shape).astype('float')/2.0
|
|
2206
|
+
|
|
2207
|
+
if remove_zeros:
|
|
2208
|
+
xs, ys, zs = np.where(f_data != 0) #find zero values
|
|
2209
|
+
tol = 4
|
|
2210
|
+
|
|
2211
|
+
min_max = []
|
|
2212
|
+
for x in [xs, ys, zs]:
|
|
2213
|
+
minx = min(x)-tol if min(x)-tol>1 else min(x)
|
|
2214
|
+
maxx = max(x) + tol if max(x) + tol < f_data.shape[0]-1 else max(x)
|
|
2215
|
+
min_max.append([minx, maxx])
|
|
2216
|
+
f_data = f_data[min_max[0][0]:min_max[0][1] + 1, min_max[1][0]:min_max[1][1] + 1, min_max[2][0]:min_max[2][1] + 1]
|
|
2217
|
+
|
|
2218
|
+
return f_data, M
|
|
2219
|
+
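# Usage sketch (illustrative only, not part of the original module): a hypothetical call to
# rotation3d above, assuming nibabel and the module-level numpy/SimpleITK imports are
# available. It builds a small synthetic volume and rotates it 15 degrees in the axial plane.
def _example_rotation3d():
    import numpy as np
    import nibabel as nib
    img = nib.Nifti1Image(np.random.rand(32, 32, 32).astype(np.float32), np.eye(4))
    data, M = rotation3d(img, theta_axial=15, theta_coronal=0, theta_sagittal=0)
    return data.shape, M    # rotated data and the 4x4 homogeneous rotation used
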
###################### MultiOtsu thresholding #################
def Threshold_MultiOtsu(a, numc):
    from skimage.filters import threshold_multiotsu, threshold_otsu
    if numc > 1 and numc <= 5:
        thresholds = threshold_multiotsu(a, classes=numc)
    elif numc == 1:
        thresholds = [threshold_otsu(a)]
    else:
        import numpy as np
        thresholds = list(threshold_multiotsu(a, classes=5))
        b = np.digitize(a, thresholds)

        if numc == 6:
            vls = [4]
        elif numc == 7:
            vls = [4, 3]
        elif numc == 8:
            vls = [4, 3, 2]
        elif numc == 9:
            vls = [4, 3, 2, 1]
        elif numc == 10:
            vls = [4, 3, 2, 1]
        else:
            return
        for j in vls:
            c = a.copy()
            c[b != j] = 0
            if numc == 10 and j == 4:
                new_t = list(threshold_multiotsu(c, classes=4))
            else:
                new_t = list(threshold_multiotsu(c, classes=3))
            [thresholds.append(i) for i in new_t]
        thresholds = sorted(thresholds)

    thresholds = [el for el in thresholds if el > 5]
    return thresholds

###################### apply thresholding #################
def apply_thresholding(image, _currentThresholds):
    if not len(_currentThresholds) > 0:
        return np.zeros_like(image)
    regions = np.digitize(image, _currentThresholds)
    return regions + 1

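# Usage sketch (illustrative only, not part of the original module): a hypothetical example
# combining the two helpers above. Threshold_MultiOtsu returns a sorted list of cut points and
# apply_thresholding bins the image with np.digitize, so label k covers the k-th intensity band.
def _example_thresholding():
    rng = np.random.default_rng(0)
    img = np.concatenate([rng.normal(40, 5, 1000),
                          rng.normal(120, 10, 1000),
                          rng.normal(220, 10, 1000)])
    thresholds = Threshold_MultiOtsu(img, 3)      # two cut points for three classes
    labels = apply_thresholding(img, thresholds)  # values in {1, 2, 3}
    return thresholds, labels
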
###################### find affine of simpleITK image #################
def make_affine(simpleITKImage):
    # https://niftynet.readthedocs.io/en/v0.2.1/_modules/niftynet/io/simple_itk_as_nibabel.html
    # get affine transform in LPS
    if simpleITKImage.GetDimension() == 4:
        c = [simpleITKImage.TransformContinuousIndexToPhysicalPoint(p)
             for p in ((1, 0, 0, 0),
                       (0, 1, 0, 0),
                       (0, 0, 1, 0),
                       (0, 0, 0, 0))]
        c = np.array(c)
        c = c[:, :-1]
    elif simpleITKImage.GetDimension() == 3:
        c = [simpleITKImage.TransformContinuousIndexToPhysicalPoint(p)
             for p in ((1, 0, 0),
                       (0, 1, 0),
                       (0, 0, 1),
                       (0, 0, 0))]
        c = np.array(c)
    affine = np.concatenate([
        np.concatenate([c[0:3] - c[3:], c[3:]], axis=0),
        [[0.], [0.], [0.], [1.]]
    ], axis=1)
    affine = np.transpose(affine)
    # convert to RAS to match nibabel
    affine = np.matmul(np.diag([-1., -1., 1., 1.]), affine)
    return affine


###################### resample image after rotation #################
def resample_itk(image, M, offset=None):
    """
    Resample image after rotation

    Args:
        image: a nibabel image
        M: 3x3 rotation matrix applied about the image centre
        offset: unused, kept for backward compatibility

    Returns:
        resampled image data as a numpy array
    """
    a = sitk.GetImageFromArray(image.get_fdata())
    width, height, depth = a.GetSize()
    center = a.TransformIndexToPhysicalPoint((int(np.ceil(width / 2)),
                                              int(np.ceil(height / 2)),
                                              int(np.ceil(depth / 2))))
    tr = sitk.Euler3DTransform()
    tr.SetCenter(center)
    tr.SetMatrix(np.asarray(M).flatten().tolist())
    return sitk.GetArrayFromImage(sitk.Resample(a, a, tr, sitk.sitkLinear, 0))

###################### Recursive search for an attribute of a class #################
def rhasattr(obj, path):
    """
    Recursive search for an attribute of a class

    Args:
        obj: object to inspect
        path: dotted attribute path, e.g. 'a.b.c'

    Returns:
        True if the full path resolves, False otherwise
    """
    import functools

    try:
        functools.reduce(getattr, path.split("."), obj)
        return True
    except AttributeError:
        return False

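# Usage sketch (illustrative only, not part of the original module): rhasattr walks a dotted
# attribute path with functools.reduce, so nested attributes can be probed without chaining
# hasattr calls. The classes below are hypothetical stand-ins for real widgets.
def _example_rhasattr():
    class _Inner:
        value = 3

    class _Outer:
        inner = _Inner()

    obj = _Outer()
    return rhasattr(obj, "inner.value"), rhasattr(obj, "inner.missing")   # (True, False)
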
###################### Compute Anisotropy Elipse ######################
|
|
2334
|
+
def computeAnisotropyElipse(kspacedata):
|
|
2335
|
+
"""
|
|
2336
|
+
This function computes anisotropy elipse based on the momentum inertia of elipse
|
|
2337
|
+
:param image: K space 2D data
|
|
2338
|
+
:return: a binary function to determine a point inside the elipse
|
|
2339
|
+
"""
|
|
2340
|
+
def dotproduct(v1, v2):
|
|
2341
|
+
return sum((a * b) for a, b in zip(v1, v2))
|
|
2342
|
+
|
|
2343
|
+
def length(v):
|
|
2344
|
+
return math.sqrt(dotproduct(v, v))
|
|
2345
|
+
|
|
2346
|
+
image = np.absolute(kspacedata)
|
|
2347
|
+
if image.max() > 0:
|
|
2348
|
+
scale = -3
|
|
2349
|
+
scaling_c = np.power(10., scale)
|
|
2350
|
+
np.log1p(image * scaling_c, out=image)
|
|
2351
|
+
# normalize between zero and 255
|
|
2352
|
+
fmin = float(image.min())
|
|
2353
|
+
fmax = float(image.max())
|
|
2354
|
+
if fmax != fmin:
|
|
2355
|
+
coeff = fmax - fmin
|
|
2356
|
+
image[:] = np.floor((image[:] - fmin) / coeff * 255.)
|
|
2357
|
+
|
|
2358
|
+
pixelX, pixelY = np.where(image >= 0)
|
|
2359
|
+
value = image.flatten()
|
|
2360
|
+
|
|
2361
|
+
Ixy = -np.sum(value * pixelX * pixelY)
|
|
2362
|
+
Iyy = np.sum(value * pixelX ** 2)
|
|
2363
|
+
Ixx = np.sum(value * pixelY ** 2)
|
|
2364
|
+
A = np.array([[Ixx, Ixy], [Ixy, Iyy]])
|
|
2365
|
+
|
|
2366
|
+
eigVal, eigVec = np.linalg.eig(A)
|
|
2367
|
+
BOverA = np.sqrt(eigVal[0] / eigVal[1])
|
|
2368
|
+
#if BOverA<1:
|
|
2369
|
+
# BOverA = 1/BOverA
|
|
2370
|
+
rotationAngle = math.degrees(
|
|
2371
|
+
math.atan(dotproduct(eigVec[0], eigVec[1]) / (length(eigVec[0]) * length(eigVec[1]))))
|
|
2372
|
+
x0 = 0
|
|
2373
|
+
y0 = 0
|
|
2374
|
+
convertX = lambda x, y: (x - x0) * np.cos(np.deg2rad(rotationAngle)) + (y - y0) * np.sin(
|
|
2375
|
+
np.deg2rad(rotationAngle))
|
|
2376
|
+
convertY = lambda x, y: -(x - x0) * np.sin(np.deg2rad(rotationAngle)) + (y - y0) * np.cos(
|
|
2377
|
+
np.deg2rad(rotationAngle))
|
|
2378
|
+
|
|
2379
|
+
return lambda x, y, bb: (convertX(x, y) ** 2 / (BOverA * bb+1e-5) ** 2 + convertY(x, y) ** 2 / (
|
|
2380
|
+
bb ** 2+1e-5) - 1) < 0
|
|
2381
|
+
|
|
2382
|
+
|
|
2383
|
+
##########################Search for a point in contours################################
def point_in_contour(segSlice, point, color):
    """
    Search for a point inside the contours of a given label

    Args:
        segSlice: 2D segmentation slice
        point: (x, y) point to test
        color: label value whose contours are searched

    Returns:
        area, perimeter, centroid and contour of the region containing the point,
        or zeros if the point lies in no contour
    """
    segSlice[segSlice != color] = 0
    segSlice[segSlice == color] = 1
    contours, hierarchy = cv2.findContours(image=segSlice.astype('uint8'), mode=cv2.RETR_TREE,
                                           method=cv2.CHAIN_APPROX_NONE)
    for contour1 in contours:
        if cv2.pointPolygonTest(contour1, point, True) >= 0:  # point in contour
            M = cv2.moments(contour1)
            xy = [M['m10'] / M['m00'], M['m01'] / M['m00']]  # centroid
            perimeter = cv2.arcLength(contour1, True)  # perimeter
            area = cv2.contourArea(contour1)  # area
            return area, perimeter, xy, contour1.squeeze()
    return 0.0, 0.0, [0.0, 0.0], [0.0, 0.0]



######extract attributes from widget and assign it to the dictionary######################
|
|
2411
|
+
def getAttributeWidget(widget, nameWidget, dic):
|
|
2412
|
+
"""
|
|
2413
|
+
extract attributes from widget and assign it to the dictionary
|
|
2414
|
+
Args:
|
|
2415
|
+
widget:
|
|
2416
|
+
nameWidget:
|
|
2417
|
+
dic:
|
|
2418
|
+
|
|
2419
|
+
Returns:
|
|
2420
|
+
|
|
2421
|
+
"""
|
|
2422
|
+
def IsKnownType(val):
|
|
2423
|
+
return type(val) == int or type(val) == np.ndarray or type(val) == list or val is None or type(
|
|
2424
|
+
val) == float or type(val) == defaultdict or \
|
|
2425
|
+
type(val) == Qt.GlobalColor or \
|
|
2426
|
+
type(val) == str or type(val)==bool or \
|
|
2427
|
+
type(val) == tuple
|
|
2428
|
+
def updateDic(val, attr, at):
|
|
2429
|
+
for el in attr:
|
|
2430
|
+
try:
|
|
2431
|
+
vl = getattr(val, el)()
|
|
2432
|
+
|
|
2433
|
+
if IsKnownType(vl):
|
|
2434
|
+
dic[nameWidget][at][el] = vl
|
|
2435
|
+
except Exception as e:
|
|
2436
|
+
print('Update Dictionary Error')
|
|
2437
|
+
print(e)
|
|
2438
|
+
|
|
2439
|
+
dic[nameWidget] = defaultdict(list)
|
|
2440
|
+
for at in dir(widget):
|
|
2441
|
+
if at[0] == '_' or at == 'program':
|
|
2442
|
+
continue
|
|
2443
|
+
val = getattr(widget, at)
|
|
2444
|
+
if type(val) == QtWidgets.QSlider: # slider
|
|
2445
|
+
|
|
2446
|
+
dic[nameWidget][at] = defaultdict(list)
|
|
2447
|
+
dic[nameWidget][at]['type'] = 'QSlider'
|
|
2448
|
+
attr = ['minimum', 'maximum', 'value', 'isHidden']
|
|
2449
|
+
updateDic(val, attr, at)
|
|
2450
|
+
elif type(val)== QtWidgets.QLabel:
|
|
2451
|
+
dic[nameWidget][at] = defaultdict(list)
|
|
2452
|
+
dic[nameWidget][at]['type'] = 'QLabel'
|
|
2453
|
+
attr = ['text', 'isHidden']
|
|
2454
|
+
updateDic(val, attr, at)
|
|
2455
|
+
elif type(val)== QtWidgets.QRadioButton:
|
|
2456
|
+
dic[nameWidget][at] = defaultdict(list)
|
|
2457
|
+
dic[nameWidget][at]['type'] = 'QRadioButton'
|
|
2458
|
+
attr = ['isHidden', 'isChecked']
|
|
2459
|
+
updateDic(val, attr, at)
|
|
2460
|
+
elif type(val)== AnimatedToggle:
|
|
2461
|
+
dic[nameWidget][at] = defaultdict(list)
|
|
2462
|
+
dic[nameWidget][at]['type'] = 'AnimatedToggle'
|
|
2463
|
+
attr = ['isHidden', 'isChecked']
|
|
2464
|
+
updateDic(val, attr, at)
|
|
2465
|
+
elif IsKnownType(val):
|
|
2466
|
+
if at=='items':
|
|
2467
|
+
if type(val)==defaultdict:
|
|
2468
|
+
val = list(val.keys())
|
|
2469
|
+
at = 'items_names'
|
|
2470
|
+
dic[nameWidget][at] = val
|
|
2471
|
+
return dic
|
|
2472
|
+
|
|
2473
|
+
######extract attributes from a dictionary and assign it to a widget######################
|
|
2474
|
+
def loadAttributeWidget(widget, nameWidget, dic, progressbar):
|
|
2475
|
+
"""
|
|
2476
|
+
extract attributes from dictionary and assign it to a widget
|
|
2477
|
+
Set attribute of a widget
|
|
2478
|
+
:param widget:
|
|
2479
|
+
:param nameWidget:
|
|
2480
|
+
:param dic:
|
|
2481
|
+
:return:
|
|
2482
|
+
"""
|
|
2483
|
+
if type(dic[nameWidget])==list:
|
|
2484
|
+
return
|
|
2485
|
+
lenkeys = len(dic[nameWidget].keys())
|
|
2486
|
+
for key in dic[nameWidget].keys():
|
|
2487
|
+
progressbar.setValue(progressbar.value())
|
|
2488
|
+
if type(dic[nameWidget][key]) == defaultdict and nameWidget=='main':
|
|
2489
|
+
if 'type' in dic[nameWidget][key]:
|
|
2490
|
+
tpe = dic[nameWidget][key]['type']
|
|
2491
|
+
try:
|
|
2492
|
+
Qel = getattr(widget, key)
|
|
2493
|
+
except Exception as e:
|
|
2494
|
+
print(e)
|
|
2495
|
+
continue
|
|
2496
|
+
subDic = dic[nameWidget][key]
|
|
2497
|
+
if tpe == 'QLabel':
|
|
2498
|
+
attr = ['text', 'isHidden']
|
|
2499
|
+
Qel.setVisible(not subDic['isHidden'])
|
|
2500
|
+
Qel.setText(subDic['text'])
|
|
2501
|
+
elif tpe == 'QRadioButton' or tpe == 'AnimatedToggle':
|
|
2502
|
+
attr = ['isHidden', 'isChecked']
|
|
2503
|
+
Qel.setVisible(not subDic['isHidden'])
|
|
2504
|
+
Qel.setChecked(subDic['isChecked'])
|
|
2505
|
+
elif tpe == 'QSlider':
|
|
2506
|
+
attr = ['minimum', 'maximum', 'value', 'isHidden']
|
|
2507
|
+
Qel.setVisible(not subDic['isHidden'])
|
|
2508
|
+
Qel.setRange(subDic['minimum'], subDic['maximum'])
|
|
2509
|
+
Qel.setValue(subDic['value'])
|
|
2510
|
+
else:
|
|
2511
|
+
setattr(widget, key, dic[nameWidget][key])
|
|
2512
|
+
else:
|
|
2513
|
+
#if key == 'ImCenter':
|
|
2514
|
+
# print(key)
|
|
2515
|
+
setattr(widget, key, dic[nameWidget][key])
|
|
2516
|
+
|
|
2517
|
+
if hasattr(widget, 'update'):
|
|
2518
|
+
if hasattr(widget, 'resetInit'):
|
|
2519
|
+
widget.resetInit()
|
|
2520
|
+
widget.setDisabled(False)
|
|
2521
|
+
widget.update()
|
|
2522
|
+
|
|
2523
|
+
########Extract info from current slice##############
|
|
2524
|
+
def getCurrentSlice(widget, npImage, npSeg, sliceNum, tract=None, tol_slice=3):
|
|
2525
|
+
"""
|
|
2526
|
+
Extract current slice information from image
|
|
2527
|
+
Args:
|
|
2528
|
+
widget:
|
|
2529
|
+
npImage:
|
|
2530
|
+
npSeg:
|
|
2531
|
+
sliceNum:
|
|
2532
|
+
tract:
|
|
2533
|
+
tol_slice:
|
|
2534
|
+
|
|
2535
|
+
Returns:
|
|
2536
|
+
|
|
2537
|
+
"""
|
|
2538
|
+
imSlice = None
|
|
2539
|
+
segSlice = None
|
|
2540
|
+
trk = None
|
|
2541
|
+
|
|
2542
|
+
if widget.activeDim == 0:
|
|
2543
|
+
imSlice = npImage[sliceNum, :, :]
|
|
2544
|
+
segSlice = npSeg[sliceNum, :, :]
|
|
2545
|
+
if tract is not None:
|
|
2546
|
+
trk = tract[(tract[:,2]>sliceNum-tol_slice)*(tract[:,2]<sliceNum+tol_slice),:]
|
|
2547
|
+
trk = trk[:,[0, 1, 2, 3,4,5,6,7]]
|
|
2548
|
+
elif widget.activeDim == 1:
|
|
2549
|
+
imSlice = npImage[:, sliceNum, :]
|
|
2550
|
+
segSlice = npSeg[:, sliceNum, :]
|
|
2551
|
+
if tract is not None:
|
|
2552
|
+
trk = tract[(tract[:,1]>sliceNum-tol_slice)*(tract[:,1]<sliceNum+tol_slice),:]
|
|
2553
|
+
trk = trk[:,[0, 2, 1, 3,4,5,6,7]]
|
|
2554
|
+
elif widget.activeDim == 2:
|
|
2555
|
+
imSlice = npImage[:, :, sliceNum]
|
|
2556
|
+
segSlice = npSeg[:, :, sliceNum]
|
|
2557
|
+
if tract is not None:
|
|
2558
|
+
trk = tract[(tract[:,0]>sliceNum-tol_slice)*(tract[:,0]<sliceNum+tol_slice), :]
|
|
2559
|
+
trk = trk[:, [1,2,0,3,4,5,6,7]]
|
|
2560
|
+
|
|
2561
|
+
return imSlice, segSlice, trk
|
|
2562
|
+
|
|
2563
|
+
########assign segmentation to a widget##############
def setSliceSeg(widget, npSeg):
    """
    assign segmentation to a widget

    Args:
        widget:
        npSeg:

    Returns:

    """
    sliceNum = widget.sliceNum
    if widget.activeDim == 0:
        segSlice = npSeg[sliceNum, :, :]
    elif widget.activeDim == 1:
        segSlice = npSeg[:, sliceNum, :]
    elif widget.activeDim == 2:
        segSlice = npSeg[:, :, sliceNum]
    widget.segSlice = segSlice

########Get current slider value##############
def getCurrentSlider(slider, widget, value):
    """
    Get current slider value

    Args:
        slider:
        widget:
        value:

    Returns:

    """
    rng = slider.maximum() - slider.minimum()
    rngnew = widget.imDepth
    sliceNum = (value - slider.minimum()) * rngnew / rng
    if sliceNum >= widget.imDepth:
        sliceNum = widget.imDepth - 1
    return int(sliceNum)

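# Worked sketch (illustrative only, not part of the original module): this hypothetical helper
# mirrors the arithmetic of getCurrentSlider without Qt objects. The slider position is mapped
# linearly onto the slice range, e.g. a 0..999 slider at value 500 on a 256-slice volume
# yields slice 128.
def _example_slider_to_slice(value=500, slider_min=0, slider_max=999, im_depth=256):
    rng = slider_max - slider_min
    slice_num = (value - slider_min) * im_depth / rng
    return min(int(slice_num), im_depth - 1)              # -> 128
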
########Updating image view##############
|
|
2603
|
+
def updateSight(slider, widget, reader, value, tol_slice=3):
|
|
2604
|
+
"""
|
|
2605
|
+
Update slider and widget
|
|
2606
|
+
Args:
|
|
2607
|
+
slider:
|
|
2608
|
+
widget:
|
|
2609
|
+
reader:
|
|
2610
|
+
value:
|
|
2611
|
+
tol_slice:
|
|
2612
|
+
|
|
2613
|
+
Returns:
|
|
2614
|
+
|
|
2615
|
+
"""
|
|
2616
|
+
|
|
2617
|
+
|
|
2618
|
+
try:
|
|
2619
|
+
sliceNum = getCurrentSlider(slider,
|
|
2620
|
+
widget, value)
|
|
2621
|
+
widget.points = []
|
|
2622
|
+
widget.selectedPoints = []
|
|
2623
|
+
|
|
2624
|
+
widget.updateInfo(*getCurrentSlice(widget,
|
|
2625
|
+
reader.npImage, reader.npSeg,
|
|
2626
|
+
sliceNum, reader.tract, tol_slice=tol_slice), sliceNum, reader.npImage.shape,
|
|
2627
|
+
imSpacing = reader.ImSpacing)
|
|
2628
|
+
|
|
2629
|
+
widget.update()
|
|
2630
|
+
except Exception as e:
|
|
2631
|
+
print(e)
|
|
2632
|
+
print('Impossible')
|
|
2633
|
+
|
|
2634
|
+
########Change from sagital to coronal and axial##############
|
|
2635
|
+
def changeCoronalSagittalAxial(slider, widget, reader, windowName, indWind, label, initialState = False, tol_slice=3):
|
|
2636
|
+
try:
|
|
2637
|
+
widget.changeView(windowName, widget.zRot)
|
|
2638
|
+
widget.updateCurrentImageInfo(reader.npImage.shape)
|
|
2639
|
+
slider.setRange(0, reader.ImExtent[indWind])
|
|
2640
|
+
slider.setValue(reader.ImExtent[indWind] // 2)
|
|
2641
|
+
label.setText(str_conv(reader.ImExtent[indWind] // 2))
|
|
2642
|
+
|
|
2643
|
+
sliceNum = slider.value()
|
|
2644
|
+
widget.points = []
|
|
2645
|
+
widget.selectedPoints = []
|
|
2646
|
+
|
|
2647
|
+
widget.updateInfo(*getCurrentSlice(widget,reader.npImage, reader.npSeg,
|
|
2648
|
+
sliceNum, reader.tract, tol_slice=tol_slice), sliceNum, reader.npImage.shape, initialState=initialState,
|
|
2649
|
+
imSpacing = reader.ImSpacing)
|
|
2650
|
+
|
|
2651
|
+
widget.update()
|
|
2652
|
+
except Exception as e:
|
|
2653
|
+
print(e)
|
|
2654
|
+
print('Impossible')
|
|
2655
|
+
|
|
2656
|
+
###################### standardize between 0 and 255 ######################

def standardize(imdata, value=255.0):
    """
    Standardize image intensities to the range [0, value] (255.0 by default)

    Args:
        imdata: image data
        value: upper bound of the output range

    Returns:
        rescaled image data
    """
    imdata = (imdata - imdata.min()) * value / np.ptp(imdata)  # range
    return imdata

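# Usage sketch (illustrative only, not part of the original module): standardize maps the full
# intensity range of an array onto [0, value] using np.ptp, so the minimum lands at 0 and the
# maximum at `value`.
def _example_standardize():
    im = np.array([[-100.0, 0.0], [50.0, 300.0]])
    out = standardize(im)          # min -> 0.0, max -> 255.0
    return out.min(), out.max()    # (0.0, 255.0)
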
###################### find non zero segmentation values ######################
def getNoneZeroSeg(seg, whiteInd, colorInd, ind_color):
    if colorInd != ind_color:
        return whiteInd[np.where(seg[tuple(zip(*whiteInd))] == colorInd)[0], :]
    else:
        return whiteInd[np.where(seg[tuple(zip(*whiteInd))] > 0)[0], :]

###################### find pixels that are not segmented ######################
def getZeroSeg(seg, whiteInd, colrInd):
    return whiteInd[np.where(seg[tuple(zip(*whiteInd))] != colrInd)[0], :]
    # unreachable legacy variants kept for reference:
    # return whiteInd[np.hstack((np.where(seg[tuple(zip(*whiteInd))]==9)[0], np.where(seg[tuple(zip(*whiteInd))]==48)[0],
    #                            np.where(seg[tuple(zip(*whiteInd))]==1235)[0], np.where(seg[tuple(zip(*whiteInd))]==1234)[0])),:]
    # return whiteInd[np.hstack((np.where(seg[tuple(zip(*whiteInd))]==9)[0], np.where(seg[tuple(zip(*whiteInd))]==48)[0])),:]

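# Usage sketch (illustrative only, not part of the original module): both helpers above filter
# an (N, 3) array of voxel indices by the label already stored in the segmentation volume at
# those positions.
def _example_zero_nonzero_seg():
    seg = np.zeros((4, 4, 4), dtype=int)
    seg[1, 1, 1] = 7
    white = np.array([[1, 1, 1], [2, 2, 2]])
    already_colored = getNoneZeroSeg(seg, white, 7, ind_color=9876)   # -> [[1, 1, 1]]
    still_empty = getZeroSeg(seg, white, 7)                           # -> [[2, 2, 2]]
    return already_colored, still_empty
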
###################### repeating the segmentation ######################
|
|
2685
|
+
def repetition(shp, coords, numRep, windowName):
|
|
2686
|
+
"""
|
|
2687
|
+
repeat coordinate based on the number of repetitions
|
|
2688
|
+
Args:
|
|
2689
|
+
shp:
|
|
2690
|
+
coords:
|
|
2691
|
+
numRep:
|
|
2692
|
+
windowName:
|
|
2693
|
+
|
|
2694
|
+
Returns:
|
|
2695
|
+
|
|
2696
|
+
"""
|
|
2697
|
+
if numRep <= 1 and numRep>=-1:
|
|
2698
|
+
return coords
|
|
2699
|
+
|
|
2700
|
+
def updateCoord(ind, coords_final, numRep):
|
|
2701
|
+
abs_numRep = abs(numRep)
|
|
2702
|
+
for i in range(abs_numRep-1):
|
|
2703
|
+
#self.progressBarSaving.setValue((i+1)/(numRep-1))
|
|
2704
|
+
tmp = coords.copy()
|
|
2705
|
+
if numRep>0:
|
|
2706
|
+
tmp[:,ind]+=(i+1)
|
|
2707
|
+
elif numRep <0:
|
|
2708
|
+
tmp[:,ind]-=(i+1)
|
|
2709
|
+
coords_final = np.vstack((coords_final, tmp))
|
|
2710
|
+
|
|
2711
|
+
return coords_final
|
|
2712
|
+
|
|
2713
|
+
coords_final = coords.copy()
|
|
2714
|
+
if windowName.lower() == "coronal":
|
|
2715
|
+
if numRep>0:
|
|
2716
|
+
max_rep = shp[1]-coords_final[0][1]-1
|
|
2717
|
+
numRep = max_rep if numRep > max_rep else numRep
|
|
2718
|
+
else:
|
|
2719
|
+
numRep = max(numRep, -coords_final[0][1]-1)
|
|
2720
|
+
coords_final = updateCoord(1, coords_final, numRep)
|
|
2721
|
+
elif windowName.lower() == "sagittal":
|
|
2722
|
+
if numRep>0:
|
|
2723
|
+
max_rep = shp[2]-coords_final[0][2]-1
|
|
2724
|
+
numRep = max_rep if numRep > max_rep else numRep
|
|
2725
|
+
else:
|
|
2726
|
+
numRep = max(numRep, -coords_final[0][2]-1)
|
|
2727
|
+
coords_final = updateCoord(2, coords_final,numRep)
|
|
2728
|
+
elif windowName.lower() == "axial":
|
|
2729
|
+
if numRep>0:
|
|
2730
|
+
max_rep = shp[0]-coords_final[0][0]-1
|
|
2731
|
+
numRep = max_rep if numRep > max_rep else numRep
|
|
2732
|
+
else:
|
|
2733
|
+
numRep = max(numRep, -coords_final[0][0]-1)
|
|
2734
|
+
coords_final = updateCoord(0, coords_final, numRep)
|
|
2735
|
+
return coords_final
|
|
2736
|
+
|
|
2737
|
+
|
|
2738
|
+
###################### Linking MRI and Ultrasound images ######################
|
|
2739
|
+
def LinkMRI_ECO(pointsMRI, pointsECO, degree = 1):
|
|
2740
|
+
"""
|
|
2741
|
+
Link MRI image to US image
|
|
2742
|
+
Args:
|
|
2743
|
+
pointsMRI:
|
|
2744
|
+
pointsECO:
|
|
2745
|
+
degree:
|
|
2746
|
+
|
|
2747
|
+
Returns:
|
|
2748
|
+
|
|
2749
|
+
"""
|
|
2750
|
+
from sklearn.preprocessing import PolynomialFeatures
|
|
2751
|
+
from sklearn.linear_model import LinearRegression
|
|
2752
|
+
from sklearn.pipeline import Pipeline
|
|
2753
|
+
|
|
2754
|
+
pointsMRI = np.asarray(pointsMRI)
|
|
2755
|
+
pointsECO = np.asarray(pointsECO)
|
|
2756
|
+
models = []
|
|
2757
|
+
for i in range(3):
|
|
2758
|
+
model = Pipeline([('poly', PolynomialFeatures(degree=degree)),
|
|
2759
|
+
('linear', LinearRegression(fit_intercept=True))])
|
|
2760
|
+
model = model.fit(pointsMRI, pointsECO[:,i])
|
|
2761
|
+
models.append(model)
|
|
2762
|
+
|
|
2763
|
+
for i in range(3):
|
|
2764
|
+
model = Pipeline([('poly', PolynomialFeatures(degree=degree)),
|
|
2765
|
+
('linear', LinearRegression(fit_intercept=True))])
|
|
2766
|
+
model = model.fit(pointsECO, pointsMRI[:,i])
|
|
2767
|
+
models.append(model)
|
|
2768
|
+
|
|
2769
|
+
|
|
2770
|
+
#poly_reg = PolynomialFeatures(degree=degree)
|
|
2771
|
+
#X_poly = poly_reg.fit_transform(pointsMRI)
|
|
2772
|
+
#pol_reg = LinearRegression()
|
|
2773
|
+
#pol_reg.fit(X_poly, pointsECO)
|
|
2774
|
+
return models
|
|
2775
|
+
|
|
2776
|
+
|
|
2777
|
+
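# Usage sketch (illustrative only, not part of the original module): LinkMRI_ECO above fits six
# sklearn regressors, one per output coordinate and direction; the first three map MRI -> US and
# the last three map US -> MRI, so a point is transformed by calling predict on each model. The
# control points below are hypothetical and describe a pure translation of (+1, +2, +3).
def _example_link_mri_eco():
    mri_pts = [[0, 0, 0], [10, 0, 0], [0, 10, 0], [0, 0, 10], [10, 10, 10]]
    eco_pts = [[1, 2, 3], [11, 2, 3], [1, 12, 3], [1, 2, 13], [11, 12, 13]]
    models = LinkMRI_ECO(mri_pts, eco_pts, degree=1)
    p = np.asarray([[5.0, 5.0, 5.0]])
    mri_to_eco = [models[i].predict(p)[0] for i in range(3)]   # approximately [6, 7, 8]
    return mri_to_eco
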
###################### ######################
def destacked(x, y, z):
    return np.vstack((x, y, z)).transpose(1, 0)


###################### generate extrapoints on a line ######################
|
|
2783
|
+
def generate_extrapoint_on_line(l1, l2, sliceNum):
|
|
2784
|
+
"""
|
|
2785
|
+
:param l1: start line
|
|
2786
|
+
:param l2: end line
|
|
2787
|
+
:param sliceNum: slice number
|
|
2788
|
+
:return: point on the line
|
|
2789
|
+
"""
|
|
2790
|
+
angleline = math.atan2((l2[0] - l1[0]), (l2[1] - l1[1])) * 180 / np.pi
|
|
2791
|
+
#print(angleline)
|
|
2792
|
+
|
|
2793
|
+
if (abs(angleline) > 25 and abs(angleline) < 165):
|
|
2794
|
+
m = (l2[1] - l1[1]) / (l2[0] - l1[0])
|
|
2795
|
+
c = l2[1] - (m * l2[0])
|
|
2796
|
+
pts = np.sort([l1[0], l2[0]])
|
|
2797
|
+
#argm = np.argmin([l1[1], l2[1]])
|
|
2798
|
+
xs = np.linspace(pts[0], pts[1], int(pts[1] - pts[0]) + 1)
|
|
2799
|
+
ys = (m * xs + c)
|
|
2800
|
+
else:
|
|
2801
|
+
m = (l2[0] - l1[0]) / (l2[1] - l1[1])
|
|
2802
|
+
c = l2[0] - (m * l2[1])
|
|
2803
|
+
pts = np.sort([l1[1], l2[1]])
|
|
2804
|
+
#argm = np.argmin([l1[1], l2[1]])
|
|
2805
|
+
ys = np.linspace(pts[0], pts[1], int(pts[1] - pts[0]) + 1)
|
|
2806
|
+
xs = (m * ys + c)
|
|
2807
|
+
#xs = np.round(xs)
|
|
2808
|
+
#ys = np.round(ys)
|
|
2809
|
+
d = [[x0, y0, z0] for x0, y0, z0 in zip(xs, ys, [sliceNum] * len(xs))]
|
|
2810
|
+
if sum([abs(x-y) for x, y in zip(d[-1],l1)])<5:
|
|
2811
|
+
d = d[::-1]
|
|
2812
|
+
return d
|
|
2813
|
+
|
|
2814
|
+
###################### Add tree root items ######################
|
|
2815
|
+
def addTreeRoot(treeItem, name, description, color):
|
|
2816
|
+
"""
|
|
2817
|
+
|
|
2818
|
+
Args:
|
|
2819
|
+
treeItem:
|
|
2820
|
+
name:
|
|
2821
|
+
description:
|
|
2822
|
+
color:
|
|
2823
|
+
|
|
2824
|
+
Returns:
|
|
2825
|
+
|
|
2826
|
+
"""
|
|
2827
|
+
if name.lower()=='mri':
|
|
2828
|
+
clr = 55
|
|
2829
|
+
else:
|
|
2830
|
+
clr = 155
|
|
2831
|
+
#for i in [0,1]:
|
|
2832
|
+
# treeItem.setForeground(i,QtGui.QBrush(QtGui.QColor(color[0]*255, color[1]*255, color[2]*255, 255)))
|
|
2833
|
+
color = [int(c*255) for c in color]
|
|
2834
|
+
node1 = QtGui.QStandardItem(name)
|
|
2835
|
+
node1.setForeground(QtGui.QBrush(QtGui.QColor(color[0], color[1], color[2], 255)))
|
|
2836
|
+
node1.setFlags(node1.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEditable)
|
|
2837
|
+
node1.setCheckState(0)
|
|
2838
|
+
node2 = QtGui.QStandardItem(description)
|
|
2839
|
+
node2.setForeground(QtGui.QBrush(QtGui.QColor(color[0], color[1], color[2], 255)))
|
|
2840
|
+
node2.setFlags(node2.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsEditable)
|
|
2841
|
+
#node2.setCheckState(0)
|
|
2842
|
+
treeItem.appendRow([node1, node2])
|
|
2843
|
+
|
|
2844
|
+
###################### find largest connected components ######################
def LargestCC(segmentation, connectivity=3):
    """
    Get largest connected components
    """
    ndim = 3
    from skimage.measure import label as label_connector
    if segmentation.ndim == 4:
        segmentation = segmentation.squeeze(-1)
        ndim = 4
    labels = label_connector(segmentation, connectivity=connectivity)
    frequency = np.bincount(labels.flat)
    # frequency = -np.sort(-frequency)
    return labels, frequency




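# Usage sketch (illustrative only, not part of the original module): LargestCC returns the full
# label map and the per-label voxel counts; the largest foreground component is the most
# frequent non-zero label.
def _example_largest_cc():
    seg = np.zeros((10, 10, 10), dtype=np.uint8)
    seg[1:4, 1:4, 1:4] = 1          # 27-voxel blob
    seg[7:9, 7:9, 7:9] = 1          # 8-voxel blob
    labels, frequency = LargestCC(seg)
    biggest = np.argmax(frequency[1:]) + 1      # skip the background label 0
    mask = labels == biggest
    return int(mask.sum())                      # -> 27
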
def getscale(data, dst_min, dst_max, f_low=0.0, f_high=0.999):
|
|
2864
|
+
"""
|
|
2865
|
+
Function to get offset and scale of image intensities to robustly rescale to range dst_min..dst_max.
|
|
2866
|
+
Equivalent to how mri_convert conforms images.
|
|
2867
|
+
:param np.ndarray data: image data (intensity values)
|
|
2868
|
+
:param float dst_min: future minimal intensity value
|
|
2869
|
+
:param float dst_max: future maximal intensity value
|
|
2870
|
+
:param f_low: robust cropping at low end (0.0 no cropping)
|
|
2871
|
+
:param f_high: robust cropping at higher end (0.999 crop one thousandths of high intensity voxels)
|
|
2872
|
+
:return: float src_min: (adjusted) offset
|
|
2873
|
+
:return: float scale: scale factor
|
|
2874
|
+
"""
|
|
2875
|
+
# get min and max from source
|
|
2876
|
+
src_min = np.min(data)
|
|
2877
|
+
src_max = np.max(data)
|
|
2878
|
+
|
|
2879
|
+
#print("Input: min: " + format(src_min) + " max: " + format(src_max))
|
|
2880
|
+
|
|
2881
|
+
if f_low == 0.0 and f_high == 1.0:
|
|
2882
|
+
return src_min, 1.0
|
|
2883
|
+
|
|
2884
|
+
# compute non-zeros and total vox num
|
|
2885
|
+
nz = (np.abs(data) >= 1e-15).sum()
|
|
2886
|
+
voxnum = data.shape[0] * data.shape[1] * data.shape[2]
|
|
2887
|
+
|
|
2888
|
+
# compute histogram
|
|
2889
|
+
histosize = 1000
|
|
2890
|
+
bin_size = (src_max - src_min) / histosize
|
|
2891
|
+
hist, bin_edges = np.histogram(data, histosize)
|
|
2892
|
+
|
|
2893
|
+
# compute cummulative sum
|
|
2894
|
+
cs = np.concatenate(([0], np.cumsum(hist)))
|
|
2895
|
+
|
|
2896
|
+
# get lower limit
|
|
2897
|
+
nth = int(f_low * voxnum)
|
|
2898
|
+
idx = np.where(cs < nth)
|
|
2899
|
+
|
|
2900
|
+
if len(idx[0]) > 0:
|
|
2901
|
+
idx = idx[0][-1] + 1
|
|
2902
|
+
|
|
2903
|
+
else:
|
|
2904
|
+
idx = 0
|
|
2905
|
+
|
|
2906
|
+
src_min = idx * bin_size + src_min
|
|
2907
|
+
|
|
2908
|
+
# print("bin min: "+format(idx)+" nth: "+format(nth)+" passed: "+format(cs[idx])+"\n")
|
|
2909
|
+
# get upper limit
|
|
2910
|
+
nth = voxnum - int((1.0 - f_high) * nz)
|
|
2911
|
+
idx = np.where(cs >= nth)
|
|
2912
|
+
|
|
2913
|
+
if len(idx[0]) > 0:
|
|
2914
|
+
idx = idx[0][0] - 2
|
|
2915
|
+
|
|
2916
|
+
else:
|
|
2917
|
+
idx = 0
|
|
2918
|
+
print('ERROR: rescale upper bound not found')
|
|
2919
|
+
|
|
2920
|
+
src_max = idx * bin_size + src_min
|
|
2921
|
+
# print("bin max: "+format(idx)+" nth: "+format(nth)+" passed: "+format(voxnum-cs[idx])+"\n")
|
|
2922
|
+
|
|
2923
|
+
# scale
|
|
2924
|
+
if src_min == src_max:
|
|
2925
|
+
scale = 1.0
|
|
2926
|
+
|
|
2927
|
+
else:
|
|
2928
|
+
scale = (dst_max - dst_min) / (src_max - src_min)
|
|
2929
|
+
|
|
2930
|
+
#print("rescale: min: " + format(src_min) + " max: " + format(src_max) + " scale: " + format(scale))
|
|
2931
|
+
|
|
2932
|
+
return src_min, scale
|
|
2933
|
+
|
|
2934
|
+
|
|
2935
|
+
|
|
2936
|
+
|
|
2937
|
+
|
|
2938
|
+
def scalecrop(data, dst_min, dst_max, src_min, scale):
    """
    Function to crop the intensity ranges to specific min and max values
    :param np.ndarray data: Image data (intensity values)
    :param float dst_min: future minimal intensity value
    :param float dst_max: future maximal intensity value
    :param float src_min: minimal value to consider from source (crops below)
    :param float scale: scale value by which source will be shifted
    :return: np.ndarray data_new: scaled image data
    """
    data_new = dst_min + scale * (data - src_min)

    # clip
    data_new = np.clip(data_new, dst_min, dst_max)
    #print("Output: min: " + format(data_new.min()) + " max: " + format(data_new.max()))

    return data_new


def normalize_mri(img):
    src_min, scale = getscale(img, 0, 255)
    new_data = scalecrop(img, 0, 255, src_min, scale)
    return new_data


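# Worked sketch (illustrative only, not part of the original module): normalize_mri chains
# getscale and scalecrop, so an image is shifted by the robust minimum, scaled into 0..255 and
# clipped to that range. The synthetic volume below is hypothetical.
def _example_normalize_mri():
    rng = np.random.default_rng(1)
    img = rng.normal(600.0, 100.0, size=(32, 32, 32))
    out = normalize_mri(img)
    return float(out.min()), float(out.max())    # approximately (0.0, 255.0)
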
###################### find convex hul segmentation ######################
|
|
2964
|
+
def convexhull_spline(total_points, currentWidnowName, sliceNum, npSeg):
|
|
2965
|
+
"""
|
|
2966
|
+
:param total_points:
|
|
2967
|
+
:return:
|
|
2968
|
+
"""
|
|
2969
|
+
from scipy.spatial.distance import cdist
|
|
2970
|
+
from scipy.interpolate import splprep, splev
|
|
2971
|
+
|
|
2972
|
+
distance_point_line = lambda p1, p2, p3: np.linalg.norm(np.cross(p3 - p1, p1 - p2)) / np.linalg.norm(p3 - p1)
|
|
2973
|
+
angle_ps = lambda p1, p2: math.atan2(p2[1] - p1[1], p2[0] - p1[0]) * 180 / np.pi
|
|
2974
|
+
angle_cor = lambda a1: (a1 - 360) if a1 > 180 else a1 + 360 if a1 < -180 else abs(a1) if (a1 < 0, a1 > -180) else a1
|
|
2975
|
+
|
|
2976
|
+
# angle_three = lambda p1,p2, p3: [abs(angle_ps(p1,p3)-angle_ps(p1,p2)), abs(angle_ps(p1,p3)-angle_ps(p2,p3))]
|
|
2977
|
+
# angle_correct = lambda a1, a2: [(180-a1) if a1>=180 else a1, (180-a2) if a2>=180 else a2]
|
|
2978
|
+
criterion_met = lambda p1, p2, p3: abs(angle_cor(angle_ps(p1, p3)) - angle_cor(angle_ps(p1, p2))) < 75 and abs(
|
|
2979
|
+
angle_cor(angle_ps(p1, p3)) - angle_cor(angle_ps(p2, p3))) < 75 and abs(
|
|
2980
|
+
angle_ps(p2, p3) - angle_ps(p2, p1)) > 45
|
|
2981
|
+
def find_best(outrS, outrind):
|
|
2982
|
+
dists = []
|
|
2983
|
+
indc = []
|
|
2984
|
+
bls = []
|
|
2985
|
+
for indices in outrS:
|
|
2986
|
+
p1, p2, p3 = d[int(outrind[indices, 3]), :], d[int(outrind[indices, 2]), :], d[int(outrind[indices, 4]), :]
|
|
2987
|
+
dists.append(distance_point_line(p1, p2, p3))
|
|
2988
|
+
bls.append(criterion_met(p1,p2,p3))
|
|
2989
|
+
indc.append(indices)
|
|
2990
|
+
if len(indc) > 5:
|
|
2991
|
+
break
|
|
2992
|
+
ind_m = np.argsort(dists)
|
|
2993
|
+
succes = False
|
|
2994
|
+
indices = -1
|
|
2995
|
+
if dists[ind_m[0]]>=1.2:#1.2 pixel
|
|
2996
|
+
for ind in ind_m:
|
|
2997
|
+
if bls[ind]:
|
|
2998
|
+
indices = indc[ind]
|
|
2999
|
+
succes = True
|
|
3000
|
+
break
|
|
3001
|
+
|
|
3002
|
+
return indices, succes
|
|
3003
|
+
|
|
3004
|
+
x, y, z = np.where(npSeg == 1500)
|
|
3005
|
+
additional_point = np.vstack((x,y,z)).transpose()
|
|
3006
|
+
#total_points = np.loadtxt('totalpoints.txt')
|
|
3007
|
+
total_points = np.unique(total_points, axis=0)
|
|
3008
|
+
remps = []
|
|
3009
|
+
if currentWidnowName == 'coronal':
|
|
3010
|
+
main_axis = 1
|
|
3011
|
+
newp = total_points[total_points[:, 1] == sliceNum, :]
|
|
3012
|
+
other_axis = [2,0]
|
|
3013
|
+
remps = total_points[total_points[:, 1] != sliceNum, :]
|
|
3014
|
+
elif currentWidnowName == 'sagittal':
|
|
3015
|
+
main_axis = 2
|
|
3016
|
+
newp = total_points[total_points[:,2]==sliceNum,:]
|
|
3017
|
+
remps = total_points[total_points[:, 2] != sliceNum, :]
|
|
3018
|
+
other_axis = [1, 0]
|
|
3019
|
+
elif currentWidnowName == 'axial':
|
|
3020
|
+
main_axis = 0
|
|
3021
|
+
newp = total_points[total_points[:,0]==sliceNum,:]
|
|
3022
|
+
remps = total_points[total_points[:, 0] != sliceNum, :]
|
|
3023
|
+
other_axis = [2, 1]
|
|
3024
|
+
|
|
3025
|
+
additional_point = additional_point[:, [other_axis[0], other_axis[1], main_axis]]
|
|
3026
|
+
|
|
3027
|
+
d = np.array([newp[:, other_axis[0]], newp[:, other_axis[1]]]).T
|
|
3028
|
+
|
|
3029
|
+
convex_hull = np.array(LineString(d).convex_hull.exterior.xy).T
|
|
3030
|
+
routes = [np.argmin(np.sum(np.abs(con - d), 1)) for con in convex_hull]
|
|
3031
|
+
out_route = list(set(np.arange(d.shape[0])) - set(routes))
|
|
3032
|
+
|
|
3033
|
+
if len(out_route)>0:
|
|
3034
|
+
sorted_dist = list(cdist(d[routes, :], d[out_route, :]).min(0).argsort(0))
|
|
3035
|
+
out_route = np.array(out_route)[sorted_dist]
|
|
3036
|
+
dist = lambda x, y: np.linalg.norm(x - y)
|
|
3037
|
+
|
|
3038
|
+
path_dist = lambda ind, routes: np.array(
|
|
3039
|
+
[[dist(d[routes[r], :], d[ind, :]), dist(d[routes[r + 1], :], d[ind, :]), ind, routes[r], routes[r + 1]] for r
|
|
3040
|
+
in range(len(routes) - 1)])
|
|
3041
|
+
for ind in out_route:
|
|
3042
|
+
outrind = path_dist(ind, routes)
|
|
3043
|
+
|
|
3044
|
+
outrS = outrind[:, [1]].argsort(0)
|
|
3045
|
+
indices, succes = find_best(outrS, outrind)
|
|
3046
|
+
if succes:
|
|
3047
|
+
indsel = np.where(routes == outrind[indices,3])[0][0]
|
|
3048
|
+
routes.insert(indsel + 1, ind)
|
|
3049
|
+
tt = newp[routes, :][:,[other_axis[0], other_axis[1]]].T
|
|
3050
|
+
|
|
3051
|
+
tck, u = splprep(tt, u=None, s=0.0, per=1)
|
|
3052
|
+
u_new = np.linspace(u.min(), u.max(), 1000)
|
|
3053
|
+
x_new, y_new = splev(u_new, tck, der=0)
|
|
3054
|
+
debug = False
|
|
3055
|
+
if debug:
|
|
3056
|
+
import matplotlib.pyplot as plt
|
|
3057
|
+
plt.scatter(convex_hull[:, 0], convex_hull[:, 1]);
|
|
3058
|
+
plt.plot(d[routes, 0], d[routes, 1]);
|
|
3059
|
+
plt.scatter(d[:, 0], d[:, 1])
|
|
3060
|
+
plt.scatter(x_new, y_new)
|
|
3061
|
+
plt.show()
|
|
3062
|
+
|
|
3063
|
+
newp = np.array([x_new, y_new, np.repeat(sliceNum, x_new.shape[0])]).transpose()
|
|
3064
|
+
|
|
3065
|
+
|
|
3066
|
+
pl = Polygon(newp)
|
|
3067
|
+
if not pl.is_valid:
|
|
3068
|
+
pls = ConvertPToPolygons(newp)
|
|
3069
|
+
for ij, pl in enumerate(pls):
|
|
3070
|
+
if ij == 0:
|
|
3071
|
+
selected_points, edges = fillInsidePol(pl)
|
|
3072
|
+
else:
|
|
3073
|
+
n_points, edges= fillInsidePol(pl)
|
|
3074
|
+
selected_points = np.vstack([selected_points, n_points])
|
|
3075
|
+
else:
|
|
3076
|
+
selected_points, edges = fillInsidePol(pl)
|
|
3077
|
+
|
|
3078
|
+
|
|
3079
|
+
selected_points = PermuteProperAxis(np.vstack((additional_point, selected_points)), currentWidnowName)
|
|
3080
|
+
return selected_points.astype('int'), []#list(remps)
|
|
3081
|
+
|
|
3082
|
+
###################### find selected widget ######################
|
|
3083
|
+
def locateWidgets(sender, mainw):
|
|
3084
|
+
"""
|
|
3085
|
+
locate widget
|
|
3086
|
+
Args:
|
|
3087
|
+
sender:
|
|
3088
|
+
mainw:
|
|
3089
|
+
|
|
3090
|
+
Returns:
|
|
3091
|
+
|
|
3092
|
+
"""
|
|
3093
|
+
if sender == mainw.openGLWidget_4 or sender == mainw.openGLWidget_5 or sender == mainw.openGLWidget_6:
|
|
3094
|
+
# mri Image
|
|
3095
|
+
readerName = 'readImMRI'
|
|
3096
|
+
reader = mainw.readImMRI
|
|
3097
|
+
widgets = []
|
|
3098
|
+
if mainw.tabWidget.currentIndex() == 0:
|
|
3099
|
+
widgets = [mainw.openGLWidget_4, mainw.openGLWidget_5, mainw.openGLWidget_6]
|
|
3100
|
+
return readerName, reader, widgets
|
|
3101
|
+
elif sender == mainw.openGLWidget_1 or sender == mainw.openGLWidget_2 or sender == mainw.openGLWidget_3 or sender == mainw.openGLWidget_11:
|
|
3102
|
+
# eco
|
|
3103
|
+
readerName = 'readImECO'
|
|
3104
|
+
reader = mainw.readImECO
|
|
3105
|
+
widgets = []
|
|
3106
|
+
#if mainw.tabWidget.currentIndex() == 0:
|
|
3107
|
+
widgets = [mainw.openGLWidget_1, mainw.openGLWidget_2, mainw.openGLWidget_3,mainw.openGLWidget_11]
|
|
3108
|
+
#elif mainw.tabWidget.currentIndex() == 2:
|
|
3109
|
+
#widgets = [mainw.openGLWidget_11]
|
|
3110
|
+
return readerName, reader, widgets
|
|
3111
|
+
else:
|
|
3112
|
+
return None, None, None
|
|
3113
|
+
|
|
3114
|
+
###################### searching for additional points ######################
|
|
3115
|
+
def SearchForAdditionalPoints(nseg, sliceNum, windowname, max_threshold_to_be_line=30, max_lines=2,
|
|
3116
|
+
threshold_jump = 4, line_info = None, active_color_ind=1):
|
|
3117
|
+
"""
|
|
3118
|
+
Search for additional points ...
|
|
3119
|
+
Args:
|
|
3120
|
+
nseg:
|
|
3121
|
+
sliceNum:
|
|
3122
|
+
windowname:
|
|
3123
|
+
max_threshold_to_be_line:
|
|
3124
|
+
max_lines:
|
|
3125
|
+
threshold_jump:
|
|
3126
|
+
line_info:
|
|
3127
|
+
|
|
3128
|
+
Returns:
|
|
3129
|
+
|
|
3130
|
+
"""
|
|
3131
|
+
seg = nseg.copy()
|
|
3132
|
+
len_lines = dict()
|
|
3133
|
+
if line_info is not None:
|
|
3134
|
+
if windowname == 'coronal':
|
|
3135
|
+
point_sel = np.vstack(line_info)[:, [0, 2]]
|
|
3136
|
+
seg[tuple(zip(*point_sel))] = 0
|
|
3137
|
+
elif windowname == 'sagittal':
|
|
3138
|
+
point_sel = np.vstack(line_info)[:, [0, 1]]
|
|
3139
|
+
seg[tuple(zip(*point_sel))] = 0
|
|
3140
|
+
elif windowname == 'axial':
|
|
3141
|
+
point_sel = np.vstack(line_info)[:, [1, 2]]
|
|
3142
|
+
seg[tuple(zip(*point_sel))] = 0
|
|
3143
|
+
[x_ind, y_ind] = np.where(seg == np.inf)
|
|
3144
|
+
points = np.array([[x, y] for x, y in zip(x_ind, y_ind)])
|
|
3145
|
+
if points.shape[0]==0:
|
|
3146
|
+
len_lines['h'] = 0
|
|
3147
|
+
len_lines['v'] = 0
|
|
3148
|
+
return [], False, len_lines
|
|
3149
|
+
mean_x, mean_y = points.mean(0)
|
|
3150
|
+
def divide_to_lines(points, axis, threshold_jump):
|
|
3151
|
+
line_breaks = np.where(np.diff(np.sort(points[:, axis])) > threshold_jump)[0]
|
|
3152
|
+
breaks = [0]
|
|
3153
|
+
for br in line_breaks:
|
|
3154
|
+
if (br-breaks[-1])>5 and (points.shape[0]-br)>5:
|
|
3155
|
+
breaks.append(br+1)
|
|
3156
|
+
else:
|
|
3157
|
+
breaks[-1] = br+1
|
|
3158
|
+
breaks.append(points.shape[0])
|
|
3159
|
+
|
|
3160
|
+
return breaks
|
|
3161
|
+
success_h = False
|
|
3162
|
+
success_v = False
|
|
3163
|
+
|
|
3164
|
+
total_points = []
|
|
3165
|
+
for ax in ['h', 'v']:
|
|
3166
|
+
len_lines[ax] = 0
|
|
3167
|
+
if ax.lower()=='h':
|
|
3168
|
+
axis_used = 0
|
|
3169
|
+
axis_second=1
|
|
3170
|
+
mean_axis = mean_y
|
|
3171
|
+
|
|
3172
|
+
else:
|
|
3173
|
+
axis_used = 1
|
|
3174
|
+
axis_second = 0
|
|
3175
|
+
mean_axis = mean_x
|
|
3176
|
+
unique, counts = np.unique(points[:,axis_used], return_counts=True)
|
|
3177
|
+
if counts.max() > max_threshold_to_be_line:
|
|
3178
|
+
lines = np.sort(unique[counts > max_threshold_to_be_line])
|
|
3179
|
+
len_lines[ax] = len(lines)
|
|
3180
|
+
l_prev = np.inf
|
|
3181
|
+
if len(lines) >= max_lines:
|
|
3182
|
+
success = True
|
|
3183
|
+
if ax.lower() == 'h':
|
|
3184
|
+
success_h = True
|
|
3185
|
+
else:
|
|
3186
|
+
success_v = True
|
|
3187
|
+
for l in lines:
|
|
3188
|
+
point_in_line = points[points[:, axis_used] == l, :]
|
|
3189
|
+
breaks = divide_to_lines(point_in_line, axis_second, threshold_jump)
|
|
3190
|
+
if len(breaks)<5:
|
|
3191
|
+
|
|
3192
|
+
if abs(l_prev - l) < 2 and len(total_points) > 0 and len(breaks)==2:
|
|
3193
|
+
xy_st, xy_end = total_points[-2], total_points[-1]
|
|
3194
|
+
if ax.lower()=='h':
|
|
3195
|
+
xy2_st, xy2_end = [l, point_in_line[:, axis_second].min()], [l, point_in_line[:, axis_second].max()]
|
|
3196
|
+
else:
|
|
3197
|
+
xy2_st, xy2_end = [point_in_line[:, axis_second].min(), l], [point_in_line[:, axis_second].max(), l]
|
|
3198
|
+
if (xy2_st[axis_second] + xy_st[axis_second]) / 2 < mean_axis:
|
|
3199
|
+
ind_st = np.argmax([xy_st[axis_second], xy2_st[axis_second]])
|
|
3200
|
+
xy_s = xy_st if ind_st == axis_used else xy2_st
|
|
3201
|
+
ind_end = np.argmin([xy_end[axis_second], xy2_end[axis_second]])
|
|
3202
|
+
xy_e = xy_end if ind_end == axis_used else xy2_end
|
|
3203
|
+
else:
|
|
3204
|
+
ind_st = np.argmin([xy_st[axis_second], xy2_st[axis_second]])
|
|
3205
|
+
xy_s = xy_st if ind_st == axis_used else xy2_st
|
|
3206
|
+
ind_end = np.argmax([xy_end[axis_second], xy2_end[axis_second]])
|
|
3207
|
+
xy_e = xy_end if ind_end == axis_used else xy2_end
|
|
3208
|
+
total_points = total_points[:-2]
|
|
3209
|
+
total_points.append(xy_s)
|
|
3210
|
+
total_points.append(xy_e)
|
|
3211
|
+
else:
|
|
3212
|
+
if ax.lower() == 'h':
|
|
3213
|
+
for b in range(len(breaks)-1):
|
|
3214
|
+
total_points.append([l, point_in_line[breaks[b]:breaks[b+1], axis_second].min()])
|
|
3215
|
+
total_points.append([l, point_in_line[breaks[b]:breaks[b+1], axis_second].max()])
|
|
3216
|
+
#total_points.append([l, point_in_line[:, axis_second].min()])
|
|
3217
|
+
#total_points.append([l, point_in_line[:, axis_second].max()])
|
|
3218
|
+
else:
|
|
3219
|
+
for b in range(len(breaks)-1):
|
|
3220
|
+
total_points.append([point_in_line[breaks[b]:breaks[b+1], axis_second].min(), l])
|
|
3221
|
+
total_points.append([point_in_line[breaks[b]:breaks[b+1], axis_second].max(), l])
|
|
3222
|
+
l_prev = l
|
|
3223
|
+
|
|
3224
|
+
if len(total_points)>0:
|
|
3225
|
+
total_points = np.unique(total_points, axis=0)
|
|
3226
|
+
total_points = total_points[:, [1, 0]]
|
|
3227
|
+
total_points = np.hstack((total_points,np.repeat(sliceNum, total_points.shape[0]).reshape(-1,1)))
|
|
3228
|
+
total_points = PermuteProperAxis(total_points, windowname)
|
|
3229
|
+
return total_points, success_h*success_v, len_lines
|
|
3230
|
+
|
|
3231
|
+
###################### find index of selected colors ######################
def _get_color_index(npSeg, WI):
    uq = np.unique(npSeg[tuple(zip(*WI))])
    inds, us = [], []
    for u in uq:
        ind = npSeg[tuple(zip(*WI))] == u
        inds.append(ind)
        us.append(u)
    return inds, us

###################### updating segmentation ######################
|
|
3242
|
+
def update_last(self, npSeg, colorInd, whiteInd, colorInd2, guide_lines = False):
|
|
3243
|
+
"""
|
|
3244
|
+
update last
|
|
3245
|
+
Args:
|
|
3246
|
+
self:
|
|
3247
|
+
npSeg:
|
|
3248
|
+
colorInd:
|
|
3249
|
+
whiteInd:
|
|
3250
|
+
colorInd2:
|
|
3251
|
+
guide_lines:
|
|
3252
|
+
|
|
3253
|
+
Returns:
|
|
3254
|
+
|
|
3255
|
+
"""
|
|
3256
|
+
if colorInd != 0:
|
|
3257
|
+
WI = getZeroSeg(npSeg, whiteInd, colorInd)
|
|
3258
|
+
#WI = whiteInd
|
|
3259
|
+
if WI.shape[0]< 1:
|
|
3260
|
+
return
|
|
3261
|
+
inds, us = _get_color_index(npSeg, WI)
|
|
3262
|
+
self._lastReaderSegCol.append(colorInd)
|
|
3263
|
+
self._lastReaderSegInd.append([WI, inds, us])
|
|
3264
|
+
WI = whiteInd # to be commented to not check
|
|
3265
|
+
|
|
3266
|
+
else:
|
|
3267
|
+
WI = getNoneZeroSeg(npSeg, whiteInd, colorInd2, 9876)
|
|
3268
|
+
if WI.shape[0]< 1:
|
|
3269
|
+
return
|
|
3270
|
+
self._lastReaderSegCol.append(colorInd)
|
|
3271
|
+
inds, us = _get_color_index(npSeg, WI)
|
|
3272
|
+
self._lastReaderSegInd.append([WI, inds, us])
|
|
3273
|
+
|
|
3274
|
+
self._lastReaderSegPrevCol.append(colorInd2)
|
|
3275
|
+
self._undoTimes = 0
|
|
3276
|
+
if len(self._lastReaderSegInd) > self._lastMax:
|
|
3277
|
+
self._lastReaderSegCol = self._lastReaderSegCol[1:]
|
|
3278
|
+
self._lastReaderSegInd = self._lastReaderSegInd[1:]
|
|
3279
|
+
self._lastReaderSegPrevCol = self._lastReaderSegPrevCol[1:]
|
|
3280
|
+
if guide_lines:
|
|
3281
|
+
self._lastlines.append(WI)
|
|
3282
|
+
npSeg[tuple(zip(*WI))] = colorInd
|
|
3283
|
+
if colorInd == 0:
|
|
3284
|
+
colsel = colorInd2
|
|
3285
|
+
else:
|
|
3286
|
+
colsel = colorInd
|
|
3287
|
+
if self._sender in [getattr(self, 'openGLWidget_{}'.format(f)) for f in self.widgets_eco]:
|
|
3288
|
+
|
|
3289
|
+
txt = 'File: {}'.format(self.filenameEco)
|
|
3290
|
+
if colorInd == 9876:
|
|
3291
|
+
txt += ' TV (US) : {0:0.2f} cm\u00b3'.format((self.readImECO.npSeg > 0).sum() * self.readImECO.ImSpacing[0] ** 3 / 1000)
|
|
3292
|
+
else:
|
|
3293
|
+
txt += ' TV (US) : {0:0.2f} cm\u00b3'.format((self.readImECO.npSeg == colsel).sum() * self.readImECO.ImSpacing[0] ** 3 / 1000)
|
|
3294
|
+
self.openedFileName.setText(txt)
|
|
3295
|
+
else:
|
|
3296
|
+
txt = 'File: {}'.format(self.filenameMRI)
|
|
3297
|
+
if colorInd==9876:
|
|
3298
|
+
txt += ' TV (MRI) : {0:0.2f} cm\u00b3'.format((self.readImMRI.npSeg > 0).sum() * self.readImMRI.ImSpacing[0] ** 3 / 1000)
|
|
3299
|
+
else:
|
|
3300
|
+
txt += ' TV (MRI) : {0:0.2f} cm\u00b3'.format((self.readImMRI.npSeg == colsel).sum() * self.readImMRI.ImSpacing[0] ** 3 / 1000)
|
|
3301
|
+
self.openedFileName.setText(txt)
|
|
3302
|
+
if colorInd == 1500:
|
|
3303
|
+
self._lineinfo.append(WI)
|
|
3304
|
+
|
|
3305
|
+
###################### select proper widgets ######################
|
|
3306
|
+
def select_proper_widgets(self):
|
|
3307
|
+
from PyQt5.QtCore import QObject
|
|
3308
|
+
widgets = []
|
|
3309
|
+
sender = QObject.sender(self)
|
|
3310
|
+
if self.tabWidget.currentIndex() == 0:
|
|
3311
|
+
widgets = []
|
|
3312
|
+
if sender in [self.openGLWidget_4, self.openGLWidget_6,self.openGLWidget_5]:
|
|
3313
|
+
widgets.append(self.openGLWidget_4)
|
|
3314
|
+
widgets.append(self.openGLWidget_5)
|
|
3315
|
+
widgets.append(self.openGLWidget_6)
|
|
3316
|
+
elif sender in [self.openGLWidget_1, self.openGLWidget_2,self.openGLWidget_3]:
|
|
3317
|
+
widgets.append(self.openGLWidget_1)
|
|
3318
|
+
widgets.append(self.openGLWidget_2)
|
|
3319
|
+
widgets.append(self.openGLWidget_3)
|
|
3320
|
+
elif self.tabWidget.currentIndex() == 2:
|
|
3321
|
+
wndnm = self.openGLWidget_11.currentWidnowName
|
|
3322
|
+
if wndnm.lower() == 'sagittal':
|
|
3323
|
+
widgets.append(self.openGLWidget_2)
|
|
3324
|
+
elif wndnm.lower() == 'coronal':
|
|
3325
|
+
widgets.append(self.openGLWidget_1)
|
|
3326
|
+
elif wndnm.lower() == 'axial':
|
|
3327
|
+
widgets.append(self.openGLWidget_3)
|
|
3328
|
+
widgets.append(self.openGLWidget_11)
|
|
3329
|
+
elif self.tabWidget.currentIndex() == 3:
|
|
3330
|
+
wndnm = self.openGLWidget_11.currentWidnowName
|
|
3331
|
+
if wndnm.lower() == 'sagittal':
|
|
3332
|
+
widgets.append(self.openGLWidget_5)
|
|
3333
|
+
elif wndnm.lower() == 'coronal':
|
|
3334
|
+
widgets.append(self.openGLWidget_4)
|
|
3335
|
+
elif wndnm.lower() == 'axial':
|
|
3336
|
+
widgets.append(self.openGLWidget_6)
|
|
3337
|
+
return widgets
|
|
3338
|
+
|
|
3339
|
+
|
|
3340
|
+
|
|
3341
|
+
###################### save numpy array to image ######################
def save_numpy_to_png(file, img):
    from matplotlib.image import imsave
    imsave(file, img)


###################### saving 3D images ######################
|
|
3348
|
+
def save_3d_img(reader, file, img, format='tif', type_im = 'mri', cs=['RAS', 'AS', True]):
|
|
3349
|
+
import csv
|
|
3350
|
+
cors, asto, save_csv = cs
|
|
3351
|
+
|
|
3352
|
+
if format == 'tif':
|
|
3353
|
+
from skimage.external import tifffile as tif
|
|
3354
|
+
tif.imsave(file+'.tif', img, bigtiff=True)
|
|
3355
|
+
elif format == 'nifti':
|
|
3356
|
+
if file[-7:] != '.nii.gz':
|
|
3357
|
+
file = file +'.nii.gz'
|
|
3358
|
+
if type_im == 'mri':
|
|
3359
|
+
transpose_axis = [2, 1, 0]
|
|
3360
|
+
flip_axis = None
|
|
3361
|
+
elif type_im == 'eco':
|
|
3362
|
+
transpose_axis = [2, 1, 0]
|
|
3363
|
+
flip_axis = 1
|
|
3364
|
+
#img = np.transpose(img,transpose_axis)
|
|
3365
|
+
#img = np.flip(img, axis=flip_axis)
|
|
3366
|
+
|
|
3367
|
+
if hasattr(reader, 'affine'):
|
|
3368
|
+
affine = reader.im.affine
|
|
3369
|
+
#if reader.s2c:
|
|
3370
|
+
# try:
|
|
3371
|
+
# affine = reader._imChanged_affine
|
|
3372
|
+
# except Exception as e:
|
|
3373
|
+
# print(e)
|
|
3374
|
+
else:
|
|
3375
|
+
affine = np.eye(4)
|
|
3376
|
+
affine[:-1, -1] = np.array(reader.ImOrigin)
|
|
3377
|
+
np.fill_diagonal(affine[:-1, :-1], reader.ImSpacing)
|
|
3378
|
+
try:
|
|
3379
|
+
transform, _ = convert_to_ras(affine, target=cors)
|
|
3380
|
+
except:
|
|
3381
|
+
if hasattr(reader, 'source_system'):
|
|
3382
|
+
transform , _ = convert_to_ras(affine, target=reader.source_system)
|
|
3383
|
+
else:
|
|
3384
|
+
transform, _ = convert_to_ras(affine, target=reader.target_system)
|
|
3385
|
+
if hasattr(reader, 'header'):
|
|
3386
|
+
hdr = reader.header
|
|
3387
|
+
else:
|
|
3388
|
+
hdr = nib.Nifti1Header()
|
|
3389
|
+
hdr['dim'] = np.array([3, img.shape[0], img.shape[1], img.shape[2], 1, 1, 1, 1])
|
|
3390
|
+
new_im = nib.Nifti1Image(img.transpose(2, 1, 0)[::-1, ::-1, ::-1], affine) #get back to original
|
|
3391
|
+
from nibabel.orientations import apply_orientation
|
|
3392
|
+
if asto.lower()=='as':
|
|
3393
|
+
new_im = new_im.as_reoriented(transform) # reorient to the right transformation system
|
|
3394
|
+
elif asto.lower()=='to':
|
|
3395
|
+
img2 = apply_orientation(img, transform)
|
|
3396
|
+
#new_affine = affine @ nib.orientations.inv_ornt_aff(transform, img.shape)
|
|
3397
|
+
new_im = nib.Nifti1Image(img2, affine)
|
|
3398
|
+
#new_affine = new_im.as_reoriented(transform).affine
|
|
3399
|
+
#new_im = nib.Nifti1Image(img, new_affine, header=hdr)
|
|
3400
|
+
new_im.header['pixdim'] = hdr['pixdim']
|
|
3401
|
+
nib.save(new_im, file)
|
|
3402
|
+
|
|
3403
|
+
if save_csv:
|
|
3404
|
+
with open(file+'.csv', 'w') as f: # You will need 'wb' mode in Python 2.x
|
|
3405
|
+
w = csv.DictWriter(f, reader.metadata.keys())
|
|
3406
|
+
w.writeheader()
|
|
3407
|
+
w.writerow(reader.metadata)
|
|
3408
|
+
|
|
3409
|
+
#with open(file + '.json', 'w') as fp:
|
|
3410
|
+
# json.dump(reader.metadata, fp)
|
|
3411
|
+
|
|
3412
|
+
###################### compute volume of segmented area ######################
|
|
3413
|
+
def compute_vol_seg(npSeg_orig, segwnd):
|
|
3414
|
+
"""
|
|
3415
|
+
Compute segmentation volume
|
|
3416
|
+
Args:
|
|
3417
|
+
npSeg_orig:
|
|
3418
|
+
segwnd:
|
|
3419
|
+
|
|
3420
|
+
Returns:
|
|
3421
|
+
|
|
3422
|
+
"""
|
|
3423
|
+
npSeg = npSeg_orig.copy()
|
|
3424
|
+
selected_points_total = np.zeros((0,0,0))
|
|
3425
|
+
try:
|
|
3426
|
+
segwnd = segwnd.lower()
|
|
3427
|
+
if segwnd == 'coronal':
|
|
3428
|
+
searchwnd = npSeg.shape[1]
|
|
3429
|
+
elif segwnd == 'sagittal':
|
|
3430
|
+
searchwnd = npSeg.shape[2]
|
|
3431
|
+
elif segwnd == 'axial':
|
|
3432
|
+
searchwnd = npSeg.shape[0]
|
|
3433
|
+
nd = 0
|
|
3434
|
+
for slc in range(searchwnd):
|
|
3435
|
+
|
|
3436
|
+
if segwnd=='coronal':
|
|
3437
|
+
seg = npSeg[:, slc, :]
|
|
3438
|
+
elif segwnd == 'sagittal':
|
|
3439
|
+
seg = npSeg[:,:,slc]
|
|
3440
|
+
elif segwnd == 'axial':
|
|
3441
|
+
seg = npSeg[slc,:,:]
|
|
3442
|
+
[x_ind, y_ind] = np.where(seg > 0)
|
|
3443
|
+
pointxy = np.array([[x, y] for x, y in zip(x_ind, y_ind)])
|
|
3444
|
+
if pointxy.shape[0]>1:
|
|
3445
|
+
total_points, success, len_lines = SearchForAdditionalPoints(seg, slc, segwnd)
|
|
3446
|
+
if len(total_points) > 0 and success:
|
|
3447
|
+
|
|
3448
|
+
selected_points,_ = convexhull_spline(total_points, segwnd, slc, npSeg)
|
|
3449
|
+
tmp_seg = seg.copy()
|
|
3450
|
+
|
|
3451
|
+
tmp_seg[selected_points[:, 0], selected_points[:, 1]] = 1
|
|
3452
|
+
if (selected_points.shape[0] - sum(npSeg[tuple(zip(*selected_points))]>0))>0 and sum([len_lines[key] for key in len_lines.keys()])<40:
|
|
3453
|
+
if nd==0:
|
|
3454
|
+
selected_points_total = selected_points
|
|
3455
|
+
nd=1
|
|
3456
|
+
else:
|
|
3457
|
+
selected_points_total = np.vstack((selected_points_total, selected_points))
|
|
3458
|
+
#seg[seg != 0] = 0
|
|
3459
|
+
#seg[selected_points[:, 0], selected_points[:, 1]] = 1
|
|
3460
|
+
except Exception as e:
|
|
3461
|
+
print('Compute Vol Seg Error')
|
|
3462
|
+
print(e)
|
|
3463
|
+
return selected_points_total
|
|
3464
|
+
|
|
3465
|
+
|
|
3466
|
+
###################### export information from a table ######################
|
|
3467
|
+
def export_tables(self, file):
|
|
3468
|
+
"""
|
|
3469
|
+
Export data to tables
|
|
3470
|
+
Args:
|
|
3471
|
+
self:
|
|
3472
|
+
file:
|
|
3473
|
+
|
|
3474
|
+
Returns:
|
|
3475
|
+
|
|
3476
|
+
"""
|
|
3477
|
+
if file[0]=='':
|
|
3478
|
+
return
|
|
3479
|
+
num_header = self.table_widget_measure.columnCount()
|
|
3480
|
+
headers = []
|
|
3481
|
+
dicts = defaultdict(list)
|
|
3482
|
+
rows = self.table_widget_measure.rowCount()
|
|
3483
|
+
cols = self.table_widget_measure.columnCount()
|
|
3484
|
+
for i in range(num_header):
|
|
3485
|
+
itm = self.table_widget_measure.horizontalHeaderItem(i)
|
|
3486
|
+
if itm is not None:
|
|
3487
|
+
txt = itm.text()
|
|
3488
|
+
else:
|
|
3489
|
+
txt = 'unknown'
|
|
3490
|
+
headers.append(txt)
|
|
3491
|
+
|
|
3492
|
+
dicts_0 = defaultdict(list)
|
|
3493
|
+
for r in range(rows):
|
|
3494
|
+
dicts_0[r] = []
|
|
3495
|
+
for c in range(cols):
|
|
3496
|
+
itm = self.table_widget_measure.item(r, c)
|
|
3497
|
+
if itm is not None:
|
|
3498
|
+
txt = itm.text()
|
|
3499
|
+
dicts_0[r].append(txt)
|
|
3500
|
+
import csv
|
|
3501
|
+
with open(file + '.csv', 'w') as f: # You will need 'wb' mode in Python 2.x
|
|
3502
|
+
f.write(','.join(headers)+'\n')
|
|
3503
|
+
|
|
3504
|
+
for key in dicts_0.keys():
|
|
3505
|
+
f.write(','.join(dicts_0[key])+'\n')
|
|
3506
|
+
|
|
3507
|
+
#with open(file+'.json', 'w') as fp:
|
|
3508
|
+
# json.dump(dicts, fp)
|
|
3509
|
+
|
|
3510
|
+
###################### N4 bias field correction ######################
def N4_bias_correction(image_nib, use_otsu=True, shrinkFactor=1,
                       numberFittingLevels=6, max_iter=5):
    inputImage = read_nib_as_sitk(image_nib)
    inputImage = sitk.Cast(inputImage, sitk.sitkFloat32)
    if use_otsu:
        #maskImage = sitk.OtsuThreshold(inputImage, 0, 1, 200)
        threshold_val = Threshold_MultiOtsu(image_nib.get_fdata(), 1)[0]
        a = image_nib.get_fdata().copy()
        a[a <= threshold_val] = 0
        a[a > threshold_val] = 1
        mask_image = make_image_using_affine(a, image_nib.affine)
        maskImage = read_nib_as_sitk(mask_image)
        maskImage = sitk.Cast(maskImage, sitk.sitkUInt8)
    else:
        mask_image = nib.Nifti1Image((image_nib.get_fdata() > 0).astype(np.int8), image_nib.affine, header=image_nib.header)
        #maskImage = sitk.Cast(sitk.GetImageFromArray((image.get_fdata()>0).astype('int'), sitk.sitkInt8), sitk.sitkUInt8)
        maskImage = read_nib_as_sitk(mask_image)
        maskImage = sitk.Cast(maskImage, sitk.sitkUInt8)

    if shrinkFactor > 1:
        inputImage = sitk.Shrink(
            inputImage, [shrinkFactor] * inputImage.GetDimension()
        )
        maskImage = sitk.Shrink(
            maskImage, [shrinkFactor] * inputImage.GetDimension()
        )

    corrector = sitk.N4BiasFieldCorrectionImageFilter()

    if max_iter > 5:
        corrector.SetMaximumNumberOfIterations(
            [max_iter] * numberFittingLevels
        )

    corrected_image = corrector.Execute(inputImage, maskImage)
    affine = make_affine(corrected_image)
    nib_im = nib.Nifti1Image(sitk.GetArrayFromImage(corrected_image).transpose(), affine)
    return nib_im

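# A minimal usage sketch (hypothetical example): how N4_bias_correction might be called
# on a nibabel image. The file paths are placeholders; max_iter must exceed 5 to override
# the filter's default iteration schedule (see the check above).
def _example_n4_usage():
    t1 = nib.load('/path/to/t1.nii.gz')
    corrected = N4_bias_correction(t1, use_otsu=True, shrinkFactor=2, max_iter=50)
    nib.save(corrected, '/path/to/t1_n4.nii.gz')
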
###################### create an image given an affine matrix ######################
def make_image_using_affine(data, affine, header=None):
    if affine is None:
        affine = np.eye(4)
    return nib.Nifti1Image(data, affine, header)

###################### create an image based on another one ######################
def make_image(data, target):
    return nib.Nifti1Image(data, target.affine, target.header)

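# A minimal sketch (hypothetical example) of the two helpers above: wrap a raw numpy
# array either with an identity affine or with the geometry of a reference image.
# The array shape and the reference path are placeholders.
def _example_make_image():
    data = np.zeros((64, 64, 64), dtype=np.float32)
    img_identity = make_image_using_affine(data, None)   # falls back to np.eye(4)
    reference = nib.load('/path/to/reference.nii.gz')
    img_like_ref = make_image(data, reference)           # reuses reference affine/header
    return img_identity, img_like_ref
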
###################### unique values of an image ######################
def len_unique(im):
    uq = np.unique(im)
    return uq, uq.shape[0]

###################### function used in slice interpolation ######################
def bwperim(bw, n=4):
    #https://github.com/lforet/CoinVision/blob/master/build/mahotas/mahotas/bwperim.py
    # with some modifications
    """
    perim = bwperim(bw, n=4)
    Find the perimeter of objects in binary images.
    A pixel is part of an object perimeter if its value is one and there
    is at least one zero-valued pixel in its neighborhood.
    By default the neighborhood of a pixel is the 4 nearest pixels, but
    if `n` is set to 8 the 8 nearest pixels will be considered.
    Parameters
    ----------
    bw : A black-and-white image
    n : Connectivity. Must be 4 or 8 (default: 4)
    Returns
    -------
    perim : A boolean image
    """

    if n not in (4, 8):
        raise ValueError('mahotas.bwperim: n must be 4 or 8')
    rows, cols = bw.shape

    # Translate image by one pixel in all directions
    north = np.zeros((rows, cols))
    south = np.zeros((rows, cols))
    west = np.zeros((rows, cols))
    east = np.zeros((rows, cols))

    north[:-1, :] = bw[1:, :]
    south[1:, :] = bw[:-1, :]
    west[:, :-1] = bw[:, 1:]
    east[:, 1:] = bw[:, :-1]
    idx = (north == bw) & \
          (south == bw) & \
          (west == bw) & \
          (east == bw)
    if n == 8:
        north_east = np.zeros((rows, cols))
        north_west = np.zeros((rows, cols))
        south_east = np.zeros((rows, cols))
        south_west = np.zeros((rows, cols))
        north_east[:-1, 1:] = bw[1:, :-1]
        north_west[:-1, :-1] = bw[1:, 1:]
        south_east[1:, 1:] = bw[:-1, :-1]
        south_west[1:, :-1] = bw[:-1, 1:]
        idx &= (north_east == bw) & \
               (south_east == bw) & \
               (south_west == bw) & \
               (north_west == bw)
    return ~idx * bw

###################### function used in slice interpolation ######################
def signed_bwdist(im):
    '''
    Find perim and return masked image (signed/reversed)
    '''
    im = -bwdist(bwperim(im)) * np.logical_not(im) + bwdist(bwperim(im)) * im
    return im

###################### Find distance map of image ######################
def bwdist(im):
    '''
    Find distance map of image
    '''
    from scipy.ndimage import distance_transform_edt  # scipy.ndimage.morphology is deprecated
    dist_im = distance_transform_edt(1 - im)
    return dist_im

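# A minimal sketch (hypothetical example) of the three helpers above on a toy 2D mask:
# bwperim marks boundary pixels, bwdist is the Euclidean distance to the nearest
# foreground pixel, and signed_bwdist is the distance to the perimeter, negative
# outside the object and positive inside.
def _example_signed_distance():
    mask = np.zeros((7, 7), dtype=int)
    mask[2:5, 2:5] = 1                      # a 3x3 square object
    perim = bwperim(mask, n=4)              # boundary pixels of the square
    dist = bwdist(mask)                     # 0 inside the object, > 0 outside
    sdist = signed_bwdist(mask)             # signed distance used for slice interpolation
    return perim, dist, sdist
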
###################### Slice interpolation ######################
def slice_intepolation(reader, slices, currentWidnowName, colorInd, WI):
    '''
    Interpolate between two slices in the image
    '''
    from scipy.interpolate import interpn

    slices = np.unique(slices)
    slices.sort()
    interpolated_slices = []
    if currentWidnowName == 'coronal':
        selected_slices = reader.npSeg[:, slices, :]
        selected_slices = np.transpose(selected_slices, [0, 2, 1])
    elif currentWidnowName == 'sagittal':
        selected_slices = reader.npSeg[:, :, slices]
    elif currentWidnowName == 'axial':
        selected_slices = reader.npSeg[slices, :, :]
        selected_slices = np.transpose(selected_slices, [1, 2, 0])
    ind_zero = selected_slices != colorInd
    selected_slices[ind_zero] = 0
    for j in range(slices.shape[0] - 1):
        top = (selected_slices[..., j] > 0).astype('int')
        bottom = (selected_slices[..., j + 1] > 0).astype('int')
        max_slice = slices[j + 1]
        min_slice = slices[j]
        precisions = np.linspace(0, 2, (max_slice - min_slice + 1))[1:-1]  #np.arange(2 / (max_slice - min_slice-1), 2, 2 / (max_slice - min_slice-1))
        top = signed_bwdist(top)
        bottom = signed_bwdist(bottom)

        # row,cols definition
        r, c = top.shape

        # rejoin top, bottom into a single array of shape (2, r, c)
        top_and_bottom = np.stack((top, bottom))

        # create ndgrids
        points = (np.r_[0, 2], np.arange(r), np.arange(c))
        for k, precision in enumerate(precisions):
            xi = np.rollaxis(np.mgrid[:r, :c], 0, 3).reshape((r * c, 2))
            xi = np.c_[np.full((r * c), precision), xi]
            # Interpolate for new plane
            out = interpn(points, top_and_bottom, xi)
            out = out.reshape((r, c))
            sliceNo = min_slice + k + 1
            # Threshold distmap to values above 0
            #out = out > 0
            out = np.argwhere(out > 0)
            out = np.hstack([out[:, [1, 0]], np.ones([out.shape[0], 1]) * sliceNo])
            interpolated_slices.append(out)

    return PermuteProperAxis(np.concatenate(interpolated_slices), currentWidnowName).astype('int')

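# A minimal sketch (hypothetical example) of the interpolation scheme used above, shown
# on two standalone binary masks instead of the reader's segmentation volume: each mask
# is turned into a signed distance map, the two maps are stacked along a pseudo-axis
# [0, 2], interpn is evaluated at intermediate positions, and the positive part of each
# interpolated map is the in-between mask. The mask shapes and the number of intermediate
# slices are arbitrary here.
def _example_interpolate_between_masks(top_mask, bottom_mask, n_between=3):
    from scipy.interpolate import interpn
    top = signed_bwdist(top_mask.astype(int))
    bottom = signed_bwdist(bottom_mask.astype(int))
    r, c = top.shape
    points = (np.r_[0, 2], np.arange(r), np.arange(c))
    stack = np.stack((top, bottom))
    in_between = []
    for precision in np.linspace(0, 2, n_between + 2)[1:-1]:
        xi = np.rollaxis(np.mgrid[:r, :c], 0, 3).reshape((r * c, 2))
        xi = np.c_[np.full(r * c, precision), xi]
        out = interpn(points, stack, xi).reshape((r, c))
        in_between.append(out > 0)
    return in_between
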
###################### function used in segmentation based on circle ######################
def seperate_lcc(whiteInd, center):
    minw = whiteInd.min(0)
    maxw = whiteInd.max(0)
    df = maxw - minw
    img = np.zeros([df[0] + 1, df[1] + 1])
    img[whiteInd[:, 0] - minw[0], whiteInd[:, 1] - minw[1]] = 1
    img_l, img_f = LargestCC(img, connectivity=1)
    index_sel = img_l[center[1] - minw[0], center[0] - minw[1]]
    nw = np.argwhere(img_l == index_sel)
    nw[:, 0] += minw[0]
    nw[:, 1] += minw[1]
    whiteInd = np.hstack([nw, np.ones([nw.shape[0], 1]) * (whiteInd[0, 2])]).astype('int')
    return whiteInd

###################### find sequence name from DICOM ######################
def get_SequenceName(SequenceName):
    if 'ep_b' in SequenceName:
        return 'dwi'
    elif 'epfid2d' in SequenceName:
        return 'perf'
    elif 'epfid3d1_15' in SequenceName or 'fl3d1r' in SequenceName:
        return 'swi'
    elif 'epse2d' in SequenceName:
        return 'dwi'  # (when b-values are specified)
    elif 'fl2d' in SequenceName:
        return 'localizer'
    elif 'fl3d1r_t' in SequenceName:
        return 'angio'
    elif 'spc3d' in SequenceName:
        return 'T2'
    elif 'spcir' in SequenceName or 'tir2d' in SequenceName:
        return 'flair'
    elif 'spcR' in SequenceName:
        return 'PD'
    elif 'tfl3d' in SequenceName:
        return 'T1'
    elif 'tfl_me3d5_16ns' in SequenceName:
        return 'T1'  # T1 (ME-MPRAGE)
    elif 'tse2d' in SequenceName or 'tse3d' in SequenceName:
        return 'T2'

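# A minimal sketch (hypothetical example): mapping Siemens-style SequenceName strings
# (DICOM tag 0018,0024) to the labels returned above. The sample strings are made up
# but follow the substring patterns checked in get_SequenceName.
def _example_sequence_names():
    assert get_SequenceName('*tfl3d1_16ns') == 'T1'
    assert get_SequenceName('*tse2d1_9') == 'T2'
    assert get_SequenceName('*ep_b1000#1') == 'dwi'
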
def adapt_to_size(imAzero, NewSpacing, Spacing, borderp):
    from nibabel.processing import resample_to_output, resample_from_to

    def signdf(df):
        if df <= 0:
            return -1
        else:
            return 1

    # minus_sign = True
    maxdim = np.array([el * sp / NewSpacing for el in imAzero.shape for sp in Spacing]).max()
    df = (maxdim - 192)
    minus_sign = False
    prev_sign = signdf(df)
    while True:
        maxdim = np.array([el * sp / NewSpacing for el in imAzero.shape for sp in Spacing]).max()
        df = (maxdim - 192)
        if signdf(df) != prev_sign:
            break
        if abs(abs(df) - borderp) < 10:  # and df <= 0:
            break
        if minus_sign:
            NewSpacing -= 0.1
        else:
            NewSpacing += 0.1
        prev_sign = signdf(df)
    while True:
        imAa = resample_to_output(imAzero, [NewSpacing, NewSpacing, NewSpacing])
        df = (np.max(imAa.shape) - 192)
        if signdf(df) != prev_sign and df < 0:
            break
        prev_sign = signdf(df)
        if abs(df) <= borderp and df <= 0:
            break
        else:
            if signdf(df) == -1:
                minus_sign = True
            else:
                minus_sign = False
            if minus_sign:
                NewSpacing -= 0.1
            else:
                NewSpacing += 0.1
    return imAa

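# A minimal usage sketch (hypothetical example) of adapt_to_size: it nudges the isotropic
# target spacing in 0.1 steps so that the largest dimension of the resampled volume ends
# up close to, but not above, 192 voxels. The file path and initial spacing are placeholders.
def _example_adapt_to_size():
    img = nib.load('/path/to/volume.nii.gz')
    spacing = img.header.get_zooms()[:3]
    resampled = adapt_to_size(img, NewSpacing=0.5, Spacing=spacing, borderp=20)
    return resampled
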
def histogram_equalization(source):
    def histogram_equalization_3d(image, method='ehist'):
        import cv2
        # Reshape the 3D image into a 2D array with shape (num_slices, height * width)
        num_slices, height, width = image.shape
        flattened_image = image.reshape((num_slices, height * width))
        flattened_image = normalize_mri(flattened_image)
        # Apply histogram equalization to each flattened slice
        if method == 'ehist':
            alg = cv2.equalizeHist
        elif method == 'clahe':
            clip_limit = 2
            tile_grid_size = (8, 8)
            clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
            alg = clahe.apply
        else:
            raise ValueError("method must be 'ehist' or 'clahe'")

        equalized_flattened = np.apply_along_axis(alg, axis=1,
                                                  arr=(flattened_image).astype(np.uint8))

        # Reshape the equalized 2D array back to 3D
        equalized_image = equalized_flattened.reshape((num_slices, height, width))

        return equalized_image

    # Compute histograms
    reference = histogram_equalization_3d(source, method='clahe')
    #source = rescale_between_a_b(sourceo, 0, 255)
    return normalize_mri(reference)
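
# A minimal usage sketch (hypothetical example): histogram_equalization runs slice-wise
# CLAHE over a 3D array (axis 0 indexes slices) and renormalizes the result via
# normalize_mri. The file path is a placeholder; make_image (defined above) wraps the
# result back into a NIfTI image.
def _example_histogram_equalization():
    img = nib.load('/path/to/t2.nii.gz')
    equalized = histogram_equalization(img.get_fdata())
    return make_image(equalized, img)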