nettracer3d 0.9.0-py3-none-any.whl → 0.9.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nettracer3d might be problematic.
- nettracer3d/neighborhoods.py +107 -16
- nettracer3d/nettracer.py +60 -31
- nettracer3d/nettracer_gui.py +546 -308
- nettracer3d/segmenter.py +514 -372
- nettracer3d/segmenter_GPU.py +434 -281
- {nettracer3d-0.9.0.dist-info → nettracer3d-0.9.1.dist-info}/METADATA +7 -14
- {nettracer3d-0.9.0.dist-info → nettracer3d-0.9.1.dist-info}/RECORD +11 -11
- {nettracer3d-0.9.0.dist-info → nettracer3d-0.9.1.dist-info}/WHEEL +0 -0
- {nettracer3d-0.9.0.dist-info → nettracer3d-0.9.1.dist-info}/entry_points.txt +0 -0
- {nettracer3d-0.9.0.dist-info → nettracer3d-0.9.1.dist-info}/licenses/LICENSE +0 -0
- {nettracer3d-0.9.0.dist-info → nettracer3d-0.9.1.dist-info}/top_level.txt +0 -0
nettracer3d/nettracer_gui.py
CHANGED
|
@@ -466,6 +466,8 @@ class ImageViewerWindow(QMainWindow):
|
|
|
466
466
|
|
|
467
467
|
self.resume = False
|
|
468
468
|
|
|
469
|
+
self.hold_update = False
|
|
470
|
+
|
|
469
471
|
def start_left_scroll(self):
|
|
470
472
|
"""Start scrolling left when left arrow is pressed."""
|
|
471
473
|
# Single increment first
|
|
@@ -500,7 +502,6 @@ class ImageViewerWindow(QMainWindow):
|
|
|
500
502
|
self.slice_slider.setValue(new_value)
|
|
501
503
|
elif self.scroll_direction > 0 and new_value <= self.slice_slider.maximum():
|
|
502
504
|
self.slice_slider.setValue(new_value)
|
|
503
|
-
|
|
504
505
|
|
|
505
506
|
def create_highlight_overlay(self, node_indices=None, edge_indices=None, overlay1_indices = None, overlay2_indices = None, bounds = False):
|
|
506
507
|
"""
|
|
@@ -514,6 +515,17 @@ class ImageViewerWindow(QMainWindow):
|
|
|
514
515
|
self.mini_overlay = False #If this method is ever being called, it means we are rendering the entire overlay so mini overlay needs to reset.
|
|
515
516
|
self.mini_overlay_data = None
|
|
516
517
|
|
|
518
|
+
|
|
519
|
+
if not self.high_button.isChecked():
|
|
520
|
+
|
|
521
|
+
if len(self.clicked_values['edges']) > 0:
|
|
522
|
+
self.format_for_upperright_table(self.clicked_values['edges'], title = 'Selected Edges')
|
|
523
|
+
if len(self.clicked_values['nodes']) > 0:
|
|
524
|
+
self.format_for_upperright_table(self.clicked_values['nodes'], title = 'Selected Nodes')
|
|
525
|
+
|
|
526
|
+
return
|
|
527
|
+
|
|
528
|
+
|
|
517
529
|
def process_chunk(chunk_data, indices_to_check):
|
|
518
530
|
"""Process a single chunk of the array to create highlight mask"""
|
|
519
531
|
mask = np.isin(chunk_data, indices_to_check)
|
|
@@ -550,7 +562,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
550
562
|
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
551
563
|
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
552
564
|
|
|
553
|
-
if not node_indices and not edge_indices and not overlay1_indices and not overlay2_indices:
|
|
565
|
+
if not node_indices and not edge_indices and not overlay1_indices and not overlay2_indices and self.machine_window is None:
|
|
554
566
|
self.highlight_overlay = None
|
|
555
567
|
self.highlight_bounds = None
|
|
556
568
|
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
@@ -623,7 +635,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
623
635
|
self.highlight_overlay = np.maximum(self.highlight_overlay, overlay1_overlay)
|
|
624
636
|
if overlay2_overlay is not None:
|
|
625
637
|
self.highlight_overlay = np.maximum(self.highlight_overlay, overlay2_overlay)
|
|
626
|
-
|
|
638
|
+
|
|
627
639
|
# Update display
|
|
628
640
|
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
629
641
|
|
|
@@ -822,10 +834,6 @@ class ImageViewerWindow(QMainWindow):
|
|
|
822
834
|
|
|
823
835
|
|
|
824
836
|
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
837
|
#METHODS RELATED TO RIGHT CLICK:
|
|
830
838
|
|
|
831
839
|
def create_context_menu(self, event):
|
|
@@ -1991,7 +1999,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
1991
1999
|
del my_network.network_lists[0][i]
|
|
1992
2000
|
del my_network.network_lists[1][i]
|
|
1993
2001
|
del my_network.network_lists[2][i]
|
|
1994
|
-
|
|
2002
|
+
for node in self.clicked_values['nodes']:
|
|
2003
|
+
del my_network.node_centroids[node]
|
|
2004
|
+
del my_network.node_identities[node]
|
|
2005
|
+
del my_network.communities[node]
|
|
1995
2006
|
|
|
1996
2007
|
|
|
1997
2008
|
if len(self.clicked_values['edges']) > 0:
|
|
@@ -2006,6 +2017,8 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2006
2017
|
del my_network.network_lists[0][i]
|
|
2007
2018
|
del my_network.network_lists[1][i]
|
|
2008
2019
|
del my_network.network_lists[2][i]
|
|
2020
|
+
for node in self.clicked_values['edges']:
|
|
2021
|
+
del my_network.edge_centroids[edge]
|
|
2009
2022
|
|
|
2010
2023
|
my_network.network_lists = my_network.network_lists
|
|
2011
2024
|
|
|
@@ -2021,7 +2034,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2021
2034
|
for column in range(model.columnCount(None)):
|
|
2022
2035
|
self.network_table.resizeColumnToContents(column)
|
|
2023
2036
|
|
|
2024
|
-
self.show_centroid_dialog()
|
|
2037
|
+
#self.show_centroid_dialog()
|
|
2025
2038
|
except Exception as e:
|
|
2026
2039
|
print(f"Error: {e}")
|
|
2027
2040
|
|
|
@@ -2147,11 +2160,16 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2147
2160
|
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
2148
2161
|
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
2149
2162
|
|
|
2150
|
-
if self.high_button.isChecked():
|
|
2163
|
+
if self.high_button.isChecked() and self.machine_window is None:
|
|
2151
2164
|
if self.highlight_overlay is None and ((len(self.clicked_values['nodes']) + len(self.clicked_values['edges'])) > 0):
|
|
2152
2165
|
if self.needs_mini:
|
|
2153
2166
|
self.create_mini_overlay(node_indices = self.clicked_values['nodes'], edge_indices = self.clicked_values['edges'])
|
|
2154
2167
|
self.needs_mini = False
|
|
2168
|
+
else:
|
|
2169
|
+
self.create_highlight_overlay(node_indices = self.clicked_values['nodes'], edge_indices = self.clicked_values['edges'])
|
|
2170
|
+
else:
|
|
2171
|
+
self.create_highlight_overlay(node_indices = self.clicked_values['nodes'], edge_indices = self.clicked_values['edges'])
|
|
2172
|
+
|
|
2155
2173
|
|
|
2156
2174
|
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
2157
2175
|
|
|
@@ -2233,6 +2251,11 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2233
2251
|
|
|
2234
2252
|
# Store current channel visibility state
|
|
2235
2253
|
self.pre_pan_channel_state = self.channel_visible.copy()
|
|
2254
|
+
|
|
2255
|
+
self.prev_down = self.downsample_factor
|
|
2256
|
+
if self.throttle:
|
|
2257
|
+
if self.downsample_factor < 3:
|
|
2258
|
+
self.validate_downsample_input(text = 3)
|
|
2236
2259
|
|
|
2237
2260
|
# Create static background from currently visible channels
|
|
2238
2261
|
self.create_pan_background()
|
|
@@ -2782,7 +2805,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2782
2805
|
self.pan_zoom_state = (current_xlim, current_ylim)
|
|
2783
2806
|
|
|
2784
2807
|
def create_composite_for_pan(self):
|
|
2785
|
-
"""Create a properly rendered composite image for panning"""
|
|
2808
|
+
"""Create a properly rendered composite image for panning with downsample support"""
|
|
2786
2809
|
# Get active channels and dimensions (copied from update_display)
|
|
2787
2810
|
active_channels = [i for i in range(4) if self.channel_data[i] is not None]
|
|
2788
2811
|
if active_channels:
|
|
@@ -2793,8 +2816,33 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2793
2816
|
else:
|
|
2794
2817
|
return None
|
|
2795
2818
|
|
|
2796
|
-
#
|
|
2797
|
-
|
|
2819
|
+
# Store original dimensions for coordinate mapping
|
|
2820
|
+
self.original_dims = (min_height, min_width)
|
|
2821
|
+
|
|
2822
|
+
# Get current downsample factor
|
|
2823
|
+
downsample_factor = getattr(self, 'downsample_factor', 1)
|
|
2824
|
+
|
|
2825
|
+
# Calculate display dimensions (downsampled)
|
|
2826
|
+
display_height = min_height // downsample_factor
|
|
2827
|
+
display_width = min_width // downsample_factor
|
|
2828
|
+
|
|
2829
|
+
# Helper function to downsample image (same as in update_display)
|
|
2830
|
+
def downsample_image(image, factor):
|
|
2831
|
+
if factor == 1:
|
|
2832
|
+
return image
|
|
2833
|
+
|
|
2834
|
+
# Handle different image types
|
|
2835
|
+
if len(image.shape) == 2:
|
|
2836
|
+
# Grayscale
|
|
2837
|
+
return image[::factor, ::factor]
|
|
2838
|
+
elif len(image.shape) == 3:
|
|
2839
|
+
# RGB/RGBA
|
|
2840
|
+
return image[::factor, ::factor, :]
|
|
2841
|
+
else:
|
|
2842
|
+
return image
|
|
2843
|
+
|
|
2844
|
+
# Create a blank RGBA composite to accumulate all channels (using display dimensions)
|
|
2845
|
+
composite = np.zeros((display_height, display_width, 4), dtype=np.float32)
|
|
2798
2846
|
|
|
2799
2847
|
# Process each visible channel exactly like update_display does
|
|
2800
2848
|
for channel in range(4):
|
|
@@ -2811,24 +2859,27 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2811
2859
|
else:
|
|
2812
2860
|
current_image = self.channel_data[channel]
|
|
2813
2861
|
|
|
2862
|
+
# Downsample the image for rendering
|
|
2863
|
+
display_image = downsample_image(current_image, downsample_factor)
|
|
2864
|
+
|
|
2814
2865
|
if is_rgb and self.channel_data[channel].shape[-1] == 3:
|
|
2815
2866
|
# RGB image - convert to RGBA and blend
|
|
2816
|
-
rgb_alpha = np.ones((*
|
|
2817
|
-
rgb_alpha[:, :, :3] =
|
|
2867
|
+
rgb_alpha = np.ones((*display_image.shape[:2], 4), dtype=np.float32)
|
|
2868
|
+
rgb_alpha[:, :, :3] = display_image.astype(np.float32) / 255.0
|
|
2818
2869
|
rgb_alpha[:, :, 3] = 0.7 # Same alpha as update_display
|
|
2819
2870
|
composite = self.blend_layers(composite, rgb_alpha)
|
|
2820
2871
|
|
|
2821
2872
|
elif is_rgb and self.channel_data[channel].shape[-1] == 4:
|
|
2822
2873
|
# RGBA image - blend directly
|
|
2823
|
-
rgba_image =
|
|
2874
|
+
rgba_image = display_image.astype(np.float32) / 255.0
|
|
2824
2875
|
composite = self.blend_layers(composite, rgba_image)
|
|
2825
2876
|
|
|
2826
2877
|
else:
|
|
2827
2878
|
# Regular channel processing (same logic as update_display)
|
|
2828
2879
|
if self.min_max[channel][0] == None:
|
|
2829
|
-
self.min_max[channel][0] = np.min(
|
|
2880
|
+
self.min_max[channel][0] = np.min(self.channel_data[channel])
|
|
2830
2881
|
if self.min_max[channel][1] == None:
|
|
2831
|
-
self.min_max[channel][1] = np.max(
|
|
2882
|
+
self.min_max[channel][1] = np.max(self.channel_data[channel])
|
|
2832
2883
|
|
|
2833
2884
|
img_min = self.min_max[channel][0]
|
|
2834
2885
|
img_max = self.min_max[channel][1]
|
|
@@ -2840,16 +2891,16 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2840
2891
|
vmin = img_min + (img_max - img_min) * self.channel_brightness[channel]['min']
|
|
2841
2892
|
vmax = img_min + (img_max - img_min) * self.channel_brightness[channel]['max']
|
|
2842
2893
|
|
|
2843
|
-
# Normalize the image
|
|
2894
|
+
# Normalize the downsampled image
|
|
2844
2895
|
if vmin == vmax:
|
|
2845
|
-
normalized_image = np.zeros_like(
|
|
2896
|
+
normalized_image = np.zeros_like(display_image)
|
|
2846
2897
|
else:
|
|
2847
|
-
normalized_image = np.clip((
|
|
2898
|
+
normalized_image = np.clip((display_image - vmin) / (vmax - vmin), 0, 1)
|
|
2848
2899
|
|
|
2849
2900
|
# Apply channel color and alpha
|
|
2850
2901
|
if channel == 2 and self.machine_window is not None:
|
|
2851
2902
|
# Special case for machine window channel 2
|
|
2852
|
-
channel_rgba = self.apply_machine_colormap(
|
|
2903
|
+
channel_rgba = self.apply_machine_colormap(display_image)
|
|
2853
2904
|
else:
|
|
2854
2905
|
# Regular channel with custom color
|
|
2855
2906
|
color = self.base_colors[channel]
|
|
@@ -2862,16 +2913,18 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2862
2913
|
# Blend this channel into the composite
|
|
2863
2914
|
composite = self.blend_layers(composite, channel_rgba)
|
|
2864
2915
|
|
|
2865
|
-
# Add highlight overlays if they exist (
|
|
2916
|
+
# Add highlight overlays if they exist (with downsampling)
|
|
2866
2917
|
if self.mini_overlay and self.highlight and self.machine_window is None:
|
|
2867
|
-
|
|
2918
|
+
display_overlay = downsample_image(self.mini_overlay_data, downsample_factor)
|
|
2919
|
+
highlight_rgba = self.create_highlight_rgba(display_overlay, yellow=True)
|
|
2868
2920
|
composite = self.blend_layers(composite, highlight_rgba)
|
|
2869
2921
|
elif self.highlight_overlay is not None and self.highlight:
|
|
2870
2922
|
highlight_slice = self.highlight_overlay[self.current_slice]
|
|
2923
|
+
display_highlight = downsample_image(highlight_slice, downsample_factor)
|
|
2871
2924
|
if self.machine_window is None:
|
|
2872
|
-
highlight_rgba = self.create_highlight_rgba(
|
|
2925
|
+
highlight_rgba = self.create_highlight_rgba(display_highlight, yellow=True)
|
|
2873
2926
|
else:
|
|
2874
|
-
highlight_rgba = self.create_highlight_rgba(
|
|
2927
|
+
highlight_rgba = self.create_highlight_rgba(display_highlight, yellow=False)
|
|
2875
2928
|
composite = self.blend_layers(composite, highlight_rgba)
|
|
2876
2929
|
|
|
2877
2930
|
# Convert to 0-255 range for display
|
|
@@ -2902,7 +2955,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2902
2955
|
if yellow:
|
|
2903
2956
|
# Yellow highlight
|
|
2904
2957
|
mask = highlight_data > 0
|
|
2905
|
-
rgba[mask] = [1, 1, 0, 0.
|
|
2958
|
+
rgba[mask] = [1, 1, 0, 0.8] # Yellow with alpha 0.5
|
|
2906
2959
|
else:
|
|
2907
2960
|
# Multi-color highlight for machine window
|
|
2908
2961
|
mask_1 = (highlight_data == 1)
|
|
@@ -2914,7 +2967,31 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2914
2967
|
|
|
2915
2968
|
def blend_layers(self, base, overlay):
|
|
2916
2969
|
"""Alpha blend two RGBA layers"""
|
|
2917
|
-
|
|
2970
|
+
|
|
2971
|
+
def resize_overlay_to_base(overlay_arr, base_arr):
|
|
2972
|
+
base_height, base_width = base_arr.shape[:2]
|
|
2973
|
+
overlay_height, overlay_width = overlay_arr.shape[:2]
|
|
2974
|
+
|
|
2975
|
+
# First crop if overlay is larger
|
|
2976
|
+
cropped_overlay = overlay_arr[:base_height, :base_width]
|
|
2977
|
+
|
|
2978
|
+
# Then pad if still smaller after cropping
|
|
2979
|
+
current_height, current_width = cropped_overlay.shape[:2]
|
|
2980
|
+
pad_height = base_height - current_height
|
|
2981
|
+
pad_width = base_width - current_width
|
|
2982
|
+
|
|
2983
|
+
if pad_height > 0 or pad_width > 0:
|
|
2984
|
+
cropped_overlay = np.pad(cropped_overlay,
|
|
2985
|
+
((0, pad_height), (0, pad_width), (0, 0)),
|
|
2986
|
+
mode='constant', constant_values=0)
|
|
2987
|
+
|
|
2988
|
+
return cropped_overlay
|
|
2989
|
+
|
|
2990
|
+
# Resize the ENTIRE overlay array to match base dimensions
|
|
2991
|
+
if overlay.shape[:2] != base.shape[:2]:
|
|
2992
|
+
overlay = resize_overlay_to_base(overlay, base)
|
|
2993
|
+
|
|
2994
|
+
# Now extract alpha channels (they should be the same size)
|
|
2918
2995
|
alpha_overlay = overlay[:, :, 3:4]
|
|
2919
2996
|
alpha_base = base[:, :, 3:4]
|
|
2920
2997
|
|
|
@@ -2936,17 +3013,26 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2936
3013
|
return result
|
|
2937
3014
|
|
|
2938
3015
|
def update_display_pan_mode(self):
|
|
2939
|
-
"""Lightweight display update for pan preview mode"""
|
|
3016
|
+
"""Lightweight display update for pan preview mode with downsample support"""
|
|
2940
3017
|
|
|
2941
3018
|
if self.is_pan_preview and self.pan_background_image is not None:
|
|
2942
3019
|
# Clear and setup axes
|
|
2943
3020
|
self.ax.clear()
|
|
2944
3021
|
self.ax.set_facecolor('black')
|
|
2945
3022
|
|
|
2946
|
-
# Get dimensions
|
|
2947
|
-
|
|
3023
|
+
# Get original dimensions (before downsampling)
|
|
3024
|
+
if hasattr(self, 'original_dims') and self.original_dims:
|
|
3025
|
+
height, width = self.original_dims
|
|
3026
|
+
else:
|
|
3027
|
+
# Fallback to pan background image dimensions
|
|
3028
|
+
height, width = self.pan_background_image.shape[:2]
|
|
3029
|
+
# If we have downsample factor, scale back up
|
|
3030
|
+
downsample_factor = getattr(self, 'downsample_factor', 1)
|
|
3031
|
+
height *= downsample_factor
|
|
3032
|
+
width *= downsample_factor
|
|
2948
3033
|
|
|
2949
3034
|
# Display the composite background with preserved zoom
|
|
3035
|
+
# Use extent to stretch downsampled image back to original coordinate space
|
|
2950
3036
|
self.ax.imshow(self.pan_background_image,
|
|
2951
3037
|
extent=(-0.5, width-0.5, height-0.5, -0.5),
|
|
2952
3038
|
aspect='equal')
|
|
@@ -2956,10 +3042,16 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2956
3042
|
self.ax.set_xlim(self.pan_zoom_state[0])
|
|
2957
3043
|
self.ax.set_ylim(self.pan_zoom_state[1])
|
|
2958
3044
|
|
|
3045
|
+
# Get downsample factor for title display
|
|
3046
|
+
downsample_factor = getattr(self, 'downsample_factor', 1)
|
|
3047
|
+
|
|
2959
3048
|
# Style the axes (same as update_display)
|
|
2960
3049
|
self.ax.set_xlabel('X')
|
|
2961
|
-
self.ax.set_ylabel('Y')
|
|
2962
|
-
|
|
3050
|
+
self.ax.set_ylabel('Y')
|
|
3051
|
+
if downsample_factor > 1:
|
|
3052
|
+
self.ax.set_title(f'Slice {self.current_slice} (DS: {downsample_factor}x)')
|
|
3053
|
+
else:
|
|
3054
|
+
self.ax.set_title(f'Slice {self.current_slice}')
|
|
2963
3055
|
self.ax.xaxis.label.set_color('black')
|
|
2964
3056
|
self.ax.yaxis.label.set_color('black')
|
|
2965
3057
|
self.ax.title.set_color('black')
|
|
@@ -2967,7 +3059,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
2967
3059
|
for spine in self.ax.spines.values():
|
|
2968
3060
|
spine.set_color('black')
|
|
2969
3061
|
|
|
2970
|
-
# Add measurement points if they exist (
|
|
3062
|
+
# Add measurement points if they exist (coordinates remain in original space)
|
|
2971
3063
|
for point in self.measurement_points:
|
|
2972
3064
|
x1, y1, z1 = point['point1']
|
|
2973
3065
|
x2, y2, z2 = point['point2']
|
|
@@ -3093,7 +3185,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3093
3185
|
except:
|
|
3094
3186
|
pass
|
|
3095
3187
|
self.selection_rect = None
|
|
3096
|
-
self.
|
|
3188
|
+
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
3189
|
+
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
3190
|
+
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
3191
|
+
#self.canvas.draw()
|
|
3097
3192
|
|
|
3098
3193
|
elif self.zoom_mode:
|
|
3099
3194
|
# Handle zoom mode press
|
|
@@ -3126,11 +3221,13 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3126
3221
|
|
|
3127
3222
|
new_xlim = [xdata - x_range, xdata + x_range]
|
|
3128
3223
|
new_ylim = [ydata - y_range, ydata + y_range]
|
|
3224
|
+
|
|
3225
|
+
shift_pressed = 'shift' in event.modifiers
|
|
3129
3226
|
|
|
3130
|
-
if (new_xlim[0] <=
|
|
3131
|
-
new_xlim[1] >= self.
|
|
3132
|
-
new_ylim[0] <=
|
|
3133
|
-
new_ylim[1] >= self.
|
|
3227
|
+
if (new_xlim[0] <= 0 or
|
|
3228
|
+
new_xlim[1] >= self.shape[2] or
|
|
3229
|
+
new_ylim[0] <= 0 or
|
|
3230
|
+
new_ylim[1] >= self.shape[1]) or shift_pressed:
|
|
3134
3231
|
self.ax.set_xlim(self.original_xlim)
|
|
3135
3232
|
self.ax.set_ylim(self.original_ylim)
|
|
3136
3233
|
else:
|
|
@@ -3142,7 +3239,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3142
3239
|
if not hasattr(self, 'zoom_changed'):
|
|
3143
3240
|
self.zoom_changed = False
|
|
3144
3241
|
|
|
3145
|
-
self.
|
|
3242
|
+
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
3243
|
+
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
3244
|
+
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
3245
|
+
#self.canvas.draw()
|
|
3146
3246
|
|
|
3147
3247
|
# Handle brush mode cleanup with paint session management
|
|
3148
3248
|
if self.brush_mode and hasattr(self, 'painting') and self.painting:
|
|
@@ -3277,7 +3377,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3277
3377
|
|
|
3278
3378
|
if not hasattr(self, 'zoom_changed'):
|
|
3279
3379
|
self.zoom_changed = False
|
|
3280
|
-
|
|
3380
|
+
|
|
3281
3381
|
elif event.button == 3: # Right click - zoom out
|
|
3282
3382
|
x_range = (current_xlim[1] - current_xlim[0])
|
|
3283
3383
|
y_range = (current_ylim[1] - current_ylim[0])
|
|
@@ -3296,10 +3396,11 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3296
3396
|
self.ax.set_ylim(new_ylim)
|
|
3297
3397
|
|
|
3298
3398
|
|
|
3299
|
-
|
|
3300
|
-
|
|
3301
|
-
|
|
3302
|
-
|
|
3399
|
+
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
3400
|
+
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
3401
|
+
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
3402
|
+
|
|
3403
|
+
#self.canvas.draw()
|
|
3303
3404
|
|
|
3304
3405
|
elif event.button == 3: # Right click
|
|
3305
3406
|
self.create_context_menu(event)
|
|
@@ -3606,12 +3707,110 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3606
3707
|
help_button = menubar.addAction("Help")
|
|
3607
3708
|
help_button.triggered.connect(self.help_me)
|
|
3608
3709
|
|
|
3710
|
+
# Initialize downsample factor
|
|
3711
|
+
self.downsample_factor = 1
|
|
3712
|
+
|
|
3713
|
+
"""
|
|
3714
|
+
# Create container widget for corner controls
|
|
3715
|
+
corner_widget = QWidget()
|
|
3716
|
+
corner_layout = QHBoxLayout(corner_widget)
|
|
3717
|
+
corner_layout.setContentsMargins(5, 0, 5, 0)
|
|
3718
|
+
|
|
3719
|
+
# Add downsample control
|
|
3720
|
+
downsample_label = QLabel("Downsample Display:")
|
|
3721
|
+
downsample_label.setStyleSheet("color: black; font-size: 11px;")
|
|
3722
|
+
corner_layout.addWidget(downsample_label)
|
|
3723
|
+
|
|
3724
|
+
self.downsample_input = QLineEdit("1")
|
|
3725
|
+
self.downsample_input.setFixedWidth(40)
|
|
3726
|
+
self.downsample_input.setFixedHeight(25)
|
|
3727
|
+
self.downsample_input.setStyleSheet("""
|
|
3728
|
+
#QLineEdit {
|
|
3729
|
+
#border: 1px solid gray;
|
|
3730
|
+
#border-radius: 2px;
|
|
3731
|
+
#padding: 1px;
|
|
3732
|
+
#font-size: 11px;
|
|
3733
|
+
#}
|
|
3734
|
+
""")
|
|
3735
|
+
self.downsample_input.textChanged.connect(self.on_downsample_changed)
|
|
3736
|
+
self.downsample_input.editingFinished.connect(self.validate_downsample_input)
|
|
3737
|
+
corner_layout.addWidget(self.downsample_input)
|
|
3738
|
+
|
|
3739
|
+
# Add some spacing
|
|
3740
|
+
corner_layout.addSpacing(10)
|
|
3741
|
+
|
|
3742
|
+
# Add camera button
|
|
3743
|
+
cam_button = QPushButton("📷")
|
|
3744
|
+
cam_button.setFixedSize(40, 40)
|
|
3745
|
+
cam_button.setStyleSheet("font-size: 24px;")
|
|
3746
|
+
cam_button.clicked.connect(self.snap)
|
|
3747
|
+
corner_layout.addWidget(cam_button)
|
|
3748
|
+
|
|
3749
|
+
# Set as corner widget
|
|
3750
|
+
menubar.setCornerWidget(corner_widget, Qt.Corner.TopRightCorner)
|
|
3751
|
+
"""
|
|
3609
3752
|
cam_button = QPushButton("📷")
|
|
3610
3753
|
cam_button.setFixedSize(40, 40)
|
|
3611
3754
|
cam_button.setStyleSheet("font-size: 24px;") # Makes emoji larger
|
|
3612
3755
|
cam_button.clicked.connect(self.snap)
|
|
3613
3756
|
menubar.setCornerWidget(cam_button, Qt.Corner.TopRightCorner)
|
|
3614
3757
|
|
|
3758
|
+
def on_downsample_changed(self, text):
|
|
3759
|
+
"""Called whenever the text in the downsample input changes"""
|
|
3760
|
+
try:
|
|
3761
|
+
if text.strip() == "":
|
|
3762
|
+
self.downsample_factor = 1
|
|
3763
|
+
else:
|
|
3764
|
+
value = float(text)
|
|
3765
|
+
if value <= 0:
|
|
3766
|
+
self.downsample_factor = 1
|
|
3767
|
+
else:
|
|
3768
|
+
self.downsample_factor = int(value) if value == int(value) else value
|
|
3769
|
+
except (ValueError, TypeError):
|
|
3770
|
+
self.downsample_factor = 1
|
|
3771
|
+
|
|
3772
|
+
def validate_downsample_input(self, text = None, update = True):
|
|
3773
|
+
"""Called when user finishes editing (loses focus or presses Enter)"""
|
|
3774
|
+
if text:
|
|
3775
|
+
self.downsample_factor = text
|
|
3776
|
+
else:
|
|
3777
|
+
try: # If enabled for manual display downsampling
|
|
3778
|
+
text = self.downsample_input.text().strip()
|
|
3779
|
+
if text == "":
|
|
3780
|
+
# Empty input - set to default
|
|
3781
|
+
self.downsample_factor = 1
|
|
3782
|
+
self.downsample_input.setText("1")
|
|
3783
|
+
else:
|
|
3784
|
+
value = int(text)
|
|
3785
|
+
if value < 1:
|
|
3786
|
+
# Invalid value - reset to default
|
|
3787
|
+
self.downsample_factor = 1
|
|
3788
|
+
self.downsample_input.setText("1")
|
|
3789
|
+
else:
|
|
3790
|
+
# Valid value - use it (prefer int if possible)
|
|
3791
|
+
if value == int(value):
|
|
3792
|
+
self.downsample_factor = int(value)
|
|
3793
|
+
self.downsample_input.setText(str(int(value)))
|
|
3794
|
+
else:
|
|
3795
|
+
self.downsample_factor = value
|
|
3796
|
+
self.downsample_input.setText(f"{value:.1f}")
|
|
3797
|
+
except:
|
|
3798
|
+
# Invalid input - reset to default
|
|
3799
|
+
self.downsample_factor = 1
|
|
3800
|
+
|
|
3801
|
+
self.throttle = self.shape[1] * self.shape[2] > 3000 * 3000 * self.downsample_factor
|
|
3802
|
+
if self.machine_window is not None:
|
|
3803
|
+
if self.throttle: #arbitrary throttle for large arrays.
|
|
3804
|
+
self.machine_window.update_interval = 10
|
|
3805
|
+
else:
|
|
3806
|
+
self.machine_window.update_interval = 1 # Increased to 1s
|
|
3807
|
+
|
|
3808
|
+
# Optional: Trigger display update if you want immediate effect
|
|
3809
|
+
if update:
|
|
3810
|
+
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
3811
|
+
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
3812
|
+
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
3813
|
+
|
|
3615
3814
|
def snap(self):
|
|
3616
3815
|
try:
|
|
3617
3816
|
# Check if we have any data to save
|
|
@@ -3643,7 +3842,14 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3643
3842
|
filename += '.png'
|
|
3644
3843
|
format_type = 'png'
|
|
3645
3844
|
|
|
3646
|
-
|
|
3845
|
+
if self.downsample_factor > 1:
|
|
3846
|
+
self.pan_mode = True # Update display will ignore downsamples if this is true so we can just use it here
|
|
3847
|
+
self.downsample_factor = 1
|
|
3848
|
+
current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
|
|
3849
|
+
current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
|
|
3850
|
+
self.update_display(preserve_zoom=(current_xlim, current_ylim))
|
|
3851
|
+
|
|
3852
|
+
# Save with axes bbox
|
|
3647
3853
|
bbox = self.ax.get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())
|
|
3648
3854
|
self.figure.savefig(filename,
|
|
3649
3855
|
dpi=300,
|
|
@@ -3654,6 +3860,8 @@ class ImageViewerWindow(QMainWindow):
|
|
|
3654
3860
|
pad_inches=0)
|
|
3655
3861
|
|
|
3656
3862
|
print(f"Axes snapshot saved: {filename}")
|
|
3863
|
+
|
|
3864
|
+
self.toggle_pan_mode() # Assesses pan state since we messed with its vars potentially
|
|
3657
3865
|
|
|
3658
3866
|
except Exception as e:
|
|
3659
3867
|
print(f"Error saving snapshot: {e}")
|
|
@@ -4372,7 +4580,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4372
4580
|
|
|
4373
4581
|
if directory != "":
|
|
4374
4582
|
|
|
4375
|
-
self.reset(network = True, xy_scale = 1, z_scale = 1, edges = True, network_overlay = True, id_overlay = True)
|
|
4583
|
+
self.reset(network = True, xy_scale = 1, z_scale = 1, edges = True, network_overlay = True, id_overlay = True, update = False)
|
|
4376
4584
|
|
|
4377
4585
|
my_network.assemble(directory)
|
|
4378
4586
|
|
|
@@ -4685,6 +4893,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4685
4893
|
"""Load a channel and enable active channel selection if needed."""
|
|
4686
4894
|
|
|
4687
4895
|
try:
|
|
4896
|
+
self.hold_update = True
|
|
4688
4897
|
if not data: # For solo loading
|
|
4689
4898
|
filename, _ = QFileDialog.getOpenFileName(
|
|
4690
4899
|
self,
|
|
@@ -4752,9 +4961,13 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4752
4961
|
try:
|
|
4753
4962
|
if len(self.channel_data[channel_index].shape) == 3: # potentially 2D RGB
|
|
4754
4963
|
if self.channel_data[channel_index].shape[-1] in (3, 4): # last dim is 3 or 4
|
|
4755
|
-
if self.
|
|
4756
|
-
|
|
4964
|
+
if not data and self.shape is None:
|
|
4965
|
+
if self.confirm_rgb_dialog():
|
|
4966
|
+
# User confirmed it's 2D RGB, expand to 4D
|
|
4967
|
+
self.channel_data[channel_index] = np.expand_dims(self.channel_data[channel_index], axis=0)
|
|
4968
|
+
elif self.shape[0] == 1: # this can only be true if the user already loaded in a 2d image
|
|
4757
4969
|
self.channel_data[channel_index] = np.expand_dims(self.channel_data[channel_index], axis=0)
|
|
4970
|
+
|
|
4758
4971
|
except:
|
|
4759
4972
|
pass
|
|
4760
4973
|
|
|
@@ -4861,6 +5074,11 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4861
5074
|
pass
|
|
4862
5075
|
|
|
4863
5076
|
self.shape = self.channel_data[channel_index].shape
|
|
5077
|
+
if self.shape[1] * self.shape[2] > 3000 * 3000 * self.downsample_factor:
|
|
5078
|
+
self.throttle = True
|
|
5079
|
+
else:
|
|
5080
|
+
self.throttle = False
|
|
5081
|
+
|
|
4864
5082
|
|
|
4865
5083
|
self.img_height, self.img_width = self.shape[1], self.shape[2]
|
|
4866
5084
|
self.original_ylim, self.original_xlim = (self.shape[1] + 0.5, -0.5), (-0.5, self.shape[2] - 0.5)
|
|
@@ -4878,7 +5096,6 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4878
5096
|
|
|
4879
5097
|
self.update_display(reset_resize = reset_resize, preserve_zoom = preserve_zoom)
|
|
4880
5098
|
|
|
4881
|
-
|
|
4882
5099
|
|
|
4883
5100
|
except Exception as e:
|
|
4884
5101
|
|
|
@@ -4890,7 +5107,7 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4890
5107
|
f"Failed to load tiff file: {str(e)}"
|
|
4891
5108
|
)
|
|
4892
5109
|
|
|
4893
|
-
def delete_channel(self, channel_index, called = True):
|
|
5110
|
+
def delete_channel(self, channel_index, called = True, update = True):
|
|
4894
5111
|
"""Delete the specified channel and update the display."""
|
|
4895
5112
|
if called:
|
|
4896
5113
|
# Confirm deletion
|
|
@@ -4936,11 +5153,13 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4936
5153
|
else:
|
|
4937
5154
|
# If no channels are available, disable active channel selector
|
|
4938
5155
|
self.active_channel_combo.setEnabled(False)
|
|
5156
|
+
self.shape = None # Also there is not an active shape anymore
|
|
4939
5157
|
|
|
4940
|
-
|
|
4941
|
-
|
|
5158
|
+
if update:
|
|
5159
|
+
# Update display
|
|
5160
|
+
self.update_display()
|
|
4942
5161
|
|
|
4943
|
-
def reset(self, nodes = False, network = False, xy_scale = 1, z_scale = 1, edges = False, search_region = False, network_overlay = False, id_overlay = False):
|
|
5162
|
+
def reset(self, nodes = False, network = False, xy_scale = 1, z_scale = 1, edges = False, search_region = False, network_overlay = False, id_overlay = False, update = True):
|
|
4944
5163
|
"""Method to flexibly reset certain fields to free up the RAM as desired"""
|
|
4945
5164
|
|
|
4946
5165
|
# Set scales first before any clearing operations
|
|
@@ -4961,10 +5180,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4961
5180
|
self.selection_table.setModel(PandasModel(empty_df))
|
|
4962
5181
|
|
|
4963
5182
|
if nodes:
|
|
4964
|
-
self.delete_channel(0, False)
|
|
5183
|
+
self.delete_channel(0, False, update = update)
|
|
4965
5184
|
|
|
4966
5185
|
if edges:
|
|
4967
|
-
self.delete_channel(1, False)
|
|
5186
|
+
self.delete_channel(1, False, update = update)
|
|
4968
5187
|
try:
|
|
4969
5188
|
if search_region:
|
|
4970
5189
|
my_network.search_region = None
|
|
@@ -4972,10 +5191,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
4972
5191
|
pass
|
|
4973
5192
|
|
|
4974
5193
|
if network_overlay:
|
|
4975
|
-
self.delete_channel(2, False)
|
|
5194
|
+
self.delete_channel(2, False, update = update)
|
|
4976
5195
|
|
|
4977
5196
|
if id_overlay:
|
|
4978
|
-
self.delete_channel(3, False)
|
|
5197
|
+
self.delete_channel(3, False, update = update)
|
|
4979
5198
|
|
|
4980
5199
|
|
|
4981
5200
|
|
|
@@ -5109,7 +5328,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
5109
5328
|
self.current_slice = slice_value
|
|
5110
5329
|
if self.mini_overlay == True: #If we are rendering the highlight overlay for selected values one at a time.
|
|
5111
5330
|
self.create_mini_overlay(node_indices = self.clicked_values['nodes'], edge_indices = self.clicked_values['edges'])
|
|
5112
|
-
self.
|
|
5331
|
+
if not self.hold_update:
|
|
5332
|
+
self.update_display(preserve_zoom=view_settings)
|
|
5333
|
+
else:
|
|
5334
|
+
self.hold_update = False
|
|
5113
5335
|
#if self.machine_window is not None:
|
|
5114
5336
|
#self.machine_window.poke_segmenter()
|
|
5115
5337
|
self.pending_slice = None
|
|
@@ -5127,51 +5349,59 @@ class ImageViewerWindow(QMainWindow):
|
|
|
5127
5349
|
self.update_display(preserve_zoom = (current_xlim, current_ylim))
|
|
5128
5350
|
|
|
5129
5351
|
|
|
5130
|
-
|
|
5131
|
-
|
|
5132
|
-
def update_display(self, preserve_zoom=None, dims = None, called = False, reset_resize = False):
|
|
5133
|
-
"""Update the display with currently visible channels and highlight overlay."""
|
|
5352
|
+
def update_display(self, preserve_zoom=None, dims=None, called=False, reset_resize=False, skip=False):
|
|
5353
|
+
"""Optimized display update with view-based cropping for performance."""
|
|
5134
5354
|
try:
|
|
5135
|
-
|
|
5355
|
+
# Initialize reusable components if they don't exist
|
|
5356
|
+
if not hasattr(self, 'channel_images'):
|
|
5357
|
+
self.channel_images = {}
|
|
5358
|
+
self.highlight_image = None
|
|
5359
|
+
self.measurement_artists = []
|
|
5360
|
+
self.axes_initialized = False
|
|
5361
|
+
self.original_dims = None
|
|
5362
|
+
|
|
5363
|
+
# Handle special states (pan, static background)
|
|
5136
5364
|
if self.pan_background_image is not None:
|
|
5137
|
-
# Restore previously visible channels
|
|
5138
5365
|
self.channel_visible = self.pre_pan_channel_state.copy()
|
|
5139
5366
|
self.is_pan_preview = False
|
|
5140
5367
|
self.pan_background_image = None
|
|
5141
5368
|
if self.resume:
|
|
5142
5369
|
self.machine_window.segmentation_worker.resume()
|
|
5143
5370
|
self.resume = False
|
|
5371
|
+
if self.prev_down != self.downsample_factor:
|
|
5372
|
+
self.validate_downsample_input(text = self.prev_down)
|
|
5373
|
+
return
|
|
5374
|
+
|
|
5144
5375
|
if self.static_background is not None:
|
|
5145
|
-
#
|
|
5376
|
+
# Your existing virtual strokes conversion logic
|
|
5146
5377
|
if (hasattr(self, 'virtual_draw_operations') and self.virtual_draw_operations) or \
|
|
5147
5378
|
(hasattr(self, 'virtual_erase_operations') and self.virtual_erase_operations) or \
|
|
5148
5379
|
(hasattr(self, 'current_operation') and self.current_operation):
|
|
5149
|
-
# Finish current operation first
|
|
5150
5380
|
if hasattr(self, 'current_operation') and self.current_operation:
|
|
5151
5381
|
self.pm.finish_current_virtual_operation()
|
|
5152
|
-
# Now convert to real data
|
|
5153
5382
|
self.pm.convert_virtual_strokes_to_data()
|
|
5154
5383
|
|
|
5155
|
-
# Restore hidden channels
|
|
5156
5384
|
try:
|
|
5157
5385
|
for i in self.restore_channels:
|
|
5158
5386
|
self.channel_visible[i] = True
|
|
5159
5387
|
self.restore_channels = []
|
|
5160
5388
|
except:
|
|
5161
5389
|
pass
|
|
5162
|
-
|
|
5163
5390
|
self.static_background = None
|
|
5164
|
-
|
|
5391
|
+
|
|
5392
|
+
# Your existing machine_window logic
|
|
5165
5393
|
if self.machine_window is None:
|
|
5166
5394
|
try:
|
|
5167
|
-
self.channel_data[4][self.current_slice, :, :] = n3d.overlay_arrays_simple(
|
|
5168
|
-
|
|
5395
|
+
self.channel_data[4][self.current_slice, :, :] = n3d.overlay_arrays_simple(
|
|
5396
|
+
self.channel_data[self.temp_chan][self.current_slice, :, :],
|
|
5397
|
+
self.channel_data[4][self.current_slice, :, :])
|
|
5398
|
+
self.load_channel(self.temp_chan, self.channel_data[4], data=True, end_paint=True)
|
|
5169
5399
|
self.channel_data[4] = None
|
|
5170
5400
|
self.channel_visible[4] = False
|
|
5171
5401
|
except:
|
|
5172
5402
|
pass
|
|
5173
5403
|
|
|
5174
|
-
# Get
|
|
5404
|
+
# Get dimensions
|
|
5175
5405
|
active_channels = [i for i in range(4) if self.channel_data[i] is not None]
|
|
5176
5406
|
if dims is None:
|
|
5177
5407
|
if active_channels:
|
|
@@ -5180,247 +5410,258 @@ class ImageViewerWindow(QMainWindow):
|
|
|
5180
5410
|
min_height = min(d[0] for d in dims)
|
|
5181
5411
|
min_width = min(d[1] for d in dims)
|
|
5182
5412
|
else:
|
|
5183
|
-
min_height = 1
|
|
5184
|
-
min_width = 1
|
|
5413
|
+
min_height = min_width = 1
|
|
5185
5414
|
else:
|
|
5186
|
-
min_height = dims[
|
|
5187
|
-
|
|
5188
|
-
|
|
5189
|
-
|
|
5415
|
+
min_height, min_width = dims[:2]
|
|
5416
|
+
|
|
5417
|
+
# Store original dimensions for pixel coordinate conversion
|
|
5418
|
+
self.original_dims = (min_height, min_width)
|
|
5419
|
+
|
|
5420
|
+
# Initialize axes only once or when needed
|
|
5421
|
+
if not self.axes_initialized or not hasattr(self, 'ax') or self.ax is None:
|
|
5422
|
+
self.figure.clear()
|
|
5423
|
+
self.figure.patch.set_facecolor('white')
|
|
5424
|
+
self.ax = self.figure.add_subplot(111)
|
|
5425
|
+
self.ax.set_facecolor('black')
|
|
5426
|
+
self.axes_initialized = True
|
|
5427
|
+
|
|
5428
|
+
# Style the axes once
|
|
5429
|
+
self.ax.set_xlabel('X')
|
|
5430
|
+
self.ax.set_ylabel('Y')
|
|
5431
|
+
self.ax.xaxis.label.set_color('black')
|
|
5432
|
+
self.ax.yaxis.label.set_color('black')
|
|
5433
|
+
self.ax.tick_params(colors='black')
|
|
5434
|
+
for spine in self.ax.spines.values():
|
|
5435
|
+
spine.set_color('black')
|
|
5436
|
+
else:
|
|
5437
|
+
# Clear only the image data, keep axes structure
|
|
5438
|
+
for img in list(self.ax.get_images()):
|
|
5439
|
+
img.remove()
|
|
5440
|
+
# Clear measurement points
|
|
5441
|
+
for artist in self.measurement_artists:
|
|
5442
|
+
artist.remove()
|
|
5443
|
+
self.measurement_artists.clear()
|
|
5444
|
+
|
|
5445
|
+
# Determine the current view bounds (either from preserve_zoom or current state)
|
|
5446
|
+
if preserve_zoom:
|
|
5447
|
+
current_xlim, current_ylim = preserve_zoom
|
|
5448
|
+
else:
|
|
5449
|
+
current_xlim = (-0.5, self.shape[2] - 0.5)
|
|
5450
|
+
current_ylim = (self.shape[1] - 0.5, -0.5)
|
|
5451
|
+
|
|
5452
|
+
# Calculate the visible region in pixel coordinates
|
|
5453
|
+
x_min = max(0, int(np.floor(current_xlim[0] + 0.5)))
|
|
5454
|
+
x_max = min(min_width, int(np.ceil(current_xlim[1] + 0.5)))
|
|
5455
|
+
y_min = max(0, int(np.floor(current_ylim[1] + 0.5))) # Note: y is flipped
|
|
5456
|
+
y_max = min(min_height, int(np.ceil(current_ylim[0] + 0.5)))
|
|
5457
|
+
|
|
5458
|
+
if not self.pan_mode: # If using image pyramids
|
|
5459
|
+
size = (x_max - x_min) * (y_max - y_min)
|
|
5460
|
+
if size < (3000 * 3000): # Smaller window
|
|
5461
|
+
val = 1
|
|
5462
|
+
elif size > (3000 * 3000) and size < (6000 * 6000): # Med window
|
|
5463
|
+
val = 2
|
|
5464
|
+
elif size > (6000 * 6000) and size < (9000 * 9000): # Large window
|
|
5465
|
+
val = 3
|
|
5466
|
+
elif size > (9000 * 9000): # Very large window
|
|
5467
|
+
val = 3
|
|
5468
|
+
self.validate_downsample_input(text = val, update = False)
|
|
5469
|
+
downsample_factor = self.downsample_factor
|
|
5470
|
+
|
|
5471
|
+
# Add some padding to avoid edge artifacts during pan/zoom
|
|
5472
|
+
padding = max(10, downsample_factor * 2)
|
|
5473
|
+
x_min_padded = max(0, x_min - padding)
|
|
5474
|
+
x_max_padded = min(min_width, x_max + padding)
|
|
5475
|
+
y_min_padded = max(0, y_min - padding)
|
|
5476
|
+
y_max_padded = min(min_height, y_max + padding)
|
|
5477
|
+
|
|
5478
|
+
# Calculate the extent for the cropped region (in original coordinates)
|
|
5479
|
+
crop_extent = (x_min_padded - 0.5, x_max_padded - 0.5,
|
|
5480
|
+
y_max_padded - 0.5, y_min_padded - 0.5)
|
|
5481
|
+
|
|
5482
|
+
# Set limits to original dimensions (important for pixel queries)
|
|
5190
5483
|
self.ax.set_xlim(-0.5, min_width - 0.5)
|
|
5191
5484
|
self.ax.set_ylim(min_height - 0.5, -0.5)
|
|
5485
|
+
self.ax.set_title(f'Slice {self.current_slice}')
|
|
5486
|
+
self.ax.title.set_color('black')
|
|
5192
5487
|
|
|
5193
|
-
# Create subplot with tight layout and white figure background
|
|
5194
|
-
self.figure.patch.set_facecolor('white')
|
|
5195
|
-
self.ax = self.figure.add_subplot(111)
|
|
5196
|
-
|
|
5197
|
-
# Store current zoom limits if they exist and weren't provided
|
|
5198
|
-
|
|
5199
|
-
current_xlim, current_ylim = preserve_zoom if preserve_zoom else (None, None)
|
|
5200
|
-
|
|
5201
|
-
# Define base colors for each channel with increased intensity
|
|
5202
5488
|
base_colors = self.base_colors
|
|
5203
|
-
# Set only the axes (image area) background to black
|
|
5204
|
-
self.ax.set_facecolor('black')
|
|
5205
5489
|
|
|
5206
|
-
#
|
|
5490
|
+
# Helper function to crop and downsample image
|
|
5491
|
+
def crop_and_downsample_image(image, y_start, y_end, x_start, x_end, factor):
|
|
5492
|
+
# Crop first
|
|
5493
|
+
if len(image.shape) == 2:
|
|
5494
|
+
cropped = image[y_start:y_end, x_start:x_end]
|
|
5495
|
+
elif len(image.shape) == 3:
|
|
5496
|
+
cropped = image[y_start:y_end, x_start:x_end, :]
|
|
5497
|
+
else:
|
|
5498
|
+
cropped = image
|
|
5499
|
+
|
|
5500
|
+
# Then downsample if needed
|
|
5501
|
+
if factor == 1:
|
|
5502
|
+
return cropped
|
|
5503
|
+
|
|
5504
|
+
if len(cropped.shape) == 2:
|
|
5505
|
+
return cropped[::factor, ::factor]
|
|
5506
|
+
elif len(cropped.shape) == 3:
|
|
5507
|
+
return cropped[::factor, ::factor, :]
|
|
5508
|
+
else:
|
|
5509
|
+
return cropped
|
|
5510
|
+
|
|
5511
|
+
# Update channel images efficiently with cropping and downsampling
|
|
5207
5512
|
for channel in range(4):
|
|
5208
|
-
if
|
|
5209
|
-
|
|
5210
|
-
|
|
5211
|
-
|
|
5212
|
-
is_rgb = len(self.channel_data[channel].shape) == 4 and (self.channel_data[channel].shape[-1] == 3 or self.channel_data[channel].shape[-1] == 4)
|
|
5513
|
+
if self.channel_visible[channel] and self.channel_data[channel] is not None:
|
|
5514
|
+
# Get current image data
|
|
5515
|
+
is_rgb = len(self.channel_data[channel].shape) == 4 and (
|
|
5516
|
+
self.channel_data[channel].shape[-1] in [3, 4])
|
|
5213
5517
|
|
|
5214
5518
|
if len(self.channel_data[channel].shape) == 3 and not is_rgb:
|
|
5215
5519
|
current_image = self.channel_data[channel][self.current_slice, :, :]
|
|
5216
5520
|
elif is_rgb:
|
|
5217
|
-
current_image = self.channel_data[channel][self.current_slice]
|
|
5521
|
+
current_image = self.channel_data[channel][self.current_slice]
|
|
5218
5522
|
else:
|
|
5219
5523
|
current_image = self.channel_data[channel]
|
|
5220
5524
|
|
|
5525
|
+
# Crop and downsample the image for rendering
|
|
5526
|
+
display_image = crop_and_downsample_image(
|
|
5527
|
+
current_image, y_min_padded, y_max_padded,
|
|
5528
|
+
x_min_padded, x_max_padded, downsample_factor)
|
|
5529
|
+
|
|
5221
5530
|
if is_rgb and self.channel_data[channel].shape[-1] in [3, 4]:
|
|
5222
|
-
#
|
|
5223
|
-
|
|
5224
|
-
# Calculate alpha based on brightness settings
|
|
5531
|
+
# RGB handling (keep your existing logic)
|
|
5225
5532
|
brightness_min = self.channel_brightness[channel]['min']
|
|
5226
5533
|
brightness_max = self.channel_brightness[channel]['max']
|
|
5227
|
-
|
|
5228
|
-
# Map brightness range to alpha range (0.0 to 1.0)
|
|
5229
|
-
# brightness_min controls minimum alpha, brightness_max controls maximum alpha
|
|
5230
5534
|
alpha_range = brightness_max - brightness_min
|
|
5231
|
-
base_alpha = brightness_min
|
|
5232
|
-
|
|
5233
|
-
final_alpha = base_alpha + alpha_range # Scale to reasonable alpha range
|
|
5234
|
-
final_alpha = np.clip(final_alpha, 0.0, 1.0) # Ensure valid alpha range
|
|
5535
|
+
base_alpha = brightness_min
|
|
5536
|
+
final_alpha = np.clip(base_alpha + alpha_range, 0.0, 1.0)
|
|
5235
5537
|
|
|
5236
|
-
|
|
5237
|
-
|
|
5238
|
-
# For RGBA, multiply existing alpha by our brightness-controlled alpha
|
|
5239
|
-
img_with_alpha = current_image.copy()
|
|
5538
|
+
if display_image.shape[-1] == 4:
|
|
5539
|
+
img_with_alpha = display_image.copy()
|
|
5240
5540
|
img_with_alpha[..., 3] = img_with_alpha[..., 3] * final_alpha
|
|
5241
|
-
|
|
5541
|
+
# Use crop_extent to place in correct location
|
|
5542
|
+
im = self.ax.imshow(img_with_alpha, extent=crop_extent)
|
|
5242
5543
|
else:
|
|
5243
|
-
|
|
5244
|
-
self.ax.imshow(current_image, alpha=final_alpha)
|
|
5245
|
-
|
|
5544
|
+
im = self.ax.imshow(display_image, alpha=final_alpha, extent=crop_extent)
|
|
5246
5545
|
else:
|
|
5247
|
-
# Regular channel processing with
|
|
5248
|
-
# Calculate brightness/contrast limits from entire volume
|
|
5546
|
+
# Regular channel processing with optimized normalization
|
|
5249
5547
|
if self.min_max[channel][0] is None:
|
|
5250
|
-
|
|
5251
|
-
|
|
5252
|
-
|
|
5253
|
-
|
|
5254
|
-
|
|
5548
|
+
# For very large arrays, consider sampling for min/max
|
|
5549
|
+
if self.channel_data[channel].size > 1000000:
|
|
5550
|
+
sample = self.channel_data[channel][::max(1, self.channel_data[channel].shape[0]//100)]
|
|
5551
|
+
self.min_max[channel] = [np.min(sample), np.max(sample)]
|
|
5552
|
+
else:
|
|
5553
|
+
self.min_max[channel] = [np.min(self.channel_data[channel]),
|
|
5554
|
+
np.max(self.channel_data[channel])]
|
|
5555
|
+
|
|
5556
|
+
img_min, img_max = self.min_max[channel]
|
|
5255
5557
|
|
|
5256
|
-
# Calculate vmin and vmax, ensuring we don't get a zero range
|
|
5257
5558
|
if img_min == img_max:
|
|
5258
|
-
vmin = img_min
|
|
5259
|
-
|
|
5559
|
+
vmin, vmax = img_min, img_min + 1
|
|
5560
|
+
normalized_image = np.zeros_like(display_image)
|
|
5260
5561
|
else:
|
|
5261
5562
|
vmin = img_min + (img_max - img_min) * self.channel_brightness[channel]['min']
|
|
5262
5563
|
vmax = img_min + (img_max - img_min) * self.channel_brightness[channel]['max']
|
|
5263
|
-
|
|
5264
|
-
|
|
5265
|
-
|
|
5266
|
-
|
|
5267
|
-
|
|
5268
|
-
normalized_image = np.clip((current_image - vmin) / (vmax - vmin), 0, 1)
|
|
5564
|
+
|
|
5565
|
+
if vmin == vmax:
|
|
5566
|
+
normalized_image = np.zeros_like(display_image)
|
|
5567
|
+
else:
|
|
5568
|
+
normalized_image = np.clip((display_image - vmin) / (vmax - vmin), 0, 1)
|
|
5269
5569
|
|
|
5270
5570
|
if channel == 2 and self.machine_window is not None:
|
|
5271
5571
|
custom_cmap = LinearSegmentedColormap.from_list(
|
|
5272
5572
|
f'custom_{channel}',
|
|
5273
|
-
[(0, 0, 0, 0),
|
|
5274
|
-
(0.5, 1, 0.5, 1), # light green for 1
|
|
5275
|
-
(1, 0.5, 0.5, 1)] # light red for 2
|
|
5573
|
+
[(0, 0, 0, 0), (0.5, 1, 0.5, 1), (1, 0.5, 0.5, 1)]
|
|
5276
5574
|
)
|
|
5277
|
-
self.ax.imshow(
|
|
5278
|
-
|
|
5279
|
-
vmin=0,
|
|
5280
|
-
vmax=2,
|
|
5281
|
-
alpha=0.7,
|
|
5282
|
-
interpolation='nearest',
|
|
5283
|
-
extent=(-0.5, min_width-0.5, min_height-0.5, -0.5))
|
|
5575
|
+
im = self.ax.imshow(display_image, cmap=custom_cmap, vmin=0, vmax=2,
|
|
5576
|
+
alpha=0.7, interpolation='nearest', extent=crop_extent)
|
|
5284
5577
|
else:
|
|
5285
|
-
# Create custom colormap with higher intensity
|
|
5286
5578
|
color = base_colors[channel]
|
|
5287
5579
|
custom_cmap = LinearSegmentedColormap.from_list(
|
|
5288
|
-
f'custom_{channel}',
|
|
5289
|
-
[(0,0,0,0), (*color,1)]
|
|
5290
|
-
)
|
|
5580
|
+
f'custom_{channel}', [(0,0,0,0), (*color,1)])
|
|
5291
5581
|
|
|
5292
|
-
|
|
5293
|
-
|
|
5294
|
-
alpha=0.7,
|
|
5295
|
-
cmap=custom_cmap,
|
|
5296
|
-
vmin=0,
|
|
5297
|
-
vmax=1,
|
|
5298
|
-
extent=(-0.5, min_width-0.5, min_height-0.5, -0.5))
|
|
5582
|
+
im = self.ax.imshow(normalized_image, alpha=0.7, cmap=custom_cmap,
|
|
5583
|
+
vmin=0, vmax=1, extent=crop_extent)
|
|
5299
5584
|
|
|
5585
|
+
# Handle preview, overlays, and measurements (apply cropping here too)
|
|
5300
5586
|
if self.preview and not called:
|
|
5301
|
-
self.create_highlight_overlay_slice(self.targs, bounds
|
|
5587
|
+
self.create_highlight_overlay_slice(self.targs, bounds=self.bounds)
|
|
5302
5588
|
|
|
5303
|
-
#
|
|
5589
|
+
# Overlay handling (optimized with cropping and downsampling)
|
|
5304
5590
|
if self.mini_overlay and self.highlight and self.machine_window is None:
|
|
5305
|
-
highlight_cmap = LinearSegmentedColormap.from_list(
|
|
5306
|
-
|
|
5307
|
-
|
|
5308
|
-
|
|
5309
|
-
self.ax.imshow(
|
|
5310
|
-
cmap=highlight_cmap,
|
|
5311
|
-
alpha=0.8)
|
|
5312
|
-
elif self.highlight_overlay is not None and self.highlight and self.machine_window is None:
|
|
5313
|
-
highlight_slice = self.highlight_overlay[self.current_slice]
|
|
5314
|
-
highlight_cmap = LinearSegmentedColormap.from_list(
|
|
5315
|
-
'highlight',
|
|
5316
|
-
[(0, 0, 0, 0), (1, 1, 0, 1)] # yellow
|
|
5317
|
-
)
|
|
5318
|
-
self.ax.imshow(highlight_slice,
|
|
5319
|
-
cmap=highlight_cmap,
|
|
5320
|
-
alpha=0.8)
|
|
5591
|
+
highlight_cmap = LinearSegmentedColormap.from_list('highlight', [(0, 0, 0, 0), (1, 1, 0, 1)])
|
|
5592
|
+
display_overlay = crop_and_downsample_image(
|
|
5593
|
+
self.mini_overlay_data, y_min_padded, y_max_padded,
|
|
5594
|
+
x_min_padded, x_max_padded, downsample_factor)
|
|
5595
|
+
self.ax.imshow(display_overlay, cmap=highlight_cmap, alpha=0.8, extent=crop_extent)
|
|
5321
5596
|
elif self.highlight_overlay is not None and self.highlight:
|
|
5322
5597
|
highlight_slice = self.highlight_overlay[self.current_slice]
|
|
5323
|
-
|
|
5324
|
-
|
|
5325
|
-
|
|
5326
|
-
|
|
5327
|
-
|
|
5328
|
-
|
|
5329
|
-
self.ax.imshow(highlight_slice,
|
|
5330
|
-
cmap=highlight_cmap,
|
|
5331
|
-
vmin=0,
|
|
5332
|
-
vmax=2, # Important: set vmax to 2 to accommodate both values
|
|
5333
|
-
alpha=0.3)
|
|
5334
|
-
|
|
5335
|
-
if self.channel_data[4] is not None:
|
|
5336
|
-
|
|
5337
|
-
highlight_slice = self.channel_data[4][self.current_slice]
|
|
5338
|
-
img_min = self.min_max[4][0]
|
|
5339
|
-
img_max = self.min_max[4][1]
|
|
5340
|
-
|
|
5341
|
-
# Calculate vmin and vmax, ensuring we don't get a zero range
|
|
5342
|
-
if img_min == img_max:
|
|
5343
|
-
vmin = img_min
|
|
5344
|
-
vmax = img_min + 1
|
|
5345
|
-
else:
|
|
5346
|
-
vmin = img_min + (img_max - img_min) * self.channel_brightness[4]['min']
|
|
5347
|
-
vmax = img_min + (img_max - img_min) * self.channel_brightness[4]['max']
|
|
5348
|
-
|
|
5349
|
-
# Normalize the image safely
|
|
5350
|
-
if vmin == vmax:
|
|
5351
|
-
normalized_image = np.zeros_like(highlight_slice)
|
|
5598
|
+
display_highlight = crop_and_downsample_image(
|
|
5599
|
+
highlight_slice, y_min_padded, y_max_padded,
|
|
5600
|
+
x_min_padded, x_max_padded, downsample_factor)
|
|
5601
|
+
if self.machine_window is None:
|
|
5602
|
+
highlight_cmap = LinearSegmentedColormap.from_list('highlight', [(0, 0, 0, 0), (1, 1, 0, 1)])
|
|
5603
|
+
self.ax.imshow(display_highlight, cmap=highlight_cmap, alpha=0.8, extent=crop_extent)
|
|
5352
5604
|
else:
|
|
5353
|
-
|
|
5354
|
-
|
|
5355
|
-
|
|
5356
|
-
custom_cmap = LinearSegmentedColormap.from_list(
|
|
5357
|
-
f'custom_{4}',
|
|
5358
|
-
[(0,0,0,0), (*color,1)]
|
|
5359
|
-
)
|
|
5360
|
-
|
|
5361
|
-
|
|
5362
|
-
self.ax.imshow(normalized_image,
|
|
5363
|
-
alpha=0.7,
|
|
5364
|
-
cmap=custom_cmap,
|
|
5365
|
-
vmin=0,
|
|
5366
|
-
vmax=1)
|
|
5367
|
-
|
|
5368
|
-
# Style the axes
|
|
5369
|
-
self.ax.set_xlabel('X')
|
|
5370
|
-
self.ax.set_ylabel('Y')
|
|
5371
|
-
self.ax.set_title(f'Slice {self.current_slice}')
|
|
5372
|
-
|
|
5373
|
-
# Make axis labels and ticks black for visibility against white background
|
|
5374
|
-
self.ax.xaxis.label.set_color('black')
|
|
5375
|
-
self.ax.yaxis.label.set_color('black')
|
|
5376
|
-
self.ax.title.set_color('black')
|
|
5377
|
-
self.ax.tick_params(colors='black')
|
|
5378
|
-
for spine in self.ax.spines.values():
|
|
5379
|
-
spine.set_color('black')
|
|
5380
|
-
|
|
5381
|
-
# Adjust the layout to ensure the plot fits well in the figure
|
|
5382
|
-
self.figure.tight_layout()
|
|
5605
|
+
highlight_cmap = LinearSegmentedColormap.from_list('highlight',
|
|
5606
|
+
[(0, 0, 0, 0), (1, 1, 0, 1), (0, 0.7, 1, 1)])
|
|
5607
|
+
self.ax.imshow(display_highlight, cmap=highlight_cmap, vmin=0, vmax=2, alpha=0.3, extent=crop_extent)
|
|
5383
5608
|
|
|
5384
|
-
# Redraw measurement points
|
|
5609
|
+
# Redraw measurement points efficiently (no cropping needed - these are vector graphics)
|
|
5610
|
+
# Only draw points that are within the visible region for additional performance
|
|
5385
5611
|
for point in self.measurement_points:
|
|
5386
5612
|
x1, y1, z1 = point['point1']
|
|
5387
5613
|
x2, y2, z2 = point['point2']
|
|
5388
5614
|
pair_idx = point['pair_index']
|
|
5389
5615
|
|
|
5390
|
-
#
|
|
5391
|
-
|
|
5392
|
-
|
|
5393
|
-
|
|
5394
|
-
|
|
5395
|
-
|
|
5396
|
-
|
|
5397
|
-
|
|
5398
|
-
|
|
5616
|
+
# Check if points are in visible region
|
|
5617
|
+
point1_visible = (z1 == self.current_slice and
|
|
5618
|
+
current_xlim[0] <= x1 <= current_xlim[1] and
|
|
5619
|
+
current_ylim[1] <= y1 <= current_ylim[0])
|
|
5620
|
+
point2_visible = (z2 == self.current_slice and
|
|
5621
|
+
current_xlim[0] <= x2 <= current_xlim[1] and
|
|
5622
|
+
current_ylim[1] <= y2 <= current_ylim[0])
|
|
5623
|
+
|
|
5624
|
+
if point1_visible:
|
|
5625
|
+
pt1 = self.ax.plot(x1, y1, 'yo', markersize=8)[0]
|
|
5626
|
+
txt1 = self.ax.text(x1, y1+5, str(pair_idx), color='white', ha='center', va='bottom')
|
|
5627
|
+
self.measurement_artists.extend([pt1, txt1])
|
|
5399
5628
|
|
|
5400
|
-
|
|
5401
|
-
|
|
5402
|
-
self.ax.
|
|
5403
|
-
|
|
5404
|
-
|
|
5405
|
-
self.
|
|
5406
|
-
|
|
5629
|
+
if point2_visible:
|
|
5630
|
+
pt2 = self.ax.plot(x2, y2, 'yo', markersize=8)[0]
|
|
5631
|
+
txt2 = self.ax.text(x2, y2+5, str(pair_idx), color='white', ha='center', va='bottom')
|
|
5632
|
+
self.measurement_artists.extend([pt2, txt2])
|
|
5633
|
+
|
|
5634
|
+
if z1 == z2 == self.current_slice and (point1_visible or point2_visible):
|
|
5635
|
+
line = self.ax.plot([x1, x2], [y1, y2], 'r--', alpha=0.5)[0]
|
|
5636
|
+
self.measurement_artists.append(line)
|
|
5637
|
+
|
|
5638
|
+
# Store current view limits for next update
|
|
5639
|
+
self.ax._current_xlim = current_xlim
|
|
5640
|
+
self.ax._current_ylim = current_ylim
|
|
5407
5641
|
|
|
5642
|
+
# Handle resizing
|
|
5408
5643
|
if self.resizing:
|
|
5409
5644
|
self.original_xlim = self.ax.get_xlim()
|
|
5410
5645
|
self.original_ylim = self.ax.get_ylim()
|
|
5411
|
-
|
|
5646
|
+
|
|
5647
|
+
# Restore zoom (this sets the final view, not the data extent)
|
|
5412
5648
|
if current_xlim is not None and current_ylim is not None:
|
|
5413
5649
|
self.ax.set_xlim(current_xlim)
|
|
5414
5650
|
self.ax.set_ylim(current_ylim)
|
|
5651
|
+
|
|
5415
5652
|
if reset_resize:
|
|
5416
5653
|
self.resizing = False
|
|
5417
5654
|
|
|
5418
|
-
|
|
5655
|
+
# Use draw_idle for better performance
|
|
5656
|
+
self.canvas.draw_idle()
|
|
5657
|
+
|
|
5658
|
+
except Exception as e:
|
|
5659
|
+
pass
|
|
5660
|
+
#import traceback
|
|
5661
|
+
#print(traceback.format_exc())
|
|
5662
|
+
|
|
5419
5663
|
|
|
5420
5664
|
|
|
5421
|
-
except:
|
|
5422
|
-
import traceback
|
|
5423
|
-
print(traceback.format_exc())
|
|
5424
5665
|
|
|
5425
5666
|
def get_channel_image(self, channel):
|
|
5426
5667
|
"""Find the matplotlib image object for a specific channel."""
|
|
@@ -5452,7 +5693,10 @@ class ImageViewerWindow(QMainWindow):
|
|
|
5452
5693
|
stats['num_nodes'] = my_network.network.number_of_nodes()
|
|
5453
5694
|
stats['num_edges'] = my_network.network.number_of_edges()
|
|
5454
5695
|
except:
|
|
5455
|
-
|
|
5696
|
+
try:
|
|
5697
|
+
stats['num_nodes'] = len(np.unique(my_network.nodes)) - 1
|
|
5698
|
+
except:
|
|
5699
|
+
pass
|
|
5456
5700
|
|
|
5457
5701
|
try:
|
|
5458
5702
|
idens = invert_dict(my_network.node_identities)
|
|
@@ -7378,11 +7622,10 @@ class ComIdDialog(QDialog):
         self.umap.setChecked(True)
         layout.addRow("Generate UMAP?:", self.umap)
 
-
-        self.label
-        self.label.
-        self.label
-        layout.addRow("If using above - label UMAP points?:", self.label)
+        self.label = QComboBox()
+        self.label.addItems(["No Label", "By Community", "By Neighborhood (If already calculated via 'Analyze -> Network -> Convert Network Communities...')"])
+        self.label.setCurrentIndex(0)
+        layout.addRow("Label UMAP Points How?:", self.label)
 
         self.limit = QLineEdit("")
         layout.addRow("Min Community Size for UMAP (Smaller communities will be ignored in graph, does not apply if empty)", self.limit)
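The free-text label option becomes a three-way QComboBox whose index is read later with currentIndex(). A minimal sketch of that pattern, assuming a PyQt6-style binding (the actual Qt import used by nettracer_gui.py is not visible in this hunk), with shortened item strings.

from PyQt6.QtWidgets import QApplication, QComboBox, QDialog, QFormLayout

app = QApplication([])
dialog = QDialog()
layout = QFormLayout(dialog)

label = QComboBox()
label.addItems(["No Label", "By Community", "By Neighborhood"])
label.setCurrentIndex(0)                      # default to "No Label"
layout.addRow("Label UMAP Points How?:", label)

# The run handler later reads the choice back as an integer index:
choice = label.currentIndex()                 # 0, 1, or 2
print(choice)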
@@ -7402,6 +7645,12 @@ class ComIdDialog(QDialog):
 
         try:
 
+            if self.parent().prev_coms is not None:
+                temp = my_network.communities
+                my_network.communities = self.parent().prev_coms
+            else:
+                temp = None
+
             if my_network.node_identities is None:
                 print("Node identities must be set")
 
@@ -7414,7 +7663,9 @@ class ComIdDialog(QDialog):
             mode = self.mode.currentIndex()
 
             umap = self.umap.isChecked()
-
+
+            label = self.label.currentIndex()
+
             proportional = self.proportional.isChecked()
             limit = int(self.limit.text()) if self.limit.text().strip() else 0
 
@@ -7427,10 +7678,13 @@ class ComIdDialog(QDialog):
 
             else:
 
-                info, names = my_network.community_id_info_per_com(umap = umap, label = label, limit = limit, proportional = proportional)
+                info, names = my_network.community_id_info_per_com(umap = umap, label = label, limit = limit, proportional = proportional, neighbors = temp)
 
                 self.parent().format_for_upperright_table(info, 'Community', names, 'Average of Community Makeup')
 
+            if self.parent().prev_coms is not None:
+                my_network.communities = temp
+
             self.accept()
 
         except Exception as e:
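Taken together with the hunk at +7645, prev_coms is swapped into my_network.communities before community_id_info_per_com runs and swapped back on the success path. A sketch of the same save-and-restore pattern as a context manager, which guarantees the restore even if the wrapped call raises; the Network class and its values are stand-ins for the demo, not nettracer3d objects.

from contextlib import contextmanager

@contextmanager
def swapped_attr(obj, name, value):
    # Temporarily replace obj.<name> with value, restoring it afterwards.
    original = getattr(obj, name)
    setattr(obj, name, value)
    try:
        yield obj
    finally:
        setattr(obj, name, original)

class Network:                    # stand-in for my_network (assumption)
    communities = {1: 0, 2: 1}

net = Network()
with swapped_attr(net, "communities", {1: 5, 2: 5}):
    print(net.communities)        # temporary neighborhood assignments
print(net.communities)            # original communities restored

In the diff the restore runs just before self.accept(), so an exception raised in between would skip it unless the (unshown) except branch also restores it; the try/finally form above is one way to make the restore unconditional.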
@@ -7786,10 +8040,12 @@ class NearNeighDialog(QDialog):
             title = f"Nearest {num} Neighbor(s) Distance of {targ} from {root}"
             header = f"Shortest Distance to Closest {num} {targ}(s)"
             header2 = f"{root} Node ID"
+            header3 = f'Theoretical Uniform Distance to Closest {num} {targ}(s)'
         else:
             title = f"Nearest {num} Neighbor(s) Distance Between Nodes"
             header = f"Shortest Distance to Closest {num} Nodes"
             header2 = "Root Node ID"
+            header3 = f'Simulated Theoretical Uniform Distance to Closest {num} Nodes'
 
         if centroids and my_network.node_centroids is None:
             self.parent().show_centroid_dialog()
@@ -7797,15 +8053,22 @@ class NearNeighDialog(QDialog):
             return
 
         if not numpy:
-            avg, output, quant_overlay = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, heatmap = heatmap, threed = threed, quant = quant, centroids = centroids)
+            avg, output, quant_overlay, pred = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, heatmap = heatmap, threed = threed, quant = quant, centroids = centroids)
         else:
-            avg, output, overlay, quant_overlay = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, heatmap = heatmap, threed = threed, numpy = True, quant = quant, centroids = centroids)
+            avg, output, overlay, quant_overlay, pred = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, heatmap = heatmap, threed = threed, numpy = True, quant = quant, centroids = centroids)
             self.parent().load_channel(3, overlay, data = True)
 
         if quant_overlay is not None:
             self.parent().load_channel(2, quant_overlay, data = True)
+
+        avg = {header:avg}
+
+        if pred is not None:
+
+            avg[header3] = pred
+
 
-        self.parent().format_for_upperright_table(
+        self.parent().format_for_upperright_table(avg, 'Category', 'Value', title = f'Avg {title}')
         self.parent().format_for_upperright_table(output, header2, header, title = title)
 
         self.accept()
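nearest_neighbors_avg now returns an additional pred value that is reported as a theoretical uniform-distance baseline alongside the measured average. This diff does not show how that baseline is computed; one plausible approach, sketched below under that assumption, is a Monte Carlo estimate of the mean distance to the num nearest neighbors when the same number of points is scattered uniformly in the same (z, y, x) volume.

import numpy as np
from scipy.spatial import cKDTree

def uniform_nn_baseline(n_points, shape, num=1, trials=20, seed=None):
    # Estimate the expected mean distance to the `num` nearest neighbors
    # for n_points placed uniformly at random in a box of the given
    # (z, y, x) extent. Purely illustrative of a "simulated theoretical
    # uniform distance"; not nettracer3d's implementation.
    rng = np.random.default_rng(seed)
    means = []
    for _ in range(trials):
        pts = rng.uniform(0.0, 1.0, size=(n_points, 3)) * np.asarray(shape)
        tree = cKDTree(pts)
        dists, _ = tree.query(pts, k=num + 1)   # k+1: nearest hit is the point itself
        means.append(dists[:, 1:].mean())
    return float(np.mean(means))

print(uniform_nn_baseline(n_points=500, shape=(50, 512, 512), num=1))

Comparing the measured average against such a baseline indicates whether the target objects sit closer to (clustering) or farther from (dispersion) the root objects than uniform placement would predict.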
@@ -7830,7 +8093,7 @@ class NearNeighDialog(QDialog):
             root = available[0]
 
             for targ in available:
-                avg, _, _ = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, centroids = centroids)
+                avg, _, _, _ = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, centroids = centroids)
                 output_dict[f"{root} vs {targ}"] = avg
 
             del available[0]
@@ -8742,15 +9005,6 @@ class ResizeDialog(QDialog):
         undo_button.clicked.connect(lambda: self.run_resize(undo = True))
         layout.addRow(undo_button)
 
-        if my_network.xy_scale != my_network.z_scale:
-            norm_button_upsize = QPushButton(f"Normalize Scaling with Upsample")
-            norm_button_upsize.clicked.connect(lambda: self.run_resize(upsize = True, special = True))
-            layout.addRow(norm_button_upsize)
-
-            norm_button_downsize = QPushButton("Normalize Scaling with Downsample")
-            norm_button_downsize.clicked.connect(lambda: self.run_resize(upsize = False, special = True))
-            layout.addRow(norm_button_downsize)
-
         run_button = QPushButton("Run Resize")
         run_button.clicked.connect(self.run_resize)
         layout.addRow(run_button)
@@ -8764,7 +9018,7 @@ class ResizeDialog(QDialog):
 
     def run_resize(self, undo = False, upsize = True, special = False):
         try:
-            self.parent().resizing =
+            self.parent().resizing = False
             # Get parameters
             try:
                 resize = float(self.resize.text()) if self.resize.text() else None
@@ -8778,6 +9032,12 @@ class ResizeDialog(QDialog):
 
             resize = resize if resize is not None else (zsize, ysize, xsize)
 
+            if (self.parent().shape[1] * resize) < 1 or (self.parent().shape[2] * resize) < 1:
+                print("Incompatible x/y dimensions")
+                return
+            elif (self.parent().shape[0] * resize) < 1:
+                resize = (1, resize, resize)
+
             if special:
                 if upsize:
                     if (my_network.z_scale > my_network.xy_scale):
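The new guard rejects scalar resize factors that would shrink the in-plane axes below one pixel and, when only the z axis would collapse, converts the scalar into a per-axis (z, y, x) tuple that leaves z unscaled. A standalone sketch of that logic, with the function name and (z, y, x) ordering assumed for illustration.

def guard_resize_factor(shape, factor):
    # Validate a scalar resize factor for a (z, y, x) volume, mirroring
    # the guard added in the diff.
    z, y, x = shape
    if y * factor < 1 or x * factor < 1:
        raise ValueError("Incompatible x/y dimensions")
    if z * factor < 1:
        return (1, factor, factor)   # leave the z axis unscaled
    return factor

print(guard_resize_factor((3, 512, 512), 0.1))    # (1, 0.1, 0.1)
print(guard_resize_factor((50, 512, 512), 0.1))   # 0.1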
@@ -8819,11 +9079,7 @@ class ResizeDialog(QDialog):
                 new_shape = tuple(int(dim * resize) for dim in array_shape)
             else:
                 new_shape = tuple(int(dim * factor) for dim, factor in zip(array_shape, resize))
-
-            #if any(dim < 1 for dim in new_shape):
-                #QMessageBox.critical(self, "Error", f"Resize would result in invalid dimensions: {new_shape}")
-                #self.reset_fields()
-                #return
+
 
             cubic = self.cubic.isChecked()
             order = 3 if cubic else 0
@@ -8837,7 +9093,7 @@ class ResizeDialog(QDialog):
             for channel in range(4):
                 if self.parent().channel_data[channel] is not None:
                     resized_data = n3d.resize(self.parent().channel_data[channel], resize, order)
-                    self.parent().load_channel(channel, channel_data=resized_data, data=True
+                    self.parent().load_channel(channel, channel_data=resized_data, data=True)
 
 
@@ -8858,7 +9114,7 @@ class ResizeDialog(QDialog):
             for channel in range(4):
                 if self.parent().channel_data[channel] is not None:
                     resized_data = n3d.upsample_with_padding(self.parent().channel_data[channel], original_shape = self.parent().original_shape)
-                    self.parent().load_channel(channel, channel_data=resized_data, data=True
+                    self.parent().load_channel(channel, channel_data=resized_data, data=True)
 
         if self.parent().mini_overlay_data is not None:
 
@@ -10172,7 +10428,7 @@ class SegmentationWorker(QThread):
         self.mem_lock = mem_lock
         self._stop = False
         self._paused = False # Add pause flag
-        if self.machine_window.parent().shape[1] * self.machine_window.parent().shape[2] > 3000 * 3000: #arbitrary throttle for large arrays.
+        if self.machine_window.parent().shape[1] * self.machine_window.parent().shape[2] > 3000 * 3000 * self.machine_window.parent().downsample_factor: #arbitrary throttle for large arrays.
            self.update_interval = 10
         else:
            self.update_interval = 1 # Increased to 1s
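The refresh throttle now scales its pixel-count threshold by the viewer's downsample factor, so downsampled previews of large slices keep the faster one-second refresh. A tiny sketch of that decision, with the function name made up and the threshold taken from the numbers in the hunk.

def choose_update_interval(shape, downsample_factor=1, threshold=3000 * 3000):
    # Large in-plane arrays refresh every 10 s, everything else every 1 s.
    y, x = shape[1], shape[2]
    return 10 if y * x > threshold * downsample_factor else 1

print(choose_update_interval((50, 4096, 4096)))                        # 10
print(choose_update_interval((50, 4096, 4096), downsample_factor=4))   # 1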
@@ -10218,11 +10474,14 @@ class SegmentationWorker(QThread):
                     if self._stop:
                         break
 
-
-
-
-
-
+                    if foreground_coords:
+                        fg_array = np.array(list(foreground_coords))
+                        self.overlay[fg_array[:, 0], fg_array[:, 1], fg_array[:, 2]] = 1
+
+                    if background_coords:
+                        bg_array = np.array(list(background_coords))
+                        self.overlay[bg_array[:, 0], bg_array[:, 1], bg_array[:, 2]] = 2
+
                     self.chunks_since_update += 1
                     current_time = time.time()
                     if (self.chunks_since_update >= self.chunks_per_update and
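The previously empty update step now writes the accumulated foreground and background coordinates into the overlay in bulk. A self-contained sketch of the same fancy-indexing pattern, which replaces a per-voxel Python loop with two vectorized assignments; the array shape and coordinates here are made up for the demo.

import numpy as np

overlay = np.zeros((4, 8, 8), dtype=np.uint8)
foreground_coords = {(0, 1, 1), (0, 1, 2), (2, 5, 5)}
background_coords = {(3, 0, 0), (3, 0, 1)}

if foreground_coords:
    fg = np.array(list(foreground_coords))      # one (N, 3) index array
    overlay[fg[:, 0], fg[:, 1], fg[:, 2]] = 1   # label foreground voxels
if background_coords:
    bg = np.array(list(background_coords))
    overlay[bg[:, 0], bg[:, 1], bg[:, 2]] = 2   # label background voxels

print(int(overlay.sum()))   # 3*1 + 2*2 = 7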
@@ -10247,27 +10506,6 @@ class SegmentationWorker(QThread):
             import traceback
             traceback.print_exc()
 
-    def run_batch(self):
-        try:
-            foreground_coords, _ = self.segmenter.segment_volume()
-
-            # Modify the array directly
-            self.overlay.fill(False)
-            for z,y,x in foreground_coords:
-                # Check for pause/stop during batch processing too
-                self._check_pause()
-                if self._stop:
-                    break
-                self.overlay[z,y,x] = True
-
-            self.finished.emit()
-
-        except Exception as e:
-            print(f"Error in segmentation: {e}")
-            raise
-
-
-
 
 
 class ThresholdWindow(QMainWindow):
     def __init__(self, parent=None, accepted_mode=0):