nettracer3d 0.9.4-py3-none-any.whl → 0.9.5-py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of nettracer3d might be problematic.

@@ -210,6 +210,11 @@ class ImageViewerWindow(QMainWindow):
210
210
  buttons_widget = QWidget()
211
211
  buttons_layout = QHBoxLayout(buttons_widget)
212
212
 
213
+ self.reset_view = QPushButton("🏠")
214
+ self.reset_view.setFixedSize(20, 20)
215
+ self.reset_view.clicked.connect(self.home)
216
+ control_layout.addWidget(self.reset_view)
217
+
213
218
  # "Create" zoom button
214
219
  self.zoom_button = QPushButton("🔍")
215
220
  self.zoom_button.setCheckable(True)
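
Note on the hunk above: the new 🏠 button is an ordinary QPushButton whose clicked signal is wired to a home() slot that redraws the view at its default extent. A minimal, self-contained sketch of the same wiring (assuming PyQt6, which the scoped Qt.CursorShape enums used elsewhere in this file suggest; the MiniViewer class and its home() body are illustrative, not nettracer3d's code):

```python
from PyQt6.QtWidgets import QApplication, QHBoxLayout, QMainWindow, QPushButton, QWidget

class MiniViewer(QMainWindow):
    def __init__(self):
        super().__init__()
        container = QWidget()
        layout = QHBoxLayout(container)
        self.reset_view = QPushButton("🏠")          # small "home" button
        self.reset_view.setFixedSize(20, 20)
        self.reset_view.clicked.connect(self.home)   # clicked -> home()
        layout.addWidget(self.reset_view)
        self.setCentralWidget(container)

    def home(self):
        # In the diff, home() simply calls update_display() with no preserved
        # zoom, which redraws the image at its full default extent.
        print("view reset")

if __name__ == "__main__":
    app = QApplication([])
    viewer = MiniViewer()
    viewer.show()
    app.exec()
```
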
@@ -293,14 +298,6 @@ class ImageViewerWindow(QMainWindow):
293
298
 
294
299
  control_layout.addWidget(channel_container)
295
300
 
296
- self.show_channels = QPushButton("✓")
297
- self.show_channels.setCheckable(True)
298
- self.show_channels.setChecked(True)
299
- self.show_channels.setFixedSize(20, 20)
300
- self.show_channels.clicked.connect(self.toggle_chan_load)
301
- control_layout.addWidget(self.show_channels)
302
- self.chan_load = True
303
-
304
301
  # Create the main widget and layout
305
302
  main_widget = QWidget()
306
303
  self.setCentralWidget(main_widget)
@@ -1979,6 +1976,13 @@ class ImageViewerWindow(QMainWindow):
1979
1976
  except:
1980
1977
  pass
1981
1978
 
1979
+
1980
+ if my_network.network is not None:
1981
+ try:
1982
+ info_dict['Neighbors'] = list(my_network.network.neighbors(label))
1983
+ except:
1984
+ pass
1985
+
1982
1986
  if my_network.communities is not None:
1983
1987
  try:
1984
1988
  info_dict['Community'] = my_network.communities[label]
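
The Neighbors entry added above assumes my_network.network behaves like a networkx graph, where G.neighbors(label) yields the labels adjacent to a node. A standalone sketch of that lookup on a toy graph (the graph below is invented for illustration):

```python
import networkx as nx

# Toy graph standing in for my_network.network
G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3), (2, 4)])

label = 1
info_dict = {}
try:
    # Same call pattern as the diff: adjacency of the clicked label
    info_dict['Neighbors'] = list(G.neighbors(label))
except nx.NetworkXError:
    pass  # label not present in the graph

print(info_dict)  # {'Neighbors': [2, 3]}
```
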
@@ -2012,6 +2016,24 @@ class ImageViewerWindow(QMainWindow):
2012
2016
 
2013
2017
  info_dict['Object Class'] = 'Edge'
2014
2018
 
2019
+ try:
2020
+ # Get the existing DataFrame from the model
2021
+ original_df = self.network_table.model()._data
2022
+
2023
+ # Create mask for rows where one column is any original node AND the other column is any neighbor
2024
+ mask = (
2025
+ (original_df.iloc[:, 0].isin(self.clicked_values['nodes'])) |
2026
+ (original_df.iloc[:, 1].isin(self.clicked_values['nodes'])) |
2027
+ (original_df.iloc[:, 2].isin(self.clicked_values['edges']))
2028
+ )
2029
+
2030
+ filtered_df = original_df[mask].copy()
2031
+ node_list = list(set(filtered_df.iloc[:, 0].to_list() + filtered_df.iloc[:, 1].to_list()))
2032
+ info_dict["Num Nodes"] = len(node_list)
2033
+ info_dict['Nodes'] = node_list
2034
+ except:
2035
+ pass
2036
+
2015
2037
  if my_network.edge_centroids is not None:
2016
2038
  try:
2017
2039
  info_dict['Centroid'] = my_network.edge_centroids[label]
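
The edge-info block added above filters the network table, a pandas DataFrame whose first two columns appear to hold node IDs and whose third holds the edge ID, then collects the unique node IDs incident to the selection. A small sketch of that masking pattern on a made-up table (column layout assumed from the diff):

```python
import pandas as pd

# Invented edge list: columns are node A, node B, edge ID (layout assumed)
df = pd.DataFrame({'node_a': [1, 2, 5], 'node_b': [2, 3, 6], 'edge_id': [10, 11, 12]})
clicked_values = {'nodes': [2], 'edges': [12]}

# Keep rows where either node column matches a clicked node,
# or the edge column matches a clicked edge
mask = (
    df.iloc[:, 0].isin(clicked_values['nodes']) |
    df.iloc[:, 1].isin(clicked_values['nodes']) |
    df.iloc[:, 2].isin(clicked_values['edges'])
)
filtered_df = df[mask].copy()
node_list = list(set(filtered_df.iloc[:, 0].to_list() + filtered_df.iloc[:, 1].to_list()))
print(len(node_list), node_list)
```
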
@@ -2440,12 +2462,11 @@ class ImageViewerWindow(QMainWindow):
2440
2462
  print(f"Error: {e}")
2441
2463
 
2442
2464
 
2443
- def toggle_chan_load(self):
2465
+ def home(self):
2466
+
2467
+ self.update_display()
2468
+
2444
2469
 
2445
- if self.show_channels.isChecked():
2446
- self.chan_load = True
2447
- else:
2448
- self.chan_load = False
2449
2470
 
2450
2471
  def toggle_highlight(self):
2451
2472
  self.highlight = self.high_button.isChecked()
@@ -2471,7 +2492,8 @@ class ImageViewerWindow(QMainWindow):
2471
2492
  self.zoom_mode = self.zoom_button.isChecked()
2472
2493
 
2473
2494
  if self.zoom_mode:
2474
- self.pan_button.setChecked(False)
2495
+ if self.pan_mode:
2496
+ self.pan_button.click()
2475
2497
 
2476
2498
  self.pen_button.setChecked(False)
2477
2499
  self.brush_mode = False
@@ -2547,33 +2569,23 @@ class ImageViewerWindow(QMainWindow):
2547
2569
  current_xlim = self.ax.get_xlim()
2548
2570
  current_ylim = self.ax.get_ylim()
2549
2571
 
2550
- if (abs(current_xlim[1] - current_xlim[0]) * abs(current_ylim[0] - current_ylim[1]) > 400 * 400 and not self.shape[2] * self.shape[1] > 9000 * 9000 * 6) or self.shape[2] * self.shape[1] < 3000 * 3000:
2572
+ # Create static background from currently visible channels
2573
+ self.create_pan_background()
2574
+
2575
+ # Hide all channels and show only the background
2576
+ self.channel_visible = [False] * 4
2577
+ self.is_pan_preview = True
2578
+
2579
+ # Get current downsample factor
2580
+ current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
2581
+ current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
2582
+ # Update display to show only background
2583
+ self.update_display_pan_mode(current_xlim, current_ylim)
2551
2584
 
2552
- # Create static background from currently visible channels
2553
- self.create_pan_background()
2554
-
2555
- # Hide all channels and show only the background
2556
- self.channel_visible = [False] * 4
2557
- self.is_pan_preview = True
2558
-
2559
- # Get current downsample factor
2560
- current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
2561
- current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
2562
- # Update display to show only background
2563
- self.update_display_pan_mode(current_xlim, current_ylim)
2564
- self.needs_update = False
2565
- else:
2566
- self.needs_update = True
2567
- current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
2568
- current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
2569
- # Update display to show only background
2570
- self._first_pan_done = False
2571
- self.update_display(current_xlim, current_ylim)
2572
2585
 
2573
2586
  else:
2574
- current_xlim = self.ax.get_xlim()
2575
- current_ylim = self.ax.get_ylim()
2576
- self.update_display(preserve_zoom=(current_xlim, current_ylim))
2587
+ self.setEnabled(True)
2588
+ self.update_display(preserve_zoom=(self.ax.get_xlim(), self.ax.get_ylim()))
2577
2589
  if self.machine_window is None:
2578
2590
  self.canvas.setCursor(Qt.CursorShape.ArrowCursor)
2579
2591
  else:
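
The rewritten pan toggle above drops the old size-based heuristic: entering pan mode now always builds one static composite of the visible channels and shows only that image while panning, instead of recompositing every channel on each move. A rough sketch of the idea with plain matplotlib and NumPy (the layer data and blending below are invented; nettracer3d's own compositing appears in later hunks):

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
layers = [rng.random((512, 512)) for _ in range(3)]   # stand-ins for channels

# Composite once, up front (here a simple average; the real viewer alpha-blends)
static_background = np.mean(layers, axis=0)

fig, ax = plt.subplots()
ax.imshow(static_background, cmap='gray', aspect='equal')

# "Panning" then only shifts the axis limits; the expensive compositing
# is not repeated for every mouse move.
ax.set_xlim(100, 300)
ax.set_ylim(300, 100)
fig.canvas.draw_idle()
plt.show()
```
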
@@ -2584,6 +2596,9 @@ class ImageViewerWindow(QMainWindow):
2584
2596
  self.brush_mode = self.pen_button.isChecked()
2585
2597
  if self.brush_mode:
2586
2598
 
2599
+ if self.pan_mode:
2600
+ self.pan_button.click()
2601
+
2587
2602
  self.pm = painting.PaintManager(parent = self)
2588
2603
 
2589
2604
  # Start virtual paint session
@@ -3069,14 +3084,7 @@ class ImageViewerWindow(QMainWindow):
3069
3084
  self.ax.set_xlim(new_xlim)
3070
3085
  self.ax.set_ylim(new_ylim)
3071
3086
 
3072
- # Only call draw_idle if we have a pan background OR if this isn't the first pan
3073
- if self.pan_background_image is not None or self._first_pan_done == True:
3074
- self.canvas.draw_idle()
3075
- else:
3076
- # For the first pan without background, mark that we've done the first pan
3077
- self._first_pan_done = True
3078
- # Force a proper display update instead of draw_idle
3079
- self.update_display(preserve_zoom=(new_xlim, new_ylim))
3087
+ self.canvas.draw_idle()
3080
3088
 
3081
3089
  # Update pan start position
3082
3090
  self.pan_start = (event.xdata, event.ydata)
@@ -3105,18 +3113,36 @@ class ImageViewerWindow(QMainWindow):
3105
3113
 
3106
3114
 
3107
3115
  def create_pan_background(self):
3108
- """Create a static background image from currently visible channels with proper rendering"""
3109
- # Store current zoom state
3110
- current_xlim = self.ax.get_xlim()
3111
- current_ylim = self.ax.get_ylim()
3112
-
3113
- # Render all visible channels with proper colors/brightness into a single composite
3114
- self.pan_background_image = self.create_composite_for_pan()
3115
- self.pan_zoom_state = (current_xlim, current_ylim)
3116
-
3116
+ """Create a static background image from currently visible channels with proper rendering"""
3117
+ # Store current zoom state
3118
+ current_xlim = self.ax.get_xlim()
3119
+ current_ylim = self.ax.get_ylim()
3120
+
3121
+ # Try GPU acceleration first, fallback to CPU
3122
+ try:
3123
+ import cupy as cp
3124
+ self.use_gpu = True
3125
+ except ImportError:
3126
+ self.use_gpu = False
3127
+
3128
+ # Render all visible channels with proper colors/brightness into a single composite
3129
+ self.channel_visible = self.pre_pan_channel_state.copy()
3130
+ try:
3131
+ if self.use_gpu:
3132
+ self.pan_background_image = self.create_composite_for_pan_gpu()
3133
+ else:
3134
+ self.pan_background_image = self.create_composite_for_pan()
3135
+ except Exception as e:
3136
+ print(f'GPU implementation failed: {e}, falling back to CPU')
3137
+ self.use_gpu = False
3138
+ self.pan_background_image = self.create_composite_for_pan()
3139
+
3140
+ self.pan_zoom_state = (current_xlim, current_ylim)
3117
3141
 
3118
- def create_composite_for_pan(self):
3119
- """Create a properly rendered composite image for panning with downsample support"""
3142
+ def create_composite_for_pan_gpu(self):
3143
+ """Create a properly rendered composite image for panning with GPU acceleration"""
3144
+ import cupy as cp
3145
+
3120
3146
  # Get active channels and dimensions (copied from update_display)
3121
3147
  active_channels = [i for i in range(4) if self.channel_data[i] is not None]
3122
3148
  if active_channels:
@@ -3134,6 +3160,28 @@ class ImageViewerWindow(QMainWindow):
3134
3160
  current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
3135
3161
  current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
3136
3162
 
3163
+ # Helper function to crop and downsample image on GPU
3164
+ def crop_and_downsample_image_gpu(image_cp, y_start, y_end, x_start, x_end, factor):
3165
+ # Crop first
3166
+ if len(image_cp.shape) == 2:
3167
+ cropped = image_cp[y_start:y_end, x_start:x_end]
3168
+ elif len(image_cp.shape) == 3:
3169
+ cropped = image_cp[y_start:y_end, x_start:x_end, :]
3170
+ else:
3171
+ cropped = image_cp
3172
+
3173
+ # Then downsample if needed
3174
+ if factor == 1:
3175
+ return cropped
3176
+
3177
+ if len(cropped.shape) == 2:
3178
+ return cropped[::factor, ::factor]
3179
+ elif len(cropped.shape) == 3:
3180
+ return cropped[::factor, ::factor, :]
3181
+ else:
3182
+ return cropped
3183
+
3184
+ min_height, min_width = self.original_dims
3137
3185
 
3138
3186
  # Calculate the visible region in pixel coordinates
3139
3187
  x_min = max(0, int(np.floor(current_xlim[0] + 0.5)))
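
The create_pan_background rewrite above probes for GPU support by attempting to import cupy and falling back to the CPU path on ImportError or on any runtime failure. A minimal sketch of that guard-and-fallback pattern (function names below are illustrative; only the structure mirrors the diff):

```python
import numpy as np

try:
    import cupy as cp   # optional GPU dependency
    use_gpu = True
except ImportError:
    use_gpu = False

def normalize_cpu(image):
    # CPU fallback: plain NumPy min-max normalization
    lo, hi = float(image.min()), float(image.max())
    return (image - lo) / (hi - lo) if hi > lo else np.zeros_like(image)

def normalize_gpu(image):
    # GPU path: same math on a CuPy array, copied back to host at the end
    img = cp.asarray(image)
    lo, hi = float(img.min()), float(img.max())
    out = (img - lo) / (hi - lo) if hi > lo else cp.zeros_like(img)
    return cp.asnumpy(out)

image = np.random.rand(256, 256).astype(np.float32)
try:
    result = normalize_gpu(image) if use_gpu else normalize_cpu(image)
except Exception as e:
    print(f'GPU implementation failed: {e}, falling back to CPU')
    result = normalize_cpu(image)
```
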
@@ -3141,41 +3189,281 @@ class ImageViewerWindow(QMainWindow):
3141
3189
  y_min = max(0, int(np.floor(current_ylim[1] + 0.5))) # Note: y is flipped
3142
3190
  y_max = min(min_height, int(np.ceil(current_ylim[0] + 0.5)))
3143
3191
 
3144
- box_len = x_max - x_min
3145
- box_height = y_max - y_min
3192
+ box_len = int((x_max - x_min)/2)
3193
+ box_height = int((y_max - y_min)/2)
3146
3194
  x_min = max(0, x_min - box_len)
3147
3195
  x_max = min(self.shape[2], x_max + box_len)
3148
3196
  y_min = max(0, y_min - box_height)
3149
3197
  y_max = min(self.shape[1], y_max + box_height)
3150
3198
 
3151
- # If using image pyramids
3152
3199
  size = (x_max - x_min) * (y_max - y_min)
3153
- val = int(np.ceil(size/(3000 * 3000)))
3154
- if self.shape[1] * self.shape[2] > 3000 * 3000 * val:
3155
- val = 3
3156
-
3200
+ val = int(np.ceil(size/(2000 * 2000)))
3157
3201
  self.validate_downsample_input(text = val, update = False)
3158
3202
 
3159
3203
  downsample_factor = self.downsample_factor
3160
3204
 
3161
- # Calculate display dimensions (downsampled)
3162
- display_height = min_height // downsample_factor
3163
- display_width = min_width // downsample_factor
3205
+ # Add some padding to avoid edge artifacts during pan/zoom
3206
+ padding = max(10, downsample_factor * 2)
3207
+ x_min_padded = max(0, x_min - padding)
3208
+ x_max_padded = min(min_width, x_max + padding)
3209
+ y_min_padded = max(0, y_min - padding)
3210
+ y_max_padded = min(min_height, y_max + padding)
3211
+
3212
+ display_height = (y_max_padded - y_min_padded) // downsample_factor
3213
+ display_width = (x_max_padded - x_min_padded) // downsample_factor
3164
3214
 
3165
- # Helper function to downsample image (same as in update_display)
3166
- def downsample_image(image, factor):
3167
- if factor == 1:
3168
- return image
3215
+ # Create a blank RGBA composite to accumulate all channels (using display dimensions) - on GPU
3216
+ composite = cp.zeros((display_height, display_width, 4), dtype=cp.float32)
3217
+
3218
+ # Process each visible channel exactly like update_display does
3219
+ for channel in range(4):
3220
+ if (self.channel_visible[channel] and
3221
+ self.channel_data[channel] is not None):
3222
+
3223
+ # Get current slice data (same logic as update_display)
3224
+ is_rgb = len(self.channel_data[channel].shape) == 4 and (self.channel_data[channel].shape[-1] == 3 or self.channel_data[channel].shape[-1] == 4)
3225
+
3226
+ if len(self.channel_data[channel].shape) == 3 and not is_rgb:
3227
+ current_image = self.channel_data[channel][self.current_slice, :, :]
3228
+ elif is_rgb:
3229
+ current_image = self.channel_data[channel][self.current_slice]
3230
+ else:
3231
+ current_image = self.channel_data[channel]
3232
+
3233
+ # Convert to CuPy array and crop/downsample on GPU
3234
+ current_image_cp = cp.asarray(current_image)
3235
+ display_image_cp = crop_and_downsample_image_gpu(
3236
+ current_image_cp, y_min_padded, y_max_padded,
3237
+ x_min_padded, x_max_padded, downsample_factor)
3238
+
3239
+ if is_rgb and self.channel_data[channel].shape[-1] == 3:
3240
+ # RGB image - convert to RGBA and blend
3241
+ rgb_alpha = cp.ones((*display_image_cp.shape[:2], 4), dtype=cp.float32)
3242
+ rgb_alpha[:, :, :3] = display_image_cp.astype(cp.float32) / 255.0
3243
+ rgb_alpha[:, :, 3] = 0.7 # Same alpha as update_display
3244
+ composite = self.blend_layers_gpu(composite, rgb_alpha)
3245
+
3246
+ elif is_rgb and self.channel_data[channel].shape[-1] == 4:
3247
+ # RGBA image - blend directly
3248
+ rgba_image = display_image_cp.astype(cp.float32) / 255.0
3249
+ composite = self.blend_layers_gpu(composite, rgba_image)
3250
+
3251
+ else:
3252
+ # Regular channel processing (same logic as update_display)
3253
+ if self.min_max[channel][0] == None:
3254
+ self.min_max[channel][0] = cp.asnumpy(cp.min(current_image_cp))
3255
+ if self.min_max[channel][1] == None:
3256
+ self.min_max[channel][1] = cp.asnumpy(cp.max(current_image_cp))
3257
+
3258
+ img_min = self.min_max[channel][0]
3259
+ img_max = self.min_max[channel][1]
3260
+
3261
+ if img_min == img_max:
3262
+ vmin = img_min
3263
+ vmax = img_min + 1
3264
+ else:
3265
+ vmin = img_min + (img_max - img_min) * self.channel_brightness[channel]['min']
3266
+ vmax = img_min + (img_max - img_min) * self.channel_brightness[channel]['max']
3267
+
3268
+ # Normalize the downsampled image on GPU
3269
+ if vmin == vmax:
3270
+ normalized_image = cp.zeros_like(display_image_cp)
3271
+ else:
3272
+ normalized_image = cp.clip((display_image_cp - vmin) / (vmax - vmin), 0, 1)
3273
+
3274
+ # Apply channel color and alpha
3275
+ if channel == 2 and self.machine_window is not None:
3276
+ # Special case for machine window channel 2
3277
+ channel_rgba = self.apply_machine_colormap_gpu(display_image_cp)
3278
+ else:
3279
+ # Regular channel with custom color
3280
+ color = self.base_colors[channel]
3281
+ channel_rgba = cp.zeros((*normalized_image.shape, 4), dtype=cp.float32)
3282
+ channel_rgba[:, :, 0] = normalized_image * color[0] # R
3283
+ channel_rgba[:, :, 1] = normalized_image * color[1] # G
3284
+ channel_rgba[:, :, 2] = normalized_image * color[2] # B
3285
+ channel_rgba[:, :, 3] = normalized_image * 0.7 # A (same alpha as update_display)
3286
+
3287
+ # Blend this channel into the composite
3288
+ composite = self.blend_layers_gpu(composite, channel_rgba)
3289
+
3290
+ # Add highlight overlays if they exist (with downsampling)
3291
+ if self.mini_overlay and self.highlight and self.machine_window is None:
3292
+ mini_overlay_cp = cp.asarray(self.mini_overlay_data)
3293
+ display_overlay = crop_and_downsample_image_gpu(mini_overlay_cp, y_min_padded, y_max_padded, x_min_padded, x_max_padded, downsample_factor)
3294
+ highlight_rgba = self.create_highlight_rgba_gpu(display_overlay, yellow=True)
3295
+ composite = self.blend_layers_gpu(composite, highlight_rgba)
3296
+ elif self.highlight_overlay is not None and self.highlight:
3297
+ highlight_slice = self.highlight_overlay[self.current_slice]
3298
+ highlight_slice_cp = cp.asarray(highlight_slice)
3299
+ display_highlight = crop_and_downsample_image_gpu(highlight_slice_cp, y_min_padded, y_max_padded, x_min_padded, x_max_padded, downsample_factor)
3300
+ if self.machine_window is None:
3301
+ highlight_rgba = self.create_highlight_rgba_gpu(display_highlight, yellow=True)
3302
+ else:
3303
+ highlight_rgba = self.create_highlight_rgba_gpu(display_highlight, yellow=False)
3304
+ composite = self.blend_layers_gpu(composite, highlight_rgba)
3305
+
3306
+ # Convert back to CPU and to 0-255 range for display
3307
+ composite_cpu = cp.asnumpy(composite)
3308
+ return (composite_cpu * 255).astype(np.uint8)
3309
+
3310
+ def apply_machine_colormap_gpu(self, image_cp):
3311
+ """Apply the special machine window colormap for channel 2 - GPU version"""
3312
+ import cupy as cp
3313
+
3314
+ rgba = cp.zeros((*image_cp.shape, 4), dtype=cp.float32)
3315
+
3316
+ # Transparent for 0
3317
+ mask_0 = (image_cp == 0)
3318
+ rgba[mask_0] = cp.array([0, 0, 0, 0])
3319
+
3320
+ # Light green for 1
3321
+ mask_1 = (image_cp == 1)
3322
+ rgba[mask_1] = cp.array([0.5, 1, 0.5, 0.7])
3323
+
3324
+ # Light red for 2
3325
+ mask_2 = (image_cp == 2)
3326
+ rgba[mask_2] = cp.array([1, 0.5, 0.5, 0.7])
3327
+
3328
+ return rgba
3329
+
3330
+ def create_highlight_rgba_gpu(self, highlight_data_cp, yellow=True):
3331
+ """Create RGBA highlight overlay - GPU version"""
3332
+ import cupy as cp
3333
+
3334
+ rgba = cp.zeros((*highlight_data_cp.shape, 4), dtype=cp.float32)
3335
+
3336
+ if yellow:
3337
+ # Yellow highlight
3338
+ mask = highlight_data_cp > 0
3339
+ rgba[mask] = cp.array([1, 1, 0, 0.8]) # Yellow with alpha 0.8
3340
+ else:
3341
+ # Multi-color highlight for machine window
3342
+ mask_1 = (highlight_data_cp == 1)
3343
+ mask_2 = (highlight_data_cp == 2)
3344
+ rgba[mask_1] = cp.array([1, 1, 0, 0.5]) # Yellow for 1
3345
+ rgba[mask_2] = cp.array([0, 0.7, 1, 0.5]) # Blue for 2
3346
+
3347
+ return rgba
3348
+
3349
+ def blend_layers_gpu(self, base_cp, overlay_cp):
3350
+ """Alpha blend two RGBA layers - GPU version"""
3351
+ import cupy as cp
3352
+
3353
+ def resize_overlay_to_base_gpu(overlay_arr_cp, base_arr_cp):
3354
+ base_height, base_width = base_arr_cp.shape[:2]
3355
+ overlay_height, overlay_width = overlay_arr_cp.shape[:2]
3169
3356
 
3170
- # Handle different image types
3357
+ # First crop if overlay is larger
3358
+ cropped_overlay = overlay_arr_cp[:base_height, :base_width]
3359
+
3360
+ # Then pad if still smaller after cropping
3361
+ current_height, current_width = cropped_overlay.shape[:2]
3362
+ pad_height = base_height - current_height
3363
+ pad_width = base_width - current_width
3364
+
3365
+ if pad_height > 0 or pad_width > 0:
3366
+ cropped_overlay = cp.pad(cropped_overlay,
3367
+ ((0, pad_height), (0, pad_width), (0, 0)),
3368
+ mode='constant', constant_values=0)
3369
+
3370
+ return cropped_overlay
3371
+
3372
+ # Resize the ENTIRE overlay array to match base dimensions
3373
+ if overlay_cp.shape[:2] != base_cp.shape[:2]:
3374
+ overlay_cp = resize_overlay_to_base_gpu(overlay_cp, base_cp)
3375
+
3376
+ # Now extract alpha channels (they should be the same size)
3377
+ alpha_overlay = overlay_cp[:, :, 3:4]
3378
+ alpha_base = base_cp[:, :, 3:4]
3379
+
3380
+ # Calculate output alpha
3381
+ alpha_out = alpha_overlay + alpha_base * (1 - alpha_overlay)
3382
+
3383
+ # Calculate output RGB
3384
+ rgb_out = cp.zeros_like(base_cp[:, :, :3])
3385
+ mask = alpha_out[:, :, 0] > 0
3386
+
3387
+ rgb_out[mask] = (overlay_cp[mask, :3] * alpha_overlay[mask] +
3388
+ base_cp[mask, :3] * alpha_base[mask] * (1 - alpha_overlay[mask])) / alpha_out[mask]
3389
+
3390
+ # Combine RGB and alpha
3391
+ result = cp.zeros_like(base_cp)
3392
+ result[:, :, :3] = rgb_out
3393
+ result[:, :, 3:4] = alpha_out
3394
+
3395
+ return result
3396
+
3397
+ def create_composite_for_pan(self):
3398
+ """Create a properly rendered composite image for panning with downsample support"""
3399
+ # Get active channels and dimensions (copied from update_display)
3400
+ active_channels = [i for i in range(4) if self.channel_data[i] is not None]
3401
+ if active_channels:
3402
+ dims = [(self.channel_data[i].shape[1:3] if len(self.channel_data[i].shape) >= 3 else
3403
+ self.channel_data[i].shape) for i in active_channels]
3404
+ min_height = min(d[0] for d in dims)
3405
+ min_width = min(d[1] for d in dims)
3406
+ else:
3407
+ return None
3408
+
3409
+ # Store original dimensions for coordinate mapping
3410
+ self.original_dims = (min_height, min_width)
3411
+
3412
+ # Get current downsample factor
3413
+ current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
3414
+ current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
3415
+
3416
+ # Helper function to crop and downsample image
3417
+ def crop_and_downsample_image(image, y_start, y_end, x_start, x_end, factor):
3418
+ # Crop first
3171
3419
  if len(image.shape) == 2:
3172
- # Grayscale
3173
- return image[::factor, ::factor]
3420
+ cropped = image[y_start:y_end, x_start:x_end]
3174
3421
  elif len(image.shape) == 3:
3175
- # RGB/RGBA
3176
- return image[::factor, ::factor, :]
3422
+ cropped = image[y_start:y_end, x_start:x_end, :]
3423
+ else:
3424
+ cropped = image
3425
+
3426
+ # Then downsample if needed
3427
+ if factor == 1:
3428
+ return cropped
3429
+
3430
+ if len(cropped.shape) == 2:
3431
+ return cropped[::factor, ::factor]
3432
+ elif len(cropped.shape) == 3:
3433
+ return cropped[::factor, ::factor, :]
3177
3434
  else:
3178
- return image
3435
+ return cropped
3436
+
3437
+ min_height, min_width = self.original_dims
3438
+
3439
+ # Calculate the visible region in pixel coordinates
3440
+ x_min = max(0, int(np.floor(current_xlim[0] + 0.5)))
3441
+ x_max = min(min_width, int(np.ceil(current_xlim[1] + 0.5)))
3442
+ y_min = max(0, int(np.floor(current_ylim[1] + 0.5))) # Note: y is flipped
3443
+ y_max = min(min_height, int(np.ceil(current_ylim[0] + 0.5)))
3444
+
3445
+ box_len = int((x_max - x_min)/2)
3446
+ box_height = int((y_max - y_min)/2)
3447
+ x_min = max(0, x_min - box_len)
3448
+ x_max = min(self.shape[2], x_max + box_len)
3449
+ y_min = max(0, y_min - box_height)
3450
+ y_max = min(self.shape[1], y_max + box_height)
3451
+
3452
+ size = (x_max - x_min) * (y_max - y_min)
3453
+ val = max(2, int(np.ceil(size/(2000 * 2000))))
3454
+ self.validate_downsample_input(text = val, update = False)
3455
+
3456
+ downsample_factor = self.downsample_factor
3457
+
3458
+ # Add some padding to avoid edge artifacts during pan/zoom
3459
+ padding = max(10, downsample_factor * 2)
3460
+ x_min_padded = max(0, x_min - padding)
3461
+ x_max_padded = min(min_width, x_max + padding)
3462
+ y_min_padded = max(0, y_min - padding)
3463
+ y_max_padded = min(min_height, y_max + padding)
3464
+
3465
+ display_height = (y_max_padded - y_min_padded) // downsample_factor
3466
+ display_width = (x_max_padded - x_min_padded) // downsample_factor
3179
3467
 
3180
3468
  # Create a blank RGBA composite to accumulate all channels (using display dimensions)
3181
3469
  composite = np.zeros((display_height, display_width, 4), dtype=np.float32)
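
Both blend_layers variants in this release implement the standard alpha "over" operator on straight (non-premultiplied) RGBA data: alpha_out = a_top + a_bottom * (1 - a_top), and the output color is the alpha-weighted mix of the two colors divided by alpha_out. A compact NumPy sketch of that math, independent of the viewer classes (array shapes invented for illustration):

```python
import numpy as np

def blend_over(base, overlay):
    """Composite `overlay` over `base`; both are float RGBA in [0, 1]."""
    a_top = overlay[..., 3:4]
    a_bot = base[..., 3:4]
    a_out = a_top + a_bot * (1.0 - a_top)

    rgb_out = np.zeros_like(base[..., :3])
    mask = a_out[..., 0] > 0
    rgb_out[mask] = (overlay[mask, :3] * a_top[mask] +
                     base[mask, :3] * a_bot[mask] * (1.0 - a_top[mask])) / a_out[mask]

    return np.concatenate([rgb_out, a_out], axis=-1)

base = np.zeros((4, 4, 4), dtype=np.float32)          # transparent canvas
overlay = np.ones((4, 4, 4), dtype=np.float32) * 0.7  # uniform translucent layer
print(blend_over(base, overlay)[0, 0])                # -> [0.7 0.7 0.7 0.7]
```
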
@@ -3195,8 +3483,10 @@ class ImageViewerWindow(QMainWindow):
3195
3483
  else:
3196
3484
  current_image = self.channel_data[channel]
3197
3485
 
3198
- # Downsample the image for rendering
3199
- display_image = downsample_image(current_image, downsample_factor)
3486
+ # Crop and downsample the image for rendering
3487
+ display_image = crop_and_downsample_image(
3488
+ current_image, y_min_padded, y_max_padded,
3489
+ x_min_padded, x_max_padded, downsample_factor)
3200
3490
 
3201
3491
  if is_rgb and self.channel_data[channel].shape[-1] == 3:
3202
3492
  # RGB image - convert to RGBA and blend
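
The call above replaces whole-image downsampling with crop-then-downsample: the array is sliced to the padded visible window first, and only that window is decimated by integer striding ([::factor]). A standalone NumPy sketch of that helper (bounds and factor invented for illustration):

```python
import numpy as np

def crop_and_downsample(image, y_start, y_end, x_start, x_end, factor):
    # Crop to the visible window first (handles 2-D grayscale or 3-D RGB(A))
    if image.ndim == 2:
        cropped = image[y_start:y_end, x_start:x_end]
    elif image.ndim == 3:
        cropped = image[y_start:y_end, x_start:x_end, :]
    else:
        return image
    # Then decimate by simple striding; factor == 1 means no downsampling
    return cropped if factor == 1 else cropped[::factor, ::factor]

frame = np.arange(1000 * 1200).reshape(1000, 1200)
view = crop_and_downsample(frame, 200, 600, 300, 900, factor=4)
print(view.shape)  # (100, 150): a 400x600 window reduced 4x in each axis
```
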
@@ -3251,12 +3541,12 @@ class ImageViewerWindow(QMainWindow):
3251
3541
 
3252
3542
  # Add highlight overlays if they exist (with downsampling)
3253
3543
  if self.mini_overlay and self.highlight and self.machine_window is None:
3254
- display_overlay = downsample_image(self.mini_overlay_data, downsample_factor)
3544
+ display_overlay = crop_and_downsample_image(self.mini_overlay_data, y_min_padded, y_max_padded, x_min_padded, x_max_padded, downsample_factor)
3255
3545
  highlight_rgba = self.create_highlight_rgba(display_overlay, yellow=True)
3256
3546
  composite = self.blend_layers(composite, highlight_rgba)
3257
3547
  elif self.highlight_overlay is not None and self.highlight:
3258
3548
  highlight_slice = self.highlight_overlay[self.current_slice]
3259
- display_highlight = downsample_image(highlight_slice, downsample_factor)
3549
+ display_highlight = crop_and_downsample_image(highlight_slice, y_min_padded, y_max_padded, x_min_padded, x_max_padded, downsample_factor)
3260
3550
  if self.machine_window is None:
3261
3551
  highlight_rgba = self.create_highlight_rgba(display_highlight, yellow=True)
3262
3552
  else:
@@ -3266,7 +3556,6 @@ class ImageViewerWindow(QMainWindow):
3266
3556
  # Convert to 0-255 range for display
3267
3557
  return (composite * 255).astype(np.uint8)
3268
3558
 
3269
-
3270
3559
  def apply_machine_colormap(self, image):
3271
3560
  """Apply the special machine window colormap for channel 2"""
3272
3561
  rgba = np.zeros((*image.shape, 4), dtype=np.float32)
@@ -3368,18 +3657,6 @@ class ImageViewerWindow(QMainWindow):
3368
3657
  height *= downsample_factor
3369
3658
  width *= downsample_factor
3370
3659
 
3371
- def crop_image(image, y_start, y_end, x_start, x_end):
3372
- # Crop
3373
- if len(image.shape) == 2:
3374
- cropped = image[y_start:y_end, x_start:x_end]
3375
- elif len(image.shape) == 3:
3376
- cropped = image[y_start:y_end, x_start:x_end, :]
3377
- else:
3378
- cropped = image
3379
-
3380
- return cropped
3381
-
3382
-
3383
3660
  downsample_factor = self.downsample_factor
3384
3661
  min_height, min_width = self.original_dims
3385
3662
 
@@ -3389,8 +3666,8 @@ class ImageViewerWindow(QMainWindow):
3389
3666
  y_min = max(0, int(np.floor(current_ylim[1] + 0.5))) # Note: y is flipped
3390
3667
  y_max = min(min_height, int(np.ceil(current_ylim[0] + 0.5)))
3391
3668
 
3392
- box_len = x_max - x_min
3393
- box_height = y_max - y_min
3669
+ box_len = int((x_max - x_min)/2)
3670
+ box_height = int((y_max - y_min)/2)
3394
3671
  x_min = max(0, x_min - box_len)
3395
3672
  x_max = min(self.shape[2], x_max + box_len)
3396
3673
  y_min = max(0, y_min - box_height)
@@ -3418,11 +3695,6 @@ class ImageViewerWindow(QMainWindow):
3418
3695
  x_max_padded_ds = min(self.pan_background_image.shape[1], x_max_ds + padding_ds)
3419
3696
  y_min_padded_ds = max(0, y_min_ds - padding_ds)
3420
3697
  y_max_padded_ds = min(self.pan_background_image.shape[0], y_max_ds + padding_ds)
3421
-
3422
- # Crop using downsampled coordinates
3423
- display_image = crop_image(
3424
- self.pan_background_image, y_min_padded_ds, y_max_padded_ds,
3425
- x_min_padded_ds, x_max_padded_ds)
3426
3698
 
3427
3699
  # Calculate the extent for the cropped region (in original coordinates)
3428
3700
  crop_extent = (x_min_padded - 0.5, x_max_padded - 0.5,
@@ -3430,7 +3702,7 @@ class ImageViewerWindow(QMainWindow):
3430
3702
 
3431
3703
  # Display the composite background with preserved zoom
3432
3704
  # Use extent to stretch downsampled image back to original coordinate space
3433
- self.ax.imshow(display_image,
3705
+ self.ax.imshow(self.pan_background_image,
3434
3706
  extent=crop_extent,
3435
3707
  aspect='equal')
3436
3708
 
@@ -3490,11 +3762,17 @@ class ImageViewerWindow(QMainWindow):
3490
3762
  # Update display to show only background
3491
3763
  current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
3492
3764
  current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
3493
- # Update display to show only background
3494
- if self.pan_background_image is not None:
3765
+ #self.update_display(preserve_zoom = (current_xlim, current_ylim))
3766
+ self.setEnabled(False)
3767
+
3768
+ try:
3769
+ self.create_pan_background()
3770
+ current_xlim = self.ax.get_xlim()
3771
+ current_ylim = self.ax.get_ylim()
3495
3772
  self.update_display_pan_mode(current_xlim, current_ylim)
3496
- else:
3497
- self.update_display(preserve_zoom=(current_xlim, current_ylim))
3773
+ finally:
3774
+ # Re-enable the widget when done
3775
+ self.setEnabled(True)
3498
3776
 
3499
3777
  self.panning = False
3500
3778
  self.pan_start = None
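
The pan-release handler above now disables the window while the new background composite is built and re-enables it in a finally block, so the UI cannot be left locked if compositing throws. A minimal sketch of that guard pattern (slow_rebuild is a stand-in for create_pan_background; the Qt event loop is omitted since only the guard matters here):

```python
import time
from PyQt6.QtWidgets import QApplication, QPushButton

def slow_rebuild():
    time.sleep(0.5)  # stand-in for an expensive composite rebuild

app = QApplication([])
button = QPushButton("Pan target")
button.show()

button.setEnabled(False)          # block input while rebuilding
try:
    slow_rebuild()
finally:
    button.setEnabled(True)       # always re-enable, even on failure
```
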
@@ -3561,8 +3839,9 @@ class ImageViewerWindow(QMainWindow):
3561
3839
  self.create_highlight_overlay(node_indices=self.clicked_values['nodes'])
3562
3840
 
3563
3841
  # Try to highlight the last selected value in tables
3564
- if self.clicked_values['nodes']:
3842
+ if len(self.clicked_values['nodes']) == 1:
3565
3843
  self.highlight_value_in_tables(self.clicked_values['nodes'][-1])
3844
+ self.handle_info('node')
3566
3845
 
3567
3846
  elif self.active_channel == 1: # Edges
3568
3847
  if not ctrl_pressed:
@@ -3577,8 +3856,9 @@ class ImageViewerWindow(QMainWindow):
3577
3856
  self.create_highlight_overlay(edge_indices=self.clicked_values['edges'])
3578
3857
 
3579
3858
  # Try to highlight the last selected value in tables
3580
- if self.clicked_values['edges']:
3859
+ if len(self.clicked_values['edges']):
3581
3860
  self.highlight_value_in_tables(self.clicked_values['edges'][-1])
3861
+ self.handle_info('edge')
3582
3862
 
3583
3863
  elif not self.selecting and self.selection_start: # If we had a click but never started selection
3584
3864
  # Handle as a normal click
@@ -4080,7 +4360,7 @@ class ImageViewerWindow(QMainWindow):
4080
4360
  gennodes_action.triggered.connect(self.show_gennodes_dialog)
4081
4361
  branch_action = generate_menu.addAction("Label Branches")
4082
4362
  branch_action.triggered.connect(lambda: self.show_branch_dialog())
4083
- genvor_action = generate_menu.addAction("Generate Voronoi Diagram (From Node Centroids) - goes in Overlay2")
4363
+ genvor_action = generate_menu.addAction("Generate Voronoi Diagram - goes in Overlay2")
4084
4364
  genvor_action.triggered.connect(self.voronoi)
4085
4365
 
4086
4366
  modify_action = process_menu.addAction("Modify Network")
@@ -4102,8 +4382,6 @@ class ImageViewerWindow(QMainWindow):
4102
4382
  idoverlay_action.triggered.connect(self.show_idoverlay_dialog)
4103
4383
  coloroverlay_action = overlay_menu.addAction("Color Nodes (or Edges)")
4104
4384
  coloroverlay_action.triggered.connect(self.show_coloroverlay_dialog)
4105
- #searchoverlay_action = overlay_menu.addAction("Show Search Regions")
4106
- #searchoverlay_action.triggered.connect(self.show_search_dialog)
4107
4385
  shuffle_action = overlay_menu.addAction("Shuffle")
4108
4386
  shuffle_action.triggered.connect(self.show_shuffle_dialog)
4109
4387
  arbitrary_action = image_menu.addAction("Select Objects")
@@ -4686,18 +4964,7 @@ class ImageViewerWindow(QMainWindow):
4686
4964
 
4687
4965
  try:
4688
4966
 
4689
- if my_network.nodes is not None:
4690
- shape = my_network.nodes.shape
4691
- else:
4692
- shape = None
4693
-
4694
- if my_network.node_centroids is None:
4695
- self.show_centroid_dialog()
4696
- if my_network.node_centroids is None:
4697
- print("Node centroids must be set")
4698
- return
4699
-
4700
- array = pxt.create_voronoi_3d_kdtree(my_network.node_centroids, shape)
4967
+ array = sdl.smart_dilate(self.channel_data[self.active_channel], dilate_xy = np.max(self.shape), dilate_z = np.max(self.shape), use_dt_dil_amount = np.max(self.shape), fast_dil = False)
4701
4968
  self.load_channel(3, array, True)
4702
4969
 
4703
4970
  except Exception as e:
@@ -4753,10 +5020,6 @@ class ImageViewerWindow(QMainWindow):
4753
5020
  dialog = ColorOverlayDialog(self)
4754
5021
  dialog.exec()
4755
5022
 
4756
- def show_search_dialog(self):
4757
- """Show the search dialog"""
4758
- dialog = SearchOverlayDialog(self)
4759
- dialog.exec()
4760
5023
 
4761
5024
  def show_shuffle_dialog(self):
4762
5025
  """Show the shuffle dialog"""
@@ -5448,12 +5711,8 @@ class ImageViewerWindow(QMainWindow):
5448
5711
  if all(not btn.isEnabled() for btn in self.channel_buttons[:channel_index]):
5449
5712
  self.set_active_channel(channel_index)
5450
5713
 
5451
- if self.chan_load:
5452
- if not self.channel_buttons[channel_index].isChecked():
5453
- self.channel_buttons[channel_index].click()
5454
- else:
5455
- if self.channel_buttons[channel_index].isChecked():
5456
- self.channel_buttons[channel_index].click()
5714
+ if not self.channel_buttons[channel_index].isChecked():
5715
+ self.channel_buttons[channel_index].click()
5457
5716
 
5458
5717
  self.min_max[channel_index][0] = np.min(self.channel_data[channel_index])
5459
5718
  self.min_max[channel_index][1] = np.max(self.channel_data[channel_index])
@@ -5559,7 +5818,7 @@ class ImageViewerWindow(QMainWindow):
5559
5818
 
5560
5819
  if update:
5561
5820
  # Update display
5562
- self.update_display()
5821
+ self.update_display(preserve_zoom = (self.ax.get_xlim(), self.ax.get_ylim()))
5563
5822
 
5564
5823
  def reset(self, nodes = False, network = False, xy_scale = 1, z_scale = 1, edges = False, search_region = False, network_overlay = False, id_overlay = False, update = True):
5565
5824
  """Method to flexibly reset certain fields to free up the RAM as desired"""
@@ -5691,6 +5950,9 @@ class ImageViewerWindow(QMainWindow):
5691
5950
  def toggle_channel(self, channel_index):
5692
5951
  """Toggle visibility of a channel."""
5693
5952
  # Store current zoom settings before toggling
5953
+ if self.pan_mode:
5954
+ self.pan_button.click()
5955
+
5694
5956
  current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
5695
5957
  current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
5696
5958
 
@@ -5763,9 +6025,9 @@ class ImageViewerWindow(QMainWindow):
5763
6025
  self.measurement_artists = []
5764
6026
  self.axes_initialized = False
5765
6027
  self.original_dims = None
5766
-
6028
+
5767
6029
  # Handle special states (pan, static background)
5768
- if self.pan_background_image is not None:
6030
+ if self.pan_background_image is not None and not self.pan_mode:
5769
6031
  self.channel_visible = self.pre_pan_channel_state.copy()
5770
6032
  self.is_pan_preview = False
5771
6033
  self.pan_background_image = None
@@ -5774,7 +6036,6 @@ class ImageViewerWindow(QMainWindow):
5774
6036
  self.resume = False
5775
6037
  if self.prev_down != self.downsample_factor:
5776
6038
  self.validate_downsample_input(text = self.prev_down)
5777
- return
5778
6039
 
5779
6040
  if self.static_background is not None:
5780
6041
  # Your existing virtual strokes conversion logic
@@ -7239,7 +7500,7 @@ class ColorDialog(QDialog):
7239
7500
  self.parent().base_colors[i] = new_color
7240
7501
 
7241
7502
  # Update the display
7242
- self.parent().update_display()
7503
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7243
7504
  self.accept()
7244
7505
 
7245
7506
  class ArbitraryDialog(QDialog):
@@ -7643,7 +7904,7 @@ class NetOverlayDialog(QDialog):
7643
7904
 
7644
7905
  my_network.network_overlay = my_network.draw_network()
7645
7906
 
7646
- self.parent().load_channel(2, channel_data = my_network.network_overlay, data = True)
7907
+ self.parent().load_channel(2, channel_data = my_network.network_overlay, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7647
7908
 
7648
7909
  self.accept()
7649
7910
 
@@ -7651,34 +7912,6 @@ class NetOverlayDialog(QDialog):
7651
7912
 
7652
7913
  print(f"Error with Overlay Generation: {e}")
7653
7914
 
7654
- class SearchOverlayDialog(QDialog):
7655
-
7656
- def __init__(self, parent=None):
7657
-
7658
- super().__init__(parent)
7659
- self.setWindowTitle("Generate Search Region Overlay?")
7660
- self.setModal(True)
7661
-
7662
- layout = QFormLayout(self)
7663
-
7664
- # Add Run button
7665
- run_button = QPushButton("Generate (Will go to Overlay 2)")
7666
- run_button.clicked.connect(self.searchoverlay)
7667
- layout.addWidget(run_button)
7668
-
7669
- def searchoverlay(self):
7670
-
7671
- try:
7672
-
7673
- my_network.id_overlay = my_network.search_region
7674
-
7675
- self.parent().load_channel(3, channel_data = my_network.search_region, data = True)
7676
-
7677
- self.accept()
7678
-
7679
- except Exception as e:
7680
-
7681
- print(f"Error with Overlay Generation: {e}")
7682
7915
 
7683
7916
  class IdOverlayDialog(QDialog):
7684
7917
 
@@ -7732,7 +7965,7 @@ class IdOverlayDialog(QDialog):
7732
7965
  my_network.id_overlay = my_network.draw_edge_indices()
7733
7966
 
7734
7967
 
7735
- self.parent().load_channel(3, channel_data = my_network.id_overlay, data = True)
7968
+ self.parent().load_channel(3, channel_data = my_network.id_overlay, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7736
7969
 
7737
7970
  self.accept()
7738
7971
 
@@ -7781,7 +8014,7 @@ class ColorOverlayDialog(QDialog):
7781
8014
  self.parent().format_for_upperright_table(legend, f'{self.sort} Id', f'Encoding Val: {self.sort}', 'Legend')
7782
8015
 
7783
8016
 
7784
- self.parent().load_channel(3, channel_data = result, data = True)
8017
+ self.parent().load_channel(3, channel_data = result, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7785
8018
 
7786
8019
  self.accept()
7787
8020
 
@@ -7849,7 +8082,7 @@ class ShuffleDialog(QDialog):
7849
8082
  except:
7850
8083
  self.parent().highlight_overay = None
7851
8084
  else:
7852
- self.parent().load_channel(accepted_mode, channel_data = target_data, data = True)
8085
+ self.parent().load_channel(accepted_mode, channel_data = target_data, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7853
8086
  except:
7854
8087
  pass
7855
8088
 
@@ -7861,14 +8094,14 @@ class ShuffleDialog(QDialog):
7861
8094
  except:
7862
8095
  self.parent().highlight_overlay = None
7863
8096
  else:
7864
- self.parent().load_channel(accepted_target, channel_data = active_data, data = True)
8097
+ self.parent().load_channel(accepted_target, channel_data = active_data, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7865
8098
  except:
7866
8099
  pass
7867
8100
 
7868
8101
 
7869
8102
 
7870
8103
 
7871
- self.parent().update_display()
8104
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
7872
8105
 
7873
8106
  self.accept()
7874
8107
 
@@ -8476,10 +8709,10 @@ class NearNeighDialog(QDialog):
8476
8709
  avg, output, quant_overlay, pred = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, heatmap = heatmap, threed = threed, quant = quant, centroids = centroids)
8477
8710
  else:
8478
8711
  avg, output, overlay, quant_overlay, pred = my_network.nearest_neighbors_avg(root, targ, my_network.xy_scale, my_network.z_scale, num = num, heatmap = heatmap, threed = threed, numpy = True, quant = quant, centroids = centroids)
8479
- self.parent().load_channel(3, overlay, data = True)
8712
+ self.parent().load_channel(3, overlay, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
8480
8713
 
8481
8714
  if quant_overlay is not None:
8482
- self.parent().load_channel(2, quant_overlay, data = True)
8715
+ self.parent().load_channel(2, quant_overlay, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
8483
8716
 
8484
8717
  avg = {header:avg}
8485
8718
 
@@ -8595,6 +8828,8 @@ class NeighborIdentityDialog(QDialog):
8595
8828
 
8596
8829
  self.accept()
8597
8830
  except Exception as e:
8831
+ import traceback
8832
+ print(traceback.format_exc())
8598
8833
  print(f"Error: {e}")
8599
8834
 
8600
8835
 
@@ -8826,7 +9061,7 @@ class HeatmapDialog(QDialog):
8826
9061
  else:
8827
9062
 
8828
9063
  heat_dict, overlay = my_network.community_heatmap(num_nodes = nodecount, is3d = is3d, numpy = True)
8829
- self.parent().load_channel(3, overlay, data = True)
9064
+ self.parent().load_channel(3, overlay, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
8830
9065
 
8831
9066
 
8832
9067
  self.parent().format_for_upperright_table(heat_dict, metric='Community', value='ln(Predicted Community Nodecount/Actual)', title="Community Heatmap")
@@ -9183,7 +9418,7 @@ class DegreeDialog(QDialog):
9183
9418
  nodes = n3d.upsample_with_padding(nodes, down_factor, original_shape)
9184
9419
 
9185
9420
  if accepted_mode > 0:
9186
- self.parent().load_channel(3, channel_data = nodes, data = True)
9421
+ self.parent().load_channel(3, channel_data = nodes, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9187
9422
 
9188
9423
 
9189
9424
  self.accept()
@@ -9246,7 +9481,7 @@ class HubDialog(QDialog):
9246
9481
 
9247
9482
  if img is not None:
9248
9483
 
9249
- self.parent().load_channel(3, channel_data = img, data = True)
9484
+ self.parent().load_channel(3, channel_data = img, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9250
9485
 
9251
9486
 
9252
9487
  self.accept()
@@ -9305,7 +9540,7 @@ class MotherDialog(QDialog):
9305
9540
  G = my_network.isolate_mothers(self, ret_nodes = True, called = True)
9306
9541
  else:
9307
9542
  G, result = my_network.isolate_mothers(self, ret_nodes = False, called = True)
9308
- self.parent().load_channel(2, channel_data = result, data = True)
9543
+ self.parent().load_channel(2, channel_data = result, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9309
9544
 
9310
9545
  degree_dict = {}
9311
9546
 
@@ -9380,8 +9615,7 @@ class CodeDialog(QDialog):
9380
9615
 
9381
9616
  self.parent().format_for_upperright_table(output, f'{self.sort} Id', f'Encoding Val: {self.sort}', 'Legend')
9382
9617
 
9383
-
9384
- self.parent().load_channel(3, image, True)
9618
+ self.parent().load_channel(3, image, True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9385
9619
  self.accept()
9386
9620
 
9387
9621
  except Exception as e:
@@ -9794,7 +10028,7 @@ class BinarizeDialog(QDialog):
9794
10028
  # Update the corresponding property in my_network
9795
10029
  setattr(my_network, network_properties[self.parent().active_channel], result)
9796
10030
 
9797
- self.parent().update_display()
10031
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9798
10032
  self.accept()
9799
10033
 
9800
10034
  except Exception as e:
@@ -9846,7 +10080,7 @@ class LabelDialog(QDialog):
9846
10080
  # Update the corresponding property in my_network
9847
10081
  setattr(my_network, network_properties[self.parent().active_channel], result)
9848
10082
 
9849
- self.parent().update_display()
10083
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9850
10084
  self.accept()
9851
10085
 
9852
10086
  except Exception as e:
@@ -9929,7 +10163,7 @@ class SLabelDialog(QDialog):
9929
10163
 
9930
10164
  binary_array = binary_array * label_array
9931
10165
 
9932
- self.parent().load_channel(accepted_target, binary_array, True)
10166
+ self.parent().load_channel(accepted_target, binary_array, True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
9933
10167
 
9934
10168
  self.accept()
9935
10169
 
@@ -10613,6 +10847,10 @@ class MachineWindow(QMainWindow):
10613
10847
 
10614
10848
  def start_segmentation(self):
10615
10849
 
10850
+ if self.parent().pan_mode:
10851
+ self.parent().pan_button.click()
10852
+
10853
+
10616
10854
  self.parent().static_background = None
10617
10855
 
10618
10856
  self.kill_segmentation()
@@ -11336,7 +11574,7 @@ class SmartDilateDialog(QDialog):
11336
11574
 
11337
11575
  result = sdl.smart_dilate(active_data, dilate_xy, dilate_z, GPU = GPU, predownsample = down_factor, fast_dil = predt, use_dt_dil_amount = amount, xy_scale = xy_scale, z_scale = z_scale)
11338
11576
 
11339
- self.parent().load_channel(self.parent().active_channel, result, True)
11577
+ self.parent().load_channel(self.parent().active_channel, result, True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
11340
11578
  self.accept()
11341
11579
 
11342
11580
 
@@ -11477,7 +11715,7 @@ class ErodeDialog(QDialog):
11477
11715
 
11478
11716
  # Add mode selection dropdown
11479
11717
  self.mode_selector = QComboBox()
11480
- self.mode_selector.addItems(["Pseudo3D Binary Kernels (For Fast, small erosions)", "Distance Transform-Based (Slower but more accurate at larger dilations)"])
11718
+ self.mode_selector.addItems(["Pseudo3D Binary Kernels (For Fast, small erosions)", "Distance Transform-Based (Slower but more accurate at larger dilations)", "Preserve Labels (Slower)"])
11481
11719
  self.mode_selector.setCurrentIndex(0) # Default to Mode 1
11482
11720
  layout.addRow("Execution Mode:", self.mode_selector)
11483
11721
 
@@ -11513,6 +11751,12 @@ class ErodeDialog(QDialog):
11513
11751
  z_scale = 1
11514
11752
 
11515
11753
  mode = self.mode_selector.currentIndex()
11754
+
11755
+ if mode == 2:
11756
+ mode = 1
11757
+ preserve_labels = True
11758
+ else:
11759
+ preserve_labels = False
11516
11760
 
11517
11761
  # Get the active channel data from parent
11518
11762
  active_data = self.parent().channel_data[self.parent().active_channel]
@@ -11525,14 +11769,13 @@ class ErodeDialog(QDialog):
11525
11769
  amount,
11526
11770
  xy_scale = xy_scale,
11527
11771
  z_scale = z_scale,
11528
- mode = mode
11772
+ mode = mode,
11773
+ preserve_labels = preserve_labels
11529
11774
  )
11530
11775
 
11531
11776
 
11532
- self.parent().load_channel(self.parent().active_channel, result, True)
11533
-
11777
+ self.parent().load_channel(self.parent().active_channel, result, True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
11534
11778
 
11535
- self.parent().update_display(preserve_zoom=(current_xlim, current_ylim))
11536
11779
  self.accept()
11537
11780
 
11538
11781
  except Exception as e:
@@ -11598,7 +11841,7 @@ class HoleDialog(QDialog):
11598
11841
  self.parent().load_channel(3, active_data - result, True)
11599
11842
 
11600
11843
 
11601
- self.parent().update_display()
11844
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
11602
11845
  self.accept()
11603
11846
 
11604
11847
  except Exception as e:
@@ -11676,9 +11919,9 @@ class MaskDialog(QDialog):
11676
11919
 
11677
11920
 
11678
11921
  # Update both the display data and the network object
11679
- self.parent().load_channel(output_target, channel_data = result, data = True)
11922
+ self.parent().load_channel(output_target, channel_data = result, data = True,)
11680
11923
 
11681
- self.parent().update_display()
11924
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
11682
11925
 
11683
11926
  self.accept()
11684
11927
 
@@ -11911,7 +12154,7 @@ class TypeDialog(QDialog):
11911
12154
 
11912
12155
  active_data = active_data.astype(np.float64)
11913
12156
 
11914
- self.parent().load_channel(self.active_chan, active_data, True)
12157
+ self.parent().load_channel(self.active_chan, active_data, True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
11915
12158
 
11916
12159
 
11917
12160
  print(f"Channel {self.active_chan}) dtype now: {self.parent().channel_data[self.active_chan].dtype}")
@@ -11972,6 +12215,8 @@ class SkeletonizeDialog(QDialog):
11972
12215
 
11973
12216
  if remove > 0:
11974
12217
  result = n3d.remove_branches_new(result, remove)
12218
+ result = n3d.dilate_3D(result, 3, 3, 3)
12219
+ result = n3d.skeletonize(result)
11975
12220
 
11976
12221
 
11977
12222
  # Update both the display data and the network object
@@ -11981,7 +12226,7 @@ class SkeletonizeDialog(QDialog):
11981
12226
  # Update the corresponding property in my_network
11982
12227
  setattr(my_network, network_properties[self.parent().active_channel], result)
11983
12228
 
11984
- self.parent().update_display()
12229
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
11985
12230
  self.accept()
11986
12231
 
11987
12232
  except Exception as e:
@@ -12012,7 +12257,7 @@ class DistanceDialog(QDialog):
12012
12257
 
12013
12258
  data = sdl.compute_distance_transform_distance(data, sampling = [my_network.z_scale, my_network.xy_scale, my_network.xy_scale])
12014
12259
 
12015
- self.parent().load_channel(self.parent().active_channel, data, data = True)
12260
+ self.parent().load_channel(self.parent().active_channel, data, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
12016
12261
 
12017
12262
  except Exception as e:
12018
12263
 
@@ -12050,7 +12295,7 @@ class GrayWaterDialog(QDialog):
12050
12295
 
12051
12296
  data = n3d.gray_watershed(data, min_peak_distance, min_intensity)
12052
12297
 
12053
- self.parent().load_channel(self.parent().active_channel, data, data = True)
12298
+ self.parent().load_channel(self.parent().active_channel, data, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
12054
12299
 
12055
12300
  self.accept()
12056
12301
 
@@ -12175,7 +12420,7 @@ class WatershedDialog(QDialog):
12175
12420
  # Update the corresponding property in my_network
12176
12421
  setattr(my_network, network_properties[self.parent().active_channel], result)
12177
12422
 
12178
- self.parent().update_display()
12423
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
12179
12424
  self.accept()
12180
12425
 
12181
12426
  except Exception as e:
@@ -12228,7 +12473,7 @@ class InvertDialog(QDialog):
12228
12473
  # Update the corresponding property in my_network
12229
12474
  setattr(my_network, network_properties[self.parent().active_channel], result)
12230
12475
 
12231
- self.parent().update_display()
12476
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
12232
12477
  self.accept()
12233
12478
 
12234
12479
  except Exception as e:
@@ -12272,7 +12517,7 @@ class ZDialog(QDialog):
12272
12517
  for i in range(len(self.parent().channel_data)):
12273
12518
  try:
12274
12519
  self.parent().channel_data[i] = n3d.z_project(self.parent().channel_data[i], mode)
12275
- self.parent().load_channel(i, self.parent().channel_data[i], True)
12520
+ self.parent().load_channel(i, self.parent().channel_data[i], True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
12276
12521
  except:
12277
12522
  pass
12278
12523
 
@@ -12775,7 +13020,7 @@ class BranchDialog(QDialog):
12775
13020
 
12776
13021
  self.parent().load_channel(1, channel_data = output, data = True)
12777
13022
 
12778
- self.parent().update_display()
13023
+ self.parent().update_display(preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
12779
13024
  self.accept()
12780
13025
 
12781
13026
  except Exception as e:
@@ -13155,6 +13400,11 @@ class CentroidDialog(QDialog):
13155
13400
  self.mode_selector.setCurrentIndex(0) # Default to Mode 1
13156
13401
  layout.addRow("Execution Mode:", self.mode_selector)
13157
13402
 
13403
+ self.ignore_empty = QPushButton("Skip ID-less?")
13404
+ self.ignore_empty.setCheckable(True)
13405
+ self.ignore_empty.setChecked(False)
13406
+ layout.addRow("Skip Node Centroids Without Identity Property?:", self.ignore_empty)
13407
+
13158
13408
  # Add Run button
13159
13409
  run_button = QPushButton("Run Calculate Centroids")
13160
13410
  run_button.clicked.connect(self.run_centroids)
@@ -13167,6 +13417,7 @@ class CentroidDialog(QDialog):
13167
13417
  print("Calculating centroids...")
13168
13418
 
13169
13419
  chan = self.mode_selector.currentIndex()
13420
+ ignore_empty = self.ignore_empty.isChecked()
13170
13421
 
13171
13422
  # Get directory (None if empty)
13172
13423
  directory = self.directory.text() if self.directory.text() else None
@@ -13227,6 +13478,12 @@ class CentroidDialog(QDialog):
13227
13478
  except Exception as e:
13228
13479
  print(f"Error loading edge centroid table: {e}")
13229
13480
 
13481
+ if ignore_empty:
13482
+ try:
13483
+ my_network.remove_ids()
13484
+ self.parent().format_for_upperright_table(my_network.node_centroids, 'NodeID', ['Z', 'Y', 'X'], 'Node Centroids')
13485
+ except:
13486
+ pass
13230
13487
 
13231
13488
  self.parent().update_display()
13232
13489
  self.accept()