nettracer3d 0.7.2__py3-none-any.whl → 0.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,611 @@
+ import numpy as np
+ #try:
+ #    import torch
+ #except:
+ #    pass
+ import cupy as cp
+ import cupyx.scipy.ndimage as cpx
+ #try:
+ #    from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
+ #except:
+ #    pass
+ import concurrent.futures
+ from concurrent.futures import ThreadPoolExecutor
+ import threading
+ from scipy import ndimage
+ import multiprocessing
+ from sklearn.ensemble import RandomForestClassifier
+ from collections import defaultdict
+
+ class InteractiveSegmenter:
+     def __init__(self, image_3d):
+         image_3d = cp.asarray(image_3d)
+         self.image_3d = image_3d
+         self.patterns = []
+
+         self.model = RandomForestClassifier(
+             n_estimators=100,
+             n_jobs=-1,
+             max_depth=None
+         )
+
+         self.feature_cache = None
+         self.lock = threading.Lock()
+         self._currently_segmenting = None
+
+         # Current position attributes
+         self.current_z = None
+         self.current_x = None
+         self.current_y = None
+
+         self.realtimechunks = None
+         self.current_speed = False
+
+         # Track whether we are using 2D or 3D segmentations
+         self.use_two = False
+         self.two_slices = []
+         self.speed = True
+         self.cur_gpu = False
+         self.map_slice = None
+         self.prev_z = None
+         self.previewing = False
+
+         # Flags to track processing state
+         self._currently_processing = False
+         self._skip_next_update = False
+         self._last_processed_slice = None
+         self.mem_lock = False
+
+         # Adjustable feature-map parameters
+         self.alphas = [1, 2, 4, 8]            # Gaussian smoothing sigmas
+         self.windows = 10                     # window size for local statistics
+         self.dogs = [(1, 2), (2, 4), (4, 8)]  # sigma pairs for difference-of-Gaussians
+         self.master_chunk = 49                # edge length of processing chunks
+
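Editorial note: per the RECORD hunk further down, this new file is presumably nettracer3d/segmenter_GPU.py. Everything below stays on the GPU as CuPy arrays until scikit-learn needs NumPy input. A minimal construction-and-tuning sketch, not part of the wheel (`vol` is a hypothetical volume and the import path is an assumption):

    import numpy as np
    from nettracer3d.segmenter_GPU import InteractiveSegmenter  # assumed module path

    vol = np.random.rand(64, 256, 256).astype(np.float32)  # stand-in 3D image
    seg = InteractiveSegmenter(vol)
    seg.alphas = [1, 2, 4]       # fewer Gaussian scales -> fewer feature channels
    seg.dogs = [(1, 2), (2, 4)]  # difference-of-Gaussian sigma pairs
    seg.master_chunk = 64        # larger chunks use more GPU memory per step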
+     def process_chunk(self, chunk_coords):
+         """Process a chunk, staying in CuPy as much as possible."""
+
+         if self.realtimechunks is None:
+             # chunk_coords is a flat bounding box: [z_min, z_max, y_min, y_max, x_min, x_max]
+             z_min, z_max = chunk_coords[0], chunk_coords[1]
+             y_min, y_max = chunk_coords[2], chunk_coords[3]
+             x_min, x_max = chunk_coords[4], chunk_coords[5]
+
+             # Build the coordinate grid directly on the GPU
+             z_range = cp.arange(z_min, z_max)
+             y_range = cp.arange(y_min, y_max)
+             x_range = cp.arange(x_min, x_max)
+
+             chunk_coords_array = cp.stack(cp.meshgrid(
+                 z_range, y_range, x_range, indexing='ij'
+             )).reshape(3, -1).T
+
+             # Keep as a CuPy array instead of converting to a list
+             chunk_coords_gpu = chunk_coords_array
+         else:
+             # chunk_coords is a list of (z, y, x) tuples; convert to CuPy once
+             chunk_coords_gpu = cp.array(chunk_coords)
+             z_coords = chunk_coords_gpu[:, 0]
+             y_coords = chunk_coords_gpu[:, 1]
+             x_coords = chunk_coords_gpu[:, 2]
+
+             z_min, z_max = cp.min(z_coords).item(), cp.max(z_coords).item()
+             y_min, y_max = cp.min(y_coords).item(), cp.max(y_coords).item()
+             x_min, x_max = cp.min(x_coords).item(), cp.max(x_coords).item()
+
+         # Extract the subarray covering this chunk
+         subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
+
+         # Compute features
+         if self.speed:
+             feature_map = self.compute_feature_maps_gpu(subarray)
+         else:
+             feature_map = self.compute_deep_feature_maps_gpu(subarray)
+
+         # Shift coordinates into the subarray's local frame
+         local_coords = chunk_coords_gpu.copy()
+         local_coords[:, 0] -= z_min
+         local_coords[:, 1] -= y_min
+         local_coords[:, 2] -= x_min
+
+         # Vectorized feature extraction
+         features_gpu = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
+
+         # sklearn's RandomForest predicts on the CPU, so features round-trip through NumPy
+         features_cpu = cp.asnumpy(features_gpu)
+         predictions = self.model.predict(features_cpu)
+
+         # Split coordinates by prediction, keeping them as CuPy arrays
+         pred_mask = cp.array(predictions, dtype=bool)
+         foreground_coords = chunk_coords_gpu[pred_mask]
+         background_coords = chunk_coords_gpu[~pred_mask]
+
+         return foreground_coords, background_coords
+
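Once the model has been trained via train_batch, a single bounding-box call looks like the following sketch (the bounds are arbitrary illustration values):

    fore, back = seg.process_chunk([0, 32, 0, 64, 0, 64])  # z, y, x min/max bounds
    # fore and back are CuPy (N, 3) arrays of voxel coordinates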
+     def compute_feature_maps_gpu(self, image_3d=None):
+         """Compute feature maps on the GPU with CuPy."""
+         import cupy as cp
+         import cupyx.scipy.ndimage as cupy_ndimage
+
+         features = []
+         if image_3d is None:
+             image_3d = self.image_3d  # already a CuPy array
+
+         original_shape = image_3d.shape
+
+         # Gaussian smoothing at multiple scales
+         for sigma in self.alphas:
+             smooth = cupy_ndimage.gaussian_filter(image_3d, sigma)
+             features.append(smooth)
+
+         # Difference of Gaussians (band-pass between scale pairs)
+         for (s1, s2) in self.dogs:
+             g1 = cupy_ndimage.gaussian_filter(image_3d, s1)
+             g2 = cupy_ndimage.gaussian_filter(image_3d, s2)
+             dog = g1 - g2
+             features.append(dog)
+
+         # First-order gradients via Sobel filters
+         gx = cupy_ndimage.sobel(image_3d, axis=2, mode='reflect')  # x direction
+         gy = cupy_ndimage.sobel(image_3d, axis=1, mode='reflect')  # y direction
+         gz = cupy_ndimage.sobel(image_3d, axis=0, mode='reflect')  # z direction
+
+         # Gradient magnitude
+         gradient_magnitude = cp.sqrt(gx**2 + gy**2 + gz**2)
+         features.append(gradient_magnitude)
+
+         # Verify shapes before stacking
+         for i, feat in enumerate(features):
+             if feat.shape != original_shape:
+                 feat_adjusted = cp.expand_dims(feat, axis=0)
+                 if feat_adjusted.shape != original_shape:
+                     raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
+                 features[i] = feat_adjusted
+
+         return cp.stack(features, axis=-1)
+
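With the defaults this fast path yields len(alphas) + len(dogs) + 1 = 4 + 3 + 1 = 8 channels, so the returned stack has shape (Z, Y, X, 8). A quick check, continuing the earlier sketch:

    fm = seg.compute_feature_maps_gpu()
    print(fm.shape)  # (64, 256, 256, 8) for the stand-in volume above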
+     def compute_deep_feature_maps_gpu(self, image_3d=None):
+         """Compute an extended feature set on the GPU."""
+         import cupy as cp
+         import cupyx.scipy.ndimage as cupy_ndimage
+
+         features = []
+         if image_3d is None:
+             image_3d = self.image_3d  # already a CuPy array
+         original_shape = image_3d.shape
+
+         # Gaussian smoothing at multiple scales
+         for sigma in self.alphas:
+             smooth = cupy_ndimage.gaussian_filter(image_3d, sigma)
+             features.append(smooth)
+
+         # Difference of Gaussians
+         for (s1, s2) in self.dogs:
+             g1 = cupy_ndimage.gaussian_filter(image_3d, s1)
+             g2 = cupy_ndimage.gaussian_filter(image_3d, s2)
+             dog = g1 - g2
+             features.append(dog)
+
+         # Local statistics via box-kernel convolution
+         window_size = self.windows
+         kernel = cp.ones((window_size, window_size, window_size)) / (window_size**3)
+
+         # Local mean
+         local_mean = cupy_ndimage.convolve(image_3d, kernel, mode='reflect')
+         features.append(local_mean)
+
+         # Local variance proxy: box-averaged squared deviation from the *global* mean
+         mean = cp.mean(image_3d)
+         local_var = cupy_ndimage.convolve((image_3d - mean)**2, kernel, mode='reflect')
+         features.append(local_var)
+
+         # First-order gradients via Sobel filters
+         gx = cupy_ndimage.sobel(image_3d, axis=2, mode='reflect')
+         gy = cupy_ndimage.sobel(image_3d, axis=1, mode='reflect')
+         gz = cupy_ndimage.sobel(image_3d, axis=0, mode='reflect')
+
+         # Gradient magnitude
+         gradient_magnitude = cp.sqrt(gx**2 + gy**2 + gz**2)
+         features.append(gradient_magnitude)
+
+         # Unmixed second-order derivatives
+         gxx = cupy_ndimage.sobel(gx, axis=2, mode='reflect')
+         gyy = cupy_ndimage.sobel(gy, axis=1, mode='reflect')
+         gzz = cupy_ndimage.sobel(gz, axis=0, mode='reflect')
+
+         # Laplacian (sum of the unmixed second derivatives)
+         laplacian = gxx + gyy + gzz
+         features.append(laplacian)
+
+         # Product of the unmixed second derivatives; note this equals the Hessian
+         # determinant only when the mixed partials vanish
+         hessian_det = gxx * gyy * gzz
+         features.append(hessian_det)
+
+         # Verify shapes before stacking
+         for i, feat in enumerate(features):
+             if feat.shape != original_shape:
+                 feat_adjusted = cp.expand_dims(feat, axis=0)
+                 if feat_adjusted.shape != original_shape:
+                     raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
+                 features[i] = feat_adjusted
+
+         return cp.stack(features, axis=-1)
+
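The deep path adds four channels (local mean, the variance proxy, Laplacian, second-derivative product) on top of the fast path's eight, giving a (Z, Y, X, 12) stack by default. If a true local variance were wanted, the identity Var(I) = E[I^2] - E[I]^2 over the same box kernel would be a drop-in; an editorial sketch, not in the wheel:

    # true local variance with the same kernel (local_sq is a hypothetical name)
    local_sq = cupy_ndimage.convolve(image_3d**2, kernel, mode='reflect')
    true_local_var = local_sq - local_mean**2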
+     def segment_volume(self, array, chunk_size=None, gpu=True):
+         """Segment a volume by processing chunks with vectorized chunk creation."""
+
+         array = cp.asarray(array)  # ensure CuPy array
+
+         self.realtimechunks = None
+         self.map_slice = None
+         chunk_size = self.master_chunk  # the chunk_size argument is currently overridden
+
+         # Round to the nearest multiple of 32 for better memory alignment
+         # (+16 so that the result actually rounds to nearest rather than biasing down)
+         chunk_size = ((chunk_size + 16) // 32) * 32
+
+         # Number of chunks in each dimension
+         z_chunks = (self.image_3d.shape[0] + chunk_size - 1) // chunk_size
+         y_chunks = (self.image_3d.shape[1] + chunk_size - 1) // chunk_size
+         x_chunks = (self.image_3d.shape[2] + chunk_size - 1) // chunk_size
+
+         # Start indices for all chunks at once, built on the GPU
+         chunk_starts = cp.array(cp.meshgrid(
+             cp.arange(z_chunks) * chunk_size,
+             cp.arange(y_chunks) * chunk_size,
+             cp.arange(x_chunks) * chunk_size,
+             indexing='ij'
+         )).reshape(3, -1).T
+
+         print("Segmenting chunks...")
+
+         for i, chunk_start_gpu in enumerate(chunk_starts):
+             # Convert to plain Python ints for slicing
+             z_start = int(chunk_start_gpu[0])
+             y_start = int(chunk_start_gpu[1])
+             x_start = int(chunk_start_gpu[2])
+
+             z_end = min(z_start + chunk_size, self.image_3d.shape[0])
+             y_end = min(y_start + chunk_size, self.image_3d.shape[1])
+             x_end = min(x_start + chunk_size, self.image_3d.shape[2])
+
+             coords = [z_start, z_end, y_start, y_end, x_start, x_end]
+
+             # Process the chunk; returns CuPy coordinate arrays
+             fore_coords, _ = self.process_chunk(coords)
+
+             if len(fore_coords) > 0:
+                 # Direct fancy indexing with CuPy arrays
+                 array[fore_coords[:, 0], fore_coords[:, 1], fore_coords[:, 2]] = 255
+
+             print(f"Processed {i + 1}/{len(chunk_starts)} chunks")
+
+         # Clean up GPU memory
+         cp.get_default_memory_pool().free_all_blocks()
+
+         # Convert to NumPy only at the very end
+         return cp.asnumpy(array)
+
+
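After train_batch, the batch path paints foreground into an output buffer and hands back NumPy. A sketch, continuing the hypothetical vol/seg from earlier:

    out = np.zeros_like(vol, dtype=np.uint8)
    mask = seg.segment_volume(out)  # NumPy array with foreground voxels set to 255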
+     def update_position(self, z=None, x=None, y=None):
+         """Update the current position for chunk prioritization, with safeguards."""
+
+         # Skip this update if flagged
+         if hasattr(self, '_skip_next_update') and self._skip_next_update:
+             self._skip_next_update = False
+             return
+
+         # Store the previous z-position if not set
+         if not hasattr(self, 'prev_z') or self.prev_z is None:
+             self.prev_z = z
+
+         # While processing, update the position but do not touch map_slice
+         if hasattr(self, '_currently_processing') and self._currently_processing:
+             self.current_z = z
+             self.current_x = x
+             self.current_y = y
+             self.prev_z = z
+             return
+
+         # Update current positions
+         self.current_z = z
+         self.current_x = x
+         self.current_y = y
+
+         # Only clear map_slice if z changed and we are not already generating a new one
+         if self.current_z != self.prev_z:
+             # Instead of clearing unconditionally, check the feature cache first
+             if hasattr(self, 'feature_cache') and self.feature_cache is not None:
+                 if self.current_z not in self.feature_cache:
+                     self.map_slice = None
+                     self._currently_segmenting = None
+
+         # Update previous z
+         self.prev_z = z
+
+
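The position is meant to be fed in by the viewer as the user navigates, so the realtime segmenter can work outward from wherever the user is looking. A hypothetical wiring (napari is a declared dependency, but this callback is illustrative only):

    seg.update_position(z=12, x=300, y=240)  # e.g., from a cursor/slice-change event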
+     def get_realtime_chunks(self, chunk_size=49):
+         """Build the dictionary of per-Z chunks used by the realtime segmenter."""
+
+         # Decide whether XY planes need to be chunked at all
+         small_dims = (self.image_3d.shape[1] <= chunk_size and
+                       self.image_3d.shape[2] <= chunk_size)
+         few_z = self.image_3d.shape[0] <= 100  # arbitrary threshold
+
+         # If small enough, each Z plane is a single chunk
+         if small_dims and few_z:
+             chunk_size_xy = max(self.image_3d.shape[1], self.image_3d.shape[2])
+         else:
+             chunk_size_xy = chunk_size
+
+         # Number of chunks in the XY plane
+         y_chunks = (self.image_3d.shape[1] + chunk_size_xy - 1) // chunk_size_xy
+         x_chunks = (self.image_3d.shape[2] + chunk_size_xy - 1) // chunk_size_xy
+
+         # Populate the chunk dictionary, one entry set per Z plane
+         chunk_dict = {}
+
+         for z in range(self.image_3d.shape[0]):
+             if small_dims:
+                 chunk_dict[(z, 0, 0)] = {
+                     'coords': [0, self.image_3d.shape[1], 0, self.image_3d.shape[2]],
+                     'processed': False,
+                     'z': z
+                 }
+             else:
+                 # Multiple chunks per Z plane
+                 for y_chunk in range(y_chunks):
+                     for x_chunk in range(x_chunks):
+                         y_start = y_chunk * chunk_size_xy
+                         x_start = x_chunk * chunk_size_xy
+                         y_end = min(y_start + chunk_size_xy, self.image_3d.shape[1])
+                         x_end = min(x_start + chunk_size_xy, self.image_3d.shape[2])
+
+                         chunk_dict[(z, y_start, x_start)] = {
+                             'coords': [y_start, y_end, x_start, x_end],
+                             'processed': False,
+                             'z': z
+                         }
+
+         self.realtimechunks = chunk_dict
+
+         print("Ready!")
+
+
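Each key is a (z, y_start, x_start) tuple and each value carries the XY bounds plus a processed flag, so per-plane lookups and resets between runs stay cheap. One entry, schematically:

    # (z, y_start, x_start) -> {'coords': [y_start, y_end, x_start, x_end],
    #                           'processed': False, 'z': z}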
+     def segment_volume_realtime(self, gpu=True):
+         """Segment the volume in realtime, yielding results chunk by chunk."""
+         import cupy as cp
+
+         try:
+             from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
+             gpu_ml_available = True
+         except ImportError:
+             print("Cannot find cuML, using CPU to segment instead...")
+             gpu_ml_available = False
+             gpu = False
+
+         if self.realtimechunks is None:
+             self.get_realtime_chunks()
+         else:
+             for chunk_pos in self.realtimechunks:  # chunk_pos is the (z, y_start, x_start) tuple
+                 self.realtimechunks[chunk_pos]['processed'] = False
+
+         chunk_dict = self.realtimechunks
+
+         def get_nearest_unprocessed_chunk():
+             """Get the nearest unprocessed chunk, prioritizing the current Z."""
+             curr_z = self.current_z if self.current_z is not None else self.image_3d.shape[0] // 2
+             curr_y = self.current_y if self.current_y is not None else self.image_3d.shape[1] // 2
+             curr_x = self.current_x if self.current_x is not None else self.image_3d.shape[2] // 2
+
+             # First, look for chunks at the current Z
+             current_z_chunks = [(pos, info) for pos, info in chunk_dict.items()
+                                 if pos[0] == curr_z and not info['processed']]
+
+             if current_z_chunks:
+                 # Nearest chunk in the current Z plane, by squared XY distance to the cursor
+                 nearest = min(current_z_chunks,
+                               key=lambda x: ((x[0][1] - curr_y) ** 2 +
+                                              (x[0][2] - curr_x) ** 2))
+                 return nearest[0]
+
+             # Otherwise, find the nearest Z that still has unprocessed chunks
+             available_z = sorted(
+                 [(pos[0], pos) for pos, info in chunk_dict.items()
+                  if not info['processed']],
+                 key=lambda x: abs(x[0] - curr_z)
+             )
+
+             if available_z:
+                 target_z = available_z[0][0]
+                 # Nearest chunk in the target Z plane
+                 z_chunks = [(pos, info) for pos, info in chunk_dict.items()
+                             if pos[0] == target_z and not info['processed']]
+                 nearest = min(z_chunks,
+                               key=lambda x: ((x[0][1] - curr_y) ** 2 +
+                                              (x[0][2] - curr_x) ** 2))
+                 return nearest[0]
+
+             return None
+
+         while True:
+             # Find the nearest unprocessed chunk relative to the current position
+             chunk_idx = get_nearest_unprocessed_chunk()
+             if chunk_idx is None:
+                 break
+
+             # Mark the chunk processed and pull its bounds
+             chunk = chunk_dict[chunk_idx]
+             chunk['processed'] = True
+             coords = chunk['coords']
+
+             # Build the (z, y, x) coordinate list for this chunk on the GPU
+             coords_array = cp.stack(cp.meshgrid(
+                 cp.array([chunk['z']]),
+                 cp.arange(coords[0], coords[1]),
+                 cp.arange(coords[2], coords[3]),
+                 indexing='ij'
+             )).reshape(3, -1).T
+
+             # process_chunk expects a list of (z, y, x) tuples in this mode
+             coords = list(map(tuple, cp.asnumpy(coords_array)))
+
+             # Prediction itself still runs through self.model on the CPU
+             fore, back = self.process_chunk(coords)
+
+             # Yield NumPy arrays so the caller can paint them into the viewer
+             yield cp.asnumpy(fore), cp.asnumpy(back)
+
+
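Because this is a generator, the GUI can consume results incrementally while chunks near the cursor finish first. A consumption sketch (the painting target `out` is the hypothetical buffer from the earlier example):

    for fore, back in seg.segment_volume_realtime():
        if len(fore) > 0:
            out[fore[:, 0], fore[:, 1], fore[:, 2]] = 255  # paint the preview layer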
+     def cleanup(self):
+         """Clean up GPU memory."""
+         import cupy as cp
+
+         try:
+             # Force garbage collection first so pool blocks become free-able
+             import gc
+             gc.collect()
+
+             # Then release CuPy's memory pools
+             mempool = cp.get_default_memory_pool()
+             pinned_mempool = cp.get_default_pinned_memory_pool()
+
+             # print(f"Used GPU memory: {mempool.used_bytes() / 1024**2:.2f} MB")
+
+             mempool.free_all_blocks()
+             pinned_mempool.free_all_blocks()
+
+             # print(f"Used GPU memory after cleanup: {mempool.used_bytes() / 1024**2:.2f} MB")
+
+         except Exception as e:
+             print(f"Warning: Could not clean up GPU memory: {e}")
+
+     def train_batch(self, foreground_array, speed=True, use_gpu=True, use_two=False, mem_lock=False):
+         """Train directly on foreground and background scribble arrays using GPU acceleration."""
+         import cupy as cp
+
+         print("Training model...")
+         self.speed = speed
+         self.cur_gpu = use_gpu
+         self.realtimechunks = None  # free the chunk cache
+
+         self.mem_lock = mem_lock
+
+         self.model = RandomForestClassifier(
+             n_estimators=100,
+             n_jobs=-1,
+             max_depth=None
+         )
+
+         box_size = self.master_chunk
+
+         # Memory-efficient approach: compute features only for the subarrays that need them
+         foreground_features = []
+         background_features = []
+
+         # Move the scribble array to the GPU
+         foreground_array_gpu = cp.asarray(foreground_array)
+
+         # Scribble coordinates: 1 marks foreground, 2 marks background
+         z_fore = cp.argwhere(foreground_array_gpu == 1)
+         z_back = cp.argwhere(foreground_array_gpu == 2)
+
+         # Back to NumPy for compatibility with the rest of the pipeline
+         z_fore_cpu = cp.asnumpy(z_fore)
+         z_back_cpu = cp.asnumpy(z_back)
+
+         # If there are no scribbles, there is nothing to train on
+         if len(z_fore_cpu) == 0 and len(z_back_cpu) == 0:
+             return foreground_features, background_features
+
+         # Dimensions of the input array
+         depth, height, width = foreground_array.shape
+
+         # Step 1: find the minimum set of boxes that cover all scribbles.
+         # The volume is divided into a grid of boxes with edge length box_size.
+         z_grid_size = (depth + box_size - 1) // box_size
+         y_grid_size = (height + box_size - 1) // box_size
+         x_grid_size = (width + box_size - 1) // box_size
+
+         # Track which grid cells contain scribbles
+         grid_cells_with_scribbles = set()
+
+         # Map voxel coordinates to grid cells (np.vstack, since both arrays are on the CPU here)
+         all_scribbles = np.vstack((z_fore_cpu, z_back_cpu)) if len(z_back_cpu) > 0 else z_fore_cpu
+         for z, y, x in all_scribbles:
+             grid_z = int(z // box_size)
+             grid_y = int(y // box_size)
+             grid_x = int(x // box_size)
+             grid_cells_with_scribbles.add((grid_z, grid_y, grid_x))
+
+         # Step 2: process each grid cell that contains scribbles
+         for grid_z, grid_y, grid_x in grid_cells_with_scribbles:
+             # Boundaries of this grid cell
+             z_min = grid_z * box_size
+             y_min = grid_y * box_size
+             x_min = grid_x * box_size
+
+             z_max = min(z_min + box_size, depth)
+             y_max = min(y_min + box_size, height)
+             x_max = min(x_min + box_size, width)
+
+             # Extract the image and scribble subarrays (both already CuPy)
+             subarray = self.image_3d[z_min:z_max, y_min:y_max, x_min:x_max]
+             subarray2 = foreground_array_gpu[z_min:z_max, y_min:y_max, x_min:x_max]
+
+             # Compute features for this subarray
+             if self.speed:
+                 subarray_features = self.compute_feature_maps_gpu(subarray)
+             else:
+                 subarray_features = self.compute_deep_feature_maps_gpu(subarray)
+
+             # Extract foreground features with a direct mask comparison
+             local_fore_coords = cp.argwhere(subarray2 == 1)
+             for local_z, local_y, local_x in cp.asnumpy(local_fore_coords):
+                 feature = subarray_features[int(local_z), int(local_y), int(local_x)]
+                 foreground_features.append(cp.asnumpy(feature))
+
+             # Extract background features the same way
+             local_back_coords = cp.argwhere(subarray2 == 2)
+             for local_z, local_y, local_x in cp.asnumpy(local_back_coords):
+                 feature = subarray_features[int(local_z), int(local_y), int(local_x)]
+                 background_features.append(cp.asnumpy(feature))
+
+         # Combine features and labels as NumPy for sklearn compatibility
+         if foreground_features and background_features:
+             X = np.vstack([np.array(foreground_features), np.array(background_features)])
+             y = np.hstack([np.ones(len(z_fore_cpu)), np.zeros(len(z_back_cpu))])
+         elif foreground_features:
+             X = np.array(foreground_features)
+             y = np.ones(len(z_fore_cpu))
+         elif background_features:
+             X = np.array(background_features)
+             y = np.zeros(len(z_back_cpu))
+         else:
+             X = np.array([])
+             y = np.array([])
+
+         # Train the model
+         try:
+             self.model.fit(X, y)
+         except Exception as e:
+             print(f"Error during model training: {e}")
+             print(X)
+             print(y)
+
+         self.current_speed = speed
+
+         # Clean up GPU memory
+         cp.get_default_memory_pool().free_all_blocks()
+
+         print("Done")
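
The label convention comes straight from the code above: 1 marks foreground scribbles, 2 marks background. An end-to-end sketch, continuing the hypothetical vol/seg from earlier:

    labels = np.zeros_like(vol, dtype=np.uint8)
    labels[32, 100:120, 100:120] = 1   # foreground scribble
    labels[32, 200:220, 200:220] = 2   # background scribble
    seg.train_batch(labels, speed=True)
    mask = seg.segment_volume(np.zeros_like(vol, dtype=np.uint8))
    seg.cleanup()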
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 0.7.2
+ Version: 0.7.4
  Summary: Scripts for initializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -20,7 +20,7 @@ Requires-Dist: matplotlib
  Requires-Dist: networkx
  Requires-Dist: opencv-python-headless
  Requires-Dist: openpyxl
- Requires-Dist: pandas==2.2.5
+ Requires-Dist: pandas
  Requires-Dist: napari
  Requires-Dist: python-louvain
  Requires-Dist: tifffile
@@ -73,9 +73,8 @@ NetTracer3D is free to use/fork for academic/nonprofit use so long as citation i
 
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
- -- Version 0.7.2 Updates --
+ -- Version 0.7.4 Updates --
 
- * Added a new option to the modify network qualities menu to remove node centroids with unassigned id values.
- * Bug fixes, mainly:
- * Fixed a bug in the Ripley's function that made it always evaluate nodes of one id against themselves, even when a separate id was specified.
- * Fixed some bugs when processing 2D images.
+ * Bug fixes.
+ * The segmenter now has a GPU option that runs quite a bit faster. Only available with the CUDA toolkit and CuPy.
+ * The segmenter no longer leaks memory.
@@ -2,19 +2,21 @@ nettracer3d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nettracer3d/community_extractor.py,sha256=5v9SCCLX3P1RX0fjPVKH5NHMFkMolZ5BTe0bR_a67xg,24479
  nettracer3d/modularity.py,sha256=FH3GpTHorRNkdQULe-2DWgFE3i0_u__hrao7Nx_6Ge4,30249
  nettracer3d/morphology.py,sha256=jyDjYzrZ4LvI5jOyw8DLsxmo-i5lpqHsejYpW7Tq7Mo,19786
- nettracer3d/nettracer.py,sha256=aNeRZq6EAbtPC3uEtgIX35t7y0PtGUc3I4BEk9H7ato,218669
- nettracer3d/nettracer_gui.py,sha256=CsVMxHu3vP61gMIgebqejYE14W7mxTM9GsTOiNWdhDU,427477
+ nettracer3d/nettracer.py,sha256=4M87zjhwJd3mxR60KInmj5MbYMt8i3q2HEe1e499NDY,219787
+ nettracer3d/nettracer_gui.py,sha256=DWLExjr5HxfczgOfLTLTKkUC7nG4uUNhj9r7IU2Fidg,428544
  nettracer3d/network_analysis.py,sha256=q1q7lxtA3lebxitfC_jfiT9cnpYXJw4q0Oy2_-Aj8qE,48068
  nettracer3d/network_draw.py,sha256=F7fw6Pcf4qWOhdKwLmhwqWdschbDlHzwCVolQC9imeU,14117
  nettracer3d/node_draw.py,sha256=k3sCTfUCJs3aH1C1q1gTNxDz9EAQbBd1hsUIJajxRx8,9823
  nettracer3d/proximity.py,sha256=nlVBXzJ6r84TlP8UaLcdamWifYn-jfVIF0uB-56k_Js,24752
  nettracer3d/run.py,sha256=xYeaAc8FCx8MuzTGyL3NR3mK7WZzffAYAH23bNRZYO4,127
- nettracer3d/segmenter.py,sha256=gJS2AXqHhnw29cbzIxAah2LsrE7_7XnzG7mYSAovZ4I,87847
+ nettracer3d/segmenter - Copy.py,sha256=gJS2AXqHhnw29cbzIxAah2LsrE7_7XnzG7mYSAovZ4I,87847
+ nettracer3d/segmenter.py,sha256=Sm-ASMK2TOBCfK29-0GdvzC5wgY_atSR0BH1-F62Yvs,61757
+ nettracer3d/segmenter_GPU.py,sha256=4tatAe9wOl9FJd3rYM7I2oL8QxBLlIqNlKCX-Ni5tRU,24667
  nettracer3d/simple_network.py,sha256=fP1gkDdtQcHruEZpUdasKdZeVacoLOxKhR3bY0L1CAQ,15426
  nettracer3d/smart_dilate.py,sha256=69z9Bn8xtA7rkhcVpqd1PxRSxxRFnIQse9lc2-LU4TU,25879
- nettracer3d-0.7.2.dist-info/licenses/LICENSE,sha256=gM207DhJjWrxLuEWXl0Qz5ISbtWDmADfjHp3yC2XISs,888
- nettracer3d-0.7.2.dist-info/METADATA,sha256=YSXRHaxODA8bPM_ipSdCQZQZM53g19U5btGiuDjGYhQ,4474
- nettracer3d-0.7.2.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
- nettracer3d-0.7.2.dist-info/entry_points.txt,sha256=Nx1rr_0QhJXDBHAQg2vcqCzLMKBzSHfwy3xwGkueVyc,53
- nettracer3d-0.7.2.dist-info/top_level.txt,sha256=zsYy9rZwirfCEOubolhee4TyzqBAL5gSUeFMzhFTX8c,12
- nettracer3d-0.7.2.dist-info/RECORD,,
+ nettracer3d-0.7.4.dist-info/licenses/LICENSE,sha256=gM207DhJjWrxLuEWXl0Qz5ISbtWDmADfjHp3yC2XISs,888
+ nettracer3d-0.7.4.dist-info/METADATA,sha256=IhOaYQ3Ua9rKPqvFnqNgOy61TiXYAlF9XJ7l-tOAwaw,4301
+ nettracer3d-0.7.4.dist-info/WHEEL,sha256=A8Eltl-h0W-qZDVezsLjjslosEH_pdYC2lQ0JcbgCzs,91
+ nettracer3d-0.7.4.dist-info/entry_points.txt,sha256=Nx1rr_0QhJXDBHAQg2vcqCzLMKBzSHfwy3xwGkueVyc,53
+ nettracer3d-0.7.4.dist-info/top_level.txt,sha256=zsYy9rZwirfCEOubolhee4TyzqBAL5gSUeFMzhFTX8c,12
+ nettracer3d-0.7.4.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.3.1)
+ Generator: setuptools (80.7.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 