nettracer3d 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nettracer3d/morphology.py CHANGED
@@ -5,6 +5,7 @@ from scipy.ndimage import zoom
  import multiprocessing as mp
  from concurrent.futures import ThreadPoolExecutor, as_completed
  import tifffile
+ from functools import partial
  import pandas as pd
 
  def get_reslice_indices(args):
@@ -70,7 +71,7 @@ def _get_node_edge_dict(label_array, edge_array, label, dilate_xy, dilate_z, cor
 
      # Create a boolean mask where elements with the specified label are True
      label_array = label_array == label
-     dil_array = nettracer.dilate_3D(label_array, dilate_xy, dilate_xy, dilate_z) #Dilate the label to see where the dilated label overlaps
+     dil_array = nettracer.dilate_3D_recursive(label_array, dilate_xy, dilate_xy, dilate_z) #Dilate the label to see where the dilated label overlaps
 
      if cores == 0: #For getting the volume of objects. Cores presumes you want the 'core' included in the interaction.
          edge_array = edge_array * dil_array # Filter the edges by the label in question
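The only change in this hunk swaps nettracer.dilate_3D for nettracer.dilate_3D_recursive (the new search_neighbor_ids function further down uses the same call). The implementation is not part of this diff; as background, a "recursive" (iterated) dilation reaches a large kernel by repeating a small one, which is usually gentler on memory for large volumes. A minimal sketch of that idea using SciPy, with an illustrative function name and box kernels rather than nettracer3d's actual API:

import numpy as np
from scipy import ndimage

def iterated_dilate_3D(mask, kernel_x, kernel_y, kernel_z):
    # Illustrative stand-in, not nettracer3d's implementation: approximate one
    # large box dilation by iterating a 3x3x3 box. For box kernels this is
    # exact: n passes of a 3-wide box equal one (2n + 1)-wide box.
    iterations = max(kernel_x, kernel_y, kernel_z) // 2
    return ndimage.binary_dilation(mask,
                                   structure=np.ones((3, 3, 3), dtype=bool),
                                   iterations=max(iterations, 1))

mask = np.zeros((16, 64, 64), dtype=bool)
mask[8, 32, 32] = True
one_pass = ndimage.binary_dilation(mask, structure=np.ones((7, 7, 7), dtype=bool))
assert np.array_equal(iterated_dilate_3D(mask, 7, 7, 7), one_pass)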
@@ -189,25 +190,166 @@ def quantify_edge_node(nodes, edges, search = 0, xy_scale = 1, z_scale = 1, core
 
      return edge_quants
 
- def calculate_voxel_volumes(array, xy_scale = 1, z_scale = 1):
+
+ def calculate_voxel_volumes(array, xy_scale=1, z_scale=1):
      """
-     Calculate voxel volumes for each uniquely labelled object in a 3D numpy array.
+     Calculate voxel volumes for each uniquely labelled object in a 3D numpy array
+     using parallel processing.
 
      Args:
          array: 3D numpy array where different objects are marked with different integer labels
+         xy_scale: Scale factor for x and y dimensions
+         z_scale: Scale factor for z dimension
 
      Returns:
          Dictionary mapping object labels to their voxel volumes
      """
+
+     def process_volume_chunk(chunk_data, labels, xy_scale, z_scale):
+         """
+         Calculate volumes for a chunk of the array.
+
+         Args:
+             chunk_data: 3D numpy array chunk
+             labels: Array of unique labels to process
+             xy_scale: Scale factor for x and y dimensions
+             z_scale: Scale factor for z dimension
+
+         Returns:
+             Dictionary of label: volume pairs for this chunk
+         """
+         chunk_volumes = {}
+         for label in labels:
+             volume = np.count_nonzero(chunk_data == label) * (xy_scale**2) * z_scale
+             if volume > 0: # Only include if object exists in this chunk
+                 chunk_volumes[label] = volume
+         return chunk_volumes
+
      # Get unique labels (excluding 0 which typically represents background)
      labels = np.unique(array)
      if len(labels) == 2:
          array, _ = nettracer.label_objects(array)
          labels = np.unique(array)
-
      labels = labels[labels != 0] # Remove background label if present
 
-     # Create dictionary of label: volume pairs
-     volumes = {label: (np.count_nonzero(array == label) * (xy_scale**2) * z_scale) for label in labels}
+     if len(labels) == 0:
+         return {}
+
+     # Get number of CPU cores
+     num_cores = mp.cpu_count()
+
+     # Calculate chunk size along y-axis
+     chunk_size = array.shape[1] // num_cores
+     if chunk_size < 1:
+         chunk_size = 1
+
+     # Create chunks along y-axis
+     chunks = []
+     for i in range(0, array.shape[1], chunk_size):
+         end = min(i + chunk_size, array.shape[1])
+         chunks.append(array[:, i:end, :])
+
+     # Process chunks in parallel
+     process_func = partial(process_volume_chunk,
+                            labels=labels,
+                            xy_scale=xy_scale,
+                            z_scale=z_scale)
+
+     volumes = {}
+     with ThreadPoolExecutor(max_workers=num_cores) as executor:
+         chunk_results = list(executor.map(process_func, chunks))
+
+     # Combine results from all chunks
+     for chunk_volumes in chunk_results:
+         for label, volume in chunk_volumes.items():
+             if label in volumes:
+                 volumes[label] += volume
+             else:
+                 volumes[label] = volume
+
+     return volumes
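calculate_voxel_volumes is now parallel: it slices the array into one slab per CPU core along the y-axis, counts each label's voxels per slab in a thread pool, and sums the per-slab counts, so objects that straddle a chunk boundary are still totaled correctly. A usage sketch with invented data (it assumes the function remains importable from nettracer3d.morphology):

import numpy as np
from nettracer3d import morphology

# Invented labeled volume in (z, y, x) order: two boxes labeled 1 and 2.
labels = np.zeros((10, 100, 100), dtype=np.uint16)
labels[2:5, 10:30, 10:30] = 1   # 3 * 20 * 20 = 1200 voxels
labels[6:9, 50:80, 50:80] = 2   # 3 * 30 * 30 = 2700 voxels

# Assumed voxel size: 0.5 units in x/y, 1.0 in z.
volumes = morphology.calculate_voxel_volumes(labels, xy_scale=0.5, z_scale=1.0)

# Each volume is voxel_count * xy_scale**2 * z_scale.
print(volumes)  # {1: 300.0, 2: 675.0}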
+
+
+ def search_neighbor_ids(nodes, targets, id_dict, neighborhood_dict, totals, search, xy_scale, z_scale, root):
+
+     if 0 in targets:
+         targets.remove(0)
+     targets = np.isin(nodes, targets)
+     targets = nettracer.binarize(targets)
+
+     dilate_xy, dilate_z = nettracer.dilation_length_to_pixels(xy_scale, z_scale, search, search)
+
+     dilated = nettracer.dilate_3D_recursive(targets, dilate_xy, dilate_xy, dilate_z)
+     dilated = dilated - targets #technically we dont need the cores
+     search_vol = np.count_nonzero(dilated) * xy_scale * xy_scale * z_scale #need this for density
+     targets = dilated != 0
+     del dilated
+
+
+     targets = targets * nodes
+
+     unique, counts = np.unique(targets, return_counts=True)
+     count_dict = dict(zip(unique, counts))
+     print(count_dict)
+
+     del count_dict[0]
+
+     unique, counts = np.unique(nodes, return_counts=True)
+     total_dict = dict(zip(unique, counts))
+     print(total_dict)
+
+     del total_dict[0]
+
 
-     return volumes
+     for label in total_dict:
+         if label in id_dict:
+             if label in count_dict:
+                 neighborhood_dict[id_dict[label]] += count_dict[label]
+             totals[id_dict[label]] += total_dict[label]
+
+
+     try:
+         del neighborhood_dict[root] #no good way to get this
+         del totals[root] #no good way to get this
+     except:
+         pass
+
+     volume = nodes.shape[0] * nodes.shape[1] * nodes.shape[2] * xy_scale * xy_scale * z_scale
+     densities = {}
+     for nodeid, amount in totals.items():
+         densities[nodeid] = (neighborhood_dict[nodeid]/search_vol)/(amount/volume)
+
+     return neighborhood_dict, totals, densities
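The densities that search_neighbor_ids returns are enrichment ratios: each node class's concentration inside the dilated search shell (the cores are subtracted out first) divided by its concentration over the whole image, so 1.0 means the class is no denser near the targets than anywhere else. A toy calculation with invented counts:

# Invented numbers showing the ratio computed per node class:
shell_count, shell_vol = 500, 10_000        # class voxels in the dilated shell / shell volume
total_count, image_vol = 2_000, 1_000_000   # class voxels in the whole image / image volume

density = (shell_count / shell_vol) / (total_count / image_vol)
print(density)  # 25.0 -> this class is 25x more concentrated near the targets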
+
+
+
+
+
+
+ def get_search_space_dilate(target, centroids, id_dict, search, scaling = 1):
+
+     ymax = np.max(centroids[:, 0])
+     xmax = np.max(centroids[:, 1])
+
+
+     array = np.zeros((ymax + 1, xmax + 1))
+
+     for i, row in enumerate(centroids):
+         if i + 1 in id_dict and target in id_dict[i+1]:
+             y = row[0] # get y coordinate
+             x = row[1] # get x coordinate
+             array[y, x] = 1 # set value at that coordinate
+
+
+     #array = downsample(array, 3)
+     array = dilate_2D(array, search, search)
+
+     search_space = np.count_nonzero(array) * scaling * scaling
+
+     tifffile.imwrite('search_regions.tif', array)
+
+     print(f"Search space is {search_space}")
+
+
+
+     return array
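get_search_space_dilate rasterizes the centroids whose id_dict entry contains the target into a 2D mask, dilates each mark by the search radius, and reports the covered area in scaled units; note it also writes the mask to search_regions.tif as a side effect. A usage sketch with invented inputs (it assumes integer (y, x) centroids and that dilate_2D is available in the module):

import numpy as np

# Invented data: centroids in (y, x) order; id_dict maps each 1-based
# centroid index to the identities assigned to that node.
centroids = np.array([[10, 10], [40, 40], [42, 38]])
id_dict = {1: {"TypeA"}, 2: {"TypeB"}, 3: {"TypeB"}}

# Marks the two TypeB centroids, dilates each into a search region,
# prints the covered area (count * scaling**2), and saves search_regions.tif.
region = get_search_space_dilate("TypeB", centroids, id_dict, search=15, scaling=0.5)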