nettracer3d 1.0.5__tar.gz → 1.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nettracer3d might be problematic.

Files changed (30)
  1. {nettracer3d-1.0.5/src/nettracer3d.egg-info → nettracer3d-1.0.7}/PKG-INFO +4 -4
  2. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/README.md +3 -3
  3. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/pyproject.toml +1 -1
  4. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/nettracer.py +14 -11
  5. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/nettracer_gui.py +145 -37
  6. {nettracer3d-1.0.5 → nettracer3d-1.0.7/src/nettracer3d.egg-info}/PKG-INFO +4 -4
  7. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/LICENSE +0 -0
  8. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/setup.cfg +0 -0
  9. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/__init__.py +0 -0
  10. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/cellpose_manager.py +0 -0
  11. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/community_extractor.py +0 -0
  12. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/excelotron.py +0 -0
  13. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/modularity.py +0 -0
  14. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/morphology.py +0 -0
  15. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/neighborhoods.py +0 -0
  16. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/network_analysis.py +0 -0
  17. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/network_draw.py +0 -0
  18. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/node_draw.py +0 -0
  19. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/painting.py +0 -0
  20. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/proximity.py +0 -0
  21. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/run.py +0 -0
  22. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/segmenter.py +0 -0
  23. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/segmenter_GPU.py +0 -0
  24. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/simple_network.py +0 -0
  25. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d/smart_dilate.py +0 -0
  26. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
  27. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
  28. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d.egg-info/entry_points.txt +0 -0
  29. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d.egg-info/requires.txt +0 -0
  30. {nettracer3d-1.0.5 → nettracer3d-1.0.7}/src/nettracer3d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 1.0.5
+ Version: 1.0.7
  Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,7 +110,7 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro

  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.

- -- Version 1.0.4 Updates --
+ -- Version 1.0.7 Updates --

- * Some small bug fixes and adjustments
- * Heatmap theoretical distances can now be calculated based on an area constrained within a binary mask.
+ * Bug fix
+ * Added handling if the user tries to load in a multichannel 3dimensional image (note this will not detect if you have a multi-channel image of 2d planes, it will think those are 3d. For now those can be split up with other software, or you can use the crop function to just isolate the channel you want as if it were a z-plane).
@@ -65,7 +65,7 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro

  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.

- -- Version 1.0.4 Updates --
+ -- Version 1.0.7 Updates --

- * Some small bug fixes and adjustments
- * Heatmap theoretical distances can now be calculated based on an area constrained within a binary mask.
+ * Bug fix
+ * Added handling if the user tries to load in a multichannel 3dimensional image (note this will not detect if you have a multi-channel image of 2d planes, it will think those are 3d. For now those can be split up with other software, or you can use the crop function to just isolate the channel you want as if it were a z-plane).
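
Note: the changelog entry above suggests splitting multi-channel images of 2D planes with other software before loading. As a rough illustration only (not nettracer3d code), a multi-channel TIFF can be split into one file per channel with tifffile, assuming the channel axis comes first; the file names and path here are hypothetical.

```python
# Illustrative sketch only (not nettracer3d code): split a multi-channel TIFF
# into one single-channel file per channel, assuming the channel axis is axis 0.
import tifffile

stack = tifffile.imread("multichannel.tif")          # hypothetical input path
if stack.ndim >= 3 and stack.shape[-1] not in (3, 4):  # skip RGB(A)-style data
    for c in range(stack.shape[0]):
        tifffile.imwrite(f"C{c}.tif", stack[c])       # e.g. C0.tif, C1.tif, ...
```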
@@ -1,6 +1,6 @@
  [project]
  name = "nettracer3d"
- version = "1.0.5"
+ version = "1.0.7"
  authors = [
    { name="Liam McLaughlin", email="liamm@wustl.edu" },
  ]
@@ -5294,32 +5294,35 @@ class Network_3D:
  new_list.append(centroid)

  else:
-
  if mode == 1:
-
  legal = self.edges != 0
-
  elif mode == 2:
-
  legal = self.network_overlay != 0
-
  elif mode == 3:
-
  legal = self.id_overlay != 0
-
  if self.nodes is None:
-
  temp_array = proximity.populate_array(self.node_centroids, shape = legal.shape)
  else:
  temp_array = self.nodes
-
  if dim == 2:
  volume = np.count_nonzero(legal) * self.xy_scale**2
+ # Pad in x and y dimensions (assuming shape is [y, x])
+ legal = np.pad(legal, pad_width=1, mode='constant', constant_values=0)
  else:
  volume = np.count_nonzero(legal) * self.z_scale * self.xy_scale**2
+ # Pad in x, y, and z dimensions (assuming shape is [z, y, x])
+ legal = np.pad(legal, pad_width=1, mode='constant', constant_values=0)
+
  print(f"Using {volume} for the volume measurement (Volume of provided mask as scaled by xy and z scaling)")
-
- legal = smart_dilate.compute_distance_transform_distance(legal, sampling = [self.z_scale, self.xy_scale, self.xy_scale]) # Get true distances
+
+ # Compute distance transform on padded array
+ legal = smart_dilate.compute_distance_transform_distance(legal, sampling = [self.z_scale, self.xy_scale, self.xy_scale])
+
+ # Remove padding after distance transform
+ if dim == 2:
+ legal = legal[1:-1, 1:-1] # Remove padding from x and y dimensions
+ else:
+ legal = legal[1:-1, 1:-1, 1:-1] # Remove padding from x, y, and z dimensions

  max_avail = np.max(legal) # Most internal point
  min_legal = factor * max_avail # Values of stuff 25% within the tissue
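
Note: the padding added in this hunk matters because a Euclidean distance transform measures each foreground voxel's distance to the nearest zero voxel; if the mask touches the array border there is no background beyond it, so border voxels look artificially deep. A minimal sketch of the effect, assuming the wrapped smart_dilate transform behaves like scipy.ndimage.distance_transform_edt (this is an illustration, not the package's wrapper):

```python
# Minimal sketch (assumes distance_transform_edt-like behavior): zero padding
# makes the array border count as a boundary, so edge voxels get a finite depth.
import numpy as np
from scipy import ndimage

mask = np.zeros((5, 9), dtype=bool)
mask[:, :5] = True                                            # "tissue" touching three borders

no_pad = ndimage.distance_transform_edt(mask)
padded = np.pad(mask, pad_width=1, mode='constant', constant_values=0)
depth = ndimage.distance_transform_edt(padded)[1:-1, 1:-1]    # crop the padding back off

print(no_pad[2, 0])   # 5.0 -- border voxel looks far from any boundary
print(depth[2, 0])    # 1.0 -- padded border now counts as a boundary
```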
@@ -2009,7 +2009,7 @@ class ImageViewerWindow(QMainWindow):
  self.parent().toggle_channel(1)
  # Navigate to the Z-slice
  self.parent().slice_slider.setValue(int(centroid[0]))
- print(f"Found edge {value} at Z-slice {centroid[0]}")
+ print(f"Found edge {value} at [Z,Y,X] -> {centroid}")

  else:
  print(f"Edge {value} not found in centroids dictionary")
@@ -2045,9 +2045,9 @@ class ImageViewerWindow(QMainWindow):
  # Navigate to the Z-slice
  self.parent().slice_slider.setValue(int(centroid[0]))
  if mode == 0:
- print(f"Found node {value} at Z-slice {centroid[0]}")
+ print(f"Found node {value} at [Z,Y,X] -> {centroid}")
  elif mode == 2:
- print(f"Found node {value} from community {com} at Z-slice {centroid[0]}")
+ print(f"Found node {value} from community {com} at [Z,Y,X] -> {centroid}")


  else:
@@ -2332,7 +2332,7 @@ class ImageViewerWindow(QMainWindow):
  unique_labels = np.unique(input_array[binary_mask])
  print(f"Processing {len(unique_labels)} unique labels")

- # Get all bounding boxes at once - this is very fast
+ # Get all bounding boxes at once
  bounding_boxes = ndimage.find_objects(input_array)

  # Prepare work items - just check if bounding box exists for each label
@@ -2348,7 +2348,7 @@ class ImageViewerWindow(QMainWindow):
  bbox = bounding_boxes[bbox_index]
  work_items.append((orig_label, bbox))

- print(f"Created {len(work_items)} work items")
+ #print(f"Created {len(work_items)} work items")

  # If we have work items, process them
  if len(work_items) == 0:
@@ -2368,7 +2368,7 @@ class ImageViewerWindow(QMainWindow):
  return orig_label, bbox, labeled_sub, num_cc

  except Exception as e:
- print(f"Error processing label {orig_label}: {e}")
+ #print(f"Error processing label {orig_label}: {e}")
  return orig_label, bbox, None, 0

  # Execute in parallel
@@ -2384,7 +2384,7 @@ class ImageViewerWindow(QMainWindow):

  for orig_label, bbox, labeled_sub, num_cc in results:
  if num_cc > 0 and labeled_sub is not None:
- print(f"Label {orig_label}: {num_cc} components")
+ #print(f"Label {orig_label}: {num_cc} components")
  # Remap labels and place in output
  for cc_id in range(1, num_cc + 1):
  mask = labeled_sub == cc_id
@@ -2397,7 +2397,7 @@ class ImageViewerWindow(QMainWindow):

  def handle_seperate(self):
  """
- Fixed version with proper mask handling and debugging
+ Seperate objects in an array that share a label but do not touch
  """
  try:
  # Handle nodes
@@ -2406,7 +2406,6 @@ class ImageViewerWindow(QMainWindow):
  # Create highlight overlay (this should preserve original label values)
  self.create_highlight_overlay(node_indices=self.clicked_values['nodes'])

- # DON'T convert to boolean yet - we need the original labels!
  # Create a boolean mask for where we have highlighted values
  highlight_mask = self.highlight_overlay != 0

@@ -2416,7 +2415,7 @@ class ImageViewerWindow(QMainWindow):
  # Get non-highlighted part of the array
  non_highlighted = np.where(highlight_mask, 0, my_network.nodes)

- # Calculate max_val properly
+ # Calculate max_val
  max_val = np.max(non_highlighted) if np.any(non_highlighted) else 0

  # Process highlighted part
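
Note: the hunks above touch the "separate" feature, which relabels objects that share a label value but are not spatially connected. As a rough stand-alone sketch of the idea (per-label bounding boxes via ndimage.find_objects, connected components via ndimage.label, new labels offset past the current maximum); the function name is hypothetical and this is not the GUI's parallel implementation:

```python
# Hypothetical stand-alone sketch of separating non-touching objects that
# share a label; not the GUI's threaded implementation.
import numpy as np
from scipy import ndimage

def separate_labels(labels: np.ndarray) -> np.ndarray:
    out = labels.copy()
    next_label = int(labels.max())
    # find_objects()[i] is the bounding box of label i + 1 (None if absent)
    for value, bbox in enumerate(ndimage.find_objects(labels), start=1):
        if bbox is None:
            continue
        comps, n = ndimage.label(labels[bbox] == value)  # components within the bbox
        for cc in range(2, n + 1):                        # first component keeps its label
            next_label += 1
            out[bbox][comps == cc] = next_label
    return out
```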
@@ -5061,6 +5060,10 @@ class ImageViewerWindow(QMainWindow):
  dialog = MergeNodeIdDialog(self)
  dialog.exec()

+ def show_multichan_dialog(self, data):
+ dialog = MultiChanDialog(self, data)
+ dialog.show()
+
  def show_gray_water_dialog(self):
  """Show the gray watershed parameter dialog."""
  dialog = GrayWaterDialog(self)
@@ -5163,7 +5166,7 @@ class ImageViewerWindow(QMainWindow):

  my_network.edges = (my_network.nodes == 0) * my_network.edges

- my_network.calculate_all(my_network.nodes, my_network.edges, xy_scale = my_network.xy_scale, z_scale = my_network.z_scale, search = None, diledge = None, inners = False, hash_inners = False, remove_trunk = 0, ignore_search_region = True, other_nodes = None, label_nodes = True, directory = None, GPU = False, fast_dil = False, skeletonize = False, GPU_downsample = None)
+ my_network.calculate_all(my_network.nodes, my_network.edges, xy_scale = my_network.xy_scale, z_scale = my_network.z_scale, search = None, diledge = None, inners = False, remove_trunk = 0, ignore_search_region = True, other_nodes = None, label_nodes = True, directory = None, GPU = False, fast_dil = False, skeletonize = False, GPU_downsample = None)

  self.load_channel(1, my_network.edges, data = True)
  self.load_channel(0, my_network.nodes, data = True)
@@ -5917,6 +5920,16 @@ class ImageViewerWindow(QMainWindow):
  msg.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
  return msg.exec() == QMessageBox.StandardButton.Yes

+ def confirm_multichan_dialog(self):
+ """Shows a dialog asking user to confirm if image is multichan"""
+ msg = QMessageBox()
+ msg.setIcon(QMessageBox.Icon.Question)
+ msg.setText("Image Format Alert")
+ msg.setInformativeText("Is this a Multi-Channel (4D) image?")
+ msg.setWindowTitle("Confirm Image Format")
+ msg.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
+ return msg.exec() == QMessageBox.StandardButton.Yes
+
  def confirm_resize_dialog(self):
  """Shows a dialog asking user to resize image"""
  msg = QMessageBox()
@@ -6000,7 +6013,7 @@ class ImageViewerWindow(QMainWindow):
  try:
  if len(self.channel_data[channel_index].shape) == 3: # potentially 2D RGB
  if self.channel_data[channel_index].shape[-1] in (3, 4): # last dim is 3 or 4
- if not data and self.shape is None:
+ if not data:
  if self.confirm_rgb_dialog():
  # User confirmed it's 2D RGB, expand to 4D
  self.channel_data[channel_index] = np.expand_dims(self.channel_data[channel_index], axis=0)
@@ -6010,12 +6023,18 @@ class ImageViewerWindow(QMainWindow):
  except:
  pass

- if not color:
- try:
- if len(self.channel_data[channel_index].shape) == 4 and (channel_index == 0 or channel_index == 1):
+ if len(self.channel_data[channel_index].shape) == 4:
+ if not self.channel_data[channel_index].shape[-1] in (3, 4):
+ if self.confirm_multichan_dialog(): # User is trying to load 4D channel stack:
+ my_data = copy.deepcopy(self.channel_data[channel_index])
+ self.channel_data[channel_index] = None
+ self.show_multichan_dialog(data = my_data)
+ return
+ elif not color and (channel_index == 0 or channel_index == 1):
+ try:
  self.channel_data[channel_index] = self.reduce_rgb_dimension(self.channel_data[channel_index], 'weight')
- except:
- pass
+ except:
+ pass

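
Note: the routing above hinges on a simple shape check: a trailing axis of length 3 or 4 is taken to be RGB(A), while any other 4D array is treated as a channel stack and handed to the new multi-channel dialog. A simplified, hypothetical version of that check (the function name and return strings are illustrative, not the GUI code):

```python
# Simplified, hypothetical sketch of the shape-based routing in load_channel.
import numpy as np

def classify_image(arr: np.ndarray) -> str:
    if arr.ndim == 3 and arr.shape[-1] in (3, 4):
        return "possible 2D RGB(A) image"           # the GUI confirms this with the user
    if arr.ndim == 4 and arr.shape[-1] in (3, 4):
        return "3D RGB(A) volume"                   # reduced to grayscale for nodes/edges
    if arr.ndim == 4:
        return "multi-channel stack (C, Z, Y, X)"   # routed to the channel-splitting dialog
    return "plain 2D/3D grayscale image"
```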
 
@@ -8432,6 +8451,89 @@ class MergeNodeIdDialog(QDialog):
  print(traceback.format_exc())
  #print(f"Error: {e}")

+ class MultiChanDialog(QDialog):
+
+ def __init__(self, parent=None, data = None):
+
+ super().__init__(parent)
+ self.setWindowTitle("Channel Loading")
+ self.setModal(False)
+
+ layout = QFormLayout(self)
+
+ self.data = data
+
+ self.nodes = QComboBox()
+ self.edges = QComboBox()
+ self.overlay1 = QComboBox()
+ self.overlay2 = QComboBox()
+ options = ["None"]
+ for i in range(self.data.shape[0]):
+ options.append(str(i))
+ self.nodes.addItems(options)
+ self.edges.addItems(options)
+ self.overlay1.addItems(options)
+ self.overlay2.addItems(options)
+ self.nodes.setCurrentIndex(0)
+ self.edges.setCurrentIndex(0)
+ self.overlay1.setCurrentIndex(0)
+ self.overlay2.setCurrentIndex(0)
+ layout.addRow("Load this channel into nodes?", self.nodes)
+ layout.addRow("Load this channel into edges?", self.edges)
+ layout.addRow("Load this channel into overlay1?", self.overlay1)
+ layout.addRow("Load this channel into overlay2?", self.overlay2)
+
+ run_button = QPushButton("Load Channels")
+ run_button.clicked.connect(self.run)
+ layout.addWidget(run_button)
+
+ run_button2 = QPushButton("Save Channels to Directory")
+ run_button2.clicked.connect(self.run2)
+ layout.addWidget(run_button2)
+
+
+ def run(self):
+
+ try:
+ node_chan = int(self.nodes.currentText())
+ self.parent().load_channel(0, self.data[node_chan, :, :, :], data = True)
+ except:
+ pass
+ try:
+ edge_chan = int(self.edges.currentText())
+ self.parent().load_channel(1, self.data[edge_chan, :, :, :], data = True)
+ except:
+ pass
+ try:
+ overlay1_chan = int(self.overlay1.currentText())
+ self.parent().load_channel(2, self.data[overlay1_chan, :, :, :], data = True)
+ except:
+ pass
+ try:
+ overlay2_chan = int(self.overlay2.currentText())
+ self.parent().load_channel(3, self.data[overlay2_chan, :, :, :], data = True)
+ except:
+ pass
+
+ def run2(self):
+
+ try:
+ # First let user select parent directory
+ parent_dir = QFileDialog.getExistingDirectory(
+ self,
+ "Select Location to Save Channels",
+ "",
+ QFileDialog.Option.ShowDirsOnly
+ )
+
+ for i in range(self.data.shape[0]):
+ try:
+ tifffile.imwrite(f'{parent_dir}/C{i}.tif', self.data[i, :, :, :])
+ except:
+ continue
+ except:
+ pass
+

  class Show3dDialog(QDialog):
  def __init__(self, parent=None):
@@ -10192,38 +10294,44 @@ class ViolinDialog(QDialog):

  from . import neighborhoods

- if self.idens.currentIndex() != 0:
+ try:

- iden = self.idens.currentText()
- iden_list = []
- import ast
+ if self.idens.currentIndex() != 0:

- for item in my_network.node_identities:
+ iden = self.idens.currentText()
+ iden_list = []
+ import ast

- try:
- parse = ast.literal_eval(my_network.node_identities[item])
- if iden in parse:
- iden_list.append(item)
- except:
- if (iden == my_network.node_identities[item]):
- iden_list.append(item)
+ for item in my_network.node_identities:

- violin_dict = df_to_dict_by_rows(self.df, iden_list, f"Z-Score-like Channel Intensities of Identity {iden}, {len(iden_list)} Nodes")
+ try:
+ parse = ast.literal_eval(my_network.node_identities[item])
+ if iden in parse:
+ iden_list.append(item)
+ except:
+ if (iden == my_network.node_identities[item]):
+ iden_list.append(item)

- neighborhoods.create_violin_plots(violin_dict, graph_title=f"Z-Score-like Channel Intensities of Identity {iden}, {len(iden_list)} Nodes")
+ violin_dict = df_to_dict_by_rows(self.df, iden_list, f"Z-Score-like Channel Intensities of Identity {iden}, {len(iden_list)} Nodes")

+ neighborhoods.create_violin_plots(violin_dict, graph_title=f"Z-Score-like Channel Intensities of Identity {iden}, {len(iden_list)} Nodes")
+ except:
+ pass

- if self.coms.currentIndex() != 0:
+ try:
+ if self.coms.currentIndex() != 0:

- com = self.coms.currentText()
+ com = self.coms.currentText()

- com_dict = n3d.invert_dict(my_network.communities)
+ com_dict = n3d.invert_dict(my_network.communities)

- com_list = com_dict[int(com)]
+ com_list = com_dict[int(com)]

- violin_dict = df_to_dict_by_rows(self.df, com_list, f"Z-Score-like Channel Intensities of Community/Neighborhood {com}, {len(com_list)} Nodes")
+ violin_dict = df_to_dict_by_rows(self.df, com_list, f"Z-Score-like Channel Intensities of Community/Neighborhood {com}, {len(com_list)} Nodes")

- neighborhoods.create_violin_plots(violin_dict, graph_title=f"Z-Score-like Channel Intensities of Community/Neighborhood {com}, {len(com_list)} Nodes")
+ neighborhoods.create_violin_plots(violin_dict, graph_title=f"Z-Score-like Channel Intensities of Community/Neighborhood {com}, {len(com_list)} Nodes")
+ except:
+ pass


  def run2(self):
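
Note: the per-identity filter kept in this hunk relies on ast.literal_eval so that nodes whose identity field stores a stringified list (e.g. "['A', 'B']") match alongside plain string identities. A small sketch of that parsing pattern with made-up identity values:

```python
# Sketch of the identity-matching pattern; the identity values here are made up.
import ast

node_identities = {1: "Glom", 2: "['Glom', 'Tubule']", 3: "Tubule"}
iden = "Glom"

iden_list = []
for node, value in node_identities.items():
    try:
        parsed = ast.literal_eval(value)      # succeeds for list-like strings
        if iden in parsed:
            iden_list.append(node)
    except (ValueError, SyntaxError):         # plain strings: fall back to equality
        if iden == value:
            iden_list.append(node)

print(iden_list)   # [1, 2]
```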
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 1.0.5
+ Version: 1.0.7
  Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,7 +110,7 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro

  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.

- -- Version 1.0.4 Updates --
+ -- Version 1.0.7 Updates --

- * Some small bug fixes and adjustments
- * Heatmap theoretical distances can now be calculated based on an area constrained within a binary mask.
+ * Bug fix
+ * Added handling if the user tries to load in a multichannel 3dimensional image (note this will not detect if you have a multi-channel image of 2d planes, it will think those are 3d. For now those can be split up with other software, or you can use the crop function to just isolate the channel you want as if it were a z-plane).