nettracer3d 0.5.6__tar.gz → 0.5.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. {nettracer3d-0.5.6/src/nettracer3d.egg-info → nettracer3d-0.5.8}/PKG-INFO +3 -3
  2. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/README.md +2 -2
  3. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/pyproject.toml +1 -1
  4. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/nettracer_gui.py +40 -7
  5. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/segmenter.py +102 -52
  6. {nettracer3d-0.5.6 → nettracer3d-0.5.8/src/nettracer3d.egg-info}/PKG-INFO +3 -3
  7. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/LICENSE +0 -0
  8. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/setup.cfg +0 -0
  9. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/__init__.py +0 -0
  10. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/community_extractor.py +0 -0
  11. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/hub_getter.py +0 -0
  12. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/modularity.py +0 -0
  13. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/morphology.py +0 -0
  14. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/nettracer.py +0 -0
  15. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/network_analysis.py +0 -0
  16. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/network_draw.py +0 -0
  17. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/node_draw.py +0 -0
  18. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/proximity.py +0 -0
  19. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/run.py +0 -0
  20. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/simple_network.py +0 -0
  21. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d/smart_dilate.py +0 -0
  22. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
  23. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
  24. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d.egg-info/entry_points.txt +0 -0
  25. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d.egg-info/requires.txt +0 -0
  26. {nettracer3d-0.5.6 → nettracer3d-0.5.8}/src/nettracer3d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: nettracer3d
3
- Version: 0.5.6
3
+ Version: 0.5.8
4
4
  Summary: Scripts for initializing and analyzing networks from segmentations of three dimensional images.
5
5
  Author-email: Liam McLaughlin <mclaughlinliam99@gmail.com>
6
6
  Project-URL: User_Tutorial, https://www.youtube.com/watch?v=cRatn5VTWDY
@@ -44,6 +44,6 @@ NetTracer3D is free to use/fork for academic/nonprofit use so long as citation i
44
44
 
45
45
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
46
46
 
47
- -- Version 0.5.6 updates --
47
+ -- Version 0.5.8 updates --
48
48
 
49
- 1. Minor change - Updated some of the overhead and post-random forest voxel assignment for the segmenter to be faster. The major bottleneck for this is still querying sklearn, which seems a bit harder to work around.
49
+ 1. Bug fixes and improvements on the 2d segmenter.
@@ -8,6 +8,6 @@ NetTracer3D is free to use/fork for academic/nonprofit use so long as citation i
8
8
 
9
9
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
10
10
 
11
- -- Version 0.5.6 updates --
11
+ -- Version 0.5.8 updates --
12
12
 
13
- 1. Minor change - Updated some of the overhead and post-random forest voxel assignment for the segmenter to be faster. The major bottleneck for this is still querying sklearn, which seems a bit harder to work around.
13
+ 1. Bug fixes and improvements on the 2d segmenter.
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "nettracer3d"
3
- version = "0.5.6"
3
+ version = "0.5.8"
4
4
  authors = [
5
5
  { name="Liam McLaughlin", email="mclaughlinliam99@gmail.com" },
6
6
  ]
@@ -3980,10 +3980,17 @@ class CustomTableView(QTableView):
3980
3980
  save_menu = context_menu.addMenu("Save As")
3981
3981
  save_csv = save_menu.addAction("CSV")
3982
3982
  save_excel = save_menu.addAction("Excel")
3983
+ save_gephi = save_menu.addAction("Gephi")
3984
+ save_graphml = save_menu.addAction("GraphML")
3985
+ save_pajek = save_menu.addAction("Pajek")
3983
3986
 
3984
3987
  # Connect the actions - ensure we're saving the active table
3985
3988
  save_csv.triggered.connect(lambda: self.parent.active_table.save_table_as('csv'))
3986
3989
  save_excel.triggered.connect(lambda: self.parent.active_table.save_table_as('xlsx'))
3990
+ save_gephi.triggered.connect(lambda: self.parent.active_table.save_table_as('gexf'))
3991
+ save_graphml.triggered.connect(lambda: self.parent.active_table.save_table_as('graphml'))
3992
+ save_pajek.triggered.connect(lambda: self.parent.active_table.save_table_as('net'))
3993
+
3987
3994
 
3988
3995
  if self == self.parent.selection_table:
3989
3996
  set_action = context_menu.addAction("Swap with network table (also sets internal network properties - may affect related functions)")
@@ -4061,14 +4068,19 @@ class CustomTableView(QTableView):
4061
4068
  table_name = "Selection"
4062
4069
 
4063
4070
  # Get save file name
4064
- file_filter = "CSV Files (*.csv)" if file_type == 'csv' else "Excel Files (*.xlsx)"
4071
+ file_filter = ("CSV Files (*.csv)" if file_type == 'csv' else
4072
+ "Excel Files (*.xlsx)" if file_type == 'excel' else
4073
+ "Gephi Graph (*.gexf)" if file_type == 'gexf' else
4074
+ "GraphML (*.graphml)" if file_type == 'graphml' else
4075
+ "Pajek Network (*.net)")
4076
+
4065
4077
  filename, _ = QFileDialog.getSaveFileName(
4066
4078
  self,
4067
4079
  f"Save {table_name} Table As",
4068
4080
  "",
4069
4081
  file_filter
4070
4082
  )
4071
-
4083
+
4072
4084
  if filename:
4073
4085
  try:
4074
4086
  if file_type == 'csv':
@@ -4076,11 +4088,28 @@ class CustomTableView(QTableView):
4076
4088
  if not filename.endswith('.csv'):
4077
4089
  filename += '.csv'
4078
4090
  df.to_csv(filename, index=False)
4079
- else:
4091
+ elif file_type == 'xlsx':
4080
4092
  # If user didn't type extension, add .xlsx
4081
4093
  if not filename.endswith('.xlsx'):
4082
4094
  filename += '.xlsx'
4083
4095
  df.to_excel(filename, index=False)
4096
+ elif file_type == 'gexf':
4097
+ # If user didn't type extension, add .gexf
4098
+ if not filename.endswith('.gexf'):
4099
+ filename += '.gexf'
4100
+ #for node in my_network.network.nodes():
4101
+ #my_network.network.nodes[node]['label'] = str(node)
4102
+ nx.write_gexf(my_network.network, filename, encoding='utf-8', prettyprint=True)
4103
+ elif file_type == 'graphml':
4104
+ # If user didn't type extension, add .graphml
4105
+ if not filename.endswith('.graphml'):
4106
+ filename += '.graphml'
4107
+ nx.write_graphml(my_network.network, filename)
4108
+ elif file_type == 'net':
4109
+ # If user didn't type extension, add .net
4110
+ if not filename.endswith('.net'):
4111
+ filename += '.net'
4112
+ nx.write_pajek(my_network.network, filename)
4084
4113
 
4085
4114
  QMessageBox.information(
4086
4115
  self,
@@ -6517,9 +6546,10 @@ class MachineWindow(QMainWindow):
6517
6546
  # Reset any processing flags in the segmenter
6518
6547
  if hasattr(self.segmenter, '_currently_processing'):
6519
6548
  self.segmenter._currently_processing = None
6520
-
6521
- # Create a new worker after a brief delay
6522
- QTimer.singleShot(500, self.start_segmentation)
6549
+
6550
+ if 0 in self.parent().highlight_overlay[current_z, :, :]:
6551
+ # Create a new worker after a brief delay
6552
+ QTimer.singleShot(500, self.start_segmentation)
6523
6553
 
6524
6554
 
6525
6555
 
@@ -6636,7 +6666,7 @@ class SegmentationWorker(QThread):
6636
6666
  self._stop = True
6637
6667
 
6638
6668
  def get_poked(self):
6639
- self.poked = True
6669
+ self.machine_window.poke_segmenter()
6640
6670
 
6641
6671
  def run(self):
6642
6672
  try:
@@ -6651,6 +6681,9 @@ class SegmentationWorker(QThread):
6651
6681
 
6652
6682
  # Process the slice with chunked generator
6653
6683
  for foreground, background in self.segmenter.segment_slice_chunked(current_z):
6684
+ if foreground == None and background == None:
6685
+ self.get_poked()
6686
+
6654
6687
  if self._stop:
6655
6688
  break
6656
6689
 
@@ -18,6 +18,7 @@ from concurrent.futures import ThreadPoolExecutor
18
18
  import threading
19
19
  from scipy import ndimage
20
20
  import multiprocessing
21
+ from collections import defaultdict
21
22
 
22
23
 
23
24
  class InteractiveSegmenter:
@@ -98,24 +99,41 @@ class InteractiveSegmenter:
98
99
  # First attempt to get the feature map
99
100
  feature_map = None
100
101
 
101
- if slice_z in self.feature_cache:
102
- feature_map = self.feature_cache[slice_z]
103
- elif hasattr(self, 'map_slice') and self.map_slice is not None and slice_z == self.current_z:
104
- feature_map = self.map_slice
105
- else:
102
+ try:
103
+ if slice_z in self.feature_cache:
104
+ feature_map = self.feature_cache[slice_z]
105
+ elif hasattr(self, 'map_slice') and self.map_slice is not None and slice_z == self.current_z:
106
+ feature_map = self.map_slice
107
+ else:
108
+ # Generate new feature map
109
+ try:
110
+ feature_map = self.get_feature_map_slice(slice_z, self.current_speed, False)
111
+ self.map_slice = feature_map
112
+ # Cache the feature map for future use
113
+ #if not hasattr(self, 'feature_cache'):
114
+ #self.feature_cache = {}
115
+ #self.feature_cache[slice_z] = feature_map
116
+ except Exception as e:
117
+ print(f"Error generating feature map: {e}")
118
+ import traceback
119
+ traceback.print_exc()
120
+ return # Exit if we can't generate the feature map
121
+ except:
106
122
  # Generate new feature map
123
+ #self.feature_cache = {}
107
124
  try:
108
125
  feature_map = self.get_feature_map_slice(slice_z, self.current_speed, False)
109
126
  self.map_slice = feature_map
110
127
  # Cache the feature map for future use
111
- if not hasattr(self, 'feature_cache'):
112
- self.feature_cache = {}
113
- self.feature_cache[slice_z] = feature_map
128
+ #if not hasattr(self, 'feature_cache'):
129
+ #self.feature_cache = {}
130
+ #self.feature_cache[slice_z] = feature_map
114
131
  except Exception as e:
115
132
  print(f"Error generating feature map: {e}")
116
133
  import traceback
117
134
  traceback.print_exc()
118
135
  return # Exit if we can't generate the feature map
136
+
119
137
 
120
138
  # Check that we have a valid feature map
121
139
  if feature_map is None:
@@ -152,7 +170,12 @@ class InteractiveSegmenter:
152
170
 
153
171
  # Predict
154
172
  try:
155
- predictions = self.model.predict(features)
173
+ try:
174
+ predictions = self.model.predict(features)
175
+ except ValueError:
176
+ self.feature_cache = None
177
+ self.map_slice = None
178
+ return None, None
156
179
 
157
180
  # Split results
158
181
  foreground = set()
@@ -692,18 +715,12 @@ class InteractiveSegmenter:
692
715
  Returns:
693
716
  Dictionary with z-values as keys and lists of corresponding [y, x] coordinates as values
694
717
  """
695
- z_dict = {}
718
+ z_dict = defaultdict(list)
696
719
 
697
- for coord in coordinates:
698
- z, y, x = coord # Unpack the coordinates
699
-
700
- # Add the y, x coordinate to the appropriate z-value group
701
- if z not in z_dict:
702
- z_dict[z] = []
703
-
704
- z_dict[z].append((y, x)) # Store as tuple, not list, so it's hashable
720
+ for z, y, x in coordinates:
721
+ z_dict[z].append((y, x))
705
722
 
706
- return z_dict
723
+ return dict(z_dict) # Convert back to regular dict
707
724
 
708
725
  def process_chunk(self, chunk_coords):
709
726
  """Process a chunk of coordinates"""
@@ -727,10 +744,13 @@ class InteractiveSegmenter:
727
744
  chunk_by_z = self.organize_by_z(chunk_coords)
728
745
  for z, coords in chunk_by_z.items():
729
746
 
730
- if z not in self.feature_cache and not self.previewing:
747
+ if self.feature_cache is None:
731
748
  features = self.get_feature_map_slice(z, self.speed, self.cur_gpu)
732
749
  features = [features[y, x] for y, x in coords]
733
- elif z not in self.feature_cache and self.previewing:
750
+ elif z not in self.feature_cache and not self.previewing:
751
+ features = self.get_feature_map_slice(z, self.speed, self.cur_gpu)
752
+ features = [features[y, x] for y, x in coords]
753
+ elif z not in self.feature_cache or self.feature_cache is None and self.previewing:
734
754
  features = self.map_slice
735
755
  try:
736
756
  features = [features[y, x] for y, x in coords]
@@ -850,6 +870,7 @@ class InteractiveSegmenter:
850
870
  chunks.append(list(map(tuple, coords)))
851
871
  else:
852
872
  chunks = create_2d_chunks()
873
+ self.feature_cache = None #Decided this should not maintain training data for segmenting 2D
853
874
 
854
875
  foreground_coords = set()
855
876
  background_coords = set()
@@ -904,8 +925,9 @@ class InteractiveSegmenter:
904
925
  # Only clear map_slice if z changes and we're not already generating a new one
905
926
  if self.current_z != self.prev_z:
906
927
  # Instead of setting to None, check if we already have it in the cache
907
- if hasattr(self, 'feature_cache') and self.current_z not in self.feature_cache:
908
- self.map_slice = None
928
+ if hasattr(self, 'feature_cache') and self.feature_cache is not None:
929
+ if self.current_z not in self.feature_cache:
930
+ self.map_slice = None
909
931
  self._currently_segmenting = None
910
932
 
911
933
  # Update previous z
@@ -1088,60 +1110,88 @@ class InteractiveSegmenter:
1088
1110
 
1089
1111
  if self.current_speed != speed:
1090
1112
  self.feature_cache = None
1113
+ if use_gpu:
1114
+ try:
1115
+ self.model = cuRandomForestClassifier(
1116
+ n_estimators=100,
1117
+ max_depth=None
1118
+ )
1119
+ except:
1120
+ self.model = RandomForestClassifier(
1121
+ n_estimators=100,
1122
+ n_jobs=-1,
1123
+ max_depth=None
1124
+ )
1125
+ else:
1126
+ self.model = RandomForestClassifier(
1127
+ n_estimators=100,
1128
+ n_jobs=-1,
1129
+ max_depth=None
1130
+ )
1131
+
1091
1132
 
1092
1133
  if use_two:
1093
1134
 
1094
- changed = [] #Track which slices need feature maps
1135
+ #changed = [] #Track which slices need feature maps
1095
1136
 
1096
1137
  if not self.use_two: #Clarifies if we need to redo feature cache for 2D
1097
1138
  self.feature_cache = None
1098
1139
  self.use_two = True
1099
1140
 
1141
+ self.feature_cache = None #Decided this should reset, can remove this line to have it retain prev feature maps
1142
+ self.two_slices = []
1143
+
1100
1144
  if self.feature_cache == None:
1101
1145
  self.feature_cache = {}
1102
1146
 
1103
1147
  # Get foreground coordinates and features
1104
1148
  z_fore, y_fore, x_fore = np.where(foreground_array == 1)
1149
+
1150
+
1151
+ fore_coords = list(zip(z_fore, y_fore, x_fore))
1105
1152
 
1106
1153
  # Get background coordinates and features
1107
1154
  z_back, y_back, x_back = np.where(foreground_array == 2)
1108
1155
 
1109
- slices = set(list(z_back) + list(z_fore))
1156
+ back_coords = list(zip(z_back, y_back, x_back))
1110
1157
 
1111
- for z in slices:
1112
- if z not in self.two_slices:
1113
- changed.append(z)
1114
- self.two_slices.append(z) #Tracks assigning coords to feature map slices
1158
+
1159
+ #slices = set(list(z_back) + list(z_fore))
1160
+
1161
+ #for z in slices:
1162
+ #if z not in self.two_slices:
1163
+ #changed.append(z)
1164
+ #self.two_slices.append(z) #Tracks assigning coords to feature map slices
1115
1165
 
1116
1166
  foreground_features = []
1117
1167
  background_features = []
1118
1168
 
1119
- for i, z in enumerate(z_fore):
1120
- if z in changed: # Means this slice needs a feature map
1121
- new_map = self.get_feature_map_slice(z, speed, use_gpu)
1122
- self.feature_cache[z] = new_map
1123
- changed.remove(z)
1124
-
1125
- current_map = self.feature_cache[z]
1126
-
1127
- # Get the feature vector for this foreground point
1128
- feature_vector = current_map[y_fore[i], x_fore[i]]
1129
-
1130
- # Add to our collection
1131
- foreground_features.append(feature_vector)
1169
+ z_fores = self.organize_by_z(fore_coords)
1170
+ z_backs = self.organize_by_z(back_coords)
1171
+ slices = set(list(z_fores.keys()) + list(z_backs.keys()))
1132
1172
 
1133
- for i, z in enumerate(z_back):
1134
- if z in changed: # Means this slice needs a feature map
1135
- new_map = self.get_feature_map_slice(z, speed, use_gpu)
1136
- self.feature_cache[z] = new_map
1137
-
1138
- current_map = self.feature_cache[z]
1173
+ for z in slices:
1174
+
1175
+
1176
+ current_map = self.get_feature_map_slice(z, speed, use_gpu)
1177
+
1178
+ if z in z_fores:
1139
1179
 
1140
- # Get the feature vector for this foreground point
1141
- feature_vector = current_map[y_back[i], x_back[i]]
1180
+ for y, x in z_fores[z]:
1181
+ # Get the feature vector for this foreground point
1182
+ feature_vector = current_map[y, x]
1183
+
1184
+ # Add to our collection
1185
+ foreground_features.append(feature_vector)
1186
+
1187
+ if z in z_backs:
1142
1188
 
1143
- # Add to our collection
1144
- background_features.append(feature_vector)
1189
+ for y, x in z_backs[z]:
1190
+ # Get the feature vector for this foreground point
1191
+ feature_vector = current_map[y, x]
1192
+
1193
+ # Add to our collection
1194
+ background_features.append(feature_vector)
1145
1195
 
1146
1196
 
1147
1197
  else:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: nettracer3d
3
- Version: 0.5.6
3
+ Version: 0.5.8
4
4
  Summary: Scripts for initializing and analyzing networks from segmentations of three dimensional images.
5
5
  Author-email: Liam McLaughlin <mclaughlinliam99@gmail.com>
6
6
  Project-URL: User_Tutorial, https://www.youtube.com/watch?v=cRatn5VTWDY
@@ -44,6 +44,6 @@ NetTracer3D is free to use/fork for academic/nonprofit use so long as citation i
44
44
 
45
45
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
46
46
 
47
- -- Version 0.5.6 updates --
47
+ -- Version 0.5.8 updates --
48
48
 
49
- 1. Minor change - Updated some of the overhead and post-random forest voxel assignment for the segmenter to be faster. The major bottleneck for this is still querying sklearn, which seems a bit harder to work around.
49
+ 1. Bug fixes and improvements on the 2d segmenter.
File without changes
File without changes