nettracer3d 0.4.2__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nettracer3d/proximity.py CHANGED
@@ -89,9 +89,11 @@ def process_label(args):
89
89
  nodes, label, dilate_xy, dilate_z, array_shape = args
90
90
  print(f"Processing node {label}")
91
91
  indices = np.argwhere(nodes == label)
92
+ if len(indices) == 0:
93
+ return None, None
92
94
  z_vals, y_vals, x_vals = get_reslice_indices((indices, dilate_xy, dilate_z, array_shape))
93
95
  if z_vals is None: #If get_reslice_indices ran into a ValueError, nothing is returned.
94
- return None, None, None
96
+ return None, None
95
97
  sub_nodes = reslice_3d_array((nodes, z_vals, y_vals, x_vals))
96
98
  return label, sub_nodes
97
99
 
@@ -0,0 +1,290 @@
1
+ from sklearn.ensemble import RandomForestClassifier
2
+ import numpy as np
3
+ import cupy as cp
4
+ import torch
5
+ from concurrent.futures import ThreadPoolExecutor
6
+ import threading
7
+ import cupyx.scipy.ndimage as cpx
8
+
9
+
10
class InteractiveSegmenter:
    """Random-forest based interactive segmentation of a 3D volume.

    Seven feature maps (four gaussian scales, two difference-of-gaussians,
    and the sobel gradient magnitude) are computed once and cached; training
    examples mark voxels as foreground/background and the classifier then
    predicts every remaining voxel.
    """

    def __init__(self, image_3d, use_gpu=True):
        """
        Parameters:
            image_3d: 3D numpy array (Z, Y, X) to segment.
            use_gpu: request GPU acceleration; only honored when a CUDA
                device is actually available.
        """
        self.image_3d = image_3d
        self.patterns = []  # accumulated {'features', 'is_foreground'} training records

        self.use_gpu = use_gpu and cp.cuda.is_available()
        if self.use_gpu:
            print(f"Using GPU: {torch.cuda.get_device_name()}")
            self.image_gpu = cp.asarray(image_3d)

        self.model = RandomForestClassifier(
            n_estimators=100,
            n_jobs=-1,
            max_depth=None
        )
        self.feature_cache = None  # lazily built (Z, Y, X, n_features) array
        self.lock = threading.Lock()  # guards lazy feature_cache creation

    def _ensure_feature_cache(self):
        """Compute the feature cache once, double-checked under the lock."""
        if self.feature_cache is None:
            with self.lock:
                if self.feature_cache is None:
                    self.feature_cache = self.compute_feature_maps()

    def _chunk_infos(self, chunk_size):
        """Split the volume into chunk records: coordinate list + corner + flag."""
        infos = []
        shape = self.image_3d.shape
        for z in range(0, shape[0], chunk_size):
            for y in range(0, shape[1], chunk_size):
                for x in range(0, shape[2], chunk_size):
                    coords = [
                        (zz, yy, xx)
                        for zz in range(z, min(z + chunk_size, shape[0]))
                        for yy in range(y, min(y + chunk_size, shape[1]))
                        for xx in range(x, min(x + chunk_size, shape[2]))
                    ]
                    infos.append({
                        'coords': coords,
                        'corner': (z, y, x),
                        'processed': False
                    })
        return infos

    def _compute_feature_maps_cpu(self):
        """CPU fallback producing the same 7 feature maps as the GPU path."""
        from scipy import ndimage  # local import: scipy only needed on the CPU path

        image = np.asarray(self.image_3d, dtype=np.float32)
        features = [ndimage.gaussian_filter(image, sigma) for sigma in (0.5, 1.0, 2.0, 4.0)]
        for s1, s2 in ((1, 2), (2, 4)):
            features.append(ndimage.gaussian_filter(image, s1) - ndimage.gaussian_filter(image, s2))
        gx = ndimage.sobel(image, axis=2, mode='nearest')
        gy = ndimage.sobel(image, axis=1, mode='nearest')
        gz = ndimage.sobel(image, axis=0, mode='nearest')
        features.append(np.sqrt(gx ** 2 + gy ** 2 + gz ** 2))
        return np.stack(features, axis=-1)

    def compute_feature_maps(self):
        """Compute all feature maps, GPU accelerated when available.

        Returns:
            (Z, Y, X, 7) float array: 4 gaussian scales, 2
            difference-of-gaussians, and the gradient magnitude.
        """
        if not self.use_gpu:
            # BUG FIX: the original called super().compute_feature_maps(),
            # which does not exist (this class has no base) and raised
            # AttributeError; compute on the CPU instead.
            return self._compute_feature_maps_cpu()

        features = []
        image = self.image_gpu
        original_shape = self.image_3d.shape

        # Gaussian smoothing at different scales
        print("Obtaining gaussians")
        for sigma in [0.5, 1.0, 2.0, 4.0]:
            features.append(cp.asnumpy(self.gaussian_filter_gpu(image, sigma)))

        print("Obtaining dif of gaussians")

        # Difference of Gaussians (band-pass response between two scales)
        for (s1, s2) in [(1, 2), (2, 4)]:
            g1 = self.gaussian_filter_gpu(image, s1)
            g2 = self.gaussian_filter_gpu(image, s2)
            features.append(cp.asnumpy(g1 - g2))

        # Gradient magnitude via separable sobel kernels in torch
        image_torch = torch.from_numpy(self.image_3d).cuda()
        image_torch = image_torch.float().unsqueeze(0).unsqueeze(0)

        kernel_size = 3
        padding = kernel_size // 2

        # One replicate-padded copy is shared by all three convolutions
        pad = torch.nn.functional.pad(
            image_torch,
            (padding, padding, padding, padding, padding, padding),
            mode='replicate')

        print("Computing sobel kernels")

        sobel_x = torch.tensor([-1, 0, 1], device='cuda').float().view(1, 1, 1, 1, 3)
        sobel_y = torch.tensor([-1, 0, 1], device='cuda').float().view(1, 1, 1, 3, 1)
        sobel_z = torch.tensor([-1, 0, 1], device='cuda').float().view(1, 1, 3, 1, 1)

        print("Computing gradients")

        # Each conv only shrinks its own axis; slice back to the input shape
        gx = torch.nn.functional.conv3d(pad, sobel_x, padding=0)[:, :, :original_shape[0], :original_shape[1], :original_shape[2]]
        gy = torch.nn.functional.conv3d(pad, sobel_y, padding=0)[:, :, :original_shape[0], :original_shape[1], :original_shape[2]]
        gz = torch.nn.functional.conv3d(pad, sobel_z, padding=0)[:, :, :original_shape[0], :original_shape[1], :original_shape[2]]

        print("Computing gradient mags")

        gradient_magnitude = torch.sqrt(gx ** 2 + gy ** 2 + gz ** 2)
        features.append(gradient_magnitude.cpu().numpy().squeeze())

        # Verify every feature map still matches the volume shape
        for i, feat in enumerate(features):
            if feat.shape != original_shape:
                raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")

        return np.stack(features, axis=-1)

    def gaussian_filter_gpu(self, image, sigma):
        """GPU-accelerated Gaussian filter (thin cupyx wrapper)."""
        return cpx.gaussian_filter(image, sigma=sigma)

    def train(self):
        """Train the random forest on accumulated patterns.

        Requires at least two patterns; consumes self.patterns on success.
        """
        if len(self.patterns) < 2:
            return

        X = []
        y = []
        for pattern in self.patterns:
            X.extend(pattern['features'])
            y.extend([pattern['is_foreground']] * len(pattern['features']))

        self.model.fit(np.array(X), np.array(y))
        self.patterns = []

    def process_chunk(self, chunk_coords):
        """Predict one chunk of (z, y, x) coordinates.

        Returns:
            (foreground, background) sets of coordinate tuples.
        """
        features = [self.feature_cache[z, y, x] for z, y, x in chunk_coords]
        predictions = self.model.predict(features)

        foreground = set()
        background = set()
        for coord, pred in zip(chunk_coords, predictions):
            if pred:
                foreground.add(coord)
            else:
                background.add(coord)

        return foreground, background

    def segment_volume(self, chunk_size=32):
        """Segment the whole volume via parallel chunk prediction.

        Returns:
            (foreground_coords, background_coords) sets of (z, y, x) tuples.
        """
        self._ensure_feature_cache()

        chunk_lists = [info['coords'] for info in self._chunk_infos(chunk_size)]

        foreground_coords = set()
        background_coords = set()

        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(self.process_chunk, chunk) for chunk in chunk_lists]

            for i, future in enumerate(futures):
                fore, back = future.result()
                foreground_coords.update(fore)
                background_coords.update(back)
                if i % 10 == 0:
                    print(f"Processed {i}/{len(chunk_lists)} chunks")

        return foreground_coords, background_coords

    def cleanup(self):
        """Release GPU memory pools (no-op when running on CPU)."""
        if self.use_gpu:
            cp.get_default_memory_pool().free_all_blocks()
            torch.cuda.empty_cache()

    def train_batch(self, foreground_array, background_array):
        """Train directly from binary foreground/background mask arrays.

        Any voxel > 0 in either mask becomes a training example for the
        corresponding class.
        """
        self._ensure_feature_cache()

        # Foreground coordinates and their cached features
        z_fore, y_fore, x_fore = np.where(foreground_array > 0)
        foreground_features = self.feature_cache[z_fore, y_fore, x_fore]

        # Background coordinates and their cached features
        z_back, y_back, x_back = np.where(background_array > 0)
        background_features = self.feature_cache[z_back, y_back, x_back]

        # Combine features and labels (1 = foreground, 0 = background)
        X = np.vstack([foreground_features, background_features])
        y = np.hstack([np.ones(len(z_fore)), np.zeros(len(z_back))])

        self.model.fit(X, y)

        print("Done")

    def segment_volume_subprocess(self, chunk_size=32, current_z=None, current_x=None, current_y=None):
        """Segment the volume, prioritizing chunks near the user's location.

        Yields dicts with 'foreground', 'background', 'corner', and 'size'
        as each chunk completes.
        """
        # BUG FIX: the original referenced concurrent.futures without
        # importing it, raising NameError on first use.
        import concurrent.futures

        self._ensure_feature_cache()

        chunks_info = self._chunk_infos(chunk_size)

        def get_chunk_priority(chunk):
            """Lower is sooner: distance from the user's current position."""
            z, y, x = chunk['corner']
            priority = 0

            # Z distance is always used when a position is known
            if current_z is not None:
                priority += abs(z - current_z)

            # Add X/Y euclidean distance if provided
            if current_x is not None and current_y is not None:
                xy_distance = ((x - current_x) ** 2 + (y - current_y) ** 2) ** 0.5
                priority += xy_distance

            return priority

        with ThreadPoolExecutor() as executor:
            futures = {}  # active future -> chunk record

            while True:
                unprocessed_chunks = [c for c in chunks_info if not c['processed']]
                if not unprocessed_chunks:
                    break

                # Nearest chunks first
                unprocessed_chunks.sort(key=get_chunk_priority)

                # Keep the pool saturated
                while len(futures) < executor._max_workers and unprocessed_chunks:
                    chunk = unprocessed_chunks.pop(0)
                    future = executor.submit(self.process_chunk, chunk['coords'])
                    futures[future] = chunk
                    chunk['processed'] = True

                done, _ = concurrent.futures.wait(
                    futures.keys(),
                    timeout=0.1,
                    return_when=concurrent.futures.FIRST_COMPLETED
                )

                for future in done:
                    chunk = futures.pop(future)
                    fore, back = future.result()

                    yield {
                        'foreground': fore,
                        'background': back,
                        'corner': chunk['corner'],
                        'size': chunk_size
                    }

            # BUG FIX: drain futures still running when the last chunk was
            # submitted — the original loop exited and dropped their results.
            for future in concurrent.futures.as_completed(list(futures)):
                chunk = futures.pop(future)
                fore, back = future.result()
                yield {
                    'foreground': fore,
                    'background': back,
                    'corner': chunk['corner'],
                    'size': chunk_size
                }
@@ -383,42 +383,83 @@ def smart_label(binary_array, label_array, directory = None, GPU = True, predown
383
383
  return dilated_nodes_with_labels
384
384
 
385
385
def compute_distance_transform_GPU(nodes):
    """GPU euclidean distance transform returning only the nearest-index map.

    For a pseudo-3D input (single Z slice) the transform runs in true 2D
    and the index map is re-inflated to shape (3, 1, H, W) with every Z
    index fixed at 0, matching the 3D output layout.
    """
    squeeze_back = nodes.shape[0] == 1
    if squeeze_back:
        nodes = np.squeeze(nodes)  # drop the singleton Z axis for 2D processing

    # Run the EDT on the device; only the index component is kept.
    _, nearest = cpx.distance_transform_edt(cp.asarray(nodes), return_indices=True)

    # Bring the index map back to host memory
    nearest = cp.asnumpy(nearest)

    if squeeze_back:
        # 2D output is (2, H, W); expand to (3, 1, H, W), Z coordinate 0.
        height, width = nearest[0].shape
        inflated = np.zeros((3, 1, height, width), dtype=nearest.dtype)
        inflated[1:, 0] = nearest  # copy Y and X index planes
        nearest = inflated

    return nearest
396
411
 
397
412
 
398
413
def compute_distance_transform(nodes):
    """CPU euclidean distance transform returning only the nearest-index map.

    For a pseudo-3D input (single Z slice) the transform runs in true 2D
    and the index map is re-inflated to shape (3, 1, H, W) with every Z
    index fixed at 0, matching the 3D output layout and the GPU variant.
    """
    is_pseudo_3d = nodes.shape[0] == 1
    if is_pseudo_3d:
        nodes = np.squeeze(nodes)  # convert to 2D for processing

    distance, nearest_label_indices = distance_transform_edt(nodes, return_indices=True)

    if is_pseudo_3d:
        # BUG FIX: this branch previously referenced an undefined name
        # (nearest_label_indices_np) and its result was never returned,
        # so 2D inputs raised NameError.
        # For 2D input we get (2, H, W) but need (3, 1, H, W).
        H, W = nearest_label_indices[0].shape
        indices_4d = np.zeros((3, 1, H, W), dtype=nearest_label_indices.dtype)
        indices_4d[1:, 0] = nearest_label_indices  # copy Y and X coordinates
        # indices_4d[0] stays 0 for all Z coordinates
        nearest_label_indices = indices_4d

    return nearest_label_indices
401
429
 
402
430
 
403
431
 
404
432
def compute_distance_transform_distance_GPU(nodes):
    """GPU euclidean distance transform of `nodes`, returned as a numpy array.

    Pseudo-3D inputs (single Z slice) are squeezed to 2D for the transform
    and re-expanded to (1, H, W) afterwards so the output rank matches the
    input rank.
    """
    is_pseudo_3d = nodes.shape[0] == 1
    if is_pseudo_3d:
        nodes = np.squeeze(nodes)  # convert to 2D for processing

    # Convert numpy array to CuPy array
    nodes_cp = cp.asarray(nodes)

    # Only the distance map is needed; skip computing the index map
    distance = cpx.distance_transform_edt(nodes_cp)

    # Convert result back to a numpy array
    distance = cp.asnumpy(distance)

    if is_pseudo_3d:
        # BUG FIX: the expand_dims result was previously discarded, so 2D
        # inputs came back missing their Z axis.
        distance = np.expand_dims(distance, axis=0)

    return distance
416
451
 
417
452
 
418
453
def compute_distance_transform_distance(nodes):
    """CPU euclidean distance transform of `nodes` (GPU fallback path).

    Pseudo-3D inputs (single Z slice) are squeezed to 2D for the transform
    and re-expanded to (1, H, W) afterwards so the output rank matches the
    input rank.
    """
    is_pseudo_3d = nodes.shape[0] == 1
    if is_pseudo_3d:
        nodes = np.squeeze(nodes)  # convert to 2D for processing

    # Only the distance map is needed; skip computing the index map
    distance = distance_transform_edt(nodes)

    if is_pseudo_3d:
        # BUG FIX: the expand_dims result was previously discarded, so 2D
        # inputs came back missing their Z axis.
        distance = np.expand_dims(distance, axis=0)

    return distance
423
464
 
424
465
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: nettracer3d
3
- Version: 0.4.2
3
+ Version: 0.4.4
4
4
  Summary: Scripts for initializing and analyzing networks from segmentations of three dimensional images.
5
5
  Author-email: Liam McLaughlin <boom2449@gmail.com>
6
6
  Project-URL: User_Manual, https://drive.google.com/drive/folders/1fTkz3n4LN9_VxKRKC8lVQSlrz_wq0bVn?usp=drive_link
@@ -23,6 +23,7 @@ Requires-Dist: pandas
23
23
  Requires-Dist: napari
24
24
  Requires-Dist: python-louvain
25
25
  Requires-Dist: tifffile
26
+ Requires-Dist: qtrangeslider
26
27
  Requires-Dist: PyQt6
27
28
  Provides-Extra: cuda11
28
29
  Requires-Dist: cupy-cuda11x; extra == "cuda11"
@@ -31,15 +32,8 @@ Requires-Dist: cupy-cuda12x; extra == "cuda12"
31
32
  Provides-Extra: cupy
32
33
  Requires-Dist: cupy; extra == "cupy"
33
34
 
34
- NetTracer3D is a python package developed for both 2D and 3D analysis of microscopic images in the .tif file format. It supports generation of 3D networks showing the relationships between objects (or nodes) in three dimensional space, either based on their own proximity or connectivity via connecting objects such as nerves or blood vessels. In addition to these functionalities are several advanced 3D data processing algorithms, such as labeling of branched structures or abstraction of branched structures into networks. Note that nettracer3d uses segmented data, which can be segmented from other softwares such as ImageJ and imported into NetTracer3D, although it does offer its own segmentation via intensity or volumetric thresholding. NetTracer3D currently has a fully functional GUI. To use the GUI, after installing the nettracer3d package via pip, run a python script in your env with the following commands:
35
+ NetTracer3D is a python package developed for both 2D and 3D analysis of microscopic images in the .tif file format. It supports generation of 3D networks showing the relationships between objects (or nodes) in three dimensional space, either based on their own proximity or connectivity via connecting objects such as nerves or blood vessels. In addition to these functionalities are several advanced 3D data processing algorithms, such as labeling of branched structures or abstraction of branched structures into networks. Note that nettracer3d uses segmented data, which can be segmented from other software such as ImageJ and imported into NetTracer3D, although it does offer its own segmentation via intensity and volumetric thresholding, or random forest machine learning segmentation. NetTracer3D currently has a fully functional GUI. To use the GUI, after installing the nettracer3d package via pip, enter the command 'nettracer3d' in your command prompt:
35
36
 
36
- #Start
37
-
38
- from nettracer3d import nettracer_gui
39
-
40
- nettracer_gui.run_gui()
41
-
42
- #End
43
37
 
44
38
  This gui is built from the PyQt6 package and therefore may not function on dockers or virtual envs that are unable to support PyQt6 displays. More advanced documentation (especially for the GUI) is coming down the line, but for now please see: https://drive.google.com/drive/folders/1fTkz3n4LN9_VxKRKC8lVQSlrz_wq0bVn?usp=drive_link
45
39
  for a user manual that provides older documentation.
@@ -0,0 +1,21 @@
1
+ nettracer3d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nettracer3d/community_extractor.py,sha256=8bRDJOfZhOFLtpkJVaDQrQ4O8wUywyr-EfVvW5fxyPs,31700
3
+ nettracer3d/hub_getter.py,sha256=KiNtxdajLkwB1ftslvrh1FE1Ch9ZCFEmHSEEotwR-To,8298
4
+ nettracer3d/modularity.py,sha256=V1f3s_vGd8EuVz27mzq6ycIGr0BWIpH7c7NU4QjgAHU,30247
5
+ nettracer3d/morphology.py,sha256=CsRWB0DY-vBBlKdF9IQwgfYYZswuE7n1Iu_Osxgmxnw,13042
6
+ nettracer3d/nettracer.py,sha256=x-4xjEJNWzTy5gKvpGAmRzlWD3Wx-lomH_ItqRNv9v8,207727
7
+ nettracer3d/nettracer_gui.py,sha256=1b5sFTCe3Jinc200vRTknGsY_Xm2rj-BG2CDyo-gHGU,324790
8
+ nettracer3d/network_analysis.py,sha256=MJBBjslA1k_R8ymid77U-qGSgzxFVfzGVQhE0IdhnbE,48046
9
+ nettracer3d/network_draw.py,sha256=F7fw6Pcf4qWOhdKwLmhwqWdschbDlHzwCVolQC9imeU,14117
10
+ nettracer3d/node_draw.py,sha256=BMiD_FrlOHeGD4AQZ_Emd152PfxFuMgGf2x4S0TOTnw,9752
11
+ nettracer3d/proximity.py,sha256=B1pmFegx5Wb0JKI5rvpILv2VRU09f6M2iljAQAqBja0,11059
12
+ nettracer3d/run.py,sha256=xYeaAc8FCx8MuzTGyL3NR3mK7WZzffAYAH23bNRZYO4,127
13
+ nettracer3d/segmenter.py,sha256=Inp3m6ajgR03zh_JyJt3Qntb-ZXmxuxVDBbmQnhb3pM,11619
14
+ nettracer3d/simple_network.py,sha256=fP1gkDdtQcHruEZpUdasKdZeVacoLOxKhR3bY0L1CAQ,15426
15
+ nettracer3d/smart_dilate.py,sha256=JALeAZ7LLU2psTuI5wlCZobtDx8I0CfihdoEblwfhOY,23520
16
+ nettracer3d-0.4.4.dist-info/LICENSE,sha256=gM207DhJjWrxLuEWXl0Qz5ISbtWDmADfjHp3yC2XISs,888
17
+ nettracer3d-0.4.4.dist-info/METADATA,sha256=4os3ofWdAzCFAfKt0LvFlUE4atoVKO26s0OKUq1NO7s,2884
18
+ nettracer3d-0.4.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
19
+ nettracer3d-0.4.4.dist-info/entry_points.txt,sha256=Nx1rr_0QhJXDBHAQg2vcqCzLMKBzSHfwy3xwGkueVyc,53
20
+ nettracer3d-0.4.4.dist-info/top_level.txt,sha256=zsYy9rZwirfCEOubolhee4TyzqBAL5gSUeFMzhFTX8c,12
21
+ nettracer3d-0.4.4.dist-info/RECORD,,
@@ -1,20 +0,0 @@
1
- nettracer3d/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- nettracer3d/community_extractor.py,sha256=8bRDJOfZhOFLtpkJVaDQrQ4O8wUywyr-EfVvW5fxyPs,31700
3
- nettracer3d/hub_getter.py,sha256=KiNtxdajLkwB1ftslvrh1FE1Ch9ZCFEmHSEEotwR-To,8298
4
- nettracer3d/modularity.py,sha256=V1f3s_vGd8EuVz27mzq6ycIGr0BWIpH7c7NU4QjgAHU,30247
5
- nettracer3d/morphology.py,sha256=wv7v06YUcn5lMyefcc_znQlXF5iDxvUdoc0fXOKlGTw,12982
6
- nettracer3d/nettracer.py,sha256=sFwHz-Ghoft3nNS3H691EcMTpoeLKT3I_X-6m6vXpyg,203264
7
- nettracer3d/nettracer_gui.py,sha256=GSTICp2V0MAwMpw6RcUrR0kdF721bpBOGMHnOK2LHLM,290039
8
- nettracer3d/network_analysis.py,sha256=MJBBjslA1k_R8ymid77U-qGSgzxFVfzGVQhE0IdhnbE,48046
9
- nettracer3d/network_draw.py,sha256=F7fw6Pcf4qWOhdKwLmhwqWdschbDlHzwCVolQC9imeU,14117
10
- nettracer3d/node_draw.py,sha256=BMiD_FrlOHeGD4AQZ_Emd152PfxFuMgGf2x4S0TOTnw,9752
11
- nettracer3d/proximity.py,sha256=KYs4QUbt1U79RLzTvt8BmrxeGVaeKOQ2brtzTjjA78c,11011
12
- nettracer3d/run.py,sha256=xYeaAc8FCx8MuzTGyL3NR3mK7WZzffAYAH23bNRZYO4,127
13
- nettracer3d/simple_network.py,sha256=fP1gkDdtQcHruEZpUdasKdZeVacoLOxKhR3bY0L1CAQ,15426
14
- nettracer3d/smart_dilate.py,sha256=howfO6Lw5PxNjkaOBSCjkmf7fyau_-_8iTct2mAuTAQ,22083
15
- nettracer3d-0.4.2.dist-info/LICENSE,sha256=gM207DhJjWrxLuEWXl0Qz5ISbtWDmADfjHp3yC2XISs,888
16
- nettracer3d-0.4.2.dist-info/METADATA,sha256=ffklpH2f0M7A5xOTDE9blsCn-yZnCXhe0GaBpgUQcEI,2894
17
- nettracer3d-0.4.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
18
- nettracer3d-0.4.2.dist-info/entry_points.txt,sha256=Nx1rr_0QhJXDBHAQg2vcqCzLMKBzSHfwy3xwGkueVyc,53
19
- nettracer3d-0.4.2.dist-info/top_level.txt,sha256=zsYy9rZwirfCEOubolhee4TyzqBAL5gSUeFMzhFTX8c,12
20
- nettracer3d-0.4.2.dist-info/RECORD,,