nettracer3d 0.4.7__tar.gz → 0.4.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nettracer3d-0.4.7/src/nettracer3d.egg-info → nettracer3d-0.4.9}/PKG-INFO +15 -15
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/pyproject.toml +15 -15
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/nettracer_gui.py +227 -77
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/segmenter.py +31 -28
- {nettracer3d-0.4.7 → nettracer3d-0.4.9/src/nettracer3d.egg-info}/PKG-INFO +15 -15
- nettracer3d-0.4.9/src/nettracer3d.egg-info/requires.txt +24 -0
- nettracer3d-0.4.7/src/nettracer3d.egg-info/requires.txt +0 -24
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/LICENSE +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/README.md +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/setup.cfg +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/__init__.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/community_extractor.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/hub_getter.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/modularity.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/morphology.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/nettracer.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/network_analysis.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/network_draw.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/node_draw.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/proximity.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/run.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/simple_network.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/smart_dilate.py +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d.egg-info/entry_points.txt +0 -0
- {nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d.egg-info/top_level.txt +0 -0
{nettracer3d-0.4.7/src/nettracer3d.egg-info → nettracer3d-0.4.9}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: nettracer3d
-Version: 0.4.7
+Version: 0.4.9
 Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
 Author-email: Liam McLaughlin <boom2449@gmail.com>
 Project-URL: User_Manual, https://drive.google.com/drive/folders/1fTkz3n4LN9_VxKRKC8lVQSlrz_wq0bVn?usp=drive_link
@@ -12,20 +12,20 @@ Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: numpy==1.26.4
-Requires-Dist: scipy
-Requires-Dist: scikit-image
-Requires-Dist: Pillow
-Requires-Dist: matplotlib
-Requires-Dist: networkx
-Requires-Dist: opencv-python-headless
-Requires-Dist: openpyxl
-Requires-Dist: pandas
-Requires-Dist: napari
-Requires-Dist: python-louvain
-Requires-Dist: tifffile
-Requires-Dist: qtrangeslider
-Requires-Dist: PyQt6
-Requires-Dist: scikit-learn
+Requires-Dist: scipy==1.14.1
+Requires-Dist: scikit-image==0.25.0
+Requires-Dist: Pillow==11.1.0
+Requires-Dist: matplotlib==3.9.2
+Requires-Dist: networkx==3.2.1
+Requires-Dist: opencv-python-headless==4.10.0.84
+Requires-Dist: openpyxl==3.1.2
+Requires-Dist: pandas==2.2.0
+Requires-Dist: napari==0.5.5
+Requires-Dist: python-louvain==0.16
+Requires-Dist: tifffile==2023.7.18
+Requires-Dist: qtrangeslider==0.1.5
+Requires-Dist: PyQt6==6.8.0
+Requires-Dist: scikit-learn==1.6.1
 Provides-Extra: cuda11
 Requires-Dist: cupy-cuda11x; extra == "cuda11"
 Provides-Extra: cuda12
{nettracer3d-0.4.7 → nettracer3d-0.4.9}/pyproject.toml

@@ -1,26 +1,26 @@
 [project]
 name = "nettracer3d"
-version = "0.4.7"
+version = "0.4.9"
 authors = [
     { name="Liam McLaughlin", email="boom2449@gmail.com" },
 ]
 description = "Scripts for intializing and analyzing networks from segmentations of three dimensional images."
 dependencies = [
     "numpy == 1.26.4",
-    "scipy",
-    "scikit-image",
-    "Pillow",
-    "matplotlib",
-    "networkx",
-    "opencv-python-headless",
-    "openpyxl",
-    "pandas",
-    "napari",
-    "python-louvain",
-    "tifffile",
-    "qtrangeslider",
-    "PyQt6",
-    "scikit-learn"
+    "scipy == 1.14.1",
+    "scikit-image == 0.25.0",
+    "Pillow == 11.1.0",
+    "matplotlib == 3.9.2",
+    "networkx == 3.2.1",
+    "opencv-python-headless == 4.10.0.84",
+    "openpyxl == 3.1.2",
+    "pandas == 2.2.0",
+    "napari == 0.5.5",
+    "python-louvain == 0.16",
+    "tifffile == 2023.7.18",
+    "qtrangeslider == 0.1.5",
+    "PyQt6 == 6.8.0",
+    "scikit-learn == 1.6.1"
 ]

 readme = "README.md"
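Both metadata files above show the same substantive change: 0.4.9 replaces the previously unpinned dependencies with exact version pins. For anyone checking whether an existing environment already matches the new pins, a small query against importlib.metadata is enough. This is an illustrative sketch only (the pin list is copied from the diff; the script itself is not part of nettracer3d):

# check_pins.py -- compare installed package versions against the 0.4.9 pins
from importlib.metadata import version, PackageNotFoundError

PINS = {
    "numpy": "1.26.4",
    "scipy": "1.14.1",
    "scikit-image": "0.25.0",
    "Pillow": "11.1.0",
    "matplotlib": "3.9.2",
    "networkx": "3.2.1",
    "opencv-python-headless": "4.10.0.84",
    "openpyxl": "3.1.2",
    "pandas": "2.2.0",
    "napari": "0.5.5",
    "python-louvain": "0.16",
    "tifffile": "2023.7.18",
    "qtrangeslider": "0.1.5",
    "PyQt6": "6.8.0",
    "scikit-learn": "1.6.1",
}

for name, pinned in PINS.items():
    try:
        installed = version(name)   # distribution version string, e.g. "1.14.1"
    except PackageNotFoundError:
        print(f"{name}: not installed (expected {pinned})")
        continue
    status = "OK" if installed == pinned else f"MISMATCH (expected {pinned})"
    print(f"{name}: {installed} {status}")

A mismatch simply means the installer would up- or downgrade that package when resolving nettracer3d 0.4.9.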
{nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/nettracer_gui.py

@@ -4,7 +4,7 @@ from PyQt6.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout,
                              QHBoxLayout, QSlider, QMenuBar, QMenu, QDialog,
                              QFormLayout, QLineEdit, QPushButton, QFileDialog,
                              QLabel, QComboBox, QMessageBox, QTableView, QInputDialog,
-                             QMenu, QTabWidget)
+                             QMenu, QTabWidget, QGroupBox)
 from PyQt6.QtCore import (QPoint, Qt, QAbstractTableModel, QTimer, QThread, pyqtSignal)
 import numpy as np
 import time
@@ -196,6 +196,17 @@ class ImageViewerWindow(QMainWindow):
         buttons_layout.addWidget(self.high_button)
         self.highlight = True

+        self.pen_button = QPushButton("🖊️")
+        self.pen_button.setCheckable(True)
+        self.pen_button.setFixedSize(40, 40)
+        self.pen_button.clicked.connect(self.toggle_brush_mode)
+        buttons_layout.addWidget(self.pen_button)
+
+        self.thresh_button = QPushButton("✏️")
+        self.thresh_button.setFixedSize(40, 40)
+        self.thresh_button.clicked.connect(self.show_thresh_dialog)
+        buttons_layout.addWidget(self.thresh_button)
+
         control_layout.addWidget(buttons_widget)

         self.preview = False #Whether in preview mode or not
@@ -1451,6 +1462,7 @@ class ImageViewerWindow(QMainWindow):
         self.zoom_mode = self.zoom_button.isChecked()
         if self.zoom_mode:
             self.pan_button.setChecked(False)
+            self.pen_button.setChecked(False)
             self.pan_mode = False
             self.brush_mode = False
             if self.machine_window is not None:
@@ -1468,6 +1480,7 @@ class ImageViewerWindow(QMainWindow):
         self.pan_mode = self.pan_button.isChecked()
         if self.pan_mode:
             self.zoom_button.setChecked(False)
+            self.pen_button.setChecked(False)
             self.zoom_mode = False
             self.brush_mode = False
             if self.machine_window is not None:
@@ -1479,6 +1492,17 @@ class ImageViewerWindow(QMainWindow):
         else:
             self.machine_window.toggle_brush_button()

+    def toggle_brush_mode(self):
+        """Toggle brush mode on/off"""
+        self.brush_mode = self.pen_button.isChecked()
+        if self.brush_mode:
+            self.pan_button.setChecked(False)
+            self.zoom_button.setChecked(False)
+            self.pan_mode = False
+            self.zoom_mode = False
+            self.update_brush_cursor()
+        else:
+            self.canvas.setCursor(Qt.CursorShape.ArrowCursor)


     def on_mpl_scroll(self, event):
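The hunks above keep the new pen button mutually exclusive with pan and zoom by unchecking the other two buttons inside each toggle handler. For comparison only, Qt can also delegate that exclusivity to an exclusive QButtonGroup; the following is a minimal sketch of that alternative pattern, not what nettracer_gui.py does:

# Minimal PyQt6 sketch: exclusive, checkable tool buttons via QButtonGroup.
import sys
from PyQt6.QtWidgets import QApplication, QWidget, QHBoxLayout, QPushButton, QButtonGroup

app = QApplication(sys.argv)
window = QWidget()
layout = QHBoxLayout(window)

group = QButtonGroup(window)
group.setExclusive(True)   # checking one button automatically unchecks the others

for label in ("Pan", "Zoom", "Pen"):
    button = QPushButton(label)
    button.setCheckable(True)
    group.addButton(button)
    layout.addWidget(button)

group.buttonToggled.connect(lambda btn, checked: print(btn.text(), "on" if checked else "off"))

window.show()
sys.exit(app.exec())

The manual approach in the diff has one advantage over an exclusive group: a tool can also be toggled fully off, which an exclusive QButtonGroup does not allow by default.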
@@ -1666,7 +1690,10 @@ class ImageViewerWindow(QMainWindow):
         x, y = int(event.xdata), int(event.ydata)
         self.last_paint_pos = (x, y)

-
+        if self.pen_button.isChecked():
+            channel = self.active_channel
+        else:
+            channel = 2

         # Paint at initial position
         self.paint_at_position(x, y, self.erase, channel)
@@ -1705,6 +1732,8 @@ class ImageViewerWindow(QMainWindow):

         if erase:
             val = 0
+        elif self.machine_window is None:
+            val = 255
         elif self.foreground:
             val = 1
         else:
@@ -1789,13 +1818,16 @@ class ImageViewerWindow(QMainWindow):

         x, y = int(event.xdata), int(event.ydata)

-
+        if self.pen_button.isChecked():
+            channel = self.active_channel
+        else:
+            channel = 2


-        if self.channel_data[
+        if self.channel_data[channel] is not None:
             current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
             current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
-            height, width = self.channel_data[
+            height, width = self.channel_data[channel][self.current_slice].shape

             if hasattr(self, 'last_paint_pos'):
                 last_x, last_y = self.last_paint_pos
@@ -1888,8 +1920,11 @@ class ImageViewerWindow(QMainWindow):

         if self.brush_mode:
             self.painting = False
-
-
+            try:
+                for i in self.restore_channels:
+                    self.channel_visible[i] = True
+            except:
+                pass
             current_xlim = self.ax.get_xlim() if hasattr(self, 'ax') and self.ax.get_xlim() != (0, 1) else None
             current_ylim = self.ax.get_ylim() if hasattr(self, 'ax') and self.ax.get_ylim() != (0, 1) else None
             self.update_display(preserve_zoom=(current_xlim, current_ylim))
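Read together, the painting hunks above route brush strokes as follows: the paint channel is the active channel when the pen button is checked and channel 2 (the machine-learning scribble layer) otherwise, while the paint value is 0 when erasing, 255 when no machine window is open, and 1 for foreground scribbles (the background value sits in the unchanged else branch, so it is not visible in this diff). A small pure-function restatement of that visible logic, with hypothetical names, just to make the branching explicit:

# Restatement of the channel/value selection visible in the hunks above (illustrative only).
# `background_value` stands in for the unchanged else-branch value not shown in the diff.
def brush_target(pen_checked, active_channel, erase, machine_window_open,
                 foreground, background_value):
    channel = active_channel if pen_checked else 2   # pen paints the active channel; ML scribbles go to channel 2
    if erase:
        value = 0
    elif not machine_window_open:
        value = 255                                  # plain pen drawing
    elif foreground:
        value = 1                                    # foreground training scribble
    else:
        value = background_value                     # set by the branch the diff does not touch
    return channel, value

# Example: pen mode on channel 0, not erasing, segmenter window closed -> (0, 255)
print(brush_target(True, 0, False, False, True, None))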
@@ -2223,6 +2258,8 @@ class ImageViewerWindow(QMainWindow):
         skeletonize_action.triggered.connect(self.show_skeletonize_dialog)
         watershed_action = image_menu.addAction("Watershed")
         watershed_action.triggered.connect(self.show_watershed_dialog)
+        invert_action = image_menu.addAction("Invert")
+        invert_action.triggered.connect(self.show_invert_dialog)
         z_proj_action = image_menu.addAction("Z Project")
         z_proj_action.triggered.connect(self.show_z_dialog)

@@ -2254,8 +2291,6 @@ class ImageViewerWindow(QMainWindow):
         idoverlay_action = overlay_menu.addAction("Create ID Overlay")
         idoverlay_action.triggered.connect(self.show_idoverlay_dialog)
         searchoverlay_action = overlay_menu.addAction("Show Search Regions")
-        white_action = overlay_menu.addAction("White Background Overlay")
-        white_action.triggered.connect(self.show_white_dialog)
         searchoverlay_action.triggered.connect(self.show_search_dialog)
         shuffle_action = overlay_menu.addAction("Shuffle")
         shuffle_action.triggered.connect(self.show_shuffle_dialog)
@@ -2389,6 +2424,11 @@ class ImageViewerWindow(QMainWindow):
         dialog = WatershedDialog(self)
         dialog.exec()

+    def show_invert_dialog(self):
+        """Show the watershed parameter dialog."""
+        dialog = InvertDialog(self)
+        dialog.exec()
+
     def show_z_dialog(self):
         """Show the z-proj dialog."""
         dialog = ZDialog(self)
@@ -2431,6 +2471,9 @@ class ImageViewerWindow(QMainWindow):

     def show_thresh_dialog(self):
         """Show threshold dialog"""
+        if self.machine_window is not None:
+            return
+
         dialog = ThresholdDialog(self)
         dialog.exec()

@@ -2532,11 +2575,6 @@ class ImageViewerWindow(QMainWindow):
         dialog = SearchOverlayDialog(self)
         dialog.exec()

-    def show_white_dialog(self):
-        """Show the white dialog"""
-        dialog = WhiteDialog(self)
-        dialog.exec()
-
     def show_shuffle_dialog(self):
         """Show the shuffle dialog"""
         dialog = ShuffleDialog(self)
@@ -4622,40 +4660,6 @@ class IdOverlayDialog(QDialog):

         self.accept()

-class WhiteDialog(QDialog):
-
-    def __init__(self, parent=None):
-
-        super().__init__(parent)
-        self.setWindowTitle("Generate White Overlay?")
-        self.setModal(True)
-
-        layout = QFormLayout(self)
-
-        # Add Run button
-        run_button = QPushButton("Generate (Will go to Overlay 2)")
-        run_button.clicked.connect(self.white_overlay)
-        layout.addWidget(run_button)
-
-    def white_overlay(self):
-
-        try:
-            if isinstance(my_network.nodes, np.ndarray) :
-                overlay = np.ones_like(my_network.nodes).astype(np.uint8) * 255
-            elif isinstance(my_network.edges, np.ndarray):
-                overlay = np.ones_like(my_network.edges).astype(np.uint8) * 255
-            elif isinstance(my_network.network_overlay, np.ndarray):
-                overlay = np.ones_like(my_network.network_overlay).astype(np.uint8) * 255
-
-            my_network.id_overlay = overlay
-
-            self.parent().load_channel(3, channel_data = my_network.id_overlay, data = True)
-
-            self.accept()
-
-        except Exception as e:
-            print(f"Error making white background: {e}")
-

 class ShuffleDialog(QDialog):

@@ -5820,6 +5824,11 @@ class MachineWindow(QMainWindow):

         layout.addLayout(form_layout)

+        if self.parent().pen_button.isChecked(): #Disable the pen mode if the user is in it because the segmenter pen forks it
+            self.parent().pen_button.click()
+
+        self.parent().pen_button.setEnabled(False)
+

         if self.parent().active_channel == 0:
             if self.parent().channel_data[0] is not None:
@@ -5848,56 +5857,85 @@ class MachineWindow(QMainWindow):

         self.parent().update_display()

-        # Set a reasonable default size
-        self.setMinimumWidth(
-        self.setMinimumHeight(
+        # Set a reasonable default size for the window
+        self.setMinimumWidth(600)  # Increased to accommodate grouped buttons
+        self.setMinimumHeight(500)

-        # Create
-
-
+        # Create main layout container
+        main_widget = QWidget()
+        main_layout = QVBoxLayout(main_widget)

-        #
+        # Group 1: Drawing tools (Brush + Foreground/Background)
+        drawing_group = QGroupBox("Drawing Tools")
+        drawing_layout = QHBoxLayout()
+
+        # Brush button
         self.brush_button = QPushButton("🖌️")
         self.brush_button.setCheckable(True)
         self.brush_button.setFixedSize(40, 40)
         self.brush_button.clicked.connect(self.toggle_brush_mode)
-        form_layout.addWidget(self.brush_button)
         self.brush_button.click()

+        # Foreground/Background buttons in their own horizontal layout
+        fb_layout = QHBoxLayout()
         self.fore_button = QPushButton("Foreground")
         self.fore_button.setCheckable(True)
         self.fore_button.setChecked(True)
         self.fore_button.clicked.connect(self.toggle_foreground)
-        form_layout.addWidget(self.fore_button)

         self.back_button = QPushButton("Background")
         self.back_button.setCheckable(True)
         self.back_button.setChecked(False)
         self.back_button.clicked.connect(self.toggle_background)
-        form_layout.addWidget(self.back_button)

+        fb_layout.addWidget(self.fore_button)
+        fb_layout.addWidget(self.back_button)
+
+        drawing_layout.addWidget(self.brush_button)
+        drawing_layout.addLayout(fb_layout)
+        drawing_group.setLayout(drawing_layout)
+
+        # Group 2: Processing Options (GPU)
+        processing_group = QGroupBox("Processing Options")
+        processing_layout = QHBoxLayout()
         self.GPU = QPushButton("GPU")
         self.GPU.setCheckable(True)
         self.GPU.setChecked(False)
         self.GPU.clicked.connect(self.toggle_GPU)
-        form_layout.addWidget(self.GPU)
         self.use_gpu = False
-
-
-
-
-
-
-
-
-
+        processing_layout.addWidget(self.GPU)
+        processing_group.setLayout(processing_layout)
+
+        # Group 3: Training Options
+        training_group = QGroupBox("Training")
+        training_layout = QVBoxLayout()
+        train_quick = QPushButton("Train Quick Model")
+        train_quick.clicked.connect(lambda: self.train_model(speed=True))
+        train_detailed = QPushButton("Train More Detailed Model")
+        train_detailed.clicked.connect(lambda: self.train_model(speed=False))
+        training_layout.addWidget(train_quick)
+        training_layout.addWidget(train_detailed)
+        training_group.setLayout(training_layout)
+
+        # Group 4: Segmentation Options
+        segmentation_group = QGroupBox("Segmentation")
+        segmentation_layout = QVBoxLayout()
         seg_button = QPushButton("Preview Segment")
         seg_button.clicked.connect(self.start_segmentation)
-        form_layout.addRow(seg_button)
-
         full_button = QPushButton("Segment All")
         full_button.clicked.connect(self.segment)
-
+        segmentation_layout.addWidget(seg_button)
+        segmentation_layout.addWidget(full_button)
+        segmentation_group.setLayout(segmentation_layout)
+
+        # Add all groups to main layout
+        main_layout.addWidget(drawing_group)
+        main_layout.addWidget(processing_group)
+        main_layout.addWidget(training_group)
+        main_layout.addWidget(segmentation_group)
+
+        # Set the main widget as the central widget
+        self.setCentralWidget(main_widget)

         self.trained = False

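This hunk replaces the flat form_layout-based MachineWindow controls with four QGroupBox sections stacked in a QVBoxLayout that becomes the window's central widget. A stripped-down, runnable sketch of that layout pattern follows; the button names are placeholders rather than the actual MachineWindow attributes:

# Minimal PyQt6 sketch of the grouped-controls layout used by the new MachineWindow.
import sys
from PyQt6.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout,
                             QHBoxLayout, QGroupBox, QPushButton)

class GroupedWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        main_widget = QWidget()
        main_layout = QVBoxLayout(main_widget)

        # One QGroupBox per section, each with its own inner layout.
        sections = [("Drawing Tools", ["Brush", "Foreground", "Background"]),
                    ("Processing Options", ["GPU"]),
                    ("Training", ["Train Quick Model", "Train More Detailed Model"]),
                    ("Segmentation", ["Preview Segment", "Segment All"])]
        for title, labels in sections:
            group = QGroupBox(title)
            inner = QHBoxLayout()
            for label in labels:
                inner.addWidget(QPushButton(label))
            group.setLayout(inner)
            main_layout.addWidget(group)

        self.setCentralWidget(main_widget)  # container widget becomes the central widget, as in the diff

if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = GroupedWindow()
    window.show()
    sys.exit(app.exec())

Grouping related buttons this way keeps each QGroupBox self-contained, so a section can be added or removed without re-flowing the rest of the form.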
@@ -5964,8 +6002,19 @@ class MachineWindow(QMainWindow):
     def start_segmentation(self):

         self.kill_segmentation()
+        time.sleep(0.1)
+
         print("Beginning new segmentation...")

+        if self.parent().active_channel == 0:
+            if self.parent().channel_data[0] is not None:
+                active_data = self.parent().channel_data[0]
+            else:
+                active_data = self.parent().channel_data[1]
+
+        array3 = np.zeros_like(active_data)
+        self.parent().highlight_overlay = array3 #Clear this out for the segmenter to use
+
         if not self.trained:
             return
         else:
@@ -5977,7 +6026,25 @@ class MachineWindow(QMainWindow):
             self.segmenter.update_position(self.parent().current_slice, int((current_ylim[0] - current_ylim[1])/2), int((current_xlim[1] - current_xlim[0])/2))
             self.segmentation_worker.start()

+    def confirm_seg_dialog(self):
+        """Shows a dialog asking user to confirm segment all"""
+        msg = QMessageBox()
+        msg.setIcon(QMessageBox.Icon.Question)
+        msg.setText("Alert")
+        msg.setInformativeText("Segment Entire Image? (Window will freeze for processing)")
+        msg.setWindowTitle("Confirm")
+        msg.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
+        return msg.exec() == QMessageBox.StandardButton.Yes

+    def confirm_close_dialog(self):
+        """Shows a dialog asking user to confirm segment all"""
+        msg = QMessageBox()
+        msg.setIcon(QMessageBox.Icon.Question)
+        msg.setText("Alert")
+        msg.setInformativeText("Close Window?")
+        msg.setWindowTitle("Confirm")
+        msg.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
+        return msg.exec() == QMessageBox.StandardButton.Yes

     def update_display(self):
         if not hasattr(self, '_last_update'):
@@ -6006,6 +6073,8 @@ class MachineWindow(QMainWindow):
             current_ylim = self.parent().ax.get_ylim()
             self.parent().update_display(preserve_zoom=(current_xlim, current_ylim))
         self.kill_segmentation()
+        time.sleep(0.1)
+

     def kill_segmentation(self):
         if hasattr(self, 'segmentation_worker'):
@@ -6017,7 +6086,21 @@ class MachineWindow(QMainWindow):

         if not self.trained:
             return
+        elif not self.confirm_seg_dialog():
+            return
         else:
+            self.kill_segmentation()
+            time.sleep(0.1)
+
+            if self.parent().active_channel == 0:
+                if self.parent().channel_data[0] is not None:
+                    active_data = self.parent().channel_data[0]
+                else:
+                    active_data = self.parent().channel_data[1]
+
+            array3 = np.zeros_like(active_data)
+            self.parent().highlight_overlay = array3 #Clear this out for the segmenter to use
+
             print("Segmenting entire volume with model...")
             foreground_coords, background_coords = self.segmenter.segment_volume(gpu = self.use_gpu)
@@ -6041,12 +6124,20 @@ class MachineWindow(QMainWindow):
             print("Finished segmentation moved to Overlay 2. Use File -> Save(As) for disk saving.")

     def closeEvent(self, event):
-        if self.
-        self.
-
-
-
-
+        if self.parent().isVisible():
+            if self.confirm_close_dialog():
+
+                if self.brush_button.isChecked():
+                    self.silence_button()
+                    self.toggle_brush_mode()
+                self.parent().pen_button.setEnabled(True)
+                self.parent().brush_mode = False
+                self.parent().machine_window = None
+                self.kill_segmentation()
+                time.sleep(0.1)
+
+            else:
+                event.ignore()



@@ -6919,6 +7010,65 @@ class WatershedDialog(QDialog):
                 f"Error running watershed: {str(e)}"
             )

+class InvertDialog(QDialog):
+
+    def __init__(self, parent=None):
+        super().__init__(parent)
+        self.setWindowTitle("Invert Active Channel?")
+        self.setModal(True)
+
+        layout = QFormLayout(self)
+
+        # Add Run button
+        run_button = QPushButton("Run Invert")
+        run_button.clicked.connect(self.run_invert)
+        layout.addRow(run_button)
+
+    def run_invert(self):
+
+        try:
+
+            # Get the active channel data from parent
+            active_data = self.parent().channel_data[self.parent().active_channel]
+            if active_data is None:
+                raise ValueError("No active image selected")
+
+            try:
+                # Call binarize method with parameters
+                if active_data.dtype == 'uint8' or 'int8':
+                    num = 255
+                elif active_data.dtype == 'uint16' or 'int16':
+                    num = 65,535
+                elif active_data.dtype == 'uint32' or 'int32':
+                    num = 2,147,483,647
+
+                result = (num - active_data
+                    )
+
+                # Update both the display data and the network object
+                self.parent().channel_data[self.parent().active_channel] = result
+
+
+                # Update the corresponding property in my_network
+                setattr(my_network, network_properties[self.parent().active_channel], result)
+
+                self.parent().update_display()
+                self.accept()
+
+            except Exception as e:
+                QMessageBox.critical(
+                    self,
+                    "Error",
+                    f"Error running invert: {str(e)}"
+                )
+
+        except Exception as e:
+            QMessageBox.critical(
+                self,
+                "Error",
+                f"Error running invert: {str(e)}"
+            )
+
 class ZDialog(QDialog):

     def __init__(self, parent=None):
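One detail worth noting in run_invert as released: a chained test such as active_data.dtype == 'uint8' or 'int8' is always truthy in Python, so num ends up as 255 regardless of dtype, and literals written as 65,535 are tuples rather than integers. A dtype-aware inversion is more commonly derived from numpy's own integer limits; the following is a minimal independent sketch of that idea, not the package's code:

# Sketch: invert an integer image against the maximum value of its own dtype.
import numpy as np

def invert_image(image):
    """Return max_value - image, where max_value comes from the array's integer dtype."""
    if not np.issubdtype(image.dtype, np.integer):
        raise TypeError(f"expected an integer image, got {image.dtype}")
    max_value = np.iinfo(image.dtype).max   # 255 for uint8, 65535 for uint16, ...
    return max_value - image

# Example: a uint16 image inverts against 65535 rather than 255.
frame = np.array([[0, 1000], [30000, 65535]], dtype=np.uint16)
print(invert_image(frame))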
{nettracer3d-0.4.7 → nettracer3d-0.4.9}/src/nettracer3d/segmenter.py

@@ -11,6 +11,7 @@ import concurrent.futures
 from concurrent.futures import ThreadPoolExecutor
 import threading
 from scipy import ndimage
+import multiprocessing


 class InteractiveSegmenter:
@@ -18,11 +19,7 @@ class InteractiveSegmenter:
         self.image_3d = image_3d
         self.patterns = []

-
-            self.use_gpu = use_gpu and cp.cuda.is_available()
-        except:
-            self.use_gpu = False
-
+        self.use_gpu = use_gpu and cp.cuda.is_available()
         if self.use_gpu:
             print(f"Using GPU: {torch.cuda.get_device_name()}")
             self.image_gpu = cp.asarray(image_3d)
@@ -350,23 +347,6 @@ class InteractiveSegmenter:

         return result

-
-    def train(self):
-        """Train random forest on accumulated patterns"""
-        if len(self.patterns) < 2:
-            return
-
-        X = []
-        y = []
-        for pattern in self.patterns:
-            X.extend(pattern['features'])
-            y.extend([pattern['is_foreground']] * len(pattern['features']))
-
-        X = np.array(X)
-        y = np.array(y)
-        self.model.fit(X, y)
-        self.patterns = []
-
     def process_chunk_GPU(self, chunk_coords):
         """Process a chunk of coordinates using GPU acceleration"""
         coords = np.array(chunk_coords)
@@ -409,20 +389,40 @@ class InteractiveSegmenter:

         return foreground, background

-    def segment_volume(self, chunk_size=
+    def segment_volume(self, chunk_size=64, gpu=False):
         """Segment volume using parallel processing of chunks with vectorized chunk creation"""
+        #Change the above chunk size to None to have it auto-compute largest chunks (not sure which is faster, 64 seems reasonable in test cases)

         try:
             from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
         except:
-            print("Cannot find
+            print("Cannot find cuML, using CPU to segment instead...")
             gpu = False
-
-
+
         if self.feature_cache is None:
             with self.lock:
                 if self.feature_cache is None:
                     self.feature_cache = self.compute_feature_maps()
+
+        print("Chunking data...")
+
+        # Determine optimal chunk size based on number of cores if not specified
+        if chunk_size is None:
+            total_cores = multiprocessing.cpu_count()
+
+            # Calculate total volume and target volume per core
+            total_volume = np.prod(self.image_3d.shape)
+            target_volume_per_chunk = total_volume / total_cores
+
+            # Calculate chunk size that would give us roughly one chunk per core
+            # Using cube root since we want roughly equal sizes in all dimensions
+            chunk_size = int(np.cbrt(target_volume_per_chunk))
+
+            # Ensure chunk size is at least 32 (minimum reasonable size) and not larger than smallest dimension
+            chunk_size = max(32, min(chunk_size, min(self.image_3d.shape)))
+
+            # Round to nearest multiple of 32 for better memory alignment
+            chunk_size = ((chunk_size + 15) // 32) * 32

         # Calculate number of chunks in each dimension
         z_chunks = (self.image_3d.shape[0] + chunk_size - 1) // chunk_size
@@ -455,6 +455,9 @@ class InteractiveSegmenter:

         foreground_coords = set()
         background_coords = set()
+
+        print("Segmenting chunks...")
+

         with ThreadPoolExecutor() as executor:
             if gpu:
@@ -482,7 +485,7 @@ class InteractiveSegmenter:
         self.current_y = y


-    def get_realtime_chunks(self, chunk_size =
+    def get_realtime_chunks(self, chunk_size = 64):
         print("Computing some overhead...")


@@ -553,7 +556,7 @@ class InteractiveSegmenter:
         try:
             from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
         except:
-            print("Cannot find
+            print("Cannot find cuML, using CPU to segment instead...")
             gpu = False


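segment_volume now defaults to 64-voxel chunks but keeps an auto-sizing path for chunk_size=None: it targets roughly one chunk per CPU core by taking the cube root of the per-core volume, clamps the result to at least 32 and at most the smallest image dimension, and rounds toward a multiple of 32. A standalone sketch of that heuristic, restated from the added lines (the function name here is mine, not the package's):

# Sketch of the auto chunk-size heuristic added to segment_volume, restated for clarity.
import multiprocessing
import numpy as np

def auto_chunk_size(shape):
    total_cores = multiprocessing.cpu_count()
    total_volume = np.prod(shape)
    target_volume_per_chunk = total_volume / total_cores   # aim for roughly one chunk per core
    chunk_size = int(np.cbrt(target_volume_per_chunk))     # near-equal extent in z, y and x
    chunk_size = max(32, min(chunk_size, min(shape)))      # clamp: at least 32, at most the smallest dimension
    return ((chunk_size + 15) // 32) * 32                  # align toward a multiple of 32, as in the diff

print(auto_chunk_size((128, 1024, 1024)))   # 128 on an 8-core machine

Whether the fixed 64-voxel default or the auto-computed size is faster is left open by the diff's own comment, so 64 is best read as a tunable starting point rather than a hard requirement.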
{nettracer3d-0.4.7 → nettracer3d-0.4.9/src/nettracer3d.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: nettracer3d
-Version: 0.4.7
+Version: 0.4.9
 Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
 Author-email: Liam McLaughlin <boom2449@gmail.com>
 Project-URL: User_Manual, https://drive.google.com/drive/folders/1fTkz3n4LN9_VxKRKC8lVQSlrz_wq0bVn?usp=drive_link
@@ -12,20 +12,20 @@ Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: numpy==1.26.4
-Requires-Dist: scipy
-Requires-Dist: scikit-image
-Requires-Dist: Pillow
-Requires-Dist: matplotlib
-Requires-Dist: networkx
-Requires-Dist: opencv-python-headless
-Requires-Dist: openpyxl
-Requires-Dist: pandas
-Requires-Dist: napari
-Requires-Dist: python-louvain
-Requires-Dist: tifffile
-Requires-Dist: qtrangeslider
-Requires-Dist: PyQt6
-Requires-Dist: scikit-learn
+Requires-Dist: scipy==1.14.1
+Requires-Dist: scikit-image==0.25.0
+Requires-Dist: Pillow==11.1.0
+Requires-Dist: matplotlib==3.9.2
+Requires-Dist: networkx==3.2.1
+Requires-Dist: opencv-python-headless==4.10.0.84
+Requires-Dist: openpyxl==3.1.2
+Requires-Dist: pandas==2.2.0
+Requires-Dist: napari==0.5.5
+Requires-Dist: python-louvain==0.16
+Requires-Dist: tifffile==2023.7.18
+Requires-Dist: qtrangeslider==0.1.5
+Requires-Dist: PyQt6==6.8.0
+Requires-Dist: scikit-learn==1.6.1
 Provides-Extra: cuda11
 Requires-Dist: cupy-cuda11x; extra == "cuda11"
 Provides-Extra: cuda12
nettracer3d-0.4.9/src/nettracer3d.egg-info/requires.txt

@@ -0,0 +1,24 @@
+numpy==1.26.4
+scipy==1.14.1
+scikit-image==0.25.0
+Pillow==11.1.0
+matplotlib==3.9.2
+networkx==3.2.1
+opencv-python-headless==4.10.0.84
+openpyxl==3.1.2
+pandas==2.2.0
+napari==0.5.5
+python-louvain==0.16
+tifffile==2023.7.18
+qtrangeslider==0.1.5
+PyQt6==6.8.0
+scikit-learn==1.6.1
+
+[CUDA11]
+cupy-cuda11x
+
+[CUDA12]
+cupy-cuda12x
+
+[cupy]
+cupy
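The regenerated requires.txt mirrors the pinned runtime dependencies and keeps three optional extras (CUDA11, CUDA12, cupy) that pull in the matching cupy build; the PKG-INFO above spells the CUDA extras in lower case, which installers normalize to the same names. To see exactly which requirement strings an installed copy advertises, importlib.metadata can be queried directly; a short illustrative sketch, not part of the package:

# Sketch: print the requirement specifiers recorded for an installed nettracer3d,
# including the conditional extras (e.g. 'cupy-cuda12x; extra == "cuda12"').
from importlib.metadata import requires

for spec in requires("nettracer3d") or []:
    print(spec)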
nettracer3d-0.4.7/src/nettracer3d.egg-info/requires.txt

@@ -1,24 +0,0 @@
-numpy==1.26.4
-scipy
-scikit-image
-Pillow
-matplotlib
-networkx
-opencv-python-headless
-openpyxl
-pandas
-napari
-python-louvain
-tifffile
-qtrangeslider
-PyQt6
-scikit-learn
-
-[CUDA11]
-cupy-cuda11x
-
-[CUDA12]
-cupy-cuda12x
-
-[cupy]
-cupy
The remaining 20 files in the listing above (LICENSE, README.md, setup.cfg, the unchanged nettracer3d modules, and the other egg-info metadata files) have no changes between 0.4.7 and 0.4.9.