nettracer3d 1.1.0__py3-none-any.whl → 1.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nettracer3d might be problematic.
- nettracer3d/branch_stitcher.py +420 -0
- nettracer3d/filaments.py +1060 -0
- nettracer3d/morphology.py +9 -4
- nettracer3d/neighborhoods.py +99 -67
- nettracer3d/nettracer.py +390 -46
- nettracer3d/nettracer_gui.py +1795 -485
- nettracer3d/network_draw.py +9 -3
- nettracer3d/node_draw.py +41 -58
- nettracer3d/proximity.py +123 -2
- nettracer3d/smart_dilate.py +36 -0
- nettracer3d/tutorial.py +2874 -0
- {nettracer3d-1.1.0.dist-info → nettracer3d-1.2.3.dist-info}/METADATA +5 -3
- nettracer3d-1.2.3.dist-info/RECORD +29 -0
- nettracer3d-1.1.0.dist-info/RECORD +0 -26
- {nettracer3d-1.1.0.dist-info → nettracer3d-1.2.3.dist-info}/WHEEL +0 -0
- {nettracer3d-1.1.0.dist-info → nettracer3d-1.2.3.dist-info}/entry_points.txt +0 -0
- {nettracer3d-1.1.0.dist-info → nettracer3d-1.2.3.dist-info}/licenses/LICENSE +0 -0
- {nettracer3d-1.1.0.dist-info → nettracer3d-1.2.3.dist-info}/top_level.txt +0 -0
nettracer3d/tutorial.py
ADDED
@@ -0,0 +1,2874 @@
from PyQt6.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout,
                             QHBoxLayout, QPushButton, QLabel, QMenuBar, QMenu,
                             QTextEdit, QToolBar)
from PyQt6.QtCore import Qt, QRect, QRectF, QPoint, QPointF, QTimer, pyqtSignal
from PyQt6.QtGui import QPainter, QColor, QPen, QBrush, QFont, QPainterPath, QAction
import sys


class TutorialOverlay(QWidget):
    """Overlay widget that covers the entire window and highlights specific elements"""
    next_clicked = pyqtSignal()
    back_clicked = pyqtSignal()
    skip_clicked = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setAttribute(Qt.WidgetAttribute.WA_TransparentForMouseEvents, False)
        self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)
        self.setWindowFlags(Qt.WindowType.FramelessWindowHint)

        self.highlight_rect = None
        self.highlight_type = "circle"
        self.message = ""
        self.message_position = "bottom"
        self.show_back_button = False

        # Scroll support
        self.scroll_offset = 0
        self.max_scroll = 0
        self.needs_scroll = False
        self.scrollbar_rect = None

    def set_highlight(
        self,
        widget_or_rect,
        highlight_type="circle",
        message="",
        message_position="bottom",
        show_back_button=False
    ):
        """Set which widget/rect to highlight and what message to display.

        If highlight_type is None / "" / "none", no highlight will be drawn,
        even if widget_or_rect is provided.
        """
        # Normalize highlight type
        if not highlight_type or highlight_type == "none":
            self.highlight_type = "none"
            self.highlight_rect = None
        else:
            self.highlight_type = highlight_type

        if widget_or_rect is None:
            self.highlight_rect = None
        elif isinstance(widget_or_rect, QRect):
            # It's already a rect in global coordinates, convert to local
            local_pos = self.mapFromGlobal(widget_or_rect.topLeft())
            self.highlight_rect = QRect(local_pos, widget_or_rect.size())
        else:
            # It's a widget, get its global position and convert to overlay coordinates
            global_pos = widget_or_rect.mapToGlobal(QPoint(0, 0))
            local_pos = self.mapFromGlobal(global_pos)
            self.highlight_rect = QRect(local_pos, widget_or_rect.size())

        if self.message != message:
            self.scroll_offset = 0

        self.message = message
        self.message_position = message_position
        self.show_back_button = show_back_button
        self.update()

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.setRenderHint(QPainter.RenderHint.Antialiasing)

        # Create a path for the entire overlay
        full_path = QPainterPath()
        full_path.addRect(QRectF(self.rect()))

        # If there's a highlight, subtract it from the overlay
        if self.highlight_rect:
            highlight_path = QPainterPath()

            if self.highlight_type == "circle":
                # Create circular highlight
                center = self.highlight_rect.center()
                radius = max(self.highlight_rect.width(), self.highlight_rect.height()) // 2 + 20
                highlight_path.addEllipse(QPointF(center), radius, radius)
            else:  # rectangle
                padding = 10
                highlight_rect = self.highlight_rect.adjusted(-padding, -padding, padding, padding)
                highlight_path.addRoundedRect(QRectF(highlight_rect), 10, 10)

            # Subtract the highlight area from the full overlay
            full_path = full_path.subtracted(highlight_path)

        # Draw semi-transparent overlay (excluding the highlight area)
        painter.fillPath(full_path, QColor(0, 0, 0, 180))

        # Draw highlight border
        if self.highlight_rect:
            painter.setPen(QPen(QColor(0, 191, 255), 3))
            painter.setBrush(Qt.BrushStyle.NoBrush)

            if self.highlight_type == "circle":
                center = self.highlight_rect.center()
                radius = max(self.highlight_rect.width(), self.highlight_rect.height()) // 2 + 20
                painter.drawEllipse(QPointF(center), radius, radius)
            else:
                padding = 10
                highlight_rect = self.highlight_rect.adjusted(-padding, -padding, padding, padding)
                painter.drawRoundedRect(QRectF(highlight_rect), 10, 10)

            # Draw arrow pointing to the highlighted area
            if self.highlight_type == "circle":
                self._draw_arrow_to_circle(painter)

        # Draw message box
        if self.message:
            self._draw_message_box(painter)

    def _draw_arrow_to_circle(self, painter):
        """Draw an arrow pointing to the highlighted circle"""
        center = self.highlight_rect.center()
        radius = max(self.highlight_rect.width(), self.highlight_rect.height()) // 2 + 20

        # Determine arrow position based on message position
        if self.message_position == "bottom":
            arrow_start = QPoint(center.x(), center.y() + radius + 100)
            arrow_end = QPoint(center.x(), center.y() + radius)
        elif self.message_position == "top":
            arrow_start = QPoint(center.x(), center.y() - radius - 100)
            arrow_end = QPoint(center.x(), center.y() - radius)
        elif self.message_position == "left":
            arrow_start = QPoint(center.x() - radius - 100, center.y())
            arrow_end = QPoint(center.x() - radius, center.y())
        else:  # right
            arrow_start = QPoint(center.x() + radius + 100, center.y())
            arrow_end = QPoint(center.x() + radius, center.y())

        # Draw arrow line
        painter.setPen(QPen(QColor(255, 215, 0), 4))
        painter.drawLine(arrow_start, arrow_end)

        # Draw arrowhead
        self._draw_arrowhead(painter, arrow_start, arrow_end)

    def _draw_arrowhead(self, painter, start, end):
        """Draw an arrowhead at the end of a line"""
        # Calculate angle
        import math
        angle = math.atan2(end.y() - start.y(), end.x() - start.x())

        arrow_size = 15
        angle1 = angle + math.pi * 0.8
        angle2 = angle - math.pi * 0.8

        p1 = QPointF(end.x() + arrow_size * math.cos(angle1),
                     end.y() + arrow_size * math.sin(angle1))
        p2 = QPointF(end.x() + arrow_size * math.cos(angle2),
                     end.y() + arrow_size * math.sin(angle2))

        path = QPainterPath()
        path.moveTo(QPointF(end))
        path.lineTo(p1)
        path.lineTo(p2)
        path.closeSubpath()

        painter.setBrush(QBrush(QColor(255, 215, 0)))
        painter.setPen(Qt.PenStyle.NoPen)
        painter.drawPath(path)

    def _draw_message_box(self, painter):
        """Draw the tutorial message box"""
        # Calculate message box position
        padding = 20
        box_width = 350

        # Calculate required height based on text
        font = QFont("Arial", 11)
        painter.setFont(font)
        fm = painter.fontMetrics()

        # Calculate text area
        text_width = box_width - 30
        button_area_height = 50
        text_padding = 30

        # Calculate required height for text
        text_rect_temp = QRect(0, 0, text_width, 10000)
        bounding_rect = fm.boundingRect(text_rect_temp,
                                        Qt.TextFlag.TextWordWrap | Qt.AlignmentFlag.AlignTop,
                                        self.message)

        required_text_height = bounding_rect.height()
        ideal_box_height = required_text_height + text_padding + button_area_height

        # Calculate maximum available height
        max_available_height = self.height() - 2 * padding

        # Determine if we need scrolling
        self.needs_scroll = ideal_box_height > max_available_height

        if self.needs_scroll:
            box_height = max_available_height
            scrollbar_width = 20  # Made wider for arrow buttons
            actual_text_width = text_width - scrollbar_width - 10
            self.max_scroll = required_text_height - (box_height - text_padding - button_area_height)
            self.max_scroll = max(0, self.max_scroll + 100)  # Add extra scroll padding
        else:
            box_height = max(150, ideal_box_height)
            scrollbar_width = 0
            actual_text_width = text_width
            self.scroll_offset = 0
            self.max_scroll = 0

        # Position calculation
        if self.message_position == "beside":
            box_x = padding
            box_y = self.height() - box_height - padding
        elif self.message_position == "top_right":
            box_x = self.width() - box_width - padding
            box_y = padding
        elif self.message_position == 'top_left':
            box_x = padding
            box_y = padding
        elif self.highlight_rect:
            center = self.highlight_rect.center()
            radius = max(self.highlight_rect.width(), self.highlight_rect.height()) // 2 + 20

            if self.message_position == "bottom":
                box_x = center.x() - box_width // 2
                box_y = center.y() + radius + 120
            elif self.message_position == "top":
                box_x = center.x() - box_width // 2
                box_y = center.y() - radius - 120 - box_height
            elif self.message_position == "left":
                box_x = center.x() - radius - 120 - box_width
                box_y = center.y() - box_height // 2
            else:  # right
                box_x = center.x() + radius + 120
                box_y = center.y() - box_height // 2
        else:
            if self.message_position == "bottom":
                box_x = (self.width() - box_width) // 2
                box_y = self.height() - box_height - padding - 100
            elif self.message_position == "top":
                box_x = (self.width() - box_width) // 2
                box_y = padding + 100
            elif self.message_position == "left":
                box_x = padding + 50
                box_y = (self.height() - box_height) // 2
            elif self.message_position == "right":
                box_x = self.width() - box_width - padding - 50
                box_y = (self.height() - box_height) // 2
            else:
                box_x = (self.width() - box_width) // 2
                box_y = (self.height() - box_height) // 2

        # Ensure box stays within bounds
        box_x = max(padding, min(box_x, self.width() - box_width - padding))
        box_y = max(padding, min(box_y, self.height() - box_height - padding))

        message_rect = QRect(box_x, box_y, box_width, box_height)
        self.message_rect_for_scroll = message_rect

        # Draw message box background
        painter.setBrush(QBrush(QColor(255, 255, 255)))
        painter.setPen(QPen(QColor(0, 191, 255), 2))
        painter.drawRoundedRect(message_rect, 10, 10)

        # Set up clipping for text area
        text_rect = message_rect.adjusted(15, 15, -15 - scrollbar_width, -button_area_height)

        # Save painter state and set clipping
        painter.save()
        painter.setClipRect(text_rect)

        # Draw message text with scroll offset
        painter.setPen(QColor(0, 0, 0))
        scrolled_text_rect = text_rect.adjusted(0, -self.scroll_offset, 0, required_text_height)
        painter.drawText(scrolled_text_rect, Qt.TextFlag.TextWordWrap | Qt.AlignmentFlag.AlignTop, self.message)

        # Restore painter state
        painter.restore()

        # Draw scroll arrows if needed
        if self.needs_scroll:
            arrow_x = message_rect.right() - scrollbar_width - 8
            arrow_width = scrollbar_width
            arrow_height = 25

            # Up arrow
            up_arrow_y = text_rect.top()
            self.scroll_up_rect = QRect(arrow_x, up_arrow_y, arrow_width, arrow_height)

            # Draw up arrow button
            up_color = QColor(150, 150, 150) if self.scroll_offset == 0 else QColor(0, 191, 255)
            painter.setBrush(QBrush(up_color))
            painter.setPen(Qt.PenStyle.NoPen)
            painter.drawRoundedRect(self.scroll_up_rect, 3, 3)

            # Draw up arrow triangle
            painter.setBrush(QBrush(QColor(255, 255, 255)))
            arrow_path = QPainterPath()
            center_x = self.scroll_up_rect.center().x()
            arrow_path.moveTo(center_x, up_arrow_y + 8)
            arrow_path.lineTo(center_x - 5, up_arrow_y + 17)
            arrow_path.lineTo(center_x + 5, up_arrow_y + 17)
            arrow_path.closeSubpath()
            painter.drawPath(arrow_path)

            # Down arrow
            down_arrow_y = text_rect.bottom() - arrow_height
            self.scroll_down_rect = QRect(arrow_x, down_arrow_y, arrow_width, arrow_height)

            # Draw down arrow button
            down_color = QColor(150, 150, 150) if self.scroll_offset >= self.max_scroll else QColor(0, 191, 255)
            painter.setBrush(QBrush(down_color))
            painter.setPen(Qt.PenStyle.NoPen)
            painter.drawRoundedRect(self.scroll_down_rect, 3, 3)

            # Draw down arrow triangle
            painter.setBrush(QBrush(QColor(255, 255, 255)))
            arrow_path = QPainterPath()
            center_x = self.scroll_down_rect.center().x()
            arrow_path.moveTo(center_x, down_arrow_y + 17)
            arrow_path.lineTo(center_x - 5, down_arrow_y + 8)
            arrow_path.lineTo(center_x + 5, down_arrow_y + 8)
            arrow_path.closeSubpath()
            painter.drawPath(arrow_path)

        # Draw buttons
        button_width = 80
        button_height = 30
        button_spacing = 10

        if self.show_back_button:
            next_rect = QRect(message_rect.right() - button_width - 15,
                              message_rect.bottom() - button_height - 10,
                              button_width, button_height)

            skip_rect = QRect(next_rect.left() - button_width - button_spacing,
                              next_rect.top(),
                              button_width, button_height)

            back_rect = QRect(skip_rect.left() - button_width - button_spacing,
                              skip_rect.top(),
                              button_width, button_height)

            self.back_button_rect = back_rect

            painter.setBrush(QBrush(QColor(150, 150, 150)))
            painter.setPen(Qt.PenStyle.NoPen)
            painter.drawRoundedRect(back_rect, 5, 5)
            painter.setPen(QColor(255, 255, 255))
            painter.drawText(back_rect, Qt.AlignmentFlag.AlignCenter, "Back")
        else:
            next_rect = QRect(message_rect.right() - button_width - 15,
                              message_rect.bottom() - button_height - 10,
                              button_width, button_height)

            skip_rect = QRect(next_rect.left() - button_width - button_spacing,
                              next_rect.top(),
                              button_width, button_height)

            self.back_button_rect = None

        self.next_button_rect = next_rect
        self.skip_button_rect = skip_rect

        painter.setBrush(QBrush(QColor(0, 191, 255)))
        painter.setPen(Qt.PenStyle.NoPen)
        painter.drawRoundedRect(next_rect, 5, 5)
        painter.setPen(QColor(255, 255, 255))
        painter.drawText(next_rect, Qt.AlignmentFlag.AlignCenter, "Next")

        painter.setBrush(QBrush(QColor(200, 200, 200)))
        painter.setPen(Qt.PenStyle.NoPen)
        painter.drawRoundedRect(skip_rect, 5, 5)
        painter.setPen(QColor(0, 0, 0))
        painter.drawText(skip_rect, Qt.AlignmentFlag.AlignCenter, "Skip")

    def keyPressEvent(self, event):
        """Key press shortcuts for main class"""
        if event.key() == Qt.Key.Key_Space:
            self.next_clicked.emit()

    def mousePressEvent(self, event):
        """Handle clicks on the Next, Back, and Skip buttons or the scroll arrows; right-click also advances"""
        event.accept()

        # Check scroll up arrow
        if self.needs_scroll and hasattr(self, 'scroll_up_rect') and self.scroll_up_rect.contains(event.pos()):
            self.scroll_offset = max(0, self.scroll_offset - 30)
            self.update()
            return

        # Check scroll down arrow
        if self.needs_scroll and hasattr(self, 'scroll_down_rect') and self.scroll_down_rect.contains(event.pos()):
            self.scroll_offset = min(self.max_scroll, self.scroll_offset + 30)
            self.update()
            return

        # Check if clicking Skip button
        if hasattr(self, 'skip_button_rect') and self.skip_button_rect.contains(event.pos()):
            self.skip_clicked.emit()
        # Check if clicking Back button
        elif hasattr(self, 'back_button_rect') and self.back_button_rect and self.back_button_rect.contains(event.pos()):
            self.back_clicked.emit()
        elif hasattr(self, 'next_button_rect') and self.next_button_rect and self.next_button_rect.contains(event.pos()):
            self.next_clicked.emit()
        elif event.button() == Qt.MouseButton.RightButton:
            self.next_clicked.emit()
        # Optionally, any other click could also advance:
        #else:
        #    self.next_clicked.emit()

    def wheelEvent(self, event):
        if self.needs_scroll and hasattr(self, 'message_rect_for_scroll'):
            # Check if mouse is over the message box
            if self.message_rect_for_scroll.contains(event.position().toPoint()):
                delta = event.angleDelta().y()
                scroll_amount = delta // 120 * 20  # Scroll by 20 pixels per notch

                self.scroll_offset -= scroll_amount
                self.scroll_offset = max(0, min(self.scroll_offset, self.max_scroll))

                self.update()
                event.accept()
            else:
                event.ignore()
        else:
            event.ignore()

class TutorialManager:
    """Manages the tutorial steps and progression"""

    def __init__(self, main_window):
        self.main_window = main_window
        self.overlay = None
        self.current_step = 0
        self.steps = []

    def add_step(self, widget, message, highlight_type="circle", message_position="bottom",
                 action=None, pre_action=None):
        """
        Add a tutorial step

        Args:
            widget: The widget to highlight
            message: The message to display
            highlight_type: "circle" or "rect"
            message_position: "top", "bottom", "left", or "right"
            action: Callable to execute when moving to next step
            pre_action: Callable to execute before showing this step (e.g., to open a menu)
        """
        self.steps.append({
            'widget': widget,
            'message': message,
            'highlight_type': highlight_type,
            'message_position': message_position,
            'action': action,
            'pre_action': pre_action
        })

    def start(self):
        """Start the tutorial"""
        if not self.overlay:
            self.overlay = TutorialOverlay(self.main_window)
            self.overlay.next_clicked.connect(self.next_step)
            self.overlay.back_clicked.connect(self.previous_step)
            self.overlay.skip_clicked.connect(self.end_tutorial)

        self.current_step = 0
        self.overlay.setGeometry(self.main_window.rect())
        self.overlay.show()
        self.overlay.raise_()
        self.show_current_step()

    def show_current_step(self):
        """Display the current tutorial step"""
        if self.current_step >= len(self.steps):
            self.end_tutorial()
            return

        step = self.steps[self.current_step]

        # Execute pre-action if any (e.g., open menu)
        if step['pre_action']:
            step['pre_action']()
            # Small delay to let the action complete
            QTimer.singleShot(50, lambda: self._show_step_highlight(step))
        else:
            self._show_step_highlight(step)

    def _show_step_highlight(self, step):
        """Show the highlight for a step"""
        widget_or_rect = step['widget']() if callable(step['widget']) else step['widget']

        # Determine if back button should be shown (not on first step)
        show_back = self.current_step > 0

        if widget_or_rect is not None:
            self.overlay.set_highlight(
                widget_or_rect,
                step['highlight_type'],
                step['message'],
                step['message_position'],
                show_back_button=show_back
            )
        else:
            # If widget not found, show message only
            self.overlay.set_highlight(None, "rect", step['message'], step['message_position'], show_back_button=show_back)

    def next_step(self):
        """Move to the next tutorial step"""
        step = self.steps[self.current_step]

        # Execute step action if any
        if step['action']:
            step['action']()

        self.current_step += 1

        if self.current_step < len(self.steps):
            self.show_current_step()
        else:
            self.end_tutorial()

    def previous_step(self):
        """Move to the previous tutorial step"""
        if self.current_step > 0:
            # Execute cleanup action from current step if going back
            step = self.steps[self.current_step]
            if step['action']:
                step['action']()

            self.current_step -= 1
            self.show_current_step()

    def end_tutorial(self):
        """End the tutorial"""
        if self.overlay:
            self.overlay.hide()
            self.overlay.deleteLater()
            self.overlay = None
        self.current_step = 0
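

# --- Editor's illustrative sketch (not part of the released tutorial.py) ------
# Minimal wiring of the two classes above, to show how a tour is assembled:
# TutorialManager owns a TutorialOverlay and steps it through a list of
# (widget, message) pairs. The QLabel standing in for the application canvas
# and the function name _example_minimal_tour are hypothetical; the real steps
# live in the setup_* functions further down.
def _example_minimal_tour():
    app = QApplication.instance() or QApplication(sys.argv)
    win = QMainWindow()
    win.setCentralWidget(QLabel("canvas placeholder"))
    win.resize(800, 600)
    win.show()

    tour = TutorialManager(win)
    # Highlight a widget with a rectangle and a caption below it
    tour.add_step(win.centralWidget(), "This is the canvas.",
                  highlight_type="rect", message_position="bottom")
    # A message-only step: passing highlight_type=None suppresses the highlight
    tour.add_step(None, "A message-only step with no highlight.",
                  highlight_type=None, message_position="bottom")
    tour.start()  # Next/Back/Skip on the overlay drive next_step()/previous_step()/end_tutorial()
    return app, win, tour  # caller would then run app.exec()
# ------------------------------------------------------------------------------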


class MenuHelper:
    """Helper class for interacting with menus programmatically"""

    @staticmethod
    def open_menu(window, menu_name):
        """Open a menu by name"""
        menubar = window.menuBar()
        for action in menubar.actions():
            if action.text() == menu_name:
                menu = action.menu()
                if menu:
                    # Get the geometry of this specific action to position correctly
                    action_rect = menubar.actionGeometry(action)
                    popup_pos = window.mapToGlobal(menubar.mapToParent(action_rect.bottomLeft()))
                    menu.popup(popup_pos)
                    return menu
        return None

    @staticmethod
    def close_menu(window, menu_name):
        """Close a menu by name"""
        menubar = window.menuBar()
        for action in menubar.actions():
            if action.text() == menu_name:
                menu = action.menu()
                if menu:
                    menu.hide()
                return

    @staticmethod
    def get_menu(window, menu_name):
        """Get a menu object by name"""
        menubar = window.menuBar()
        for action in menubar.actions():
            if action.text() == menu_name:
                return action.menu()
        return None

    @staticmethod
    def open_submenu(menu, submenu_name):
        """Open a submenu within a menu"""
        if not menu:
            return None
        for action in menu.actions():
            if action.text() == submenu_name:
                submenu = action.menu()
                if submenu:
                    # Get the geometry of this specific action in the parent menu
                    action_rect = menu.actionGeometry(action)
                    # Position submenu to the right of the parent menu item
                    popup_pos = menu.mapToGlobal(action_rect.topRight())
                    submenu.popup(popup_pos)
                    return submenu
        return None

    @staticmethod
    def trigger_action(menu, action_name):
        """Trigger a menu action by name"""
        if not menu:
            return False
        for action in menu.actions():
            if action.text() == action_name:
                action.trigger()
                return True
        return False

    @staticmethod
    def click_menu_item(window, menu_name, item_name):
        """Click a menu item by menu and item name"""
        menu = MenuHelper.open_menu(window, menu_name)
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.trigger_action(menu, item_name))
            return True
        return False

    @staticmethod
    def click_submenu_item(window, menu_name, submenu_name, item_name):
        """Click a submenu item"""
        menu = MenuHelper.open_menu(window, menu_name)
        if menu:
            def trigger_submenu():
                submenu = MenuHelper.open_submenu(menu, submenu_name)
                if submenu:
                    QTimer.singleShot(100, lambda: MenuHelper.trigger_action(submenu, item_name))
            QTimer.singleShot(100, trigger_submenu)
            return True
        return False

    @staticmethod
    def get_action_rect(menu, action_name, window):
        """Get the global rect for a menu action to highlight it"""
        if not menu:
            return None
        for action in menu.actions():
            if action.text() == action_name:
                # Get the action's geometry within the menu
                action_rect = menu.actionGeometry(action)
                # Convert to global coordinates
                global_pos = menu.mapToGlobal(action_rect.topLeft())
                # Create a QRect in global coordinates
                return QRect(global_pos, action_rect.size())
        return None

    @staticmethod
    def get_submenu_action_rect(parent_menu, submenu_name, action_name, window):
        """Get the global rect for an action within a submenu"""
        submenu = None
        for action in parent_menu.actions():
            if action.text() == submenu_name:
                submenu = action.menu()
                break

        if submenu:
            return MenuHelper.get_action_rect(submenu, action_name, window)
        return None

    @staticmethod
    def create_menu_step_rect_getter(window, menu_name):
        """Create a function that gets a menu item rect for tutorial highlighting"""
        def get_menu_rect():
            menubar = window.menuBar()
            for action in menubar.actions():
                if action.text() == menu_name:
                    action_rect = menubar.actionGeometry(action)
                    global_pos = window.mapToGlobal(menubar.mapToParent(action_rect.topLeft()))
                    return QRect(global_pos, action_rect.size())
            # Fallback
            menubar_rect = menubar.rect()
            global_pos = window.mapToGlobal(menubar.mapToParent(menubar_rect.topLeft()))
            return QRect(global_pos.x() + 50, global_pos.y(), 100, menubar_rect.height())
        return get_menu_rect

    @staticmethod
    def create_submenu_item_rect_getter(window, menu_name, submenu_name):
        """Create a function that gets a submenu item rect for tutorial highlighting"""
        def get_submenu_rect():
            menu = MenuHelper.get_menu(window, menu_name)
            if menu and menu.isVisible():
                rect = MenuHelper.get_action_rect(menu, submenu_name, window)
                if rect:
                    return rect
            # Fallback
            menubar = window.menuBar()
            menubar_rect = menubar.rect()
            global_pos = window.mapToGlobal(menubar.mapToParent(menubar_rect.topLeft()))
            return QRect(global_pos.x() + 150, global_pos.y(), 100, menubar_rect.height())
        return get_submenu_rect

    @staticmethod
    def create_submenu_action_rect_getter(window, menu_name, submenu_name, action_name):
        """Create a function that gets a submenu action rect for tutorial highlighting"""
        def get_action_rect():
            menu = MenuHelper.get_menu(window, menu_name)
            if menu and menu.isVisible():
                submenu = None
                for action in menu.actions():
                    if action.text() == submenu_name:
                        submenu = action.menu()
                        break

                if submenu and submenu.isVisible():
                    rect = MenuHelper.get_action_rect(submenu, action_name, window)
                    if rect:
                        return rect
            # Fallback
            menubar = window.menuBar()
            menubar_rect = menubar.rect()
            global_pos = window.mapToGlobal(menubar.mapToParent(menubar_rect.topLeft()))
            return QRect(global_pos.x() + 250, global_pos.y() + 100, 150, 30)
        return get_action_rect

    @staticmethod
    def create_dialog_opener(window, tutorial, dialog_method_name, dialog_class_name, store_attr_name,
                             widget_type=None, *method_args, **method_kwargs):
        """Create functions to open a dialog/window and store its reference

        Args:
            widget_type: The base class to check for (QDialog, QMainWindow, etc.).
                If None, will accept any QWidget.
        """
        def open_dialog():
            # Clear any existing reference to the old dialog/window
            if hasattr(tutorial, store_attr_name):
                old_widget = getattr(tutorial, store_attr_name)
                if old_widget:
                    try:
                        old_widget.close()
                        old_widget.deleteLater()
                    except Exception:
                        pass
                setattr(tutorial, store_attr_name, None)

            # Close any open menus first
            for action in window.menuBar().actions():
                menu = action.menu()
                if menu:
                    menu.hide()

            # Open the dialog/window with provided args/kwargs
            QTimer.singleShot(200, lambda: getattr(window, dialog_method_name)(*method_args, **method_kwargs))
            # Store reference
            QTimer.singleShot(300, lambda: store_dialog())

        def store_dialog():
            from PyQt6.QtWidgets import QDialog, QMainWindow, QWidget

            # Determine what type to check for
            if widget_type is None:
                check_type = QWidget
            else:
                check_type = widget_type

            # Find the most recently created widget that matches and is visible
            matching_widgets = []
            for child in window.children():
                if child.__class__.__name__ == dialog_class_name and isinstance(child, check_type):
                    if child.isVisible():
                        matching_widgets.append(child)

            if matching_widgets:
                setattr(tutorial, store_attr_name, matching_widgets[-1])
            else:
                QTimer.singleShot(200, store_dialog)

        return open_dialog, store_dialog

    @staticmethod
    def create_widget_getter(tutorial, dialog_attr, widget_attr):
        """
        Create a function that gets a widget from a dialog for highlighting

        Args:
            tutorial: Tutorial manager instance
            dialog_attr: Dialog attribute name (e.g., 'dilate_dialog')
            widget_attr: Widget attribute name (e.g., 'mode_selector')

        Returns:
            Function that returns the widget or None
        """
        def getter():
            if hasattr(tutorial, dialog_attr):
                dialog = getattr(tutorial, dialog_attr)
                if dialog and hasattr(dialog, widget_attr):
                    return getattr(dialog, widget_attr)
            return None
        return getter

    @staticmethod
    def create_widget_interaction(tutorial, dialog_attr, widget_attr, interaction, delay=0):
        """
        Create an action that performs an interaction on a widget

        Args:
            tutorial: Tutorial manager instance
            dialog_attr: Dialog attribute name (e.g., 'dilate_dialog')
            widget_attr: Widget attribute name (e.g., 'mode_selector')
            interaction: Interaction string (e.g., 'showPopup()', 'setText("5")', 'click()')
            delay: Optional delay in ms before executing interaction

        Returns:
            Function that performs the interaction

        Example:
            action=MenuHelper.create_widget_interaction(
                tutorial, 'dilate_dialog', 'mode_selector', 'showPopup()', delay=100
            )
        """
        def action():
            if hasattr(tutorial, dialog_attr):
                dialog = getattr(tutorial, dialog_attr)
                if dialog and hasattr(dialog, widget_attr):
                    if interaction == 'close()':
                        dialog.close()
                        return
                    widget = getattr(dialog, widget_attr)

                    def do_interaction():
                        # Special handling for click() on buttons
                        if interaction == 'click()':
                            def blink_sequence():
                                widget.click()  # Turn off
                                QTimer.singleShot(200, lambda: widget.click())  # Turn on
                                QTimer.singleShot(400, lambda: widget.click())  # Turn off
                                QTimer.singleShot(600, lambda: widget.click())  # Turn on (final state)
                            from PyQt6.QtWidgets import QPushButton
                            # Check if it's a checkable button that's already checked
                            if isinstance(widget, QPushButton) and widget.isCheckable() and widget.isChecked():
                                # Blink effect: toggle off and on multiple times to draw attention
                                blink_sequence()
                            else:
                                # Click once, then run the blink sequence to draw attention
                                widget.click()
                                blink_sequence()
                        elif interaction.startswith('setText'):
                            exec(f'widget.{interaction}', {'widget': widget})
                            if interaction.endswith('("")'):
                                widget.deselect()
                            else:
                                widget.selectAll()
                        else:
                            # Execute the interaction on the widget for all other cases
                            # Pass 'widget' in the exec namespace so the expression can access it
                            exec(f'widget.{interaction}', {'widget': widget})

                    if delay > 0:
                        QTimer.singleShot(delay, do_interaction)
                    else:
                        do_interaction()

        return action


def setup_start_tutorial(window):
    """
    Set up the introductory tutorial for NetTracer3D

    Args:
        window: ImageViewerWindow instance from nettracer_gui

    Returns:
        TutorialManager instance
    """
    tutorial = TutorialManager(window)

    # Step 1: Welcome
    tutorial.add_step(
        window.canvas,
        "Welcome to NetTracer3D! This tutorial will give you a basic overview of this application. Click 'Next' or use Right-Click to continue.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "This program is designed for analysis of two- or three-dimensional images, such as those acquired via microscopy or medical imaging.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "The major form of analysis is done by creating undirected networks between objects of interest, called nodes. These can be biological structures such as cells or functional tissue units.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "Analysis can also be done on more direct measures of morphology or spatial arrangement, such as analyzing object measures like volumes or making clustering heatmaps.",
        highlight_type=None,
        message_position="bottom"
    )

    # Threshold Tool
    tutorial.add_step(
        window.thresh_button,
        "Any quantifications need to be done on segmented data. A segmented image is one where all your objects of interest have either been assigned a binary value (i.e. 1 or 255), or assigned a discrete integer label (i.e. each cell contains the value 1, 2, 3, etc.). Raw images should be segmented first, either here or with another software.",
        highlight_type="circle",
        message_position="top"
    )

    tutorial.add_step(
        window.canvas,
        "When it comes to making networks, there are three major modalities that NetTracer3D offers.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "The first is the 'connectivity network', where your node objects are connected via a secondary structure, deemed 'edges'. For example, we can evaluate how groups of segmented cell aggregates are connected via vasculature.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.channel_buttons[0],
        "This would require providing two segmentations, one for your nodes.",
        highlight_type="rect",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[1],
        "And a second for your edges.",
        highlight_type="rect",
        message_position="top")

    tutorial.add_step(
        window.canvas,
        "The second modality is making networks directly from branched structures. First, you would provide a binary segmentation of a branching structure like a nerve or a blood vessel. Next, you can algorithmically label the branches in NetTracer3D.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.channel_buttons[1],
        "Typically you would queue your image to be branch-labeled in the edges channel.",
        highlight_type="rect",
        message_position="top")

    tutorial.add_step(
        window.channel_buttons[0],
        "But you can also load them into the nodes channel, although note whatever is in the 'edges' channel takes priority. This is because the program has to actually make nodes at the branchpoints of your edges, so it temporarily treats branches like edges.",
        highlight_type="rect",
        message_position="top"
    )

    tutorial.add_step(
        window.canvas,
        "Labeled branches can be turned into two types of networks. The first way is to connect the branchpoints. The second is to connect the branches themselves, just based on what other branches they come off of.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "The final modality is making networks based on proximity. This is an option to evaluate spatial clusters in your image, for example, deciphering what sort of groups a set of cells are arranged in. This would be an ideal way to analyze a multiplexed image with a lot of different channels bearing cellular fluorescent labels, for example.",
        highlight_type=None,
        message_position="bottom"
    )

    tutorial.add_step(
        None,
        "Networks can be directly quantified, but there are still many more options for direct morphological/spatial analysis, and for making interesting visualizations!",
        message_position="bottom"
    )

    def open_to_properties():
        menu = MenuHelper.open_menu(window, "Image")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Properties"))

    tutorial.add_step(
        MenuHelper.create_submenu_item_rect_getter(window, "Image", "Properties"),
        "Your current session has several stored properties, some of which are available here.",
        highlight_type=None,
        message_position="beside",
        pre_action=open_to_properties,
        action=lambda: MenuHelper.close_menu(window, "Image")
    )

    # Open the Properties dialog
    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_properties_dialog", "PropertiesDialog", "properties_dialog"
    )

    tutorial.add_step(
        None,
        "Let's open the Properties menu to see all the options available. Click 'Next' to open it.",
        message_position="beside",
        action=open_dialog
    )

    tutorial.add_step(
        None,
        "A blue button means the property has data, an unselected button means it's empty. Deselecting any blue property and pressing Enter below will erase that property from the current session.",
        message_position="beside"
    )

    # Explain xy_scale field (and demonstrate interaction)
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'xy_scale'),
        "xy_scale affects how NetTracer3D interprets distances in the X and Y dimensions. If your image has anisotropic voxels (different spacing in X/Y vs Z), you may need to adjust this to compensate. Note that your data is always presumed to have an equal resolution in the xy plane itself.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'xy_scale', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'xy_scale', 'deselect()')
    )

    # Explain z_scale field
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'z_scale'),
        "z_scale adjusts the evaluation of distances in the Z dimension. Many microscopy images have a different Z step size than XY resolution, so you might set this differently than xy_scale.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'z_scale', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'z_scale', 'deselect()')
    )

    # Explain nodes button
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'nodes'),
        "This signifies your nodes channel has data.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'nodes', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'nodes', 'toggle()')
    )

    # Explain edges button
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'edges'),
        "This signifies your edges channel has data.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'edges', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'edges', 'toggle()')
    )

    # Explain overlay button
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'network_overlay'),
        "This signifies your overlay channel 1 has data (and the same applies to the second overlay below).",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'network_overlay', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'network_overlay', 'toggle()')
    )

    # Explain network button
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'network'),
        "This signifies your network has been calculated, or instead loaded into the current session.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'network', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'network', 'toggle()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'node_identities'),
        "This signifies your nodes have 'identities' associated with them, such as 'T-Cell'. Nodes with identities can be used to analyze how different types of nodes aggregate.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'node_identities', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'node_identities', 'toggle()')
    )

    tutorial.add_step(
        None,
        "Other properties include centroids for your nodes and edges, as well as community/neighborhood groupings for your nodes.",
        message_position="beside"
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'properties_dialog', 'report_button'),
        "Click on the report button to view a summary of these other properties in the upper right table.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'report_button', 'click()')
    )

    # Close dialog and finish
    def close_dialog():
        if hasattr(tutorial, 'properties_dialog') and tutorial.properties_dialog:
            tutorial.properties_dialog.close()
            tutorial.properties_dialog = None

    tutorial.add_step(
        None,
        "That's it for the Intro tutorial! Select the Basic Interface Tour next to see how to use the main GUI elements.",
        message_position="bottom",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'xy_scale', 'close()'),
        action=close_dialog
    )

    return tutorial
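

# --- Editor's illustrative sketch (not part of the released tutorial.py) ------
# The setup_* functions return a TutorialManager that the GUI launches on demand.
# How nettracer_gui.py actually hooks these up is not shown in this diff; the
# Help-menu wiring, action labels, and helper name below are hypothetical.
def _example_attach_tutorials(window):
    help_menu = window.menuBar().addMenu("Help")

    intro_action = QAction("Intro Tutorial", window)
    intro_action.triggered.connect(lambda: setup_start_tutorial(window).start())
    help_menu.addAction(intro_action)

    basics_action = QAction("Basic Interface Tour", window)
    basics_action.triggered.connect(lambda: setup_basics_tutorial(window).start())
    help_menu.addAction(basics_action)
# ------------------------------------------------------------------------------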


def setup_basics_tutorial(window):
    """
    Set up the basic interface tutorial for NetTracer3D

    Args:
        window: ImageViewerWindow instance from nettracer_gui

    Returns:
        TutorialManager instance
    """
    tutorial = TutorialManager(window)

    # Step 1: Welcome
    tutorial.add_step(
        window.canvas,
        "This tutorial will guide you through the main features of the GUI window. Click 'Next' or use 'Right-Click' to continue.",
        highlight_type="rect",
        message_position="bottom"
    )

    # Step 2: Canvas explanation
    tutorial.add_step(
        window.canvas,
        "This canvas is where your loaded images will render.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "Clicking a node or edge in this canvas will select it (if the nodes or edges channels are set as the 'active channel', respectively). Click and drag to select multiple objects. This is intended mainly for segmented, labeled data rather than interacting directly with raw images.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "Selected objects will be highlighted yellow and can be used for certain functions. Clicking a background value in an image (i.e. a voxel with value 0) will deselect your objects.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        window.canvas,
        "Use right click to interact with highlighted objects (i.e. delete them or merge them into one object), or to select objects algorithmically (for example, the neighbors of a node in your network).",
        highlight_type="rect",
        message_position="bottom"
    )

    # Active Channel Selector
    tutorial.add_step(
        window.active_channel_combo,
        "This dropdown lets you select which image channel is active for operations. Many of the 'process' functions will execute on the 'active image', instead of you having to select one each time. You can choose between Nodes, Edges, and either of the Overlay channels. To select nodes or edges when clicking on the canvas, you will also need to have them as the active image here.",
        highlight_type="rect",
        message_position="top"
    )

    # Scale Button
    tutorial.add_step(
        window.toggle_scale,
        "Click this to toggle a scale bar that shows distances in the canvas.",
        highlight_type="circle",
        message_position="top"
    )

    # Home/Reset View Button
    tutorial.add_step(
        window.reset_view,
        "Click this Home button to reset your view to the original zoom and position.",
        highlight_type="circle",
        message_position="top"
    )

    # Zoom Tool
    tutorial.add_step(
        window.zoom_button,
        "(Shortcut Z) Use the Zoom tool to zoom into specific areas of your image. Left click to zoom in, right click to zoom out. Click and drag to draw a rectangle and zoom into a specific area of your image.",
        highlight_type="circle",
        message_position="top"
    )

    # Pan Tool
    tutorial.add_step(
        window.pan_button,
        "(Shortcut middle mouse) The Pan tool lets you click and drag to move around your image when zoomed in.",
        highlight_type="circle",
        message_position="top"
    )

    # Highlight Toggle
    tutorial.add_step(
        window.high_button,
        "(Shortcut X) Toggle this to show/hide highlighting of selected nodes and edges in your network.",
        highlight_type="circle",
        message_position="top"
    )

    # Pen/Brush Tool
    tutorial.add_step(
        window.pen_button,
        "The Pen tool allows you to manually paint foreground regions into your data. Note this modifies data directly and is for making minor corrections of segmented data. Ctrl + Mousewheel will change the brush size.",
        highlight_type="circle",
        message_position="top"
    )

    # Pen/Brush Tool
    tutorial.add_step(
        window.pen_button,
        "Pressing 'D' in the pen tool will enable a pen that writes into multiple image planes at once. In this mode, the mousewheel will change how many planes are being written into. Pressing 'F' will enable a fill can.",
        highlight_type="circle",
        message_position="top"
    )

    # Threshold Tool
    tutorial.add_step(
        window.thresh_button,
        "Use this to open the threshold dialog to segment your images by intensity or volume; or instead to open the Machine Learning segmenter.",
        highlight_type="circle",
        message_position="top"
    )

    # Channel Buttons
    tutorial.add_step(
        window.channel_buttons[0],
        "These channel buttons let you toggle the visibility of different image layers. The '×' button next to each channel allows you to delete that channel's data.",
        highlight_type="rect",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[0],
        "The Nodes Channel is where you will typically load data that you want to convert into a network.",
        highlight_type="rect",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[1],
        "The Edges Channel is where you can load data for a secondary structure that you want to use to connect your nodes. For example, this might be an image showing blood vessels. However, this is not the only way to connect your nodes. Furthermore, either the nodes or the edges channels can be used as generic overlays for visualization or direct analysis if you are not particularly interested in using them for networks.",
        highlight_type="rect",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[2],
        "These Overlay Channels will show informative overlay outputs that NetTracer3D generates. They can also be loaded directly to visualize multiple channels stacked together.",
        highlight_type="rect",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[3],
        "These Overlay Channels will show informative overlay outputs that NetTracer3D generates. They can also be loaded directly to visualize multiple channels stacked together.",
        highlight_type="rect",
        message_position="top"
    )

    # Slice Slider
    tutorial.add_step(
        window.slice_slider,
        "(Shortcut - 'Shift + Mouse Wheel' or 'Ctrl + Shift + Mouse Wheel'). Use this slider to navigate through different Z-slices (depth) of your 3D image stack. The arrow buttons allow continuous scrolling.",
        highlight_type="rect",
        message_position="top"
    )

    # Data Tables
    tutorial.add_step(
        window.network_button,
        "Switch between Network and Selection views to see different data tables. The Network table shows all nodes/edges, while Selection shows only selected items.",
        highlight_type="rect",
        message_position="top"
    )

    # The actual table
    tutorial.add_step(
        window.network_table,
        "This table displays your network data. You can click rows to highlight corresponding elements in the image, and sort columns by clicking headers. Right click to export any tables in spreadsheet format, or in a format for a few other types of network analysis software.",
        highlight_type="rect",
        message_position="left"
    )

    tutorial.add_step(
        window.tabbed_data,
        "This table displays outputs of most of the quantifications NetTracer3D runs. Right click to export these as spreadsheets, to close all tables, or to use a table to threshold your nodes (possible with any two-column table quantifying the nodes with some parameter).",
        highlight_type="rect",
        message_position="left"
    )

    tutorial.add_step(
        window.load_button,
        "Use this to open a spreadsheet back into the upper right tables. You would mainly do this if you wanted to use it to threshold your nodes.",
|
|
1312
|
+
highlight_type="circle",
|
|
1313
|
+
message_position="bottom"
|
|
1314
|
+
)
|
|
1315
|
+
|
|
1316
|
+
tutorial.add_step(
|
|
1317
|
+
window.cam_button,
|
|
1318
|
+
"This button can be used to take a screenshot of what you currently see in your main canvas.",
|
|
1319
|
+
highlight_type="circle",
|
|
1320
|
+
message_position="bottom"
|
|
1321
|
+
)
|
|
1322
|
+
|
|
1323
|
+
tutorial.add_step(
|
|
1324
|
+
window.popup_button,
|
|
1325
|
+
"This button can be used to eject the canvas from the main window, to make it larger. Just click back in the main window to return it.",
|
|
1326
|
+
highlight_type="circle",
|
|
1327
|
+
message_position="bottom"
|
|
1328
|
+
)
|
|
1329
|
+
|
|
1330
|
+
# File Menu - show where to load data
|
|
1331
|
+
tutorial.add_step(
|
|
1332
|
+
lambda: window.menuBar(),
|
|
1333
|
+
"The File menu contains options to load images, save your work, and export data. Start by loading a TIF image stack to begin analyzing your network.",
|
|
1334
|
+
highlight_type="rect",
|
|
1335
|
+
message_position="bottom",
|
|
1336
|
+
pre_action=lambda: MenuHelper.open_menu(window, "File"),
|
|
1337
|
+
action=lambda: MenuHelper.close_menu(window, "File")
|
|
1338
|
+
)
|
|
1339
|
+
|
|
1340
|
+
tutorial.add_step(
|
|
1341
|
+
lambda: window.menuBar(),
|
|
1342
|
+
"The Analyze menu contains options to quantify your segmented data and networks.",
|
|
1343
|
+
highlight_type="rect",
|
|
1344
|
+
message_position="bottom",
|
|
1345
|
+
pre_action=lambda: MenuHelper.open_menu(window, "Analyze"),
|
|
1346
|
+
action=lambda: MenuHelper.close_menu(window, "Analyze")
|
|
1347
|
+
)
|
|
1348
|
+
|
|
1349
|
+
tutorial.add_step(
|
|
1350
|
+
lambda: window.menuBar(),
|
|
1351
|
+
"The Process menu contains functions to actually create your networks, and to alter your segmented data, such as via watershedding (seperate fused objects), improve your segmentations, labeling branches, and more.",
|
|
1352
|
+
highlight_type="rect",
|
|
1353
|
+
message_position="bottom",
|
|
1354
|
+
pre_action=lambda: MenuHelper.open_menu(window, "Process"),
|
|
1355
|
+
action=lambda: MenuHelper.close_menu(window, "Process")
|
|
1356
|
+
)
|
|
1357
|
+
|
|
1358
|
+
tutorial.add_step(
|
|
1359
|
+
lambda: window.menuBar(),
|
|
1360
|
+
"The Image menu contains ways to alter the visualization, such as changing channel color, brightness, creating informative overlays, and showing a 3D renders.",
|
|
1361
|
+
highlight_type="rect",
|
|
1362
|
+
message_position="bottom",
|
|
1363
|
+
pre_action=lambda: MenuHelper.open_menu(window, "Image"),
|
|
1364
|
+
action=lambda: MenuHelper.close_menu(window, "Image")
|
|
1365
|
+
)
|
|
1366
|
+
|
|
1367
|
+
tutorial.add_step(
|
|
1368
|
+
lambda: window.menuBar(),
|
|
1369
|
+
"The Help menu can be used to access the documentation, which contains an in-depth description of every available function. It can also be used to access the tutorial.",
|
|
1370
|
+
highlight_type="rect",
|
|
1371
|
+
message_position="bottom",
|
|
1372
|
+
pre_action=lambda: MenuHelper.open_menu(window, "Help"),
|
|
1373
|
+
action=lambda: MenuHelper.close_menu(window, "Help")
|
|
1374
|
+
)
|
|
1375
|
+
|
|
1376
|
+
# Completion
|
|
1377
|
+
tutorial.add_step(
|
|
1378
|
+
None,
|
|
1379
|
+
"That's it! You're ready to use NetTracer3D. Load an image from the File menu to get started, then use the tools to analyze your 3D network structures.",
|
|
1380
|
+
message_position="bottom"
|
|
1381
|
+
)
|
|
1382
|
+
|
|
1383
|
+
return tutorial
|
|
1384
|
+
|
|
1385
|
+
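
# Editor's note (sketch, not part of the original module): every tutorial in this file
# is assembled the same way -- create a TutorialManager for the window, then register
# steps with add_step(). Judging only from the calls above, add_step() takes the widget
# to highlight (or a callable that resolves a widget/rect, or None for a free-floating
# message), the message text, and the optional keywords highlight_type ("circle",
# "rect", or None), message_position, pre_action, and action. A hypothetical extra step
# would therefore look like the commented call below; `window.some_button` is a made-up
# attribute used purely for illustration.
#
#     tutorial.add_step(
#         window.some_button,
#         "Example message text for a custom step.",
#         highlight_type="circle",
#         message_position="top"
#     )
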
def setup_file_tutorial(window):
    """
    Set up the file saving/loading tutorial for NetTracer3D

    Args:
        window: ImageViewerWindow instance from nettracer_gui

    Returns:
        TutorialManager instance
    """
    tutorial = TutorialManager(window)

    # Step 1: Welcome
    tutorial.add_step(
        None,
        "This tutorial will guide you through saving and loading data.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "File"),
        "The File menu contains the options for saving/loading your data. This includes all images and properties.",
        highlight_type="rect",
        message_position="beside",
        pre_action=lambda: MenuHelper.open_menu(window, "File"),
        action=lambda: MenuHelper.close_menu(window, "File")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "File")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Save As"))

    # Point to the Save As submenu
    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "File", "Save As", "Save Nodes As"),
        f"""--Saving occurs from the SaveAs menu.
        \n\n--Use 'Save Network3D Object As' as the primary save function. This will dump all the relevant properties. First, you will be prompted to select a folder on your computer. Next, you will enter the name of a new folder to create in the aforementioned parent folder. All the outputs will be saved to this new folder.
        \n\n--The other SaveAs options can be used to save any of the image channels as a .tif.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    def open_to_load():
        menu = MenuHelper.open_menu(window, "File")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Load"))

    # Point to the Load submenu
    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "File", "Load", "Load Network3D Object"),
        f"""--Loading occurs from the load menu. Acceptable image types are .tif, .tiff, .nii, .png, .jpeg, and .jpg.
        \n\n--'Load Network3D Object' can be used to load in an entire previously saved session, assuming it had been saved with the corresponding 'Save Network3D Object' method. Navigate your way to the directory the 'Network3D Object' dumped to. Select it to reload all properties within.
        \n\n--Use 'load nodes' to load an image into the nodes channel. Similarly, use load edges to load edges, and either of the load overlays to load the overlays.
        \n\n--Use 'load network' to load your saved network data from .csv or .xlsx format. Note this will expect to see the corresponding spreadsheet in the layout that NetTracer3D saves it in.
        \n\n--'Loading from the excel helper' opens a secondary GUI where properties (mainly node identities) can be reassigned with a set of string keywords. For example, a node with identities 'x' and 'y' can be configured to be loaded as 'identity xy'.
        \n\n--'Load misc properties' can be used to load node identities, node centroids, edge centroids, or node communities directly from a .csv or .xlsx spreadsheet, expecting the format that NetTracer3D saves these properties in.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_load,
    )

    def open_to_merge():
        menu = MenuHelper.open_menu(window, "File")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Images -> Node Identities"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "File", "Images -> Node Identities", "Merge Labeled Images Into Nodes"),
        f"""--This 'Images -> Node Identities' menu will be a primary way to assign identities to nodes you are trying to load in.
        \n\n--The option 'Merge Labeled Images Into Nodes' will prompt you to find another .tif or a folder of .tif images containing additional segmented nodes. These nodes will be merged with your current nodes image, assigned IDs based on the file names themselves. Use this to evaluate multiple pre-segmented channels as nodes.
        \n\n--The option 'Assign Node Identities From Overlap With Other Images' is specifically designed for assigning cell identities in multiplex images (i.e. images with many channels) based on a single cell segmentation (usually nuclei).
        """,
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_merge,
    )

    tutorial.add_step(
        None,
        "In short, load your segmented cells/nuclei into the nodes channel. Arrange the rest of your RAW channels of interest into a separate folder. You will be prompted to find this folder.",
        highlight_type=None,
        message_position="top_right"
    )

    tutorial.add_step(
        None,
        "Next, for each channel in the folder, you will be asked to assign intensity threshold boundaries where the segmented cells are assigned an identity based on whether their average intensity of expression falls within the assigned bounds.",
        highlight_type=None,
        message_position="top_right"
    )

    tutorial.add_step(
        None,
        "This can be used to rapidly assign differential identities to your cells. Note that all the channels should have the same shape.",
        highlight_type=None,
        message_position="top_right"
    )

    tutorial.add_step(
        None,
        "When you're done with the identity assignments, you will be prompted to save a resultant intensity table containing the average intensity of each channel for each cell.",
        highlight_type=None,
        message_position="top_right"
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_violin_dialog", "ViolinDialog", "violin_dialog",
        called=True
    )

    tutorial.add_step(
        None,
        "This data table can be used to access this menu (also available from 'Analyze -> Stats -> Show Identity Violins...'), where you can generate informative violin plots about shared marker expression amongst cells of different communities or shared expression between cells defined as some identity.",
        message_position="top_right",
        pre_action=open_dialog
    )

    tutorial.add_step(
        None,
        "You can also algorithmically cluster your cells into higher order neighborhoods based on shared marker expression, for deciphering phenotypes.",
        highlight_type=None,
        message_position="top_right",
    )

    # Step 9: Close dialog and finish
    def close_dialog():
        if hasattr(tutorial, 'violin_dialog') and tutorial.violin_dialog:
            tutorial.violin_dialog.close()
            tutorial.violin_dialog = None

    tutorial.add_step(
        None,
        "That's it for the data loading tutorial!",
        message_position="top_right",
        action=close_dialog
    )

    return tutorial

def setup_connectivity_tutorial(window):

    tutorial = TutorialManager(window)

    tutorial.add_step(
        None,
        "This tutorial will guide you through generating the first sort of network, the 'connectivity network'. These networks should be used to connect one type of object (your nodes), which can be for example cells or functional tissue units, via a secondary structure (your edges), which should be some kind of connector medium, such as nerves or blood vessels.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        window.channel_buttons[0],
        "Start by loading your data into the nodes channel (binary or numerically labeled). Make sure to segment it first if you haven't.",
        highlight_type="circle",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[1],
        "Also load your segmented edges into the edges channel (should be binary).",
        highlight_type="circle",
        message_position="top"
    )

    def open_to_connect():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Calculate Network"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Calculate Network", "Calculate Connectivity Network (Find Node-Edge-Node Network)"),
        "Next, select 'Process -> Calculate Network -> Calculate Connectivity Network'.",
        highlight_type=None,
        message_position="beside",
        pre_action=open_to_connect,
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_calc_all_dialog", "CalcAllDialog", "con_dialog"
    )

    tutorial.add_step(
        None,
        "Let's open the connectivity network calculator to see all the options available. Click 'Next' to open it.",
        message_position="bottom",
        action=open_dialog
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'xy_scale'),
        "xy_scale affects how NetTracer3D interprets distances in the X and Y dimensions. If your image has anisotropic voxels (different spacing in X/Y vs Z), you may need to adjust this to compensate. Note that your data is always presumed to have an equal resolution in the xy plane itself.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'xy_scale', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'xy_scale', 'deselect()')
    )

    # Step 7: Explain z_scale field
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'z_scale'),
        "z_scale adjusts the evaluation of distances in the Z dimension. Many microscopy images have a different Z step size than XY resolution, so you might set this differently than xy_scale.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'z_scale', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'z_scale', 'deselect()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'search'),
        "Node search can expand your nodes by a set distance to look for connections nearby. Otherwise, they will just consider what edges pass directly through them. Note this distance is affected by the xy and z scales.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'search', 'setText("FLOAT!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'search', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'diledge'),
        "Edge Search similarly expands your edges by the entered distance. While nodes that expand keep their identity when they push up into each other, expanding your edges will actually fuse them. This is intended to account for segmentation artifacts, such as small holes. Edges must be contiguous in space to connect a node pair. Note you can preprocess your edges via dilating or closing to skip this, or ignore it entirely.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'diledge', 'setText("FLOAT!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'diledge', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'label_nodes'),
        "Having this option on will have the system use a basic labeling algorithm to label your nodes, where each discrete object in space takes on a separate integer label. This is for binary data mainly. If your nodes are already labeled, please toggle this option off!",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'label_nodes', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'label_nodes', 'toggle()'))

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'remove_trunk'),
        "'Time to Remove Edge Trunks' can be given an integer which will tell the system to remove that many trunks from the edges before connecting your nodes. A trunk is a large confluence of edges, such as a large nerve from which all other nerves arise. Sometimes this will result in most of your nodes being connected by the trunk alone and result in a hard-to-analyze network. If 1 is given here, for example, the single largest volume edge will be removed before making the network. If 2 is given, the single largest two edges are removed, etc. I would recommend skipping this at first, then coming back and trying 1 if there is a large trunk causing problems.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'remove_trunk', 'setText("INTEGER!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'remove_trunk', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'inners'),
        "Deselecting this button will have the system not consider 'inner edges'. Inner edges are portions of your edge image that exist solely within nodes (as well as their expanded search regions). You can deselect this to ignore inner connections within node clusters, for example if you only wanted to consider more distal connections to get a simpler network. However, I would recommend keeping this enabled unless you have a good reason not to.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'inners', 'click()'))

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'down_factor'),
        "Enter an int here to downsample your nodes prior to finding their centroids. The resultant centroids will be scaled back up to their proper values. This can speed up the centroid calculation and is recommended for large images. Note that small nodes may be completely erased if the downsample is too large. A larger int equates to a greater downsample. Downsampling here will also enlarge any overlays generated in this window.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'down_factor', 'setText("INTEGER!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'down_factor', 'setText("")')
    )

    """ # <-- so I am trying out removing these because their use cases are confusing

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'GPU_downsample'),
        "If you want to try and use the GPU, you can likewise enter an arbitrary integer downsample factor here to speed it up. Note the GPU calculation can be greedy with VRAM and will automatically try to downsample itself in a lot of cases."
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU_downsample', 'setText("INTEGER!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU_downsample', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'GPU'),
        "Enable this to have your system attempt to use the GPU. You will need a CUDA toolkit and a corresponding cupy package installed. Note that I consider this function somewhat experimental. In short, the cupy implementation uses a distance transform calculation that can be very greedy with VRAM. If it overflows, it will attempt to iteratively downsample itself until the calculation works (specifically containing to calculating the 'node search' volume). Note this risks kicking out small nodes from your image. Furthermore, it is only really applicable of 'fast dilation' is enabled. Therefore, generally skip using this option. However, it can be a way to rapidly assess the general network structure of a large image.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU', 'click()'),
        action = MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU', 'toggle()'))

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'fastdil'),
        "Enable this to have the algorithm use fast dilation. Fast dilation "
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'fastdil', 'click()'),
        action = MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'fastdil', 'toggle()'))
    """

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'overlays'),
        "Selecting 'Overlays' will have the system also generate a 'network overlay' which literally draws white lines into an image between your nodes. This will be loaded into Overlay 1. It will also generate an 'ID overlay', which draws the integer labels of nodes directly at their centroids. This will be loaded into Overlay 2. These overlays can also be generated after the fact.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'overlays', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'overlays', 'toggle()'))

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'update'),
        "This is enabled by default and will just make it so your 'nodes' are replaced by the 'labeled nodes' if you are labelling them. It will also replace your 'edges' with the labeled edges that this function always generates and utilizes to make connections. Note that you should generally enable this and just save both sets of images. Selecting objects with the highlight overlay and having them correspond to the actual calculated network will only work if you also make sure these images were updated to correspond.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'update', 'click()'))

    def close_dialog():
        if hasattr(tutorial, 'con_dialog') and tutorial.con_dialog:
            tutorial.con_dialog.close()
            tutorial.con_dialog = None

    tutorial.add_step(
        None,
        "That's it for the connectivity network creation!",
        message_position="bottom",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'xy_scale', 'close()'),
        action=close_dialog
    )

    return tutorial

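
# Editor's note (sketch, inferred from usage in this file rather than from any
# documented API): steps that drive menus and dialogs rely on the pre_action/action
# hooks -- pre_action callables (e.g. MenuHelper.open_menu, or an opener returned by
# MenuHelper.create_dialog_opener) appear to run when a step is displayed, while
# action callables (e.g. MenuHelper.close_menu, or a local close_dialog helper) appear
# to run as the user advances past the step, as suggested by step text such as
# "Click 'Next' to open it." above. Treat this as a reading of the code, not a spec.
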
def setup_branch_tutorial(window):
    tutorial = TutorialManager(window)

    tutorial.add_step(
        None,
        "This tutorial will guide you through generating the second sort of network, the 'branch network'. These networks should be used to create branch graphs of segmentations of branchy images, such as nerves or vessels.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        window.channel_buttons[1],
        "First, load your segmented branch image into the edges channel. Make sure it is segmented, not raw data.",
        highlight_type="circle",
        message_position="top"
    )

    tutorial.add_step(
        window.channel_buttons[0],
        "You can also load to the nodes channel first, but the program will prioritize whatever is in edges for any branch analysis.",
        highlight_type="circle",
        message_position="top"
    )

    def open_to_connect():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Calculate Network"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Calculate Network", "Calculate Connectivity Network (Find Node-Edge-Node Network)"),
        "--There are two options for making branch networks.\n\n--1. ('Process -> Calculate Network -> Calculate Branchpoint Network') is 'branchpoint networks', where the vertices of your branches are joined in a network.\n\n--2. ('Process -> Calculate Network -> Calculate Branch Adjacency Network') is 'branch adjacency networks', where the branches themselves are connected based on which other branches they happen to touch.",
        highlight_type=None,
        message_position="beside",
        pre_action=open_to_connect,
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Calculate Network", "Calculate Connectivity Network (Find Node-Edge-Node Network)"),
        "We will start with the 'branch adjacency network' for this demo.",
        highlight_type=None,
        message_position="beside",
        pre_action=open_to_connect,
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_branch_dialog", "BranchDialog", "branch_dialog",
        tutorial_example=True
    )

    tutorial.add_step(
        None,
        "This menu will appear when calculating the 'branch adjacency network'. It is the same menu that you'll get when just trying to label branches with 'Process -> Generate -> Label Branches'. For the most part the parameters are set to the recommended defaults, however I will go over what options are available.",
        message_position="beside",
        pre_action=open_dialog
    )

    """
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix'),
        "This first auto-correction option is designed if you feel like the branch labels are generally too busy. Selecting this will have the program attempt to collapse overly-dense regions of branches into a single label. Note that this behavior is somewhat tricky to predict so I generally don't use it but feel free to give it a shot and see how it looks.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix', 'toggle()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix_val'),
        "This integer value tells the above parameter (if enabled) what degree of branch-busyness should get merged. In short, a lower value is more aggressive with merging while a higher value only merges very busy regions. By default it is set to 4.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix_val', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix_val', 'deselect()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'seed'),
        "The random seed for grouping branches above can be changed here with an integer value, if the behavior of the above option is desired to be tweaked somewhat. It will use 42 by default.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'seed', 'setText("INTEGER!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'seed', 'setText("")')
    )
    """

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix2'),
        "The second auto-correction option will automatically merge any internal labels that arise with their outer-neighbors. This is something that can occasionally happen with fat, trunk-like branches that are tricky to algorithmically decipher. I have found that this merge handles these issues quite well, so this option is enabled by default.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix2', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix3'),
        "This auto-correction step will automatically correct any branches that aren't contiguous in space. Rarely (depending on the segmentation, really) a branch can initially be labeled non-contiguously, which is usually not correct. This is because the 'meat' of any branch is at first labeled based on which internal filament it's closest to. So if you have a very wide branch it may rarely acquire labels of nearby smaller branches across gaps. Enabling this will split those labels into separate regions so as not to confound the connectivity graph. The largest component is considered the 'correct one' and keeps its label, while smaller components inherit the label of the largest shared border of a 'real' branch they are bordering. It is enabled here by default to mitigate any potential errors, although note this does not apply to the branchpoint networks since they don't actually utilize the branches themselves.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix3', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix4'),
        "This final auto-correction step will try to automatically merge any similarly sized branches moving in the same direction, instead of just letting a larger branch with many sub-branches get chopped up. It is off by default because of its less predictable behavior, although it's good if you want your branches to be more continuous. Just note each of these fixes does add extra processing time.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix4', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix4', 'toggle()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix4_val'),
        "This threshold value controls how likely a junction is to merge any pair of its nearby branches. Regardless of what you enter here, only two branches at a time can merge at a junction. Values between 20-40 are more meaningful, while those lower tend to merge everything and those higher usually merge nothing.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix4_val', 'setText("FLOAT!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix4_val', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'down_factor'),
        "This integer value can be used to temporarily downsample the image while creating branches. Aside from speeding up the process, this may actually improve branch-labeling behavior with thick branches but will lose labeling for smaller branches (instead merging them with nearby thicker branches they arise from). It is disabled by default. Larger values will downsample more aggressively.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'down_factor', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'down_factor', 'deselect()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'compute'),
        "Leaving this option enabled will have the program also compute branch lengths and branch tortuosities for the labeled branches. These computations are pretty cheap so I usually leave this enabled. This will get skipped if you use a downsample but you can calculate these again from the Analyze menu.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'compute', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'nodes'),
        "If you have already created node vertices for your branch labeling scheme (i.e. via 'Process -> Generate -> Generate Nodes from Edge Vertices') and have loaded those nodes into the nodes channel (with your branches in the edges channel) you can forgo regenerating these vertices by disabling this. However, it is usually presumed you will be generating them from scratch, so this is enabled by default.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'nodes', 'click()')
    )

    def close_dialog():
        if hasattr(tutorial, 'branch_dialog') and tutorial.branch_dialog:
            tutorial.branch_dialog.close()
            tutorial.branch_dialog = None

    tutorial.add_step(
        None,
        "Press 'Run Branch Label' to move on. This will move you to the step to generate nodes for your branch vertices.",
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'down_factor', 'close()'),
        action=close_dialog
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_gennodes_dialog", "GenNodesDialog", "gen_dialog",
        tutorial_example=True
    )

    tutorial.add_step(
        None,
        "This also happens to be the only menu that will appear if you were to have chosen to create a 'Branchpoint Network', and so the description of this menu applies to both the initial step for the 'branchpoint network' and the second step for the 'branch adjacency network'.",
        message_position="beside",
        pre_action=open_dialog
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'down_factor'),
        "This integer value can be used to temporarily downsample the image while creating branchpoints. Aside from speeding up the process, this may alter branch detection, possibly performing a cleaner branch appraisal of very thick branches but losing network identification of smaller branches (much like in the prior menu - note that any value entered in the prior menu will be applied by default here for consistency, and you won't see this option). It is disabled by default. Larger values will downsample more aggressively.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'deselect()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'branch_removal'),
        "IMPORTANT - This branch removal parameter (Skeleton voxel branch to remove...) is something I would consider entering a value for. This is the length of terminal branches that will be removed prior to any vertex/branch labeling. Any branch shorter than the value here will be removed, but only if it is a terminal branch. For more jagged segmentations, this may be a necessity to prevent branchpoints from arising from spine-like artifacts. More internal branches will not be removed, so as a test it is generally safe to enter a large value here, which will preserve the majority of the branch schema and just risk losing occasional terminal branches.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'branch_removal', 'setText("INTEGER!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'branch_removal', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'auto'),
        "This 'attempt to auto correct skeleton looping' option should generally be enabled for 3D data. In short it applies an extra algorithmic step to improve the branch detection algorithm. However, this does not really apply to 2D data. It will be enabled by default for 3D data and disabled by default for 2D data.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'auto', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'auto', 'toggle()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'comp_dil'),
        "This final 'attempt to expand nodes' will cause your nodes (branchpoint labels) to grow in size by the specified amount. They will fuse with any neighbors they encounter. Doing this will decrease the label splitting along a single branch that has many branches emerge from it in a tightly packed stretch, just as an example, because the system would instead see a single branchpoint there. This can generally be skipped, but if you notice a plethora of tightly packed vertices that you'd want to be treated as a single vertex, you could consider using it.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'comp_dil', 'setText("INTEGER!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'comp_dil', 'setText("")')
    )

    def close_dialog():
        if hasattr(tutorial, 'gen_dialog') and tutorial.gen_dialog:
            tutorial.gen_dialog.close()
            tutorial.gen_dialog = None

    tutorial.add_step(
        None,
        "That's it for creating branch networks!",
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'close()'),
        action=close_dialog
    )

    return tutorial

def setup_prox_tutorial(window):

    tutorial = TutorialManager(window)

    tutorial.add_step(
        None,
        "This tutorial will guide you through generating the third sort of network, the 'proximity network'. These networks should be used to evaluate spatial clustering and general arrangement of objects in an image, for example, just looking at groups of cells and what subtypes group together.",
        highlight_type="rect",
        message_position="bottom"
    )

    tutorial.add_step(
        window.channel_buttons[0],
        "First, load your segmented objects into the nodes channel. Alternatively, you can load in the data for just the node centroids, either through 'File -> Load' or 'File -> Load From Excel Helper'.",
        highlight_type="circle",
        message_position="top"
    )

    def open_to_connect():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Calculate Network"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Calculate Network", "Calculate Proximity Network (connect nodes by distance)"),
        "From the calculate menu, select 'Calculate Proximity Network...'",
        highlight_type=None,
        message_position="beside",
        pre_action=open_to_connect,
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_calc_prox_dialog", "ProxDialog", "prox_dialog",
        tutorial_example=True
    )

    tutorial.add_step(
        None,
        "You will then see this menu. Let's walk through what options are available.",
        message_position="beside",
        pre_action=open_dialog
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'search'),
        "The search region value will tell the program how close you want a pair of nodes to be before they are connected. You must provide a value here to use this function.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'search', 'setText("FLOAT!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'search', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'xy_scale'),
        "xy_scale affects how NetTracer3D interprets distances in the X and Y dimensions. If your image has anisotropic voxels (different spacing in X/Y vs Z), you may need to adjust this to compensate. Note that your data is always presumed to have an equal resolution in the xy plane itself.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'xy_scale', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'xy_scale', 'deselect()')
    )

    # Step 7: Explain z_scale field
    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'z_scale'),
        "z_scale adjusts the evaluation of distances in the Z dimension. Many microscopy images have a different Z step size than XY resolution, so you might set this differently than xy_scale.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'z_scale', 'selectAll()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'z_scale', 'deselect()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'mode_selector'),
        "--Execution Mode tells the program if you want to link nodes by comparing the distances between their centroids or their borders.\n\n--The first option will utilize centroids, which is usually faster and good for objects that are roughly circular or spheroid, such as cells. Note the search distance will start at the centroid and only create a pair if the search encounters another centroid, so you may need to increase that value to compensate if the borders are larger than the centroids.\n\n--The second option will search from the actual object's boundary and may be slower to process, but is ideal for more oddly shaped nodes whose location cannot be described well by a centroid. Nodes will be linked based on their boundary-to-boundary distance.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'mode_selector', 'showPopup()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'mode_selector', 'hidePopup()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'id_selector'),
        "If your nodes have been assigned identities, this menu will allow you to only use one as a basis for finding connections. So if I were to choose 'identity A' here, then any nodes bearing 'identity A' could connect to any other node type (including A, B, C, etc), but nodes of 'identity B' or 'C' could not connect to each other. This can be used to evaluate relationships around specific subtypes of objects. Note that a network exclusively between two identity types can also be created in post with 'Process -> Modify Network...'",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'id_selector', 'showPopup()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'id_selector', 'hidePopup()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'overlays'),
        "Selecting 'Overlays' will have the system also generate a 'network overlay' which literally draws white lines into an image between your nodes. This will be loaded into Overlay 1. It will also generate an 'ID overlay', which draws the integer labels of nodes directly at their centroids. This will be loaded into Overlay 2. These overlays can also be generated after the fact.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'overlays', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'overlays', 'toggle()'))

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'downsample'),
        "The downsample factor integer value will downsample when generating the overlays, which is essentially just a trick to make them render larger. This only matters if you want to generate the overlays here.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'downsample', 'setText("INT!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'downsample', 'setText("")')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'populate'),
        "Since you can skip using an actual image and just use the data in the centroids property (if nothing is in nodes) to generate this network, enabling this 'Populate Nodes From Centroids' option will cause the program to generate a new image and place the centroids in it as single, labeled points. This will be loaded into the nodes channel. The labeled points can be dilated after the fact if you'd like to make them more visible.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'populate', 'click()'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'populate', 'toggle()'))

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'prox_dialog', 'max_neighbors'),
        "The integer entered here will cause any node to only be able to have a maximum of that many connections. It will preferentially take connections to its closest neighbors. You can enter a cap here to simplify network structure in dense images. Alternatively, if you are using the centroid search you can enter a very large distance for your search region (note this sort of distance might slow down the border search substantially) and then pass a value here as a way to appraise the 'n' closest neighbors for each node.",
        highlight_type=None,
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'max_neighbors', 'setText("INT!")'),
        action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'max_neighbors', 'setText("")')
    )

    def close_dialog():
        if hasattr(tutorial, 'prox_dialog') and tutorial.prox_dialog:
            tutorial.prox_dialog.close()
            tutorial.prox_dialog = None

    tutorial.add_step(
        None,
        "That's it for creating proximity networks!",
        message_position="beside",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'prox_dialog', 'downsample', 'close()'),
        action=close_dialog
    )

    return tutorial

def setup_seg_tutorial(window):
|
|
2079
|
+
|
|
2080
|
+
tutorial = TutorialManager(window)
|
|
2081
|
+
|
|
2082
|
+
tutorial.add_step(
|
|
2083
|
+
None,
|
|
2084
|
+
"This tutorial will guide you through options for segmenting your data within NetTracer3D",
|
|
2085
|
+
highlight_type="rect",
|
|
2086
|
+
message_position="bottom"
|
|
2087
|
+
)
|
|
2088
|
+
|
|
2089
|
+
tutorial.add_step(
|
|
2090
|
+
None,
|
|
2091
|
+
"Alternatively, you can segment your data with a seperate software and bring it to NetTracer3D. Prepare your segmented data into a .tif format, where the image has been reduced to binary (ie 1 or 255 for the foreground, 0 for the background), or where each discrete object has its own numerical label (ie 1, 2, 3, etc).",
|
|
2092
|
+
highlight_type="rect",
|
|
2093
|
+
message_position="bottom"
|
|
2094
|
+
)
|
|
2095
|
+
|
|
2096
|
+
tutorial.add_step(
|
|
2097
|
+
window.thresh_button,
|
|
2098
|
+
"Some of the main segmentation options are accessible by clicking this widget.",
|
|
2099
|
+
highlight_type="circle",
|
|
2100
|
+
message_position="top"
|
|
2101
|
+
)
|
|
2102
|
+
|
|
2103
|
+
open_dialog, _ = MenuHelper.create_dialog_opener(
|
|
2104
|
+
window, tutorial, "show_thresh_dialog", "ThresholdDialog", "thresh_dialog",
|
|
2105
|
+
tutorial_example = True
|
|
2106
|
+
)
|
|
2107
|
+
|
|
2108
|
+
tutorial.add_step(
|
|
2109
|
+
None,
|
|
2110
|
+
"You will then see this menu. Let's walk through what options are available.",
|
|
2111
|
+
message_position="beside",
|
|
2112
|
+
pre_action=open_dialog
|
|
2113
|
+
)
|
|
2114
|
+
|
|
2115
|
+
tutorial.add_step(
|
|
2116
|
+
MenuHelper.create_widget_getter(tutorial, 'thresh_dialog', 'mode_selector'),
|
|
2117
|
+
"This menu shows some default thresholding options. Thresholding by label/brightness is an easy way to produce a segmentation, although it will only work for regions that are brighter than the background. Thresholding by volume can be used to remove noise after a segmentation is produced.",
|
|
2118
|
+
highlight_type=None,
|
|
2119
|
+
message_position="beside",
|
|
2120
|
+
pre_action=MenuHelper.create_widget_interaction(tutorial, 'thresh_dialog', 'mode_selector', 'showPopup()'),
|
|
2121
|
+
action=MenuHelper.create_widget_interaction(tutorial, 'thresh_dialog', 'mode_selector', 'hidePopup()')
|
|
2122
|
+
)
|
|
2123
|
+
|
|
2124
|
+
tutorial.add_step(
|
|
2125
|
+
window.active_channel_combo,
|
|
2126
|
+
"Choose 'select' to open the thresholding histogram. This thresholding will execute on whatever the active channel is",
|
|
2127
|
+
highlight_type="rect",
|
|
2128
|
+
message_position="top"
|
|
2129
|
+
)
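    # The two steps above cover brightness- and volume-based thresholding. As a rough
    # illustration of what those operations amount to (not NetTracer3D's internal code),
    # the nested sketch below thresholds an intensity volume with Otsu's method and then
    # removes connected components smaller than a voxel-count cutoff. The helper name and
    # the `min_voxels` parameter are hypothetical.
    def _example_threshold_and_volume_filter(img, min_voxels=50):
        import numpy as np
        from scipy import ndimage
        from skimage.filters import threshold_otsu

        binary = img > threshold_otsu(img)          # brightness threshold
        labels, _ = ndimage.label(binary)           # label connected components
        sizes = np.bincount(labels.ravel())         # voxel count per label (index 0 = background)
        keep = sizes >= min_voxels
        keep[0] = False                             # never keep the background
        return keep[labels]                         # boolean mask with small objects removed
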

    def close_dialog():
        if hasattr(tutorial, 'thresh_dialog') and tutorial.thresh_dialog:
            tutorial.thresh_dialog.close()
            tutorial.thresh_dialog = None

    tutorial.add_step(
        None,
        "For data with less signal-to-noise, you can try to use the 'machine learning' segmenter.",
        message_position="beside",
        action=close_dialog
    )

    tutorial.add_step(
        window.channel_buttons[0],
        "First, load the data you'd like to segment into the nodes channel. Then, click 'machine learning' from the threshold menu.",
        highlight_type="circle",
        message_position="top"
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_machine_window_tutorial", "MachineWindow", "machine_window"
    )

    tutorial.add_step(
        None,
        "This window will then appear and is used to control the machine learning segmenter.",
        message_position="top_left",
        pre_action=open_dialog
    )

    tutorial.add_step(
        window.channel_buttons[2],
        "Note that at the moment the segmenter will use this overlay channel to store training data. You can directly save and reload this overlay if you'd like to save your training data and use it again later.",
        highlight_type="circle",
        message_position="top_left"
    )

    tutorial.add_step(
        window.high_button,
        "It will also use the highlight overlay to render your segmentation preview. As a result, make sure you have enough RAM to accommodate these additional arrays. In general, I would advise against segmenting something over 5 GB with this, so please downsample any data larger than that with 'Process -> Image -> Resize'.",
        highlight_type="circle",
        message_position="top_left"
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'machine_window', 'brush_button'),
        "Use this button to access the brush. Use left click to draw markings for the segmenter, and right click to erase them. Ctrl + Mousewheel can increase the size of the brush.",
        highlight_type=None,
        message_position="top_left",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'machine_window', 'brush_button', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'machine_window', 'brush_button'),
        "You will use the brush to mark the 'foreground' of the image (what you'd like to keep) and the 'background' (what you'd like to remove).",
        highlight_type=None,
        message_position="top_left",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'machine_window', 'brush_button', 'click()')
    )

    tutorial.add_step(
        window.canvas,
        "You will mark these regions directly on the canvas.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        window.canvas,
        "The program will use your markings to train itself. When you train a model, it will learn to segment out regions that look like those you marked as foreground, while ignoring regions that you marked as background.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'machine_window', 'fore_button'),
        "You can toggle whether you're labeling foreground or background with the foreground or background buttons here.",
        highlight_type=None,
        message_position="top_left",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'machine_window', 'fore_button', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'machine_window', 'GPU'),
        "Click the GPU button to use the GPU, which is available if you have set up a CUDA toolkit and installed the corresponding cupy package.",
        highlight_type=None,
        message_position="top_left",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'machine_window', 'GPU', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'machine_window', 'two'),
        "Selecting 'Train by 2D Slice Patterns' will have the program only consider two-dimensional patterns around your marked regions when learning to segment. This is faster but does not consider 3D structures.",
        highlight_type=None,
        message_position="top_left",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'machine_window', 'two', 'click()')
    )

    tutorial.add_step(
        MenuHelper.create_widget_getter(tutorial, 'machine_window', 'three'),
        "Selecting 'Train by 3D Slice Patterns' will likewise consider 3D patterns, but is slower.",
        highlight_type=None,
        message_position="top_left",
        pre_action=MenuHelper.create_widget_interaction(tutorial, 'machine_window', 'three', 'click()')
    )

    tutorial.add_step(
        None,
        "When you've marked your data a bit, you can select 'Train Quick Model' or 'Train Detailed Model' to train the segmenter. The quick model is both faster and lighter, and a good option for when the signal-to-noise is decent. It also doesn't require as much training.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "The 'Detailed Model' considers structure more than signal and is an option for less distinct data. It will be slower and likely require more training data than the quick model.",
        highlight_type="rect",
        message_position="top_left"
    )
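    # The quick/detailed model steps above describe training a classifier on the marked
    # foreground/background scribbles. A later tutorial in this file notes the segmenter
    # is based on a Random Forest classifier; the nested sketch below shows the general
    # recipe (per-voxel features -> fit -> predict) in a simplified, illustrative form.
    # The feature choices and names here are assumptions, not NetTracer3D's implementation.
    def _example_scribble_random_forest(img, scribbles):
        """`scribbles`: array matching `img`; 0 = unlabeled, 1 = foreground, 2 = background."""
        import numpy as np
        from scipy import ndimage
        from sklearn.ensemble import RandomForestClassifier

        img = img.astype(np.float32)
        features = np.stack([
            img,                                            # raw intensity
            ndimage.gaussian_filter(img, sigma=1),          # local smoothing
            ndimage.gaussian_filter(img, sigma=3),          # coarser smoothing
            ndimage.gaussian_gradient_magnitude(img, 1.0),  # edge strength
        ], axis=-1)

        marked = scribbles > 0
        clf = RandomForestClassifier(n_estimators=50, n_jobs=-1)
        clf.fit(features[marked], scribbles[marked])        # train on scribbled voxels only

        pred = clf.predict(features.reshape(-1, features.shape[-1]))
        return pred.reshape(img.shape) == 1                 # True where predicted foreground
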

    tutorial.add_step(
        None,
        "Select 'Preview Segment' to make the segmenter start segmenting the image without interrupting the training session. Its preview segmentation will begin to render in the highlight overlay. During this period, you should observe how it's segmenting and correct it if it's making mistakes.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "At some point you'll reach some kind of ceiling where additional training data won't really help more. This might take 20-40 minutes of training. So try to end the training session by then, or earlier if it looks satisfactory.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "The button with ▶/⏸️ can be pressed to pause the segmentation preview, or to start it again.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "Select 'segment all' to have the program calculate the segmentation for the entire image. This will freeze the NetTracer3D window until it's done.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        window.channel_buttons[3],
        "The finished binary segmentation will be placed here. Make sure to save it with 'File -> Save As...'.",
        highlight_type="circle",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "If you'd like to reuse a model you trained, select 'Save Model' to save it to your computer somewhere.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "Likewise, 'Load Model' can be used to reopen a saved model. You can train over an old model to have it combine all the training data, although note the model might become more ponderous the more you train over it.",
        highlight_type="rect",
        message_position="top_left"
    )

    tutorial.add_step(
        None,
        "Select 'Load Image' from the segmenter window to load a new image into the nodes to segment. IMPORTANT - This option allows you to segment color images, such as an H&E slide. The 'nodes' channel will not let you load color images otherwise.",
        highlight_type="rect",
        message_position="top_left"
    )

    def close_dialog():
        if hasattr(tutorial, 'machine_window') and tutorial.machine_window:
            tutorial.machine_window.close()
            tutorial.machine_window = None

    tutorial.add_step(
        None,
        "This is a way to produce binary segmentations; however, for cellular data we may want to have labeled data instead.",
        message_position="top_right",
        pre_action=close_dialog
    )

    def open_to_connect():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Image"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Image", "Binary Watershed"),
        "--Watershedding can be used to split a binary image into labeled components, or to directly label an image.\n\n--From the displayed menu, 'Binary Watershed' can be used to split apart fused components of your binary segmentation, assuming they are distinct enough.\n\n--'Gray Watershed' can be used to directly label objects like cell nuclei from a raw image, provided they have distinct peaks of intensity in the image. Note both of these methods can be prickly about their default parameters, so they may require some testing on your specific dataset. Please reference the documentation for more info.",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_connect,
        action=lambda: MenuHelper.close_menu(window, "Process")
    )
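    # The Binary Watershed step above splits touching objects in a binary mask. A common,
    # generic recipe for this (illustrative only; NetTracer3D's parameters and internals
    # may differ) is: distance transform -> local maxima as seeds -> watershed on the
    # inverted distance map, confined to the original segmentation.
    def _example_binary_watershed(binary, min_peak_separation=5):
        from scipy import ndimage
        from skimage.segmentation import watershed

        binary = binary.astype(bool)
        dist = ndimage.distance_transform_edt(binary)
        # Seeds: voxels that are the maximum of their local neighborhood.
        local_max = (ndimage.maximum_filter(dist, size=min_peak_separation) == dist) & binary
        markers, _ = ndimage.label(local_max)
        # Flood from the seeds "uphill" along the negative distance map, masked to the object.
        return watershed(-dist, markers, mask=binary)
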

    def open_to_connect():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Generate"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Generate", "Trace Filaments"),
        "--Trace Filaments can be used to try to automatically trace a cleaner segmentation from a rough segmentation of filamental objects, like nerves or vessels. This can be used to improve a segmentation generated either by rote intensity thresholding or by the ML segmenter. For alternative ways to remove noise, use morphological calculations (Analyze -> Stats -> Morphological) to characterize objects in the image, then place your segmentation in the 'Nodes' channel and threshold them from the upper-right table. Alternatively, you can select and manually delete noise from the canvas by clicking it, then right clicking and choosing 'Selection -> Delete Object'.",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_connect,
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    tutorial.add_step(
        lambda: window.menuBar(),
        "Another option is to use other software. One that's useful for segmenting cells is Cellpose. If you package cellpose with NetTracer3D, you can actually open it from here, although it generally requires you to have a decent GPU.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Image"),
        action=lambda: MenuHelper.close_menu(window, "Image")
    )

    return tutorial

def setup_analysis_tutorial(window):

    tutorial = TutorialManager(window)
    # Step 1: Welcome
    tutorial.add_step(
        None,
        "This tutorial will guide you through the options for analysis. It will briefly describe what is available. Please reference the documentation for full information.",
        highlight_type="rect",
        message_position="top_right"
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Analyze"),
        "The Analyze menu contains the options for analyzing your data. This applies to both analysis of the networks and of the images themselves.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Analyze"),
        action=lambda: MenuHelper.close_menu(window, "Analyze")
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Analyze"),
        "The Network submenu contains options for network visualization, getting network statistics, and grouping networks into communities or neighborhoods.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Analyze"),
        action=lambda: MenuHelper.close_menu(window, "Analyze")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Analyze")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Network"))

    # Step 3: Point to Image submenu
    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Network", "Show Network"),
        f"""--Use 'Show Network' to visualize your network in an interactive matplotlib graph. You can enable geo_layout to position nodes based on their 3D coordinates, and color-code nodes by community or node-ID. Note this visualization is slow for networks with many nodes (100k+) or edges. In those cases, render the network overlay instead, followed by the 3D visualization or z-projection.
\n\n--Use 'Generic Network Report' to get basic statistics about your Network 3D Object. This includes node count, edge count, nodes per identity category, and nodes per community (if assigned).
\n\n--Use 'Community Partition + Generic Network Stats' to group nodes into communities using either Label Propagation or Louvain algorithms. This function also calculates comprehensive community statistics including modularity, clustering coefficients, and per-community metrics like density and conductance.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Network", "Show Network"),
        f"""--Use 'Calculate Composition of Network Communities (And Show UMAP)' to analyze the compositional makeup of your communities based on node identities. This function can provide per-community identity proportions or a weighted average across all communities, and can generate a UMAP to visualize compositional similarity between communities.
\n\n--Use 'Convert Network Communities Into Neighborhoods' to group similar communities into a smaller set of neighborhoods using K-means or DBSCAN clustering. This function returns compositional heatmap graphs showing identity distributions across neighborhoods, including optional robust heatmaps that highlight overrepresented node types. Note this will reassign the 'communities' property to neighborhoods.
\n\n--Use 'Create Communities Based on Cuboidal Proximity Cells' as an alternative spatial method for grouping nodes into communities. This splits the image into user-defined cuboidal cells and assigns nodes to communities based on whether they share a cell, independent of the network structure. You would mostly use it for images where the nodes were chaotically arranged (and so not in meaningful network communities), and you were just interested in creating neighborhoods to describe what is clustered with what.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
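    # The community-partition options above (Label Propagation / Louvain) are standard
    # graph community detection. For readers wanting the underlying idea, the nested
    # sketch below shows the equivalent calls in a recent networkx (which provides
    # nx.community.louvain_communities); it is illustrative, not NetTracer3D's code.
    def _example_community_partition(G, method="louvain"):
        import networkx as nx

        if method == "louvain":
            communities = nx.community.louvain_communities(G, seed=0)
        else:
            communities = list(nx.community.label_propagation_communities(G))
        modularity = nx.community.modularity(G, communities)
        # Map each node to its community index, as a {node: community_id} dict.
        membership = {n: i for i, comm in enumerate(communities) for n in comm}
        return membership, modularity
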

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Analyze"),
        "The Stats submenu contains options for quantifying your networks, analyzing object morphology, and comparing spatial distribution of objects.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Analyze"),
        action=lambda: MenuHelper.close_menu(window, "Analyze")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Analyze")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Stats"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Stats", "Network Related"),
        f"""--The first submenu (Network Related) is for calculating more comprehensive stats about your network.
\n\n--Use 'Calculate Generic Network Stats' to generate basic network statistics including node/edge counts, density, connectivity, degree metrics, and centrality averages.
\n\n--Use 'Network Statistics Histograms' to generate and visualize distributions of various network properties using matplotlib histograms. Options include degree distribution, centrality metrics (betweenness, closeness, eigenvector), clustering coefficients, and many others based on NetworkX functions. These stats are displayed in the tabulated data widget and can be used to directly threshold your nodes.
\n\n--Use 'Radial Distribution Analysis' to create a graph showing the average number of neighboring nodes versus distance from any given node. This helps evaluate how far apart connected nodes are in 3D space and assess network efficiency.
\n\n--Use 'Community Cluster Heatmap' to visualize community density in 2D or 3D, with nodes colored by whether they're in higher (red) or lower (blue) density communities than expected. Can be output as a matplotlib graph or RGB overlay.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Stats", "Network Related"),
        f"""--The second submenu (Spatial) is for calculating spatial relationships between objects that are not network-dependent.
\n\n--Use 'Identity Distribution of Neighbors' to explore what types of nodes tend to be located near or connected to nodes of a specific identity. Choose between network-based connectivity analysis or morphological density-based analysis within a search radius.
\n\n--Use 'Ripley Clustering Analysis' to generate a Ripley's K curve that compares relative object clustering versus distance. This identifies whether nodes are clustered or dispersed and how clustering behavior varies across the image, with optional border exclusion and edge correction.
\n\n--Use 'Average Nearest Neighbors' to analyze nearest neighbor distances, either for specific identity pairs or all nodes. Can generate heatmaps showing nodes colored by their proximity to neighbors and output quantifiable overlays with distance values.
\n\n--Use 'Calculate Node < > Edge Interactions' to quantify how much edge channel image surrounds each labeled node. Can measure either volumes or lengths of adjacent edges within a specified search distance, with options to include or exclude regions inside nodes.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
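    # 'Average Nearest Neighbors' above summarizes how close each node is to its nearest
    # neighbor. As a point of reference (illustrative, not NetTracer3D's implementation),
    # the nested sketch below computes the mean nearest-neighbor distance from centroids
    # with a KD-tree, using anisotropic scaling factors analogous to xy_scale/z_scale.
    def _example_mean_nearest_neighbor(centroids, xy_scale=1.0, z_scale=1.0):
        import numpy as np
        from scipy.spatial import cKDTree

        pts = np.asarray(list(centroids), dtype=float) * np.array([z_scale, xy_scale, xy_scale])
        dists, _ = cKDTree(pts).query(pts, k=2)  # k=2: the first hit is the point itself
        return float(dists[:, 1].mean())
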

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Stats", "Network Related"),
        f"""--The third submenu (Morphological) is for calculating morphological characteristics of objects.
\n\n--Use 'Calculate Volumes' to find the volumes of all labeled objects in the Active Image, scaled by axis scalings and returned as a table in the tabulated data widget.
\n\n--Use 'Calculate Radii' to find the largest radius of each labeled object in the Active Image, useful for evaluating thickness of structures like branches.
\n\n--Use 'Calculate Surface Area' to find the approximate surface area of each labeled object in the Active Image. Note that this will slightly overestimate smooth surfaces as it just rotely counts the voxel faces, but it is fine for comparing objects in the same image.
\n\n--Use 'Calculate Sphericities' to find the sphericities of each labeled object in the Active Image. Values closer to 1 are more spherical while those closer to 0 are less so. This also computes volumes and surface areas as described above.
\n\n--Use 'Calculate Branch Stats' to get stats for branches that you've labeled first, including lengths and tortuosities.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
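    # The morphology options above (volume, surface area, sphericity) have simple
    # voxel-based definitions. The nested sketch below shows one common way to compute
    # them for a single binary object mask: volume = voxel count x voxel volume, surface
    # area = count of exposed voxel faces, sphericity = pi^(1/3) * (6V)^(2/3) / A.
    # It is illustrative only; NetTracer3D's exact scaling and estimators may differ.
    def _example_object_morphology(mask, xy_scale=1.0, z_scale=1.0):
        import numpy as np

        mask = mask.astype(bool)
        voxel_volume = z_scale * xy_scale * xy_scale
        volume = mask.sum() * voxel_volume

        # Count faces where an object voxel borders background (or the array edge),
        # weighting each face by its physical area for the corresponding axis.
        face_areas = (xy_scale * xy_scale, z_scale * xy_scale, z_scale * xy_scale)
        surface = 0.0
        for axis, face_area in zip(range(3), face_areas):
            padded = np.pad(mask, [(1, 1) if a == axis else (0, 0) for a in range(3)])
            exposed = np.abs(np.diff(padded.astype(np.int8), axis=axis)).sum()
            surface += exposed * face_area

        sphericity = (np.pi ** (1 / 3)) * (6 * volume) ** (2 / 3) / surface
        return volume, surface, sphericity
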

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Stats", "Network Related"),
        f"""--The last stats functions are as follows:
\n\n--Use 'Significance Testing' to open a dedicated GUI for statistical testing on your data. Arrange data in Excel format, drag columns to compare, and select from various tests including t-tests, ANOVA, Mann-Whitney U, Pearson, Shapiro-Wilk, and Chi-squared tests.
\n\n--Use 'Show Identities Violin/UMAP' to visualize normalized violin plots and UMAPs for nodes assigned identities via multiple channel markers. Displays intensity expression patterns for specific identities or communities/neighborhoods based on channel marker data. Nodes can also be grouped into neighborhoods based on shared intensity expressions across channels. This requires use of the table obtained from 'File -> Images -> Node Identities -> Assign Node Identities from Overlap with Other Images'.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Analyze"),
        "The Data/Overlays submenu contains options to generate informative overlays based on different analytical outputs.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Analyze"),
        action=lambda: MenuHelper.close_menu(window, "Analyze")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Analyze")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Data/Overlays"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Data/Overlays", "Get Degree Information"),
        f"""--Use 'Get Degree Information' to extract and visualize node connectivity. Options include creating a data table, drawing degree values as text overlays, relabeling nodes by their degree for thresholding, or generating an RGB heatmap where high-degree nodes are red and low-degree nodes are blue. Can optionally filter to show only the top proportion of high-degree nodes.
\n\n--Use 'Get Hub Information' to identify hub nodes with the fewest degrees of separation from other nodes. Can optionally create an overlay isolating hubs and specify the proportion of most connected hubs to return (e.g., top 10%). Hubs are evaluated independently per network component.
\n\n--Use 'Get Mother Nodes' to identify nodes that bridge connections between different communities. These nodes enable inter-community interactions. Can optionally create an overlay isolating mother nodes, which goes into the Overlay 1 channel.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
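    # 'Get Degree Information' and 'Get Hub Information' above build on standard graph
    # measures. The nested sketch below shows the generic idea with networkx (assuming
    # `G` is a networkx graph): a degree table plus the top fraction of highest-degree
    # nodes. It is an illustrative sketch; NetTracer3D's proportion handling may differ.
    def _example_degree_info(G, top_proportion=0.1):
        degrees = dict(G.degree())
        n_top = max(1, int(len(degrees) * top_proportion))
        top_nodes = sorted(degrees, key=degrees.get, reverse=True)[:n_top]
        return degrees, top_nodes
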

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Data/Overlays", "Get Degree Information"),
        f"""--Use 'Code Communities' to generate overlays showing community membership. Choose between a color-coded RGB overlay for easy visualization or a grayscale overlay labeled by community number for downstream thresholding and analysis. A legend table is also generated.
\n\n--Use 'Code Identities' to generate overlays showing node identity membership. Choose between a color-coded RGB overlay for easy visualization or a grayscale overlay labeled by numerical identity for downstream thresholding and analysis. A legend table is also generated.
\n\n--Use 'Centroid UMAP' to create a UMAP clustering nodes based on spatial similarity of their centroids. Nodes are colored by identity if available. This is useful for 3D data to quickly identify spatial groupings.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Analyze"),
        "The Randomize submenu contains options to randomize either the position of your nodes (just the centroids) or the connections within your network. This can serve as a way to demonstrate non-randomness, by comparing these distributions to your observed one.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Analyze"),
        action=lambda: MenuHelper.close_menu(window, "Analyze")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Analyze")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Randomize"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Analyze", "Randomize", "Scramble Nodes (Centroids)"),
        f"""--Use 'Generate Equivalent Random Network' to create a random network with the same number of edges and nodes as your current network. This is useful for comparing your network to a random control to demonstrate non-randomness. The random network is placed in the 'Selection' table where it can be saved or swapped to active. The optional weighted parameter allows edges to stack into weighted edges.
\n\n--Use 'Scramble Nodes (Centroids)' to randomize node locations for comparison against random distributions. Choose where nodes can be repositioned: anywhere in image bounds, within dimensional bounds of current nodes, or within masked bounds of edge/overlay channels. Only centroids are randomized.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    return tutorial

def setup_process_tutorial(window):
    """
    Set up the image processing tutorial for NetTracer3D

    Args:
        window: ImageViewerWindow instance from nettracer_gui

    Returns:
        TutorialManager instance
    """
    tutorial = TutorialManager(window)

    # Step 1: Welcome
    tutorial.add_step(
        None,
        "This tutorial will guide you through the options for processing your data. It will briefly describe what is available. Please reference the documentation for full information.",
        highlight_type="rect",
        message_position="top_right"
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        "The Process menu contains the options for processing your data. This is where you can generate your networks, label your branches, improve segmentations, and modify the network.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Process"),
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        "The 'Calculate Network' submenu contains options for creating your networks, and also calculating the centroids for your objects. All of the network calculation options have their own detailed tutorials in the tutorial window.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Process"),
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Calculate Network"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Calculate Network", "Calculate Connectivity Network"),
        f"""--Use 'Calculate Connectivity Network' to connect nodes via edges in your images. Key parameters include node search distance (how far nodes look for edges) and edge reconnection distance (to fill segmentation holes).
\n\n--Use 'Calculate Proximity Network' to connect nodes based on spatial proximity within a user-defined distance. Choose between centroid-based search (faster, ideal for spherical nodes) or morphological search (slower but better for irregular shapes). Can optionally restrict connections to nearest neighbors only and create networks from specific node identities.
\n\n--Use 'Calculate Branchpoint Network' to convert branchpoints in branchy structures (like blood vessels) into network nodes, which are then joined in a network based on which branches they border.
\n\n--Use 'Calculate Branch Adjacency Network' to connect adjacent branches in branchy structures by converting the branches themselves (not branchpoints) into network nodes and joining them into a network based on which branches border each other.
\n\n--Use 'Calculate Centroids' to find the center of mass for nodes and/or edges. Centroids provide a low-memory way to track object locations and are required for many other functions. Can downsample temporarily for speed.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Image"))

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        "The 'Image' submenu contains options for altering your segmented data.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Process"),
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Image", "Dilate"),
        f"""--Use 'Resize' to resize images by upsampling or downsampling in any dimension. Enter values between 0-1 for downsampling or above 1 for upsampling. Can resize all dimensions uniformly or individual axes. Includes options to normalize scaling between xy and z dimensions and restore to original shape after prior resampling.
\n\n--Use 'Clean Segmentation' to access quick cleaning operations including Close (dilation then erosion to fill gaps), Open (erosion then dilation to remove noise and smooth edges), Fill Holes, tracing cleaner filamental segmentations of vessels/nerves, or Threshold Noise by Volume for removing small objects.
\n\n--Use 'Dilate' to expand objects in an image. Dilation radius is scaled by the xy_scale and z_scale properties.
\n\n--Use 'Erode' to shrink objects in an image.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
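    # 'Dilate'/'Erode' above are morphological operations whose radius is interpreted in
    # physical units via xy_scale/z_scale. One generic way to get an anisotropy-aware
    # dilation or erosion (illustrative only, not NetTracer3D's implementation) is to
    # threshold a sampled Euclidean distance transform, as sketched below.
    def _example_scaled_dilate_erode(mask, radius, xy_scale=1.0, z_scale=1.0, erode=False):
        from scipy import ndimage

        mask = mask.astype(bool)
        sampling = (z_scale, xy_scale, xy_scale)  # physical voxel spacing along (z, y, x)
        if erode:
            # Keep only voxels deeper than `radius` from the object boundary.
            inside = ndimage.distance_transform_edt(mask, sampling=sampling)
            return inside > radius
        # Grow the object to every voxel within `radius` of it.
        outside = ndimage.distance_transform_edt(~mask, sampling=sampling)
        return outside <= radius
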

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Image", "Dilate"),
        f"""--Use 'Fill Holes' to eliminate artifacts in binary segmentations by filling enclosed gaps. Can operate in 2D slicing mode only (XY plane) or across all 3D planes, with optional border hole filling and the ability to output the hole mask to Overlay 2 for selective filling.
\n\n--Use 'Binarize' to convert images to binary format. Choose between total binarize (sets all nonzero to 255) or predict foreground (uses Otsu's method to automatically segment signal from background).
\n\n--Use 'Label Objects' to assign distinct numerical identities to all touching, nonzero regions in an image. Useful for separating binary segmentations into unique objects.
\n\n--Use 'Neighbor Labels' to label objects in one image based on proximity to labeled objects in another image. With the first option, all nonzero objects take on the label of the closest labeled object. With the second option, all nonzero objects take on the label of the closest labeled object that they are continuous with in space. Useful for defining spatial relationships between images.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Image", "Dilate"),
        f"""--Use 'Threshold/Segment' to access intensity-based, volume-based, radius-based, or degree-based thresholding windows. Alternatively, launch the Machine Learning segmenter, which uses a Random Forest Classifier trained on user-designated regions to segment based on morphological patterns.
\n\n--Use 'Mask Channel' to use the binarized version of one channel to mask another, preserving only regions that exist in the mask.
\n\n--Use 'Crop Channels' to crop all available channels to specified Z, Y, X boundaries. Can be auto-called by Shift+left-click-dragging in the Image Viewer Window.
\n\n--Use 'Channel dtype' to change the data type of a channel (8-bit, 16-bit, 32-bit int, or 32/64-bit float) to preserve memory when larger data types aren't needed.
\n\n--Use 'Skeletonize' to reduce images to their medial axis. Can optionally remove terminal branches of specified pixel length and auto-correct loop artifacts that appear in thick regions.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Image", "Dilate"),
        """--Use 'Binary Watershed' to split fused objects in binary segmentations that appear as separate objects. Control aggressiveness with the smallest radius or proportion parameters. Ideal for separating overlapping binary objects.
\n\n--Use 'Gray Watershed' to watershed grayscale images, separating and labeling objects based on size and blobbiness. Best for quickly segmenting cells without ML training. Set the minimum peak distance between labeled components for optimal results.
\n\n--Use 'Invert' to invert an image, swapping high and low values.
\n\n--Use 'Z-Project' to superimpose all XY slices into a single 2D slice using max, mean, min, sum, or standard deviation projection modes.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Generate"))

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        "The 'Generate' submenu contains options for creating data.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Process"),
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Process", "Generate", "Label Branches"),
        f"""--Use 'Generate Nodes (From Node Centroids)' to convert your node_centroids property into a labeled image in the nodes channel, where each centroid becomes a labeled point. This is useful when centroids were loaded from a previous session or extracted from another analysis tool and you want to access image functions.
\n\n--Use 'Generate Nodes (From Edge Vertices)' to generate nodes at the vertices of a branch-like segmented structure loaded into the edges channel.
\n\n--Use 'Label Branches' to label the branches of a binary mask from branchy structures. Ideal for analyzing branchy structures like blood vessels.
\n\n--Use 'Trace Filaments' to open a window for automatically cleaning segmentations of filament-like structures (such as vessels or nerves) by tracing a new, cleaner mask over pathways the program can detect.
\n\n--Use 'Generate Voronoi Diagram' to create a Voronoi diagram from node_centroids, where labeled cells represent the region closest to each centroid. This provides an alternative way to define node neighborhoods for connectivity networks, particularly useful for small or homogeneous spheroid nodes. The diagram is loaded into Overlay2.""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
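    # 'Generate Voronoi Diagram' above assigns every voxel to its closest node centroid.
    # The nested sketch below shows that idea directly with a KD-tree lookup over all
    # voxel coordinates; it is memory-hungry for large volumes and purely illustrative
    # (NetTracer3D's own implementation and downsampling behavior may differ).
    def _example_voronoi_labels(shape, centroids_by_label):
        import numpy as np
        from scipy.spatial import cKDTree

        labels = np.array(list(centroids_by_label.keys()))
        points = np.array(list(centroids_by_label.values()), dtype=float)
        tree = cKDTree(points)

        # Query every voxel coordinate for its nearest centroid.
        coords = np.indices(shape).reshape(3, -1).T
        _, nearest = tree.query(coords)
        return labels[nearest].reshape(shape)
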

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Process")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Modify Network/Properties"))

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        "The 'Modify Network/Properties' menu will help you tweak elements of a network in post.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Process"),
        action=lambda: MenuHelper.close_menu(window, "Process")
    )

    open_dialog, _ = MenuHelper.create_dialog_opener(
        window, tutorial, "show_modify_dialog", "ModifyDialog", "modify_dialog"
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        f"""--Use 'Remove Unassigned IDs from Centroid List' to remove centroids of nodes without associated identities. This prepares the data for ID-oriented functions that expect all nodes to have an identity.
\n\n--Use 'Force Any Multiple IDs to Pick a Random Single ID' to randomly assign a single identity to nodes with multiple identities. This simplifies identity visualization when there are many identity permutations.
\n\n--Use 'Remove Any Nodes Not in Nodes Channel From Properties' to clean up the node_centroids and node_identities properties by removing any nodes whose labels aren't present in the nodes channel image. Useful after cropping datasets.
\n\n--Use 'Remove Trunk' to eliminate the most interconnected edge from the network, which can dominate analysis when evaluating downstream connections in trunk-heavy networks.""",
        highlight_type="rect",
        message_position="top_right",
        pre_action=open_dialog
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        f"""--Use 'Convert Trunk to Node' to transform the trunk into a new node instead of removing it, preserving network structure by treating the trunk as a central hub rather than shattering the network into subgraphs. Also moves the trunk from the edges image to the nodes image.
\n\n--Use 'Convert Edges to Node Objects' to merge all edges into the nodes image and update the network to pair nodes based on their previously shared edges. Edges receive new labels and gain the identity 'edge'. Useful for visualizing exact connectivity between objects.
\n\n--Use 'Remove Network Weights' to eliminate edge weights from connectivity networks, reducing each edge to a parameter of absolute connectivity rather than weighted by the number of joining edges.
\n\n--Use 'Prune Connections Between Nodes of the Same Type' to remove all connections between nodes with matching identities, isolating only connections between different node types.""",
        highlight_type="rect",
        message_position="top_right"
    )

    def close_dialog():
        if hasattr(tutorial, 'modify_dialog') and tutorial.modify_dialog:
            tutorial.modify_dialog.close()
            tutorial.modify_dialog = None

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Process"),
        f"""--Use 'Isolate Connections Between Two Specific Node Types' to filter the network to show only connections involving two user-selected node identities, removing all other connections.
\n\n--Use 'Rearrange Community IDs by Size' to renumber communities by node count, with 1 being the largest community. Makes community IDs more meaningful for visualization and analysis.
\n\n--Use 'Convert Communities to Nodes' to replace the node-to-node network with a community-to-community network, and relabel nodes in the image by their community ID rather than their original ID.
\n\n--Use 'Add/Remove Network Pairs' to manually add or remove specific node pairs from the network with optional edge IDs. Allows arbitrary network modification beyond what the table widgets support.""",
        highlight_type="rect",
        message_position="top_right",
        action=close_dialog
    )

    return tutorial

def setup_image_tutorial(window):
    tutorial = TutorialManager(window)

    # Step 1: Welcome
    tutorial.add_step(
        None,
        "This tutorial will guide you through the options for visualizing your data.",
        highlight_type="rect",
        message_position="top_right"
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Image"),
        "The Image menu contains the options for visualizing your data. Here you can adjust the brightness, generate some overlays, move channels around, and show the 3D display.",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Image"),
        action=lambda: MenuHelper.close_menu(window, "Image")
    )

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Image"),
        f"""--Use 'Properties' to view and modify current session properties including the xy_scale and z_scale settings. You can also purge specific channels (nodes, edges, overlays) and properties (network, identities, centroids) by unchecking them and pressing Enter. The Report Properties button populates spreadsheet-based properties to the upper-right table.
\n\n--Use 'Adjust Brightness/Contrast' to modify the visibility of each channel using dual-knobbed slider bars or by entering min/max values (0-65535). Essential for making loaded images visible when they appear too dark or bright.
\n\n--Use 'Channel Colors' to change the display colors for each channel. Default colors are light_red for nodes, light_green for edges, and white for Overlay1 and Overlay2.""",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Image"),
        action=lambda: MenuHelper.close_menu(window, "Image")
    )

    def open_to_save():
        menu = MenuHelper.open_menu(window, "Image")
        if menu:
            QTimer.singleShot(100, lambda: MenuHelper.open_submenu(menu, "Overlays"))

    tutorial.add_step(
        MenuHelper.create_submenu_action_rect_getter(window, "Image", "Overlays", "Shuffle"),
        f"""--Use 'Overlays -> Create Network Overlay' to draw 1-voxel thick white lines between all node centroids in the network, placed in Overlay1. Provides convenient network structure visualization, especially in 3D. Optional downsampling enlarges the rendered output.
\n\n--Use 'Overlays -> Create ID Overlay' to write the numerical ID of each node over its centroid, placed in Overlay2. Provides convenient node label visualization with optional downsampling to enlarge the rendered output.
\n\n--Use 'Overlays -> Color Nodes (or edges)' to create an RGB overlay where each grayscale label receives a unique color, placed in Overlay2 with a color legend in the data tables. Excellent for visualizing labeled objects.
\n\n--Use 'Overlays -> Shuffle' to swap data between channels. Useful for moving outputs to the correct channels, since many functions expect content in specific channels (e.g., Nodes for network generation).""",
        highlight_type=None,
        message_position="top_right",
        pre_action=open_to_save,
    )
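    # 'Create Network Overlay' above rasterizes each network edge as a thin line between
    # the two node centroids. The nested sketch below shows that idea with
    # skimage.draw.line_nd; it is illustrative only, and `pairs`/`centroids` are assumed
    # inputs rather than NetTracer3D objects.
    def _example_network_overlay(shape, centroids, pairs):
        import numpy as np
        from skimage.draw import line_nd

        overlay = np.zeros(shape, dtype=np.uint8)
        for a, b in pairs:
            coords = line_nd(centroids[a], centroids[b], endpoint=True)
            overlay[coords] = 255  # 1-voxel-thick white line between the two centroids
        return overlay
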

    tutorial.add_step(
        MenuHelper.create_menu_step_rect_getter(window, "Image"),
        f"""--Use 'Select Objects' to arbitrarily select and highlight groups of nodes or edges by entering comma-separated IDs or importing from a spreadsheet. Automatically navigates to the Z-plane of the first selected object for easy searching.
\n\n--Use 'Show 3D (Napari)' to launch Napari for interactive 3D visualization of all visible channels. Uses GPU for smooth rendering if available. Can downsample for speed and optionally include a bounding box. Requires Napari to be installed.
\n\n--Use 'Cellpose' to open the Cellpose3 GUI for cell segmentation. Opens 3D-stack version for 3D images and 2D-stack version for 2D images. Requires Cellpose3 to be installed in NetTracer3D's package environment.""",
        highlight_type="rect",
        message_position="top_right",
        pre_action=lambda: MenuHelper.open_menu(window, "Image"),
        action=lambda: MenuHelper.close_menu(window, "Image")
    )

    return tutorial

def setup_nettracer_tutorial(window):
    """
    DEPRECATED: Use setup_basics_tutorial instead
    This function is kept for backwards compatibility
    """
    return setup_basics_tutorial(window)