singlebehaviorlab 2.0.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- sam2/__init__.py +11 -0
- sam2/automatic_mask_generator.py +454 -0
- sam2/benchmark.py +92 -0
- sam2/build_sam.py +174 -0
- sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
- sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
- sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
- sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
- sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
- sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
- sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
- sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
- sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
- sam2/modeling/__init__.py +5 -0
- sam2/modeling/backbones/__init__.py +5 -0
- sam2/modeling/backbones/hieradet.py +317 -0
- sam2/modeling/backbones/image_encoder.py +134 -0
- sam2/modeling/backbones/utils.py +93 -0
- sam2/modeling/memory_attention.py +169 -0
- sam2/modeling/memory_encoder.py +181 -0
- sam2/modeling/position_encoding.py +239 -0
- sam2/modeling/sam/__init__.py +5 -0
- sam2/modeling/sam/mask_decoder.py +295 -0
- sam2/modeling/sam/prompt_encoder.py +202 -0
- sam2/modeling/sam/transformer.py +311 -0
- sam2/modeling/sam2_base.py +913 -0
- sam2/modeling/sam2_utils.py +323 -0
- sam2/sam2_hiera_b+.yaml +113 -0
- sam2/sam2_hiera_l.yaml +117 -0
- sam2/sam2_hiera_s.yaml +116 -0
- sam2/sam2_hiera_t.yaml +118 -0
- sam2/sam2_image_predictor.py +466 -0
- sam2/sam2_video_predictor.py +1388 -0
- sam2/sam2_video_predictor_legacy.py +1172 -0
- sam2/utils/__init__.py +5 -0
- sam2/utils/amg.py +348 -0
- sam2/utils/misc.py +349 -0
- sam2/utils/transforms.py +118 -0
- singlebehaviorlab/__init__.py +4 -0
- singlebehaviorlab/__main__.py +130 -0
- singlebehaviorlab/_paths.py +100 -0
- singlebehaviorlab/backend/__init__.py +2 -0
- singlebehaviorlab/backend/augmentations.py +320 -0
- singlebehaviorlab/backend/data_store.py +420 -0
- singlebehaviorlab/backend/model.py +1290 -0
- singlebehaviorlab/backend/train.py +4667 -0
- singlebehaviorlab/backend/uncertainty.py +578 -0
- singlebehaviorlab/backend/video_processor.py +688 -0
- singlebehaviorlab/backend/video_utils.py +139 -0
- singlebehaviorlab/data/config/config.yaml +85 -0
- singlebehaviorlab/data/training_profiles.json +334 -0
- singlebehaviorlab/gui/__init__.py +4 -0
- singlebehaviorlab/gui/analysis_widget.py +2291 -0
- singlebehaviorlab/gui/attention_export.py +311 -0
- singlebehaviorlab/gui/clip_extraction_widget.py +481 -0
- singlebehaviorlab/gui/clustering_widget.py +3187 -0
- singlebehaviorlab/gui/inference_popups.py +1138 -0
- singlebehaviorlab/gui/inference_widget.py +4550 -0
- singlebehaviorlab/gui/inference_worker.py +651 -0
- singlebehaviorlab/gui/labeling_widget.py +2324 -0
- singlebehaviorlab/gui/main_window.py +754 -0
- singlebehaviorlab/gui/metadata_management_widget.py +1119 -0
- singlebehaviorlab/gui/motion_tracking.py +764 -0
- singlebehaviorlab/gui/overlay_export.py +1234 -0
- singlebehaviorlab/gui/plot_integration.py +729 -0
- singlebehaviorlab/gui/qt_helpers.py +29 -0
- singlebehaviorlab/gui/registration_widget.py +1485 -0
- singlebehaviorlab/gui/review_widget.py +1330 -0
- singlebehaviorlab/gui/segmentation_tracking_widget.py +2752 -0
- singlebehaviorlab/gui/tab_tutorial_dialog.py +312 -0
- singlebehaviorlab/gui/timeline_themes.py +131 -0
- singlebehaviorlab/gui/training_profiles.py +418 -0
- singlebehaviorlab/gui/training_widget.py +3719 -0
- singlebehaviorlab/gui/video_utils.py +233 -0
- singlebehaviorlab/licenses/SAM2-LICENSE +201 -0
- singlebehaviorlab/licenses/VideoPrism-LICENSE +202 -0
- singlebehaviorlab-2.0.0.dist-info/METADATA +447 -0
- singlebehaviorlab-2.0.0.dist-info/RECORD +88 -0
- singlebehaviorlab-2.0.0.dist-info/WHEEL +5 -0
- singlebehaviorlab-2.0.0.dist-info/entry_points.txt +2 -0
- singlebehaviorlab-2.0.0.dist-info/licenses/LICENSE +21 -0
- singlebehaviorlab-2.0.0.dist-info/top_level.txt +3 -0
- videoprism/__init__.py +0 -0
- videoprism/encoders.py +910 -0
- videoprism/layers.py +1136 -0
- videoprism/models.py +407 -0
- videoprism/tokenizers.py +167 -0
- videoprism/utils.py +168 -0
@@ -0,0 +1,312 @@
+"""
+Per-tab help popups with detailed NOR-themed tutorials.
+"""
+
+from PyQt6.QtCore import Qt
+from PyQt6.QtWidgets import QDialog, QDialogButtonBox, QLabel, QPushButton, QTextBrowser, QVBoxLayout
+
+
+def _wrap_html(title: str, body: str) -> str:
+    return (
+        f"<h2>{title}</h2>"
+        "<p><b>Example: Novel Object Recognition (NOR)</b><br>"
+        "Overhead video of a mouse in an arena with one familiar object and one novel object. "
+        "Typical behaviors: <i>sniff_novel</i>, <i>sniff_familiar</i>, <i>walk_arena</i>, "
+        "<i>rear</i>, <i>groom</i>, <i>freeze</i>. Adapt names to your own protocol.</p>"
+        f"{body}"
+    )
+
+
+TAB_TUTORIALS: dict[str, dict[str, str]] = {
+    "labeling": {
+        "title": "Labeling",
+        "body": _wrap_html(
+            "Labeling",
+            """
+<h3>What this tab is for</h3>
+<p>Start here when you have raw NOR videos and want to build the first labeled training set.
+This tab extracts short clips, lets you define behavior classes, and lets you label either whole clips or frame ranges inside clips.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Add one or more source videos from your NOR sessions.</li>
+<li>Set <b>Target FPS</b> and <b>Frames per clip</b>. A good first setting is <b>12 fps</b> and <b>8 frames</b> per clip.</li>
+<li>Set <b>Step frames</b> to control how densely clips are extracted. Use a smaller step for denser coverage.</li>
+<li>If the videos are very long, set <b>Max clips per video</b> so the first pass stays manageable.</li>
+<li>Click <b>Extract all clips</b>. Clips are saved into <code>data/clips/</code>.</li>
+<li>Create behavior classes. For NOR, common classes are <i>sniff_novel</i>, <i>sniff_familiar</i>, <i>walk_arena</i>, <i>groom</i>, and <i>rear</i>.</li>
+<li>Start by collecting about <b>10–20 clean clips per class</b>. That is enough for a first training round, and you can always add more later in the <b>Refine</b> stage.</li>
+<li>Use the <b>Random</b> button often instead of labeling clips strictly in sequence. This usually gives a more diverse first set across videos, animals, and contexts.</li>
+<li>Select a clip and assign a class with the class buttons or keys <b>1–9</b>.</li>
+<li>At the beginning, focus on <b>clean 8-frame clips</b> where the behavior is isolated and obvious. You do <b>not</b> need to label transition clips first as long as you have good isolated examples of the behavior of interest.</li>
+<li>Use <b>per-frame labeling</b> later when transitions are important for improving boundaries, such as walking changing into sniff_novel within one clip.</li>
+<li>Press <b>Ctrl+S</b> to save.</li>
+</ol>
+
+<h3>Helpful optional tools</h3>
+<ul>
+<li><b>Show unlabeled only</b> and <b>Next unlabeled</b> help you move through the dataset quickly.</li>
+<li><b>Fullscreen</b> helps when the nose-object interaction is subtle.</li>
+<li><b>Multi-label</b> is useful if you want OvR training later.</li>
+<li><b>Hard-negative round dataset</b> is helpful when two classes look similar, such as sniff_familiar vs general head-down exploration.</li>
+</ul>
+
+<h3>Localization bbox</h3>
+<p>VideoPrism sees clips at <b>288×288</b>. If the mouse is small in the frame because the arena is large or the camera is far away,
+draw a localization box around the animal and save it. This helps the model crop in before classification.
+In many normal NOR setups, you can try training without this first.</p>
+""",
+        ),
+        "next_tab": "Training Sequencing Model",
+        "next_hint": "Once you have enough labeled clips across your behaviors, train the model.",
+    },
+    "training": {
+        "title": "Training Sequencing Model",
+        "body": _wrap_html(
+            "Training Sequencing Model",
+            """
+<h3>What this tab is for</h3>
+<p>This is where labeled clips become a classifier. The model learns how to separate the NOR behaviors you defined in Labeling.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Check that the <b>annotation file</b> and <b>clips directory</b> point to your experiment.</li>
+<li>Use <b>Dataset info</b> to confirm that class counts look reasonable.</li>
+<li>Start with a preset profile such as <b>LowInputData</b> if you are unsure.</li>
+<li>Choose epochs, batch size, and learning rates if you want to tune manually.</li>
+<li>Leave the <b>temporal decoder</b> on unless your dataset is extremely small. It helps sequence frame-level context inside the clip.</li>
+<li>For modest datasets, turn on <b>data augmentation</b>. This is usually important, not just optional, because it improves generalization across sessions, lighting, and animal-to-animal variation.</li>
+<li>Decide early whether to use <b>OvR</b> or standard softmax. OvR is an important modeling choice when classes can overlap or when you want one binary head per behavior.</li>
+<li>Use a <b>validation split</b> when possible so you can compare checkpoints more reliably.</li>
+<li>Click <b>Visualize training</b> if you want live loss and F1 plots.</li>
+<li>Click <b>Start training</b>. The best checkpoint is saved to <code>models/behavior_heads/</code>.</li>
+</ol>
+
+<h3>Additional training tools</h3>
+<ul>
+<li><b>Confusion-aware hard mining</b> focuses training on clips the model gets wrong.</li>
+<li><b>Fine-tune from pretrained</b> is useful if you already trained on a similar experiment and only want to adapt.</li>
+<li><b>Auto-tune</b> can try multiple settings automatically before a final run.</li>
+<li><b>Batch Train</b> is useful if you want to compare several profiles in one go.</li>
+</ul>
+
+<h3>Localization</h3>
+<p>The Localization section only becomes active when bbox labels exist. If your NOR animal is small relative to the arena,
+training can first learn localization and then classify on the crop.</p>
+""",
+        ),
+        "next_tab": "Sequencing",
+        "next_hint": "Load the trained checkpoint and run it on new NOR test videos.",
+    },
+    "sequencing": {
+        "title": "Sequencing",
+        "body": _wrap_html(
+            "Sequencing",
+            """
+<h3>What this tab is for</h3>
+<p>This tab runs your trained model on unseen videos and converts predictions into a behavior timeline.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Load the trained <code>.pt</code> checkpoint.</li>
+<li>Select one or more NOR videos to score.</li>
+<li>Match clip settings to training: <b>FPS</b>, <b>clip length</b>, <b>step</b>, and <b>resolution</b>.</li>
+<li>Run inference.</li>
+<li>Inspect the resulting timeline. For NOR, verify that object investigation periods are where you expect them.</li>
+</ol>
+
+<h3>Helpful optional tools</h3>
+<ul>
+<li><b>Quick-check sampled inference</b> runs only a few chunks first, so you can verify behavior quality before processing a whole session.</li>
+<li><b>Viterbi smoothing</b> and segment merging reduce flicker between nearby labels.</li>
+<li><b>Collect attention maps</b> helps you verify whether the model is looking at the mouse-object interaction area.</li>
+<li>Click on timeline segments to open a clip popup with scores and localization overlays.</li>
+</ul>
+
+<h3>Exports</h3>
+<p>You can export JSON results, CSV/SVG timelines, and overlay videos. For NOR, overlay videos are often the fastest sanity check before formal analysis.</p>
+""",
+        ),
+        "next_tab": "Refine",
+        "next_hint": "Review uncertain or boundary clips and feed corrections back into training.",
+    },
+    "refine": {
+        "title": "Refine",
+        "body": _wrap_html(
+            "Refine",
+            """
+<h3>What this tab is for</h3>
+<p>Refine is the active-learning loop. Instead of relabeling everything, you focus on the clips that matter most for improving the model.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Load the uncertainty or inference results if they are not already passed in automatically.</li>
+<li>Pick a review mode:
+<ul>
+<li><b>Uncertain</b> — best for fixing model mistakes.</li>
+<li><b>Confident</b> — best for adding more easy training data fast.</li>
+<li><b>Transition</b> — best for improving behavior boundaries.</li>
+</ul>
+</li>
+<li>Inspect each suggested clip, especially cases where the mouse briefly inspects an object and then walks away.</li>
+<li>Accept, relabel, or mark as hard negative.</li>
+<li>Save the updated clips back into annotations.</li>
+</ol>
+
+<h3>When to use it in NOR</h3>
+<p>This is especially useful when <i>sniff_novel</i> and <i>sniff_familiar</i> are confused, or when very short transitions between walking and investigation create noisy boundaries.</p>
+""",
+        ),
+        "next_tab": "Training Sequencing Model",
+        "next_hint": "Retrain with the refined clips, then run Sequencing again on held-out videos.",
+    },
+    "analysis": {
+        "title": "Downstream Analysis",
+        "body": _wrap_html(
+            "Downstream Analysis",
+            """
+<h3>What this tab is for</h3>
+<p>This is where predictions become summary figures, spatial behavior maps, and statistical comparisons.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Load inference or bout-level outputs from your experiment.</li>
+<li>Use the overview plots to summarize <b>occurrences</b>, <b>average bout duration</b>, <b>total duration</b>, and <b>percent time</b>.</li>
+<li>For NOR, compare <b>time on novel</b> vs <b>time on familiar</b>, or compare groups such as control vs treated.</li>
+<li>Use <b>spatial distribution</b> if you want to see where behaviors happen in the arena.</li>
+<li>Use the <b>transition graph</b> if you want to understand sequences such as walk_arena → sniff_novel → groom.</li>
+</ol>
+
+<h3>Helpful optional tools</h3>
+<ul>
+<li><b>Group comparison</b> is useful when you have multiple animals or conditions.</li>
+<li>Statistical tests include Mann-Whitney and Kruskal-Wallis depending on how many groups you compare.</li>
+<li>You can export figures as PDF, SVG, PNG, or HTML, and tables as CSV.</li>
+</ul>
+""",
+        ),
+        "next_tab": "Labeling or Segmentation Tracking",
+        "next_hint": "Either continue your supervised loop with new labels, or start the unbiased discovery path on a larger cohort.",
+    },
+    "segmentation": {
+        "title": "Segmentation Tracking",
+        "body": _wrap_html(
+            "Segmentation Tracking",
+            """
+<h3>What this tab is for</h3>
+<p>This is the first stage of the unbiased discovery path. It isolates animals from many videos so later embeddings reflect the animal's behavior rather than the full frame layout.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Load one or more NOR videos.</li>
+<li>Choose a SAM2 model and tracking resolution.</li>
+<li>On a clear frame, click <b>positive</b> points on the mouse and <b>negative</b> points on background, shadows, or objects when needed.</li>
+<li>Use object IDs if more than one animal must be tracked.</li>
+<li>Run tracking.</li>
+<li>If the mask drifts, pause, add corrective points, and resume.</li>
+<li>Save masks and optionally an overlay video.</li>
+</ol>
+
+<h3>Why this matters for discovery</h3>
+<p>When you want unbiased differences across many animals and sessions, standardized animal isolation reduces variation caused by arena framing and camera placement.</p>
+""",
+        ),
+        "next_tab": "Registration",
+        "next_hint": "Use the saved masks to build consistent animal-centered crops and extract embeddings.",
+    },
+    "registration": {
+        "title": "Registration",
+        "body": _wrap_html(
+            "Registration",
+            """
+<h3>What this tab is for</h3>
+<p>Registration converts segmented videos into normalized, animal-centered clips and then extracts VideoPrism embeddings.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Load video and mask pairs.</li>
+<li>Choose crop box size, target size, normalization, and whether ROI stays locked or updates frame-by-frame.</li>
+<li>Set clip extraction settings. For consistency with Labeling, 12 fps and 8 frames are a good first pass.</li>
+<li>Process videos into registered clips.</li>
+<li>Click <b>Extract embeddings</b> to run VideoPrism and save the matrix plus metadata.</li>
+</ol>
+
+<h3>Why this matters for discovery</h3>
+<p>This step makes later clustering less biased by background or scale, so clusters are more likely to reflect behavior rather than nuisance differences.</p>
+""",
+        ),
+        "next_tab": "Clustering",
+        "next_hint": "Use embeddings to reveal behavior structure without predefining labels.",
+    },
+    "clustering": {
+        "title": "Clustering",
+        "body": _wrap_html(
+            "Clustering",
+            """
+<h3>What this tab is for</h3>
+<p>Clustering is the heart of the unbiased discovery path. Instead of deciding the important behaviors first, you let the embedding space reveal repeated patterns across many sessions and animals.</p>
+
+<h3>How to use it</h3>
+<ol>
+<li>Load the embedding matrix and metadata from Registration.</li>
+<li>Optionally preprocess with scaling or PCA.</li>
+<li>Run UMAP to obtain a lower-dimensional view.</li>
+<li>Run Leiden or HDBSCAN to form clusters.</li>
+<li>Inspect representative points from each cluster.</li>
+<li>Click directly on points in the embedding plot to open the corresponding clips and see what kind of behavior repeatedly appears in that region of the cluster.</li>
+<li>For NOR, clusters may reveal repeated modes such as object approach, nose contact, locomotion around the perimeter, grooming away from the object, or rearing.</li>
+</ol>
+
+<h3>How to proceed</h3>
+<p>After you discover stable clusters, switch to <b>Labeling</b> and use the setup dialog to load representative clips. Use the clicked example clips to judge what behavior is most common in that cluster, then assign names such as <i>sniff_novel</i> or <i>walk_arena</i> and move into supervised training.</p>
+""",
+        ),
+        "next_tab": "Labeling",
+        "next_hint": "Name discovered clusters and turn them into a diverse labeled training set.",
+    },
+}
+
+
+def show_tab_tutorial(parent, tab_id: str) -> None:
+    data = TAB_TUTORIALS.get(tab_id)
+    if not data:
+        return
+
+    dlg = QDialog(parent)
+    dlg.setWindowTitle(f"{data['title']} Guide")
+    dlg.setMinimumSize(720, 600)
+    layout = QVBoxLayout(dlg)
+
+    browser = QTextBrowser()
+    browser.setOpenExternalLinks(False)
+    browser.setHtml(data["body"])
+    layout.addWidget(browser, 1)
+
+    next_lbl = QLabel(
+        f"<b>Next:</b> open tab <i>{data['next_tab']}</i> — {data['next_hint']}"
+    )
+    next_lbl.setWordWrap(True)
+    next_lbl.setTextFormat(Qt.TextFormat.RichText)
+    layout.addWidget(next_lbl)
+
+    next_title = data["next_tab"]
+    if hasattr(parent, "tabs") and " or " not in next_title and next_title:
+        go_btn = QPushButton("Open next tab")
+        go_btn.setToolTip(f"Switch to: {next_title}")
+
+        def _switch_next() -> None:
+            tw = parent.tabs
+            for i in range(tw.count()):
+                if tw.tabText(i) == next_title:
+                    tw.setCurrentIndex(i)
+                    dlg.accept()
+                    return
+
+        go_btn.clicked.connect(_switch_next)
+        layout.addWidget(go_btn)
+
+    buttons = QDialogButtonBox(QDialogButtonBox.StandardButton.Close)
+    buttons.rejected.connect(dlg.reject)
+    layout.addWidget(buttons)
+
+    dlg.exec()
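The `show_tab_tutorial` entry point above takes any parent widget and only offers the "Open next tab" button when the parent exposes a `tabs` attribute whose tab titles match the tutorial's `next_tab` value. A minimal sketch of how an application might wire this up; the `MainWindow` class and the Help menu action are hypothetical, while `show_tab_tutorial`, the `tabs` attribute, and the `"labeling"` key come from the module above:

    from PyQt6.QtWidgets import QApplication, QMainWindow, QTabWidget

    from singlebehaviorlab.gui.tab_tutorial_dialog import show_tab_tutorial


    class MainWindow(QMainWindow):
        def __init__(self) -> None:
            super().__init__()
            # show_tab_tutorial checks hasattr(parent, "tabs") before offering
            # the "Open next tab" button, so expose the tab widget here.
            self.tabs = QTabWidget()
            self.setCentralWidget(self.tabs)
            # Hypothetical Help menu entry; tab_id must be a TAB_TUTORIALS key.
            help_action = self.menuBar().addAction("Labeling Help")
            help_action.triggered.connect(lambda: show_tab_tutorial(self, "labeling"))


    app = QApplication([])
    win = MainWindow()
    win.show()
    app.exec()

Note that unknown `tab_id` values return silently, and compound `next_tab` values such as "Labeling or Segmentation Tracking" suppress the navigation button because of the `" or "` check.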
@@ -0,0 +1,131 @@
+"""Timeline color themes: name -> list of (R, G, B) tuples for behavior classes."""
+
+DEFAULT_THEME = "Bright"
+
+TIMELINE_COLOR_THEMES = {
+    "Bright": [
+        (31, 119, 180),
+        (255, 127, 14),
+        (44, 160, 44),
+        (214, 39, 40),
+        (148, 103, 189),
+        (140, 86, 75),
+        (227, 119, 194),
+        (127, 127, 127),
+        (188, 189, 34),
+        (23, 190, 207),
+    ],
+    "Muted": [
+        (76, 114, 176),
+        (221, 132, 82),
+        (85, 168, 104),
+        (196, 78, 82),
+        (129, 114, 179),
+        (147, 120, 96),
+        (218, 139, 195),
+        (140, 140, 140),
+        (204, 185, 116),
+        (100, 181, 205),
+    ],
+    "Colorblind Safe": [
+        (0, 114, 178),
+        (230, 159, 0),
+        (0, 158, 115),
+        (204, 121, 167),
+        (86, 180, 233),
+        (213, 94, 0),
+        (240, 228, 66),
+        (0, 0, 0),
+        (128, 128, 128),
+        (148, 103, 189),
+    ],
+    "Dark": [
+        (120, 187, 255),
+        (255, 170, 90),
+        (120, 210, 140),
+        (255, 120, 120),
+        (190, 150, 255),
+        (210, 170, 130),
+        (255, 160, 220),
+        (180, 180, 180),
+        (230, 220, 120),
+        (120, 220, 230),
+    ],
+    "Pastel": [
+        (179, 205, 227),
+        (251, 180, 174),
+        (204, 235, 197),
+        (222, 203, 228),
+        (254, 217, 166),
+        (255, 255, 204),
+        (229, 216, 189),
+        (253, 218, 236),
+        (242, 242, 242),
+        (204, 204, 204),
+    ],
+    "Neon": [
+        (0, 255, 255),
+        (255, 0, 255),
+        (0, 255, 0),
+        (255, 255, 0),
+        (255, 128, 0),
+        (0, 128, 255),
+        (255, 0, 128),
+        (128, 255, 0),
+        (0, 255, 128),
+        (128, 0, 255),
+    ],
+    "Earth": [
+        (101, 67, 33),
+        (160, 82, 45),
+        (205, 133, 63),
+        (34, 139, 34),
+        (85, 107, 47),
+        (46, 139, 87),
+        (107, 142, 35),
+        (112, 128, 144),
+        (105, 105, 105),
+        (47, 79, 79),
+    ],
+    "Solarized": [
+        (38, 139, 210),
+        (220, 50, 47),
+        (133, 153, 0),
+        (181, 137, 0),
+        (42, 161, 152),
+        (211, 54, 130),
+        (108, 113, 196),
+        (88, 110, 117),
+        (147, 161, 161),
+        (131, 148, 150),
+    ],
+    "Viridis": [
+        (68, 1, 84),
+        (72, 35, 116),
+        (64, 67, 135),
+        (52, 94, 141),
+        (41, 120, 142),
+        (32, 144, 140),
+        (34, 167, 132),
+        (68, 190, 112),
+        (121, 209, 81),
+        (189, 223, 38),
+    ],
+    "Magma": [
+        (0, 0, 4),
+        (28, 16, 68),
+        (60, 9, 108),
+        (91, 16, 110),
+        (122, 28, 109),
+        (153, 43, 96),
+        (181, 63, 78),
+        (207, 89, 63),
+        (229, 119, 43),
+        (251, 159, 25),
+    ],
+}
+
+
+def get_palette(theme_name: str) -> list[tuple[int, int, int]]:
+    """Return (R, G, B) list for the given theme; fallback to DEFAULT_THEME."""
+    return TIMELINE_COLOR_THEMES.get(theme_name, TIMELINE_COLOR_THEMES[DEFAULT_THEME])
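A short usage sketch for the palette helper. `get_palette` and its fallback behavior come from the module above; the behavior-class list is hypothetical, and cycling through the ten palette entries with modulo is an assumption about how a caller might handle more classes than colors:

    from PyQt6.QtGui import QColor

    from singlebehaviorlab.gui.timeline_themes import get_palette

    # Hypothetical behavior classes, in the order they were defined.
    classes = ["sniff_novel", "sniff_familiar", "walk_arena", "groom", "rear"]

    # Unknown theme names fall back to DEFAULT_THEME ("Bright").
    palette = get_palette("Colorblind Safe")

    # Each theme has 10 entries; cycle if there are more classes than colors.
    class_colors = {
        name: QColor(*palette[i % len(palette)]) for i, name in enumerate(classes)
    }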