w2t_bkin-0.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,397 @@
+ """Facemap ROI computation and facial motion signal extraction (Phase 3 - Optional).
+
+ Defines Regions of Interest (ROIs) on facial videos, computes motion energy
+ or SVD-based signals within each ROI, and aligns the resulting time series
+ to the reference timebase for integration into NWB files.
+
+ The module supports multiple motion metrics (absolute difference, SVD components),
+ handles multi-camera setups, and produces signals compatible with NWB TimeSeries
+ for behavioral neuroscience analysis.
+
+ Key Features:
+ -------------
+ - **ROI Definition**: Rectangular or polygonal regions on facial videos
+ - **Motion Metrics**: Absolute difference, SVD components, optical flow (planned)
+ - **Multi-Camera Support**: Process multiple facial views independently
+ - **Temporal Alignment**: Sync signals to reference timebase
+ - **NWB Integration**: Produces FacemapBundle for NWB TimeSeries
+
+ Main Functions:
+ ---------------
+ - define_rois: Create ROI specifications from config
+ - import_facemap_output: Load precomputed Facemap .npy output
+ - compute_facemap_signals: Calculate per-ROI motion signals
+ - compute_svd_components: Extract principal motion components (planned)
+ - align_facemap_to_timebase: Sync frame-indexed signals to reference timestamps
+ - validate_facemap_sampling_rate: Check a signal's rate against the expected camera rate
+ - create_facemap_bundle: Package signals for NWB
+
+ Requirements:
+ -------------
+ - FR-6: Compute facial motion signals
+ - FR-FACE-1: Define ROIs from configuration
+ - FR-FACE-2: Compute motion energy per ROI
+ - FR-FACE-3: Align signals to reference timebase
+
+ Acceptance Criteria:
+ --------------------
+ - A-FACE-1: Define ROIs from config specs
+ - A-FACE-2: Compute motion energy for each ROI
+ - A-FACE-3: Align signals to reference timebase
+ - A-FACE-4: Create FacemapBundle for NWB
+
+ Data Flow:
+ ----------
+ 1. define_rois → ROI specifications
+ 2. compute_facemap_signals / compute_svd_components → Raw signals
+ 3. align_facemap_to_timebase → Sync to reference
+ 4. create_facemap_bundle → Package for NWB
+
+ Example:
+ --------
+ >>> from pathlib import Path
+ >>> from w2t_bkin.facemap import define_rois, compute_facemap_signals
+ >>> from w2t_bkin.sync import create_timebase_provider
+ >>>
+ >>> # Define ROIs (keys: name, x, y, width, height)
+ >>> roi_specs = [
+ ...     {"name": "left_whisker", "x": 100, "y": 200, "width": 50, "height": 50},
+ ...     {"name": "right_whisker", "x": 300, "y": 200, "width": 50, "height": 50},
+ ... ]
+ >>> rois = define_rois(roi_specs)
+ >>>
+ >>> # Compute motion energy
+ >>> signals = compute_facemap_signals(Path("facial_video.avi"), rois)
+ >>> print(f"Computed {len(signals)} ROI signals")
+ >>>
+ >>> # Align frame-indexed signals to the reference timebase
+ >>> from w2t_bkin.facemap import align_facemap_to_timebase
+ >>> aligned = align_facemap_to_timebase(
+ ...     [{"roi_name": s.roi_name, "frame_indices": list(range(len(s.values))), "values": s.values} for s in signals],
+ ...     reference_times=timebase_provider.get_timestamps(len(signals[0].values)),
+ ... )
+ """
+
+ import logging
+ from pathlib import Path
+ from typing import Dict, List
+
+ import cv2
+ import numpy as np
+
+ from .models import FacemapBundle, FacemapROI, FacemapSignal
+
+ logger = logging.getLogger(__name__)
+
+
+ class FacemapError(Exception):
+     """Base exception for facemap-related errors."""
+
+     pass
+
+
+ def define_rois(roi_specs: List[Dict]) -> List[FacemapROI]:
+     """Create FacemapROI objects from specifications.
+
+     Args:
+         roi_specs: List of ROI specification dicts with keys
+             "name", "x", "y", "width", and "height"
+
+     Returns:
+         List of FacemapROI objects
+
+     Raises:
+         FacemapError: If ROI coordinates are invalid
+     """
+     rois = []
+
+     for spec in roi_specs:
+         # Validate coordinates are non-negative
+         if spec["x"] < 0 or spec["y"] < 0:
+             raise FacemapError(f"ROI coordinates must be non-negative: {spec}")
+
+         if spec["width"] <= 0 or spec["height"] <= 0:
+             raise FacemapError(f"ROI dimensions must be positive: {spec}")
+
+         roi = FacemapROI(name=spec["name"], x=spec["x"], y=spec["y"], width=spec["width"], height=spec["height"])
+         rois.append(roi)
+
+     # Warn about significantly overlapping ROIs
+     for i in range(len(rois)):
+         for j in range(i + 1, len(rois)):
+             if _rois_overlap(rois[i], rois[j]):
+                 logger.warning(f"ROIs overlap: {rois[i].name} and {rois[j].name}")
+
+     return rois
+
+
+ def _rois_overlap(roi1: FacemapROI, roi2: FacemapROI) -> bool:
+     """Check if two ROIs overlap significantly."""
+     # Simple bounding box overlap check
+     x1_min, x1_max = roi1.x, roi1.x + roi1.width
+     y1_min, y1_max = roi1.y, roi1.y + roi1.height
+     x2_min, x2_max = roi2.x, roi2.x + roi2.width
+     y2_min, y2_max = roi2.y, roi2.y + roi2.height
+
+     # Check if rectangles overlap
+     overlap_x = max(0, min(x1_max, x2_max) - max(x1_min, x2_min))
+     overlap_y = max(0, min(y1_max, y2_max) - max(y1_min, y2_min))
+
+     if overlap_x > 0 and overlap_y > 0:
+         overlap_area = overlap_x * overlap_y
+         area1 = roi1.width * roi1.height
+         area2 = roi2.width * roi2.height
+
+         # Consider significant if overlap > 20% of smaller ROI
+         min_area = min(area1, area2)
+         if overlap_area / min_area > 0.2:
+             return True
+
+     return False
+
+
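+ # Illustrative check of the 20% rule above (hypothetical helper, not part of
+ # the released API): two 50x50 ROIs offset horizontally by 30 px share a
+ # 20x50 strip (1000 px^2), i.e. 40% of the smaller ROI, so they are flagged.
+ def _example_overlap_rule() -> bool:
+     roi_a = FacemapROI(name="a", x=0, y=0, width=50, height=50)
+     roi_b = FacemapROI(name="b", x=30, y=0, width=50, height=50)
+     return _rois_overlap(roi_a, roi_b)  # True (0.4 > 0.2)
+
+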
+ def import_facemap_output(npy_path: Path) -> Dict:
+     """Import precomputed Facemap .npy output.
+
+     Args:
+         npy_path: Path to Facemap .npy file
+
+     Returns:
+         Dict containing Facemap data
+
+     Raises:
+         FacemapError: If file doesn't exist or format is invalid
+     """
+     if not npy_path.exists():
+         raise FacemapError(f"Facemap file not found: {npy_path}")
+
+     try:
+         data = np.load(npy_path, allow_pickle=True).item()
+         return data
+     except Exception as e:
+         raise FacemapError(f"Failed to load Facemap file: {e}") from e
+
+
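+ # Illustrative sketch (hypothetical path and helper, not part of the released
+ # API): the keys present in a Facemap output file depend on how Facemap was
+ # run, so they are inspected rather than assumed.
+ def _example_inspect_facemap_output() -> None:
+     data = import_facemap_output(Path("face_cam0_proc.npy"))
+     for key, value in data.items():
+         shape = getattr(value, "shape", None)
+         logger.info("Facemap key %r: %s", key, shape if shape is not None else type(value).__name__)
+
+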
+ def compute_facemap_signals(video_path: Path, rois: List[FacemapROI]) -> List[FacemapSignal]:
+     """Compute motion energy signals for each ROI.
+
+     Args:
+         video_path: Path to video file
+         rois: List of ROIs to compute signals for (assumed to lie within the frame)
+
+     Returns:
+         List of FacemapSignal objects
+
+     Raises:
+         FacemapError: If video cannot be read
+     """
+     if not video_path.exists():
+         raise FacemapError(f"Video file not found: {video_path}")
+
+     try:
+         cap = cv2.VideoCapture(str(video_path))
+
+         if not cap.isOpened():
+             raise FacemapError(f"Cannot open video: {video_path}")
+
+         # Get video properties
+         fps = cap.get(cv2.CAP_PROP_FPS)
+         frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         if fps <= 0:
+             raise FacemapError(f"Video reports invalid frame rate ({fps}): {video_path}")
+
+         # Initialize signal storage
+         roi_signals = {roi.name: [] for roi in rois}
+
+         # Read frames and compute motion energy
+         prev_frame = None
+         frame_idx = 0
+
+         while True:
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             # Convert to grayscale
+             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+             if prev_frame is not None:
+                 # Compute motion for each ROI
+                 for roi in rois:
+                     # Extract ROI regions
+                     roi_prev = prev_frame[roi.y : roi.y + roi.height, roi.x : roi.x + roi.width]
+                     roi_curr = gray[roi.y : roi.y + roi.height, roi.x : roi.x + roi.width]
+
+                     # Compute absolute difference (motion energy)
+                     diff = cv2.absdiff(roi_curr, roi_prev)
+                     motion_energy = float(np.mean(diff))
+
+                     roi_signals[roi.name].append(motion_energy)
+             else:
+                 # First frame - no motion
+                 for roi in rois:
+                     roi_signals[roi.name].append(0.0)
+
+             prev_frame = gray
+             frame_idx += 1
+
+         cap.release()
+
+         # Create FacemapSignal objects
+         signals = []
+         for roi in rois:
+             # Generate timestamps based on frame rate
+             timestamps = [i / fps for i in range(len(roi_signals[roi.name]))]
+
+             signal = FacemapSignal(roi_name=roi.name, timestamps=timestamps, values=roi_signals[roi.name], sampling_rate=fps)
+             signals.append(signal)
+
+         return signals
+
+     except FacemapError:
+         raise
+     except Exception as e:
+         raise FacemapError(f"Failed to compute Facemap signals: {e}") from e
+
+
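+ # Minimal numpy-only sketch of the motion-energy metric used above: the mean
+ # absolute difference between consecutive grayscale frames inside an ROI.
+ # The frames and ROI here are synthetic placeholders, not project data.
+ def _example_motion_energy_metric() -> float:
+     rng = np.random.default_rng(0)
+     prev = rng.integers(0, 256, size=(120, 160), dtype=np.uint8)
+     curr = rng.integers(0, 256, size=(120, 160), dtype=np.uint8)
+     roi = FacemapROI(name="demo", x=10, y=20, width=40, height=30)
+     prev_roi = prev[roi.y : roi.y + roi.height, roi.x : roi.x + roi.width].astype(np.int16)
+     curr_roi = curr[roi.y : roi.y + roi.height, roi.x : roi.x + roi.width].astype(np.int16)
+     # int16 cast avoids uint8 wraparound, mirroring cv2.absdiff semantics
+     return float(np.mean(np.abs(curr_roi - prev_roi)))
+
+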
+ def align_facemap_to_timebase(signals: List[Dict], reference_times: List[float], mapping: str = "nearest") -> List[Dict]:
+     """Align facemap signal timestamps to reference timebase.
+
+     Each frame index is mapped to the reference timestamp at that index.
+     Frames beyond the end of the reference series are clamped to the last
+     timestamp ("nearest") or extrapolated at the last inter-frame interval
+     ("linear").
+
+     Args:
+         signals: List of signal dicts with frame_indices and values
+         reference_times: Reference timestamps from sync
+         mapping: Alignment strategy ("nearest" or "linear")
+
+     Returns:
+         List of aligned signal dicts with timestamps
+
+     Raises:
+         FacemapError: If alignment fails
+     """
+     if not reference_times:
+         raise FacemapError("reference_times is empty; cannot align signals")
+
+     aligned_signals = []
+
+     for signal in signals:
+         frame_indices = signal["frame_indices"]
+         values = signal["values"]
+
+         # Validate lengths match
+         if len(frame_indices) != len(values):
+             raise FacemapError(f"Frame indices length ({len(frame_indices)}) != values length ({len(values)})")
+
+         # Map frame indices to timestamps
+         timestamps = []
+         for frame_idx in frame_indices:
+             if mapping == "nearest":
+                 if frame_idx < len(reference_times):
+                     timestamp = reference_times[frame_idx]
+                 else:
+                     timestamp = reference_times[-1]
+             elif mapping == "linear":
+                 if frame_idx < len(reference_times):
+                     timestamp = reference_times[frame_idx]
+                 else:
+                     # Linear extrapolation
+                     if len(reference_times) >= 2:
+                         dt = reference_times[-1] - reference_times[-2]
+                         timestamp = reference_times[-1] + dt * (frame_idx - len(reference_times) + 1)
+                     else:
+                         timestamp = reference_times[-1]
+             else:
+                 raise FacemapError(f"Unknown mapping strategy: {mapping}")
+
+             timestamps.append(timestamp)
+
+         aligned_signals.append({"roi_name": signal["roi_name"], "timestamps": timestamps, "values": values})
+
+     return aligned_signals
+
+
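+ # Illustrative alignment of a short frame-indexed signal (synthetic values;
+ # helper not part of the released API). Frame 4 lies past the reference
+ # series, so with mapping="linear" it is extrapolated: 0.100 + 0.033 = 0.133.
+ def _example_alignment() -> List[Dict]:
+     reference_times = [0.000, 0.033, 0.067, 0.100]
+     frame_signal = {"roi_name": "eye", "frame_indices": [0, 1, 2, 3, 4], "values": [0.1, 0.2, 0.15, 0.3, 0.25]}
+     return align_facemap_to_timebase([frame_signal], reference_times, mapping="linear")
+
+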
+ def validate_facemap_sampling_rate(signal: FacemapSignal, expected_rate: float, tolerance: float = 0.1) -> bool:
+     """Validate that signal sampling rate matches expected rate.
+
+     Args:
+         signal: FacemapSignal to validate
+         expected_rate: Expected sampling rate in Hz
+         tolerance: Tolerance for rate mismatch (fraction)
+
+     Returns:
+         True if sampling rate is within tolerance, False otherwise
+     """
+     if len(signal.timestamps) < 2:
+         return True
+
+     # Compute actual rate from timestamps
+     total_time = signal.timestamps[-1] - signal.timestamps[0]
+     num_samples = len(signal.timestamps)
+     actual_rate = (num_samples - 1) / total_time if total_time > 0 else 0
+
+     # Check if within tolerance
+     rate_diff = abs(actual_rate - expected_rate)
+     relative_diff = rate_diff / expected_rate if expected_rate > 0 else 0
+
+     if relative_diff > tolerance:
+         logger.warning(f"Sampling rate mismatch: actual={actual_rate:.2f} Hz, expected={expected_rate:.2f} Hz (diff={relative_diff:.1%})")
+         return False
+
+     return True
+
+
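+ # Illustrative check (synthetic values; helper not part of the released API):
+ # 250 timestamps spanning 10 s imply ~24.9 Hz, which fails a 30 Hz
+ # expectation at 10% tolerance.
+ def _example_rate_mismatch() -> bool:
+     timestamps = [i / 24.9 for i in range(250)]
+     signal = FacemapSignal(roi_name="eye", timestamps=timestamps, values=[0.0] * 250, sampling_rate=24.9)
+     return validate_facemap_sampling_rate(signal, expected_rate=30.0, tolerance=0.1)  # False
+
+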
+ if __name__ == "__main__":
+     # Usage examples for the facemap module.
+     print("=" * 70)
+     print("W2T-BKIN Facemap Module - Usage Examples")
+     print("=" * 70)
+     print()
+
+     print("Example 1: Define ROI (Region of Interest)")
+     print("-" * 50)
+
+     # Define a rectangular ROI for the right whisker region
+     whisker_roi = {
+         "name": "whiskers_right",
+         "type": "rectangle",
+         "x": 100,
+         "y": 150,
+         "width": 80,
+         "height": 60,
+         "description": "Right whisker region",
+     }
+
+     print(f"ROI: {whisker_roi['name']}")
+     print(f"Type: {whisker_roi['type']}")
+     print(f"Bounds: ({whisker_roi['x']}, {whisker_roi['y']}) " f"{whisker_roi['width']}x{whisker_roi['height']}")
+     print()
+
+     # Example 2: Simulate motion energy signal
+     print("Example 2: Simulate Facemap Signal")
+     print("-" * 50)
+
+     # Create synthetic motion energy signal (300 frames at 30 fps)
+     timestamps = np.arange(300) / 30.0
+     motion_energy = np.abs(np.sin(timestamps * 2) + np.random.randn(300) * 0.1)
+
+     signal = {
+         "roi_name": "whiskers_right",
+         "signal_type": "motion_energy",
+         "timestamps": timestamps.tolist(),
+         "values": motion_energy.tolist(),
+         "sampling_rate": 30.0,
+     }
+
+     print(f"Signal type: {signal['signal_type']}")
+     print(f"ROI: {signal['roi_name']}")
+     print(f"Duration: {timestamps[-1]:.1f} seconds")
+     print(f"Samples: {len(signal['values'])}")
+     print(f"Mean motion energy: {np.mean(motion_energy):.3f}")
+     print()
+
+     # Example 3: Validate sampling rate against a FacemapSignal
+     print("Example 3: Validate Sampling Rate")
+     print("-" * 50)
+
+     synthetic_signal = FacemapSignal(
+         roi_name="whiskers_right",
+         timestamps=timestamps.tolist(),
+         values=motion_energy.tolist(),
+         sampling_rate=30.0,
+     )
+     expected_rate = 30.0
+     is_valid = validate_facemap_sampling_rate(synthetic_signal, expected_rate, tolerance=0.01)
+
+     print(f"Actual rate: {synthetic_signal.sampling_rate} Hz")
+     print(f"Expected rate: {expected_rate} Hz")
+     print(f"Valid: {is_valid}")
+     print()
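+
+     # Example 4 (illustrative sketch): align a frame-indexed signal to a
+     # hypothetical reference timebase; in a real session the reference
+     # timestamps would come from the sync module.
+     print("Example 4: Align Signal to Reference Timebase")
+     print("-" * 50)
+
+     reference_times = [i / 30.0 + 0.002 for i in range(300)]  # hypothetical sync-corrected times
+     frame_signal = {"roi_name": "whiskers_right", "frame_indices": list(range(300)), "values": motion_energy.tolist()}
+     aligned = align_facemap_to_timebase([frame_signal], reference_times, mapping="nearest")
+
+     print(f"Aligned samples: {len(aligned[0]['timestamps'])}")
+     print(f"First aligned timestamp: {aligned[0]['timestamps'][0]:.3f} s")
+     print()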
+
+     print("Production usage:")
+     print("  from w2t_bkin.facemap import define_rois, compute_facemap_signals")
+     print("  rois = define_rois(roi_specs)")
+     print("  signals = compute_facemap_signals(video_path, rois)")
+     print()
+
+     print("=" * 70)
+     print("Examples completed. See module docstring for API details.")
+     print("=" * 70)
@@ -0,0 +1,134 @@
+ """Facemap module-local models for facial motion energy analysis.
+
+ This module defines models owned by the facemap module for representing
+ Facemap ROI definitions and motion energy signals aligned to the session
+ reference timebase.
+
+ Model ownership follows the target architecture where each module owns
+ its own models rather than sharing through a central domain package.
+ """
+
+ from typing import List, Literal
+
+ from pydantic import BaseModel, Field, model_validator
+
+ __all__ = ["FacemapROI", "FacemapSignal", "FacemapBundle"]
+
+
+ class FacemapROI(BaseModel):
+     """Region of interest for Facemap analysis.
+
+     Defines a rectangular ROI on the face camera for motion energy
+     extraction.
+
+     Attributes:
+         name: ROI identifier (e.g., "eye", "whisker", "nose")
+         x: Top-left X coordinate (pixels)
+         y: Top-left Y coordinate (pixels)
+         width: ROI width (pixels)
+         height: ROI height (pixels)
+
+     Requirements:
+         - FR-6: Import/compute Facemap metrics
+     """
+
+     model_config = {"frozen": True, "extra": "forbid"}
+
+     name: str = Field(..., description="ROI identifier (e.g., 'eye', 'whisker', 'nose')")
+     x: int = Field(..., description="Top-left X coordinate in pixels", ge=0)
+     y: int = Field(..., description="Top-left Y coordinate in pixels", ge=0)
+     width: int = Field(..., description="ROI width in pixels", gt=0)
+     height: int = Field(..., description="ROI height in pixels", gt=0)
+
+
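+ # Illustrative sketch (hypothetical helper, not part of the released API):
+ # the Field constraints above make pydantic reject negative coordinates and
+ # non-positive dimensions at construction time.
+ def _example_roi_validation() -> bool:
+     from pydantic import ValidationError
+
+     try:
+         FacemapROI(name="bad", x=-5, y=0, width=10, height=10)
+     except ValidationError:
+         return True  # rejected as expected (x must be >= 0)
+     return False
+
+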
+ class FacemapSignal(BaseModel):
+     """Time series signal from Facemap ROI.
+
+     Motion energy or other signal extracted from an ROI, aligned
+     to the session reference timebase.
+
+     Attributes:
+         roi_name: Name of source ROI (must match an ROI in FacemapBundle)
+         timestamps: Aligned timestamps (seconds, reference timebase)
+         values: Signal values (motion energy, 0.0-1.0 normalized)
+         sampling_rate: Signal sampling rate (Hz)
+
+     Requirements:
+         - FR-6: Import/compute Facemap metrics
+         - FR-TB-1..6: Align to reference timebase
+
+     Note:
+         Timestamps are aligned to the session reference timebase
+         using the mapping strategy (nearest|linear) configured in
+         timebase.mapping.
+     """
+
+     model_config = {"frozen": True, "extra": "forbid"}
+
+     roi_name: str = Field(..., description="Name of source ROI (must match an ROI in FacemapBundle)")
+     timestamps: List[float] = Field(..., description="Aligned timestamps in seconds (reference timebase)")
+     values: List[float] = Field(..., description="Motion energy signal values (normalized 0.0-1.0)")
+     sampling_rate: float = Field(..., description="Signal sampling rate in Hz", gt=0)
+
+
+ class FacemapBundle(BaseModel):
+     """Facemap data bundle aligned to reference timebase.
+
+     Complete Facemap dataset for one camera with ROI definitions
+     and aligned motion energy signals.
+
+     Attributes:
+         session_id: Session identifier
+         camera_id: Camera identifier
+         rois: List of ROI definitions
+         signals: List of motion energy signals (one per ROI)
+         alignment_method: Timebase alignment method ("nearest"|"linear")
+         generated_at: ISO 8601 timestamp
+
+     Requirements:
+         - FR-6: Import/compute Facemap metrics
+         - FR-TB-1..6: Align to reference timebase
+         - A1: Include in NWB
+         - A3: Include in QC report
+
+     Validation:
+         All signals must reference ROIs defined in the rois list.
+
+     Example:
+         >>> from w2t_bkin.facemap.models import FacemapBundle, FacemapROI, FacemapSignal
+         >>> bundle = FacemapBundle(
+         ...     session_id="Session-001",
+         ...     camera_id="cam0",
+         ...     rois=[FacemapROI(name="eye", ...)],
+         ...     signals=[FacemapSignal(roi_name="eye", ...)],
+         ...     alignment_method="nearest",
+         ...     generated_at="2025-11-13T10:30:00Z",
+         ... )
+     """
+
+     model_config = {"frozen": True, "extra": "forbid"}
+
+     session_id: str = Field(..., description="Session identifier")
+     camera_id: str = Field(..., description="Camera identifier")
+     rois: List[FacemapROI] = Field(..., description="List of ROI definitions")
+     signals: List[FacemapSignal] = Field(..., description="List of motion energy signals (one per ROI)")
+     alignment_method: Literal["nearest", "linear"] = Field(..., description="Timebase alignment method: 'nearest' | 'linear'")
+     generated_at: str = Field(..., description="ISO 8601 timestamp of facemap bundle generation")
+
+     @model_validator(mode="after")
+     def validate_signals_match_rois(self) -> "FacemapBundle":
+         """Validate that all signals reference defined ROIs.
+
+         Ensures referential integrity between signals and ROIs.
+
+         Raises:
+             ValueError: If a signal references an undefined ROI.
+
+         Requirements:
+             - Data integrity check for cross-model references
+         """
+         roi_names = {roi.name for roi in self.rois}
+         for signal in self.signals:
+             if signal.roi_name not in roi_names:
+                 raise ValueError(f"Signal references undefined ROI: {signal.roi_name}. " f"Defined ROIs: {roi_names}")
+         return self
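+
+
+ # Illustrative sketch (hypothetical helper and values, not part of the released
+ # API): builds a minimal, internally consistent bundle; a signal naming an ROI
+ # that is not in `rois` would raise ValueError via the validator above.
+ def _example_bundle() -> FacemapBundle:
+     eye_roi = FacemapROI(name="eye", x=120, y=80, width=40, height=30)
+     eye_signal = FacemapSignal(
+         roi_name="eye",
+         timestamps=[0.000, 0.033, 0.067],
+         values=[0.10, 0.12, 0.09],
+         sampling_rate=30.0,
+     )
+     return FacemapBundle(
+         session_id="Session-001",
+         camera_id="cam0",
+         rois=[eye_roi],
+         signals=[eye_signal],
+         alignment_method="nearest",
+         generated_at="2025-11-13T10:30:00Z",
+     )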