kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kinemotion might be problematic. Click here for more details.
- kinemotion/__init__.py +31 -6
- kinemotion/api.py +39 -598
- kinemotion/cli.py +2 -0
- kinemotion/cmj/__init__.py +5 -0
- kinemotion/cmj/analysis.py +621 -0
- kinemotion/cmj/api.py +563 -0
- kinemotion/cmj/cli.py +324 -0
- kinemotion/cmj/debug_overlay.py +457 -0
- kinemotion/cmj/joint_angles.py +307 -0
- kinemotion/cmj/kinematics.py +360 -0
- kinemotion/cmj/metrics_validator.py +767 -0
- kinemotion/cmj/validation_bounds.py +341 -0
- kinemotion/core/__init__.py +28 -0
- kinemotion/core/auto_tuning.py +71 -37
- kinemotion/core/cli_utils.py +60 -0
- kinemotion/core/debug_overlay_utils.py +385 -0
- kinemotion/core/determinism.py +83 -0
- kinemotion/core/experimental.py +103 -0
- kinemotion/core/filtering.py +9 -6
- kinemotion/core/formatting.py +75 -0
- kinemotion/core/metadata.py +231 -0
- kinemotion/core/model_downloader.py +172 -0
- kinemotion/core/pipeline_utils.py +433 -0
- kinemotion/core/pose.py +298 -141
- kinemotion/core/pose_landmarks.py +67 -0
- kinemotion/core/quality.py +393 -0
- kinemotion/core/smoothing.py +250 -154
- kinemotion/core/timing.py +247 -0
- kinemotion/core/types.py +42 -0
- kinemotion/core/validation.py +201 -0
- kinemotion/core/video_io.py +135 -50
- kinemotion/dropjump/__init__.py +1 -1
- kinemotion/dropjump/analysis.py +367 -182
- kinemotion/dropjump/api.py +665 -0
- kinemotion/dropjump/cli.py +156 -466
- kinemotion/dropjump/debug_overlay.py +136 -206
- kinemotion/dropjump/kinematics.py +232 -255
- kinemotion/dropjump/metrics_validator.py +240 -0
- kinemotion/dropjump/validation_bounds.py +157 -0
- kinemotion/models/__init__.py +0 -0
- kinemotion/models/pose_landmarker_lite.task +0 -0
- kinemotion-0.67.0.dist-info/METADATA +726 -0
- kinemotion-0.67.0.dist-info/RECORD +47 -0
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
- kinemotion-0.10.6.dist-info/METADATA +0 -561
- kinemotion-0.10.6.dist-info/RECORD +0 -20
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,433 @@
|
|
|
1
|
+
"Shared pipeline utilities for kinematic analysis."
|
|
2
|
+
|
|
3
|
+
import multiprocessing as mp
|
|
4
|
+
from collections.abc import Callable
|
|
5
|
+
from concurrent.futures import ProcessPoolExecutor, as_completed
|
|
6
|
+
from typing import TypeVar
|
|
7
|
+
|
|
8
|
+
import cv2
|
|
9
|
+
import numpy as np
|
|
10
|
+
|
|
11
|
+
from ..cmj.analysis import compute_average_hip_position
|
|
12
|
+
from ..dropjump.analysis import compute_average_foot_position
|
|
13
|
+
from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
|
|
14
|
+
from .pose import PoseTracker
|
|
15
|
+
from .smoothing import smooth_landmarks, smooth_landmarks_advanced
|
|
16
|
+
from .timing import NULL_TIMER, Timer
|
|
17
|
+
from .video_io import VideoProcessor
|
|
18
|
+
|
|
19
|
+
TResult = TypeVar("TResult")
|
|
20
|
+
TConfig = TypeVar("TConfig")
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def parse_quality_preset(quality: str) -> QualityPreset:
    """Convert a quality preset name into its enum value.

    Args:
        quality: Preset name, one of 'fast', 'balanced', or 'accurate'
            (matched case-insensitively).

    Returns:
        The matching QualityPreset member.

    Raises:
        ValueError: If the name does not correspond to any preset.
    """
    normalized = quality.lower()
    try:
        return QualityPreset(normalized)
    except ValueError as exc:
        message = f"Invalid quality preset: {quality}. Must be 'fast', 'balanced', or 'accurate'"
        raise ValueError(message) from exc
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def determine_confidence_levels(
    quality_preset: QualityPreset,
    detection_confidence: float | None,
    tracking_confidence: float | None,
) -> tuple[float, float]:
    """Resolve detection/tracking confidence from preset and expert overrides.

    Args:
        quality_preset: Quality preset enum
        detection_confidence: Optional expert override for detection confidence
        tracking_confidence: Optional expert override for tracking confidence

    Returns:
        Tuple of (detection_confidence, tracking_confidence)
    """
    # Preset defaults; BALANCED (and any other value) falls back to 0.5/0.5.
    if quality_preset == QualityPreset.FAST:
        detection, tracking = 0.3, 0.3
    elif quality_preset == QualityPreset.ACCURATE:
        detection, tracking = 0.6, 0.6
    else:
        detection, tracking = 0.5, 0.5

    # Explicit expert overrides always win over the preset defaults.
    if detection_confidence is not None:
        detection = detection_confidence
    if tracking_confidence is not None:
        tracking = tracking_confidence

    return detection, tracking
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def apply_expert_overrides(
    params: AnalysisParameters,
    smoothing_window: int | None,
    velocity_threshold: float | None,
    min_contact_frames: int | None,
    visibility_threshold: float | None,
) -> AnalysisParameters:
    """Overlay expert-supplied values onto auto-tuned parameters.

    Only overrides that are not None are applied; ``params`` is mutated in
    place and also returned for caller convenience.

    Args:
        params: Auto-tuned parameters object
        smoothing_window: Optional override for smoothing window
        velocity_threshold: Optional override for velocity threshold
        min_contact_frames: Optional override for minimum contact frames
        visibility_threshold: Optional override for visibility threshold

    Returns:
        The same params object, mutated in place.
    """
    overrides = {
        "smoothing_window": smoothing_window,
        "velocity_threshold": velocity_threshold,
        "min_contact_frames": min_contact_frames,
        "visibility_threshold": visibility_threshold,
    }
    for attribute, value in overrides.items():
        if value is not None:
            setattr(params, attribute, value)
    return params
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def print_verbose_parameters(
    video: VideoProcessor,
    characteristics: VideoCharacteristics,
    quality_preset: QualityPreset,
    params: AnalysisParameters,
) -> None:
    """Print a human-readable summary of the auto-tuned parameters.

    Args:
        video: Video processor with fps and dimensions
        characteristics: Video analysis characteristics
        quality_preset: Selected quality preset
        params: Auto-tuned parameters
    """
    divider = "=" * 60
    # Assemble the whole report first, then emit it with a single print;
    # the resulting byte stream is identical to line-by-line printing.
    report = [
        "",
        divider,
        "AUTO-TUNED PARAMETERS",
        divider,
        f"Video FPS: {video.fps:.2f}",
        (
            f"Tracking quality: {characteristics.tracking_quality} "
            f"(avg visibility: {characteristics.avg_visibility:.2f})"
        ),
        f"Quality preset: {quality_preset.value}",
        "",
        "Selected parameters:",
        f"  smoothing_window: {params.smoothing_window}",
        f"  polyorder: {params.polyorder}",
        f"  velocity_threshold: {params.velocity_threshold:.4f}",
        f"  min_contact_frames: {params.min_contact_frames}",
        f"  visibility_threshold: {params.visibility_threshold}",
        f"  detection_confidence: {params.detection_confidence}",
        f"  tracking_confidence: {params.tracking_confidence}",
        f"  outlier_rejection: {params.outlier_rejection}",
        f"  bilateral_filter: {params.bilateral_filter}",
        f"  use_curvature: {params.use_curvature}",
        divider,
        "",
    ]
    print("\n".join(report))
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _process_frames_loop(
|
|
144
|
+
video: VideoProcessor,
|
|
145
|
+
tracker: PoseTracker,
|
|
146
|
+
step: int,
|
|
147
|
+
should_resize: bool,
|
|
148
|
+
debug_w: int,
|
|
149
|
+
debug_h: int,
|
|
150
|
+
) -> tuple[list, list, list]:
|
|
151
|
+
"""Internal loop for processing frames to reduce complexity."""
|
|
152
|
+
landmarks_sequence = []
|
|
153
|
+
debug_frames = []
|
|
154
|
+
frame_indices = []
|
|
155
|
+
frame_idx = 0
|
|
156
|
+
|
|
157
|
+
while True:
|
|
158
|
+
frame = video.read_frame()
|
|
159
|
+
if frame is None:
|
|
160
|
+
break
|
|
161
|
+
|
|
162
|
+
landmarks = tracker.process_frame(frame, video.current_timestamp_ms)
|
|
163
|
+
landmarks_sequence.append(landmarks)
|
|
164
|
+
|
|
165
|
+
if frame_idx % step == 0:
|
|
166
|
+
if should_resize:
|
|
167
|
+
processed_frame = cv2.resize(
|
|
168
|
+
frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
|
|
169
|
+
)
|
|
170
|
+
else:
|
|
171
|
+
processed_frame = frame
|
|
172
|
+
|
|
173
|
+
debug_frames.append(processed_frame)
|
|
174
|
+
frame_indices.append(frame_idx)
|
|
175
|
+
|
|
176
|
+
frame_idx += 1
|
|
177
|
+
|
|
178
|
+
return debug_frames, landmarks_sequence, frame_indices
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def process_all_frames(
    video: VideoProcessor,
    tracker: PoseTracker,
    verbose: bool,
    timer: Timer | None = None,
    close_tracker: bool = True,
    target_debug_fps: float = 30.0,
    max_debug_dim: int = 720,
) -> tuple[list, list, list]:
    """Run pose tracking over the whole video and collect debug frames.

    Args:
        video: Video processor to read frames from
        tracker: Pose tracker for landmark detection
        verbose: Print progress messages
        timer: Optional Timer for measuring operations
        close_tracker: Whether to close the tracker after processing (default: True)
        target_debug_fps: Target FPS for debug video (default: 30.0)
        max_debug_dim: Max dimension for debug video frames (default: 720)

    Returns:
        Tuple of (debug_frames, landmarks_sequence, frame_indices)

    Raises:
        ValueError: If no frames could be processed
    """
    if verbose:
        print("Tracking pose landmarks...")

    timer = timer or NULL_TIMER

    # Subsample debug frames so the debug video runs near target_debug_fps.
    step = max(1, int(video.fps / target_debug_fps))

    # Downscale so the longest side is at most max_debug_dim, rounding both
    # dimensions down to even values (video encoders require even sizes).
    width, height = video.display_width, video.display_height
    longest_side = max(width, height)
    scale = max_debug_dim / longest_side if longest_side > max_debug_dim else 1.0
    debug_w = int(width * scale) // 2 * 2
    debug_h = int(height * scale) // 2 * 2
    # NOTE(review): debug_w/h derive from display_width/height, but the
    # resize decision compares against raw video.width/height — presumably
    # deliberate for rotated videos; confirm against VideoProcessor.
    should_resize = (debug_w != video.width) or (debug_h != video.height)

    with timer.measure("pose_tracking"):
        debug_frames, landmarks_sequence, frame_indices = _process_frames_loop(
            video, tracker, step, should_resize, debug_w, debug_h
        )

    if close_tracker:
        tracker.close()

    if not landmarks_sequence:
        raise ValueError("No frames could be processed from video")

    return debug_frames, landmarks_sequence, frame_indices
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def apply_smoothing(
    landmarks_sequence: list,
    params: AnalysisParameters,
    verbose: bool,
    timer: Timer | None = None,
) -> list:
    """Smooth the landmark sequence using the auto-tuned settings.

    Chooses the advanced smoother when outlier rejection or bilateral
    filtering is enabled, otherwise the plain Savitzky-Golay smoother.

    Args:
        landmarks_sequence: Sequence of landmarks from all frames
        params: Auto-tuned parameters containing smoothing settings
        verbose: Print progress messages
        timer: Optional Timer for measuring operations

    Returns:
        Smoothed landmarks sequence
    """
    timer = timer or NULL_TIMER
    use_advanced = params.outlier_rejection or params.bilateral_filter

    if verbose:
        if not use_advanced:
            print("Smoothing landmarks...")
        else:
            if params.outlier_rejection:
                print("Smoothing landmarks with outlier rejection...")
            if params.bilateral_filter:
                print("Using bilateral temporal filter...")

    with timer.measure("smoothing"):
        if use_advanced:
            return smooth_landmarks_advanced(
                landmarks_sequence,
                window_length=params.smoothing_window,
                polyorder=params.polyorder,
                use_outlier_rejection=params.outlier_rejection,
                use_bilateral=params.bilateral_filter,
                timer=timer,
            )
        return smooth_landmarks(
            landmarks_sequence,
            window_length=params.smoothing_window,
            polyorder=params.polyorder,
        )
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def calculate_foot_visibility(frame_landmarks: dict) -> float:
    """Return the mean visibility of the foot landmarks present in a frame.

    Args:
        frame_landmarks: Dictionary of landmarks for a frame; each value is
            an indexable whose element [2] is the visibility score.

    Returns:
        Mean visibility in [0, 1], or 0.0 when no foot landmarks are present.
    """
    foot_names = ("left_ankle", "right_ankle", "left_heel", "right_heel")
    scores = [frame_landmarks[name][2] for name in foot_names if name in frame_landmarks]
    if not scores:
        return 0.0
    return float(np.mean(scores))
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
def extract_vertical_positions(
    smoothed_landmarks: list,
    target: str = "foot",
) -> tuple[np.ndarray, np.ndarray]:
    """Collect per-frame vertical positions and visibilities.

    Frames with no landmarks carry the previous position forward (0.5 when
    there is no previous frame) and get a visibility of 0.0.

    Args:
        smoothed_landmarks: Smoothed landmark sequence
        target: Tracking target "foot" or "hip" (default: "foot")

    Returns:
        Tuple of (vertical_positions, visibilities) as numpy arrays
    """
    positions: list[float] = []
    visibilities: list[float] = []

    for frame_landmarks in smoothed_landmarks:
        if not frame_landmarks:
            # No detection for this frame: reuse the last known position.
            positions.append(positions[-1] if positions else 0.5)
            visibilities.append(0.0)
            continue

        if target == "hip":
            _, y = compute_average_hip_position(frame_landmarks)
        else:
            _, y = compute_average_foot_position(frame_landmarks)
        positions.append(y)
        # NOTE(review): foot visibility is used even when tracking the hip —
        # presumably intentional (feet drive contact detection); confirm.
        visibilities.append(calculate_foot_visibility(frame_landmarks))

    return np.array(positions), np.array(visibilities)
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def convert_timer_to_stage_names(
    timer_metrics: dict[str, float],
) -> dict[str, float]:
    """Map raw timer metric keys to human-readable stage labels.

    Keys without a known label pass through unchanged.

    Args:
        timer_metrics: Dictionary from Timer.get_metrics()

    Returns:
        Dictionary with human-readable stage names as keys
    """
    labels = {
        "video_initialization": "Video initialization",
        "pose_tracking": "Pose tracking",
        "parameter_auto_tuning": "Parameter auto-tuning",
        "smoothing": "Smoothing",
        "vertical_position_extraction": "Vertical position extraction",
        "ground_contact_detection": "Ground contact detection",
        "metrics_calculation": "Metrics calculation",
        "quality_assessment": "Quality assessment",
        "metadata_building": "Metadata building",
        "metrics_validation": "Metrics validation",
        "phase_detection": "Phase detection",
        "json_serialization": "JSON serialization",
        "debug_video_generation": "Debug video generation",
        "debug_video_reencode": "Debug video re-encoding",
        "frame_rotation": "Frame rotation",
        "debug_video_resize": "Debug video resizing",
        "debug_video_copy": "Debug video frame copy",
        "debug_video_draw": "Debug video drawing",
        "debug_video_write": "Debug video encoding",
        # Granular metrics
        "frame_conversion": "Frame BGR-RGB conversion",
        "mediapipe_inference": "MediaPipe inference",
        "landmark_extraction": "Landmark extraction",
        "smoothing_outlier_rejection": "Smoothing (outlier rejection)",
        "smoothing_bilateral": "Smoothing (bilateral)",
        "smoothing_savgol": "Smoothing (Savitzky-Golay)",
        "cmj_compute_derivatives": "CMJ derivatives computation",
        "cmj_find_takeoff": "CMJ takeoff detection",
        "cmj_find_lowest_point": "CMJ lowest point detection",
        "cmj_find_landing": "CMJ landing detection",
        "cmj_find_standing_end": "CMJ standing end detection",
        "dj_compute_velocity": "DJ velocity computation",
        "dj_find_contact_frames": "DJ contact frame search",
        "dj_detect_drop_start": "DJ drop start detection",
        "dj_find_phases": "DJ phase finding",
        "dj_identify_contact": "DJ contact identification",
        "dj_analyze_flight": "DJ flight analysis",
    }
    renamed: dict[str, float] = {}
    for key, seconds in timer_metrics.items():
        renamed[labels.get(key, key)] = seconds
    return renamed
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
def process_videos_bulk_generic(
    configs: list[TConfig],
    processor_func: Callable[[TConfig], TResult],
    error_factory: Callable[[str, str], TResult],
    max_workers: int = 4,
    progress_callback: Callable[[TResult], None] | None = None,
) -> list[TResult]:
    """Process many video configs in parallel worker processes.

    Args:
        configs: List of configuration objects
        processor_func: Function to process a single config (must be picklable)
        error_factory: Function that takes (video_path, error_msg) and returns a
            result object
        max_workers: Maximum number of parallel workers
        progress_callback: Optional callback for progress updates

    Returns:
        List of result objects, in completion order.
    """
    results: list[TResult] = []

    # Use 'spawn' context to avoid fork() issues in multi-threaded pytest environment
    spawn_ctx = mp.get_context("spawn")
    with ProcessPoolExecutor(max_workers=max_workers, mp_context=spawn_ctx) as pool:
        pending = {pool.submit(processor_func, cfg): cfg for cfg in configs}

        for finished in as_completed(pending):
            cfg = pending[finished]
            # TConfig is duck-typed here: we only need an optional
            # `video_path` attribute for error reporting; a Protocol would
            # be needed to enforce it statically.
            path = getattr(cfg, "video_path", "unknown")

            try:
                outcome = finished.result()
            except Exception as exc:
                # Worker crashed; convert the failure into a result object
                # so one bad video does not abort the whole batch.
                outcome = error_factory(path, f"Unexpected error: {str(exc)}")

            results.append(outcome)
            if progress_callback:
                progress_callback(outcome)

    return results
|