kinemotion 0.10.0__py3-none-any.whl → 0.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.

@@ -130,13 +130,13 @@ def remove_outliers(
      if not np.any(outlier_mask):
          return positions_clean

-     outlier_indices = np.where(outlier_mask)[0]
+     outlier_indices = np.nonzero(outlier_mask)[0]

      for idx in outlier_indices:
          if method == "interpolate":
              # Find nearest valid points before and after
-             valid_before = np.where(~outlier_mask[:idx])[0]
-             valid_after = np.where(~outlier_mask[idx + 1 :])[0]
+             valid_before = np.nonzero(~outlier_mask[:idx])[0]
+             valid_after = np.nonzero(~outlier_mask[idx + 1 :])[0]

              if len(valid_before) > 0 and len(valid_after) > 0:
                  # Linear interpolation between nearest valid points
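This substitution is behavior-preserving: NumPy documents single-argument `np.where(mask)` as equivalent to `np.nonzero(mask)`, so both return the same tuple of index arrays. A minimal sketch with illustrative values only:

```python
import numpy as np

# For a boolean mask, single-argument np.where and np.nonzero return the
# same index arrays, so the change above does not alter behavior.
mask = np.array([False, True, False, True, True])
assert np.array_equal(np.where(mask)[0], np.nonzero(mask)[0])
print(np.nonzero(mask)[0])  # -> [1 3 4]
```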
kinemotion/core/pose.py CHANGED
@@ -1,6 +1,5 @@
  """Pose tracking using MediaPipe Pose."""

-
  import cv2
  import mediapipe as mp
  import numpy as np
@@ -54,7 +54,7 @@ def smooth_landmarks(

      for i, frame_landmarks in enumerate(landmark_sequence):
          if frame_landmarks is not None and landmark_name in frame_landmarks:
-             x, y, vis = frame_landmarks[landmark_name]
+             x, y, _ = frame_landmarks[landmark_name]  # vis not used
              x_coords.append(x)
              y_coords.append(y)
              valid_frames.append(i)
@@ -287,7 +287,7 @@ def smooth_landmarks_advanced(

      for i, frame_landmarks in enumerate(landmark_sequence):
          if frame_landmarks is not None and landmark_name in frame_landmarks:
-             x, y, vis = frame_landmarks[landmark_name]
+             x, y, _ = frame_landmarks[landmark_name]  # vis not used
              x_coords.append(x)
              y_coords.append(y)
              valid_frames.append(i)
@@ -218,7 +218,6 @@ def dropjump_analyze(
              visibility_threshold,
              detection_confidence,
              tracking_confidence,
-             verbose,
          )
      else:
          # Single video mode (original behavior)
@@ -302,7 +301,6 @@ def _process_single(
      landmarks_sequence = []
      frames = []

-     frame_idx = 0
      bar: Any
      with click.progressbar(
          length=video.frame_count, label="Processing frames"
@@ -316,7 +314,6 @@ def _process_single(
              landmarks = tracker.process_frame(frame)
              landmarks_sequence.append(landmarks)

-             frame_idx += 1
              bar.update(1)

      tracker.close()
@@ -555,7 +552,6 @@ def _process_batch(
      visibility_threshold: float | None,
      detection_confidence: float | None,
      tracking_confidence: float | None,
-     verbose: bool,
  ) -> None:
      """Process multiple videos in batch mode using parallel processing."""
      click.echo(
@@ -651,19 +647,30 @@ def _process_batch(
      ]

      if with_gct:
-         avg_gct = sum(r.metrics.ground_contact_time * 1000 for r in with_gct) / len(
-             with_gct
-         )
+         # Type assertion: filtering ensures metrics and ground_contact_time are not None
+         avg_gct = sum(
+             r.metrics.ground_contact_time * 1000
+             for r in with_gct
+             if r.metrics and r.metrics.ground_contact_time is not None
+         ) / len(with_gct)
          click.echo(f"\nAverage ground contact time: {avg_gct:.1f} ms", err=True)

      if with_flight:
-         avg_flight = sum(r.metrics.flight_time * 1000 for r in with_flight) / len(
-             with_flight
-         )
+         # Type assertion: filtering ensures metrics and flight_time are not None
+         avg_flight = sum(
+             r.metrics.flight_time * 1000
+             for r in with_flight
+             if r.metrics and r.metrics.flight_time is not None
+         ) / len(with_flight)
          click.echo(f"Average flight time: {avg_flight:.1f} ms", err=True)

      if with_jump:
-         avg_jump = sum(r.metrics.jump_height for r in with_jump) / len(with_jump)
+         # Type assertion: filtering ensures metrics and jump_height are not None
+         avg_jump = sum(
+             r.metrics.jump_height
+             for r in with_jump
+             if r.metrics and r.metrics.jump_height is not None
+         ) / len(with_jump)
          click.echo(
              f"Average jump height: {avg_jump:.3f} m ({avg_jump * 100:.1f} cm)",
              err=True,
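The reshaped summaries guard each generator with an explicit `is not None` check so strict type checkers accept arithmetic on optional metric fields. A self-contained sketch of the same pattern, using hypothetical stand-in result classes rather than kinemotion's actual ones:

```python
from dataclasses import dataclass


# Hypothetical stand-ins for kinemotion's result objects, used only to
# illustrate the None-guarded averaging pattern from the diff above.
@dataclass
class Metrics:
    ground_contact_time: float | None = None  # seconds


@dataclass
class Result:
    metrics: Metrics | None = None


results = [Result(Metrics(0.245)), Result(Metrics(None)), Result(None), Result(Metrics(0.251))]

# Pre-filter, mirroring the `with_gct` list built earlier in the CLI.
with_gct = [r for r in results if r.metrics and r.metrics.ground_contact_time is not None]

if with_gct:
    # Repeating the guard inside the generator lets a strict checker narrow
    # Optional[float] to float before the multiplication.
    avg_gct = sum(
        r.metrics.ground_contact_time * 1000
        for r in with_gct
        if r.metrics and r.metrics.ground_contact_time is not None
    ) / len(with_gct)
    print(f"Average ground contact time: {avg_gct:.1f} ms")  # 248.0 ms
```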
@@ -87,7 +87,7 @@ class DebugOverlayRenderer:
          if landmarks:
              if use_com:
                  # Draw center of mass position
-                 com_x, com_y, com_vis = compute_center_of_mass(landmarks)
+                 com_x, com_y, _ = compute_center_of_mass(landmarks)  # com_vis not used
                  px = int(com_x * self.width)
                  py = int(com_y * self.height)

@@ -147,8 +147,12 @@ def calculate_drop_jump_metrics(
          position_change_threshold=0.005,  # 0.5% of frame height - sensitive to drop start
          smoothing_window=smoothing_window,
      )
-     # If manually specified or auto-detected, use it
-     drop_start_frame_value = drop_start_frame if drop_start_frame is not None else 0
+     # If manually specified or auto-detected, use it; otherwise start from frame 0
+     drop_start_frame_value: int
+     if drop_start_frame is None:  # pyright: ignore[reportUnnecessaryComparison]
+         drop_start_frame_value = 0
+     else:
+         drop_start_frame_value = drop_start_frame

      phases = find_contact_phases(contact_states)

@@ -200,6 +204,10 @@ def calculate_drop_jump_metrics(
      if not ground_phases:
          return metrics

+     # Initialize contact variables with first ground phase as fallback
+     # (will be overridden by drop jump or regular jump detection logic)
+     contact_start, contact_end = ground_phases[0][0], ground_phases[0][1]
+
      # Detect if this is a drop jump or regular jump
      # Drop jump: first ground phase is elevated (lower y), followed by drop, then landing (higher y)
      is_drop_jump = False
@@ -393,7 +401,6 @@ def calculate_drop_jump_metrics(

  def estimate_jump_height_from_trajectory(
      foot_y_positions: np.ndarray,
-     contact_start: int,
      flight_start: int,
      flight_end: int,
      pixel_to_meter_ratio: float | None = None,
@@ -403,7 +410,6 @@ def estimate_jump_height_from_trajectory(

      Args:
          foot_y_positions: Vertical positions of feet (normalized or pixels)
-         contact_start: Frame where ground contact starts
          flight_start: Frame where flight begins
          flight_end: Frame where flight ends
          pixel_to_meter_ratio: Conversion factor from pixels to meters
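For orientation, a function with this signature can estimate jump height from the flight-phase trajectory alone, which is consistent with dropping the unused `contact_start` parameter. The following is an illustrative sketch under assumed conventions (image y grows downward; displacement scaled by the pixel-to-meter ratio), not kinemotion's actual implementation:

```python
import numpy as np


def estimate_jump_height_sketch(
    foot_y_positions: np.ndarray,
    flight_start: int,
    flight_end: int,
    pixel_to_meter_ratio: float | None = None,
) -> float | None:
    """Illustrative sketch only, not the library's implementation."""
    if pixel_to_meter_ratio is None:
        return None
    takeoff_y = foot_y_positions[flight_start]
    peak_y = foot_y_positions[flight_start : flight_end + 1].min()  # highest point (smallest y)
    return float((takeoff_y - peak_y) * pixel_to_meter_ratio)


# A purely kinematic alternative uses flight time t and h = g * t**2 / 8,
# which the README exposes separately as jump_height_kinematic_m.
```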
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kinemotion
- Version: 0.10.0
+ Version: 0.10.1
  Summary: Video-based kinematic analysis for athletic performance
  Project-URL: Homepage, https://github.com/feniix/kinemotion
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -52,6 +52,9 @@ A video-based kinematic analysis tool for athletic performance. Analyzes side-vi
  - **Calibrated measurements** - use known drop height for theoretically improved accuracy (⚠️ accuracy claims unvalidated)
  - **JSON output** for easy integration with other tools
  - **Optional debug video** with visual overlays showing contact states and landmarks
+ - **Batch processing** - CLI and Python API for parallel processing of multiple videos
+ - **Python library API** - use kinemotion programmatically in your own code
+ - **CSV export** - aggregated results for research and analysis
  - **Configurable parameters** for smoothing, thresholds, and detection

  **Note**: Drop jump analysis uses foot-based tracking with fixed velocity thresholds. Center of mass (CoM) tracking and adaptive thresholding (available in `core/` modules) require longer videos (~5+ seconds) with a 3-second standing baseline, making them unsuitable for typical drop jump videos (~3 seconds). These features may be available in future jump types like CMJ (countermovement jump).
@@ -172,18 +175,148 @@ kinemotion dropjump-analyze video.mp4 \
    --velocity-threshold 0.015
  ```

+ ### Batch Processing
+
+ Process multiple videos in parallel from the command line:
+
+ ```bash
+ # Process multiple videos with glob pattern
+ kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 --workers 4
+
+ # Save all results to directories
+ kinemotion dropjump-analyze videos/*.mp4 --batch --drop-height 0.40 \
+   --json-output-dir results/ \
+   --output-dir debug_videos/ \
+   --csv-summary summary.csv
+
+ # Multiple explicit paths (batch mode auto-enabled)
+ kinemotion dropjump-analyze video1.mp4 video2.mp4 video3.mp4 --drop-height 0.40
+ ```
+
+ **Batch options:**
+
+ - `--batch`: Explicitly enable batch mode
+ - `--workers <int>`: Number of parallel workers (default: 4)
+ - `--output-dir <path>`: Directory for debug videos (auto-named per video)
+ - `--json-output-dir <path>`: Directory for JSON metrics (auto-named per video)
+ - `--csv-summary <path>`: Export aggregated results to CSV
+
+ **Output example:**
+
+ ```text
+ Batch processing 10 videos with 4 workers
+ ======================================================================
+
+ Processing videos...
+ [1/10] ✓ athlete1.mp4 (2.3s)
+ [2/10] ✓ athlete2.mp4 (2.1s)
+ [3/10] ✗ athlete3.mp4 (0.5s)
+         Error: No frames could be processed
+
+ ======================================================================
+ BATCH PROCESSING SUMMARY
+ ======================================================================
+ Total videos: 10
+ Successful: 9
+ Failed: 1
+
+ Average ground contact time: 245.3 ms
+ Average flight time: 523.7 ms
+ Average jump height: 0.352 m (35.2 cm)
+ ```
+
+ ## Python API
+
+ Use kinemotion as a library in your Python code for automated pipelines and custom analysis:
+
+ ### Single Video Processing
+
+ ```python
+ from kinemotion import process_video
+
+ # Process a single video
+ metrics = process_video(
+     video_path="athlete_jump.mp4",
+     drop_height=0.40,  # 40cm drop box
+     quality="balanced",
+     verbose=True
+ )
+
+ # Access results
+ print(f"Jump height: {metrics.jump_height:.3f} m")
+ print(f"Ground contact time: {metrics.ground_contact_time * 1000:.1f} ms")
+ print(f"Flight time: {metrics.flight_time * 1000:.1f} ms")
+ ```
+
+ ### Bulk Video Processing
+
+ ```python
+ from kinemotion import VideoConfig, process_videos_bulk
+
+ # Configure multiple videos
+ configs = [
+     VideoConfig("video1.mp4", drop_height=0.40),
+     VideoConfig("video2.mp4", drop_height=0.30, quality="accurate"),
+     VideoConfig("video3.mp4", drop_height=0.50, output_video="debug3.mp4"),
+ ]
+
+ # Process in parallel with 4 workers
+ results = process_videos_bulk(configs, max_workers=4)
+
+ # Check results
+ for result in results:
+     if result.success:
+         print(f"✓ {result.video_path}: {result.metrics.jump_height:.3f} m")
+     else:
+         print(f"✗ {result.video_path}: {result.error}")
+ ```
+
+ ### Export Results to CSV
+
+ ```python
+ import csv
+ from pathlib import Path
+ from kinemotion import VideoConfig, process_videos_bulk
+
+ # Process directory of videos
+ video_dir = Path("athlete_videos")
+ configs = [
+     VideoConfig(str(v), drop_height=0.40, quality="balanced")
+     for v in video_dir.glob("*.mp4")
+ ]
+
+ results = process_videos_bulk(configs, max_workers=4)
+
+ # Export to CSV
+ with open("results.csv", "w", newline="") as f:
+     writer = csv.writer(f)
+     writer.writerow(["Video", "GCT (ms)", "Flight (ms)", "Jump (m)"])
+
+     for r in results:
+         if r.success and r.metrics:
+             writer.writerow([
+                 Path(r.video_path).name,
+                 f"{r.metrics.ground_contact_time * 1000:.1f}" if r.metrics.ground_contact_time else "N/A",
+                 f"{r.metrics.flight_time * 1000:.1f}" if r.metrics.flight_time else "N/A",
+                 f"{r.metrics.jump_height:.3f}" if r.metrics.jump_height else "N/A",
+             ])
+ ```
+
+ **See [examples/bulk/README.md](examples/bulk/README.md) for comprehensive API documentation and more examples.**
+
  ## Configuration Options

  ### Intelligent Auto-Tuning

  Kinemotion automatically optimizes parameters based on your video:
+
  - **FPS-based scaling**: 30fps, 60fps, 120fps videos use different thresholds automatically
  - **Quality-based adjustments**: Adapts smoothing based on MediaPipe tracking confidence
  - **Always enabled**: Outlier rejection, curvature analysis, drop start detection

  ### Required Parameters

- - `--drop-height <float>` **[REQUIRED]**
+ - `--drop-height <float>` **\[REQUIRED\]**
    - Height of drop box/platform in meters (e.g., 0.40 for 40cm)
    - Used for accurate calibration of jump height measurements
    - Measure your box height accurately for best results
@@ -191,18 +324,22 @@ Kinemotion automatically optimizes parameters based on your video:
  ### Optional Parameters

  - `--quality [fast|balanced|accurate]` (default: balanced)
+
    - **fast**: Quick analysis, less precise (~50% faster)
    - **balanced**: Good accuracy/speed tradeoff (recommended)
    - **accurate**: Research-grade analysis, slower (maximum precision)

  - `--verbose` / `-v`
+
    - Show auto-selected parameters and analysis details
    - Useful for understanding what the tool is doing

  - `--output <path>` / `-o`
+
    - Generate annotated debug video with pose tracking visualization

  - `--json-output <path>` / `-j`
+
    - Save metrics to JSON file instead of stdout

  ### Expert Overrides (Rarely Needed)
@@ -271,9 +408,9 @@ The debug video includes:
  **Solutions**:

  1. **Check video quality**: Ensure the athlete is clearly visible in profile view
- 2. **Increase smoothing**: Use `--smoothing-window 7` or higher
- 3. **Adjust detection confidence**: Try `--detection-confidence 0.6` or `--tracking-confidence 0.6`
- 4. **Generate debug video**: Use `--output` to visualize what's being tracked
+ 1. **Increase smoothing**: Use `--smoothing-window 7` or higher
+ 1. **Adjust detection confidence**: Try `--detection-confidence 0.6` or `--tracking-confidence 0.6`
+ 1. **Generate debug video**: Use `--output` to visualize what's being tracked

  ### No Pose Detected

@@ -282,9 +419,9 @@ The debug video includes:
  **Solutions**:

  1. **Verify video format**: OpenCV must be able to read the video
- 2. **Check framing**: Ensure full body is visible in side view
- 3. **Lower confidence thresholds**: Try `--detection-confidence 0.3 --tracking-confidence 0.3`
- 4. **Test video playback**: Verify video opens correctly with standard video players
+ 1. **Check framing**: Ensure full body is visible in side view
+ 1. **Lower confidence thresholds**: Try `--detection-confidence 0.3 --tracking-confidence 0.3`
+ 1. **Test video playback**: Verify video opens correctly with standard video players

  ### Incorrect Contact Detection

@@ -293,11 +430,11 @@ The debug video includes:
  **Solutions**:

  1. **Generate debug video**: Visualize contact states to diagnose the issue
- 2. **Adjust velocity threshold**:
+ 1. **Adjust velocity threshold**:
     - If missing contacts: decrease to `--velocity-threshold 0.01`
     - If false contacts: increase to `--velocity-threshold 0.03`
- 3. **Adjust minimum frames**: `--min-contact-frames 5` for longer required contact
- 4. **Check visibility**: Lower `--visibility-threshold 0.3` if feet are partially obscured
+ 1. **Adjust minimum frames**: `--min-contact-frames 5` for longer required contact
+ 1. **Check visibility**: Lower `--visibility-threshold 0.3` if feet are partially obscured

  ### Jump Height Seems Wrong

@@ -307,9 +444,9 @@ The debug video includes:

  1. **Use calibration**: For drop jumps, add `--drop-height` parameter with box height in meters (e.g., `--drop-height 0.40`)
     - Theoretically improves accuracy (⚠️ unvalidated)
- 2. **Verify flight time detection**: Check `flight_start_frame` and `flight_end_frame` in JSON
- 3. **Compare measurements**: JSON output includes both `jump_height_m` (primary) and `jump_height_kinematic_m` (kinematic-only)
- 4. **Check for drop jump detection**: If doing a drop jump, ensure first phase is elevated enough (>5% of frame height)
+ 1. **Verify flight time detection**: Check `flight_start_frame` and `flight_end_frame` in JSON
+ 1. **Compare measurements**: JSON output includes both `jump_height_m` (primary) and `jump_height_kinematic_m` (kinematic-only)
+ 1. **Check for drop jump detection**: If doing a drop jump, ensure first phase is elevated enough (>5% of frame height)

  ### Video Codec Issues

@@ -318,30 +455,30 @@ The debug video includes:
  **Solutions**:

  1. **Install additional codecs**: Ensure OpenCV has proper video codec support
- 2. **Try different output format**: Use `.avi` extension instead of `.mp4`
- 3. **Check output path**: Ensure write permissions for output directory
+ 1. **Try different output format**: Use `.avi` extension instead of `.mp4`
+ 1. **Check output path**: Ensure write permissions for output directory

  ## How It Works

  1. **Pose Tracking**: MediaPipe extracts 2D pose landmarks (foot points: ankles, heels, foot indices) from each frame
- 2. **Position Calculation**: Averages ankle, heel, and foot index positions to determine foot location
- 3. **Smoothing**: Savitzky-Golay filter reduces tracking jitter while preserving motion dynamics
- 4. **Contact Detection**: Analyzes vertical position velocity to identify ground contact vs. flight phases
- 5. **Phase Identification**: Finds continuous ground contact and flight periods
+ 1. **Position Calculation**: Averages ankle, heel, and foot index positions to determine foot location
+ 1. **Smoothing**: Savitzky-Golay filter reduces tracking jitter while preserving motion dynamics
+ 1. **Contact Detection**: Analyzes vertical position velocity to identify ground contact vs. flight phases
+ 1. **Phase Identification**: Finds continuous ground contact and flight periods
     - Automatically detects drop jumps vs regular jumps
     - For drop jumps: identifies box → drop → ground contact → jump sequence
- 6. **Sub-Frame Interpolation**: Estimates exact transition times between frames
+ 1. **Sub-Frame Interpolation**: Estimates exact transition times between frames
     - Uses Savitzky-Golay derivative for smooth velocity calculation
     - Linear interpolation of velocity to find threshold crossings
     - Achieves sub-millisecond timing precision (at 30fps: ±10ms vs ±33ms)
     - Reduces timing error by 60-70% for contact and flight measurements
     - Smoother velocity curves eliminate false threshold crossings
- 7. **Trajectory Curvature Analysis**: Refines transitions using acceleration patterns
+ 1. **Trajectory Curvature Analysis**: Refines transitions using acceleration patterns
     - Computes second derivative (acceleration) from position trajectory
     - Detects landing impact by acceleration spike
     - Identifies takeoff by acceleration change patterns
     - Provides independent validation and refinement of velocity-based detection
- 8. **Metric Calculation**:
+ 1. **Metric Calculation**:
     - Ground contact time = contact phase duration (using fractional frames)
     - Flight time = flight phase duration (using fractional frames)
     - Jump height = calibrated position-based measurement (if --drop-height provided)
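To make the smoothing and contact-detection steps in the list above concrete, here is a minimal sketch of Savitzky-Golay smoothing plus a derivative-based velocity threshold, using synthetic data and hypothetical values only (not kinemotion's internal code):

```python
import numpy as np
from scipy.signal import savgol_filter

fps = 30.0
frames = np.arange(60)
# Synthetic normalized foot height: on the ground, a brief flight dip, then ground again.
foot_y = np.where((frames > 25) & (frames < 40), 0.70, 0.85)

window, polyorder = 7, 2  # window must be odd and larger than polyorder
smoothed = savgol_filter(foot_y, window, polyorder)
# deriv=1 with delta=1/fps gives velocity in normalized units per second.
velocity = savgol_filter(foot_y, window, polyorder, deriv=1, delta=1.0 / fps)

velocity_threshold = 0.02  # hypothetical value; the CLI exposes --velocity-threshold
in_contact = np.abs(velocity) < velocity_threshold
```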
@@ -356,7 +493,8 @@ This project enforces strict code quality standards:
  - **Type safety**: Full mypy strict mode compliance with complete type annotations
  - **Linting**: Comprehensive ruff checks (pycodestyle, pyflakes, isort, pep8-naming, etc.)
  - **Formatting**: Black code style
- - **Testing**: pytest with 25 unit tests
+ - **Testing**: pytest with 61 unit tests
+ - **PEP 561 compliant**: Includes py.typed marker for type checking support

  ### Development Commands

@@ -391,9 +529,9 @@ uv run ruff check && uv run mypy src/dropjump && uv run pytest
  Before committing code, ensure all checks pass:

  1. Format with Black
- 2. Fix linting issues with ruff
- 3. Ensure type safety with mypy
- 4. Run all tests with pytest
+ 1. Fix linting issues with ruff
+ 1. Ensure type safety with mypy
+ 1. Run all tests with pytest

  See [CLAUDE.md](CLAUDE.md) for detailed development guidelines.

@@ -414,9 +552,7 @@ See [CLAUDE.md](CLAUDE.md) for detailed development guidelines.
  - Advanced camera calibration (intrinsic parameters, lens distortion)
  - Multi-angle analysis support
  - Automatic camera orientation detection
- - Batch processing for multiple videos
  - Real-time analysis from webcam
- - Export to CSV/Excel formats
  - Comparison with reference values
  - Force plate integration for validation

@@ -0,0 +1,20 @@
+ kinemotion/__init__.py,sha256=Z85xg29NA-r4IjrSbAkJpMFigyxACFGUb-37AiMp6YY,350
+ kinemotion/api.py,sha256=3Hswx5lfyWc-EanS6iV-4MPUa_uB5t8BpGe4EB4gIIQ,15453
+ kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
+ kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
+ kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQIdw,10508
+ kinemotion/core/filtering.py,sha256=f-m-aA59e4WqE6u-9MA51wssu7rI-Y_7n1cG8IWdeRQ,11241
+ kinemotion/core/pose.py,sha256=Wfd1RR-2ZznYpWeQUbySwcV3mvReqn8n3XO6S7pGq4M,8390
+ kinemotion/core/smoothing.py,sha256=FON4qKtsSp1-03GnJrDkEUAePaACn4QPMJF0eTIYqR0,12925
+ kinemotion/core/video_io.py,sha256=z8Z0qbNaKbcdB40KnbNOBMzab3BbgnhBxp-mUBYeXgM,6577
+ kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
+ kinemotion/dropjump/analysis.py,sha256=HfJt2t9IsMBiBUz7apIzdxbRH9QqzlFnDVVWcKhU3ow,23291
+ kinemotion/dropjump/cli.py,sha256=GuVOc3cWeZ3FjNrf-28DdA-xkLuxwyNVquX3AbiLtE0,26208
+ kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
+ kinemotion/dropjump/kinematics.py,sha256=4G_7_KWnXiT09G9BduQNIgFtxvwjo2RyH1sP9SU3hSE,17949
+ kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ kinemotion-0.10.1.dist-info/METADATA,sha256=6PtvHfOyoKCqyAH6UO4YAjmZhq4xk-WlK_XaU49PCB8,20333
+ kinemotion-0.10.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ kinemotion-0.10.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+ kinemotion-0.10.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+ kinemotion-0.10.1.dist-info/RECORD,,
@@ -1,20 +0,0 @@
- kinemotion/__init__.py,sha256=Z85xg29NA-r4IjrSbAkJpMFigyxACFGUb-37AiMp6YY,350
- kinemotion/api.py,sha256=3Hswx5lfyWc-EanS6iV-4MPUa_uB5t8BpGe4EB4gIIQ,15453
- kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
- kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
- kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQIdw,10508
- kinemotion/core/filtering.py,sha256=5opOq0Fbot_AvsT8cT2kS9uyD_uqLW-jR6SXZbg672c,11235
- kinemotion/core/pose.py,sha256=5Dhw3LqX3STR-eLb5JAQkxhS-dd0PqGytBWnaQ66nWc,8391
- kinemotion/core/smoothing.py,sha256=VVv95auiuah_GPG3jxiQPyiYXF5i3B4fF9UGI5FLX-Q,12897
- kinemotion/core/video_io.py,sha256=z8Z0qbNaKbcdB40KnbNOBMzab3BbgnhBxp-mUBYeXgM,6577
- kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
- kinemotion/dropjump/analysis.py,sha256=HfJt2t9IsMBiBUz7apIzdxbRH9QqzlFnDVVWcKhU3ow,23291
- kinemotion/dropjump/cli.py,sha256=Wc0Z3w8RBaozEQ-Oc8_zTIUrfsgRwhD97kH4P8ZozzQ,25759
- kinemotion/dropjump/debug_overlay.py,sha256=hmEtadqYP8K-kGr_Q03KDQyl1152-YSPeRJzEXMyuhs,8687
- kinemotion/dropjump/kinematics.py,sha256=RceIH2HndpHQpcOQd56MmEdXQNEst-CWXfBKPJk2g3Y,17659
- kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kinemotion-0.10.0.dist-info/METADATA,sha256=mwCvLjVVaKjlIptGUxPKeQD5BH6IAs5luVm7OIUd1SU,16319
- kinemotion-0.10.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- kinemotion-0.10.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
- kinemotion-0.10.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
- kinemotion-0.10.0.dist-info/RECORD,,