kinemotion 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.

Warning: this release has been flagged as potentially problematic.


This version of kinemotion might be problematic — click here for more details before installing or upgrading.

kinemotion/api.py CHANGED
@@ -638,6 +638,50 @@ class CMJVideoResult:
638
638
  processing_time: float = 0.0
639
639
 
640
640
 
641
+ def _generate_cmj_outputs(
642
+ output_video: str | None,
643
+ json_output: str | None,
644
+ metrics: CMJMetrics,
645
+ frames: list,
646
+ smoothed_landmarks: list,
647
+ video_width: int,
648
+ video_height: int,
649
+ video_display_width: int,
650
+ video_display_height: int,
651
+ video_fps: float,
652
+ verbose: bool,
653
+ ) -> None:
654
+ """Generate JSON and debug video outputs for CMJ analysis."""
655
+ if json_output:
656
+ import json
657
+
658
+ output_path = Path(json_output)
659
+ output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
660
+ if verbose:
661
+ print(f"Metrics written to: {json_output}")
662
+
663
+ if output_video:
664
+ if verbose:
665
+ print(f"Generating debug video: {output_video}")
666
+
667
+ with CMJDebugOverlayRenderer(
668
+ output_video,
669
+ video_width,
670
+ video_height,
671
+ video_display_width,
672
+ video_display_height,
673
+ video_fps,
674
+ ) as renderer:
675
+ for i, frame in enumerate(frames):
676
+ annotated = renderer.render_frame(
677
+ frame, smoothed_landmarks[i], i, metrics
678
+ )
679
+ renderer.write_frame(annotated)
680
+
681
+ if verbose:
682
+ print(f"Debug video saved: {output_video}")
683
+
684
+
641
685
  def process_cmj_video(
642
686
  video_path: str,
643
687
  quality: str = "balanced",
@@ -645,7 +689,6 @@ def process_cmj_video(
645
689
  json_output: str | None = None,
646
690
  smoothing_window: int | None = None,
647
691
  velocity_threshold: float | None = None,
648
- countermovement_threshold: float | None = None,
649
692
  min_contact_frames: int | None = None,
650
693
  visibility_threshold: float | None = None,
651
694
  detection_confidence: float | None = None,
@@ -666,7 +709,6 @@ def process_cmj_video(
666
709
  json_output: Optional path for JSON metrics output
667
710
  smoothing_window: Optional override for smoothing window
668
711
  velocity_threshold: Optional override for velocity threshold
669
- countermovement_threshold: Optional override for countermovement threshold
670
712
  min_contact_frames: Optional override for minimum contact frames
671
713
  visibility_threshold: Optional override for visibility threshold
672
714
  detection_confidence: Optional override for pose detection confidence
@@ -741,12 +783,6 @@ def process_cmj_video(
741
783
  vertical_positions, _ = _extract_vertical_positions(smoothed_landmarks)
742
784
  tracking_method = "foot"
743
785
 
744
- # Calculate countermovement threshold (FPS-adjusted)
745
- # POSITIVE threshold for downward motion (squatting) in normalized coordinates
746
- cm_threshold = countermovement_threshold
747
- if cm_threshold is None:
748
- cm_threshold = 0.015 * (30.0 / video.fps)
749
-
750
786
  # Detect CMJ phases
751
787
  if verbose:
752
788
  print("Detecting CMJ phases...")
@@ -788,34 +824,19 @@ def process_cmj_video(
788
824
  )
789
825
 
790
826
  # Generate outputs if requested
791
- if json_output:
792
- import json
793
-
794
- output_path = Path(json_output)
795
- output_path.write_text(json.dumps(metrics.to_dict(), indent=2))
796
- if verbose:
797
- print(f"Metrics written to: {json_output}")
798
-
799
- if output_video:
800
- if verbose:
801
- print(f"Generating debug video: {output_video}")
802
-
803
- with CMJDebugOverlayRenderer(
804
- output_video,
805
- video.width,
806
- video.height,
807
- video.display_width,
808
- video.display_height,
809
- video.fps,
810
- ) as renderer:
811
- for i, frame in enumerate(frames):
812
- annotated = renderer.render_frame(
813
- frame, smoothed_landmarks[i], i, metrics
814
- )
815
- renderer.write_frame(annotated)
816
-
817
- if verbose:
818
- print(f"Debug video saved: {output_video}")
827
+ _generate_cmj_outputs(
828
+ output_video,
829
+ json_output,
830
+ metrics,
831
+ frames,
832
+ smoothed_landmarks,
833
+ video.width,
834
+ video.height,
835
+ video.display_width,
836
+ video.display_height,
837
+ video.fps,
838
+ verbose,
839
+ )
819
840
 
820
841
  if verbose:
821
842
  print(f"\nJump height: {metrics.jump_height:.3f}m")
@@ -279,8 +279,6 @@ def find_cmj_takeoff_from_velocity_peak(
279
279
  velocities: np.ndarray,
280
280
  lowest_point_frame: int,
281
281
  fps: float,
282
- window_length: int = 5,
283
- polyorder: int = 2,
284
282
  ) -> float:
285
283
  """
286
284
  Find CMJ takeoff frame as peak upward velocity during concentric phase.
@@ -293,8 +291,6 @@ def find_cmj_takeoff_from_velocity_peak(
293
291
  velocities: Array of SIGNED vertical velocities (negative = upward)
294
292
  lowest_point_frame: Frame at lowest point
295
293
  fps: Video frame rate
296
- window_length: Window size for derivative calculations
297
- polyorder: Polynomial order for Savitzky-Golay filter
298
294
 
299
295
  Returns:
300
296
  Takeoff frame with fractional precision.
@@ -407,7 +403,7 @@ def find_interpolated_takeoff_landing(
407
403
 
408
404
  # Find takeoff using peak velocity method (CMJ-specific)
409
405
  takeoff_frame = find_cmj_takeoff_from_velocity_peak(
410
- positions, velocities, lowest_point_frame, fps, window_length, polyorder
406
+ positions, velocities, lowest_point_frame, fps
411
407
  )
412
408
 
413
409
  # Find landing using position peak and impact detection
kinemotion/cmj/cli.py CHANGED
@@ -18,8 +18,12 @@ from ..core.auto_tuning import (
18
18
  analyze_video_sample,
19
19
  auto_tune_parameters,
20
20
  )
21
+ from ..core.cli_utils import (
22
+ determine_initial_confidence,
23
+ smooth_landmark_sequence,
24
+ track_all_frames,
25
+ )
21
26
  from ..core.pose import PoseTracker
22
- from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
23
27
  from ..core.video_io import VideoProcessor
24
28
  from .analysis import detect_cmj_phases
25
29
  from .debug_overlay import CMJDebugOverlayRenderer
@@ -39,6 +43,64 @@ class AnalysisParameters:
39
43
  tracking_confidence: float | None = None
40
44
 
41
45
 
46
+ def _collect_video_files(video_path: tuple[str, ...]) -> list[str]:
47
+ """Expand glob patterns and collect all video files."""
48
+ video_files: list[str] = []
49
+ for pattern in video_path:
50
+ expanded = glob.glob(pattern)
51
+ if expanded:
52
+ video_files.extend(expanded)
53
+ elif Path(pattern).exists():
54
+ video_files.append(pattern)
55
+ else:
56
+ click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
57
+ return video_files
58
+
59
+
60
+ def _generate_output_paths(
61
+ video: str, output_dir: str | None, json_output_dir: str | None
62
+ ) -> tuple[str | None, str | None]:
63
+ """Generate output paths for debug video and JSON."""
64
+ out_path = None
65
+ json_path = None
66
+ if output_dir:
67
+ out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
68
+ if json_output_dir:
69
+ json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
70
+ return out_path, json_path
71
+
72
+
73
+ def _process_batch_videos(
74
+ video_files: list[str],
75
+ output_dir: str | None,
76
+ json_output_dir: str | None,
77
+ quality_preset: QualityPreset,
78
+ verbose: bool,
79
+ expert_params: AnalysisParameters,
80
+ workers: int,
81
+ ) -> None:
82
+ """Process multiple videos in batch mode."""
83
+ click.echo(
84
+ f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
85
+ err=True,
86
+ )
87
+ click.echo("Note: Batch processing not yet fully implemented", err=True)
88
+ click.echo("Processing videos sequentially...", err=True)
89
+
90
+ for video in video_files:
91
+ try:
92
+ click.echo(f"\nProcessing: {video}", err=True)
93
+ out_path, json_path = _generate_output_paths(
94
+ video, output_dir, json_output_dir
95
+ )
96
+ _process_single(
97
+ video, out_path, json_path, quality_preset, verbose, expert_params
98
+ )
99
+ except Exception as e:
100
+ click.echo(f"Error processing {video}: {e}", err=True)
101
+ continue
102
+
103
+
42
104
  @click.command(name="cmj-analyze")
43
105
  @click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
44
106
  @click.option(
@@ -189,15 +251,7 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
189
251
  --json-output-dir results/ --csv-summary summary.csv
190
252
  """
191
253
  # Expand glob patterns and collect all video files
192
- video_files: list[str] = []
193
- for pattern in video_path:
194
- expanded = glob.glob(pattern)
195
- if expanded:
196
- video_files.extend(expanded)
197
- elif Path(pattern).exists():
198
- video_files.append(pattern)
199
- else:
200
- click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
254
+ video_files = _collect_video_files(video_path)
201
255
 
202
256
  if not video_files:
203
257
  click.echo("Error: No video files found", err=True)
@@ -220,27 +274,15 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
220
274
  )
221
275
 
222
276
  if use_batch:
223
- click.echo(
224
- f"Batch mode: Processing {len(video_files)} video(s) with {workers} workers",
225
- err=True,
277
+ _process_batch_videos(
278
+ video_files,
279
+ output_dir,
280
+ json_output_dir,
281
+ quality_preset,
282
+ verbose,
283
+ expert_params,
284
+ workers,
226
285
  )
227
- click.echo("Note: Batch processing not yet fully implemented", err=True)
228
- click.echo("Processing videos sequentially...", err=True)
229
- for video in video_files:
230
- try:
231
- click.echo(f"\nProcessing: {video}", err=True)
232
- out_path = None
233
- json_path = None
234
- if output_dir:
235
- out_path = str(Path(output_dir) / f"{Path(video).stem}_debug.mp4")
236
- if json_output_dir:
237
- json_path = str(Path(json_output_dir) / f"{Path(video).stem}.json")
238
- _process_single(
239
- video, out_path, json_path, quality_preset, verbose, expert_params
240
- )
241
- except Exception as e:
242
- click.echo(f"Error processing {video}: {e}", err=True)
243
- continue
244
286
  else:
245
287
  # Single video mode
246
288
  try:
@@ -257,53 +299,6 @@ def cmj_analyze( # NOSONAR(S107) - Click CLI requires individual parameters for
257
299
  sys.exit(1)
258
300
 
259
301
 
260
- def _determine_initial_confidence(
261
- quality_preset: QualityPreset,
262
- expert_params: AnalysisParameters,
263
- ) -> tuple[float, float]:
264
- """Determine initial detection and tracking confidence levels."""
265
- initial_detection_conf = 0.5
266
- initial_tracking_conf = 0.5
267
-
268
- if quality_preset == QualityPreset.FAST:
269
- initial_detection_conf = 0.3
270
- initial_tracking_conf = 0.3
271
- elif quality_preset == QualityPreset.ACCURATE:
272
- initial_detection_conf = 0.6
273
- initial_tracking_conf = 0.6
274
-
275
- # Override with expert values if provided
276
- if expert_params.detection_confidence is not None:
277
- initial_detection_conf = expert_params.detection_confidence
278
- if expert_params.tracking_confidence is not None:
279
- initial_tracking_conf = expert_params.tracking_confidence
280
-
281
- return initial_detection_conf, initial_tracking_conf
282
-
283
-
284
- def _track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
285
- """Track pose landmarks in all video frames."""
286
- click.echo("Tracking pose landmarks...", err=True)
287
- landmarks_sequence = []
288
- frames = []
289
-
290
- bar: Any
291
- with click.progressbar(length=video.frame_count, label="Processing frames") as bar:
292
- while True:
293
- frame = video.read_frame()
294
- if frame is None:
295
- break
296
-
297
- frames.append(frame)
298
- landmarks = tracker.process_frame(frame)
299
- landmarks_sequence.append(landmarks)
300
-
301
- bar.update(1)
302
-
303
- tracker.close()
304
- return frames, landmarks_sequence
305
-
306
-
307
302
  def _apply_expert_param_overrides(
308
303
  params: AutoTunedParams, expert_params: AnalysisParameters
309
304
  ) -> AutoTunedParams:
@@ -348,32 +343,20 @@ def _print_auto_tuned_params(
348
343
  click.echo("=" * 60 + "\n", err=True)
349
344
 
350
345
 
351
- def _smooth_landmark_sequence(
352
- landmarks_sequence: list, params: AutoTunedParams
353
- ) -> list:
354
- """Apply smoothing to landmark sequence."""
355
- if params.outlier_rejection or params.bilateral_filter:
356
- if params.outlier_rejection:
357
- click.echo("Smoothing landmarks with outlier rejection...", err=True)
358
- if params.bilateral_filter:
359
- click.echo(
360
- "Using bilateral temporal filter for edge-preserving smoothing...",
361
- err=True,
362
- )
363
- return smooth_landmarks_advanced(
364
- landmarks_sequence,
365
- window_length=params.smoothing_window,
366
- polyorder=params.polyorder,
367
- use_outlier_rejection=params.outlier_rejection,
368
- use_bilateral=params.bilateral_filter,
369
- )
370
- else:
371
- click.echo("Smoothing landmarks...", err=True)
372
- return smooth_landmarks(
373
- landmarks_sequence,
374
- window_length=params.smoothing_window,
375
- polyorder=params.polyorder,
376
- )
346
+ def _get_foot_position(frame_landmarks: dict | None, last_position: float) -> float:
347
+ """Extract average foot position from frame landmarks."""
348
+ if not frame_landmarks:
349
+ return last_position
350
+
351
+ # Average foot position (ankles and heels)
352
+ foot_y_values = []
353
+ for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
354
+ if key in frame_landmarks:
355
+ foot_y_values.append(frame_landmarks[key][1])
356
+
357
+ if foot_y_values:
358
+ return float(np.mean(foot_y_values))
359
+ return last_position
377
360
 
378
361
 
379
362
  def _extract_positions_from_landmarks(
@@ -391,20 +374,9 @@ def _extract_positions_from_landmarks(
391
374
  position_list: list[float] = []
392
375
 
393
376
  for frame_landmarks in smoothed_landmarks:
394
- if frame_landmarks:
395
- # Average foot position (ankles and heels)
396
- foot_y_values = []
397
- for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
398
- if key in frame_landmarks:
399
- foot_y_values.append(frame_landmarks[key][1])
400
-
401
- if foot_y_values:
402
- avg_y = float(np.mean(foot_y_values))
403
- position_list.append(avg_y)
404
- else:
405
- position_list.append(position_list[-1] if position_list else 0.5)
406
- else:
407
- position_list.append(position_list[-1] if position_list else 0.5)
377
+ last_pos = position_list[-1] if position_list else 0.5
378
+ position = _get_foot_position(frame_landmarks, last_pos)
379
+ position_list.append(position)
408
380
 
409
381
  return np.array(position_list), "foot"
410
382
 
@@ -427,7 +399,7 @@ def _process_single(
427
399
  )
428
400
 
429
401
  # Determine confidence levels
430
- detection_conf, tracking_conf = _determine_initial_confidence(
402
+ detection_conf, tracking_conf = determine_initial_confidence(
431
403
  quality_preset, expert_params
432
404
  )
433
405
 
@@ -436,7 +408,7 @@ def _process_single(
436
408
  min_detection_confidence=detection_conf,
437
409
  min_tracking_confidence=tracking_conf,
438
410
  )
439
- frames, landmarks_sequence = _track_all_frames(video, tracker)
411
+ frames, landmarks_sequence = track_all_frames(video, tracker)
440
412
 
441
413
  if not landmarks_sequence:
442
414
  click.echo("Error: No frames processed", err=True)
@@ -462,7 +434,7 @@ def _process_single(
462
434
  )
463
435
 
464
436
  # Apply smoothing
465
- smoothed_landmarks = _smooth_landmark_sequence(landmarks_sequence, params)
437
+ smoothed_landmarks = smooth_landmark_sequence(landmarks_sequence, params)
466
438
 
467
439
  # Extract foot positions
468
440
  vertical_positions, tracking_method = _extract_positions_from_landmarks(
@@ -98,6 +98,76 @@ class CMJDebugOverlayRenderer:
98
98
  }
99
99
  return colors.get(phase, (128, 128, 128))
100
100
 
101
+ def _get_skeleton_segments(
102
+ self, side_prefix: str
103
+ ) -> list[tuple[str, str, tuple[int, int, int], int]]:
104
+ """Get skeleton segments for one side of the body."""
105
+ return [
106
+ (f"{side_prefix}heel", f"{side_prefix}ankle", (0, 255, 255), 3), # Foot
107
+ (
108
+ f"{side_prefix}heel",
109
+ f"{side_prefix}foot_index",
110
+ (0, 255, 255),
111
+ 2,
112
+ ), # Alt foot
113
+ (f"{side_prefix}ankle", f"{side_prefix}knee", (255, 100, 100), 4), # Shin
114
+ (f"{side_prefix}knee", f"{side_prefix}hip", (100, 255, 100), 4), # Femur
115
+ (
116
+ f"{side_prefix}hip",
117
+ f"{side_prefix}shoulder",
118
+ (100, 100, 255),
119
+ 4,
120
+ ), # Trunk
121
+ (f"{side_prefix}shoulder", "nose", (150, 150, 255), 2), # Neck
122
+ ]
123
+
124
+ def _draw_segment(
125
+ self,
126
+ frame: np.ndarray,
127
+ landmarks: dict[str, tuple[float, float, float]],
128
+ start_key: str,
129
+ end_key: str,
130
+ color: tuple[int, int, int],
131
+ thickness: int,
132
+ ) -> None:
133
+ """Draw a single skeleton segment if both endpoints are visible."""
134
+ if start_key not in landmarks or end_key not in landmarks:
135
+ return
136
+
137
+ start_vis = landmarks[start_key][2]
138
+ end_vis = landmarks[end_key][2]
139
+
140
+ # Very low threshold to show as much as possible
141
+ if start_vis > 0.2 and end_vis > 0.2:
142
+ start_x = int(landmarks[start_key][0] * self.width)
143
+ start_y = int(landmarks[start_key][1] * self.height)
144
+ end_x = int(landmarks[end_key][0] * self.width)
145
+ end_y = int(landmarks[end_key][1] * self.height)
146
+
147
+ cv2.line(frame, (start_x, start_y), (end_x, end_y), color, thickness)
148
+
149
+ def _draw_joints(
150
+ self,
151
+ frame: np.ndarray,
152
+ landmarks: dict[str, tuple[float, float, float]],
153
+ side_prefix: str,
154
+ ) -> None:
155
+ """Draw joint circles for one side of the body."""
156
+ joint_keys = [
157
+ f"{side_prefix}heel",
158
+ f"{side_prefix}foot_index",
159
+ f"{side_prefix}ankle",
160
+ f"{side_prefix}knee",
161
+ f"{side_prefix}hip",
162
+ f"{side_prefix}shoulder",
163
+ ]
164
+ for key in joint_keys:
165
+ if key in landmarks and landmarks[key][2] > 0.2:
166
+ jx = int(landmarks[key][0] * self.width)
167
+ jy = int(landmarks[key][1] * self.height)
168
+ cv2.circle(frame, (jx, jy), 6, (255, 255, 255), -1)
169
+ cv2.circle(frame, (jx, jy), 8, (0, 0, 0), 2)
170
+
101
171
  def _draw_skeleton(
102
172
  self, frame: np.ndarray, landmarks: dict[str, tuple[float, float, float]]
103
173
  ) -> None:
@@ -112,68 +182,16 @@ class CMJDebugOverlayRenderer:
112
182
  """
113
183
  # Try both sides and draw all visible segments
114
184
  for side_prefix in ["right_", "left_"]:
115
- segments = [
116
- (f"{side_prefix}heel", f"{side_prefix}ankle", (0, 255, 255), 3), # Foot
117
- (
118
- f"{side_prefix}heel",
119
- f"{side_prefix}foot_index",
120
- (0, 255, 255),
121
- 2,
122
- ), # Alt foot
123
- (
124
- f"{side_prefix}ankle",
125
- f"{side_prefix}knee",
126
- (255, 100, 100),
127
- 4,
128
- ), # Shin
129
- (
130
- f"{side_prefix}knee",
131
- f"{side_prefix}hip",
132
- (100, 255, 100),
133
- 4,
134
- ), # Femur
135
- (
136
- f"{side_prefix}hip",
137
- f"{side_prefix}shoulder",
138
- (100, 100, 255),
139
- 4,
140
- ), # Trunk
141
- # Additional segments for better visualization
142
- (f"{side_prefix}shoulder", "nose", (150, 150, 255), 2), # Neck
143
- ]
185
+ segments = self._get_skeleton_segments(side_prefix)
144
186
 
145
187
  # Draw ALL visible segments (not just one side)
146
188
  for start_key, end_key, color, thickness in segments:
147
- if start_key in landmarks and end_key in landmarks:
148
- start_vis = landmarks[start_key][2]
149
- end_vis = landmarks[end_key][2]
150
-
151
- # Very low threshold to show as much as possible
152
- if start_vis > 0.2 and end_vis > 0.2:
153
- start_x = int(landmarks[start_key][0] * self.width)
154
- start_y = int(landmarks[start_key][1] * self.height)
155
- end_x = int(landmarks[end_key][0] * self.width)
156
- end_y = int(landmarks[end_key][1] * self.height)
157
-
158
- cv2.line(
159
- frame, (start_x, start_y), (end_x, end_y), color, thickness
160
- )
189
+ self._draw_segment(
190
+ frame, landmarks, start_key, end_key, color, thickness
191
+ )
161
192
 
162
193
  # Draw joints as circles for this side
163
- joint_keys = [
164
- f"{side_prefix}heel",
165
- f"{side_prefix}foot_index",
166
- f"{side_prefix}ankle",
167
- f"{side_prefix}knee",
168
- f"{side_prefix}hip",
169
- f"{side_prefix}shoulder",
170
- ]
171
- for key in joint_keys:
172
- if key in landmarks and landmarks[key][2] > 0.2:
173
- jx = int(landmarks[key][0] * self.width)
174
- jy = int(landmarks[key][1] * self.height)
175
- cv2.circle(frame, (jx, jy), 6, (255, 255, 255), -1)
176
- cv2.circle(frame, (jx, jy), 8, (0, 0, 0), 2)
194
+ self._draw_joints(frame, landmarks, side_prefix)
177
195
 
178
196
  # Always draw nose (head position) if visible
179
197
  if "nose" in landmarks and landmarks["nose"][2] > 0.2:
@@ -316,6 +334,126 @@ class CMJDebugOverlayRenderer:
316
334
  # Draw arc (simplified as a circle for now)
317
335
  cv2.circle(frame, (jx, jy), radius, arc_color, 2)
318
336
 
337
+ def _draw_foot_landmarks(
338
+ self,
339
+ frame: np.ndarray,
340
+ landmarks: dict[str, tuple[float, float, float]],
341
+ phase_color: tuple[int, int, int],
342
+ ) -> None:
343
+ """Draw foot landmarks and average position."""
344
+ foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
345
+ foot_positions = []
346
+
347
+ for key in foot_keys:
348
+ if key in landmarks:
349
+ x, y, vis = landmarks[key]
350
+ if vis > 0.5:
351
+ lx = int(x * self.width)
352
+ ly = int(y * self.height)
353
+ foot_positions.append((lx, ly))
354
+ cv2.circle(frame, (lx, ly), 5, (255, 255, 0), -1)
355
+
356
+ # Draw average foot position with phase color
357
+ if foot_positions:
358
+ avg_x = int(np.mean([p[0] for p in foot_positions]))
359
+ avg_y = int(np.mean([p[1] for p in foot_positions]))
360
+ cv2.circle(frame, (avg_x, avg_y), 12, phase_color, -1)
361
+ cv2.circle(frame, (avg_x, avg_y), 14, (255, 255, 255), 2)
362
+
363
+ def _draw_phase_banner(
364
+ self, frame: np.ndarray, phase: str | None, phase_color: tuple[int, int, int]
365
+ ) -> None:
366
+ """Draw phase indicator banner."""
367
+ if not phase:
368
+ return
369
+
370
+ phase_text = f"Phase: {phase.upper()}"
371
+ text_size = cv2.getTextSize(phase_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
372
+ cv2.rectangle(frame, (5, 5), (text_size[0] + 15, 45), phase_color, -1)
373
+ cv2.putText(
374
+ frame, phase_text, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2
375
+ )
376
+
377
+ def _draw_key_frame_markers(
378
+ self, frame: np.ndarray, frame_idx: int, metrics: CMJMetrics
379
+ ) -> None:
380
+ """Draw markers for key frames (standing start, lowest, takeoff, landing)."""
381
+ y_offset = 120
382
+ markers = []
383
+
384
+ if metrics.standing_start_frame and frame_idx == int(
385
+ metrics.standing_start_frame
386
+ ):
387
+ markers.append("COUNTERMOVEMENT START")
388
+
389
+ if frame_idx == int(metrics.lowest_point_frame):
390
+ markers.append("LOWEST POINT")
391
+
392
+ if frame_idx == int(metrics.takeoff_frame):
393
+ markers.append("TAKEOFF")
394
+
395
+ if frame_idx == int(metrics.landing_frame):
396
+ markers.append("LANDING")
397
+
398
+ for marker in markers:
399
+ cv2.putText(
400
+ frame,
401
+ marker,
402
+ (10, y_offset),
403
+ cv2.FONT_HERSHEY_SIMPLEX,
404
+ 0.7,
405
+ (255, 255, 0),
406
+ 2,
407
+ )
408
+ y_offset += 35
409
+
410
+ def _draw_metrics_summary(
411
+ self, frame: np.ndarray, frame_idx: int, metrics: CMJMetrics
412
+ ) -> None:
413
+ """Draw metrics summary in bottom right (last 30 frames)."""
414
+ total_frames = int(metrics.landing_frame) + 30
415
+ if frame_idx < total_frames - 30:
416
+ return
417
+
418
+ metrics_text = [
419
+ f"Jump Height: {metrics.jump_height:.3f}m",
420
+ f"Flight Time: {metrics.flight_time*1000:.0f}ms",
421
+ f"CM Depth: {metrics.countermovement_depth:.3f}m",
422
+ f"Ecc Duration: {metrics.eccentric_duration*1000:.0f}ms",
423
+ f"Con Duration: {metrics.concentric_duration*1000:.0f}ms",
424
+ ]
425
+
426
+ # Draw background
427
+ box_height = len(metrics_text) * 30 + 20
428
+ cv2.rectangle(
429
+ frame,
430
+ (self.width - 320, self.height - box_height - 10),
431
+ (self.width - 10, self.height - 10),
432
+ (0, 0, 0),
433
+ -1,
434
+ )
435
+ cv2.rectangle(
436
+ frame,
437
+ (self.width - 320, self.height - box_height - 10),
438
+ (self.width - 10, self.height - 10),
439
+ (0, 255, 0),
440
+ 2,
441
+ )
442
+
443
+ # Draw metrics text
444
+ text_y = self.height - box_height + 10
445
+ for text in metrics_text:
446
+ cv2.putText(
447
+ frame,
448
+ text,
449
+ (self.width - 310, text_y),
450
+ cv2.FONT_HERSHEY_SIMPLEX,
451
+ 0.6,
452
+ (255, 255, 255),
453
+ 1,
454
+ )
455
+ text_y += 30
456
+
319
457
  def render_frame(
320
458
  self,
321
459
  frame: np.ndarray,
@@ -346,47 +484,12 @@ class CMJDebugOverlayRenderer:
346
484
 
347
485
  # Draw skeleton and triple extension if landmarks available
348
486
  if landmarks:
349
- # Draw skeleton segments for triple extension
350
487
  self._draw_skeleton(annotated, landmarks)
351
-
352
- # Draw joint angles
353
488
  self._draw_joint_angles(annotated, landmarks, phase_color)
354
-
355
- # Draw foot landmarks
356
- foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
357
- foot_positions = []
358
-
359
- for key in foot_keys:
360
- if key in landmarks:
361
- x, y, vis = landmarks[key]
362
- if vis > 0.5:
363
- lx = int(x * self.width)
364
- ly = int(y * self.height)
365
- foot_positions.append((lx, ly))
366
- cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
367
-
368
- # Draw average foot position with phase color
369
- if foot_positions:
370
- avg_x = int(np.mean([p[0] for p in foot_positions]))
371
- avg_y = int(np.mean([p[1] for p in foot_positions]))
372
- cv2.circle(annotated, (avg_x, avg_y), 12, phase_color, -1)
373
- cv2.circle(annotated, (avg_x, avg_y), 14, (255, 255, 255), 2)
489
+ self._draw_foot_landmarks(annotated, landmarks, phase_color)
374
490
 
375
491
  # Draw phase indicator banner
376
- if phase:
377
- # Phase name with background
378
- phase_text = f"Phase: {phase.upper()}"
379
- text_size = cv2.getTextSize(phase_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]
380
- cv2.rectangle(annotated, (5, 5), (text_size[0] + 15, 45), phase_color, -1)
381
- cv2.putText(
382
- annotated,
383
- phase_text,
384
- (10, 35),
385
- cv2.FONT_HERSHEY_SIMPLEX,
386
- 1,
387
- (0, 0, 0),
388
- 2,
389
- )
492
+ self._draw_phase_banner(annotated, phase, phase_color)
390
493
 
391
494
  # Draw frame number
392
495
  cv2.putText(
@@ -399,78 +502,10 @@ class CMJDebugOverlayRenderer:
399
502
  2,
400
503
  )
401
504
 
402
- # Draw key frame markers
505
+ # Draw key frame markers and metrics summary
403
506
  if metrics:
404
- y_offset = 120
405
- markers = []
406
-
407
- if metrics.standing_start_frame and frame_idx == int(
408
- metrics.standing_start_frame
409
- ):
410
- markers.append("COUNTERMOVEMENT START")
411
-
412
- if frame_idx == int(metrics.lowest_point_frame):
413
- markers.append("LOWEST POINT")
414
-
415
- if frame_idx == int(metrics.takeoff_frame):
416
- markers.append("TAKEOFF")
417
-
418
- if frame_idx == int(metrics.landing_frame):
419
- markers.append("LANDING")
420
-
421
- for marker in markers:
422
- cv2.putText(
423
- annotated,
424
- marker,
425
- (10, y_offset),
426
- cv2.FONT_HERSHEY_SIMPLEX,
427
- 0.7,
428
- (255, 255, 0),
429
- 2,
430
- )
431
- y_offset += 35
432
-
433
- # Draw metrics summary in bottom right (last 30 frames)
434
- total_frames = int(metrics.landing_frame) + 30
435
- if frame_idx >= total_frames - 30:
436
- metrics_text = [
437
- f"Jump Height: {metrics.jump_height:.3f}m",
438
- f"Flight Time: {metrics.flight_time*1000:.0f}ms",
439
- f"CM Depth: {metrics.countermovement_depth:.3f}m",
440
- f"Ecc Duration: {metrics.eccentric_duration*1000:.0f}ms",
441
- f"Con Duration: {metrics.concentric_duration*1000:.0f}ms",
442
- ]
443
-
444
- # Draw background
445
- box_height = len(metrics_text) * 30 + 20
446
- cv2.rectangle(
447
- annotated,
448
- (self.width - 320, self.height - box_height - 10),
449
- (self.width - 10, self.height - 10),
450
- (0, 0, 0),
451
- -1,
452
- )
453
- cv2.rectangle(
454
- annotated,
455
- (self.width - 320, self.height - box_height - 10),
456
- (self.width - 10, self.height - 10),
457
- (0, 255, 0),
458
- 2,
459
- )
460
-
461
- # Draw metrics text
462
- text_y = self.height - box_height + 10
463
- for text in metrics_text:
464
- cv2.putText(
465
- annotated,
466
- text,
467
- (self.width - 310, text_y),
468
- cv2.FONT_HERSHEY_SIMPLEX,
469
- 0.6,
470
- (255, 255, 255),
471
- 1,
472
- )
473
- text_y += 30
507
+ self._draw_key_frame_markers(annotated, frame_idx, metrics)
508
+ self._draw_metrics_summary(annotated, frame_idx, metrics)
474
509
 
475
510
  return annotated
476
511
 
@@ -0,0 +1,113 @@
1
+ """Shared CLI utilities for drop jump and CMJ analysis."""
2
+
3
+ from typing import Any, Protocol
4
+
5
+ import click
6
+
7
+ from .auto_tuning import AutoTunedParams, QualityPreset
8
+ from .pose import PoseTracker
9
+ from .smoothing import smooth_landmarks, smooth_landmarks_advanced
10
+ from .video_io import VideoProcessor
11
+
12
+
13
+ class ExpertParameters(Protocol):
14
+ """Protocol for expert parameter overrides."""
15
+
16
+ detection_confidence: float | None
17
+ tracking_confidence: float | None
18
+
19
+
20
+ def determine_initial_confidence(
21
+ quality_preset: QualityPreset,
22
+ expert_params: ExpertParameters,
23
+ ) -> tuple[float, float]:
24
+ """Determine initial detection and tracking confidence levels.
25
+
26
+ Args:
27
+ quality_preset: Quality preset enum
28
+ expert_params: Expert parameter overrides
29
+
30
+ Returns:
31
+ Tuple of (detection_confidence, tracking_confidence)
32
+ """
33
+ initial_detection_conf = 0.5
34
+ initial_tracking_conf = 0.5
35
+
36
+ if quality_preset == QualityPreset.FAST:
37
+ initial_detection_conf = 0.3
38
+ initial_tracking_conf = 0.3
39
+ elif quality_preset == QualityPreset.ACCURATE:
40
+ initial_detection_conf = 0.6
41
+ initial_tracking_conf = 0.6
42
+
43
+ # Override with expert values if provided
44
+ if expert_params.detection_confidence is not None:
45
+ initial_detection_conf = expert_params.detection_confidence
46
+ if expert_params.tracking_confidence is not None:
47
+ initial_tracking_conf = expert_params.tracking_confidence
48
+
49
+ return initial_detection_conf, initial_tracking_conf
50
+
51
+
52
+ def track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
53
+ """Track pose landmarks in all video frames.
54
+
55
+ Args:
56
+ video: Video processor
57
+ tracker: Pose tracker
58
+
59
+ Returns:
60
+ Tuple of (frames, landmarks_sequence)
61
+ """
62
+ click.echo("Tracking pose landmarks...", err=True)
63
+ landmarks_sequence = []
64
+ frames = []
65
+
66
+ bar: Any
67
+ with click.progressbar(length=video.frame_count, label="Processing frames") as bar:
68
+ while True:
69
+ frame = video.read_frame()
70
+ if frame is None:
71
+ break
72
+
73
+ frames.append(frame)
74
+ landmarks = tracker.process_frame(frame)
75
+ landmarks_sequence.append(landmarks)
76
+ bar.update(1)
77
+
78
+ tracker.close()
79
+ return frames, landmarks_sequence
80
+
81
+
82
+ def smooth_landmark_sequence(landmarks_sequence: list, params: AutoTunedParams) -> list:
83
+ """Apply smoothing to landmark sequence.
84
+
85
+ Args:
86
+ landmarks_sequence: Raw landmark sequence
87
+ params: Auto-tuned parameters
88
+
89
+ Returns:
90
+ Smoothed landmark sequence
91
+ """
92
+ if params.outlier_rejection or params.bilateral_filter:
93
+ if params.outlier_rejection:
94
+ click.echo("Smoothing landmarks with outlier rejection...", err=True)
95
+ if params.bilateral_filter:
96
+ click.echo(
97
+ "Using bilateral temporal filter for edge-preserving smoothing...",
98
+ err=True,
99
+ )
100
+ return smooth_landmarks_advanced(
101
+ landmarks_sequence,
102
+ window_length=params.smoothing_window,
103
+ polyorder=params.polyorder,
104
+ use_outlier_rejection=params.outlier_rejection,
105
+ use_bilateral=params.bilateral_filter,
106
+ )
107
+ else:
108
+ click.echo("Smoothing landmarks...", err=True)
109
+ return smooth_landmarks(
110
+ landmarks_sequence,
111
+ window_length=params.smoothing_window,
112
+ polyorder=params.polyorder,
113
+ )
@@ -21,8 +21,12 @@ from ..core.auto_tuning import (
21
21
  analyze_video_sample,
22
22
  auto_tune_parameters,
23
23
  )
24
+ from ..core.cli_utils import (
25
+ determine_initial_confidence,
26
+ smooth_landmark_sequence,
27
+ track_all_frames,
28
+ )
24
29
  from ..core.pose import PoseTracker
25
- from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
26
30
  from ..core.video_io import VideoProcessor
27
31
  from .analysis import (
28
32
  ContactState,
@@ -256,69 +260,6 @@ def dropjump_analyze( # NOSONAR(S107) - Click CLI requires individual parameter
256
260
  )
257
261
 
258
262
 
259
- def _determine_initial_confidence(
260
- quality_preset: QualityPreset,
261
- expert_params: AnalysisParameters,
262
- ) -> tuple[float, float]:
263
- """Determine initial detection and tracking confidence levels.
264
-
265
- Args:
266
- quality_preset: Quality preset enum
267
- expert_params: Expert parameter overrides
268
-
269
- Returns:
270
- Tuple of (detection_confidence, tracking_confidence)
271
- """
272
- initial_detection_conf = 0.5
273
- initial_tracking_conf = 0.5
274
-
275
- if quality_preset == QualityPreset.FAST:
276
- initial_detection_conf = 0.3
277
- initial_tracking_conf = 0.3
278
- elif quality_preset == QualityPreset.ACCURATE:
279
- initial_detection_conf = 0.6
280
- initial_tracking_conf = 0.6
281
-
282
- # Override with expert values if provided
283
- if expert_params.detection_confidence is not None:
284
- initial_detection_conf = expert_params.detection_confidence
285
- if expert_params.tracking_confidence is not None:
286
- initial_tracking_conf = expert_params.tracking_confidence
287
-
288
- return initial_detection_conf, initial_tracking_conf
289
-
290
-
291
- def _track_all_frames(video: VideoProcessor, tracker: PoseTracker) -> tuple[list, list]:
292
- """Track pose landmarks in all video frames.
293
-
294
- Args:
295
- video: Video processor
296
- tracker: Pose tracker
297
-
298
- Returns:
299
- Tuple of (frames, landmarks_sequence)
300
- """
301
- click.echo("Tracking pose landmarks...", err=True)
302
- landmarks_sequence = []
303
- frames = []
304
-
305
- bar: Any
306
- with click.progressbar(length=video.frame_count, label="Processing frames") as bar:
307
- while True:
308
- frame = video.read_frame()
309
- if frame is None:
310
- break
311
-
312
- frames.append(frame)
313
- landmarks = tracker.process_frame(frame)
314
- landmarks_sequence.append(landmarks)
315
-
316
- bar.update(1)
317
-
318
- tracker.close()
319
- return frames, landmarks_sequence
320
-
321
-
322
263
  def _apply_expert_param_overrides(
323
264
  params: AutoTunedParams, expert_params: AnalysisParameters
324
265
  ) -> AutoTunedParams:
@@ -380,42 +321,6 @@ def _print_auto_tuned_params(
380
321
  click.echo("=" * 60 + "\n", err=True)
381
322
 
382
323
 
383
- def _smooth_landmark_sequence(
384
- landmarks_sequence: list, params: AutoTunedParams
385
- ) -> list:
386
- """Apply smoothing to landmark sequence.
387
-
388
- Args:
389
- landmarks_sequence: Raw landmark sequence
390
- params: Auto-tuned parameters
391
-
392
- Returns:
393
- Smoothed landmarks
394
- """
395
- if params.outlier_rejection or params.bilateral_filter:
396
- if params.outlier_rejection:
397
- click.echo("Smoothing landmarks with outlier rejection...", err=True)
398
- if params.bilateral_filter:
399
- click.echo(
400
- "Using bilateral temporal filter for edge-preserving smoothing...",
401
- err=True,
402
- )
403
- return smooth_landmarks_advanced(
404
- landmarks_sequence,
405
- window_length=params.smoothing_window,
406
- polyorder=params.polyorder,
407
- use_outlier_rejection=params.outlier_rejection,
408
- use_bilateral=params.bilateral_filter,
409
- )
410
- else:
411
- click.echo("Smoothing landmarks...", err=True)
412
- return smooth_landmarks(
413
- landmarks_sequence,
414
- window_length=params.smoothing_window,
415
- polyorder=params.polyorder,
416
- )
417
-
418
-
419
324
  def _extract_positions_and_visibilities(
420
325
  smoothed_landmarks: list,
421
326
  ) -> tuple[np.ndarray, np.ndarray]:
@@ -533,7 +438,7 @@ def _process_single(
533
438
  )
534
439
 
535
440
  # Determine confidence levels
536
- detection_conf, tracking_conf = _determine_initial_confidence(
441
+ detection_conf, tracking_conf = determine_initial_confidence(
537
442
  quality_preset, expert_params
538
443
  )
539
444
 
@@ -542,7 +447,7 @@ def _process_single(
542
447
  min_detection_confidence=detection_conf,
543
448
  min_tracking_confidence=tracking_conf,
544
449
  )
545
- frames, landmarks_sequence = _track_all_frames(video, tracker)
450
+ frames, landmarks_sequence = track_all_frames(video, tracker)
546
451
 
547
452
  if not landmarks_sequence:
548
453
  click.echo("Error: No frames processed", err=True)
@@ -560,7 +465,7 @@ def _process_single(
560
465
  _print_auto_tuned_params(video, characteristics, quality_preset, params)
561
466
 
562
467
  # Apply smoothing
563
- smoothed_landmarks = _smooth_landmark_sequence(landmarks_sequence, params)
468
+ smoothed_landmarks = smooth_landmark_sequence(landmarks_sequence, params)
564
469
 
565
470
  # Extract positions
566
471
  vertical_positions, visibilities = _extract_positions_and_visibilities(
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.11.1
3
+ Version: 0.11.3
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
5
  Project-URL: Homepage, https://github.com/feniix/kinemotion
6
6
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,26 +1,27 @@
1
1
  kinemotion/__init__.py,sha256=REBC9wrwYC_grvCS00qEOyign65Zc1sc-5buLpyqQxA,654
2
- kinemotion/api.py,sha256=mbI57PFvRK7iNU3p4PNdweuHCCU0HP9nLeUk9fx-b2g,31390
2
+ kinemotion/api.py,sha256=10Rj_dqAa6KCcmCjlGEtQFZLoNyqeMzFLo7vGZcttWY,31586
3
3
  kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
4
4
  kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
5
- kinemotion/cmj/analysis.py,sha256=ciH5f5SC1IJjG0j8nkyZRu7QYR-Al6bWCBIfOW4Q1xQ,18466
6
- kinemotion/cmj/cli.py,sha256=HUXat4xecCZ7JOiyo4zfcOk5xJLFpUn5ZaTDVAJL6qo,20565
7
- kinemotion/cmj/debug_overlay.py,sha256=TVDrZ16TJClftM_zhkrCzBLMs87SfYDa8H-eqfzQJ4c,17976
5
+ kinemotion/cmj/analysis.py,sha256=4HYGn4VDIB6oExAees-VcPfpNgWOltpgwjyNTU7YAb4,18263
6
+ kinemotion/cmj/cli.py,sha256=lVVlh8teFHXbDzaFdDfq3xSwRf4kVwyYyd6FU8ta_Ec,19044
7
+ kinemotion/cmj/debug_overlay.py,sha256=ELrSYQ9LmLV81bJS5w9i2c4VwRS0EYAUnMehMHU7VGc,18724
8
8
  kinemotion/cmj/joint_angles.py,sha256=8ucpDGPvbt4iX3tx9eVxJEUv0laTm2Y58_--VzJCogE,9113
9
9
  kinemotion/cmj/kinematics.py,sha256=Xl_PlC2OqMoA-zOc3SRB_GqI0AgLlJol5FTPe5J_qLc,7573
10
10
  kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
11
11
  kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQIdw,10508
12
+ kinemotion/core/cli_utils.py,sha256=hNf2-_LIbi-ntXAkovjqcuWifYLazikqJBzeTN9YmZc,3492
12
13
  kinemotion/core/filtering.py,sha256=f-m-aA59e4WqE6u-9MA51wssu7rI-Y_7n1cG8IWdeRQ,11241
13
14
  kinemotion/core/pose.py,sha256=Wfd1RR-2ZznYpWeQUbySwcV3mvReqn8n3XO6S7pGq4M,8390
14
15
  kinemotion/core/smoothing.py,sha256=FON4qKtsSp1-03GnJrDkEUAePaACn4QPMJF0eTIYqR0,12925
15
16
  kinemotion/core/video_io.py,sha256=z8Z0qbNaKbcdB40KnbNOBMzab3BbgnhBxp-mUBYeXgM,6577
16
17
  kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
17
18
  kinemotion/dropjump/analysis.py,sha256=HfJt2t9IsMBiBUz7apIzdxbRH9QqzlFnDVVWcKhU3ow,23291
18
- kinemotion/dropjump/cli.py,sha256=zo23qoYSpC_2BcScy-JOilcGcWGM0j3Xv0lpO0_n0wk,27975
19
+ kinemotion/dropjump/cli.py,sha256=eXO-9H9z0g-EJUD1uIT37KIMgfyje4fPAO2FgZiEZzk,24985
19
20
  kinemotion/dropjump/debug_overlay.py,sha256=GMo-jCl5OPIv82uPxDbBVI7CsAMwATTvxZMeWfs8k8M,8701
20
21
  kinemotion/dropjump/kinematics.py,sha256=RM_O8Kdc6aEiPIu_99N4cu-4EhYSQxtBGASJF_dmQaU,19081
21
22
  kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
- kinemotion-0.11.1.dist-info/METADATA,sha256=XRgrT5bV--WLFpW-pDbZAQpo0N5RekTUvtFMntKcQoA,18990
23
- kinemotion-0.11.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
24
- kinemotion-0.11.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
25
- kinemotion-0.11.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
26
- kinemotion-0.11.1.dist-info/RECORD,,
23
+ kinemotion-0.11.3.dist-info/METADATA,sha256=Yu4EP1FzusXr0Y0KRNIfywzuw0nNYeACUojxFhzrDuE,18990
24
+ kinemotion-0.11.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
25
+ kinemotion-0.11.3.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
26
+ kinemotion-0.11.3.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
27
+ kinemotion-0.11.3.dist-info/RECORD,,