kinemotion 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. Click here for more details.

kinemotion/cli.py CHANGED
@@ -1,22 +1,8 @@
1
1
  """Command-line interface for kinemotion analysis."""
2
2
 
3
- import json
4
- import sys
5
- from pathlib import Path
6
-
7
3
  import click
8
- import numpy as np
9
4
 
10
- from .core.pose import PoseTracker, compute_center_of_mass
11
- from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
12
- from .core.video_io import VideoProcessor
13
- from .dropjump.analysis import (
14
- calculate_adaptive_threshold,
15
- compute_average_foot_position,
16
- detect_ground_contact,
17
- )
18
- from .dropjump.debug_overlay import DebugOverlayRenderer
19
- from .dropjump.kinematics import calculate_drop_jump_metrics
5
+ from .dropjump.cli import dropjump_analyze
20
6
 
21
7
 
22
8
  @click.group()
@@ -26,374 +12,8 @@ def cli() -> None:
26
12
  pass
27
13
 
28
14
 
29
- @cli.command(name="dropjump-analyze")
30
- @click.argument("video_path", type=click.Path(exists=True))
31
- @click.option(
32
- "--output",
33
- "-o",
34
- type=click.Path(),
35
- help="Path for debug video output (optional)",
36
- )
37
- @click.option(
38
- "--json-output",
39
- "-j",
40
- type=click.Path(),
41
- help="Path for JSON metrics output (default: stdout)",
42
- )
43
- @click.option(
44
- "--smoothing-window",
45
- type=int,
46
- default=5,
47
- help="Smoothing window size (must be odd, >= 3)",
48
- show_default=True,
49
- )
50
- @click.option(
51
- "--polyorder",
52
- type=int,
53
- default=2,
54
- help=(
55
- "Polynomial order for Savitzky-Golay smoothing "
56
- "(2=quadratic, 3=cubic, must be < smoothing-window)"
57
- ),
58
- show_default=True,
59
- )
60
- @click.option(
61
- "--outlier-rejection/--no-outlier-rejection",
62
- default=True,
63
- help=(
64
- "Apply RANSAC and median-based outlier rejection to remove tracking glitches "
65
- "(default: enabled, +1-2%% accuracy)"
66
- ),
67
- )
68
- @click.option(
69
- "--bilateral-filter/--no-bilateral-filter",
70
- default=False,
71
- help=(
72
- "Use bilateral temporal filter for edge-preserving smoothing "
73
- "(default: disabled, experimental)"
74
- ),
75
- )
76
- @click.option(
77
- "--velocity-threshold",
78
- type=float,
79
- default=0.02,
80
- help="Velocity threshold for contact detection (normalized units)",
81
- show_default=True,
82
- )
83
- @click.option(
84
- "--min-contact-frames",
85
- type=int,
86
- default=3,
87
- help="Minimum frames for valid ground contact",
88
- show_default=True,
89
- )
90
- @click.option(
91
- "--visibility-threshold",
92
- type=float,
93
- default=0.5,
94
- help="Minimum landmark visibility score (0-1)",
95
- show_default=True,
96
- )
97
- @click.option(
98
- "--detection-confidence",
99
- type=float,
100
- default=0.5,
101
- help="Pose detection confidence threshold (0-1)",
102
- show_default=True,
103
- )
104
- @click.option(
105
- "--tracking-confidence",
106
- type=float,
107
- default=0.5,
108
- help="Pose tracking confidence threshold (0-1)",
109
- show_default=True,
110
- )
111
- @click.option(
112
- "--drop-height",
113
- type=float,
114
- default=None,
115
- help="Height of drop box/platform in meters (e.g., 0.40 for 40cm) - used for calibration",
116
- )
117
- @click.option(
118
- "--use-curvature/--no-curvature",
119
- default=True,
120
- help="Use trajectory curvature analysis for refining transitions (default: enabled)",
121
- )
122
- @click.option(
123
- "--use-com/--use-feet",
124
- default=False,
125
- help="Track center of mass instead of feet for improved accuracy (default: feet)",
126
- )
127
- @click.option(
128
- "--adaptive-threshold/--fixed-threshold",
129
- default=False,
130
- help="Auto-calibrate velocity threshold from video baseline (default: fixed)",
131
- )
132
- def dropjump_analyze(
133
- video_path: str,
134
- output: str | None,
135
- json_output: str | None,
136
- smoothing_window: int,
137
- polyorder: int,
138
- outlier_rejection: bool,
139
- bilateral_filter: bool,
140
- velocity_threshold: float,
141
- min_contact_frames: int,
142
- visibility_threshold: float,
143
- detection_confidence: float,
144
- tracking_confidence: float,
145
- drop_height: float | None,
146
- use_curvature: bool,
147
- use_com: bool,
148
- adaptive_threshold: bool,
149
- ) -> None:
150
- """
151
- Analyze drop-jump video to estimate ground contact time, flight time, and jump height.
152
-
153
- VIDEO_PATH: Path to the input video file
154
- """
155
- click.echo(f"Analyzing video: {video_path}", err=True)
156
-
157
- # Validate parameters
158
- if smoothing_window < 3:
159
- click.echo("Error: smoothing-window must be >= 3", err=True)
160
- sys.exit(1)
161
-
162
- if smoothing_window % 2 == 0:
163
- smoothing_window += 1
164
- click.echo(
165
- f"Adjusting smoothing-window to {smoothing_window} (must be odd)", err=True
166
- )
167
-
168
- if polyorder < 1:
169
- click.echo("Error: polyorder must be >= 1", err=True)
170
- sys.exit(1)
171
-
172
- if polyorder >= smoothing_window:
173
- click.echo(
174
- f"Error: polyorder ({polyorder}) must be < smoothing-window ({smoothing_window})",
175
- err=True,
176
- )
177
- sys.exit(1)
178
-
179
- try:
180
- # Initialize video processor
181
- with VideoProcessor(video_path) as video:
182
- click.echo(
183
- f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
184
- f"{video.frame_count} frames",
185
- err=True,
186
- )
187
-
188
- # Initialize pose tracker
189
- tracker = PoseTracker(
190
- min_detection_confidence=detection_confidence,
191
- min_tracking_confidence=tracking_confidence,
192
- )
193
-
194
- # Process all frames
195
- click.echo("Tracking pose landmarks...", err=True)
196
- landmarks_sequence = []
197
- frames = []
198
-
199
- frame_idx = 0
200
- with click.progressbar(
201
- length=video.frame_count, label="Processing frames"
202
- ) as bar:
203
- while True:
204
- frame = video.read_frame()
205
- if frame is None:
206
- break
207
-
208
- frames.append(frame)
209
- landmarks = tracker.process_frame(frame)
210
- landmarks_sequence.append(landmarks)
211
-
212
- frame_idx += 1
213
- bar.update(1)
214
-
215
- tracker.close()
216
-
217
- if not landmarks_sequence:
218
- click.echo("Error: No frames processed", err=True)
219
- sys.exit(1)
220
-
221
- # Smooth landmarks
222
- if outlier_rejection or bilateral_filter:
223
- if outlier_rejection:
224
- click.echo(
225
- "Smoothing landmarks with outlier rejection...", err=True
226
- )
227
- if bilateral_filter:
228
- click.echo(
229
- "Using bilateral temporal filter for edge-preserving smoothing...",
230
- err=True,
231
- )
232
- smoothed_landmarks = smooth_landmarks_advanced(
233
- landmarks_sequence,
234
- window_length=smoothing_window,
235
- polyorder=polyorder,
236
- use_outlier_rejection=outlier_rejection,
237
- use_bilateral=bilateral_filter,
238
- )
239
- else:
240
- click.echo("Smoothing landmarks...", err=True)
241
- smoothed_landmarks = smooth_landmarks(
242
- landmarks_sequence, window_length=smoothing_window, polyorder=polyorder
243
- )
244
-
245
- # Extract vertical positions (either CoM or feet)
246
- if use_com:
247
- click.echo("Computing center of mass positions...", err=True)
248
- else:
249
- click.echo("Extracting foot positions...", err=True)
250
-
251
- position_list: list[float] = []
252
- visibilities_list: list[float] = []
253
-
254
- for frame_landmarks in smoothed_landmarks:
255
- if frame_landmarks:
256
- if use_com:
257
- # Use center of mass estimation
258
- com_x, com_y, com_vis = compute_center_of_mass(
259
- frame_landmarks, visibility_threshold=visibility_threshold
260
- )
261
- position_list.append(com_y)
262
- visibilities_list.append(com_vis)
263
- else:
264
- # Use average foot position (original method)
265
- foot_x, foot_y = compute_average_foot_position(frame_landmarks)
266
- position_list.append(foot_y)
267
-
268
- # Average visibility of foot landmarks
269
- foot_vis = []
270
- for key in [
271
- "left_ankle",
272
- "right_ankle",
273
- "left_heel",
274
- "right_heel",
275
- ]:
276
- if key in frame_landmarks:
277
- foot_vis.append(frame_landmarks[key][2])
278
- visibilities_list.append(
279
- float(np.mean(foot_vis)) if foot_vis else 0.0
280
- )
281
- else:
282
- # Use previous position if available, otherwise default
283
- position_list.append(
284
- position_list[-1] if position_list else 0.5
285
- )
286
- visibilities_list.append(0.0)
287
-
288
- vertical_positions: np.ndarray = np.array(position_list)
289
- visibilities: np.ndarray = np.array(visibilities_list)
290
-
291
- # Calculate adaptive threshold if enabled
292
- if adaptive_threshold:
293
- click.echo("Calculating adaptive velocity threshold...", err=True)
294
- velocity_threshold = calculate_adaptive_threshold(
295
- vertical_positions,
296
- video.fps,
297
- baseline_duration=3.0,
298
- multiplier=1.5,
299
- smoothing_window=smoothing_window,
300
- polyorder=polyorder,
301
- )
302
- click.echo(
303
- f"Adaptive threshold: {velocity_threshold:.4f} "
304
- f"(auto-calibrated from baseline)",
305
- err=True,
306
- )
307
-
308
- # Detect ground contact
309
- contact_states = detect_ground_contact(
310
- vertical_positions,
311
- velocity_threshold=velocity_threshold,
312
- min_contact_frames=min_contact_frames,
313
- visibility_threshold=visibility_threshold,
314
- visibilities=visibilities,
315
- )
316
-
317
- # Calculate metrics
318
- click.echo("Calculating metrics...", err=True)
319
- if use_com:
320
- click.echo("Using center of mass tracking for improved accuracy", err=True)
321
- if drop_height:
322
- click.echo(
323
- f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
324
- err=True,
325
- )
326
- metrics = calculate_drop_jump_metrics(
327
- contact_states,
328
- vertical_positions,
329
- video.fps,
330
- drop_height_m=drop_height,
331
- velocity_threshold=velocity_threshold,
332
- smoothing_window=smoothing_window,
333
- polyorder=polyorder,
334
- use_curvature=use_curvature,
335
- )
336
-
337
- # Output metrics as JSON
338
- metrics_dict = metrics.to_dict()
339
- metrics_json = json.dumps(metrics_dict, indent=2)
340
-
341
- if json_output:
342
- output_path = Path(json_output)
343
- output_path.write_text(metrics_json)
344
- click.echo(f"Metrics written to: {json_output}", err=True)
345
- else:
346
- click.echo(metrics_json)
347
-
348
- # Generate debug video if requested
349
- if output:
350
- click.echo(f"Generating debug video: {output}", err=True)
351
- if video.display_width != video.width or video.display_height != video.height:
352
- click.echo(
353
- f"Source video encoded: {video.width}x{video.height}",
354
- err=True,
355
- )
356
- click.echo(
357
- f"Output dimensions: {video.display_width}x{video.display_height} "
358
- f"(respecting display aspect ratio)",
359
- err=True,
360
- )
361
- else:
362
- click.echo(
363
- f"Output dimensions: {video.width}x{video.height} "
364
- f"(matching source video aspect ratio)",
365
- err=True,
366
- )
367
- with DebugOverlayRenderer(
368
- output,
369
- video.width,
370
- video.height,
371
- video.display_width,
372
- video.display_height,
373
- video.fps,
374
- ) as renderer:
375
- with click.progressbar(
376
- length=len(frames), label="Rendering frames"
377
- ) as bar:
378
- for i, frame in enumerate(frames):
379
- annotated = renderer.render_frame(
380
- frame,
381
- smoothed_landmarks[i],
382
- contact_states[i],
383
- i,
384
- metrics,
385
- use_com=use_com,
386
- )
387
- renderer.write_frame(annotated)
388
- bar.update(1)
389
-
390
- click.echo(f"Debug video saved: {output}", err=True)
391
-
392
- click.echo("Analysis complete!", err=True)
393
-
394
- except Exception as e:
395
- click.echo(f"Error: {str(e)}", err=True)
396
- sys.exit(1)
15
+ # Register commands from submodules
16
+ cli.add_command(dropjump_analyze)
397
17
 
398
18
 
399
19
  if __name__ == "__main__":
@@ -95,16 +95,24 @@ def detect_ground_contact(
95
95
  min_contact_frames: int = 3,
96
96
  visibility_threshold: float = 0.5,
97
97
  visibilities: np.ndarray | None = None,
98
+ window_length: int = 5,
99
+ polyorder: int = 2,
98
100
  ) -> list[ContactState]:
99
101
  """
100
102
  Detect when feet are in contact with ground based on vertical motion.
101
103
 
104
+ Uses derivative-based velocity calculation via Savitzky-Golay filter for smooth,
105
+ accurate velocity estimates. This is consistent with the velocity calculation used
106
+ throughout the pipeline for sub-frame interpolation and curvature analysis.
107
+
102
108
  Args:
103
109
  foot_positions: Array of foot y-positions (normalized, 0-1, where 1 is bottom)
104
110
  velocity_threshold: Threshold for vertical velocity to consider stationary
105
111
  min_contact_frames: Minimum consecutive frames to confirm contact
106
112
  visibility_threshold: Minimum visibility score to trust landmark
107
113
  visibilities: Array of visibility scores for each frame
114
+ window_length: Window size for velocity derivative calculation (must be odd)
115
+ polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
108
116
 
109
117
  Returns:
110
118
  List of ContactState for each frame
@@ -115,8 +123,12 @@ def detect_ground_contact(
115
123
  if n_frames < 2:
116
124
  return states
117
125
 
118
- # Compute vertical velocity (positive = moving down in image coordinates)
119
- velocities = np.diff(foot_positions, prepend=foot_positions[0])
126
+ # Compute vertical velocity using derivative-based method
127
+ # This provides smoother, more accurate velocity estimates than frame-to-frame differences
128
+ # and is consistent with the velocity calculation used for sub-frame interpolation
129
+ velocities = compute_velocity_from_derivative(
130
+ foot_positions, window_length=window_length, polyorder=polyorder
131
+ )
120
132
 
121
133
  # Detect potential contact frames based on low velocity
122
134
  is_stationary = np.abs(velocities) < velocity_threshold
@@ -0,0 +1,361 @@
1
+ """Command-line interface for drop jump analysis."""
2
+
3
+ import json
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import click
8
+ import numpy as np
9
+
10
+ from ..core.pose import PoseTracker
11
+ from ..core.smoothing import smooth_landmarks, smooth_landmarks_advanced
12
+ from ..core.video_io import VideoProcessor
13
+ from .analysis import (
14
+ compute_average_foot_position,
15
+ detect_ground_contact,
16
+ )
17
+ from .debug_overlay import DebugOverlayRenderer
18
+ from .kinematics import calculate_drop_jump_metrics
19
+
20
+
21
+ @click.command(name="dropjump-analyze")
22
+ @click.argument("video_path", type=click.Path(exists=True))
23
+ @click.option(
24
+ "--output",
25
+ "-o",
26
+ type=click.Path(),
27
+ help="Path for debug video output (optional)",
28
+ )
29
+ @click.option(
30
+ "--json-output",
31
+ "-j",
32
+ type=click.Path(),
33
+ help="Path for JSON metrics output (default: stdout)",
34
+ )
35
+ @click.option(
36
+ "--smoothing-window",
37
+ type=int,
38
+ default=5,
39
+ help="Smoothing window size (must be odd, >= 3)",
40
+ show_default=True,
41
+ )
42
+ @click.option(
43
+ "--polyorder",
44
+ type=int,
45
+ default=2,
46
+ help=(
47
+ "Polynomial order for Savitzky-Golay smoothing "
48
+ "(2=quadratic, 3=cubic, must be < smoothing-window)"
49
+ ),
50
+ show_default=True,
51
+ )
52
+ @click.option(
53
+ "--outlier-rejection/--no-outlier-rejection",
54
+ default=True,
55
+ help=(
56
+ "Apply RANSAC and median-based outlier rejection to remove tracking glitches "
57
+ "(default: enabled, +1-2%% accuracy)"
58
+ ),
59
+ )
60
+ @click.option(
61
+ "--bilateral-filter/--no-bilateral-filter",
62
+ default=False,
63
+ help=(
64
+ "Use bilateral temporal filter for edge-preserving smoothing "
65
+ "(default: disabled, experimental)"
66
+ ),
67
+ )
68
+ @click.option(
69
+ "--velocity-threshold",
70
+ type=float,
71
+ default=0.02,
72
+ help="Velocity threshold for contact detection (normalized units)",
73
+ show_default=True,
74
+ )
75
+ @click.option(
76
+ "--min-contact-frames",
77
+ type=int,
78
+ default=3,
79
+ help="Minimum frames for valid ground contact",
80
+ show_default=True,
81
+ )
82
+ @click.option(
83
+ "--visibility-threshold",
84
+ type=float,
85
+ default=0.5,
86
+ help="Minimum landmark visibility score (0-1)",
87
+ show_default=True,
88
+ )
89
+ @click.option(
90
+ "--detection-confidence",
91
+ type=float,
92
+ default=0.5,
93
+ help="Pose detection confidence threshold (0-1)",
94
+ show_default=True,
95
+ )
96
+ @click.option(
97
+ "--tracking-confidence",
98
+ type=float,
99
+ default=0.5,
100
+ help="Pose tracking confidence threshold (0-1)",
101
+ show_default=True,
102
+ )
103
+ @click.option(
104
+ "--drop-height",
105
+ type=float,
106
+ default=None,
107
+ help="Height of drop box/platform in meters (e.g., 0.40 for 40cm) - used for calibration",
108
+ )
109
+ @click.option(
110
+ "--use-curvature/--no-curvature",
111
+ default=True,
112
+ help="Use trajectory curvature analysis for refining transitions (default: enabled)",
113
+ )
114
+ @click.option(
115
+ "--kinematic-correction-factor",
116
+ type=float,
117
+ default=1.0,
118
+ help=(
119
+ "Correction factor for kinematic jump height (default: 1.0 = no correction). "
120
+ "Historical testing suggested 1.35, but this is UNVALIDATED. "
121
+ "Use --drop-height for validated measurements."
122
+ ),
123
+ show_default=True,
124
+ )
125
+ def dropjump_analyze(
126
+ video_path: str,
127
+ output: str | None,
128
+ json_output: str | None,
129
+ smoothing_window: int,
130
+ polyorder: int,
131
+ outlier_rejection: bool,
132
+ bilateral_filter: bool,
133
+ velocity_threshold: float,
134
+ min_contact_frames: int,
135
+ visibility_threshold: float,
136
+ detection_confidence: float,
137
+ tracking_confidence: float,
138
+ drop_height: float | None,
139
+ use_curvature: bool,
140
+ kinematic_correction_factor: float,
141
+ ) -> None:
142
+ """
143
+ Analyze drop-jump video to estimate ground contact time, flight time, and jump height.
144
+
145
+ VIDEO_PATH: Path to the input video file
146
+ """
147
+ click.echo(f"Analyzing video: {video_path}", err=True)
148
+
149
+ # Validate parameters
150
+ if smoothing_window < 3:
151
+ click.echo("Error: smoothing-window must be >= 3", err=True)
152
+ sys.exit(1)
153
+
154
+ if smoothing_window % 2 == 0:
155
+ smoothing_window += 1
156
+ click.echo(
157
+ f"Adjusting smoothing-window to {smoothing_window} (must be odd)", err=True
158
+ )
159
+
160
+ if polyorder < 1:
161
+ click.echo("Error: polyorder must be >= 1", err=True)
162
+ sys.exit(1)
163
+
164
+ if polyorder >= smoothing_window:
165
+ click.echo(
166
+ f"Error: polyorder ({polyorder}) must be < smoothing-window ({smoothing_window})",
167
+ err=True,
168
+ )
169
+ sys.exit(1)
170
+
171
+ try:
172
+ # Initialize video processor
173
+ with VideoProcessor(video_path) as video:
174
+ click.echo(
175
+ f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
176
+ f"{video.frame_count} frames",
177
+ err=True,
178
+ )
179
+
180
+ # Initialize pose tracker
181
+ tracker = PoseTracker(
182
+ min_detection_confidence=detection_confidence,
183
+ min_tracking_confidence=tracking_confidence,
184
+ )
185
+
186
+ # Process all frames
187
+ click.echo("Tracking pose landmarks...", err=True)
188
+ landmarks_sequence = []
189
+ frames = []
190
+
191
+ frame_idx = 0
192
+ with click.progressbar(
193
+ length=video.frame_count, label="Processing frames"
194
+ ) as bar:
195
+ while True:
196
+ frame = video.read_frame()
197
+ if frame is None:
198
+ break
199
+
200
+ frames.append(frame)
201
+ landmarks = tracker.process_frame(frame)
202
+ landmarks_sequence.append(landmarks)
203
+
204
+ frame_idx += 1
205
+ bar.update(1)
206
+
207
+ tracker.close()
208
+
209
+ if not landmarks_sequence:
210
+ click.echo("Error: No frames processed", err=True)
211
+ sys.exit(1)
212
+
213
+ # Smooth landmarks
214
+ if outlier_rejection or bilateral_filter:
215
+ if outlier_rejection:
216
+ click.echo(
217
+ "Smoothing landmarks with outlier rejection...", err=True
218
+ )
219
+ if bilateral_filter:
220
+ click.echo(
221
+ "Using bilateral temporal filter for edge-preserving smoothing...",
222
+ err=True,
223
+ )
224
+ smoothed_landmarks = smooth_landmarks_advanced(
225
+ landmarks_sequence,
226
+ window_length=smoothing_window,
227
+ polyorder=polyorder,
228
+ use_outlier_rejection=outlier_rejection,
229
+ use_bilateral=bilateral_filter,
230
+ )
231
+ else:
232
+ click.echo("Smoothing landmarks...", err=True)
233
+ smoothed_landmarks = smooth_landmarks(
234
+ landmarks_sequence, window_length=smoothing_window, polyorder=polyorder
235
+ )
236
+
237
+ # Extract vertical positions from feet
238
+ click.echo("Extracting foot positions...", err=True)
239
+
240
+ position_list: list[float] = []
241
+ visibilities_list: list[float] = []
242
+
243
+ for frame_landmarks in smoothed_landmarks:
244
+ if frame_landmarks:
245
+ # Use average foot position
246
+ _, foot_y = compute_average_foot_position(frame_landmarks)
247
+ position_list.append(foot_y)
248
+
249
+ # Average visibility of foot landmarks
250
+ foot_vis = []
251
+ for key in [
252
+ "left_ankle",
253
+ "right_ankle",
254
+ "left_heel",
255
+ "right_heel",
256
+ ]:
257
+ if key in frame_landmarks:
258
+ foot_vis.append(frame_landmarks[key][2])
259
+ visibilities_list.append(
260
+ float(np.mean(foot_vis)) if foot_vis else 0.0
261
+ )
262
+ else:
263
+ # Use previous position if available, otherwise default
264
+ position_list.append(
265
+ position_list[-1] if position_list else 0.5
266
+ )
267
+ visibilities_list.append(0.0)
268
+
269
+ vertical_positions: np.ndarray = np.array(position_list)
270
+ visibilities: np.ndarray = np.array(visibilities_list)
271
+
272
+ # Detect ground contact
273
+ contact_states = detect_ground_contact(
274
+ vertical_positions,
275
+ velocity_threshold=velocity_threshold,
276
+ min_contact_frames=min_contact_frames,
277
+ visibility_threshold=visibility_threshold,
278
+ visibilities=visibilities,
279
+ window_length=smoothing_window,
280
+ polyorder=polyorder,
281
+ )
282
+
283
+ # Calculate metrics
284
+ click.echo("Calculating metrics...", err=True)
285
+ if drop_height:
286
+ click.echo(
287
+ f"Using drop height calibration: {drop_height}m ({drop_height*100:.0f}cm)",
288
+ err=True,
289
+ )
290
+ metrics = calculate_drop_jump_metrics(
291
+ contact_states,
292
+ vertical_positions,
293
+ video.fps,
294
+ drop_height_m=drop_height,
295
+ velocity_threshold=velocity_threshold,
296
+ smoothing_window=smoothing_window,
297
+ polyorder=polyorder,
298
+ use_curvature=use_curvature,
299
+ kinematic_correction_factor=kinematic_correction_factor,
300
+ )
301
+
302
+ # Output metrics as JSON
303
+ metrics_dict = metrics.to_dict()
304
+ metrics_json = json.dumps(metrics_dict, indent=2)
305
+
306
+ if json_output:
307
+ output_path = Path(json_output)
308
+ output_path.write_text(metrics_json)
309
+ click.echo(f"Metrics written to: {json_output}", err=True)
310
+ else:
311
+ click.echo(metrics_json)
312
+
313
+ # Generate debug video if requested
314
+ if output:
315
+ click.echo(f"Generating debug video: {output}", err=True)
316
+ if video.display_width != video.width or video.display_height != video.height:
317
+ click.echo(
318
+ f"Source video encoded: {video.width}x{video.height}",
319
+ err=True,
320
+ )
321
+ click.echo(
322
+ f"Output dimensions: {video.display_width}x{video.display_height} "
323
+ f"(respecting display aspect ratio)",
324
+ err=True,
325
+ )
326
+ else:
327
+ click.echo(
328
+ f"Output dimensions: {video.width}x{video.height} "
329
+ f"(matching source video aspect ratio)",
330
+ err=True,
331
+ )
332
+ with DebugOverlayRenderer(
333
+ output,
334
+ video.width,
335
+ video.height,
336
+ video.display_width,
337
+ video.display_height,
338
+ video.fps,
339
+ ) as renderer:
340
+ with click.progressbar(
341
+ length=len(frames), label="Rendering frames"
342
+ ) as bar:
343
+ for i, frame in enumerate(frames):
344
+ annotated = renderer.render_frame(
345
+ frame,
346
+ smoothed_landmarks[i],
347
+ contact_states[i],
348
+ i,
349
+ metrics,
350
+ use_com=False,
351
+ )
352
+ renderer.write_frame(annotated)
353
+ bar.update(1)
354
+
355
+ click.echo(f"Debug video saved: {output}", err=True)
356
+
357
+ click.echo("Analysis complete!", err=True)
358
+
359
+ except Exception as e:
360
+ click.echo(f"Error: {str(e)}", err=True)
361
+ sys.exit(1)
@@ -113,6 +113,7 @@ def calculate_drop_jump_metrics(
113
113
  smoothing_window: int = 5,
114
114
  polyorder: int = 2,
115
115
  use_curvature: bool = True,
116
+ kinematic_correction_factor: float = 1.0,
116
117
  ) -> DropJumpMetrics:
117
118
  """
118
119
  Calculate drop-jump metrics from contact states and positions.
@@ -126,6 +127,9 @@ def calculate_drop_jump_metrics(
126
127
  smoothing_window: Window size for velocity/acceleration smoothing (must be odd)
127
128
  polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
128
129
  use_curvature: Whether to use curvature analysis for refining transitions
130
+ kinematic_correction_factor: Correction factor for kinematic jump height calculation
131
+ (default: 1.0 = no correction). Historical testing suggested 1.35, but this is
132
+ unvalidated. Use calibrated measurement (--drop-height) for validated results.
129
133
 
130
134
  Returns:
131
135
  DropJumpMetrics object with calculated values
@@ -315,18 +319,30 @@ def calculate_drop_jump_metrics(
315
319
  metrics.jump_height = height_normalized * scale_factor
316
320
  metrics.jump_height_kinematic = jump_height_kinematic
317
321
  else:
318
- # Use empirical correction factor for kinematic method
319
- # Testing shows kinematic method underestimates by ~29% due to:
320
- # 1. Contact detection timing (detects landing slightly early)
321
- # 2. Frame rate limitations (30 fps = 33ms intervals)
322
- # 3. Foot position vs center of mass difference
323
- kinematic_correction_factor = 1.35
322
+ # Apply kinematic correction factor to kinematic method
323
+ # ⚠️ WARNING: Kinematic correction factor is EXPERIMENTAL and UNVALIDATED
324
+ #
325
+ # The kinematic method h = (g × t²) / 8 may underestimate jump height due to:
326
+ # 1. Contact detection timing (may detect landing slightly early/late)
327
+ # 2. Frame rate limitations (30 fps = 33ms intervals between samples)
328
+ # 3. Foot position vs center of mass difference (feet land before CoM peak)
329
+ #
330
+ # Default correction factor is 1.0 (no correction). Historical testing
331
+ # suggested 1.35 could improve accuracy, but:
332
+ # - This value has NOT been validated against gold standards
333
+ # (force plates, motion capture)
334
+ # - The actual correction needed may vary by athlete, jump type, and video quality
335
+ # - Using a correction factor without validation is experimental
336
+ #
337
+ # For validated measurements, use:
338
+ # - Calibrated measurement with --drop-height parameter
339
+ # - Or compare against validated measurement systems
324
340
  metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
325
341
  metrics.jump_height_kinematic = jump_height_kinematic
326
342
  else:
327
343
  # Fallback to kinematic if no position data
328
344
  if drop_height_m is None:
329
- kinematic_correction_factor = 1.35
345
+ # Apply kinematic correction factor (see detailed comment above)
330
346
  metrics.jump_height = jump_height_kinematic * kinematic_correction_factor
331
347
  else:
332
348
  metrics.jump_height = jump_height_kinematic
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.2.0
3
+ Version: 0.4.0
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
5
  Project-URL: Homepage, https://github.com/feniix/kinemotion
6
6
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -33,9 +33,7 @@ A video-based kinematic analysis tool for athletic performance. Analyzes side-vi
33
33
  ## Features
34
34
 
35
35
  - **Automatic pose tracking** using MediaPipe Pose landmarks
36
- - **Center of mass (CoM) tracking** - biomechanical CoM estimation for 3-5% accuracy improvement
37
- - **Adaptive velocity thresholding** - auto-calibrates from video baseline for 2-3% additional accuracy
38
- - **Ground contact detection** based on velocity and position (feet or CoM)
36
+ - **Ground contact detection** based on foot velocity and position
39
37
  - **Derivative-based velocity** - smooth velocity calculation from position trajectory
40
38
  - **Trajectory curvature analysis** - acceleration patterns for refined event detection
41
39
  - **Sub-frame interpolation** - precise timing beyond frame boundaries for improved accuracy
@@ -44,13 +42,25 @@ A video-based kinematic analysis tool for athletic performance. Analyzes side-vi
44
42
  - Ground contact time (ms)
45
43
  - Flight time (ms)
46
44
  - Jump height (m) - with optional calibration using drop box height
47
- - **Calibrated measurements** - use known drop height for ~88% accuracy (vs 71% uncalibrated)
48
- - With CoM tracking: potential for 91-93% accuracy
49
- - With adaptive thresholding + CoM: potential for 93-96% accuracy
45
+ - **Calibrated measurements** - use known drop height for theoretically improved accuracy (⚠️ accuracy claims unvalidated)
50
46
  - **JSON output** for easy integration with other tools
51
47
  - **Optional debug video** with visual overlays showing contact states and landmarks
52
48
  - **Configurable parameters** for smoothing, thresholds, and detection
53
49
 
50
+ **Note**: Drop jump analysis uses foot-based tracking with fixed velocity thresholds. Center of mass (CoM) tracking and adaptive thresholding (available in `core/` modules) require longer videos (~5+ seconds) with a 3-second standing baseline, making them unsuitable for typical drop jump videos (~3 seconds). These features may be available in future jump types like CMJ (countermovement jump).
51
+
52
+ ## Validation Status
53
+
54
+ ⚠️ **IMPORTANT**: This tool's accuracy has **not been validated** against gold standard measurements (force plates, 3D motion capture). All accuracy claims and improvement estimates are theoretical and based on algorithmic considerations, not empirical testing.
55
+
56
+ The tool provides consistent measurements and may be useful for:
57
+
58
+ - Tracking relative changes in an individual athlete over time
59
+ - Comparing similar jumps under controlled conditions
60
+ - Exploratory analysis and research
61
+
62
+ For clinical, research, or performance assessment requiring validated accuracy, this tool should be compared against validated measurement systems before use.
63
+
54
64
  ## Setup
55
65
 
56
66
  ### Prerequisites
@@ -67,13 +77,13 @@ asdf plugin add python
67
77
  asdf plugin add uv
68
78
  ```
69
79
 
70
- 2. **Install versions specified in `.tool-versions`**:
80
+ 1. **Install versions specified in `.tool-versions`**:
71
81
 
72
82
  ```bash
73
83
  asdf install
74
84
  ```
75
85
 
76
- 3. **Install project dependencies using uv**:
86
+ 1. **Install project dependencies using uv**:
77
87
 
78
88
  ```bash
79
89
  uv sync
@@ -120,46 +130,11 @@ kinemotion dropjump-analyze drop-jump.mp4 \
120
130
  --output debug.mp4
121
131
  ```
122
132
 
123
- ### Center of Mass Tracking (Improved Accuracy)
124
-
125
- Use CoM tracking for 3-5% accuracy improvement:
126
-
127
- ```bash
128
- # Basic CoM tracking
129
- kinemotion dropjump-analyze video.mp4 --use-com
130
-
131
- # CoM tracking with calibration for maximum accuracy
132
- kinemotion dropjump-analyze drop-jump.mp4 \
133
- --use-com \
134
- --drop-height 0.40 \
135
- --output debug_com.mp4 \
136
- --json-output metrics.json
137
- ```
138
-
139
- ### Adaptive Thresholding (Auto-Calibration)
140
-
141
- Auto-calibrate velocity threshold from video baseline for 2-3% accuracy improvement:
142
-
143
- ```bash
144
- # Basic adaptive thresholding
145
- kinemotion dropjump-analyze video.mp4 --adaptive-threshold
146
-
147
- # Combined with CoM for maximum accuracy
148
- kinemotion dropjump-analyze video.mp4 \
149
- --adaptive-threshold \
150
- --use-com \
151
- --drop-height 0.40 \
152
- --output debug.mp4 \
153
- --json-output metrics.json
154
- ```
155
-
156
133
  ### Full Example (Maximum Accuracy)
157
134
 
158
135
  ```bash
159
- # With all accuracy improvements enabled (~93-96% accuracy)
136
+ # With all accuracy improvements enabled
160
137
  kinemotion dropjump-analyze jump.mp4 \
161
- --adaptive-threshold \
162
- --use-com \
163
138
  --outlier-rejection \
164
139
  --drop-height 0.40 \
165
140
  --output debug.mp4 \
@@ -169,8 +144,6 @@ kinemotion dropjump-analyze jump.mp4 \
169
144
 
170
145
  # Alternative: With experimental bilateral filter
171
146
  kinemotion dropjump-analyze jump.mp4 \
172
- --adaptive-threshold \
173
- --use-com \
174
147
  --outlier-rejection \
175
148
  --bilateral-filter \
176
149
  --drop-height 0.40 \
@@ -183,6 +156,7 @@ kinemotion dropjump-analyze jump.mp4 \
183
156
  > **📖 For detailed explanations of all parameters, see [docs/PARAMETERS.md](docs/PARAMETERS.md)**
184
157
  >
185
158
  > This section provides a quick reference. The full guide includes:
159
+ >
186
160
  > - How each parameter works internally
187
161
  > - When and why to adjust them
188
162
  > - Scenario-based recommendations
@@ -270,38 +244,10 @@ kinemotion dropjump-analyze jump.mp4 \
270
244
  - `--drop-height <float>` (optional)
271
245
  - Height of drop box/platform in meters (e.g., 0.40 for 40cm)
272
246
  - Enables calibrated jump height measurement using known drop height
273
- - Improves accuracy from ~71% to ~88%
247
+ - Theoretically improves accuracy (⚠️ unvalidated — requires empirical validation)
274
248
  - Only applicable for drop jumps (box → drop → landing → jump)
275
249
  - **Tip**: Measure your box height accurately for best results
276
250
 
277
- ### Tracking Method
278
-
279
- - `--use-com / --use-feet` (default: --use-feet)
280
- - Choose between center of mass (CoM) or foot-based tracking
281
- - **CoM tracking** (`--use-com`): Uses biomechanical CoM estimation with Dempster's body segment parameters
282
- - Head: 8%, Trunk: 50%, Thighs: 20%, Legs: 10%, Feet: 3% of body mass
283
- - Tracks true body movement instead of foot position
284
- - Reduces error from foot dorsiflexion/plantarflexion during flight
285
- - **Accuracy improvement**: +3-5% over foot-based tracking
286
- - **Foot tracking** (`--use-feet`): Traditional method using average ankle/heel positions
287
- - Faster, simpler, well-tested baseline method
288
- - **Tip**: Use `--use-com` for maximum accuracy, especially for drop jumps
289
-
290
- ### Velocity Threshold Mode
291
-
292
- - `--adaptive-threshold / --fixed-threshold` (default: --fixed-threshold)
293
- - Choose between adaptive or fixed velocity threshold for contact detection
294
- - **Adaptive threshold** (`--adaptive-threshold`): Auto-calibrates from video baseline
295
- - Analyzes first 3 seconds of video (assumed relatively stationary)
296
- - Computes noise floor as 95th percentile of baseline velocity
297
- - Sets threshold as 1.5× noise floor (bounded: 0.005-0.05)
298
- - Adapts to camera distance, lighting, frame rate, and compression artifacts
299
- - **Accuracy improvement**: +2-3% by eliminating manual tuning
300
- - **Fixed threshold** (`--fixed-threshold`): Uses `--velocity-threshold` value (default: 0.02)
301
- - Consistent, predictable behavior
302
- - Requires manual tuning for optimal results
303
- - **Tip**: Use `--adaptive-threshold` for varying video conditions or when unsure of optimal threshold
304
-
305
251
  ### Trajectory Analysis
306
252
 
307
253
  - `--use-curvature / --no-curvature` (default: --use-curvature)
@@ -337,6 +283,7 @@ kinemotion dropjump-analyze jump.mp4 \
337
283
  ```
338
284
 
339
285
  **Fields**:
286
+
340
287
  - `jump_height_m`: Primary jump height measurement (calibrated if --drop-height provided, otherwise corrected kinematic)
341
288
  - `jump_height_kinematic_m`: Kinematic estimate from flight time: h = (g × t²) / 8
342
289
  - `jump_height_trajectory_normalized`: Position-based measurement in normalized coordinates (0-1 range)
@@ -348,6 +295,7 @@ kinemotion dropjump-analyze jump.mp4 \
348
295
  ### Debug Video
349
296
 
350
297
  The debug video includes:
298
+
351
299
  - **Green circle**: Average foot position when on ground
352
300
  - **Red circle**: Average foot position when in air
353
301
  - **Yellow circles**: Individual foot landmarks (ankles, heels)
@@ -363,6 +311,7 @@ The debug video includes:
363
311
  **Symptoms**: Erratic landmark positions, missing detections, incorrect contact states
364
312
 
365
313
  **Solutions**:
314
+
366
315
  1. **Check video quality**: Ensure the athlete is clearly visible in profile view
367
316
  2. **Increase smoothing**: Use `--smoothing-window 7` or higher
368
317
  3. **Adjust detection confidence**: Try `--detection-confidence 0.6` or `--tracking-confidence 0.6`
@@ -373,6 +322,7 @@ The debug video includes:
373
322
  **Symptoms**: "No frames processed" error or all null landmarks
374
323
 
375
324
  **Solutions**:
325
+
376
326
  1. **Verify video format**: OpenCV must be able to read the video
377
327
  2. **Check framing**: Ensure full body is visible in side view
378
328
  3. **Lower confidence thresholds**: Try `--detection-confidence 0.3 --tracking-confidence 0.3`
@@ -383,6 +333,7 @@ The debug video includes:
383
333
  **Symptoms**: Wrong ground contact times, flight phases not detected
384
334
 
385
335
  **Solutions**:
336
+
386
337
  1. **Generate debug video**: Visualize contact states to diagnose the issue
387
338
  2. **Adjust velocity threshold**:
388
339
  - If missing contacts: decrease to `--velocity-threshold 0.01`
@@ -395,8 +346,9 @@ The debug video includes:
395
346
  **Symptoms**: Unrealistic jump height values
396
347
 
397
348
  **Solutions**:
349
+
398
350
  1. **Use calibration**: For drop jumps, add `--drop-height` parameter with box height in meters (e.g., `--drop-height 0.40`)
399
- - This improves accuracy from ~71% to ~88%
351
+ - Theoretically improves accuracy (⚠️ unvalidated)
400
352
  2. **Verify flight time detection**: Check `flight_start_frame` and `flight_end_frame` in JSON
401
353
  3. **Compare measurements**: JSON output includes both `jump_height_m` (primary) and `jump_height_kinematic_m` (kinematic-only)
402
354
  4. **Check for drop jump detection**: If doing a drop jump, ensure first phase is elevated enough (>5% of frame height)
@@ -406,18 +358,15 @@ The debug video includes:
406
358
  **Symptoms**: Cannot write debug video or corrupted output
407
359
 
408
360
  **Solutions**:
361
+
409
362
  1. **Install additional codecs**: Ensure OpenCV has proper video codec support
410
363
  2. **Try different output format**: Use `.avi` extension instead of `.mp4`
411
364
  3. **Check output path**: Ensure write permissions for output directory
412
365
 
413
366
  ## How It Works
414
367
 
415
- 1. **Pose Tracking**: MediaPipe extracts 2D pose landmarks (13 points: feet, ankles, knees, hips, shoulders, nose) from each frame
416
- 2. **Position Calculation**: Two methods available:
417
- - **Foot-based** (default): Averages ankle, heel, and foot index positions
418
- - **CoM-based** (--use-com): Biomechanical center of mass using Dempster's body segment parameters
419
- - Head: 8%, Trunk: 50%, Thighs: 20%, Legs: 10%, Feet: 3% of body mass
420
- - Weighted average reduces error from foot movement artifacts
368
+ 1. **Pose Tracking**: MediaPipe extracts 2D pose landmarks (foot points: ankles, heels, foot indices) from each frame
369
+ 2. **Position Calculation**: Averages ankle, heel, and foot index positions to determine foot location
421
370
  3. **Smoothing**: Savitzky-Golay filter reduces tracking jitter while preserving motion dynamics
422
371
  4. **Contact Detection**: Analyzes vertical position velocity to identify ground contact vs. flight phases
423
372
  5. **Phase Identification**: Finds continuous ground contact and flight periods
@@ -438,13 +387,14 @@ The debug video includes:
438
387
  - Ground contact time = contact phase duration (using fractional frames)
439
388
  - Flight time = flight phase duration (using fractional frames)
440
389
  - Jump height = calibrated position-based measurement (if --drop-height provided)
441
- - Fallback: corrected kinematic estimate (g × t²) / 8 × 1.35
390
+ - Fallback: kinematic estimate (g × t²) / 8 with optional empirical correction factor (⚠️ unvalidated)
442
391
 
443
392
  ## Development
444
393
 
445
394
  ### Code Quality Standards
446
395
 
447
396
  This project enforces strict code quality standards:
397
+
448
398
  - **Type safety**: Full mypy strict mode compliance with complete type annotations
449
399
  - **Linting**: Comprehensive ruff checks (pycodestyle, pyflakes, isort, pep8-naming, etc.)
450
400
  - **Formatting**: Black code style
@@ -492,7 +442,7 @@ See [CLAUDE.md](CLAUDE.md) for detailed development guidelines.
492
442
  ## Limitations
493
443
 
494
444
  - **2D Analysis**: Only analyzes motion in the camera's view plane
495
- - **Calibration accuracy**: With drop height calibration, achieves ~88% accuracy; without calibration ~71% accuracy
445
+ - **Validation Status**: ⚠️ Accuracy has not been validated against gold standard measurements (force plates, 3D motion capture)
496
446
  - **Side View Required**: Must film from the side to accurately track vertical motion
497
447
  - **Single Athlete**: Designed for analyzing one athlete at a time
498
448
  - **Timing precision**:
@@ -1,16 +1,17 @@
1
1
  kinemotion/__init__.py,sha256=JhS0ZTgcTdcMH5WcIyWxEqZJPOoBUSKX8tT8hsG-xWk,98
2
- kinemotion/cli.py,sha256=afQiAWBQBbLM2SZnwFPZ0gr_jjVIQtkhYh5cHYWPeco,13532
2
+ kinemotion/cli.py,sha256=2IFA2_TE9a5zBtmGVzv5SnX39w7yPuBlw42dL7ca25U,402
3
3
  kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
4
4
  kinemotion/core/filtering.py,sha256=QtZRz8KlcLtR4dLRFH9sGqRQsUo_Dqcr1ZJIyWwPlcM,11266
5
5
  kinemotion/core/pose.py,sha256=5Dhw3LqX3STR-eLb5JAQkxhS-dd0PqGytBWnaQ66nWc,8391
6
6
  kinemotion/core/smoothing.py,sha256=z2qnpEGohDm6ZUrzqRXGLp189-NJL0ngKqYwXkU-iW0,13166
7
7
  kinemotion/core/video_io.py,sha256=LD7qmHIqUYomGxS1kxz6khugIbFo2y4tDSY7XqJQCOM,4581
8
8
  kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
9
- kinemotion/dropjump/analysis.py,sha256=MsEnho8WeGFxStHpKVGbj7gzdb3MUfozksmlAReAkI0,18026
9
+ kinemotion/dropjump/analysis.py,sha256=5lyTJFiItqmSHw96m8HmFrl7N6nCVQZnERWU2prjn9Y,18719
10
+ kinemotion/dropjump/cli.py,sha256=URQguQ6tmDofWagGydXzvc4NPXOCfOGX-yyFgvLV6lM,11954
10
11
  kinemotion/dropjump/debug_overlay.py,sha256=s7hwYLA2JenRYOPD2GNmx3kATFseeZT3pW8jxiVgys8,8621
11
- kinemotion/dropjump/kinematics.py,sha256=bM1A6LGSDWbNOrRa_x2v9hXJOwxef69h3R_0naLZ4Zw,15092
12
- kinemotion-0.2.0.dist-info/METADATA,sha256=pJ1KUIaG6F7xljGjjmwLALM2j9IHEAE5eJ4F6lWB4Lc,20616
13
- kinemotion-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
14
- kinemotion-0.2.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
15
- kinemotion-0.2.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
16
- kinemotion-0.2.0.dist-info/RECORD,,
12
+ kinemotion/dropjump/kinematics.py,sha256=wcXaGUrb1kjSTus0KEwgdDzdkJRMy-umAzfStGq0_t4,16258
13
+ kinemotion-0.4.0.dist-info/METADATA,sha256=gbmHKdrYweUTafp6utQxnVAqHEgx4_EWNPEua86_kBU,18693
14
+ kinemotion-0.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
15
+ kinemotion-0.4.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
16
+ kinemotion-0.4.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
17
+ kinemotion-0.4.0.dist-info/RECORD,,