kinemotion-0.41.1-py3-none-any.whl → kinemotion-0.41.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

kinemotion/api.py CHANGED
@@ -178,6 +178,7 @@ def _process_all_frames(
     tracker: PoseTracker,
     verbose: bool,
     timer: PerformanceTimer | None = None,
+    close_tracker: bool = True,
 ) -> tuple[list, list]:
     """Process all frames from video and extract pose landmarks.
 
@@ -186,6 +187,7 @@ def _process_all_frames(
         tracker: Pose tracker for landmark detection
         verbose: Print progress messages
         timer: Optional PerformanceTimer for measuring operations
+        close_tracker: Whether to close the tracker after processing (default: True)
 
     Returns:
         Tuple of (frames, landmarks_sequence)
@@ -219,7 +221,8 @@ def _process_all_frames(
         landmarks = tracker.process_frame(frame)
         landmarks_sequence.append(landmarks)
 
-    tracker.close()
+    if close_tracker:
+        tracker.close()
 
     if not landmarks_sequence:
         raise ValueError("No frames could be processed from video")
@@ -508,6 +511,7 @@ def process_dropjump_video(
     tracking_confidence: float | None = None,
     verbose: bool = False,
     timer: PerformanceTimer | None = None,
+    pose_tracker: "PoseTracker | None" = None,
 ) -> DropJumpMetrics:
     """
     Process a single drop jump video and return metrics.
@@ -528,6 +532,7 @@ def process_dropjump_video(
         tracking_confidence: Optional override for pose tracking confidence
         verbose: Print processing details
         timer: Optional PerformanceTimer for measuring operations
+        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
 
     Returns:
         DropJumpMetrics object containing analysis results
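
Passing a pre-initialized tracker lets batch callers avoid re-initializing the MediaPipe model for every video. A hedged sketch of such a loop; the import paths and file names are illustrative assumptions (the diff only shows that process_dropjump_video lives in kinemotion/api.py and that PoseTracker accepts the confidence keywords used below):

    from kinemotion.api import process_dropjump_video  # assumed import path
    from kinemotion.core.pose import PoseTracker  # hypothetical module location

    tracker = PoseTracker(min_detection_confidence=0.5, min_tracking_confidence=0.5)
    try:
        for path in ("athlete1.mp4", "athlete2.mp4"):  # hypothetical files
            metrics = process_dropjump_video(path, pose_tracker=tracker)
            print(path, metrics)
    finally:
        tracker.close()  # the API leaves caller-supplied trackers open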
@@ -554,15 +559,9 @@ def process_dropjump_video(
     # Convert quality string to enum
     quality_preset = _parse_quality_preset(quality)
 
-    # Initialize video processor
+    # Load video
     with timer.measure("video_initialization"):
         with VideoProcessor(video_path, timer=timer) as video:
-            if verbose:
-                print(
-                    f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
-                    f"{video.frame_count} frames"
-                )
-
             # Determine detection/tracking confidence levels
             detection_conf, tracking_conf = _determine_confidence_levels(
                 quality_preset, detection_confidence, tracking_confidence
@@ -571,13 +570,21 @@ def process_dropjump_video(
             # Process all frames with pose tracking
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
-            tracker = PoseTracker(
-                min_detection_confidence=detection_conf,
-                min_tracking_confidence=tracking_conf,
-                timer=timer,
-            )
+
+            # Use provided tracker or create new one
+            tracker = pose_tracker
+            should_close_tracker = False
+
+            if tracker is None:
+                tracker = PoseTracker(
+                    min_detection_confidence=detection_conf,
+                    min_tracking_confidence=tracking_conf,
+                    timer=timer,
+                )
+                should_close_tracker = True
+
             frames, landmarks_sequence = _process_all_frames(
-                video, tracker, verbose, timer
+                video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
             # Analyze video characteristics and auto-tune parameters
@@ -671,72 +678,47 @@ def process_dropjump_video(
                 phase_count=phase_count,
             )
 
-            # Build complete metadata
-            with timer.measure("metadata_building"):
-                processing_time = time.time() - start_time
-
-                video_info = VideoInfo(
-                    source_path=video_path,
-                    fps=video.fps,
-                    width=video.width,
-                    height=video.height,
-                    duration_s=video.frame_count / video.fps,
-                    frame_count=video.frame_count,
-                    codec=video.codec,
-                )
-
-                # Check if drop start was auto-detected
-                drop_frame = None
-                if drop_start_frame is None and metrics.drop_start_frame is not None:
-                    # Auto-detected drop start from box
-                    drop_frame = metrics.drop_start_frame
-                elif drop_start_frame is not None:
-                    # Manual drop start provided
-                    drop_frame = drop_start_frame
-
-                algorithm_config = AlgorithmConfig(
-                    detection_method="forward_search",
-                    tracking_method="mediapipe_pose",
-                    model_complexity=1,
-                    smoothing=SmoothingConfig(
-                        window_size=params.smoothing_window,
-                        polynomial_order=params.polyorder,
-                        use_bilateral_filter=params.bilateral_filter,
-                        use_outlier_rejection=params.outlier_rejection,
-                    ),
-                    detection=DetectionConfig(
-                        velocity_threshold=params.velocity_threshold,
-                        min_contact_frames=params.min_contact_frames,
-                        visibility_threshold=params.visibility_threshold,
-                        use_curvature_refinement=params.use_curvature,
-                    ),
-                    drop_detection=DropDetectionConfig(
-                        auto_detect_drop_start=(drop_start_frame is None),
-                        detected_drop_frame=drop_frame,
-                        min_stationary_duration_s=0.5,
-                    ),
-                )
-
-                # Convert timer metrics to human-readable stage names
-                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
-
-                processing_info = ProcessingInfo(
-                    version=get_kinemotion_version(),
-                    timestamp=create_timestamp(),
-                    quality_preset=quality_preset.value,
-                    processing_time_s=processing_time,
-                    timing_breakdown=stage_times,
-                )
-
-                result_metadata = ResultMetadata(
-                    quality=quality_result,
-                    video=video_info,
-                    processing=processing_info,
-                    algorithm=algorithm_config,
-                )
+            # Build algorithm configuration early (but attach metadata later)
+            drop_frame = None
+            if drop_start_frame is None and metrics.drop_start_frame is not None:
+                # Auto-detected drop start from box
+                drop_frame = metrics.drop_start_frame
+            elif drop_start_frame is not None:
+                # Manual drop start provided
+                drop_frame = drop_start_frame
+
+            algorithm_config = AlgorithmConfig(
+                detection_method="forward_search",
+                tracking_method="mediapipe_pose",
+                model_complexity=1,
+                smoothing=SmoothingConfig(
+                    window_size=params.smoothing_window,
+                    polynomial_order=params.polyorder,
+                    use_bilateral_filter=params.bilateral_filter,
+                    use_outlier_rejection=params.outlier_rejection,
+                ),
+                detection=DetectionConfig(
+                    velocity_threshold=params.velocity_threshold,
+                    min_contact_frames=params.min_contact_frames,
+                    visibility_threshold=params.visibility_threshold,
+                    use_curvature_refinement=params.use_curvature,
+                ),
+                drop_detection=DropDetectionConfig(
+                    auto_detect_drop_start=(drop_start_frame is None),
+                    detected_drop_frame=drop_frame,
+                    min_stationary_duration_s=0.5,
+                ),
+            )
 
-            # Attach complete metadata to metrics
-            metrics.result_metadata = result_metadata
+            video_info = VideoInfo(
+                source_path=video_path,
+                fps=video.fps,
+                width=video.width,
+                height=video.height,
+                duration_s=video.frame_count / video.fps,
+                frame_count=video.frame_count,
+                codec=video.codec,
+            )
 
             if verbose and quality_result.warnings:
                 print("\n⚠️ Quality Warnings:")
@@ -744,18 +726,56 @@ def process_dropjump_video(
                     print(f" - {warning}")
                 print()
 
-            # Generate outputs (JSON and debug video)
-            _generate_dropjump_outputs(
-                metrics,
-                json_output,
-                output_video,
-                frames,
-                smoothed_landmarks,
-                contact_states,
-                video,
-                verbose,
-                timer,
-            )
+            # Generate debug video (but not JSON yet - we need to attach metadata first)
+            if output_video:
+                if verbose:
+                    print(f"Generating debug video: {output_video}")
+
+                if timer:
+                    with timer.measure("debug_video_generation"):
+                        with DebugOverlayRenderer(
+                            output_video,
+                            video.width,
+                            video.height,
+                            video.display_width,
+                            video.display_height,
+                            video.fps,
+                        ) as renderer:
+                            for i, frame in enumerate(frames):
+                                annotated = renderer.render_frame(
+                                    frame,
+                                    smoothed_landmarks[i],
+                                    contact_states[i],
+                                    i,
+                                    metrics,
+                                    use_com=False,
+                                )
+                                renderer.write_frame(annotated)
+                        # Capture re-encoding duration separately
+                        with timer.measure("debug_video_reencode"):
+                            pass  # Re-encoding happens in context manager __exit__
+                else:
+                    with DebugOverlayRenderer(
+                        output_video,
+                        video.width,
+                        video.height,
+                        video.display_width,
+                        video.display_height,
+                        video.fps,
+                    ) as renderer:
+                        for i, frame in enumerate(frames):
+                            annotated = renderer.render_frame(
+                                frame,
+                                smoothed_landmarks[i],
+                                contact_states[i],
+                                i,
+                                metrics,
+                                use_com=False,
+                            )
+                            renderer.write_frame(annotated)
+
+                if verbose:
+                    print(f"Debug video saved: {output_video}")
 
             # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
@@ -768,13 +788,57 @@ def process_dropjump_video(
                 for issue in validation_result.issues:
                     print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
 
+            # NOW create ProcessingInfo with complete timing breakdown
+            # (includes debug video generation timing)
+            processing_time = time.time() - start_time
+            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+
+            processing_info = ProcessingInfo(
+                version=get_kinemotion_version(),
+                timestamp=create_timestamp(),
+                quality_preset=quality_preset.value,
+                processing_time_s=processing_time,
+                timing_breakdown=stage_times,
+            )
+
+            result_metadata = ResultMetadata(
+                quality=quality_result,
+                video=video_info,
+                processing=processing_info,
+                algorithm=algorithm_config,
+            )
+
+            # Attach complete metadata to metrics
+            metrics.result_metadata = result_metadata
+
+            # NOW write JSON after metadata is attached
+            if json_output:
+                if timer:
+                    with timer.measure("json_serialization"):
+                        output_path = Path(json_output)
+                        metrics_dict = metrics.to_dict()
+                        import json
+
+                        json_str = json.dumps(metrics_dict, indent=2)
+                        output_path.write_text(json_str)
+                else:
+                    output_path = Path(json_output)
+                    metrics_dict = metrics.to_dict()
+                    import json
+
+                    json_str = json.dumps(metrics_dict, indent=2)
+                    output_path.write_text(json_str)
+
+                if verbose:
+                    print(f"Metrics written to: {json_output}")
+
             # Print timing summary if verbose
             if verbose:
                 total_time = time.time() - start_time
-                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+                stage_times_verbose = _convert_timer_to_stage_names(timer.get_metrics())
 
                 print("\n=== Timing Summary ===")
-                for stage, duration in stage_times.items():
+                for stage, duration in stage_times_verbose.items():
                     percentage = (duration / total_time) * 100
                     dur_ms = duration * 1000
                     print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
@@ -1032,6 +1096,7 @@ def process_cmj_video(
     tracking_confidence: float | None = None,
     verbose: bool = False,
     timer: PerformanceTimer | None = None,
+    pose_tracker: "PoseTracker | None" = None,
 ) -> CMJMetrics:
     """
     Process a single CMJ video and return metrics.
@@ -1053,6 +1118,7 @@ def process_cmj_video(
         tracking_confidence: Optional override for pose tracking confidence
         verbose: Print processing details
         timer: Optional PerformanceTimer for measuring operations
+        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
 
     Returns:
         CMJMetrics object containing analysis results
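
process_cmj_video gains the same pose_tracker parameter, so one tracker instance can serve a mixed batch of jump types. A hedged sketch, reusing the assumed imports from the drop-jump example above:

    tracker = PoseTracker(min_detection_confidence=0.5, min_tracking_confidence=0.5)
    try:
        dj_metrics = process_dropjump_video("dropjump.mp4", pose_tracker=tracker)
        cmj_metrics = process_cmj_video("cmj.mp4", pose_tracker=tracker)
    finally:
        tracker.close()  # neither call closes a caller-supplied tracker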
@@ -1098,13 +1164,21 @@ def process_cmj_video(
             # Track all frames
             if verbose:
                 print("Processing all frames with MediaPipe pose tracking...")
-            tracker = PoseTracker(
-                min_detection_confidence=det_conf,
-                min_tracking_confidence=track_conf,
-                timer=timer,
-            )
+
+            # Use provided tracker or create new one
+            tracker = pose_tracker
+            should_close_tracker = False
+
+            if tracker is None:
+                tracker = PoseTracker(
+                    min_detection_confidence=det_conf,
+                    min_tracking_confidence=track_conf,
+                    timer=timer,
+                )
+                should_close_tracker = True
+
             frames, landmarks_sequence = _process_all_frames(
-                video, tracker, verbose, timer
+                video, tracker, verbose, timer, close_tracker=should_close_tracker
             )
 
             # Auto-tune parameters
@@ -1216,59 +1290,35 @@ def process_cmj_video(
                 phase_count=phase_count,
             )
 
-            # Build complete metadata
-            with timer.measure("metadata_building"):
-                processing_time = time.time() - start_time
-
-                video_info = VideoInfo(
-                    source_path=video_path,
-                    fps=video.fps,
-                    width=video.width,
-                    height=video.height,
-                    duration_s=video.frame_count / video.fps,
-                    frame_count=video.frame_count,
-                    codec=video.codec,
-                )
-
-                # Convert timer metrics to human-readable stage names
-                stage_times = _convert_timer_to_stage_names(timer.get_metrics())
-
-                processing_info = ProcessingInfo(
-                    version=get_kinemotion_version(),
-                    timestamp=create_timestamp(),
-                    quality_preset=quality_preset.value,
-                    processing_time_s=processing_time,
-                    timing_breakdown=stage_times,
-                )
-
-                algorithm_config = AlgorithmConfig(
-                    detection_method="backward_search",
-                    tracking_method="mediapipe_pose",
-                    model_complexity=1,
-                    smoothing=SmoothingConfig(
-                        window_size=params.smoothing_window,
-                        polynomial_order=params.polyorder,
-                        use_bilateral_filter=params.bilateral_filter,
-                        use_outlier_rejection=params.outlier_rejection,
-                    ),
-                    detection=DetectionConfig(
-                        velocity_threshold=params.velocity_threshold,
-                        min_contact_frames=params.min_contact_frames,
-                        visibility_threshold=params.visibility_threshold,
-                        use_curvature_refinement=params.use_curvature,
-                    ),
-                    drop_detection=None,  # CMJ doesn't have drop detection
-                )
-
-                result_metadata = ResultMetadata(
-                    quality=quality_result,
-                    video=video_info,
-                    processing=processing_info,
-                    algorithm=algorithm_config,
-                )
+            # Build algorithm config early (but attach metadata later)
+            algorithm_config = AlgorithmConfig(
+                detection_method="backward_search",
+                tracking_method="mediapipe_pose",
+                model_complexity=1,
+                smoothing=SmoothingConfig(
+                    window_size=params.smoothing_window,
+                    polynomial_order=params.polyorder,
+                    use_bilateral_filter=params.bilateral_filter,
+                    use_outlier_rejection=params.outlier_rejection,
+                ),
+                detection=DetectionConfig(
+                    velocity_threshold=params.velocity_threshold,
+                    min_contact_frames=params.min_contact_frames,
+                    visibility_threshold=params.visibility_threshold,
+                    use_curvature_refinement=params.use_curvature,
+                ),
+                drop_detection=None,  # CMJ doesn't have drop detection
+            )
 
-            # Attach complete metadata to metrics
-            metrics.result_metadata = result_metadata
+            video_info = VideoInfo(
+                source_path=video_path,
+                fps=video.fps,
+                width=video.width,
+                height=video.height,
+                duration_s=video.frame_count / video.fps,
+                frame_count=video.frame_count,
+                codec=video.codec,
+            )
 
             if verbose and quality_result.warnings:
                 print("\n⚠️ Quality Warnings:")
@@ -1276,21 +1326,46 @@ def process_cmj_video(
                     print(f" - {warning}")
                 print()
 
-            # Generate outputs if requested
-            _generate_cmj_outputs(
-                output_video,
-                json_output,
-                metrics,
-                frames,
-                smoothed_landmarks,
-                video.width,
-                video.height,
-                video.display_width,
-                video.display_height,
-                video.fps,
-                verbose,
-                timer,
-            )
+            # Generate debug video (but not JSON yet - we need to attach metadata first)
+            if output_video:
+                if verbose:
+                    print(f"Generating debug video: {output_video}")
+
+                if timer:
+                    with timer.measure("debug_video_generation"):
+                        with CMJDebugOverlayRenderer(
+                            output_video,
+                            video.width,
+                            video.height,
+                            video.display_width,
+                            video.display_height,
+                            video.fps,
+                        ) as renderer:
+                            for i, frame in enumerate(frames):
+                                annotated = renderer.render_frame(
+                                    frame, smoothed_landmarks[i], i, metrics
+                                )
+                                renderer.write_frame(annotated)
+                        # Capture re-encoding duration separately
+                        with timer.measure("debug_video_reencode"):
+                            pass  # Re-encoding happens in context manager __exit__
+                else:
+                    with CMJDebugOverlayRenderer(
+                        output_video,
+                        video.width,
+                        video.height,
+                        video.display_width,
+                        video.display_height,
+                        video.fps,
+                    ) as renderer:
+                        for i, frame in enumerate(frames):
+                            annotated = renderer.render_frame(
+                                frame, smoothed_landmarks[i], i, metrics
+                            )
+                            renderer.write_frame(annotated)
+
+                if verbose:
+                    print(f"Debug video saved: {output_video}")
 
             # Validate metrics against physiological bounds
             with timer.measure("metrics_validation"):
@@ -1298,6 +1373,50 @@ def process_cmj_video(
                 validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
                 metrics.validation_result = validation_result
 
+            # NOW create ProcessingInfo with complete timing breakdown
+            # (includes debug video generation timing)
+            processing_time = time.time() - start_time
+            stage_times = _convert_timer_to_stage_names(timer.get_metrics())
+
+            processing_info = ProcessingInfo(
+                version=get_kinemotion_version(),
+                timestamp=create_timestamp(),
+                quality_preset=quality_preset.value,
+                processing_time_s=processing_time,
+                timing_breakdown=stage_times,
+            )
+
+            result_metadata = ResultMetadata(
+                quality=quality_result,
+                video=video_info,
+                processing=processing_info,
+                algorithm=algorithm_config,
+            )
+
+            # Attach complete metadata to metrics
+            metrics.result_metadata = result_metadata
+
+            # NOW write JSON after metadata is attached
+            if json_output:
+                if timer:
+                    with timer.measure("json_serialization"):
+                        output_path = Path(json_output)
+                        metrics_dict = metrics.to_dict()
+                        import json
+
+                        json_str = json.dumps(metrics_dict, indent=2)
+                        output_path.write_text(json_str)
+                else:
+                    output_path = Path(json_output)
+                    metrics_dict = metrics.to_dict()
+                    import json
+
+                    json_str = json.dumps(metrics_dict, indent=2)
+                    output_path.write_text(json_str)
+
+                if verbose:
+                    print(f"Metrics written to: {json_output}")
+
             if verbose and validation_result.issues:
                 print("\n⚠️ Validation Results:")
                 for issue in validation_result.issues:
kinemotion-0.41.1.dist-info/METADATA → kinemotion-0.41.3.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.41.1
+Version: 0.41.3
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
kinemotion-0.41.1.dist-info/RECORD → kinemotion-0.41.3.dist-info/RECORD CHANGED

@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=wPItmyGJUOFM6GPRVhAEvRz0-ErI7e2qiUREYJ9EfPQ,943
-kinemotion/api.py,sha256=B_orKAJ5KNsL5zse5B0s4pumT_4OcAVoxdSEa3N9qMY,52843
+kinemotion/api.py,sha256=plEUM5kuSvPJNgIg-iDFzwHCLU2w9YjUpDpBE7BHNWA,58273
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
 kinemotion/cmj/analysis.py,sha256=qtULzp9uYzm5M0_Qu5YGJpuwjg9fz1VKAg6xg4NJxvM,21639
@@ -32,8 +32,8 @@ kinemotion/dropjump/kinematics.py,sha256=kH-XM66wlOCYMpjvyb6_Qh5ZebyOfFZ47rmhgE1
 kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
 kinemotion/dropjump/validation_bounds.py,sha256=5b4I3CKPybuvrbn-nP5yCcGF_sH4Vtyw3a5AWWvWnBk,4645
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.41.1.dist-info/METADATA,sha256=0hR96r4xCgvlzmGt2CEiwwxZ1k7sTEVLLAHLSZa99OY,26020
-kinemotion-0.41.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-kinemotion-0.41.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.41.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.41.1.dist-info/RECORD,,
+kinemotion-0.41.3.dist-info/METADATA,sha256=oMLlD0b4PDqlYf7wm3VkOmj-tJA_J4UPT3RC6FCshLQ,26020
+kinemotion-0.41.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.41.3.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.41.3.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.41.3.dist-info/RECORD,,