simba-uw-tf-dev 4.7.1__py3-none-any.whl → 4.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- simba/SimBA.py +1178 -1171
- simba/assets/lookups/yolo_schematics/yolo_mitra.csv +9 -0
- simba/mixins/train_model_mixin.py +1 -4
- simba/model/inference_batch.py +1 -1
- simba/model/yolo_fit.py +22 -15
- simba/model/yolo_pose_inference.py +7 -2
- simba/sandbox/convert_h264_to_mp4_lossless.py +129 -0
- simba/sandbox/extract_and_convert_videos.py +257 -0
- simba/third_party_label_appenders/transform/simba_to_yolo.py +8 -5
- simba/ui/pop_ups/run_machine_models_popup.py +22 -22
- simba/ui/pop_ups/simba_to_yolo_keypoints_popup.py +2 -2
- simba/ui/pop_ups/yolo_inference_popup.py +1 -1
- simba/ui/pop_ups/yolo_pose_train_popup.py +1 -1
- simba/utils/lookups.py +67 -1
- simba/video_processors/video_processing.py +97 -39
- {simba_uw_tf_dev-4.7.1.dist-info → simba_uw_tf_dev-4.7.3.dist-info}/METADATA +1 -1
- {simba_uw_tf_dev-4.7.1.dist-info → simba_uw_tf_dev-4.7.3.dist-info}/RECORD +21 -18
- {simba_uw_tf_dev-4.7.1.dist-info → simba_uw_tf_dev-4.7.3.dist-info}/LICENSE +0 -0
- {simba_uw_tf_dev-4.7.1.dist-info → simba_uw_tf_dev-4.7.3.dist-info}/WHEEL +0 -0
- {simba_uw_tf_dev-4.7.1.dist-info → simba_uw_tf_dev-4.7.3.dist-info}/entry_points.txt +0 -0
- {simba_uw_tf_dev-4.7.1.dist-info → simba_uw_tf_dev-4.7.3.dist-info}/top_level.txt +0 -0
simba/mixins/train_model_mixin.py
CHANGED

@@ -1070,10 +1070,7 @@ class TrainModelMixin(object):
 MissingUserInputWarning(msg=f'Skipping {str(config.get("SML settings", "target_name_" + str(n + 1)))} classifier analysis: missing information (e.g., no discrimination threshold and/or minimum bout set in the project_config.ini',source=self.__class__.__name__)

 if len(model_dict.keys()) == 0:
-raise NoDataError(
-msg=f"There are no models with accurate data specified in the RUN MODELS menu. Specify the model information to SimBA RUN MODELS menu to use them to analyze videos",
-source=self.get_model_info.__name__,
-)
+raise NoDataError(msg=f"There are no models with accurate data specified in the RUN MODELS menu. Specify the model information to SimBA RUN MODELS menu to use them to analyze videos. PLease check the model paths, thresholds, and minimum bout lengths.", source=self.get_model_info.__name__)
 else:
 return model_dict

simba/model/inference_batch.py
CHANGED

@@ -101,7 +101,7 @@ class InferenceBatch(TrainModelMixin, ConfigReader):
 video_timer.stop_timer()
 print(f"Predictions created for {file_name} (frame count: {len(in_df)}, elapsed time: {video_timer.elapsed_time_str}) ...")
 self.timer.stop_timer()
-stdout_success(msg=f"Machine predictions complete. Files saved in {self.save_dir} directory", elapsed_time=self.timer.elapsed_time_str, source=self.__class__.__name__)
+stdout_success(msg=f"Machine predictions complete for {len(self.feature_file_paths)} file(s). Files saved in {self.save_dir} directory", elapsed_time=self.timer.elapsed_time_str, source=self.__class__.__name__)

 if __name__ == "__main__" and not hasattr(sys, 'ps1'):
 parser = argparse.ArgumentParser(description="Perform classifications according to rules defined in SImAB project_config.ini.")

simba/model/yolo_fit.py
CHANGED

@@ -1,5 +1,6 @@
 import os
 import sys
+from contextlib import redirect_stderr, redirect_stdout

 os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
 import argparse

@@ -21,7 +22,8 @@ from simba.utils.checks import (check_file_exist_and_readable,
 check_valid_boolean, check_valid_device)
 from simba.utils.enums import Options
 from simba.utils.errors import SimBAGPUError, SimBAPAckageVersionError
-from simba.utils.
+from simba.utils.printing import stdout_information
+from simba.utils.read_write import find_core_cnt, get_current_time
 from simba.utils.yolo import load_yolo_model


@@ -108,20 +110,25 @@ class FitYolo():


 def run(self):
-(14 removed lines, truncated in the diff view)
+# Temporarily redirect stdout/stderr to terminal to ensure ultralytics output goes to terminal
+# sys.__stdout__ and sys.__stderr__ are the original terminal streams
+stdout_information(msg=f'[{get_current_time()}] Please follow the YOLO pose model training in the terminal from where SimBA was launched ...', source=self.__class__.__name__)
+stdout_information(msg=f'[{get_current_time()}] Results will be stored in the {self.save_path} directory ..', source=self.__class__.__name__)
+with redirect_stdout(sys.__stdout__), redirect_stderr(sys.__stderr__):
+    model = load_yolo_model(weights_path=self.weights_path,
+                            verbose=self.verbose,
+                            format=self.format,
+                            device=self.device)
+
+    model.train(data=self.model_yaml,
+                epochs=self.epochs,
+                project=self.save_path,
+                batch=self.batch,
+                plots=self.plots,
+                imgsz=self.imgsz,
+                workers=self.workers,
+                device=self.device,
+                patience=self.patience)


 if __name__ == "__main__" and not hasattr(sys, 'ps1'):

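Note: the redirection added to FitYolo.run() above relies on the fact that sys.__stdout__ and sys.__stderr__ keep pointing at the original terminal streams even after a GUI has replaced sys.stdout. A minimal, self-contained sketch of that pattern (noisy_training_job is a hypothetical stand-in for the ultralytics training call):

    import sys
    from contextlib import redirect_stderr, redirect_stdout

    def noisy_training_job() -> None:
        # Hypothetical stand-in for a library call (e.g. model.train) that prints progress.
        print("epoch 1/10 ...")

    # Even if sys.stdout has been swapped out (e.g. captured by a Tkinter text widget),
    # sys.__stdout__ / sys.__stderr__ still reference the launching terminal, so the
    # progress output lands there for the duration of the with-block.
    with redirect_stdout(sys.__stdout__), redirect_stderr(sys.__stderr__):
        noisy_training_job()
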
simba/model/yolo_pose_inference.py
CHANGED

@@ -34,7 +34,7 @@ from simba.utils.errors import (CountError, InvalidFilepathError,
 InvalidFileTypeError, SimBAGPUError,
 SimBAPAckageVersionError)
 from simba.utils.lookups import get_current_time
-from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.printing import SimbaTimer, stdout_information, stdout_success
 from simba.utils.read_write import (find_files_of_filetypes_in_directory,
 get_video_meta_data, recursive_file_search)
 from simba.utils.warnings import FileExistWarning, NoDataFoundWarning

@@ -182,7 +182,12 @@ class YOLOPoseInference():
 results = {}
 class_dict = self.model.names
 timer = SimbaTimer(start=True)
-
+if self.save_dir is not None:
+    msg = f'[{get_current_time()}] Starting tracking inference for {len(self.video_path)} video(s). Results will be saved in {self.save_dir} ... '
+else:
+    msg = f'[{get_current_time()}] Starting tracking inference for {len(self.video_path)} video(s) ... '
+stdout_information(msg=msg, source=self.__class__.__name__)
+stdout_information(msg='Follow progress in OS terminal window ...', source=self.__class__.__name__)
 for video_cnt, path in enumerate(self.video_path):
 video_timer = SimbaTimer(start=True)
 _, video_name, _ = get_fn_ext(filepath=path)

simba/sandbox/convert_h264_to_mp4_lossless.py
ADDED

@@ -0,0 +1,129 @@
+"""
+Convert .h264 files to lossless MP4 format using FFmpeg.
+
+This script converts H.264 raw video files to MP4 container format using
+lossless encoding (copy codec) to preserve quality.
+"""
+import os
+import subprocess
+import glob
+from pathlib import Path
+from typing import List, Union
+
+def check_ffmpeg_available() -> bool:
+    """Check if FFmpeg is available in the system."""
+    try:
+        subprocess.run(['ffmpeg', '-version'],
+                       stdout=subprocess.PIPE,
+                       stderr=subprocess.PIPE,
+                       check=True)
+        return True
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        return False
+
+def convert_h264_to_mp4_lossless(input_path: Union[str, Path],
+                                 output_path: Union[str, Path] = None) -> bool:
+    """
+    Convert a single .h264 file to lossless MP4.
+
+    :param Union[str, Path] input_path: Path to input .h264 file
+    :param Union[str, Path] output_path: Optional output path. If None, creates output in same directory with .mp4 extension
+    :return: True if conversion successful, False otherwise
+    """
+    input_path = Path(input_path)
+
+    if not input_path.exists():
+        print(f"[ERROR] File not found: {input_path}")
+        return False
+
+    if output_path is None:
+        output_path = input_path.with_suffix('.mp4')
+    else:
+        output_path = Path(output_path)
+
+    if output_path.exists():
+        print(f"[SKIP] Output file already exists: {output_path}")
+        return False
+
+    # FFmpeg command for lossless conversion (copy codec, no re-encoding)
+    # -c:v copy: Copy video stream without re-encoding (lossless)
+    # -c:a copy: Copy audio stream if present (lossless)
+    # -movflags +faststart: Optimize for web streaming (optional)
+    cmd = [
+        'ffmpeg',
+        '-i', str(input_path),
+        '-c:v', 'copy',  # Copy video codec (lossless)
+        '-c:a', 'copy',  # Copy audio codec if present (lossless)
+        '-y',            # Overwrite output file if exists
+        str(output_path)
+    ]
+
+    try:
+        print(f"Converting {input_path.name} -> {output_path.name}...")
+        result = subprocess.run(
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            check=True,
+            text=True
+        )
+        print(f"  [OK] Successfully converted {input_path.name}")
+        return True
+    except subprocess.CalledProcessError as e:
+        print(f"  [ERROR] FFmpeg error for {input_path.name}: {e.stderr}")
+        return False
+    except Exception as e:
+        print(f"  [ERROR] Unexpected error for {input_path.name}: {e}")
+        return False
+
+def convert_all_h264_files(directory: str,
+                           pattern: str = None) -> None:
+    """
+    Convert all .h264 files in a directory to MP4.
+
+    :param str directory: Directory containing .h264 files
+    :param str pattern: Optional pattern to match in filename (e.g., '4.03.001_6_2026_01_16_09_15_00_000')
+    """
+    dir_path = Path(directory)
+
+    if not dir_path.exists():
+        print(f"[ERROR] Directory does not exist: {directory}")
+        return
+
+    if not check_ffmpeg_available():
+        print("[ERROR] FFmpeg is not available. Please install FFmpeg.")
+        return
+
+    # Find all .h264 files
+    h264_files = list(dir_path.glob('*.h264'))
+
+    if pattern:
+        h264_files = [f for f in h264_files if pattern in f.name]
+
+    if not h264_files:
+        pattern_msg = f" matching pattern '{pattern}'" if pattern else ""
+        print(f"No .h264 files found{pattern_msg} in {directory}")
+        return
+
+    print(f"Found {len(h264_files)} .h264 file(s) to convert...")
+
+    successful = 0
+    failed = 0
+
+    for h264_file in sorted(h264_files):
+        if convert_h264_to_mp4_lossless(h264_file):
+            successful += 1
+        else:
+            failed += 1
+
+    print(f"\nConversion complete!")
+    print(f"  Successful: {successful}")
+    print(f"  Failed: {failed}")
+    print(f"  Total: {len(h264_files)}")
+
+if __name__ == "__main__":
+    # Convert all .h264 files in the directory
+    directory = r"E:\lp_videos_tar"
+    pattern = None  # Convert all .h264 files, not just matching a pattern
+
+    convert_all_h264_files(directory, pattern=pattern)

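Note: a short usage sketch for the new sandbox script above (the input paths are hypothetical; FFmpeg must be on the system PATH, which is what check_ffmpeg_available() verifies):

    from pathlib import Path

    from simba.sandbox.convert_h264_to_mp4_lossless import (convert_all_h264_files,
                                                            convert_h264_to_mp4_lossless)

    # Remux a single raw H.264 file into an .mp4 container without re-encoding.
    convert_h264_to_mp4_lossless(Path("recordings/session_01.h264"))

    # Or remux every .h264 file in a directory, optionally filtered on a filename substring.
    convert_all_h264_files("recordings", pattern=None)
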
simba/sandbox/extract_and_convert_videos.py
ADDED

@@ -0,0 +1,257 @@
+"""
+Extract tar files and convert all videos to lossless MP4 format.
+
+This script:
+1. Extracts all .tar, .tar.gz, and .tgz files in a directory
+2. Finds all video files (various formats)
+3. Converts them to lossless MP4 using FFmpeg stream copy
+"""
+import os
+import tarfile
+import glob
+import subprocess
+from pathlib import Path
+from typing import List, Set
+
+# Common video file extensions
+VIDEO_EXTENSIONS = {'.h264', '.avi', '.mov', '.mkv', '.flv', '.m4v', '.mp4',
+                    '.webm', '.wmv', '.mpg', '.mpeg', '.ts', '.mts', '.m2ts'}
+
+def check_ffmpeg_available() -> bool:
+    """Check if FFmpeg is available in the system."""
+    try:
+        subprocess.run(['ffmpeg', '-version'],
+                       stdout=subprocess.PIPE,
+                       stderr=subprocess.PIPE,
+                       check=True)
+        return True
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        return False
+
+def extract_tar_files(directory_path: str, output_dir: str = None) -> List[Path]:
+    """
+    Extracts all .tar, .tar.gz, and .tgz files in a given directory.
+
+    Args:
+        directory_path (str): The path to the directory containing the tar files.
+        output_dir (str, optional): The directory where to extract the contents.
+                                    If None, extracts to the same directory as the tar file.
+                                    Defaults to None.
+    Returns:
+        List[Path]: List of directories where files were extracted
+    """
+    if output_dir is None:
+        output_dir = directory_path
+
+    dir_path = Path(directory_path)
+    output_path = Path(output_dir)
+    output_path.mkdir(parents=True, exist_ok=True)
+
+    tar_files = []
+    for ext in ['*.tar', '*.tar.gz', '*.tgz']:
+        tar_files.extend(dir_path.glob(ext))
+
+    if not tar_files:
+        print(f"No tar file(s) found in {directory_path}")
+        return []
+
+    print(f"Found {len(tar_files)} tar file(s) to extract...")
+
+    extracted_dirs = []
+    for tar_file_path in tar_files:
+        tar_file = Path(tar_file_path)
+        print(f"Extracting {tar_file.name}...")
+        try:
+            # Extract to a subdirectory named after the tar file (without extension)
+            extract_dir = output_path / tar_file.stem
+            extract_dir.mkdir(parents=True, exist_ok=True)
+
+            with tarfile.open(tar_file_path, 'r:*') as f:
+                if hasattr(tarfile, 'data_filter'):  # Python 3.12+
+                    f.extractall(path=extract_dir, filter='data')
+                else:
+                    f.extractall(path=extract_dir)
+            print(f"  [OK] Extracted {tar_file.name} to {extract_dir}")
+            extracted_dirs.append(extract_dir)
+        except tarfile.ReadError as e:
+            print(f"  [ERROR] Error extracting {tar_file.name}: {e} (Not a valid tar file or corrupted)")
+        except Exception as e:
+            print(f"  [ERROR] Error extracting {tar_file.name}: {e}")
+
+    print("\nExtraction complete!")
+    return extracted_dirs
+
+def find_video_files(directory: Path, recursive: bool = True) -> List[Path]:
+    """
+    Find all video files in a directory.
+
+    Args:
+        directory: Directory to search
+        recursive: If True, search recursively in subdirectories
+
+    Returns:
+        List of video file paths
+    """
+    video_files = []
+
+    if recursive:
+        for ext in VIDEO_EXTENSIONS:
+            video_files.extend(directory.rglob(f'*{ext}'))
+    else:
+        for ext in VIDEO_EXTENSIONS:
+            video_files.extend(directory.glob(f'*{ext}'))
+
+    return sorted(video_files)
+
+def convert_video_to_mp4_lossless(input_path: Path, output_path: Path = None) -> bool:
+    """
+    Convert a video file to lossless MP4 using FFmpeg stream copy.
+
+    Args:
+        input_path: Path to input video file
+        output_path: Optional output path. If None, creates output in same directory with .mp4 extension
+
+    Returns:
+        True if conversion successful, False otherwise
+    """
+    if not input_path.exists():
+        print(f"[ERROR] File not found: {input_path}")
+        return False
+
+    if output_path is None:
+        output_path = input_path.with_suffix('.mp4')
+    else:
+        output_path = Path(output_path)
+
+    # Skip if already MP4
+    if input_path.suffix.lower() == '.mp4':
+        print(f"[SKIP] File is already MP4: {input_path.name}")
+        return False
+
+    if output_path.exists():
+        print(f"[SKIP] Output file already exists: {output_path.name}")
+        return False
+
+    # FFmpeg command for lossless conversion (stream copy)
+    # -c:v copy: Copy video stream without re-encoding (lossless)
+    # -c:a copy: Copy audio stream if present (lossless)
+    # -y: Overwrite output file if exists
+    cmd = [
+        'ffmpeg',
+        '-i', str(input_path),
+        '-c:v', 'copy',  # Copy video codec (lossless)
+        '-c:a', 'copy',  # Copy audio codec if present (lossless)
+        '-y',
+        str(output_path)
+    ]
+
+    try:
+        print(f"Converting {input_path.name} -> {output_path.name}...")
+        result = subprocess.run(
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            check=True,
+            text=True
+        )
+        print(f"  [OK] Successfully converted {input_path.name}")
+        return True
+    except subprocess.CalledProcessError as e:
+        print(f"  [ERROR] FFmpeg error for {input_path.name}")
+        # Print first few lines of stderr for debugging
+        stderr_lines = e.stderr.split('\n')[:5]
+        for line in stderr_lines:
+            if line.strip():
+                print(f"    {line}")
+        return False
+    except Exception as e:
+        print(f"  [ERROR] Unexpected error for {input_path.name}: {e}")
+        return False
+
+def extract_and_convert_videos(directory_path: str,
+                               extract_to_subdirs: bool = True,
+                               convert_recursive: bool = True) -> None:
+    """
+    Extract all tar files and convert all videos to lossless MP4.
+
+    Args:
+        directory_path: Directory containing tar files
+        extract_to_subdirs: If True, extract each tar to its own subdirectory
+        convert_recursive: If True, search for videos recursively in extracted directories
+    """
+    dir_path = Path(directory_path)
+
+    if not dir_path.exists():
+        print(f"[ERROR] Directory does not exist: {directory_path}")
+        return
+
+    if not check_ffmpeg_available():
+        print("[ERROR] FFmpeg is not available. Please install FFmpeg.")
+        return
+
+    # Step 1: Extract tar files
+    print("=" * 60)
+    print("STEP 1: Extracting tar files...")
+    print("=" * 60)
+    extracted_dirs = extract_tar_files(directory_path,
+                                       output_dir=directory_path if extract_to_subdirs else None)
+
+    # Step 2: Find all video files
+    print("\n" + "=" * 60)
+    print("STEP 2: Finding video files...")
+    print("=" * 60)
+
+    # Search in extracted directories and the main directory
+    search_dirs = extracted_dirs if extracted_dirs else [dir_path]
+    all_video_files = []
+
+    for search_dir in search_dirs:
+        videos = find_video_files(search_dir, recursive=convert_recursive)
+        all_video_files.extend(videos)
+        if videos:
+            print(f"Found {len(videos)} video file(s) in {search_dir}")
+
+    # Also search in main directory if we extracted to subdirs
+    if extract_to_subdirs and dir_path not in search_dirs:
+        videos = find_video_files(dir_path, recursive=False)
+        all_video_files.extend(videos)
+        if videos:
+            print(f"Found {len(videos)} video file(s) in {dir_path}")
+
+    if not all_video_files:
+        print("No video files found to convert.")
+        return
+
+    print(f"\nTotal video files found: {len(all_video_files)}")
+
+    # Step 3: Convert videos to MP4
+    print("\n" + "=" * 60)
+    print("STEP 3: Converting videos to lossless MP4...")
+    print("=" * 60)
+
+    successful = 0
+    failed = 0
+    skipped = 0
+
+    for video_file in all_video_files:
+        result = convert_video_to_mp4_lossless(video_file)
+        if result is True:
+            successful += 1
+        elif result is False and video_file.suffix.lower() == '.mp4':
+            skipped += 1
+        else:
+            failed += 1
+
+    print("\n" + "=" * 60)
+    print("Conversion complete!")
+    print("=" * 60)
+    print(f"  Successful: {successful}")
+    print(f"  Failed: {failed}")
+    print(f"  Skipped (already MP4): {skipped}")
+    print(f"  Total: {len(all_video_files)}")
+
+if __name__ == "__main__":
+    target_directory = r"E:\new_tars"
+    extract_and_convert_videos(target_directory,
+                               extract_to_subdirs=True,
+                               convert_recursive=True)

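Note: the extraction branch in extract_tar_files() above guards on tarfile.data_filter, i.e. the PEP 706 extraction filters. A standalone restatement of that guard, with hypothetical paths:

    import tarfile

    def safe_extract(archive_path: str, dest_dir: str) -> None:
        # 'r:*' lets tarfile auto-detect plain vs. gzip-compressed archives.
        with tarfile.open(archive_path, "r:*") as tf:
            if hasattr(tarfile, "data_filter"):
                # Interpreters with PEP 706 filters: reject absolute paths, parent-directory
                # traversal, and other unsafe members during extraction.
                tf.extractall(path=dest_dir, filter="data")
            else:
                # Older interpreters fall back to the legacy, unfiltered behaviour.
                tf.extractall(path=dest_dir)

    safe_extract("recordings/session_01.tar", "recordings/session_01")
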
simba/third_party_label_appenders/transform/simba_to_yolo.py
CHANGED

@@ -117,12 +117,15 @@ class SimBA2Yolo:
 annotations, timer, body_part_headers = [], SimbaTimer(start=True), []
 for file_cnt, video_name in enumerate(self.data_w_video):
 data = read_df(file_path=self.data_paths[video_name], file_type=self.config.file_type)
+data.columns = [x.lower() for x in list(data.columns)]
+bp_header_names = [x.lower() for x in self.config.bp_headers]
 check_valid_dataframe(df=data, source=f'{self.__class__.__name__} {self.data_paths[video_name]}', valid_dtypes=Formats.NUMERIC_DTYPES.value)
 video_path = self.video_paths[video_name]
 check_video_and_data_frm_count_align(video=video_path, data=data, name=self.data_paths[video_name], raise_error=True)
 p_data = data[data.columns[list(data.columns.str.endswith('_p'))]]
 data = data.loc[:, ~data.columns.str.endswith('_p')].reset_index(drop=True)
 data = data.iloc[(p_data[(p_data > self.threshold).all(axis=1)].index)]
+data = data[[x for x in bp_header_names if not x.endswith('_p')]]
 body_part_headers = data.columns
 data['video'], frm_cnt = video_name, len(data)
 if self.sample_size is None:

@@ -155,7 +158,7 @@ class SimBA2Yolo:
 if frm_idx in train_idx:
 img_save_path, lbl_save_path = os.path.join(self.img_train_dir, f'{file_name}.png'), os.path.join(self.lbl_train_dir, f'{file_name}.txt')
 else:
-img_save_path, lbl_save_path = os.path.join(self.
+img_save_path, lbl_save_path = os.path.join(self.img_val_dir, f'{file_name}.png'), os.path.join(self.lb_val_dir, f'{file_name}.txt')
 img = read_frm_of_video(video_path=vid_path, frame_index=frm_idx, greyscale=self.greyscale, clahe=self.clahe)
 img_h, img_w = img.shape[0], img.shape[1]
 keypoints_with_id = {}

@@ -181,8 +184,8 @@ class SimBA2Yolo:
 timer.stop_timer()
 stdout_success(msg=f'YOLO formated data saved in {self.save_dir} directory', source=self.__class__.__name__, elapsed_time=timer.elapsed_time_str)

-
-# SAVE_DIR = r'
-# CONFIG_PATH = r"
-# runner = SimBA2Yolo(config_path=CONFIG_PATH, save_dir=SAVE_DIR, sample_size=
+#
+# SAVE_DIR = r'E:\troubleshooting\mitra\yolo_0126\yolo_train_0126'
+# CONFIG_PATH = r"E:\troubleshooting\mitra\project_folder\project_config.ini"
+# runner = SimBA2Yolo(config_path=CONFIG_PATH, save_dir=SAVE_DIR, sample_size=50, verbose=True, names=('animal_1',), threshold=0.5)
 # runner.run()

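Note: the two lines added in SimBA2Yolo above normalise column case and then restrict the frame to the project's body-part coordinate columns, excluding the `_p` probability columns. A toy pandas illustration with made-up column names and values:

    import pandas as pd

    # Toy pose frame with mixed-case headers and probability columns.
    data = pd.DataFrame({"Nose_x": [1.0], "Nose_y": [2.0], "Nose_p": [0.9],
                         "Tail_x": [3.0], "Tail_y": [4.0], "Tail_p": [0.8]})
    bp_headers = ["Nose_x", "Nose_y", "Nose_p", "Tail_x", "Tail_y", "Tail_p"]

    # Lower-case both the frame headers and the project body-part headers.
    data.columns = [x.lower() for x in data.columns]
    bp_header_names = [x.lower() for x in bp_headers]

    # Keep only the coordinate columns, in the project's body-part order.
    data = data[[x for x in bp_header_names if not x.endswith("_p")]]
    print(list(data.columns))  # ['nose_x', 'nose_y', 'tail_x', 'tail_y']
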
simba/ui/pop_ups/run_machine_models_popup.py
CHANGED

@@ -8,8 +8,8 @@ from simba.mixins.config_reader import ConfigReader
 from simba.mixins.pop_up_mixin import PopUpMixin
 from simba.model.inference_batch import InferenceBatch
 from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, Entry_Box,
-FileSelect, SimbaButton,
-
+FileSelect, SimbaButton, SimBALabel,
+SimBASeperator)
 from simba.utils.checks import (check_file_exist_and_readable, check_float,
 check_int)
 from simba.utils.enums import ConfigKey, Dtypes, Formats, Keys, Links

@@ -31,34 +31,34 @@ class RunMachineModelsPopUp(PopUpMixin, ConfigReader):
 def __init__(self, config_path: Union[str, os.PathLike]):
 ConfigReader.__init__(self, config_path=config_path, read_video_info=False)
 PopUpMixin.__init__(self, title="SET MODEL PARAMETERS", icon='equation_small')
+padx, self.config_path = (0, 25), config_path
 self.clf_table_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.SET_RUN_ML_PARAMETERS.value)
-clf_header = SimBALabel(parent=self.clf_table_frm, txt="CLASSIFIER", font=Formats.FONT_HEADER.value)
-mdl_path_header = SimBALabel(parent=self.clf_table_frm, txt="MODEL PATH (.SAV)", font=Formats.FONT_HEADER.value)
-threshold_header = SimBALabel(parent=self.clf_table_frm, txt="THRESHOLD (0.0 - 1.0)", font=Formats.FONT_HEADER.value)
-min_bout_header = SimBALabel(parent=self.clf_table_frm, txt="MINIMUM BOUT LENGTH (MS)", font=Formats.FONT_HEADER.value)
-clf_header.grid(row=0, column=0, sticky=
-mdl_path_header.grid(row=0, column=1, sticky=NW)
-threshold_header.grid(row=0, column=2, sticky=NW)
-min_bout_header.grid(row=0, column=3, sticky=NW)
+clf_header = SimBALabel(parent=self.clf_table_frm, txt="CLASSIFIER", font=Formats.FONT_HEADER.value, img='label')
+mdl_path_header = SimBALabel(parent=self.clf_table_frm, txt="MODEL PATH (.SAV)", font=Formats.FONT_HEADER.value, img='file_type', justify='center')
+threshold_header = SimBALabel(parent=self.clf_table_frm, txt="THRESHOLD (0.0 - 1.0)", font=Formats.FONT_HEADER.value, img='threshold', justify='center')
+min_bout_header = SimBALabel(parent=self.clf_table_frm, txt="MINIMUM BOUT LENGTH (MS)", font=Formats.FONT_HEADER.value, img='timer_2', justify='center')
+clf_header.grid(row=0, column=0, sticky=NW, padx=padx)
+mdl_path_header.grid(row=0, column=1, sticky=NW, padx=padx)
+threshold_header.grid(row=0, column=2, sticky=NW, padx=padx)
+min_bout_header.grid(row=0, column=3, sticky=NW, padx=padx)
+
+seperator = SimBASeperator(parent=self.clf_table_frm, color='grey', orient='horizontal', borderwidth=1)
+seperator.grid(row=1, column=0, columnspan=4, rowspan=1, sticky="ew", pady=(0, 10))

 self.clf_data = {}
 for clf_cnt, clf_name in enumerate(self.clf_names):
 self.clf_data[clf_name] = {}
-SimBALabel(parent=self.clf_table_frm, txt=clf_name, font=Formats.FONT_REGULAR_ITALICS.value).grid(row=clf_cnt +
+SimBALabel(parent=self.clf_table_frm, txt=clf_name, font=Formats.FONT_REGULAR_ITALICS.value).grid(row=clf_cnt + 2, column=0, sticky=W, padx=padx)
 mdl_path = read_config_entry(config=self.config, section=ConfigKey.SML_SETTINGS.value, option=f"model_path_{clf_cnt + 1}", default_value='Select model (.sav) file', data_type=Dtypes.STR.value)
 self.clf_data[clf_name][PATH] = FileSelect(self.clf_table_frm, title="Select model (.sav) file", initialdir=self.project_path, file_types=[("SimBA Classifier", "*.sav")], initial_path=mdl_path)
 threshold = read_config_entry(config=self.config, section=ConfigKey.THRESHOLD_SETTINGS.value, option=f"threshold_{clf_cnt + 1}", default_value='', data_type=Dtypes.STR.value)
-self.clf_data[clf_name][THRESHOLD] = Entry_Box(parent=self.clf_table_frm, fileDescription='', labelwidth=0, entry_box_width=20, value=threshold)
+self.clf_data[clf_name][THRESHOLD] = Entry_Box(parent=self.clf_table_frm, fileDescription='', labelwidth=0, entry_box_width=20, value=threshold, justify='center')
 bout_length = read_config_entry(config=self.config, section=ConfigKey.MIN_BOUT_LENGTH.value, option=f"min_bout_{clf_cnt + 1}", default_value='', data_type=Dtypes.STR.value)
-self.clf_data[clf_name][MIN_BOUT] = Entry_Box(parent=self.clf_table_frm, fileDescription='', labelwidth=0, entry_box_width=20, value=bout_length)
-self.clf_data[clf_name][PATH].grid(row=clf_cnt +
-self.clf_data[clf_name][THRESHOLD].grid(row=clf_cnt +
-self.clf_data[clf_name][MIN_BOUT].grid(row=clf_cnt +
+self.clf_data[clf_name][MIN_BOUT] = Entry_Box(parent=self.clf_table_frm, fileDescription='', labelwidth=0, entry_box_width=20, value=bout_length, justify='center')
+self.clf_data[clf_name][PATH].grid(row=clf_cnt + 2, column=1, sticky=NW, padx=padx)
+self.clf_data[clf_name][THRESHOLD].grid(row=clf_cnt + 2, column=2, sticky=NW, padx=padx)
+self.clf_data[clf_name][MIN_BOUT].grid(row=clf_cnt + 2, column=3, sticky=NW, padx=padx)
 self.clf_table_frm.grid(row=0, sticky=W, pady=5, padx=5)
-# self.runtime_settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="RUNTIME SETTINGS", icon_name='run', icon_link=Links.SET_RUN_ML_PARAMETERS.value)
-# self.core_cnt_dropdown = SimBADropDown(parent=self.runtime_settings_frm, dropdown_options=CORE_CNT_OPTIONS, label='CPU CORE COUNT:', label_width=30, dropdown_width=20, value=int(find_core_cnt()[0]/3))
-# self.runtime_settings_frm.grid(row=1, sticky=W, pady=5, padx=5)
-# self.core_cnt_dropdown.grid(row=0, sticky=W, pady=5, padx=5)
 run_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header=f"ANALYZE {len(self.feature_file_paths)} FILES(S)", icon_name='rocket')
 run_btn = SimbaButton(parent=run_frm, txt="RUN", img='rocket', txt_clr='red', font=Formats.FONT_REGULAR.value, hover_font=Formats.FONT_REGULAR.value, cmd=self.run)
 run_frm.grid(row=2, sticky=W, pady=5, padx=5)

@@ -95,7 +95,7 @@ class RunMachineModelsPopUp(PopUpMixin, ConfigReader):
 with open(self.config_path, "w") as f:
 self.config.write(f)

-stdout_success(msg="Model paths/settings saved in project_config.ini", source=self.__class__.__name__)
+stdout_success(msg=f"Model paths/settings saved in project_config.ini ({self.config_path})", source=self.__class__.__name__)

 if len(self.feature_file_paths) == 0:
 raise NoDataError(msg=f'Cannot run machine model predictions: No data files found in {self.features_dir} directory', source=self.__class__.__name__)

@@ -103,4 +103,4 @@ class RunMachineModelsPopUp(PopUpMixin, ConfigReader):
 inferencer = InferenceBatch(config_path=self.config_path, features_dir=None, save_dir=None, minimum_bout_length=None)
 inferencer.run()

-#_ = RunMachineModelsPopUp(config_path=r"
+#_ = RunMachineModelsPopUp(config_path=r"E:\troubleshooting\mitra_emergence\project_folder\project_config.ini")

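Note: the popup change above adds a header icon row, a full-width separator on grid row 1, and shifts the classifier rows down by one (hence the `clf_cnt + 2` offsets). A minimal sketch of that layout using plain ttk widgets as stand-ins for SimBA's wrapped SimBALabel/SimBASeperator/Entry_Box classes (classifier names are made up):

    import tkinter as tk
    from tkinter import ttk

    root = tk.Tk()
    padx = (0, 25)

    # Row 0: column headers.
    headers = ["CLASSIFIER", "MODEL PATH (.SAV)", "THRESHOLD (0.0 - 1.0)", "MINIMUM BOUT LENGTH (MS)"]
    for col, text in enumerate(headers):
        ttk.Label(root, text=text).grid(row=0, column=col, sticky="nw", padx=padx)

    # Row 1: horizontal separator spanning all four columns.
    ttk.Separator(root, orient="horizontal").grid(row=1, column=0, columnspan=4, sticky="ew", pady=(0, 10))

    # Rows 2..N: one row per classifier, offset by 2 to sit below the header and separator.
    for clf_cnt, clf_name in enumerate(["Attack", "Sniffing"]):
        ttk.Label(root, text=clf_name).grid(row=clf_cnt + 2, column=0, sticky="w", padx=padx)
        for col in (1, 2, 3):
            ttk.Entry(root, width=20, justify="center").grid(row=clf_cnt + 2, column=col, sticky="nw", padx=padx)

    root.mainloop()
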
simba/ui/pop_ups/simba_to_yolo_keypoints_popup.py
CHANGED

@@ -63,7 +63,7 @@ class SimBA2YoloKeypointsPopUp(PopUpMixin):
 animal_names = list(config.animal_bp_dict.keys())
 bps = [x[:-2] for x in config.animal_bp_dict[animal_names[0]]['X_bps']]
 flip_idx = get_yolo_keypoint_flip_idx(x=bps)
-map_dict = {c: k for c, k in enumerate(animal_names)}
+#map_dict = {c: k for c, k in enumerate(animal_names)}
 bp_id_idx = None
 if len(animal_names) > 1:
 bp_id_idx = get_yolo_keypoint_bp_id_idx(animal_bp_dict=config.animal_bp_dict)

@@ -85,7 +85,7 @@ class SimBA2YoloKeypointsPopUp(PopUpMixin):
 greyscale=grey,
 padding=padding,
 flip_idx=flip_idx,
-names=
+names=tuple(animal_names),
 sample_size=sample_size,
 bp_id_idx=bp_id_idx,
 clahe=clahe)

simba/ui/pop_ups/yolo_inference_popup.py
CHANGED

@@ -30,7 +30,7 @@ SMOOTHING_OPTIONS = ['None', 50, 100, 200, 300, 400, 500]

 YOLO_FORMATS = Options.VALID_YOLO_FORMATS.value + ['None']
 devices = ['CPU']
-THRESHOLD_OPTIONS = list(np.arange(0.
+THRESHOLD_OPTIONS = list(np.arange(0.05, 1.05, 0.05).astype(np.float32))

 simba_dir = os.path.dirname(simba.__file__)
 YOLO_SCHEMATICS_DIR = os.path.join(simba_dir, Paths.YOLO_SCHEMATICS_DIR.value)

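Note: the completed THRESHOLD_OPTIONS expression above builds the dropdown values directly from numpy; a quick standalone check of what it produces:

    import numpy as np

    # Roughly 0.05, 0.10, ..., 1.0 in 0.05 steps (20 float32 values, up to float rounding).
    THRESHOLD_OPTIONS = list(np.arange(0.05, 1.05, 0.05).astype(np.float32))
    print(len(THRESHOLD_OPTIONS), THRESHOLD_OPTIONS[0], THRESHOLD_OPTIONS[-1])
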
simba/ui/pop_ups/yolo_pose_train_popup.py
CHANGED

@@ -33,7 +33,7 @@ class YOLOPoseTrainPopUP(PopUpMixin):
 PopUpMixin.__init__(self, title="TRAIN YOLO POSE ESTIMATION MODEL", icon='ultralytics_2')
 settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name='settings')
 devices.extend([f'{x} : {y["model"]}' for x, y in gpus.items()])
-self.yolo_map_path = FileSelect(parent=settings_frm, fileDescription='YOLO MAP FILE (YAML):', lblwidth=35, entry_width=45, file_types=[("YOLO MODEL FILE",
+self.yolo_map_path = FileSelect(parent=settings_frm, fileDescription='YOLO MAP FILE (YAML):', lblwidth=35, entry_width=45, file_types=[("YOLO MODEL FILE", ".yaml")], lbl_icon='file')
 self.save_dir = FolderSelect(settings_frm, folderDescription="SAVE DIRECTORY:", lblwidth=35, entry_width=45, lbl_icon='save')
 self.weights_path = FileSelect(parent=settings_frm, fileDescription='INITIAL WEIGHT FILE (E.G., .PT):', lblwidth=35, entry_width=45, lbl_icon='file')