boris-behav-obs 8.9.16__py3-none-any.whl → 9.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of boris-behav-obs might be problematic.
- boris/__init__.py +1 -1
- boris/__main__.py +1 -1
- boris/about.py +36 -39
- boris/add_modifier.py +122 -109
- boris/add_modifier_ui.py +239 -135
- boris/advanced_event_filtering.py +81 -45
- boris/analysis_plugins/__init__.py +0 -0
- boris/analysis_plugins/_latency.py +59 -0
- boris/analysis_plugins/irr_cohen_kappa.py +109 -0
- boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py +112 -0
- boris/analysis_plugins/irr_weighted_cohen_kappa.py +157 -0
- boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py +162 -0
- boris/analysis_plugins/list_of_dataframe_columns.py +22 -0
- boris/analysis_plugins/number_of_occurences.py +22 -0
- boris/analysis_plugins/number_of_occurences_by_independent_variable.py +54 -0
- boris/analysis_plugins/time_budget.py +61 -0
- boris/behav_coding_map_creator.py +228 -229
- boris/behavior_binary_table.py +33 -50
- boris/behaviors_coding_map.py +17 -18
- boris/boris_cli.py +6 -25
- boris/cmd_arguments.py +12 -1
- boris/coding_pad.py +42 -49
- boris/config.py +161 -77
- boris/config_file.py +63 -83
- boris/connections.py +112 -57
- boris/converters.py +13 -37
- boris/converters_ui.py +187 -110
- boris/cooccurence.py +250 -0
- boris/core.py +2511 -1824
- boris/core_qrc.py +15895 -10185
- boris/core_ui.py +946 -792
- boris/db_functions.py +21 -41
- boris/dev.py +134 -0
- boris/dialog.py +505 -244
- boris/duration_widget.py +15 -20
- boris/edit_event.py +84 -28
- boris/edit_event_ui.py +214 -78
- boris/event_operations.py +517 -415
- boris/events_cursor.py +25 -17
- boris/events_snapshots.py +36 -82
- boris/exclusion_matrix.py +4 -9
- boris/export_events.py +213 -583
- boris/export_observation.py +98 -611
- boris/external_processes.py +156 -97
- boris/geometric_measurement.py +652 -287
- boris/gui_utilities.py +91 -14
- boris/image_overlay.py +9 -9
- boris/import_observations.py +190 -98
- boris/ipc_mpv.py +325 -0
- boris/irr.py +26 -63
- boris/latency.py +34 -25
- boris/measurement_widget.py +14 -18
- boris/media_file.py +52 -84
- boris/menu_options.py +17 -6
- boris/modifier_coding_map_creator.py +1013 -0
- boris/modifiers_coding_map.py +7 -9
- boris/mpv.py +1 -0
- boris/mpv2.py +732 -705
- boris/observation.py +655 -310
- boris/observation_operations.py +1036 -404
- boris/observation_ui.py +584 -356
- boris/observations_list.py +71 -53
- boris/otx_parser.py +74 -80
- boris/param_panel.py +31 -16
- boris/param_panel_ui.py +254 -138
- boris/player_dock_widget.py +90 -60
- boris/plot_data_module.py +43 -46
- boris/plot_events.py +127 -90
- boris/plot_events_rt.py +17 -31
- boris/plot_spectrogram_rt.py +95 -30
- boris/plot_waveform_rt.py +32 -21
- boris/plugins.py +431 -0
- boris/portion/__init__.py +18 -8
- boris/portion/const.py +35 -18
- boris/portion/dict.py +5 -5
- boris/portion/func.py +2 -2
- boris/portion/interval.py +21 -41
- boris/portion/io.py +41 -32
- boris/preferences.py +306 -83
- boris/preferences_ui.py +685 -228
- boris/project.py +448 -293
- boris/project_functions.py +689 -254
- boris/project_import_export.py +213 -222
- boris/project_ui.py +674 -438
- boris/qrc_boris.py +6 -3
- boris/qrc_boris5.py +6 -3
- boris/select_modifiers.py +74 -48
- boris/select_observations.py +20 -199
- boris/select_subj_behav.py +67 -39
- boris/state_events.py +53 -37
- boris/subjects_pad.py +6 -9
- boris/synthetic_time_budget.py +45 -28
- boris/time_budget_functions.py +171 -171
- boris/time_budget_widget.py +84 -114
- boris/transitions.py +41 -47
- boris/utilities.py +766 -266
- boris/version.py +3 -3
- boris/video_equalizer.py +16 -14
- boris/video_equalizer_ui.py +199 -130
- boris/video_operations.py +125 -28
- boris/view_df.py +104 -0
- boris/view_df_ui.py +75 -0
- boris/write_event.py +538 -0
- boris_behav_obs-9.7.6.dist-info/METADATA +139 -0
- boris_behav_obs-9.7.6.dist-info/RECORD +109 -0
- {boris_behav_obs-8.9.16.dist-info → boris_behav_obs-9.7.6.dist-info}/WHEEL +1 -1
- boris_behav_obs-9.7.6.dist-info/entry_points.txt +2 -0
- boris/README.TXT +0 -22
- boris/add_modifier.ui +0 -323
- boris/boris_ui.py +0 -886
- boris/converters.ui +0 -289
- boris/core.qrc +0 -35
- boris/core.ui +0 -1543
- boris/edit_event.ui +0 -175
- boris/icons/logo_eye.ico +0 -0
- boris/map_creator.py +0 -850
- boris/observation.ui +0 -773
- boris/param_panel.ui +0 -379
- boris/preferences.ui +0 -537
- boris/project.ui +0 -1069
- boris/project_server.py +0 -236
- boris/vlc.py +0 -10343
- boris/vlc_local.py +0 -90
- boris_behav_obs-8.9.16.dist-info/LICENSE.TXT +0 -674
- boris_behav_obs-8.9.16.dist-info/METADATA +0 -129
- boris_behav_obs-8.9.16.dist-info/RECORD +0 -108
- boris_behav_obs-8.9.16.dist-info/entry_points.txt +0 -2
- {boris → boris_behav_obs-9.7.6.dist-info/licenses}/LICENSE.TXT +0 -0
- {boris_behav_obs-8.9.16.dist-info → boris_behav_obs-9.7.6.dist-info}/top_level.txt +0 -0
boris/utilities.py
CHANGED
@@ -1,7 +1,7 @@
 """
 BORIS
 Behavioral Observation Research Interactive Software
-Copyright 2012-2023 Olivier Friard
+Copyright 2012-2025 Olivier Friard

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -19,32 +19,243 @@ Copyright 2012-2023 Olivier Friard
 MA 02110-1301, USA.
 """

-from
+from decimal import Decimal as dec
+from decimal import getcontext, ROUND_DOWN
+from hachoir.metadata import extractMetadata
+from hachoir.parser import createParser
+from shutil import copyfile, which
+from typing import Union, Tuple
 import csv
 import datetime
-import
+import datetime as dt
+import exifread
 import json
 import logging
 import math
+import numpy as np
 import os
-
+from pathlib import Path
+from PIL.ImageQt import Image
+import platform
 import re
-import
+import shutil
 import subprocess
 import sys
 import urllib.parse
+import urllib.request
 import wave
-
-from decimal import getcontext, ROUND_DOWN
-from shutil import copyfile
-
-import numpy as np
-from PyQt5.QtGui import qRgb
-from PyQt5.QtGui import QPixmap, QImage
+import socket

-from
+from PySide6 import __version__ as pyside6_version
+from PySide6.QtGui import QPixmap, QImage
+from PySide6.QtCore import qVersion

 from . import config as cfg
+from . import version
+
+logger = logging.getLogger(__name__)
+
+if (sys.platform.startswith("win") or sys.platform.startswith("linux")) and ("-i" not in sys.argv) and ("--ipc" not in sys.argv):
+    try:
+        from . import mpv2 as mpv
+    except Exception:
+        logger.warning("MPV library not found")
+
+        if sys.platform.startswith("win"):
+            import ctypes
+
+            logger.info("The MPV library was not found!\nIt will be downloaded from the BORIS GitHub repository")
+            # ctypes.windll.user32.MessageBoxW(0, "The MPV library was not found!\nIt will be downloaded.", "BORIS", 0)
+
+            # test if following function works on windows
+            MessageBoxTimeoutW = ctypes.windll.user32.MessageBoxTimeoutW
+            MessageBoxTimeoutW.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint]
+            ctypes.windll.user32.MessageBoxTimeoutW(
+                None, "The MPV library was not found.\nIt will be downloaded from the BORIS GitHub repository.", "MPV library", 0, 0, 10000
+            )  # time out
+
+            # download libmpv2.dll from https://github.com/boris-behav-obs/boris-behav-obs.github.io/releases/download/files/
+
+            url: str = "https://github.com/boris-behav-obs/boris-behav-obs.github.io/releases/download/files/"
+
+            external_files_dir = ""
+            # search where to download libmpv-2.dll
+
+            external_files_dir = Path(__file__).parent / "misc"
+            if not external_files_dir.is_dir():
+                logger.info(f"Creating {external_files_dir} directory")
+                external_files_dir.mkdir(parents=True, exist_ok=True)
+
+            logger.info(f"MPV library directory: {external_files_dir}")
+
+            local_filename = external_files_dir / "libmpv-2.dll"
+            logger.info("Downloading libmpv-2.dll...")
+            try:
+                urllib.request.urlretrieve(url + "libmpv-2.dll", local_filename)
+                logger.info(f"File downloaded as {local_filename}")
+            except Exception:
+                logger.critical("The MPV library can not be downloaded! Check your connection.")
+                ctypes.windll.user32.MessageBoxW(0, "The MPV library can not be downloaded!\nCheck your connection.", "BORIS", 0)
+                sys.exit(5)
+            # reload package
+            try:
+                from . import mpv2 as mpv
+            except Exception:
+                logger.critical("MPV library not found after dowloading")
+                sys.exit(5)
+
+        elif sys.platform.startswith("linux"):
+            text = (
+                "The MPV library was not found!\nInstall it\n\n"
+                "With Debian/Ubuntu/Mint:\nsudo apt install libmpv2\n\n"
+                "With Fedora:\nsudo dnf install mpv-libs\n\n"
+                "With OpenSUSE:\nsudo zypper install mpv\n\n"
+                "Arch Linux / Manjaro:\nsudo pacman -S mpv\n\n"
+            )
+            if shutil.which("zenity") is not None:
+                subprocess.run(["zenity", "--error", f"--text={text}"])
+            elif shutil.which("kdialog"):
+                subprocess.run(["kdialog", "--msgbox", text])
+            elif shutil.which("gxmessage"):
+                subprocess.run(["gxmessage", text])
+            elif shutil.which("xmessage"):
+                subprocess.run(["xmessage", text])
+
+            sys.exit(5)
+        else:
+            sys.exit(5)
+
+
+def test_mpv_ipc(socket_path: str = cfg.MPV_SOCKET) -> bool:
+    """
+    test if socket available
+    """
+    if not os.path.exists(socket_path):
+        return False
+
+    try:
+        client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        client.connect(socket_path)
+        client.close()
+        return True
+    except Exception:
+        return False
+
+
+def extract_exif_DateTimeOriginal(file_path: str) -> int:
+    """
+    extract the EXIF DateTimeOriginal tag
+    return epoch time
+    if the tag is not available return -1
+
+    Args:
+        file_path (str): path of the media file
+
+    Returns:
+        int: timestamp
+
+    """
+    try:
+        with open(file_path, "rb") as f_in:
+            tags = exifread.process_file(f_in, details=False, stop_tag="EXIF DateTimeOriginal")
+            if "EXIF DateTimeOriginal" in tags:
+                date_time_original = (
+                    f"{tags['EXIF DateTimeOriginal'].values[:4]}-"
+                    f"{tags['EXIF DateTimeOriginal'].values[5:7]}-"
+                    f"{tags['EXIF DateTimeOriginal'].values[8:10]} "
+                    f"{tags['EXIF DateTimeOriginal'].values.split(' ')[-1]}"
+                )
+                return int(datetime.datetime.strptime(date_time_original, "%Y-%m-%d %H:%M:%S").timestamp())
+            else:
+                try:
+                    # read from file name (YYYY-MM-DD_HHMMSS)
+                    return int(datetime.datetime.strptime(Path(file_path).stem, "%Y-%m-%d_%H%M%S").timestamp())
+                except Exception:
+                    # read from file name (YYYY-MM-DD_HH:MM:SS)
+                    return int(datetime.datetime.strptime(Path(file_path).stem, "%Y-%m-%d_%H:%M:%S").timestamp())
+
+    except Exception:
+        return -1
+
+
+def extract_video_creation_date(file_path: str) -> int | None:
+    """
+    returns the timestamp of the media creation date time with Hachoir
+    """
+
+    logger.debug(f"extract_video_creation_date for {file_path}")
+
+    if not Path(file_path).is_file():
+        logger.debug(f"{file_path} not found")
+        return None
+    try:
+        parser = createParser(file_path)
+        metadata = extractMetadata(parser)
+    except Exception:
+        return None
+
+    if metadata.has("creation_date"):
+        if metadata.get("creation_date") == datetime.datetime(1904, 1, 1, 0, 0):
+            return None
+        return metadata.get("creation_date").timestamp()
+    else:
+        return None
+
+
+def extract_date_time_from_file_name(file_path: str) -> int | None:
+    """
+    extract YYYY-MM-DD_HHMMSS or YYYY-MM-DD_HH:MM:SS from file name
+    """
+
+    patterns = (r"\d{4}-\d{2}-\d{2}_\d{6}", r"\d{4}-\d{2}-\d{2}_\d{2}:\d{2}:\d{2}")
+    for pattern in patterns:
+        matches = re.findall(pattern, file_path)
+
+        if matches:
+            if pattern == r"\d{4}-\d{2}-\d{2}_\d{6}":
+                logger.debug(
+                    f"extract_date_time_from_file_name timestamp from {file_path}: {int(datetime.datetime.strptime(matches[0], '%Y-%m-%d_%H%M%S').timestamp())}"
+                )
+
+                return int(datetime.datetime.strptime(matches[0], "%Y-%m-%d_%H%M%S").timestamp())
+
+            if pattern == r"\d{4}-\d{2}-\d{2}_\d{2}:\d{2}:\d{2}":
+                logger.debug(
+                    f"extract_date_time_from_file_name timestamp from {file_path}: {int(datetime.datetime.strptime(matches[0], '%Y-%m-%d_%H:%M:%S').timestamp())}"
+                )

+                return int(datetime.datetime.strptime(matches[0], "%Y-%m-%d_%H:%M:%S").timestamp())
+
+    return None
+
+
+def mpv_lib_version() -> Tuple[str, str, str]:
+    """
+    Version of MPV library
+
+    Returns:
+        str: MPV library version
+    """
+    if ("-i" in sys.argv) or ("--ipc" in sys.argv) or sys.platform.startswith("darwin"):
+        return "MPV IPC mode", "", ""
+
+    mpv_lib_file = None
+    if sys.platform.startswith("linux"):
+        mpv_lib_file = mpv.sofile
+    if sys.platform.startswith("win"):
+        mpv_lib_file = mpv.dll
+
+    return (".".join([str(x) for x in mpv._mpv_client_api_version()]), mpv_lib_file, mpv.MPV_VERSION)
+
+
+def python_mpv_script_version() -> str:
+    """
+    version of python-mpv script
+    """
+    try:
+        return mpv.__version__
+    except Exception:
+        return "Not found"


 def error_info(exc_info: tuple) -> tuple:
@@ -65,13 +276,12 @@ def error_info(exc_info: tuple) -> tuple:
     return (f"{exc_type}: {exc_obj}", fname, exc_tb.tb_lineno)


-def pil2pixmap(im) -> QPixmap:
+def pil2pixmap(im: Image) -> QPixmap:
     """
     convert PIL image to pixmap
     see https://stackoverflow.com/questions/34697559/pil-image-to-qpixmap-conversion-issue
     """

-    # print(im.mode)
     if im.mode == "RGB":
         r, g, b = im.split()
         im = Image.merge("RGB", (b, g, r))
@@ -117,7 +327,7 @@ def return_file_header(file_name: str, row_number: int = 5) -> list:
     Returns:
         list: first row_number row(s) of file_name
     """
-    header = []
+    header: list = []
     try:
         with open(file_name) as f_in:
             for _ in range(row_number):
@@ -127,28 +337,36 @@ def return_file_header(file_name: str, row_number: int = 5) -> list:
     return header


-def
+def return_file_header_footer(file_name: str, file_row_number: int = 0, row_number: int = 5) -> Tuple[list, list]:
     """
-
+    return file header and footer

     Args:
-
+        file_name (str): path of file
+        file_row_number (int): total rows number of file
+        row_number (int): number of rows to return

     Returns:
-
+        list: first row_number row(s) of file_name
     """
+    header: list = []
+    footer: list = []
+    try:
+        row_idx: int = 0
+        with open(file_name, "rt") as f_in:
+            for row in f_in:
+                if row_idx < row_number:
+                    header.append(row.strip())
+                if file_row_number > row_number * 2 and (row_idx >= file_row_number - row_number):
+                    footer.append(row.strip())
+                row_idx += 1

-
-
-
-    if fileSystemEncoding is None:
-        fileSystemEncoding = "UTF-8"
-        return b.decode(fileSystemEncoding)
-    else:
-        return b
+    except Exception:
+        return [], []
+    return header, footer


-def convertTime(time_format: str, sec: float) -> str:
+def convertTime(time_format: str, sec: Union[float, dec]) -> Union[str, None]:
     """
     convert time in base at the current format (S or HHMMSS)

@@ -171,6 +389,20 @@ def convertTime(time_format: str, sec: float) -> str:
     return None


+def smart_time_format(sec: Union[float, dec], time_format: str = cfg.S, cutoff: dec = cfg.SMART_TIME_CUTOFF_DEFAULT) -> str:
+    """
+    Smart time format
+    returns time in seconds if <= cutoff else in HH:MM:SS.ZZZ format
+    """
+    # cutoff = 0 follows the time format selectd by user
+    if cutoff == 0:
+        return convertTime(time_format, sec)
+    if sec <= cutoff:
+        return f"{sec:.3f}"
+    else:
+        return seconds2time(sec)
+
+
 def convert_time_to_decimal(pj: dict) -> dict:
     """
     convert time of project from float to decimal
@@ -183,7 +415,10 @@ def convert_time_to_decimal(pj: dict) -> dict:
     """
     for obs_id in pj[cfg.OBSERVATIONS]:
         if cfg.TIME_OFFSET in pj[cfg.OBSERVATIONS][obs_id]:
-            pj[cfg.OBSERVATIONS][obs_id][cfg.TIME_OFFSET]
+            if pj[cfg.OBSERVATIONS][obs_id][cfg.TIME_OFFSET] is not None:
+                pj[cfg.OBSERVATIONS][obs_id][cfg.TIME_OFFSET] = dec(str(pj[cfg.OBSERVATIONS][obs_id][cfg.TIME_OFFSET]))
+            else:
+                pj[cfg.OBSERVATIONS][obs_id][cfg.TIME_OFFSET] = dec("0.000")
         for idx, _ in enumerate(pj[cfg.OBSERVATIONS][obs_id][cfg.EVENTS]):
             pj[cfg.OBSERVATIONS][obs_id][cfg.EVENTS][idx][cfg.EVENT_TIME_FIELD_IDX] = dec(
                 pj[cfg.OBSERVATIONS][obs_id][cfg.EVENTS][idx][cfg.EVENT_TIME_FIELD_IDX]
@@ -192,23 +427,18 @@ def convert_time_to_decimal(pj: dict) -> dict:
     return pj


-def
+def count_media_file(media_files: dict) -> int:
     """
-
+    count number of media file for observation
     """
-
-    try:
-        with open(file_name, "rb") as f:
-            for chunk in iter(lambda: f.read(4096), b""):
-                hash_md5.update(chunk)
-        return hash_md5.hexdigest()
-    except FileNotFoundError:
-        return ""
+    return sum([len(media_files[idx]) for idx in media_files])


-def txt2np_array(
+def txt2np_array(
+    file_name: str, columns_str: str, substract_first_value: str, converters=None, column_converter=None
+) -> Tuple[bool, str, np.array]:
     """
-    read a txt file (tsv or csv) and return np array with
+    read a txt file (tsv or csv) and return a np array with columns cited in columns_str

     Args:
         file_name (str): path of the file to load in numpy array
@@ -235,42 +465,66 @@ def txt2np_array(file_name: str, columns_str: str, substract_first_value: str, c
        return False, f"Problem with columns {columns_str}", np.array([])

     # check converters
-    np_converters = {}
+    np_converters: dict = {}
     for column_idx in column_converter:
         if column_converter[column_idx] in converters:
-
             conv_name = column_converter[column_idx]

             function = f"""def {conv_name}(INPUT):\n"""
-            function += """ INPUT = INPUT.decode("utf-8") if isinstance(INPUT, bytes) else INPUT"""
+            function += """ INPUT = INPUT.decode("utf-8") if isinstance(INPUT, bytes) else INPUT\n\n"""
             for line in converters[conv_name]["code"].split("\n"):
                 function += f" {line}\n"
             function += """ return OUTPUT"""

+            print("=============")
+            print(function)
+            print("=============")
+
+            import types
+
+            mod = types.ModuleType("converter_module")
+            exec(function, mod.__dict__)
+
+            """
            try:
                exec(function)
            except Exception:
                return False, f"error in converter: {sys.exc_info()[1]}", np.array([])

-
+            print(f"{converters=}")
+            print(f"{column_converter=}")
+            print(locals())
+            print(f"{conv_name=}")
+            """
+
+            # np_converters[column_idx - 1] = locals()['conv_name']
+            np_converters[column_idx - 1] = getattr(mod, conv_name)

         else:
-            return False, f"converter {
+            return False, f"converter {column_converter[column_idx]} not found", np.array([])

     # snif txt file
     try:
         with open(file_name) as csvfile:
-            buff = csvfile.read(
+            buff = csvfile.read(4096)
             snif = csv.Sniffer()
             dialect = snif.sniff(buff)
-            has_header = snif.has_header(buff)
+            """has_header = snif.has_header(buff)"""
+        # count number of header rows
+        header_rows_nb = 0
+        csv.register_dialect("dialect", dialect)
+        with open(file_name, "r") as f:
+            reader = csv.reader(f, dialect="dialect")
+            for row in reader:
+                if sum([isinstance(intfloatstr(x), str) for x in row]) == len(row):
+                    header_rows_nb += 1
+
     except Exception:
         return False, f"{sys.exc_info()[1]}", np.array([])

     try:
-        data = np.loadtxt(
-
-        )
+        data = np.loadtxt(file_name, delimiter=dialect.delimiter, usecols=columns, skiprows=header_rows_nb, converters=np_converters)
+
     except Exception:
         return False, f"{sys.exc_info()[1]}", np.array([])

@@ -281,7 +535,7 @@ def txt2np_array(file_name: str, columns_str: str, substract_first_value: str, c
     return True, "", data


-def versiontuple(version_str: str):
+def versiontuple(version_str: str) -> tuple:
     """
     Convert version from str to tuple of str

@@ -297,9 +551,34 @@ def versiontuple(version_str: str):
     return tuple(filled)


+def behavior_user_color(ethogram: dict, behavior_code: str) -> Union[str, None]:
+    """
+    returns the color of behavior if defined else None
+    """
+    for x in ethogram:
+        if ethogram[x][cfg.BEHAVIOR_CODE] == behavior_code:
+            if ethogram[x].get(cfg.COLOR, None) == "":
+                return None
+            else:
+                return ethogram[x].get(cfg.COLOR, None)
+
+    return None
+
+
+def behav_category_user_color(behavioral_categories: dict, name: str) -> Union[str, None]:
+    """
+    returns the color of the behavioral category if defined else None
+    """
+    for key in behavioral_categories:
+        if behavioral_categories[key]["name"] == name:
+            return behavioral_categories[key].get(cfg.COLOR, None)
+
+    return None
+
+
 def state_behavior_codes(ethogram: dict) -> list:
     """
-    behavior codes defined as STATE event
+    returns a list of behavior codes defined as STATE event

     Args:
         ethogram (dict): ethogram dictionary
@@ -308,12 +587,12 @@ def state_behavior_codes(ethogram: dict) -> list:
         list: list of behavior codes defined as STATE event

     """
-    return [ethogram[x][cfg.BEHAVIOR_CODE] for x in ethogram if
+    return [ethogram[x][cfg.BEHAVIOR_CODE] for x in ethogram if ethogram[x][cfg.TYPE] in cfg.STATE_EVENT_TYPES]


 def point_behavior_codes(ethogram: dict) -> list:
     """
-    behavior codes defined as POINT event
+    returns a list of behavior codes defined as POINT event

     Args:
         ethogram (dict): ethogram dictionary
@@ -322,7 +601,7 @@ def point_behavior_codes(ethogram: dict) -> list:
         list: list of behavior codes defined as POINT event

     """
-    return [ethogram[x][cfg.BEHAVIOR_CODE] for x in ethogram if
+    return [ethogram[x][cfg.BEHAVIOR_CODE] for x in ethogram if ethogram[x][cfg.TYPE] in (cfg.POINT_EVENT, cfg.POINT_EVENT_WITH_CODING_MAP)]


 def group_events(pj: dict, obs_id: str, include_modifiers: bool = False) -> dict:
@@ -347,7 +626,6 @@ def group_events(pj: dict, obs_id: str, include_modifiers: bool = False) -> dict
     intervals_behav = {}

     for event in pj[cfg.OBSERVATIONS][obs_id][cfg.EVENTS]:
-
         time_ = event[cfg.EVENT_TIME_FIELD_IDX]
         subject = event[cfg.EVENT_SUBJECT_FIELD_IDX]
         code = event[cfg.EVENT_BEHAVIOR_FIELD_IDX]
@@ -355,9 +633,7 @@ def group_events(pj: dict, obs_id: str, include_modifiers: bool = False) -> dict

         # check if code is state
         if code in state_events_list:
-
             if (subject, code, modifier) in mem_behav and mem_behav[(subject, code, modifier)]:
-
                 if (subject, code, modifier) not in intervals_behav:
                     intervals_behav[(subject, code, modifier)] = []
                 intervals_behav[(subject, code, modifier)].append((mem_behav[(subject, code, modifier)], time_))
@@ -378,8 +654,21 @@ def group_events(pj: dict, obs_id: str, include_modifiers: bool = False) -> dict
     return {"error": ""}


+def flatten_list(nested_list) -> list:
+    """
+    Flatten a list of lists.
+    """
+    flattened: list = []
+    for item in nested_list:
+        if isinstance(item, list):
+            flattened.extend(flatten_list(item))
+        else:
+            flattened.append(item)
+    return flattened
+
+
 def get_current_states_modifiers_by_subject(
-    state_behaviors_codes: list, events: list, subjects: dict,
+    state_behaviors_codes: list, events: list, subjects: dict, time_: dec, include_modifiers: bool = False
 ) -> dict:
     """
     get current states and modifiers (if requested) for subjects at given time
@@ -394,57 +683,59 @@ def get_current_states_modifiers_by_subject(
     Returns:
         dict: current states by subject. dict of list
     """
-    current_states = {}
-    if
+    current_states: dict = {}
+    if time_.is_nan():
        for idx in subjects:
            current_states[idx] = []
        return current_states

     # check if time contains NA
-    if [x for x in events if
-        check_index = cfg.PJ_OBS_FIELDS[cfg.IMAGES][
+    if [x for x in events if x[cfg.EVENT_TIME_FIELD_IDX].is_nan()]:
+        check_index = cfg.PJ_OBS_FIELDS[cfg.IMAGES][cfg.IMAGE_INDEX]
     else:
         check_index = cfg.EVENT_TIME_FIELD_IDX

     if include_modifiers:
         for idx in subjects:
-            current_states[idx] =
-
-
+            current_states[subjects[idx]["name"]] = {}
+        for x in events:
+            if x[check_index] > time_:
+                break
+            if x[cfg.EVENT_BEHAVIOR_FIELD_IDX] in state_behaviors_codes:
+                if (x[cfg.EVENT_BEHAVIOR_FIELD_IDX], x[cfg.EVENT_MODIFIER_FIELD_IDX]) not in current_states[x[cfg.EVENT_SUBJECT_FIELD_IDX]]:
+                    current_states[x[cfg.EVENT_SUBJECT_FIELD_IDX]][(x[cfg.EVENT_BEHAVIOR_FIELD_IDX], x[cfg.EVENT_MODIFIER_FIELD_IDX])] = (
+                        False
+                    )
+
+                current_states[x[cfg.EVENT_SUBJECT_FIELD_IDX]][
                    (x[cfg.EVENT_BEHAVIOR_FIELD_IDX], x[cfg.EVENT_MODIFIER_FIELD_IDX])
-
-                if x[cfg.EVENT_SUBJECT_FIELD_IDX] == subjects[idx][cfg.SUBJECT_NAME]
-                and x[cfg.EVENT_BEHAVIOR_FIELD_IDX] == sbc
-                and x[check_index] <= time
-            ]
+                ] = not current_states[x[cfg.EVENT_SUBJECT_FIELD_IDX]][(x[cfg.EVENT_BEHAVIOR_FIELD_IDX], x[cfg.EVENT_MODIFIER_FIELD_IDX])]

-
-
+        r: dict = {}
+        for idx in subjects:
+            r[idx] = [f"{bm[0]} ({bm[1]})" for bm in current_states[subjects[idx]["name"]] if current_states[subjects[idx]["name"]][bm]]

     else:
         for idx in subjects:
-            current_states[idx] =
-            for
-
-
-
-
-
-
-
-
-
-
-
-
-                current_states[idx].append(sbc)
+            current_states[subjects[idx]["name"]] = {}
+            for b in state_behaviors_codes:
+                current_states[subjects[idx]["name"]][b] = False
+        for x in events:
+            if x[check_index] > time_:
+                break
+            if x[cfg.EVENT_BEHAVIOR_FIELD_IDX] in state_behaviors_codes:
+                current_states[x[cfg.EVENT_SUBJECT_FIELD_IDX]][x[cfg.EVENT_BEHAVIOR_FIELD_IDX]] = not current_states[
+                    x[cfg.EVENT_SUBJECT_FIELD_IDX]
+                ][x[cfg.EVENT_BEHAVIOR_FIELD_IDX]]
+
+        r: dict = {}
+        for idx in subjects:
+            r[idx] = [b for b in state_behaviors_codes if current_states[subjects[idx]["name"]][b]]

-    return
+    return r


-def get_current_states_modifiers_by_subject_2(
-    state_behaviors_codes: list, events: list, subjects: dict, time: dec
-) -> dict:
+def get_current_states_modifiers_by_subject_2(state_behaviors_codes: list, events: list, subjects: dict, time: dec) -> dict:
     """
     get current states and modifiers for subjects at given time
     differs from get_current_states_modifiers_by_subject in the output format: [behavior, modifiers]
@@ -454,7 +745,6 @@ def get_current_states_modifiers_by_subject_2(
         events (list): list of events
         subjects (dict): dictionary of subjects
         time (Decimal): time
-        include_modifiers (bool): include modifier if True (default: False)

     Returns:
         dict: current states by subject. dict of list
@@ -508,7 +798,8 @@ def get_current_points_by_subject(
         point_events = [
            (x[cfg.EVENT_BEHAVIOR_FIELD_IDX], x[cfg.EVENT_MODIFIER_FIELD_IDX])
            for x in events
-            if x[cfg.EVENT_SUBJECT_FIELD_IDX] == subjects[idx]["name"]
+            if x[cfg.EVENT_SUBJECT_FIELD_IDX] == subjects[idx]["name"]
+            and x[cfg.EVENT_BEHAVIOR_FIELD_IDX] == sbc
            # and abs(x[EVENT_TIME_FIELD_IDX] - time) <= tolerance
            and time <= x[cfg.EVENT_TIME_FIELD_IDX] < (time + tolerance)
        ]
@@ -525,19 +816,6 @@ def get_current_points_by_subject(
     return current_points


-def get_ip_address():
-    """Get current IP address
-
-    Args:
-
-    Returns:
-        str: IP address
-    """
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    s.connect(("8.8.8.8", 80))
-    return s.getsockname()[0]
-
-
 def check_txt_file(file_name: str) -> dict:
     """
     Extract parameters of txt file (test for tsv csv)
@@ -554,35 +832,43 @@ def check_txt_file(file_name: str) -> dict:
     try:
         # snif txt file
         with open(file_name) as csvfile:
-            buff = csvfile.read(
+            buff = csvfile.read(4096)
             snif = csv.Sniffer()
             dialect = snif.sniff(buff)
             has_header = snif.has_header(buff)

         csv.register_dialect("dialect", dialect)
-        rows_len = []
+        rows_len: list = []
         with open(file_name, "r") as f:
             reader = csv.reader(f, dialect="dialect")
             for row in reader:
-
                 if not row:
                     continue
+                """
                if len(row) not in rows_len:
                    rows_len.append(len(row))
                    if len(rows_len) > 1:
                        break
+                """
+                rows_len.append(len(row))
+
+        rows_number = len(rows_len)
+        rows_uniq_len = set(rows_len)

         # test if file empty
-        if not
+        if not rows_uniq_len:
             return {"error": "The file is empty"}

-        if len(
-            return {
-
-
-
+        if len(rows_uniq_len) == 1:
+            return {
+                "homogeneous": True,
+                "fields number": rows_len[0],
+                "separator": dialect.delimiter,
+                "rows number": rows_number,
+                "has header": has_header,
+            }
         else:
-            return {"homogeneous":
+            return {"homogeneous": False}
     except Exception:
         return {"error": str(sys.exc_info()[1])}

@@ -599,15 +885,15 @@ def extract_wav(ffmpeg_bin: str, media_file_path: str, tmp_dir: str) -> str:
         str: wav file path or "" if error
     """

-    wav_file_path =
+    wav_file_path = Path(tmp_dir) / Path(media_file_path + ".wav").name

     # check if media file is a wav file
     try:
         wav = wave.open(media_file_path, "r")
         wav.close()
-
+        logger.debug(f"{media_file_path} is a WAV file. Copying in the temp directory...")
         copyfile(media_file_path, wav_file_path)
-
+        logger.debug(f"{media_file_path} copied in {wav_file_path}")
         return str(wav_file_path)
     except Exception:
         if wav_file_path.is_file():
@@ -622,7 +908,7 @@ def extract_wav(ffmpeg_bin: str, media_file_path: str, tmp_dir: str) -> str:
        )
        out, error = p.communicate()
        out, error = out.decode("utf-8"), error.decode("utf-8")
-
+        logger.debug(f"{out}, {error}")

        if "does not contain any stream" not in error:
            if wav_file_path.is_file():
@@ -638,40 +924,46 @@ def decimal_default(obj):
     raise TypeError


-def complete(
+def complete(lst: list, max_: int) -> list:
     """
     complete list with empty string ("") until len = max

     Args:
-
+        lst (list): list to complete
         max_ (int): number of items to reach

     Returns:
         list: list completed to max_ items with empty string ("")
     """
-    while len(
-
-
-    return l
+    while len(lst) < max_:
+        lst.append("")
+    return lst


 def datetime_iso8601(dt) -> str:
     """
-    current date time in ISO8601 format without
+    current date time in ISO8601 format without microseconds
     example: 2019-06-13 10:01:02

     Returns:
-        str: date time in ISO8601 format
+        str: date time in ISO8601 format without microseconds
     """
-    return dt.isoformat(" "
+    return dt.isoformat(sep=" ", timespec="seconds")


-def seconds_of_day(dt) -> dec:
+def seconds_of_day(timestamp: dt.datetime) -> dec:
     """
     return the number of seconds since start of the day
+
+    Returns:
+        dev: number of seconds since the start of the day
     """

-
+    # logger.debug("function: seconds_of_day")
+    # logger.debug(f"{timestamp = }")
+
+    t = timestamp.time()
+    return dec(t.hour * 3600 + t.minute * 60 + t.second + t.microsecond / 1000000).quantize(dec("0.001"))


 def sorted_keys(d: dict) -> list:
@@ -701,7 +993,7 @@ def intfloatstr(s: str) -> int:
     return s


-def distance(p1, p2):
+def distance(p1: tuple, p2: tuple) -> float:
     """
     euclidean distance between 2 points
     """
@@ -733,6 +1025,62 @@ def angle(vertex: tuple, side1: tuple, side2: tuple) -> float:
     )


+def oriented_angle(P1: tuple, P2: tuple, P3: tuple) -> float:
+    """
+    Calculate the oriented angle between two segments.
+
+    Args:
+        P1 (tuple): Coordinates of the vertex
+        P2 (tuple): Coordinates of the first point
+        P3 (tuple): Coordinates of the second point
+
+    Returns:
+        float: The oriented angle between the two segments in degrees.
+    """
+
+    x1, y1 = P1
+    x2, y2 = P2
+    x3, y3 = P1
+    x4, y4 = P3
+
+    angle_AB = math.atan2(y2 - y1, x2 - x1)
+    angle_CD = math.atan2(y4 - y3, x4 - x3)
+
+    oriented_angle = math.degrees(angle_AB - angle_CD)
+
+    return oriented_angle
+
+
+def oriented_angle_trigo(B: Tuple[float, float], A: Tuple[float, float], C: Tuple[float, float]) -> float:
+    """
+    Calculates the oriented angle between vectors BA and BC, in degrees.
+    The angle is positive in the counter-clockwise (trigonometric) direction.
+
+    Parameters:
+        B: The pivot point (the origin of the vectors BA and BC).
+        A, C: Points that define the vectors.
+
+    Returns:
+        Angle in degrees, between 0 and 360.
+    """
+    # Vectors BA and BC
+    v1 = (A[0] - B[0], A[1] - B[1])
+    v2 = (C[0] - B[0], C[1] - B[1])
+
+    # Dot product and 2D cross product (determinant)
+    dot = v1[0] * v2[0] + v1[1] * v2[1]
+    det = v1[0] * v2[1] - v1[1] * v2[0]
+
+    # Signed angle in radians, then converted to degrees
+    angle_rad = math.atan2(det, dot)
+    angle_deg = math.degrees(angle_rad)
+
+    if angle_deg < 0:
+        angle_deg += 360
+
+    return angle_deg
+
+
 def mem_info():
     """
     get info about total mem, used mem and available mem using:
@@ -750,9 +1098,7 @@ def mem_info():
        process = subprocess.run(["free", "-m"], stdout=subprocess.PIPE)
        # out, err = process.communicate()
        out = process.stdout
-        _, tot_mem, used_mem, _, _, _, available_mem = [
-            x.decode("utf-8") for x in out.split(b"\n")[1].split(b" ") if x != b""
-        ]
+        _, tot_mem, used_mem, _, _, _, available_mem = [x.decode("utf-8") for x in out.split(b"\n")[1].split(b" ") if x != b""]
        return False, {
            "total_memory": int(tot_mem),
            "used_memory": int(used_mem),
@@ -771,11 +1117,8 @@ def mem_info():
            return True, {"msg": error_info(sys.exc_info())[0]}

     if sys.platform.startswith("win"):
-
        try:
-            output = subprocess.run(
-                ["wmic", "computersystem", "get", "TotalPhysicalMemory", "/", "Value"], stdout=subprocess.PIPE
-            )
+            output = subprocess.run(["wmic", "computersystem", "get", "TotalPhysicalMemory", "/", "Value"], stdout=subprocess.PIPE)
            tot_mem = int(output.stdout.strip().split(b"=")[-1].decode("utf-8")) / 1024 / 1024

            output = subprocess.run(["wmic", "OS", "get", "FreePhysicalMemory", "/", "Value"], stdout=subprocess.PIPE)
@@ -788,7 +1131,7 @@ def mem_info():
     return True, {"msg": "Unknown operating system"}


-def polygon_area(poly):
+def polygon_area(poly: list) -> float:
     """
     area of polygon
     from http://www.mathopenref.com/coordpolygonarea.html
@@ -803,7 +1146,20 @@ def polygon_area(poly):
     return abs(tot / 2)


-def
+def polyline_length(poly: list) -> float:
+    """
+    length of polyline
+    """
+    tot = 0
+    for p in range(1, len(poly)):
+        x1, y1 = poly[p - 1]
+        x2, y2 = poly[p]
+        tot += ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
+
+    return tot
+
+
+def url2path(url: str) -> str:
     """
     convert URL in local path name
     under windows, check if path name begin with /
@@ -834,14 +1190,20 @@ def time2seconds(time_: str) -> dec:
         Decimal: time in seconds
     """

-
-
-
-
-
-
-
-
+    if " " in time_:
+        try:
+            return dec(str(dt.datetime.strptime(time_, "%Y-%m-%d %H:%M:%S.%f").timestamp()))
+        except Exception:
+            return dec("0.000")
+    else:
+        try:
+            flag_neg = "-" in time_
+            time_ = time_.replace("-", "")
+            tsplit = time_.split(":")
+            h, m, s = int(tsplit[0]), int(tsplit[1]), dec(tsplit[2])
+            return dec(-(h * 3600 + m * 60 + s)) if flag_neg else dec(h * 3600 + m * 60 + s)
+        except Exception:
+            return dec("0.000")


 def seconds2time(sec: dec) -> str:
@@ -855,10 +1217,11 @@ def seconds2time(sec: dec) -> str:
     """

     if math.isnan(sec):
-        return
+        return cfg.NA

-    if sec >
-
+    # if sec > one day treat as date
+    if sec > cfg.DATE_CUTOFF:
+        t = dt.datetime.fromtimestamp(float(sec))
         return f"{t:%Y-%m-%d %H:%M:%S}.{t.microsecond / 1000:03.0f}"

     neg_sign = "-" * (sec < 0)
@@ -917,7 +1280,7 @@ def eol2space(s: str) -> str:
     return s.replace("\r\n", " ").replace("\n", " ").replace("\r", " ")


-def test_ffmpeg_path(FFmpegPath):
+def test_ffmpeg_path(FFmpegPath: str) -> Tuple[bool, str]:
     """
     test if ffmpeg has valid path

@@ -929,22 +1292,20 @@ def test_ffmpeg_path(FFmpegPath):
         str: message
     """

-    out, error = subprocess.Popen(
-
-
-    logging.debug(f"test ffmpeg path output: {out}")
-    logging.debug(f"test ffmpeg path error: {error}")
+    out, error = subprocess.Popen(f'"{FFmpegPath}" -version', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
+    logger.debug(f"test ffmpeg path output: {out}")
+    logger.debug(f"test ffmpeg path error: {error}")

     if (b"avconv" in out) or (b"the Libav developers" in error):
         return False, "Please use FFmpeg from https://www.ffmpeg.org in place of FFmpeg from Libav project."

     if (b"ffmpeg version" not in out) and (b"ffmpeg version" not in error):
-        return False, "FFmpeg is required but it was not found
+        return False, "FFmpeg is required but it was not found.<br>See https://www.ffmpeg.org"

     return True, ""


-def check_ffmpeg_path():
+def check_ffmpeg_path() -> Tuple[bool, str]:
     """
     check for ffmpeg path
     firstly search for embedded version
@@ -955,45 +1316,110 @@ def check_ffmpeg_path():
         str: if bool True returns ffmpegpath else returns error message
     """

+    # search embedded ffmpeg
     if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
+        ffmpeg_executable = Path("ffmpeg")
+    elif sys.platform.startswith("win"):
+        ffmpeg_executable = Path("ffmpeg.exe")

-
-
-
-
-
-        ffmpeg_path = pl.Path(sys.argv[0]).resolve().parent / "misc" / "ffmpeg"
-
-        if not ffmpeg_path.is_file():
-            # search global ffmpeg
-            ffmpeg_path = "ffmpeg"
-
-        # test ffmpeg
-        r, msg = test_ffmpeg_path(str(ffmpeg_path))
-        if r:
-            return True, str(ffmpeg_path)
-        else:
-            return False, "FFmpeg is not available"
+    ffmpeg_path = Path(__file__).parent / "misc" / ffmpeg_executable
+
+    if not ffmpeg_path.is_file():
+        # search global ffmpeg
+        ffmpeg_path = ffmpeg_executable

+    # test ffmpeg
+    r, msg = test_ffmpeg_path(str(ffmpeg_path))
+    if r:
+        return True, str(ffmpeg_path)
+    else:
+        return False, "FFmpeg is not available"
+
+
+def smart_size_format(n: Union[float, int, str, None]) -> str:
+    """
+    format with kb, Mb or Gb in base of value
+    """
+    if n is None:
+        return cfg.NA
+    if str(n) == "NA":
+        return cfg.NA
+    if math.isnan(n):
+        return cfg.NA
+    if n < 1_000:
+        return f"{n:,.1f} b"
+    if n < 1_000_000:
+        return f"{n / 1_000:,.1f} Kb"
+    if n < 1_000_000_000:
+        return f"{n / 1_000_000:,.1f} Mb"
+    return f"{n / 1_000_000_000:,.1f} Gb"
+
+
+def get_systeminfo() -> str:
+    """
+    returns info about the system
+    """
+
+    mpv_lib_version_, mpv_lib_file_path, mpv_api_version = mpv_lib_version()
+
+    system_info = (
+        f"BORIS version: {version.__version__}\n"
+        f"OS: {platform.uname().system} {platform.uname().release} {platform.uname().version}\n"
+        f"CPU: {platform.uname().machine} {platform.uname().processor}\n"
+        f"Python {platform.python_version()} ({'64-bit' if sys.maxsize > 2**32 else '32-bit'})\n"
+        f"Qt {qVersion()} - PySide {pyside6_version}\n"
+        f"MPV library version: {mpv_lib_version_}\n"
+        f"MPV API version: {mpv_api_version}\n"
+        f"MPV library file path: {mpv_lib_file_path}\n\n"
+    )
+
+    r, memory = mem_info()
+    if not r:
+        system_info += (
+            f"Memory (RAM) Total: {memory.get('total_memory', 'Not available'):.2f} Mb "
+            f"Free: {memory.get('free_memory', 'Not available'):.2f} Mb\n\n"
+        )
+
+    return system_info
+
+    """
+    # system info
+    systeminfo = ""
    if sys.platform.startswith("win"):
+        # systeminfo = subprocess.getoutput("systeminfo")
+        systeminfo = subprocess.run("systeminfo /FO csv /NH", capture_output=True, text=True, encoding="mbcs", shell=True).stdout
+
+        import csv
+        from io import StringIO
+
+        # Parse it as CSV
+        f = StringIO(systeminfo)
+        reader = csv.reader(f)
+        parsed_data = list(reader)[0]
+        # Print specific fields by index
+        info_to_show = ""
+        info_to_show += f"Computer Name: {parsed_data[0]}\n"
+        info_to_show += f"OS Name: {parsed_data[1]}\n"
+        info_to_show += f"OS Version: {parsed_data[2]}\n"
+        info_to_show += f"System Manufacturer: {parsed_data[11]}\n"
+        info_to_show += f"System Model: {parsed_data[12]}\n"
+        info_to_show += f"Processor: {parsed_data[14]}\n"
+        info_to_show += f"Locale: {parsed_data[19]}\n"
+        info_to_show += f"Installed Memory: {parsed_data[22]}\n"
+
+        # info about graphic card
+        graphic_info = subprocess.run(
+            "wmic path win32_videocontroller get name", capture_output=True, text=True, encoding="mbcs", shell=True
+        ).stdout
+        info_to_show += graphic_info.replace("\n", "").replace("Name", "Graphic card model")
+
+        systeminfo = info_to_show

-
-
-
-
-
-        ffmpeg_path = pl.Path(sys.argv[0]).resolve().parent / "misc" / "ffmpeg.exe"
-
-        if not ffmpeg_path.is_file():
-            # search global ffmpeg
-            ffmpeg_path = "ffmpeg"
-
-        # test ffmpeg
-        r, msg = test_ffmpeg_path(str(ffmpeg_path))
-        if r:
-            return True, str(ffmpeg_path)
-        else:
-            return False, "FFmpeg is not available"
+    if sys.platform.startswith("linux"):
+        systeminfo = subprocess.getoutput("cat /etc/*rel*; uname -a")
+
+    return systeminfo
+    """


 def ffprobe_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:
@@ -1007,73 +1433,144 @@ def ffprobe_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:
|
|
|
1007
1433
|
Returns:
|
|
1008
1434
|
dict
|
|
1009
1435
|
"""
|
|
1010
|
-
# ffprobe -v quiet -print_format json -show_format -show_streams /tmp/ramdisk/video1.mp4
|
|
1011
|
-
ffprobe_bin = ffmpeg_bin.replace("ffmpeg", "ffprobe")
|
|
1012
1436
|
|
|
1013
|
-
|
|
1437
|
+
# check ffprobe executable in same place than ffmpeg
|
|
1438
|
+
ffprobe_bin = ffmpeg_bin.replace("ffmpeg", "ffprobe")
|
|
1439
|
+
if not Path(ffprobe_bin).is_file():
|
|
1440
|
+
if which(ffprobe_bin) is None:
|
|
1441
|
+
return {"error": "ffprobe not found"}
|
|
1014
1442
|
|
|
1015
|
-
|
|
1443
|
+
command = f'"{ffprobe_bin}" -hide_banner -v error -print_format json -show_format -show_streams "{file_name}"'
|
|
1016
1444
|
|
|
1017
1445
|
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
|
|
1018
1446
|
out, error = p.communicate()
|
|
1447
|
+
if error:
|
|
1448
|
+
if b"invalid data" in error:
|
|
1449
|
+
return {"error": f"{error}"}
|
|
1019
1450
|
|
|
1020
1451
|
try:
|
|
1021
|
-
hasVideo
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1452
|
+
hasVideo = False
|
|
1453
|
+
hasAudio = False
|
|
1454
|
+
"""bitrate = None"""
|
|
1455
|
+
video_bitrate = None
|
|
1456
|
+
audio_bitrate = []
|
|
1457
|
+
resolution = None
|
|
1458
|
+
fps: float = 0
|
|
1459
|
+
sample_rate = None
|
|
1460
|
+
duration = None
|
|
1461
|
+
audio_duration = cfg.NA
|
|
1462
|
+
frames_number = None
|
|
1463
|
+
size = None
|
|
1464
|
+
audio_codec = None
|
|
1465
|
+
video_codec = None
|
|
1466
|
+
|
|
1033
1467
|
video_param = json.loads(out.decode("utf-8"))
|
|
1034
1468
|
if "size" in video_param["format"]:
|
|
1035
1469
|
size = int(video_param["format"]["size"])
|
|
1036
1470
|
|
|
1037
1471
|
for stream in video_param["streams"]:
|
|
1038
|
-
|
|
1039
1472
|
if stream["codec_type"] == "video":
|
|
1040
1473
|
hasVideo = True
|
|
1041
|
-
|
|
1474
|
+
video_bitrate = int(stream["bit_rate"]) if "bit_rate" in stream else None
|
|
1042
1475
|
resolution = f"{stream['width']}x{stream['height']}"
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1476
|
+
|
|
1477
|
+
"""
|
|
1478
|
+
if "avg_frame_rate" in stream:
|
|
1479
|
+
if stream["avg_frame_rate"] == "0/0":
|
|
1480
|
+
fps = 0
|
|
1481
|
+
else:
|
|
1482
|
+
try:
|
|
1483
|
+
fps = eval(stream["avg_frame_rate"])
|
|
1484
|
+
except Exception:
|
|
1485
|
+
fps = 0
|
|
1486
|
+
"""
|
|
1487
|
+
if "r_frame_rate" in stream:
|
|
1488
|
+
if stream["r_frame_rate"] == "0/0":
|
|
1489
|
+
fps = 0
|
|
1490
|
+
else:
|
|
1491
|
+
try:
|
|
1492
|
+
fps = eval(stream["r_frame_rate"])
|
|
1493
|
+
except Exception:
|
|
1494
|
+
fps = 0
|
|
1495
|
+
if fps >= 1000 and "avg_frame_rate" in stream: # case for some h265 video ("r_frame_rate": "1200000/1")
|
|
1496
|
+
try:
|
|
1497
|
+
fps = eval(stream["avg_frame_rate"])
|
|
1498
|
+
except Exception:
|
|
1499
|
+
pass
|
|
1500
|
+
|
|
1501
|
+
fps = round(fps, 3)
|
|
1502
|
+
|
|
1503
|
+
if "duration" in stream:
|
|
1504
|
+
duration = float(stream["duration"])
|
|
1505
|
+
if "duration_ts" in stream:
|
|
1506
|
+
frames_number = int(stream["duration_ts"])
|
|
1507
|
+
elif "nb_frames" in stream:
|
|
1508
|
+
frames_number = int(stream["nb_frames"])
|
|
1509
|
+
else:
|
|
1510
|
+
frames_number = None
|
|
1511
|
+
|
|
1046 1512                 video_codec = stream["codec_long_name"] if "codec_long_name" in stream else None
1047 1513
1048 1514                 if stream["codec_type"] == "audio":
1049 1515                     hasAudio = True
1050       -                  sample_rate = float(stream["sample_rate"])
1051       -
     1516  +                  sample_rate = float(stream["sample_rate"]) if "sample_rate" in stream else cfg.NA
     1517  +                  # TODO manage audio_duration parameter
     1518  +                  audio_duration = float(stream["duration"]) if "duration" in stream else cfg.NA
1052 1519                     audio_codec = stream["codec_long_name"]
     1520  +                  audio_bitrate.append(int(stream.get("bit_rate", 0)))
     1521  +
     1522  +          # check duration
     1523  +          if duration is None:
     1524  +              if "duration" in video_param["format"]:
     1525  +                  duration = float(video_param["format"]["duration"])
     1526  +              else:
     1527  +                  duration = 0
     1528  +
     1529  +          # check bit rate
     1530  +          if "bit_rate" in video_param["format"]:
     1531  +              all_bitrate = int(video_param["format"]["bit_rate"])
     1532  +          else:
     1533  +              all_bitrate = None
     1534  +
     1535  +          if video_bitrate is None and all_bitrate is not None:
     1536  +              video_bitrate = all_bitrate - sum(audio_bitrate)
     1537  +
     1538  +          # extract format long name
     1539  +          format_long_name = video_param["format"]["format_long_name"] if "format_long_name" in video_param["format"] else cfg.NA
     1540  +
     1541  +          # extract creation time ("creation_time": "2023-03-22T16:50:32.000000Z")
     1542  +          creation_time = cfg.NA
     1543  +          if "tags" in video_param["format"] and "creation_time" in video_param["format"]["tags"]:
     1544  +              creation_time = video_param["format"]["tags"]["creation_time"].replace("T", " ")
     1545  +              if "." in creation_time:
     1546  +                  creation_time = creation_time.split(".")[0]
1053 1547
1054 1548             return {
1055 1549                 "analysis_program": "ffprobe",
1056 1550                 "frames_number": frames_number,
1057 1551                 "duration_ms": duration * 1000,
1058 1552                 "duration": duration,
     1553  +              "audio_duration": audio_duration,
1059 1554                 "fps": fps,
1060 1555                 "has_video": hasVideo,
1061 1556                 "has_audio": hasAudio,
1062       -              "bitrate":
     1557  +              "bitrate": video_bitrate,
1063 1558                 "resolution": resolution,
1064 1559                 "sample_rate": sample_rate,
1065 1560                 "file size": size,
1066 1561                 "audio_codec": audio_codec,
1067 1562                 "video_codec": video_codec,
     1563  +              "creation_time": creation_time,
     1564  +              "format_long_name": format_long_name,
1068 1565             }
1069 1566
1070       -      except Exception:
1071       -          return {}
     1567  +      except Exception as e:
     1568  +          return {"error": str(e)}
1072 1569
1073 1570
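The hunks above replace the former ffmpeg-only probe with a JSON-based ffprobe analysis (`-print_format json -show_format -show_streams`) that collects per-stream bitrate, frame rate, duration, codec, creation time and container format, and that now reports failures as an `"error"` entry instead of an empty dict. The sketch below is a minimal, self-contained illustration of that probing pattern; the `media_info` helper name and the use of `fractions.Fraction` instead of `eval()` for rates like `"30000/1001"` are illustrative choices, not BORIS code.

```python
# Minimal sketch (not BORIS code) of the ffprobe JSON probing pattern used above.
import json
import subprocess
from fractions import Fraction

def media_info(ffprobe_bin: str, file_name: str) -> dict:
    cmd = [ffprobe_bin, "-hide_banner", "-v", "error",
           "-print_format", "json", "-show_format", "-show_streams", file_name]
    out = subprocess.run(cmd, capture_output=True).stdout
    info = json.loads(out.decode("utf-8"))
    result = {"duration": float(info["format"].get("duration", 0)), "fps": 0, "resolution": None}
    for stream in info.get("streams", []):
        if stream.get("codec_type") == "video":
            result["resolution"] = f"{stream['width']}x{stream['height']}"
            # r_frame_rate is a fraction string such as "30000/1001" (~29.97 fps)
            if stream.get("r_frame_rate", "0/0") != "0/0":
                result["fps"] = round(float(Fraction(stream["r_frame_rate"])), 3)
    return result
```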
1074 1571   def accurate_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:
1075 1572       """
1076       -      analyse frame rate and video duration with ffmpeg
     1573  +      analyse frame rate and video duration with ffprobe or ffmpeg if ffprobe not available
1077 1574       Returns parameters: duration, duration_ms, bitrate, frames_number, fps, has_video (True/False), has_audio (True/False)
1078 1575
1079 1576       Args:

@@ -1087,19 +1584,22 @@ def accurate_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:

1087 1584
1088 1585       ffprobe_results = ffprobe_media_analysis(ffmpeg_bin, file_name)
1089 1586
1090       -
     1587  +      logger.debug(f"file_name: {file_name}")
     1588  +      logger.debug(f"ffprobe_results: {ffprobe_results}")
     1589  +
     1590  +      if ("error" not in ffprobe_results) and (ffprobe_results["bitrate"] is not None):
1091 1591           return ffprobe_results
1092 1592       else:
1093 1593           # use ffmpeg
1094       -          command = f'"{ffmpeg_bin}" -i "{file_name}" > {"NUL" if sys.platform.startswith("win") else "/dev/null"}'
     1594  +          command = f'"{ffmpeg_bin}" -hide_banner -i "{file_name}" > {"NUL" if sys.platform.startswith("win") else "/dev/null"}'
1095 1595
1096 1596           p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
1097 1597
1098       -          duration, fps, hasVideo, hasAudio, bitrate = 0, 0, False, False,
     1598  +          duration, fps, hasVideo, hasAudio, bitrate = 0, 0, False, False, None
1099 1599           try:
1100       -              error = p.communicate()
1101       -          except Exception:
1102       -              return {"error":
     1600  +              _, error = p.communicate()
     1601  +          except Exception as e:
     1602  +              return {"error": str(e)}
1103 1603
1104 1604           rows = error.split(b"\n")
1105 1605
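`accurate_media_analysis()` now logs the ffprobe result and only falls back to scraping ffmpeg's stderr when ffprobe reported an error or could not determine a bitrate. The fallback relies on the fact that `ffmpeg -hide_banner -i FILE` (with no output file) exits with an error but still prints the media summary (`Duration: ...`, `bitrate: ... kb/s`, `Stream #...` lines) on stderr. A minimal sketch of capturing those rows, illustrative only and not the package's code:

```python
# Illustrative sketch: run "ffmpeg -hide_banner -i FILE" and keep its stderr lines,
# which the fallback branch above then scans with regular expressions.
import subprocess

def ffmpeg_stderr_rows(ffmpeg_bin: str, file_name: str) -> list:
    p = subprocess.run([ffmpeg_bin, "-hide_banner", "-i", file_name], capture_output=True)
    return p.stderr.split(b"\n")
```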
@@ -1125,10 +1625,10 @@ def accurate_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:

1125 1625                   if b"bitrate:" in row:
1126 1626                       re_results = re.search(b"bitrate: (.{1,10}) kb", row, re.IGNORECASE)
1127 1627                       if re_results:
1128       -                          bitrate = int(re_results.group(1).strip())
     1628  +                          bitrate = int(re_results.group(1).strip()) * 1000
1129 1629                       break
1130 1630           except Exception:
1131       -              bitrate =
     1631  +              bitrate = None
1132 1632
1133 1633           # fps
1134 1634           fps = 0
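The only functional change in this hunk is the `* 1000`: the kb/s value scraped from ffmpeg's summary line is now scaled to bits per second, so the fallback matches the unit returned by the ffprobe branch. A quick illustrative check (the sample line is invented):

```python
# Illustrative check of the kb/s parsing above: "bitrate: 1205 kb/s" becomes 1205000 bit/s.
import re

row = b"  Duration: 00:01:00.00, start: 0.000000, bitrate: 1205 kb/s"
m = re.search(b"bitrate: (.{1,10}) kb", row, re.IGNORECASE)
bitrate = int(m.group(1).strip()) * 1000 if m else None
assert bitrate == 1205000
```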
@@ -1149,7 +1649,7 @@ def accurate_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:

1149 1649                   if b"Stream #" in row and b"Video:" in row:
1150 1650                       hasVideo = True
1151 1651                       # get resolution \d{3,5}x\d{3,5}
1152       -                      re_results = re.search(
1652       +                      re_results = re.search(r"\d{3,5}x\d{3,5}", row, re.IGNORECASE)
1153 1653                       if re_results:
1154 1654                           resolution = re_results.group(0).decode("utf-8")
1155 1655                       break
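The previously truncated `re.search(` call is shown restored with the `\d{3,5}x\d{3,5}` pattern, which pulls the `WxH` token out of the Video stream line. An illustrative check follows; a bytes pattern is used here because the scanned rows are bytes (the sample line is invented):

```python
# Illustrative check of the resolution pattern above, applied to a typical Stream line.
import re

row = b"  Stream #0:0: Video: h264 (High), yuv420p, 1920x1080, 1205 kb/s, 25 fps"
m = re.search(rb"\d{3,5}x\d{3,5}", row, re.IGNORECASE)
resolution = m.group(0).decode("utf-8") if m else None
assert resolution == "1920x1080"
```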
@@ -1174,11 +1674,13 @@ def accurate_media_analysis(ffmpeg_bin: str, file_name: str) -> dict:

1174 1674               "frames_number": int(fps * duration),
1175 1675               "duration_ms": duration * 1000,
1176 1676               "duration": duration,
     1677  +              "audio_duration": cfg.NA,
1177 1678               "fps": fps,
1178 1679               "has_video": hasVideo,
1179 1680               "has_audio": hasAudio,
1180 1681               "bitrate": bitrate,
1181 1682               "resolution": resolution,
     1683  +              "format_long_name": "",
1182 1684           }
1183 1685
1184 1686
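With `audio_duration` and `format_long_name` added to this fallback dictionary, both branches of `accurate_media_analysis()` now expose the same keys. A possible caller-side usage, with invented binary and file names:

```python
# Hypothetical usage: the same keys are readable whichever tool produced the result;
# "error" is only present when the analysis failed in both branches.
media = accurate_media_analysis("ffmpeg", "observation1.mp4")
if "error" in media:
    print(media["error"])
else:
    print(f'{media["resolution"]} @ {media["fps"]} fps, {media["duration"]} s, {media["bitrate"]} bit/s')
```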
@@ -1199,7 +1701,7 @@ def behavior_color(colors_list: list, idx: int, default_color: str = "darkgray")

1199 1701       """
1200 1702
1201 1703       try:
1202       -          return colors_list[idx % len(colors_list)]
     1704  +          return colors_list[idx % len(colors_list)].replace("tab:", "")
1203 1705       except Exception:
1204 1706           return default_color
1205 1707
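`behavior_color()` still cycles through the palette by index but now strips matplotlib's `tab:` prefix from the returned name. A small illustration with an invented palette:

```python
# Illustrative calls (palette values invented, not from the package).
palette = ["tab:blue", "tab:orange", "red"]
assert behavior_color(palette, 0) == "blue"
assert behavior_color(palette, 4) == "orange"   # 4 % 3 == 1
assert behavior_color([], 2) == "darkgray"      # empty list falls back to the default color
```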
@@ -1219,12 +1721,37 @@ def all_behaviors(ethogram: dict) -> list:

1219 1721       return [ethogram[x][cfg.BEHAVIOR_CODE] for x in sorted_keys(ethogram)]
1220 1722
1221 1723
     1724  +  def all_subjects(subjects: dict) -> list:
     1725  +      """
     1726  +      extract all subjects from the subject configuration dictionary
     1727  +
     1728  +      Args:
     1729  +          subject configuration (dict)
     1730  +
     1731  +      Returns:
     1732  +          list: subjects name
     1733  +      """
     1734  +
     1735  +      return [subjects[x][cfg.SUBJECT_NAME] for x in sorted_keys(subjects)]
     1736  +
     1737  +
     1738  +  def has_coding_map(ethogram: dict, behavior_idx: str) -> bool:
     1739  +      """
     1740  +      check if behavior index has a coding map
     1741  +      """
     1742  +      if not ethogram.get(behavior_idx, False):
     1743  +          return False
     1744  +      if not ethogram[behavior_idx].get("coding map", False):
     1745  +          return False
     1746  +      return True
     1747  +
     1748  +
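Two module-level helpers are added here: `all_subjects()` mirrors `all_behaviors()` for the subject configuration, and `has_coding_map()` checks whether a behavior row carries a non-empty `"coding map"` entry. A simplified usage sketch follows; the dictionaries are abridged compared with a real BORIS project, which stores more keys per subject and behavior:

```python
# Simplified usage sketch for the two new helpers (dictionary layout abridged).
subjects = {"0": {cfg.SUBJECT_NAME: "subject #1"}, "1": {cfg.SUBJECT_NAME: "subject #2"}}
ethogram = {"0": {cfg.BEHAVIOR_CODE: "walk", "coding map": ""},
            "1": {cfg.BEHAVIOR_CODE: "groom", "coding map": "body map"}}

assert all_subjects(subjects) == ["subject #1", "subject #2"]
assert has_coding_map(ethogram, "1") is True
assert has_coding_map(ethogram, "0") is False    # empty "coding map" value
assert has_coding_map(ethogram, "99") is False   # unknown behavior index
```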
1222 1749   def dir_images_number(dir_path_str: str) -> dict:
1223 1750       """
1224       -      return number of images in dir_path (
     1751  +      return number of images in dir_path (see cfg.IMAGE_EXTENSIONS)
1225 1752       """
1226 1753
1227       -      dir_path =
     1754  +      dir_path = Path(dir_path_str)
1228 1755       if not dir_path.is_dir():
1229 1756           return {"error": f"The directory {dir_path_str} does not exists"}
1230 1757       img_count = 0
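Only the docstring and the `dir_path = Path(dir_path_str)` initialisation of `dir_images_number()` appear in this hunk; the counting loop itself lies outside it. A plausible equivalent of that loop is sketched below, illustrative only: the extension tuple and the result key are stand-ins for the real `cfg.IMAGE_EXTENSIONS` and return format.

```python
# Illustrative sketch (assumed extension list and result key, not the package's code):
# count regular files whose suffix looks like an image extension.
from pathlib import Path

IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png")   # stand-in for cfg.IMAGE_EXTENSIONS

def count_images(dir_path_str: str) -> dict:
    dir_path = Path(dir_path_str)
    if not dir_path.is_dir():
        return {"error": f"The directory {dir_path_str} does not exist"}
    n = sum(1 for f in dir_path.iterdir() if f.is_file() and f.suffix.lower() in IMAGE_EXTENSIONS)
    return {"images number": n}
```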
@@ -1262,29 +1789,11 @@ def intersection(A, B, C, D):

1262 1789               ym = slope * xm + intersept
1263 1790           else:
1264 1791               xm = (
1265       -                  (
1266       -                      xd * xa * yc
1267       -                      - xd * xb * yc
1268       -                      - xd * xa * yb
1269       -                      - xc * xa * yd
1270       -                      + xc * xa * yb
1271       -                      + xd * ya * xb
1272       -                      + xc * xb * yd
1273       -                      - xc * ya * xb
1274       -                  )
     1792  +                  (xd * xa * yc - xd * xb * yc - xd * xa * yb - xc * xa * yd + xc * xa * yb + xd * ya * xb + xc * xb * yd - xc * ya * xb)
1275 1793                   / (-yb * xd + yb * xc + ya * xd - ya * xc + xb * yd - xb * yc - xa * yd + xa * yc)
1276 1794               ).quantize(dec(".001"), rounding=ROUND_DOWN)
1277 1795               ym = (
1278       -                  (
1279       -                      yb * xc * yd
1280       -                      - yb * yc * xd
1281       -                      - ya * xc * yd
1282       -                      + ya * yc * xd
1283       -                      - xa * yb * yd
1284       -                      + xa * yb * yc
1285       -                      + ya * xb * yd
1286       -                      - ya * xb * yc
1287       -                  )
     1796  +                  (yb * xc * yd - yb * yc * xd - ya * xc * yd + ya * yc * xd - xa * yb * yd + xa * yb * yc + ya * xb * yd - ya * xb * yc)
1288 1797                   / (-yb * xd + yb * xc + ya * xd - ya * xc + xb * yd - xb * yc - xa * yd + xa * yc)
1289 1798               ).quantize(dec(".001"), rounding=ROUND_DOWN)
1290 1799

@@ -1293,16 +1802,7 @@ def intersection(A, B, C, D):

1293 1802           ymin1, ymax1 = min(ya, yb), max(ya, yb)
1294 1803           ymin2, ymax2 = min(yc, yd), max(yc, yd)
1295 1804
1296       -          return (
1297       -              xm >= xmin1
1298       -              and xm <= xmax1
1299       -              and xm >= xmin2
1300       -              and xm <= xmax2
1301       -              and ym >= ymin1
1302       -              and ym <= ymax1
1303       -              and ym >= ymin2
1304       -              and ym <= ymax2
1305       -          )
     1805  +          return xm >= xmin1 and xm <= xmax1 and xm >= xmin2 and xm <= xmax2 and ym >= ymin1 and ym <= ymax1 and ym >= ymin2 and ym <= ymax2
1306 1806
1307 1807       except Exception:  # for cases xa=xb=xc=xd
1308 1808           return True
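The changes to `intersection()` are behaviour-preserving reformatting: the two Decimal determinant expressions and the final range test are collapsed onto single lines. The acceptance test still means the same thing: the crossing point `(xm, ym)` of the infinite lines through AB and CD only counts as an intersection if it lies inside both segments' x and y ranges. An illustrative restatement of that test (not the package's code, argument shapes invented):

```python
# Illustrative restatement of the acceptance test above: a candidate crossing point
# is an intersection only if it falls within the coordinate ranges of both segments.
def point_in_both_ranges(xm, ym, a, b, c, d) -> bool:
    (xa, ya), (xb, yb), (xc, yc), (xd, yd) = a, b, c, d
    return (min(xa, xb) <= xm <= max(xa, xb) and min(xc, xd) <= xm <= max(xc, xd)
            and min(ya, yb) <= ym <= max(ya, yb) and min(yc, yd) <= ym <= max(yc, yd))

# Segments (0,0)-(2,2) and (0,2)-(2,0) cross at (1,1), which lies in both ranges:
assert point_in_both_ranges(1, 1, (0, 0), (2, 2), (0, 2), (2, 0)) is True
```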