boris-behav-obs 9.6__tar.gz → 9.6.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {boris_behav_obs-9.6/boris_behav_obs.egg-info → boris_behav_obs-9.6.2}/PKG-INFO +2 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/add_modifier.py +1 -5
- boris_behav_obs-9.6.2/boris/analysis_plugins/irr_cohen_kappa.py +72 -0
- boris_behav_obs-9.6.2/boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py +77 -0
- boris_behav_obs-9.6.2/boris/analysis_plugins/irr_weighted_cohen_kappa.py +120 -0
- boris_behav_obs-9.6.2/boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py +125 -0
- boris_behav_obs-9.6.2/boris/analysis_plugins/number_of_occurences_by_independent_variable.py +54 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/boris_cli.py +1 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/core.py +2 -4
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/core_qrc.py +3 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/exclusion_matrix.py +1 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/irr.py +10 -22
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/menu_options.py +2 -0
- boris_behav_obs-9.6.2/boris/portion/__init__.py +31 -0
- boris_behav_obs-9.6.2/boris/portion/const.py +95 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/portion/dict.py +5 -5
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/portion/func.py +2 -2
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/portion/interval.py +21 -41
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/portion/io.py +41 -32
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/project_functions.py +2 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/state_events.py +1 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/transitions.py +1 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/version.py +2 -2
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/write_event.py +4 -4
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2/boris_behav_obs.egg-info}/PKG-INFO +2 -1
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris_behav_obs.egg-info/SOURCES.txt +4 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris_behav_obs.egg-info/requires.txt +1 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/pyproject.toml +3 -2
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_db_functions.py +1 -6
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_export_observation.py +3 -21
- boris_behav_obs-9.6.2/tests/test_irr.py +300 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_observation_gui.py +29 -27
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_otx_parser.py +6 -7
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_preferences_gui.py +8 -12
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_project_functions.py +37 -54
- boris_behav_obs-9.6/boris/analysis_plugins/number_of_occurences_by_independent_variable.py +0 -34
- boris_behav_obs-9.6/boris/portion/__init__.py +0 -21
- boris_behav_obs-9.6/boris/portion/const.py +0 -78
- boris_behav_obs-9.6/tests/test_irr.py +0 -342
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/LICENSE.TXT +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/MANIFEST.in +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/README.TXT +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/README.md +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/__init__.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/__main__.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/about.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/add_modifier_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/advanced_event_filtering.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/analysis_plugins/__init__.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/analysis_plugins/_latency.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/analysis_plugins/list_of_dataframe_columns.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/analysis_plugins/number_of_occurences.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/analysis_plugins/time_budget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/behav_coding_map_creator.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/behavior_binary_table.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/behaviors_coding_map.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/cmd_arguments.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/coding_pad.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/config.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/config_file.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/connections.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/converters.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/converters_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/cooccurence.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/core_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/db_functions.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/dev.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/dialog.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/duration_widget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/edit_event.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/edit_event_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/event_operations.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/events_cursor.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/events_snapshots.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/export_events.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/export_observation.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/external_processes.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/geometric_measurement.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/gui_utilities.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/image_overlay.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/import_observations.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/latency.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/measurement_widget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/media_file.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/modifier_coding_map_creator.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/modifiers_coding_map.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/mpv-1.0.3.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/mpv.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/mpv2.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/observation.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/observation_operations.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/observation_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/observations_list.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/otx_parser.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/param_panel.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/param_panel_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/player_dock_widget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/plot_data_module.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/plot_events.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/plot_events_rt.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/plot_spectrogram_rt.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/plot_waveform_rt.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/plugins.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/preferences.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/preferences_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/project.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/project_import_export.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/project_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/qrc_boris.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/qrc_boris5.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/select_modifiers.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/select_observations.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/select_subj_behav.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/subjects_pad.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/synthetic_time_budget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/time_budget_functions.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/time_budget_widget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/utilities.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/video_equalizer.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/video_equalizer_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/video_operations.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/view_df.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris/view_df_ui.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris_behav_obs.egg-info/dependency_links.txt +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris_behav_obs.egg-info/entry_points.txt +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/boris_behav_obs.egg-info/top_level.txt +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/setup.cfg +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_time_budget.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_utilities.py +0 -0
- {boris_behav_obs-9.6 → boris_behav_obs-9.6.2}/tests/test_utilities2.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: boris-behav-obs
-Version: 9.6
+Version: 9.6.2
 Summary: BORIS - Behavioral Observation Research Interactive Software
 Author-email: Olivier Friard <olivier.friard@unito.it>
 License-Expression: GPL-3.0-only
@@ -27,6 +27,7 @@ Requires-Dist: pyreadr
 Requires-Dist: pyside6==6.9
 Requires-Dist: hachoir>=3.3.0
 Requires-Dist: scipy>=1.15.3
+Requires-Dist: scikit-learn>=1.7.1
 Provides-Extra: dev
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
@@ -123,11 +123,7 @@ class addModifierDialog(QDialog, Ui_Dialog):
         if (
             dialog.MessageDialog(
                 cfg.programName,
-                (
-                    "You are working on a behavior.<br>"
-                    "If you close the window it will be lost.<br>"
-                    "Do you want to change modifiers set"
-                ),
+                ("You are working on a behavior.<br>If you close the window it will be lost.<br>Do you want to change modifiers set"),
                 [cfg.CLOSE, cfg.CANCEL],
             )
             == cfg.CANCEL
@@ -0,0 +1,72 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Unweighted Cohen Kappa
+"""
+
+import pandas as pd
+from sklearn.metrics import cohen_kappa_score
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Unweighted Cohen Kappa"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Unweighted Cohen Kappa
+    """
+
+    # attribute a code for each interval
+    def get_code(t_start, obs):
+        for seg in obs:
+            if t_start >= seg[0] and t_start < seg[1]:
+                return seg[2]
+        return ""
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped = {
+        obs: [
+            (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+            for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+        ]
+        for obs, group in df.groupby("Observation id")
+    }
+
+    ck_results: dict = {}
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # get all the break points
+            time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+            # elementary intervals
+            elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+            obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
+
+            obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]
+
+            # Cohen's Kappa
+            kappa = cohen_kappa_score(obs1_codes, obs2_codes)
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results
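Note: a minimal sketch of how the new "Inter Rater Reliability - Unweighted Cohen Kappa" plugin could be exercised on its own, assuming an aggregated-events DataFrame with the column names the plugin reads; the observation ids and values below are invented for illustration.

```python
import pandas as pd

from boris.analysis_plugins import irr_cohen_kappa

# Two hypothetical codings ("obs A" and "obs B") of the same 10-second session.
df = pd.DataFrame(
    {
        "Observation id": ["obs A", "obs A", "obs B", "obs B"],
        "Start (s)": [0.0, 5.0, 0.0, 4.0],
        "Stop (s)": [5.0, 10.0, 4.0, 10.0],
        "Subject": ["subject 1"] * 4,
        "Behavior": ["rest", "walk", "rest", "walk"],
    }
)

# The plugin cuts the union of start/stop times into elementary intervals,
# assigns each interval the "subject|behavior" code of every observation,
# and feeds the two code sequences to sklearn's cohen_kappa_score.
print(irr_cohen_kappa.run(df))
```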
@@ -0,0 +1,77 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Unweighted Cohen Kappa with modifiers
+"""
+
+import pandas as pd
+from sklearn.metrics import cohen_kappa_score
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Unweighted Cohen Kappa with modifiers"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Unweighted Cohen Kappa with modifiers
+    """
+
+    # attribute a code for each interval
+    def get_code(t_start, obs):
+        for seg in obs:
+            if t_start >= seg[0] and t_start < seg[1]:
+                return seg[2]
+        return ""
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped: dict = {}
+    modifiers: list = []
+    for col in df.columns:
+        if isinstance(col, tuple):
+            modifiers.append(col)
+
+    for obs, group in df.groupby("Observation id"):
+        o: list = []
+        for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
+            modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
+            o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
+        grouped[obs] = o
+
+    ck_results: dict = {}
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # get all the break points
+            time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+            # elementary intervals
+            elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+            obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
+
+            obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]
+
+            # Cohen's Kappa
+            kappa = cohen_kappa_score(obs1_codes, obs2_codes)
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results
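Note: in the "with modifiers" variant above, modifier values are expected in tuple-named columns whose first element is the behavior code; only the modifiers belonging to the row's own behavior are appended to the comparison code. A small, hypothetical illustration of that column layout (the second element of the tuple name is an assumption made for this example):

```python
import pandas as pd

# One tuple-named modifier column: ("walk", "gait") is assumed here to be a
# modifier set attached to the "walk" behavior.
df = pd.DataFrame(
    {
        "Observation id": ["obs A", "obs A"],
        "Start (s)": [0.0, 5.0],
        "Stop (s)": [5.0, 10.0],
        "Subject": ["subject 1", "subject 1"],
        "Behavior": ["rest", "walk"],
        ("walk", "gait"): ["", "fast"],
    }
)

# The plugin builds comparison codes such as "subject 1|rest|" for the first
# row (no matching modifier) and "subject 1|walk|fast" for the second one.
```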
@@ -0,0 +1,120 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Weighted Cohen Kappa
+"""
+
+import pandas as pd
+from typing import List, Tuple, Dict, Optional
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Weighted Cohen Kappa"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Weighted Cohen Kappa
+    """
+
+    def cohen_kappa_weighted_by_time(
+        obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+    ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+        """
+        Compute Cohen's Kappa weighted by time duration.
+
+        Args:
+            obs1: List of (start_time, end_time, code) for observer 1
+            obs2: List of (start_time, end_time, code) for observer 2
+
+        Returns:
+            kappa (float): Cohen's Kappa weighted by duration
+            po (float): Observed agreement proportion (weighted)
+            pe (float): Expected agreement proportion by chance (weighted)
+            contingency (dict): Contingency table {(code1, code2): total_duration}
+        """
+
+        # 1. Collect all time boundaries from both observers
+        time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+        # 2. Build elementary intervals (non-overlapping time bins)
+        elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+        # 3. Helper: get the active code for an observer at a given time
+        def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+            for seg in obs:
+                if seg[0] <= t < seg[1]:
+                    return seg[2]
+            return None  # in case no segment covers this time
+
+        # 4. Build weighted contingency table (durations instead of counts)
+        contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+        total_time = 0.0
+
+        for start, end in elementary_intervals:
+            c1 = get_code(start, obs1)
+            c2 = get_code(start, obs2)
+            duration = end - start
+            total_time += duration
+            contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+        # 5. Observed agreement (po)
+        po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+        # Marginal distributions for each observer
+        codes1: Dict[Optional[str], float] = {}
+        codes2: Dict[Optional[str], float] = {}
+        for (c1, c2), duration in contingency.items():
+            codes1[c1] = codes1.get(c1, 0.0) + duration
+            codes2[c2] = codes2.get(c2, 0.0) + duration
+
+        # 6. Expected agreement (pe), using marginal proportions
+        all_codes = set(codes1) | set(codes2)
+        pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+        # 7. Kappa calculation
+        kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+        return kappa, po, pe, contingency
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped = {
+        obs: [
+            (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+            for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+        ]
+        for obs, group in df.groupby("Observation id")
+    }
+
+    ck_results: dict = {}
+    str_results: str = ""
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # Cohen's Kappa
+            kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+            str_results += (
+                f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+            )
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results, str_results
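Note: a small worked example of the duration weighting above, with assumed numbers. Suppose observer 1 codes "A" on [0, 6) and "B" on [6, 10), while observer 2 codes "A" on [0, 4) and "B" on [4, 10). The elementary intervals are [0, 4), [4, 6) and [6, 10); the observers agree on 4 s + 4 s = 8 s out of 10 s, so po = 0.8. The marginal proportions are 0.6/0.4 for observer 1 and 0.4/0.6 for observer 2, giving pe = 0.6 * 0.4 + 0.4 * 0.6 = 0.48, and kappa = (0.8 - 0.48) / (1 - 0.48) ≈ 0.615.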
@@ -0,0 +1,125 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Weighted Cohen Kappa with modifiers
+"""
+
+import pandas as pd
+from typing import List, Tuple, Dict, Optional
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Weighted Cohen Kappa with modifiers"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Weighted Cohen Kappa with modifiers
+    """
+
+    def cohen_kappa_weighted_by_time(
+        obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+    ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+        """
+        Compute Cohen's Kappa weighted by time duration with modifiers.
+
+        Args:
+            obs1: List of (start_time, end_time, code) for observer 1
+            obs2: List of (start_time, end_time, code) for observer 2
+
+        Returns:
+            kappa (float): Cohen's Kappa weighted by duration
+            po (float): Observed agreement proportion (weighted)
+            pe (float): Expected agreement proportion by chance (weighted)
+            contingency (dict): Contingency table {(code1, code2): total_duration}
+        """
+
+        # 1. Collect all time boundaries from both observers
+        time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+        # 2. Build elementary intervals (non-overlapping time bins)
+        elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+        # 3. Helper: get the active code for an observer at a given time
+        def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+            for seg in obs:
+                if seg[0] <= t < seg[1]:
+                    return seg[2]
+            return None  # in case no segment covers this time
+
+        # 4. Build weighted contingency table (durations instead of counts)
+        contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+        total_time = 0.0
+
+        for start, end in elementary_intervals:
+            c1 = get_code(start, obs1)
+            c2 = get_code(start, obs2)
+            duration = end - start
+            total_time += duration
+            contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+        # 5. Observed agreement (po)
+        po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+        # Marginal distributions for each observer
+        codes1: Dict[Optional[str], float] = {}
+        codes2: Dict[Optional[str], float] = {}
+        for (c1, c2), duration in contingency.items():
+            codes1[c1] = codes1.get(c1, 0.0) + duration
+            codes2[c2] = codes2.get(c2, 0.0) + duration
+
+        # 6. Expected agreement (pe), using marginal proportions
+        all_codes = set(codes1) | set(codes2)
+        pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+        # 7. Kappa calculation
+        kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+        return kappa, po, pe, contingency
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped: dict = {}
+    modifiers: list = []
+    for col in df.columns:
+        if isinstance(col, tuple):
+            modifiers.append(col)
+
+    for obs, group in df.groupby("Observation id"):
+        o = []
+        for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
+            modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
+            o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
+        grouped[obs] = o
+
+    ck_results: dict = {}
+    str_results: str = ""
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # Cohen's Kappa
+            kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+            str_results += (
+                f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+            )
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results, str_results
@@ -0,0 +1,54 @@
+"""
+BORIS plugin
+
+number of occurences of behaviors by independent_variable
+"""
+
+import pandas as pd
+
+__version__ = "0.4.0"
+__version_date__ = "2025-07-17"
+__plugin_name__ = "Number of occurences of behaviors by subject by independent_variable"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the number of occurrences of behaviors by subject and by independent_variable.
+
+    This plugin returns a Pandas dataframe
+    """
+
+    df_results_list: list = []
+
+    flag_variable_found = False
+
+    for column in df.columns:
+        if isinstance(column, tuple) or (isinstance(column, str) and not column.startswith("independent variable '")):
+            continue
+
+        flag_variable_found = True
+        grouped_df: df.DataFrame = (
+            df.groupby(
+                [
+                    column,
+                    "Subject",
+                    "Behavior",
+                ]
+            )["Behavior"]
+            .count()
+            .reset_index(name="number of occurences")
+        )
+
+        grouped_df.rename(columns={column: "Value"}, inplace=True)
+
+        grouped_df.insert(0, "independent variable name", column)
+
+        df_results_list.append(grouped_df)
+
+    df_results = pd.concat(df_results_list, ignore_index=True) if df_results_list else pd.DataFrame([])
+
+    if not flag_variable_found:
+        return "No independent variable found"
+    else:
+        return df_results
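Note: the plugin above only considers columns whose name starts with "independent variable '"; every other column is skipped. A minimal, hypothetical call (column names and values invented for illustration):

```python
import pandas as pd

from boris.analysis_plugins import number_of_occurences_by_independent_variable as plugin

df = pd.DataFrame(
    {
        "Subject": ["subject 1", "subject 1", "subject 2"],
        "Behavior": ["walk", "walk", "rest"],
        "independent variable 'weather'": ["sunny", "sunny", "rainy"],
    }
)

# Returns one row per (variable value, subject, behavior) with the count,
# or the string "No independent variable found" when no column matches.
print(plugin.run(df))
```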
@@ -252,7 +252,7 @@ if args.command:
 
        K, out = irr.cohen_kappa(cursor, observations_id_list[0], observations_id_list[1], interval, subjects, include_modifiers)
 
-       print(("Cohen's Kappa - Index of Inter-Rater Reliability\n\
+       print(("Cohen's Kappa - Index of Inter-Rater Reliability\n\nInterval time: {interval:.3f} s\n").format(interval=interval))
 
        print(out)
        sys.exit()
@@ -4263,7 +4263,7 @@ class MainWindow(QMainWindow, Ui_MainWindow):
 
            self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].extend(events_to_add)
            self.project_changed()
-           self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort()
+           self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort(key=lambda x: x[:3])
 
            self.load_tw_events(self.observationId)
 
@@ -4361,14 +4361,12 @@ class MainWindow(QMainWindow, Ui_MainWindow):
 
        events_to_add = project_functions.fix_unpaired_state_events2(self.pj[cfg.ETHOGRAM], events, time_to_stop)
 
-       # print(f"{events_to_add=}")
-
        if events_to_add:
            self.statusbar.showMessage("The playlist has finished. Some ongoing state events were stopped automatically", 0)
 
            self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].extend(events_to_add)
            self.project_changed()
-           self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort()
+           self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort(key=lambda x: x[:3])
 
            self.load_tw_events(self.observationId)
 
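Note: the two sort() changes above restrict the comparison to the first three fields of each event row instead of the whole row. A minimal, generic illustration of the difference (the field layout below is invented and is not BORIS's exact event structure):

```python
# With key=lambda x: x[:3], only the three leading fields are compared;
# trailing fields (which may mix types such as str and None) never take part
# in the comparison, and rows with equal prefixes keep their relative order.
events = [
    [12.5, "subject 1", "walk", "a comment"],
    [3.0, "subject 2", "rest", ""],
    [12.5, "subject 1", "walk", None],  # a plain events.sort() can raise TypeError here ("a comment" vs None)
]

events.sort(key=lambda x: x[:3])
print(events)
```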
@@ -15946,10 +15946,13 @@ qt_resource_struct = b"\
|
|
|
15946
15946
|
\x00\x00\x01\x95k&\xa4B\
|
|
15947
15947
|
"
|
|
15948
15948
|
|
|
15949
|
+
|
|
15949
15950
|
def qInitResources():
|
|
15950
15951
|
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
|
|
15951
15952
|
|
|
15953
|
+
|
|
15952
15954
|
def qCleanupResources():
|
|
15953
15955
|
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
|
|
15954
15956
|
|
|
15957
|
+
|
|
15955
15958
|
qInitResources()
|
|
@@ -42,7 +42,7 @@ class ExclusionMatrix(QDialog):
 
         self.label = QLabel()
         self.label.setText(
-            ("Check if behaviors are mutually exclusive.\
+            ("Check if behaviors are mutually exclusive.\nThe Point events (displayed on blue background) cannot be excluded)")
         )
         hbox.addWidget(self.label)
 
@@ -126,7 +126,7 @@ def cohen_kappa(cursor, obsid1: str, obsid2: str, interval: dec, selected_subjec
     first_event = cursor.execute(
         (
             "SELECT min(start) FROM aggregated_events "
-            f"WHERE observation in (?, ?) AND subject in ({','.join('?'*len(selected_subjects))}) "
+            f"WHERE observation in (?, ?) AND subject in ({','.join('?' * len(selected_subjects))}) "
         ),
         (obsid1, obsid2) + tuple(selected_subjects),
     ).fetchone()[0]
@@ -134,21 +134,18 @@
     logging.debug(f"first_event: {first_event}")
 
     last_event = cursor.execute(
-        (
-            "SELECT max(stop) FROM aggregated_events "
-            f"WHERE observation in (?, ?) AND subject in ({','.join('?'*len(selected_subjects))}) "
-        ),
+        (f"SELECT max(stop) FROM aggregated_events WHERE observation in (?, ?) AND subject in ({','.join('?' * len(selected_subjects))}) "),
         (obsid1, obsid2) + tuple(selected_subjects),
     ).fetchone()[0]
 
     logging.debug(f"last_event: {last_event}")
 
     nb_events1 = cursor.execute(
-        ("SELECT COUNT(*) FROM aggregated_events
+        (f"SELECT COUNT(*) FROM aggregated_events WHERE observation = ? AND subject in ({','.join('?' * len(selected_subjects))}) "),
         (obsid1,) + tuple(selected_subjects),
     ).fetchone()[0]
     nb_events2 = cursor.execute(
-        ("SELECT COUNT(*) FROM aggregated_events
+        (f"SELECT COUNT(*) FROM aggregated_events WHERE observation = ? AND subject in ({','.join('?' * len(selected_subjects))}) "),
         (obsid2,) + tuple(selected_subjects),
     ).fetchone()[0]
 
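Note: the changes to these queries are formatting only (spacing around `*` and string literals merged onto one line); the placeholder construction itself is unchanged. For reference, a standalone sketch of the idiom:

```python
# One "?" placeholder per selected subject, joined with commas, e.g. "?,?,?".
selected_subjects = ["subject 1", "subject 2", "subject 3"]
placeholders = ",".join("?" * len(selected_subjects))
query = f"SELECT COUNT(*) FROM aggregated_events WHERE observation = ? AND subject in ({placeholders}) "
print(query)
```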
@@ -201,11 +198,7 @@
     logging.debug(f"contingency_table:\n {contingency_table}")
 
     template = (
-        "Observation: {obsid1}\n"
-        "number of events: {nb_events1}\n\n"
-        "Observation: {obsid2}\n"
-        "number of events: {nb_events2:.0f}\n\n"
-        "K = {K:.3f}"
+        "Observation: {obsid1}\nnumber of events: {nb_events1}\n\nObservation: {obsid2}\nnumber of events: {nb_events2:.0f}\n\nK = {K:.3f}"
     )
 
     # out += "Observation length: <b>{:.3f} s</b><br>".format(self.observationTotalMediaLength(obsid1))
@@ -470,7 +463,7 @@ def needleman_wunsch_identity(cursor, obsid1: str, obsid2: str, interval, select
     first_event = cursor.execute(
         (
             "SELECT min(start) FROM aggregated_events "
-            f"WHERE observation in (?, ?) AND subject in ({','.join('?'*len(selected_subjects))}) "
+            f"WHERE observation in (?, ?) AND subject in ({','.join('?' * len(selected_subjects))}) "
         ),
         (obsid1, obsid2) + tuple(selected_subjects),
     ).fetchone()[0]
@@ -483,22 +476,19 @@
     logging.debug(f"first_event: {first_event}")
 
     last_event = cursor.execute(
-        (
-            "SELECT max(stop) FROM aggregated_events "
-            f"WHERE observation in (?, ?) AND subject in ({','.join('?'*len(selected_subjects))}) "
-        ),
+        (f"SELECT max(stop) FROM aggregated_events WHERE observation in (?, ?) AND subject in ({','.join('?' * len(selected_subjects))}) "),
         (obsid1, obsid2) + tuple(selected_subjects),
     ).fetchone()[0]
 
     logging.debug(f"last_event: {last_event}")
 
     nb_events1 = cursor.execute(
-        ("SELECT COUNT(*) FROM aggregated_events
+        (f"SELECT COUNT(*) FROM aggregated_events WHERE observation = ? AND subject in ({','.join('?' * len(selected_subjects))}) "),
         (obsid1,) + tuple(selected_subjects),
     ).fetchone()[0]
 
     nb_events2 = cursor.execute(
-        ("SELECT COUNT(*) FROM aggregated_events
+        (f"SELECT COUNT(*) FROM aggregated_events WHERE observation = ? AND subject in ({','.join('?' * len(selected_subjects))}) "),
         (obsid2,) + tuple(selected_subjects),
     ).fetchone()[0]
 
@@ -606,9 +596,7 @@ def needleman_wunch(self):
 
     cursor = db_connector.cursor()
     out = (
-        "Needleman-Wunsch similarity\n\n"
-        f"Time unit: {interval:.3f} s\n"
-        f"Selected subjects: {', '.join(parameters[cfg.SELECTED_SUBJECTS])}\n\n"
+        f"Needleman-Wunsch similarity\n\nTime unit: {interval:.3f} s\nSelected subjects: {', '.join(parameters[cfg.SELECTED_SUBJECTS])}\n\n"
     )
     mem_done = []
     nws_results = np.ones((len(selected_observations), len(selected_observations)))
@@ -143,6 +143,8 @@ def update_menu(self):
             self.actionJumpForward,
             self.actionJumpBackward,
             self.actionJumpTo,
+            self.action_change_time_offset_of_players,
+            self.action_deinterlace,
             self.actionZoom_level,
             self.actionRotate_current_video,
             self.actionDisplay_subtitles,
@@ -0,0 +1,31 @@
+from .const import Bound, inf
+from .interval import Interval, open, closed, openclosed, closedopen, empty, singleton
+from .func import iterate
+from .io import from_string, to_string, from_data, to_data
+
+# disabled because BORIS does not need IntervalDict
+# so the sortedcontainers module is not required
+# from .dict import IntervalDict
+
+
+__all__ = [
+    "inf",
+    "CLOSED",
+    "OPEN",
+    "Interval",
+    "open",
+    "closed",
+    "openclosed",
+    "closedopen",
+    "singleton",
+    "empty",
+    "iterate",
+    "from_string",
+    "to_string",
+    "from_data",
+    "to_data",
+    "IntervalDict",
+]
+
+CLOSED = Bound.CLOSED
+OPEN = Bound.OPEN