boris-behav-obs 9.6.1-py2.py3-none-any.whl → 9.6.3-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- boris/add_modifier.py +1 -5
- boris/analysis_plugins/irr_cohen_kappa.py +72 -0
- boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py +77 -0
- boris/analysis_plugins/irr_weighted_cohen_kappa.py +120 -0
- boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py +125 -0
- boris/analysis_plugins/time_budget.py +0 -4
- boris/behav_coding_map_creator.py +0 -1
- boris/boris_cli.py +1 -1
- boris/coding_pad.py +0 -2
- boris/core.py +11 -28
- boris/core_qrc.py +3 -0
- boris/db_functions.py +4 -4
- boris/edit_event.py +0 -9
- boris/exclusion_matrix.py +1 -1
- boris/export_events.py +63 -71
- boris/gui_utilities.py +0 -1
- boris/irr.py +10 -22
- boris/menu_options.py +2 -0
- boris/modifier_coding_map_creator.py +0 -2
- boris/observation_operations.py +2 -4
- boris/param_panel.py +0 -4
- boris/plot_spectrogram_rt.py +2 -1
- boris/plot_waveform_rt.py +2 -1
- boris/portion/__init__.py +18 -8
- boris/portion/const.py +35 -18
- boris/portion/dict.py +5 -5
- boris/portion/func.py +2 -2
- boris/portion/interval.py +21 -41
- boris/portion/io.py +41 -32
- boris/project_functions.py +2 -1
- boris/state_events.py +1 -1
- boris/time_budget_functions.py +0 -9
- boris/transitions.py +1 -1
- boris/version.py +2 -2
- boris/video_equalizer.py +0 -2
- boris/view_df.py +0 -2
- boris/write_event.py +4 -13
- {boris_behav_obs-9.6.1.dist-info → boris_behav_obs-9.6.3.dist-info}/METADATA +12 -8
- {boris_behav_obs-9.6.1.dist-info → boris_behav_obs-9.6.3.dist-info}/RECORD +43 -39
- {boris_behav_obs-9.6.1.dist-info → boris_behav_obs-9.6.3.dist-info}/WHEEL +0 -0
- {boris_behav_obs-9.6.1.dist-info → boris_behav_obs-9.6.3.dist-info}/entry_points.txt +0 -0
- {boris_behav_obs-9.6.1.dist-info → boris_behav_obs-9.6.3.dist-info}/licenses/LICENSE.TXT +0 -0
- {boris_behav_obs-9.6.1.dist-info → boris_behav_obs-9.6.3.dist-info}/top_level.txt +0 -0
boris/add_modifier.py
CHANGED
@@ -123,11 +123,7 @@ class addModifierDialog(QDialog, Ui_Dialog):
             if (
                 dialog.MessageDialog(
                     cfg.programName,
-                    (
-                        "You are working on a behavior.<br>"
-                        "If you close the window it will be lost.<br>"
-                        "Do you want to change modifiers set"
-                    ),
+                    ("You are working on a behavior.<br>If you close the window it will be lost.<br>Do you want to change modifiers set"),
                     [cfg.CLOSE, cfg.CANCEL],
                 )
                 == cfg.CANCEL

boris/analysis_plugins/irr_cohen_kappa.py
ADDED

@@ -0,0 +1,72 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Unweighted Cohen Kappa
+"""
+
+import pandas as pd
+from sklearn.metrics import cohen_kappa_score
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Unweighted Cohen Kappa"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Unweighted Cohen Kappa
+    """
+
+    # attribute a code for each interval
+    def get_code(t_start, obs):
+        for seg in obs:
+            if t_start >= seg[0] and t_start < seg[1]:
+                return seg[2]
+        return ""
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped = {
+        obs: [
+            (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+            for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+        ]
+        for obs, group in df.groupby("Observation id")
+    }
+
+    ck_results: dict = {}
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # get all the break points
+            time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+            # elementary intervals
+            elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+            obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
+
+            obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]
+
+            # Cohen's Kappa
+            kappa = cohen_kappa_score(obs1_codes, obs2_codes)
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results
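Note: this plugin receives the aggregated-events DataFrame that BORIS passes to analysis plugins. A minimal sketch of how run() could be exercised on toy data follows; the observation ids, subjects, behaviors and times are invented for illustration, and the import path is assumed from the file location above (requires pandas and scikit-learn).

import pandas as pd
from boris.analysis_plugins.irr_cohen_kappa import run  # assumed import path

# two observations of the same 10 s session, coded by two observers
toy = pd.DataFrame(
    {
        "Observation id": ["obs1", "obs1", "obs2", "obs2"],
        "Start (s)": [0.0, 5.0, 0.0, 6.0],
        "Stop (s)": [5.0, 10.0, 6.0, 10.0],
        "Subject": ["subject1"] * 4,
        "Behavior": ["rest", "groom", "rest", "groom"],
    }
)

# returns a symmetric matrix of kappa values, one cell per pair of observations
print(run(toy))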

boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py
ADDED

@@ -0,0 +1,77 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Unweighted Cohen Kappa with modifiers
+"""
+
+import pandas as pd
+from sklearn.metrics import cohen_kappa_score
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Unweighted Cohen Kappa with modifiers"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Unweighted Cohen Kappa with modifiers
+    """
+
+    # attribute a code for each interval
+    def get_code(t_start, obs):
+        for seg in obs:
+            if t_start >= seg[0] and t_start < seg[1]:
+                return seg[2]
+        return ""
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped: dict = {}
+    modifiers: list = []
+    for col in df.columns:
+        if isinstance(col, tuple):
+            modifiers.append(col)
+
+    for obs, group in df.groupby("Observation id"):
+        o: list = []
+        for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
+            modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
+            o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
+        grouped[obs] = o
+
+    ck_results: dict = {}
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # get all the break points
+            time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+            # elementary intervals
+            elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+            obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
+
+            obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]
+
+            # Cohen's Kappa
+            kappa = cohen_kappa_score(obs1_codes, obs2_codes)
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results
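In this variant, modifier columns are recognized by their tuple names, and only the modifiers whose first tuple element equals the row's behavior (modifiers[idx][0] == row[3]) are appended to the coded label. A hedged sketch of the expected column layout (the column name and values below are invented for illustration):

import pandas as pd

# hypothetical frame: the modifier column is named with a tuple whose first
# element is the behavior code it belongs to
toy = pd.DataFrame(
    [
        ["obs1", 0.0, 5.0, "subject1", "groom", "head"],
        ["obs2", 0.0, 5.0, "subject1", "groom", "tail"],
    ],
    columns=["Observation id", "Start (s)", "Stop (s)", "Subject", "Behavior", ("groom", "body part")],
)

# the plugin codes each event as "Subject|Behavior|modifiers", here
# "subject1|groom|head" vs "subject1|groom|tail", so two observations only
# agree on an interval when the behavior AND its modifiers match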

boris/analysis_plugins/irr_weighted_cohen_kappa.py
ADDED

@@ -0,0 +1,120 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Weighted Cohen Kappa
+"""
+
+import pandas as pd
+from typing import List, Tuple, Dict, Optional
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Weighted Cohen Kappa"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Weighted Cohen Kappa
+    """
+
+    def cohen_kappa_weighted_by_time(
+        obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+    ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+        """
+        Compute Cohen's Kappa weighted by time duration.
+
+        Args:
+            obs1: List of (start_time, end_time, code) for observer 1
+            obs2: List of (start_time, end_time, code) for observer 2
+
+        Returns:
+            kappa (float): Cohen's Kappa weighted by duration
+            po (float): Observed agreement proportion (weighted)
+            pe (float): Expected agreement proportion by chance (weighted)
+            contingency (dict): Contingency table {(code1, code2): total_duration}
+        """
+
+        # 1. Collect all time boundaries from both observers
+        time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+        # 2. Build elementary intervals (non-overlapping time bins)
+        elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+        # 3. Helper: get the active code for an observer at a given time
+        def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+            for seg in obs:
+                if seg[0] <= t < seg[1]:
+                    return seg[2]
+            return None  # in case no segment covers this time
+
+        # 4. Build weighted contingency table (durations instead of counts)
+        contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+        total_time = 0.0
+
+        for start, end in elementary_intervals:
+            c1 = get_code(start, obs1)
+            c2 = get_code(start, obs2)
+            duration = end - start
+            total_time += duration
+            contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+        # 5. Observed agreement (po)
+        po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+        # Marginal distributions for each observer
+        codes1: Dict[Optional[str], float] = {}
+        codes2: Dict[Optional[str], float] = {}
+        for (c1, c2), duration in contingency.items():
+            codes1[c1] = codes1.get(c1, 0.0) + duration
+            codes2[c2] = codes2.get(c2, 0.0) + duration
+
+        # 6. Expected agreement (pe), using marginal proportions
+        all_codes = set(codes1) | set(codes2)
+        pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+        # 7. Kappa calculation
+        kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+        return kappa, po, pe, contingency
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped = {
+        obs: [
+            (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+            for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+        ]
+        for obs, group in df.groupby("Observation id")
+    }
+
+    ck_results: dict = {}
+    str_results: str = ""
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # Cohen's Kappa
+            kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+            str_results += (
+                f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+            )
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results, str_results
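For reference, the arithmetic performed by cohen_kappa_weighted_by_time, worked by hand on a tiny pair of segment lists (segment values invented for illustration; this is a standalone sketch, not part of the plugin):

# two observers coding one 10 s session with codes "A" and "B"
obs1 = [(0.0, 6.0, "A"), (6.0, 10.0, "B")]
obs2 = [(0.0, 4.0, "A"), (4.0, 10.0, "B")]

# break points 0, 4, 6, 10 -> elementary intervals (0, 4), (4, 6), (6, 10)
# agreement: (0, 4) A/A for 4 s and (6, 10) B/B for 4 s -> po = 8/10
po = 8 / 10

# marginals: observer 1 codes A for 6 s and B for 4 s, observer 2 codes A for 4 s and B for 6 s
pe = (6 / 10) * (4 / 10) + (4 / 10) * (6 / 10)  # 0.48

kappa = (po - pe) / (1 - pe)
print(f"{kappa:.3f}")  # 0.615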

boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py
ADDED

@@ -0,0 +1,125 @@
+"""
+BORIS plugin
+
+Inter Rater Reliability (IRR) Weighted Cohen Kappa with modifiers
+"""
+
+import pandas as pd
+from typing import List, Tuple, Dict, Optional
+
+__version__ = "0.0.1"
+__version_date__ = "2025-08-25"
+__plugin_name__ = "Inter Rater Reliability - Weighted Cohen Kappa with modifiers"
+__author__ = "Olivier Friard - University of Torino - Italy"
+
+
+def run(df: pd.DataFrame):
+    """
+    Calculate the Inter Rater Reliability - Weighted Cohen Kappa with modifiers
+    """
+
+    def cohen_kappa_weighted_by_time(
+        obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+    ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+        """
+        Compute Cohen's Kappa weighted by time duration with modifiers.
+
+        Args:
+            obs1: List of (start_time, end_time, code) for observer 1
+            obs2: List of (start_time, end_time, code) for observer 2
+
+        Returns:
+            kappa (float): Cohen's Kappa weighted by duration
+            po (float): Observed agreement proportion (weighted)
+            pe (float): Expected agreement proportion by chance (weighted)
+            contingency (dict): Contingency table {(code1, code2): total_duration}
+        """
+
+        # 1. Collect all time boundaries from both observers
+        time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+        # 2. Build elementary intervals (non-overlapping time bins)
+        elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+        # 3. Helper: get the active code for an observer at a given time
+        def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+            for seg in obs:
+                if seg[0] <= t < seg[1]:
+                    return seg[2]
+            return None  # in case no segment covers this time
+
+        # 4. Build weighted contingency table (durations instead of counts)
+        contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+        total_time = 0.0
+
+        for start, end in elementary_intervals:
+            c1 = get_code(start, obs1)
+            c2 = get_code(start, obs2)
+            duration = end - start
+            total_time += duration
+            contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+        # 5. Observed agreement (po)
+        po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+        # Marginal distributions for each observer
+        codes1: Dict[Optional[str], float] = {}
+        codes2: Dict[Optional[str], float] = {}
+        for (c1, c2), duration in contingency.items():
+            codes1[c1] = codes1.get(c1, 0.0) + duration
+            codes2[c2] = codes2.get(c2, 0.0) + duration
+
+        # 6. Expected agreement (pe), using marginal proportions
+        all_codes = set(codes1) | set(codes2)
+        pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+        # 7. Kappa calculation
+        kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+        return kappa, po, pe, contingency
+
+    # Get unique values as a numpy array
+    unique_obs = df["Observation id"].unique()
+
+    # Convert to a list
+    unique_obs_list = unique_obs.tolist()
+
+    # Convert to tuples grouped by observation
+    grouped: dict = {}
+    modifiers: list = []
+    for col in df.columns:
+        if isinstance(col, tuple):
+            modifiers.append(col)
+
+    for obs, group in df.groupby("Observation id"):
+        o = []
+        for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
+            modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
+            o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
+        grouped[obs] = o
+
+    ck_results: dict = {}
+    str_results: str = ""
+    for idx1, obs_id1 in enumerate(unique_obs_list):
+        obs1 = grouped[obs_id1]
+
+        ck_results[(obs_id1, obs_id1)] = "1.000"
+
+        for obs_id2 in unique_obs_list[idx1 + 1 :]:
+            obs2 = grouped[obs_id2]
+
+            # Cohen's Kappa
+            kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+            str_results += (
+                f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+            )
+
+            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+    # DataFrame conversion
+    df_results = pd.Series(ck_results).unstack()
+
+    return df_results, str_results

boris/boris_cli.py
CHANGED
@@ -252,7 +252,7 @@ if args.command:

        K, out = irr.cohen_kappa(cursor, observations_id_list[0], observations_id_list[1], interval, subjects, include_modifiers)

-        print(("Cohen's Kappa - Index of Inter-Rater Reliability\n\
+        print(("Cohen's Kappa - Index of Inter-Rater Reliability\n\nInterval time: {interval:.3f} s\n").format(interval=interval))

        print(out)
        sys.exit()

boris/coding_pad.py
CHANGED
@@ -261,8 +261,6 @@ def show_coding_pad(self):
    self.codingpad.setWindowFlags(Qt.WindowStaysOnTopHint)
    self.codingpad.sendEventSignal.connect(self.signal_from_widget)

-    print(f"{self.signal_from_widget=}")
-
    self.codingpad.clickSignal.connect(self.click_signal_from_coding_pad)
    self.codingpad.close_signal.connect(self.close_signal_from_coding_pad)
    self.codingpad.show()

boris/core.py
CHANGED
@@ -205,7 +205,6 @@ class TableModel(QAbstractTableModel):
        if 0 <= row < self.rowCount():
            column = index.column()

-            # print(self._data[:3])
            if cfg.TW_EVENTS_FIELDS[self.observation_type][column] == "type":
                return self._data[row][-1]
            else:

@@ -2137,39 +2136,47 @@ class MainWindow(QMainWindow, Ui_MainWindow):
            set_and_update_pan_and_zoom(new_pan_x, new_pan_y, new_zoom)

            if cmd == "MBTN_LEFT_DBL":
-
+                logging.debug("MBTN_LEFT_DBL")
                # ZOOM IN x2
                do_zoom_in_clicked_coords(zoom_increment=1)
                return
            if cmd == "MBTN_RIGHT_DBL":
+                logging.debug("MBTN_RIGHT_DBL")
                # ZOOM OUT x2
                do_zoom_in_clicked_coords(zoom_increment=-1)
                return
            if cmd == "Ctrl+WHEEL_UP":
+                logging.debug("Ctrl+WHEEL_UP")
                # ZOOM IN (3 wheel steps to zoom X2)
                do_zoom_in_clicked_coords(zoom_increment=1.0 / 3.0)
                return
            if cmd == "Ctrl+WHEEL_DOWN":
+                logging.debug("Ctrl+WHEEL_DOWN")
                # ZOOM OUT (3 wheel steps to zoom X2)
                do_zoom_in_clicked_coords(zoom_increment=-1.0 / 3.0)
                return
            if cmd == "WHEEL_UP":
+                logging.debug("WHEEL_UP")
                # PAN UP (VIDEO MOVES DOWN)
                do_pan_in_clicked_coords(pan_x_increment=0, pan_y_increment=+0.01)
                return
            if cmd == "WHEEL_DOWN":
+                logging.debug("WHEEL_DOWN")
                # PAN DOWN (VIDEO MOVES UP)
                do_pan_in_clicked_coords(pan_x_increment=0, pan_y_increment=-0.01)
                return
            if cmd == "Shift+WHEEL_UP":
+                logging.debug("Shift+WHEEL_UP")
                # PAN LEFT (VIDEO MOVES TO THE RIGHT)
                do_pan_in_clicked_coords(pan_x_increment=+0.01, pan_y_increment=0)
                return
            if cmd == "Shift+WHEEL_DOWN":
+                logging.debug("Shift+WHEEL_DOWN")
                # PAN RIGHT (VIDEO MOVES TO THE LEFT)
                do_pan_in_clicked_coords(pan_x_increment=-0.01, pan_y_increment=0)
                return
            if cmd == "Shift+MBTN_LEFT":
+                logging.debug("Shift+MBTN_LEFT")
                # RESET PAN AND ZOOM TO DEFAULT
                set_and_update_pan_and_zoom(pan_x=0, pan_y=0, zoom=0)
                return

@@ -3745,21 +3752,10 @@ class MainWindow(QMainWindow, Ui_MainWindow):

            # calculate time for current media file in case of many queued media files

-            print(mediaFileIdx_s)
-            print(type(eventtime_s))
-            print(durations)
-
            eventtime_onmedia_s = round(eventtime_s - util.float2decimal(sum(durations[0:mediaFileIdx_s])), 3)
            eventtime_onmedia_e = round(eventtime_e - util.float2decimal(sum(durations[0:mediaFileIdx_e])), 3)

-            print(row_s, media_path_s, eventtime_s, eventtime_onmedia_s)
-            print(self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS][row_s])
-
-            print(row_e, media_path_e, eventtime_e, eventtime_onmedia_e)
-            print(self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS][row_e])
-
            if media_path_s != media_path_e:
-                print("events are located on 2 different media files")
                return

            media_path = media_path_s

@@ -3770,7 +3766,6 @@ class MainWindow(QMainWindow, Ui_MainWindow):
        if "BORISEXTERNAL" in os.environ:
            external_command_template = os.environ["BORISEXTERNAL"]
        else:
-            print("BORISEXTERNAL env var not defined")
            return

        external_command = external_command_template.format(

@@ -3853,11 +3848,6 @@ class MainWindow(QMainWindow, Ui_MainWindow):

        time_, cumulative_time = self.get_obs_time()

-        """
-        print(time_)
-        print(cumulative_time)
-        """
-
        if self.pj[cfg.OBSERVATIONS][self.observationId].get(cfg.MEDIA_CREATION_DATE_AS_OFFSET, False):
            write_event.write_event(self, event, time_)
        else:

@@ -4263,7 +4253,7 @@ class MainWindow(QMainWindow, Ui_MainWindow):

        self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].extend(events_to_add)
        self.project_changed()
-        self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort()
+        self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort(key=lambda x: x[:3])

        self.load_tw_events(self.observationId)

@@ -4361,14 +4351,12 @@ class MainWindow(QMainWindow, Ui_MainWindow):

        events_to_add = project_functions.fix_unpaired_state_events2(self.pj[cfg.ETHOGRAM], events, time_to_stop)

-        # print(f"{events_to_add=}")
-
        if events_to_add:
            self.statusbar.showMessage("The playlist has finished. Some ongoing state events were stopped automatically", 0)

            self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].extend(events_to_add)
            self.project_changed()
-            self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort()
+            self.pj[cfg.OBSERVATIONS][self.observationId][cfg.EVENTS].sort(key=lambda x: x[:3])

            self.load_tw_events(self.observationId)

@@ -5224,25 +5212,20 @@ class MainWindow(QMainWindow, Ui_MainWindow):
                if (not self.find_dialog.cbFindInSelectedEvents.isChecked()) or (
                    self.find_dialog.cbFindInSelectedEvents.isChecked() and event_idx in self.find_dialog.rowsToFind
                ):
-                    print(f"{event=}")
-
                    # search only on filtered events
                    if event_idx not in self.tv_idx2events_idx:
                        continue

                    for idx in fields_list:
-                        print(f"{idx=}")
                        if (self.find_dialog.cb_case_sensitive.isChecked() and self.find_dialog.findText.text() in event[idx]) or (
                            not self.find_dialog.cb_case_sensitive.isChecked()
                            and self.find_dialog.findText.text().upper() in event[idx].upper()
                        ):
                            self.find_dialog.currentIdx = event_idx

-                            # self.twEvents.scrollToItem(self.twEvents.item(event_idx, 0))
                            index = self.tv_events.model().index(event_idx, 0)
                            self.tv_events.scrollTo(index, QAbstractItemView.EnsureVisible)
                            self.tv_events.selectRow(event_idx)
-                            # self.twEvents.selectRow(event_idx)
                            return

        if msg != "FIND_FROM_BEGINING":
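The two sort() calls replaced above now order events on their first three fields only (key=lambda x: x[:3]) instead of comparing whole event rows. A minimal sketch of the effect, with a hypothetical event layout of [time, subject, behavior, ...extra fields] used only for illustration:

# hypothetical event rows; the trailing field is ignored by the sort key
events = [
    [12.5, "subject1", "groom", ""],
    [3.2, "subject2", "rest", ""],
    [3.2, "subject1", "rest", ""],
]

# order by time, then subject, then behavior; fields beyond the third
# (modifiers, comments, ...) no longer take part in the comparison
events.sort(key=lambda x: x[:3])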
boris/core_qrc.py
CHANGED
@@ -15946,10 +15946,13 @@ qt_resource_struct = b"\
 \x00\x00\x01\x95k&\xa4B\
 "

+
 def qInitResources():
     QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)

+
 def qCleanupResources():
     QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)

+
 qInitResources()

boris/db_functions.py
CHANGED
@@ -188,7 +188,7 @@ def load_aggregated_events_in_db(
    selected_behaviors = sorted([pj[cfg.ETHOGRAM][x][cfg.BEHAVIOR_CODE] for x in pj[cfg.ETHOGRAM]])

    # check if state events are paired
-    out = ""
+    out: str = ""
    for obs_id in selected_observations:
        r, msg = project_functions.check_state_events_obs(obs_id, pj[cfg.ETHOGRAM], pj[cfg.OBSERVATIONS][obs_id], cfg.HHMMSS)
        if not r:

@@ -212,11 +212,11 @@ def load_aggregated_events_in_db(

    db = sqlite3.connect(":memory:")

-    """
    # only for debugging
+    """
    import os
-    os.system("rm /tmp/ramdisk/
-    db = sqlite3.connect("/tmp/ramdisk/
+    os.system("rm /tmp/ramdisk/aggregated_events.sqlite")
+    db = sqlite3.connect("/tmp/ramdisk/aggregated_events.sqlite", isolation_level=None)
    """

    db.row_factory = sqlite3.Row

boris/edit_event.py
CHANGED
@@ -128,8 +128,6 @@ class DlgEditEvent(QDialog, Ui_Form):
        set time to current media time
        """

-        print(f"{self.current_time=}")
-
        if self.observation_type in (cfg.LIVE, cfg.MEDIA):
            self.time_widget.set_time(dec(float(self.current_time)))

@@ -137,13 +135,6 @@ class DlgEditEvent(QDialog, Ui_Form):
        if self.exif_date_time is not None:
            self.time_widget.set_time(dec(self.exif_date_time))

-    # def frame_idx_na(self):
-    #     """
-    #     set/unset frame index NA
-    #     """
-    #     self.lb_frame_idx.setEnabled(not self.cb_set_frame_idx_na.isChecked())
-    #     self.sb_frame_idx.setEnabled(not self.cb_set_frame_idx_na.isChecked())
-
    def time_na(self):
        """
        set/unset time to NA

boris/exclusion_matrix.py
CHANGED
@@ -42,7 +42,7 @@ class ExclusionMatrix(QDialog):

        self.label = QLabel()
        self.label.setText(
-            ("Check if behaviors are mutually exclusive.\
+            ("Check if behaviors are mutually exclusive.\nThe Point events (displayed on blue background) cannot be excluded)")
        )
        hbox.addWidget(self.label)
