boris-behav-obs 9.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. boris/__init__.py +26 -0
  2. boris/__main__.py +25 -0
  3. boris/about.py +143 -0
  4. boris/add_modifier.py +635 -0
  5. boris/add_modifier_ui.py +303 -0
  6. boris/advanced_event_filtering.py +455 -0
  7. boris/analysis_plugins/__init__.py +0 -0
  8. boris/analysis_plugins/_latency.py +59 -0
  9. boris/analysis_plugins/irr_cohen_kappa.py +109 -0
  10. boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py +112 -0
  11. boris/analysis_plugins/irr_weighted_cohen_kappa.py +157 -0
  12. boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py +162 -0
  13. boris/analysis_plugins/list_of_dataframe_columns.py +22 -0
  14. boris/analysis_plugins/number_of_occurences.py +22 -0
  15. boris/analysis_plugins/number_of_occurences_by_independent_variable.py +54 -0
  16. boris/analysis_plugins/time_budget.py +61 -0
  17. boris/behav_coding_map_creator.py +1110 -0
  18. boris/behavior_binary_table.py +305 -0
  19. boris/behaviors_coding_map.py +239 -0
  20. boris/boris_cli.py +340 -0
  21. boris/cmd_arguments.py +49 -0
  22. boris/coding_pad.py +280 -0
  23. boris/config.py +785 -0
  24. boris/config_file.py +356 -0
  25. boris/connections.py +409 -0
  26. boris/converters.py +333 -0
  27. boris/converters_ui.py +225 -0
  28. boris/cooccurence.py +250 -0
  29. boris/core.py +5901 -0
  30. boris/core_qrc.py +15958 -0
  31. boris/core_ui.py +1107 -0
  32. boris/db_functions.py +324 -0
  33. boris/dev.py +134 -0
  34. boris/dialog.py +1108 -0
  35. boris/duration_widget.py +238 -0
  36. boris/edit_event.py +245 -0
  37. boris/edit_event_ui.py +233 -0
  38. boris/event_operations.py +1040 -0
  39. boris/events_cursor.py +61 -0
  40. boris/events_snapshots.py +596 -0
  41. boris/exclusion_matrix.py +141 -0
  42. boris/export_events.py +1006 -0
  43. boris/export_observation.py +1203 -0
  44. boris/external_processes.py +332 -0
  45. boris/geometric_measurement.py +941 -0
  46. boris/gui_utilities.py +135 -0
  47. boris/image_overlay.py +72 -0
  48. boris/import_observations.py +242 -0
  49. boris/ipc_mpv.py +325 -0
  50. boris/irr.py +634 -0
  51. boris/latency.py +244 -0
  52. boris/measurement_widget.py +161 -0
  53. boris/media_file.py +115 -0
  54. boris/menu_options.py +213 -0
  55. boris/modifier_coding_map_creator.py +1013 -0
  56. boris/modifiers_coding_map.py +157 -0
  57. boris/mpv.py +2016 -0
  58. boris/mpv2.py +2193 -0
  59. boris/observation.py +1453 -0
  60. boris/observation_operations.py +2538 -0
  61. boris/observation_ui.py +679 -0
  62. boris/observations_list.py +337 -0
  63. boris/otx_parser.py +442 -0
  64. boris/param_panel.py +201 -0
  65. boris/param_panel_ui.py +305 -0
  66. boris/player_dock_widget.py +198 -0
  67. boris/plot_data_module.py +536 -0
  68. boris/plot_events.py +634 -0
  69. boris/plot_events_rt.py +237 -0
  70. boris/plot_spectrogram_rt.py +316 -0
  71. boris/plot_waveform_rt.py +230 -0
  72. boris/plugins.py +431 -0
  73. boris/portion/__init__.py +31 -0
  74. boris/portion/const.py +95 -0
  75. boris/portion/dict.py +365 -0
  76. boris/portion/func.py +52 -0
  77. boris/portion/interval.py +581 -0
  78. boris/portion/io.py +181 -0
  79. boris/preferences.py +510 -0
  80. boris/preferences_ui.py +770 -0
  81. boris/project.py +2007 -0
  82. boris/project_functions.py +2041 -0
  83. boris/project_import_export.py +1096 -0
  84. boris/project_ui.py +794 -0
  85. boris/qrc_boris.py +10389 -0
  86. boris/qrc_boris5.py +2579 -0
  87. boris/select_modifiers.py +312 -0
  88. boris/select_observations.py +210 -0
  89. boris/select_subj_behav.py +286 -0
  90. boris/state_events.py +197 -0
  91. boris/subjects_pad.py +106 -0
  92. boris/synthetic_time_budget.py +290 -0
  93. boris/time_budget_functions.py +1136 -0
  94. boris/time_budget_widget.py +1039 -0
  95. boris/transitions.py +365 -0
  96. boris/utilities.py +1810 -0
  97. boris/version.py +24 -0
  98. boris/video_equalizer.py +159 -0
  99. boris/video_equalizer_ui.py +248 -0
  100. boris/video_operations.py +310 -0
  101. boris/view_df.py +104 -0
  102. boris/view_df_ui.py +75 -0
  103. boris/write_event.py +538 -0
  104. boris_behav_obs-9.7.7.dist-info/METADATA +139 -0
  105. boris_behav_obs-9.7.7.dist-info/RECORD +109 -0
  106. boris_behav_obs-9.7.7.dist-info/WHEEL +5 -0
  107. boris_behav_obs-9.7.7.dist-info/entry_points.txt +2 -0
  108. boris_behav_obs-9.7.7.dist-info/licenses/LICENSE.TXT +674 -0
  109. boris_behav_obs-9.7.7.dist-info/top_level.txt +1 -0
@@ -0,0 +1,112 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ Inter Rater Reliability (IRR) Unweighted Cohen's Kappa with modifiers
5
+ """
6
+
7
+ import pandas as pd
8
+
9
+ from sklearn.metrics import cohen_kappa_score
10
+ from PySide6.QtWidgets import QInputDialog
11
+
12
+ __version__ = "0.0.3"
13
+ __version_date__ = "2025-09-02"
14
+ __plugin_name__ = "Inter Rater Reliability - Unweighted Cohen's Kappa with modifiers"
15
+ __author__ = "Olivier Friard - University of Torino - Italy"
16
+ __description__ = """
17
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
18
+ Unlike the weighted version, this approach does not take into account the duration of the intervals.
19
+ Each segment of time is treated equally, regardless of how long it lasts.
20
+ This plugin takes into account the modifiers.
21
+
22
+
23
+ How it works:
24
+
25
+ Time segmentation
26
+ The program identifies all the time boundaries (start and end points) used by both observers.
27
+ These boundaries are merged into a common timeline, which is then divided into a set of non-overlapping elementary intervals.
28
+
29
+ Assigning codes
30
+ For each elementary interval, the program determines which behavior was coded by each observer.
31
+
32
+ Comparison of codes
33
+ The program builds two parallel lists of behavior codes, one for each observer.
34
+ Each elementary interval is counted as one unit of observation, no matter how long the interval actually lasts.
35
+
36
+ Cohen's Kappa calculation
37
+ Using these two lists, the program computes Cohen's Kappa using the cohen_kappa_score function of the sklearn package.
38
+ (see https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html for details)
39
+ This coefficient measures how much the observers agree on their coding, adjusted for the amount of agreement that would be expected by chance.
40
+
41
+ """
42
+
43
+
44
def run(df: pd.DataFrame):
    """
    Calculate the Inter Rater Reliability - Unweighted Cohen's Kappa with modifiers.

    Args:
        df: events dataframe with "Observation id", "Start (s)", "Stop (s)",
            "Subject" and "Behavior" columns, plus one tuple-named column per modifier.

    Returns:
        pd.DataFrame: pairwise matrix of Cohen's Kappa values (as formatted strings).
    """

    # Attribute all active codes for each interval
    def get_code(t_start, obs):
        """Return the '+'-joined, sorted codes active at time t_start ('' if none)."""
        active_codes = [seg[2] for seg in obs if seg[0] <= t_start < seg[1]]
        if not active_codes:
            return ""
        # Sort to ensure deterministic representation (e.g., "A+B" instead of "B+A")
        return "+".join(sorted(active_codes))

    # ask user for the number of decimal places for rounding (can be negative)
    round_decimals, ok = QInputDialog.getInt(
        None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
    )
    if not ok:
        # dialog cancelled: fall back to the default instead of using an unconfirmed value
        round_decimals = 3

    # round times
    df["Start (s)"] = df["Start (s)"].round(round_decimals)
    df["Stop (s)"] = df["Stop (s)"].round(round_decimals)

    # Get unique observation ids
    unique_obs_list = df["Observation id"].unique().tolist()

    # modifier columns are identified by their tuple name — presumably (behavior, modifier index); verify against caller
    modifiers: list = [col for col in df.columns if isinstance(col, tuple)]

    # Convert to tuples grouped by observation: (start, stop, "subject|behavior|modifiers")
    grouped: dict = {}
    for obs, group in df.groupby("Observation id"):
        o: list = []
        for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
            # keep only the modifier columns belonging to the row's behavior (row[3]);
            # str() guards the join against non-string modifier values (e.g. NaN) — TODO confirm upstream types
            modif_list = [str(row[i]) for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
            o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
        grouped[obs] = o

    ck_results: dict = {}
    for idx1, obs_id1 in enumerate(unique_obs_list):
        obs1 = grouped[obs_id1]

        # an observation always agrees perfectly with itself
        ck_results[(obs_id1, obs_id1)] = "1.000"

        for obs_id2 in unique_obs_list[idx1 + 1 :]:
            obs2 = grouped[obs_id2]

            # get all the break points
            time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))

            # elementary intervals
            elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]

            obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
            obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]

            # Cohen's Kappa (chance-corrected agreement between the two code sequences)
            kappa = cohen_kappa_score(obs1_codes, obs2_codes)
            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")

            # the matrix is symmetric
            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"

    # DataFrame conversion
    df_results = pd.Series(ck_results).unstack()

    return df_results
@@ -0,0 +1,157 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ Inter Rater Reliability (IRR) Weighted Cohen's Kappa
5
+ """
6
+
7
+ import pandas as pd
8
+ from typing import List, Tuple, Dict, Optional
9
+
10
+ from PySide6.QtWidgets import QInputDialog
11
+
12
+ __version__ = "0.0.3"
13
+ __version_date__ = "2025-09-02"
14
+ __plugin_name__ = "Inter Rater Reliability - Weighted Cohen's Kappa"
15
+ __author__ = "Olivier Friard - University of Torino - Italy"
16
+ __description__ = """
17
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
18
+ Unlike the unweighted version, this approach takes into account the duration of each coded interval, giving more weight to longer intervals in the agreement calculation.
19
+ This plugin does not take into account the modifiers.
20
+
21
+ How it works:
22
+
23
+ Time segmentation
24
+ The program collects all the time boundaries from both observers and merges them into a unified set of time points.
25
+ These define a set of non-overlapping elementary intervals covering the entire observed period.
26
+
27
+ Assigning codes
28
+ For each elementary interval, the program identifies the behavior category assigned by each observer.
29
+
30
+ Weighted contingency table
31
+ Instead of treating each interval equally, the program assigns a weight equal to the duration of the interval.
32
+ These durations are accumulated in a contingency table that records how much time was spent in each combination of categories across the two observers.
33
+
34
+ Agreement calculation
35
+
36
+ Observed agreement (po): The proportion of total time where both observers assigned the same category.
37
+
38
+ Expected agreement (pe): The proportion of agreement expected by chance, based on the time-weighted marginal distributions of each observer's coding.
39
+
40
+ Cohen's Kappa (κ): Computed from the weighted observed and expected agreements.
41
+ """
42
+
43
+
44
def run(df: pd.DataFrame):
    """
    Calculate the Inter Rater Reliability - Weighted Cohen's Kappa.

    Args:
        df: events dataframe with "Observation id", "Start (s)", "Stop (s)",
            "Subject" and "Behavior" columns.

    Returns:
        tuple: (df_results, str_results) — pairwise matrix of Cohen's Kappa values
        (formatted strings) and a plain-text summary of each comparison.
    """

    def cohen_kappa_weighted_by_time(
        obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
    ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
        """
        Compute Cohen's Kappa weighted by time duration.

        Args:
            obs1: List of (start_time, end_time, code) for observer 1
            obs2: List of (start_time, end_time, code) for observer 2

        Returns:
            kappa (float): Cohen's Kappa weighted by duration
            po (float): Observed agreement proportion (weighted)
            pe (float): Expected agreement proportion by chance (weighted)
            contingency (dict): Contingency table {(code1, code2): total_duration}
        """

        # 1. Collect all time boundaries from both observers
        time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))

        # 2. Build elementary intervals (non-overlapping time bins)
        elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]

        # 3. Attribute all active codes for each interval
        def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
            active_codes = [seg[2] for seg in obs if seg[0] <= t < seg[1]]
            if not active_codes:
                return None
            # sorted join gives a deterministic label when several codes overlap
            return "+".join(sorted(active_codes))

        # 4. Build weighted contingency table (durations instead of counts)
        contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
        total_time = 0.0

        for start, end in elementary_intervals:
            c1 = get_code(start, obs1)
            c2 = get_code(start, obs2)
            duration = end - start
            total_time += duration
            contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration

        # 5. Observed agreement (po): share of total time both observers coded the same label
        po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time

        # Marginal distributions (time per code) for each observer
        codes1: Dict[Optional[str], float] = {}
        codes2: Dict[Optional[str], float] = {}
        for (c1, c2), duration in contingency.items():
            codes1[c1] = codes1.get(c1, 0.0) + duration
            codes2[c2] = codes2.get(c2, 0.0) + duration

        # 6. Expected agreement (pe), using marginal proportions
        all_codes = set(codes1) | set(codes2)
        pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)

        # 7. Kappa calculation (0.0 when pe == 1 to avoid a division by zero)
        kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0

        return kappa, po, pe, contingency

    # ask user for the number of decimal places for rounding (can be negative)
    round_decimals, ok = QInputDialog.getInt(
        None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
    )
    if not ok:
        # dialog cancelled: fall back to the default instead of using an unconfirmed value
        round_decimals = 3

    # round times
    df["Start (s)"] = df["Start (s)"].round(round_decimals)
    df["Stop (s)"] = df["Stop (s)"].round(round_decimals)

    # Get unique observation ids
    unique_obs_list = df["Observation id"].unique().tolist()

    # Convert to tuples grouped by observation: (start, stop, "subject|behavior")
    grouped = {
        obs: [
            (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
            for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
        ]
        for obs, group in df.groupby("Observation id")
    }

    ck_results: dict = {}
    str_results: str = ""
    for idx1, obs_id1 in enumerate(unique_obs_list):
        obs1 = grouped[obs_id1]

        # an observation always agrees perfectly with itself
        ck_results[(obs_id1, obs_id1)] = "1.000"

        for obs_id2 in unique_obs_list[idx1 + 1 :]:
            obs2 = grouped[obs_id2]

            # Cohen's Kappa
            kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)

            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
            str_results += (
                f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
            )

            # the matrix is symmetric
            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"

    # DataFrame conversion
    df_results = pd.Series(ck_results).unstack()

    return df_results, str_results
@@ -0,0 +1,162 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ Inter Rater Reliability (IRR) Weighted Cohen's Kappa with modifiers
5
+ """
6
+
7
+ import pandas as pd
8
+ from typing import List, Tuple, Dict, Optional
9
+
10
+ from PySide6.QtWidgets import QInputDialog
11
+
12
+ __version__ = "0.0.3"
13
+ __version_date__ = "2025-09-02"
14
+ __plugin_name__ = "Inter Rater Reliability - Weighted Cohen's Kappa with modifiers"
15
+ __author__ = "Olivier Friard - University of Torino - Italy"
16
+ __description__ = """
17
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
18
+ Unlike the unweighted version, this approach takes into account the duration of each coded interval, giving more weight to longer intervals in the agreement calculation.
19
+ This plugin takes into account the modifiers.
20
+
21
+ How it works:
22
+
23
+ Time segmentation
24
+ The program collects all the time boundaries from both observers and merges them into a unified set of time points.
25
+ These define a set of non-overlapping elementary intervals covering the entire observed period.
26
+
27
+ Assigning codes
28
+ For each elementary interval, the program identifies the behavior category assigned by each observer.
29
+
30
+ Weighted contingency table
31
+ Instead of treating each interval equally, the program assigns a weight equal to the duration of the interval.
32
+ These durations are accumulated in a contingency table that records how much time was spent in each combination of categories across the two observers.
33
+
34
+ Agreement calculation
35
+
36
+ Observed agreement (po): The proportion of total time where both observers assigned the same category.
37
+
38
+ Expected agreement (pe): The proportion of agreement expected by chance, based on the time-weighted marginal distributions of each observer's coding.
39
+
40
+ Cohen's Kappa (κ): Computed from the weighted observed and expected agreements.
41
+ """
42
+
43
+
44
def run(df: pd.DataFrame):
    """
    Calculate the Inter Rater Reliability - Weighted Cohen's Kappa with modifiers.

    Args:
        df: events dataframe with "Observation id", "Start (s)", "Stop (s)",
            "Subject" and "Behavior" columns, plus one tuple-named column per modifier.

    Returns:
        tuple: (df_results, str_results) — pairwise matrix of Cohen's Kappa values
        (formatted strings) and a plain-text summary of each comparison.
    """

    def cohen_kappa_weighted_by_time(
        obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
    ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
        """
        Compute Cohen's Kappa weighted by time duration with modifiers.

        Args:
            obs1: List of (start_time, end_time, code) for observer 1
            obs2: List of (start_time, end_time, code) for observer 2

        Returns:
            kappa (float): Cohen's Kappa weighted by duration
            po (float): Observed agreement proportion (weighted)
            pe (float): Expected agreement proportion by chance (weighted)
            contingency (dict): Contingency table {(code1, code2): total_duration}
        """

        # 1. Collect all time boundaries from both observers
        time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))

        # 2. Build elementary intervals (non-overlapping time bins)
        elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]

        # 3. Attribute all active codes for each interval
        def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
            active_codes = [seg[2] for seg in obs if seg[0] <= t < seg[1]]
            if not active_codes:
                return None
            # sorted join gives a deterministic label when several codes overlap
            return "+".join(sorted(active_codes))

        # 4. Build weighted contingency table (durations instead of counts)
        contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
        total_time = 0.0

        for start, end in elementary_intervals:
            c1 = get_code(start, obs1)
            c2 = get_code(start, obs2)
            duration = end - start
            total_time += duration
            contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration

        # 5. Observed agreement (po): share of total time both observers coded the same label
        po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time

        # Marginal distributions (time per code) for each observer
        codes1: Dict[Optional[str], float] = {}
        codes2: Dict[Optional[str], float] = {}
        for (c1, c2), duration in contingency.items():
            codes1[c1] = codes1.get(c1, 0.0) + duration
            codes2[c2] = codes2.get(c2, 0.0) + duration

        # 6. Expected agreement (pe), using marginal proportions
        all_codes = set(codes1) | set(codes2)
        pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)

        # 7. Kappa calculation (0.0 when pe == 1 to avoid a division by zero)
        kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0

        return kappa, po, pe, contingency

    # ask user for the number of decimal places for rounding (can be negative)
    round_decimals, ok = QInputDialog.getInt(
        None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
    )
    if not ok:
        # dialog cancelled: fall back to the default instead of using an unconfirmed value
        round_decimals = 3

    # round times
    df["Start (s)"] = df["Start (s)"].round(round_decimals)
    df["Stop (s)"] = df["Stop (s)"].round(round_decimals)

    # Get unique observation ids
    unique_obs_list = df["Observation id"].unique().tolist()

    # modifier columns are identified by their tuple name — presumably (behavior, modifier index); verify against caller
    modifiers: list = [col for col in df.columns if isinstance(col, tuple)]

    # Convert to tuples grouped by observation: (start, stop, "subject|behavior|modifiers")
    grouped: dict = {}
    for obs, group in df.groupby("Observation id"):
        o: list = []
        for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
            # keep only the modifier columns belonging to the row's behavior (row[3]);
            # str() guards the join against non-string modifier values (e.g. NaN) — TODO confirm upstream types
            modif_list = [str(row[i]) for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
            o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
        grouped[obs] = o

    ck_results: dict = {}
    str_results: str = ""
    for idx1, obs_id1 in enumerate(unique_obs_list):
        obs1 = grouped[obs_id1]

        # an observation always agrees perfectly with itself
        ck_results[(obs_id1, obs_id1)] = "1.000"

        for obs_id2 in unique_obs_list[idx1 + 1 :]:
            obs2 = grouped[obs_id2]

            # Cohen's Kappa
            kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)

            print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
            str_results += (
                f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
            )

            # the matrix is symmetric
            ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
            ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"

    # DataFrame conversion
    df_results = pd.Series(ck_results).unstack()

    return df_results, str_results
@@ -0,0 +1,22 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ number of occurences of behaviors
5
+ """
6
+
7
+ import pandas as pd
8
+
9
+ __version__ = "0.0.1"
10
+ __version_date__ = "2025-06-13"
11
+ __plugin_name__ = "List of dataframe columns"
12
+ __author__ = "Olivier Friard - University of Torino - Italy"
13
+
14
+
15
def run(df: pd.DataFrame) -> pd.DataFrame:
    """
    Return a one-column dataframe listing the column labels of *df*.
    """
    # each column label of the input becomes one row of the result
    column_labels = df.columns
    return pd.DataFrame(column_labels, columns=["column name"])
@@ -0,0 +1,22 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ number of occurences of behaviors
5
+ """
6
+
7
+ import pandas as pd
8
+
9
+ __version__ = "0.3.0"
10
+ __version_date__ = "2025-03-17"
11
+ __plugin_name__ = "Number of occurences of behaviors"
12
+ __author__ = "Olivier Friard - University of Torino - Italy"
13
+
14
+
15
def run(df: pd.DataFrame):
    """
    Count the number of events for each (Subject, Behavior) pair.
    """
    # group events by subject and behavior, then count the rows of each group
    counts = df.groupby(["Subject", "Behavior"])["Behavior"].count()
    # flatten back to a dataframe; the count column keeps the plugin's historical name
    return counts.reset_index(name="number of occurences")
@@ -0,0 +1,54 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ number of occurences of behaviors by independent_variable
5
+ """
6
+
7
+ import pandas as pd
8
+
9
+ __version__ = "0.4.0"
10
+ __version_date__ = "2025-07-17"
11
+ __plugin_name__ = "Number of occurences of behaviors by subject by independent_variable"
12
+ __author__ = "Olivier Friard - University of Torino - Italy"
13
+
14
+
15
def run(df: pd.DataFrame):
    """
    Calculate the number of occurrences of behaviors by subject and by independent variable.

    Independent variables are expected in columns whose string name starts with
    "independent variable '"; modifier columns (tuple names) are skipped.

    Args:
        df: events dataframe with "Subject" and "Behavior" columns.

    Returns:
        pd.DataFrame with columns "independent variable name", "Value", "Subject",
        "Behavior" and "number of occurences", or the string
        "No independent variable found" if no independent variable column exists.
    """

    df_results_list: list = []
    flag_variable_found = False

    for column in df.columns:
        # skip modifier columns (tuple names) and anything that is not an independent variable
        if isinstance(column, tuple) or (isinstance(column, str) and not column.startswith("independent variable '")):
            continue

        flag_variable_found = True
        # count events grouped by the variable value, the subject and the behavior
        # (annotation fixed: was `df.DataFrame`, which is not a valid type)
        grouped_df: pd.DataFrame = (
            df.groupby(
                [
                    column,
                    "Subject",
                    "Behavior",
                ]
            )["Behavior"]
            .count()
            .reset_index(name="number of occurences")
        )

        # normalize the value column name and record which variable it came from
        grouped_df.rename(columns={column: "Value"}, inplace=True)
        grouped_df.insert(0, "independent variable name", column)

        df_results_list.append(grouped_df)

    if not flag_variable_found:
        return "No independent variable found"

    # flag_variable_found guarantees df_results_list is non-empty here
    return pd.concat(df_results_list, ignore_index=True)
@@ -0,0 +1,61 @@
1
+ """
2
+ BORIS plugin
3
+
4
+ Time budget
5
+ """
6
+
7
+ import pandas as pd
8
+ import numpy as np
9
+
10
+ __version__ = "0.3.0"
11
+ __version_date__ = "2025-03-17"
12
+ __plugin_name__ = "Time budget"
13
+ __author__ = "Olivier Friard - University of Torino - Italy"
14
+
15
+
16
def run(df: pd.DataFrame):
    """
    Calculate a time budget by subject and behavior.

    Computed values:
    - Total number of occurences of behavior
    - Total duration of behavior (in seconds) (pandas.DataFrame.sum() ignores NaN values when computing the sum. Use min_count=1)
    - Duration mean of behavior (in seconds)
    - Standard deviation of behavior duration (in seconds)
    - Inter-event intervals mean (in seconds)
    - Inter-event intervals standard deviation (in seconds)
    - % of total subject observation duration

    Args:
        df: events dataframe with "Observation id", "Subject", "Behavior",
            "Start (s)", "Stop (s)" and "Duration (s)" columns.

    Returns:
        pd.DataFrame: one row per (Subject, Behavior) with all computed values.
    """

    group_by = ["Subject", "Behavior"]

    # min_count=1 makes sum() return NaN (instead of 0) when all durations are NaN
    partial_results = [
        df.groupby(group_by)["Behavior"].count().reset_index(name="number of occurences"),
        df.groupby(group_by)["Duration (s)"].sum(min_count=1).reset_index(name="total duration"),
        df.groupby(group_by)["Duration (s)"].mean().astype(float).round(3).reset_index(name="duration mean"),
        df.groupby(group_by)["Duration (s)"].std().astype(float).round(3).reset_index(name="duration std dev"),
    ]

    # inter-event intervals: gap between the end of an event and the start of the
    # next event of the same (observation, subject, behavior)
    df2 = df.sort_values(by=["Observation id", "Subject", "Behavior", "Start (s)"])
    df2["diff"] = df2.groupby(["Observation id", "Subject", "Behavior"])["Start (s)"].shift(periods=-1) - df2["Stop (s)"]

    partial_results.append(df2.groupby(group_by)["diff"].mean().astype(float).round(3).reset_index(name="inter-event intervals mean"))
    partial_results.append(df2.groupby(group_by)["diff"].std().astype(float).round(3).reset_index(name="inter-event intervals std dev"))

    # % of total subject observation time
    # replace(0, np.nan) avoids a division by zero for zero-length observation spans
    interval = (df.groupby(["Subject"])["Stop (s)"].max() - df.groupby(["Subject"])["Start (s)"].min()).replace(0, np.nan)

    partial_results.append(
        (100 * df.groupby(group_by)["Duration (s)"].sum(min_count=1) / interval)
        .astype(float)
        .round(3)
        .reset_index(name="% of total subject observation duration")
    )

    # merge all partial results on (Subject, Behavior)
    # (fixed: loop variable no longer shadows the `df` parameter)
    merged_df = partial_results[0]
    for partial in partial_results[1:]:
        merged_df = pd.merge(merged_df, partial, on=group_by)

    return merged_df