boris-behav-obs 8.16.5__py3-none-any.whl → 9.7.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126)
  1. boris/__init__.py +1 -1
  2. boris/__main__.py +1 -1
  3. boris/about.py +28 -40
  4. boris/add_modifier.py +88 -80
  5. boris/add_modifier_ui.py +266 -144
  6. boris/advanced_event_filtering.py +23 -29
  7. boris/analysis_plugins/__init__.py +0 -0
  8. boris/analysis_plugins/_export_to_feral.py +225 -0
  9. boris/analysis_plugins/_latency.py +59 -0
  10. boris/analysis_plugins/irr_cohen_kappa.py +109 -0
  11. boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py +112 -0
  12. boris/analysis_plugins/irr_weighted_cohen_kappa.py +157 -0
  13. boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py +162 -0
  14. boris/analysis_plugins/list_of_dataframe_columns.py +22 -0
  15. boris/analysis_plugins/number_of_occurences.py +22 -0
  16. boris/analysis_plugins/number_of_occurences_by_independent_variable.py +54 -0
  17. boris/analysis_plugins/time_budget.py +61 -0
  18. boris/behav_coding_map_creator.py +235 -236
  19. boris/behavior_binary_table.py +33 -50
  20. boris/behaviors_coding_map.py +17 -18
  21. boris/boris_cli.py +6 -25
  22. boris/cmd_arguments.py +12 -1
  23. boris/coding_pad.py +19 -36
  24. boris/config.py +109 -50
  25. boris/config_file.py +58 -67
  26. boris/connections.py +105 -58
  27. boris/converters.py +13 -37
  28. boris/converters_ui.py +187 -110
  29. boris/cooccurence.py +250 -0
  30. boris/core.py +2174 -1303
  31. boris/core_qrc.py +15892 -10829
  32. boris/core_ui.py +941 -806
  33. boris/db_functions.py +17 -42
  34. boris/dev.py +27 -7
  35. boris/dialog.py +461 -242
  36. boris/duration_widget.py +9 -14
  37. boris/edit_event.py +61 -31
  38. boris/edit_event_ui.py +208 -97
  39. boris/event_operations.py +405 -281
  40. boris/events_cursor.py +25 -17
  41. boris/events_snapshots.py +36 -82
  42. boris/exclusion_matrix.py +4 -9
  43. boris/export_events.py +180 -203
  44. boris/export_observation.py +60 -73
  45. boris/external_processes.py +123 -98
  46. boris/geometric_measurement.py +427 -218
  47. boris/gui_utilities.py +91 -14
  48. boris/image_overlay.py +4 -4
  49. boris/import_observations.py +190 -98
  50. boris/ipc_mpv.py +325 -0
  51. boris/irr.py +20 -57
  52. boris/latency.py +31 -24
  53. boris/measurement_widget.py +14 -18
  54. boris/media_file.py +17 -19
  55. boris/menu_options.py +16 -6
  56. boris/modifier_coding_map_creator.py +1013 -0
  57. boris/modifiers_coding_map.py +7 -9
  58. boris/mpv2.py +128 -35
  59. boris/observation.py +501 -211
  60. boris/observation_operations.py +1037 -393
  61. boris/observation_ui.py +573 -363
  62. boris/observations_list.py +51 -58
  63. boris/otx_parser.py +74 -68
  64. boris/param_panel.py +45 -59
  65. boris/param_panel_ui.py +254 -138
  66. boris/player_dock_widget.py +91 -56
  67. boris/plot_data_module.py +20 -53
  68. boris/plot_events.py +56 -153
  69. boris/plot_events_rt.py +16 -30
  70. boris/plot_spectrogram_rt.py +83 -56
  71. boris/plot_waveform_rt.py +27 -49
  72. boris/plugins.py +468 -0
  73. boris/portion/__init__.py +18 -8
  74. boris/portion/const.py +35 -18
  75. boris/portion/dict.py +5 -5
  76. boris/portion/func.py +2 -2
  77. boris/portion/interval.py +21 -41
  78. boris/portion/io.py +41 -32
  79. boris/preferences.py +307 -123
  80. boris/preferences_ui.py +686 -227
  81. boris/project.py +294 -271
  82. boris/project_functions.py +626 -537
  83. boris/project_import_export.py +204 -213
  84. boris/project_ui.py +673 -441
  85. boris/qrc_boris.py +6 -3
  86. boris/qrc_boris5.py +6 -3
  87. boris/select_modifiers.py +62 -90
  88. boris/select_observations.py +19 -197
  89. boris/select_subj_behav.py +67 -39
  90. boris/state_events.py +51 -33
  91. boris/subjects_pad.py +7 -9
  92. boris/synthetic_time_budget.py +42 -26
  93. boris/time_budget_functions.py +169 -169
  94. boris/time_budget_widget.py +77 -89
  95. boris/transitions.py +41 -41
  96. boris/utilities.py +594 -226
  97. boris/version.py +3 -3
  98. boris/video_equalizer.py +16 -14
  99. boris/video_equalizer_ui.py +199 -130
  100. boris/video_operations.py +86 -28
  101. boris/view_df.py +104 -0
  102. boris/view_df_ui.py +75 -0
  103. boris/write_event.py +240 -136
  104. boris_behav_obs-9.7.12.dist-info/METADATA +139 -0
  105. boris_behav_obs-9.7.12.dist-info/RECORD +110 -0
  106. {boris_behav_obs-8.16.5.dist-info → boris_behav_obs-9.7.12.dist-info}/WHEEL +1 -1
  107. boris_behav_obs-9.7.12.dist-info/entry_points.txt +2 -0
  108. boris/README.TXT +0 -22
  109. boris/add_modifier.ui +0 -323
  110. boris/converters.ui +0 -289
  111. boris/core.qrc +0 -37
  112. boris/core.ui +0 -1571
  113. boris/edit_event.ui +0 -233
  114. boris/icons/logo_eye.ico +0 -0
  115. boris/map_creator.py +0 -982
  116. boris/observation.ui +0 -814
  117. boris/param_panel.ui +0 -379
  118. boris/preferences.ui +0 -537
  119. boris/project.ui +0 -1074
  120. boris/vlc_local.py +0 -90
  121. boris_behav_obs-8.16.5.dist-info/LICENSE.TXT +0 -674
  122. boris_behav_obs-8.16.5.dist-info/METADATA +0 -134
  123. boris_behav_obs-8.16.5.dist-info/RECORD +0 -107
  124. boris_behav_obs-8.16.5.dist-info/entry_points.txt +0 -2
  125. {boris → boris_behav_obs-9.7.12.dist-info/licenses}/LICENSE.TXT +0 -0
  126. {boris_behav_obs-8.16.5.dist-info → boris_behav_obs-9.7.12.dist-info}/top_level.txt +0 -0
boris/analysis_plugins/_export_to_feral.py
@@ -0,0 +1,225 @@
+ """
+ BORIS plugin
+
+ Export to FERAL (getferal.ai)
+ """
+
+ import pandas as pd
+ import json
+ from pathlib import Path
+
+ from PySide6.QtWidgets import QFileDialog
+
+ # dependencies for CategoryDialog
+ from PySide6.QtWidgets import QListWidget, QListWidgetItem, QLabel, QPushButton, QVBoxLayout, QHBoxLayout, QDialog
+ from PySide6.QtCore import Qt
+
+
+ __version__ = "0.1.1"
+ __version_date__ = "2025-11-28"
+ __plugin_name__ = "Export observations to FERAL"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+
+
+ class CategoryDialog(QDialog):
+     def __init__(self, items, parent=None):
+         super().__init__(parent)
+
+         self.setWindowTitle("Organize the videos in categories")
+
+         self.setModal(True)
+
+         # Main layout
+         main_layout = QVBoxLayout(self)
+         lists_layout = QHBoxLayout()
+
+         # All videos
+         self.list_unclassified = self._create_list_widget()
+         self.label_unclassified = QLabel("All videos")
+         col0_layout = QVBoxLayout()
+         col0_layout.addWidget(self.label_unclassified)
+         col0_layout.addWidget(self.list_unclassified)
+
+         self.list_cat1 = self._create_list_widget()
+         self.label_cat1 = QLabel("train")
+         col1_layout = QVBoxLayout()
+         col1_layout.addWidget(self.label_cat1)
+         col1_layout.addWidget(self.list_cat1)
+
+         self.list_cat2 = self._create_list_widget()
+         self.label_cat2 = QLabel("val")
+         col2_layout = QVBoxLayout()
+         col2_layout.addWidget(self.label_cat2)
+         col2_layout.addWidget(self.list_cat2)
+
+         self.list_cat3 = self._create_list_widget()
+         self.label_cat3 = QLabel("test")
+         col3_layout = QVBoxLayout()
+         col3_layout.addWidget(self.label_cat3)
+         col3_layout.addWidget(self.list_cat3)
+
+         self.list_cat4 = self._create_list_widget()
+         self.label_cat4 = QLabel("inference")
+         col4_layout = QVBoxLayout()
+         col4_layout.addWidget(self.label_cat4)
+         col4_layout.addWidget(self.list_cat4)
+
+         # Add all columns to the horizontal layout
+         lists_layout.addLayout(col0_layout)
+         lists_layout.addLayout(col1_layout)
+         lists_layout.addLayout(col2_layout)
+         lists_layout.addLayout(col3_layout)
+         lists_layout.addLayout(col4_layout)
+
+         main_layout.addLayout(lists_layout)
+
+         buttons_layout = QHBoxLayout()
+         self.btn_ok = QPushButton("OK")
+         self.btn_cancel = QPushButton("Cancel")
+
+         self.btn_ok.clicked.connect(self.accept)
+         self.btn_cancel.clicked.connect(self.reject)
+
+         buttons_layout.addStretch()
+         buttons_layout.addWidget(self.btn_ok)
+         buttons_layout.addWidget(self.btn_cancel)
+
+         main_layout.addLayout(buttons_layout)
+
+         # Populate "Unclassified" with input items
+         for text in items:
+             QListWidgetItem(text, self.list_unclassified)
+
+     def _create_list_widget(self):
+         """
+         Create a QListWidget ready for drag & drop.
+         """
+         lw = QListWidget()
+         lw.setSelectionMode(QListWidget.ExtendedSelection)
+         lw.setDragEnabled(True)
+         lw.setAcceptDrops(True)
+         lw.setDropIndicatorShown(True)
+         lw.setDragDropMode(QListWidget.DragDrop)
+         lw.setDefaultDropAction(Qt.MoveAction)
+         return lw
+
+     def get_categories(self):
+         """
+         Return the content of all categories as a dictionary of lists.
+         """
+
+         def collect(widget):
+             return [widget.item(i).text().rstrip("*") for i in range(widget.count())]
+
+         return {
+             "unclassified": collect(self.list_unclassified),
+             "train": collect(self.list_cat1),
+             "val": collect(self.list_cat2),
+             "test": collect(self.list_cat3),
+             "inference": collect(self.list_cat4),
+         }
+
+
+ def run(df: pd.DataFrame, project: dict):
+     """
+     Export observations to FERAL
+     See https://www.getferal.ai/ > Label Preparation
+     """
+
+     out: dict = {
+         "is_multilabel": False,
+         "splits": {
+             "train": [],
+             "val": [],
+             "test": [],
+             "inference": [],
+         },
+     }
+
+     log: list = []
+
+     # class names
+     class_names = {x: project["behaviors_conf"][x]["code"] for x in project["behaviors_conf"]}
+     out["class_names"] = class_names
+     reversed_class_names = {project["behaviors_conf"][x]["code"]: int(x) for x in project["behaviors_conf"]}
+     log.append(f"{class_names=}")
+
+     observations: list = sorted([x for x in project["observations"]])
+     log.append(f"Selected observation: {observations}")
+
+     labels: dict = {}
+     video_list: list = []
+     for observation_id in observations:
+         log.append("---")
+         log.append(observation_id)
+
+         # check number of media file in player #1
+         if len(project["observations"][observation_id]["file"]["1"]) != 1:
+             log.append(f"The observation {observation_id} contains more than one video")
+             continue
+
+         # check number of coded subjects
+         if len(set([x[1] for x in project["observations"][observation_id]["events"]])) > 1:
+             log.append(f"The observation {observation_id} contains more than one subject")
+             continue
+
+         media_file_path: str = project["observations"][observation_id]["file"]["1"][0]
+         media_file_name = str(Path(media_file_path).name)
+
+         # skip if no events
+         if not project["observations"][observation_id]["events"]:
+             video_list.append(media_file_name)
+             log.append(f"No events for observation {observation_id}")
+             continue
+         else:
+             video_list.append(media_file_name + "*")
+
+         # extract FPS
+         FPS = project["observations"][observation_id]["media_info"]["fps"][media_file_path]
+         log.append(f"{media_file_name} {FPS=}")
+         # extract media duration
+         duration = project["observations"][observation_id]["media_info"]["length"][media_file_path]
+         log.append(f"{media_file_name} {duration=}")
+
+         number_of_frames = int(duration / (1 / FPS))
+         log.append(f"{number_of_frames=}")
+
+         labels[media_file_name] = [0] * number_of_frames
+
+         for idx in range(number_of_frames):
+             t = idx * (1 / FPS)
+             behaviors = (
+                 df[(df["Observation id"] == observation_id) & (df["Start (s)"] <= t) & (df["Stop (s)"] >= t)]["Behavior"].unique().tolist()
+             )
+             if len(behaviors) > 1:
+                 log.append(f"The observation {observation_id} contains more than one behavior for frame {idx}")
+                 del labels[media_file_name]
+                 break
+             if behaviors:
+                 behaviors_idx = reversed_class_names[behaviors[0]]
+                 labels[media_file_name][idx] = behaviors_idx
+
+     out["labels"] = labels
+
+     # splits
+     dlg = CategoryDialog(video_list)
+
+     if dlg.exec():  # Dialog accepted
+         result = dlg.get_categories()
+         del result["unclassified"]
+         out["splits"] = result
+
+         filename, _ = QFileDialog.getSaveFileName(
+             None,
+             "Choose a file to save",
+             "",  # start directory
+             "JSON files (*.json);;All files (*.*)",
+         )
+         if filename:
+             with open(filename, "w") as f_out:
+                 f_out.write(json.dumps(out, separators=(",", ": "), indent=1))
+
+     else:
+         log.append("splits section missing")
+
+     return "\n".join(log)
boris/analysis_plugins/_latency.py
@@ -0,0 +1,59 @@
+ """
+ BORIS plugin
+
+ Latency of a behavior after another behavior
+ """
+
+ import itertools
+
+ import pandas as pd
+
+ __version__ = "0.0.1"
+ __version_date__ = "2025-04-10"
+ __plugin_name__ = "Behavior latency"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+
+
+ def run(df: pd.DataFrame):
+     """
+     Latency of a behavior after another.
+     """
+
+     df["start_time"] = pd.to_datetime(df["Start (s)"], unit="s")
+     df["end_time"] = pd.to_datetime(df["Stop (s)"], unit="s")
+
+     latency_by_subject: dict = {}
+
+     for subject, group in df.groupby("Subject"):
+         behaviors = group["Behavior"].tolist()
+         # use itertools to build the pairwise (2 by 2) combinations of behaviors
+         for comb in itertools.combinations(sorted(set(behaviors)), 2):
+             last_A_end_time = None
+
+             # list storing the latencies for this subject
+             subject_latency = []
+
+             for _, row in group.iterrows():
+                 if row["Behavior"] == comb[0]:
+                     # a behavior A is encountered: reset the end time of behavior A
+                     last_A_end_time = row["end_time"]
+                     subject_latency.append(None)  # no latency for A
+                 elif row["Behavior"] == comb[1] and last_A_end_time is not None:
+                     # a behavior B is encountered and an A was seen before
+                     latency_time = row["start_time"] - last_A_end_time
+                     subject_latency.append(latency_time)
+                 else:
+                     # a B is encountered without an A before
+                     subject_latency.append(None)
+
+             # add the computed latencies to the DataFrame
+             df.loc[group.index, f"latency {comb[1]} after {comb[0]}"] = subject_latency
+
+             # total and mean latency for this subject and this combination of behaviors
+             latencies = pd.Series([lat for lat in subject_latency if lat is not None])
+             latency_by_subject[(subject, comb)] = latencies.agg(["sum", "mean"])
+
+     return str(latency_by_subject)
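A minimal sketch of the quantity this draft plugin computes, on hypothetical data: if behavior A stops at 10 s and behavior B starts at 12 s for the same subject, the latency of B after A is 2 s. The column names mirror the exported dataframe used by the other plugins; the values are illustrative only.

import pandas as pd

# Hypothetical events for one subject
toy = pd.DataFrame(
    {
        "Subject": ["subj1", "subj1"],
        "Behavior": ["A", "B"],
        "Start (s)": [5.0, 12.0],
        "Stop (s)": [10.0, 14.0],
    }
)
# latency of B after A = start of B - stop of A
print(toy.loc[1, "Start (s)"] - toy.loc[0, "Stop (s)"])  # 2.0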
boris/analysis_plugins/irr_cohen_kappa.py
@@ -0,0 +1,109 @@
+ """
+ BORIS plugin
+
+ Inter Rater Reliability (IRR) Unweighted Cohen's Kappa
+ """
+
+ import pandas as pd
+ from typing import Dict, Tuple
+
+ from sklearn.metrics import cohen_kappa_score
+ from PySide6.QtWidgets import QInputDialog
+
+
+ __version__ = "0.0.3"
+ __version_date__ = "2025-09-02"
+ __plugin_name__ = "Inter Rater Reliability - Unweighted Cohen's Kappa"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+ __description__ = """
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
+ Unlike the weighted version, this approach does not take into account the duration of the intervals.
+ Each segment of time is treated equally, regardless of how long it lasts.
+ This plugin does not take into account the modifiers.
+
+ How it works:
+
+ Time segmentation
+ The program identifies all the time boundaries (start and end points) used by both observers.
+ These boundaries are merged into a common timeline, which is then divided into a set of non-overlapping elementary intervals.
+
+ Assigning codes
+ For each elementary interval, the program determines which behavior was coded by each observer.
+
+ Comparison of codes
+ The program builds two parallel lists of behavior codes, one for each observer.
+ Each elementary interval is counted as one unit of observation, no matter how long the interval actually lasts.
+
+ Cohen's Kappa calculation
+ Using these two lists, the program computes Cohen's Kappa using the cohen_kappa_score function of the sklearn package.
+ (see https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html for details)
+ This coefficient measures how much the observers agree on their coding, adjusted for the amount of agreement that would be expected by chance.
+
+ """
+
+
+ def run(df: pd.DataFrame) -> pd.DataFrame:
+     """
+     Calculate the Inter Rater Reliability - Unweighted Cohen's Kappa
+     """
+
+     # Attribute all active codes for each interval
+     def get_code(t_start, obs):
+         active_codes = [seg[2] for seg in obs if seg[0] <= t_start < seg[1]]
+         if not active_codes:
+             return ""
+         # Sort to ensure deterministic representation (e.g., "A+B" instead of "B+A")
+         return "+".join(sorted(active_codes))
+
+     # ask user for the number of decimal places for rounding (can be negative)
+     round_decimals, ok = QInputDialog.getInt(
+         None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
+     )
+
+     # round times
+     df["Start (s)"] = df["Start (s)"].round(round_decimals)
+     df["Stop (s)"] = df["Stop (s)"].round(round_decimals)
+
+     # Get unique values
+     unique_obs_list = df["Observation id"].unique().tolist()
+
+     # Convert to tuples grouped by observation
+     grouped = {
+         obs: [
+             (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+             for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+         ]
+         for obs, group in df.groupby("Observation id")
+     }
+
+     ck_results: Dict[Tuple[str, str], str] = {}
+     for idx1, obs_id1 in enumerate(unique_obs_list):
+         obs1 = grouped[obs_id1]
+
+         # Perfect agreement with itself
+         ck_results[(obs_id1, obs_id1)] = "1.000"
+
+         for obs_id2 in unique_obs_list[idx1 + 1 :]:
+             obs2 = grouped[obs_id2]
+
+             # get all the break points
+             time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+             # elementary intervals
+             elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+             obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
+
+             obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]
+
+             # Cohen's Kappa
+             kappa = cohen_kappa_score(obs1_codes, obs2_codes)
+             print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")
+
+             ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+             ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+     # DataFrame conversion
+     df_results = pd.Series(ck_results).unstack()
+
+     return df_results
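A minimal, self-contained sketch of the comparison performed above, on two hypothetical observations (`code_at` is an illustrative stand-in for the plugin's `get_code`): the merged break points define elementary intervals, and each interval contributes exactly one pair of codes to `cohen_kappa_score`, whatever its duration.

from sklearn.metrics import cohen_kappa_score

# Observer 1 codes "subject|walk" for 0-10 s; observer 2 codes it for 0-8 s, then "subject|rest" for 8-10 s
obs1 = [(0.0, 10.0, "subject|walk")]
obs2 = [(0.0, 8.0, "subject|walk"), (8.0, 10.0, "subject|rest")]

time_points = sorted({t for seg in obs1 + obs2 for t in seg[:2]})  # [0.0, 8.0, 10.0]
intervals = list(zip(time_points, time_points[1:]))                # [(0.0, 8.0), (8.0, 10.0)]

def code_at(t, obs):
    # codes active at the start of an elementary interval, joined deterministically
    return "+".join(sorted(seg[2] for seg in obs if seg[0] <= t < seg[1]))

codes1 = [code_at(start, obs1) for start, _ in intervals]  # ['subject|walk', 'subject|walk']
codes2 = [code_at(start, obs2) for start, _ in intervals]  # ['subject|walk', 'subject|rest']
print(cohen_kappa_score(codes1, codes2))  # 0.0: one agreement out of two is exactly chance level here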
boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py
@@ -0,0 +1,112 @@
+ """
+ BORIS plugin
+
+ Inter Rater Reliability (IRR) Unweighted Cohen's Kappa with modifiers
+ """
+
+ import pandas as pd
+
+ from sklearn.metrics import cohen_kappa_score
+ from PySide6.QtWidgets import QInputDialog
+
+ __version__ = "0.0.3"
+ __version_date__ = "2025-09-02"
+ __plugin_name__ = "Inter Rater Reliability - Unweighted Cohen's Kappa with modifiers"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+ __description__ = """
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
+ Unlike the weighted version, this approach does not take into account the duration of the intervals.
+ Each segment of time is treated equally, regardless of how long it lasts.
+ This plugin takes into account the modifiers.
+
+
+ How it works:
+
+ Time segmentation
+ The program identifies all the time boundaries (start and end points) used by both observers.
+ These boundaries are merged into a common timeline, which is then divided into a set of non-overlapping elementary intervals.
+
+ Assigning codes
+ For each elementary interval, the program determines which behavior was coded by each observer.
+
+ Comparison of codes
+ The program builds two parallel lists of behavior codes, one for each observer.
+ Each elementary interval is counted as one unit of observation, no matter how long the interval actually lasts.
+
+ Cohen's Kappa calculation
+ Using these two lists, the program computes Cohen's Kappa using the cohen_kappa_score function of the sklearn package.
+ (see https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html for details)
+ This coefficient measures how much the observers agree on their coding, adjusted for the amount of agreement that would be expected by chance.
+
+ """
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the Inter Rater Reliability - Unweighted Cohen's Kappa with modifiers
+     """
+
+     # Attribute all active codes for each interval
+     def get_code(t_start, obs):
+         active_codes = [seg[2] for seg in obs if seg[0] <= t_start < seg[1]]
+         if not active_codes:
+             return ""
+         # Sort to ensure deterministic representation (e.g., "A+B" instead of "B+A")
+         return "+".join(sorted(active_codes))
+
+     # ask user for the number of decimal places for rounding (can be negative)
+     round_decimals, ok = QInputDialog.getInt(
+         None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
+     )
+
+     # round times
+     df["Start (s)"] = df["Start (s)"].round(round_decimals)
+     df["Stop (s)"] = df["Stop (s)"].round(round_decimals)
+
+     # Get unique values
+     unique_obs_list = df["Observation id"].unique().tolist()
+
+     # Convert to tuples grouped by observation
+     grouped: dict = {}
+     modifiers: list = []
+     for col in df.columns:
+         if isinstance(col, tuple):
+             modifiers.append(col)
+
+     for obs, group in df.groupby("Observation id"):
+         o: list = []
+         for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
+             modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
+             o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
+         grouped[obs] = o
+
+     ck_results: dict = {}
+     for idx1, obs_id1 in enumerate(unique_obs_list):
+         obs1 = grouped[obs_id1]
+
+         ck_results[(obs_id1, obs_id1)] = "1.000"
+
+         for obs_id2 in unique_obs_list[idx1 + 1 :]:
+             obs2 = grouped[obs_id2]
+
+             # get all the break points
+             time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+             # elementary intervals
+             elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+             obs1_codes = [get_code(t[0], obs1) for t in elementary_intervals]
+
+             obs2_codes = [get_code(t[0], obs2) for t in elementary_intervals]
+
+             # Cohen's Kappa
+             kappa = cohen_kappa_score(obs1_codes, obs2_codes)
+             print(f"{obs_id1} - {obs_id2}: Cohen's Kappa : {kappa:.3f}")
+
+             ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+             ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+     # DataFrame conversion
+     df_results = pd.Series(ck_results).unstack()
+
+     return df_results
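The only difference from the plain unweighted plugin is the comparison string: each event is flattened to "Subject|Behavior|modifiers" before the same interval logic runs, so two observers only agree on an interval when subject, behavior and modifier values all match. A hypothetical illustration of the string compared by `get_code`:

# Hypothetical event: subject "subj1", behavior "walk", modifier value "fast"
code = "subj1" + "|" + "walk" + "|" + ",".join(["fast"])
print(code)  # subj1|walk|fast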
boris/analysis_plugins/irr_weighted_cohen_kappa.py
@@ -0,0 +1,157 @@
+ """
+ BORIS plugin
+
+ Inter Rater Reliability (IRR) Weighted Cohen's Kappa
+ """
+
+ import pandas as pd
+ from typing import List, Tuple, Dict, Optional
+
+ from PySide6.QtWidgets import QInputDialog
+
+ __version__ = "0.0.3"
+ __version_date__ = "2025-09-02"
+ __plugin_name__ = "Inter Rater Reliability - Weighted Cohen's Kappa"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+ __description__ = """
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
+ Unlike the unweighted version, this approach takes into account the duration of each coded interval, giving more weight to longer intervals in the agreement calculation.
+ This plugin does not take into account the modifiers.
+
+ How it works:
+
+ Time segmentation
+ The program collects all the time boundaries from both observers and merges them into a unified set of time points.
+ These define a set of non-overlapping elementary intervals covering the entire observed period.
+
+ Assigning codes
+ For each elementary interval, the program identifies the behavior category assigned by each observer.
+
+ Weighted contingency table
+ Instead of treating each interval equally, the program assigns a weight equal to the duration of the interval.
+ These durations are accumulated in a contingency table that records how much time was spent in each combination of categories across the two observers.
+
+ Agreement calculation
+
+ Observed agreement (po): The proportion of total time where both observers assigned the same category.
+
+ Expected agreement (pe): The proportion of agreement expected by chance, based on the time-weighted marginal distributions of each observer's coding.
+
+ Cohen's Kappa (κ): Computed from the weighted observed and expected agreements.
+ """
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the Inter Rater Reliability - Weighted Cohen's Kappa
+     """
+
+     def cohen_kappa_weighted_by_time(
+         obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+     ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+         """
+         Compute Cohen's Kappa weighted by time duration.
+
+         Args:
+             obs1: List of (start_time, end_time, code) for observer 1
+             obs2: List of (start_time, end_time, code) for observer 2
+
+         Returns:
+             kappa (float): Cohen's Kappa weighted by duration
+             po (float): Observed agreement proportion (weighted)
+             pe (float): Expected agreement proportion by chance (weighted)
+             contingency (dict): Contingency table {(code1, code2): total_duration}
+         """
+
+         # 1. Collect all time boundaries from both observers
+         time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+         # 2. Build elementary intervals (non-overlapping time bins)
+         elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+         # 3. Attribute all active codes for each interval
+         def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+             active_codes = [seg[2] for seg in obs if seg[0] <= t < seg[1]]
+             if not active_codes:
+                 return None
+             return "+".join(sorted(active_codes))
+
+         # 4. Build weighted contingency table (durations instead of counts)
+         contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+         total_time = 0.0
+
+         for start, end in elementary_intervals:
+             c1 = get_code(start, obs1)
+             c2 = get_code(start, obs2)
+             duration = end - start
+             total_time += duration
+             contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+         # 5. Observed agreement (po)
+         po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+         # Marginal distributions for each observer
+         codes1: Dict[Optional[str], float] = {}
+         codes2: Dict[Optional[str], float] = {}
+         for (c1, c2), duration in contingency.items():
+             codes1[c1] = codes1.get(c1, 0.0) + duration
+             codes2[c2] = codes2.get(c2, 0.0) + duration
+
+         # 6. Expected agreement (pe), using marginal proportions
+         all_codes = set(codes1) | set(codes2)
+         pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+         # 7. Kappa calculation
+         kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+         return kappa, po, pe, contingency
+
+     # ask user for the number of decimal places for rounding (can be negative)
+     round_decimals, ok = QInputDialog.getInt(
+         None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
+     )
+
+     # round times
+     df["Start (s)"] = df["Start (s)"].round(round_decimals)
+     df["Stop (s)"] = df["Stop (s)"].round(round_decimals)
+
+     # Get unique values as a numpy array
+     unique_obs = df["Observation id"].unique()
+
+     # Convert to a list
+     unique_obs_list = unique_obs.tolist()
+
+     # Convert to tuples grouped by observation
+     grouped = {
+         obs: [
+             (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+             for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+         ]
+         for obs, group in df.groupby("Observation id")
+     }
+
+     ck_results: dict = {}
+     str_results: str = ""
+     for idx1, obs_id1 in enumerate(unique_obs_list):
+         obs1 = grouped[obs_id1]
+
+         ck_results[(obs_id1, obs_id1)] = "1.000"
+
+         for obs_id2 in unique_obs_list[idx1 + 1 :]:
+             obs2 = grouped[obs_id2]
+
+             # Cohen's Kappa
+             kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+             print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+             str_results += (
+                 f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+             )
+
+             ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+             ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+     # DataFrame conversion
+     df_results = pd.Series(ck_results).unstack()
+
+     return df_results, str_results
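A worked example of the duration-weighted agreement computed by `cohen_kappa_weighted_by_time` (hypothetical intervals, not taken from the package): observer 1 codes A on 0-6 s and B on 6-10 s, observer 2 codes A on 0-5 s and B on 5-10 s.

# Elementary intervals and durations:
#   0-5 s  (5 s): A vs A -> agreement
#   5-6 s  (1 s): A vs B -> disagreement
#   6-10 s (4 s): B vs B -> agreement
total = 10.0
po = (5 + 4) / total                                        # 0.9  time-weighted observed agreement
pe = (6 / total) * (5 / total) + (4 / total) * (5 / total)  # 0.5  agreement expected by chance
kappa = (po - pe) / (1 - pe)
print(round(kappa, 3))  # 0.8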