boris-behav-obs 8.12__py3-none-any.whl → 9.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. boris/__init__.py +1 -1
  2. boris/__main__.py +1 -1
  3. boris/about.py +28 -39
  4. boris/add_modifier.py +122 -109
  5. boris/add_modifier_ui.py +239 -135
  6. boris/advanced_event_filtering.py +81 -45
  7. boris/analysis_plugins/__init__.py +0 -0
  8. boris/analysis_plugins/_latency.py +59 -0
  9. boris/analysis_plugins/irr_cohen_kappa.py +109 -0
  10. boris/analysis_plugins/irr_cohen_kappa_with_modifiers.py +112 -0
  11. boris/analysis_plugins/irr_weighted_cohen_kappa.py +157 -0
  12. boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py +162 -0
  13. boris/analysis_plugins/list_of_dataframe_columns.py +22 -0
  14. boris/analysis_plugins/number_of_occurences.py +22 -0
  15. boris/analysis_plugins/number_of_occurences_by_independent_variable.py +54 -0
  16. boris/analysis_plugins/time_budget.py +61 -0
  17. boris/behav_coding_map_creator.py +228 -229
  18. boris/behavior_binary_table.py +33 -50
  19. boris/behaviors_coding_map.py +17 -18
  20. boris/boris_cli.py +6 -25
  21. boris/cmd_arguments.py +12 -1
  22. boris/coding_pad.py +42 -49
  23. boris/config.py +141 -65
  24. boris/config_file.py +58 -67
  25. boris/connections.py +107 -61
  26. boris/converters.py +13 -37
  27. boris/converters_ui.py +187 -110
  28. boris/cooccurence.py +250 -0
  29. boris/core.py +2373 -1786
  30. boris/core_qrc.py +15895 -10743
  31. boris/core_ui.py +943 -798
  32. boris/db_functions.py +17 -42
  33. boris/dev.py +109 -8
  34. boris/dialog.py +482 -236
  35. boris/duration_widget.py +9 -14
  36. boris/edit_event.py +61 -31
  37. boris/edit_event_ui.py +208 -97
  38. boris/event_operations.py +408 -293
  39. boris/events_cursor.py +25 -17
  40. boris/events_snapshots.py +36 -82
  41. boris/exclusion_matrix.py +4 -9
  42. boris/export_events.py +184 -223
  43. boris/export_observation.py +74 -100
  44. boris/external_processes.py +123 -98
  45. boris/geometric_measurement.py +644 -290
  46. boris/gui_utilities.py +91 -14
  47. boris/image_overlay.py +4 -4
  48. boris/import_observations.py +190 -98
  49. boris/ipc_mpv.py +325 -0
  50. boris/irr.py +20 -57
  51. boris/latency.py +31 -24
  52. boris/measurement_widget.py +14 -18
  53. boris/media_file.py +17 -19
  54. boris/menu_options.py +17 -6
  55. boris/modifier_coding_map_creator.py +1013 -0
  56. boris/modifiers_coding_map.py +7 -9
  57. boris/mpv.py +1 -0
  58. boris/mpv2.py +732 -705
  59. boris/observation.py +533 -221
  60. boris/observation_operations.py +1025 -390
  61. boris/observation_ui.py +572 -362
  62. boris/observations_list.py +71 -53
  63. boris/otx_parser.py +74 -68
  64. boris/param_panel.py +31 -16
  65. boris/param_panel_ui.py +254 -138
  66. boris/player_dock_widget.py +90 -60
  67. boris/plot_data_module.py +25 -33
  68. boris/plot_events.py +127 -90
  69. boris/plot_events_rt.py +17 -31
  70. boris/plot_spectrogram_rt.py +95 -30
  71. boris/plot_waveform_rt.py +32 -21
  72. boris/plugins.py +431 -0
  73. boris/portion/__init__.py +18 -8
  74. boris/portion/const.py +35 -18
  75. boris/portion/dict.py +5 -5
  76. boris/portion/func.py +2 -2
  77. boris/portion/interval.py +21 -41
  78. boris/portion/io.py +41 -32
  79. boris/preferences.py +306 -83
  80. boris/preferences_ui.py +684 -227
  81. boris/project.py +448 -293
  82. boris/project_functions.py +671 -238
  83. boris/project_import_export.py +213 -222
  84. boris/project_ui.py +674 -438
  85. boris/qrc_boris.py +6 -3
  86. boris/qrc_boris5.py +6 -3
  87. boris/select_modifiers.py +74 -48
  88. boris/select_observations.py +20 -198
  89. boris/select_subj_behav.py +67 -39
  90. boris/state_events.py +52 -35
  91. boris/subjects_pad.py +6 -9
  92. boris/synthetic_time_budget.py +45 -28
  93. boris/time_budget_functions.py +171 -171
  94. boris/time_budget_widget.py +84 -114
  95. boris/transitions.py +41 -47
  96. boris/utilities.py +627 -236
  97. boris/version.py +3 -3
  98. boris/video_equalizer.py +16 -14
  99. boris/video_equalizer_ui.py +199 -130
  100. boris/video_operations.py +95 -29
  101. boris/view_df.py +104 -0
  102. boris/view_df_ui.py +75 -0
  103. boris/write_event.py +538 -0
  104. boris_behav_obs-9.7.6.dist-info/METADATA +139 -0
  105. boris_behav_obs-9.7.6.dist-info/RECORD +109 -0
  106. {boris_behav_obs-8.12.dist-info → boris_behav_obs-9.7.6.dist-info}/WHEEL +1 -1
  107. boris_behav_obs-9.7.6.dist-info/entry_points.txt +2 -0
  108. boris/README.TXT +0 -22
  109. boris/add_modifier.ui +0 -323
  110. boris/converters.ui +0 -289
  111. boris/core.qrc +0 -36
  112. boris/core.ui +0 -1556
  113. boris/edit_event.ui +0 -233
  114. boris/icons/logo_eye.ico +0 -0
  115. boris/map_creator.py +0 -850
  116. boris/observation.ui +0 -814
  117. boris/param_panel.ui +0 -379
  118. boris/preferences.ui +0 -537
  119. boris/project.ui +0 -1069
  120. boris/project_server.py +0 -236
  121. boris/vlc.py +0 -10343
  122. boris/vlc_local.py +0 -90
  123. boris_behav_obs-8.12.dist-info/LICENSE.TXT +0 -674
  124. boris_behav_obs-8.12.dist-info/METADATA +0 -128
  125. boris_behav_obs-8.12.dist-info/RECORD +0 -108
  126. boris_behav_obs-8.12.dist-info/entry_points.txt +0 -3
  127. {boris → boris_behav_obs-9.7.6.dist-info/licenses}/LICENSE.TXT +0 -0
  128. {boris_behav_obs-8.12.dist-info → boris_behav_obs-9.7.6.dist-info}/top_level.txt +0 -0

boris/analysis_plugins/irr_weighted_cohen_kappa.py
@@ -0,0 +1,157 @@
+ """
+ BORIS plugin
+
+ Inter Rater Reliability (IRR) Weighted Cohen's Kappa
+ """
+
+ import pandas as pd
+ from typing import List, Tuple, Dict, Optional
+
+ from PySide6.QtWidgets import QInputDialog
+
+ __version__ = "0.0.3"
+ __version_date__ = "2025-09-02"
+ __plugin_name__ = "Inter Rater Reliability - Weighted Cohen's Kappa"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+ __description__ = """
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
+ Unlike the unweighted version, this approach takes into account the duration of each coded interval, giving more weight to longer intervals in the agreement calculation.
+ This plugin does not take into account the modifiers.
+
+ How it works:
+
+ Time segmentation
+ The program collects all the time boundaries from both observers and merges them into a unified set of time points.
+ These define a set of non-overlapping elementary intervals covering the entire observed period.
+
+ Assigning codes
+ For each elementary interval, the program identifies the behavior category assigned by each observer.
+
+ Weighted contingency table
+ Instead of treating each interval equally, the program assigns a weight equal to the duration of the interval.
+ These durations are accumulated in a contingency table that records how much time was spent in each combination of categories across the two observers.
+
+ Agreement calculation
+
+ Observed agreement (po): The proportion of total time where both observers assigned the same category.
+
+ Expected agreement (pe): The proportion of agreement expected by chance, based on the time-weighted marginal distributions of each observer's coding.
+
+ Cohen's Kappa (κ): Computed from the weighted observed and expected agreements.
+ """
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the Inter Rater Reliability - Weighted Cohen's Kappa
+     """
+
+     def cohen_kappa_weighted_by_time(
+         obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+     ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+         """
+         Compute Cohen's Kappa weighted by time duration.
+
+         Args:
+             obs1: List of (start_time, end_time, code) for observer 1
+             obs2: List of (start_time, end_time, code) for observer 2
+
+         Returns:
+             kappa (float): Cohen's Kappa weighted by duration
+             po (float): Observed agreement proportion (weighted)
+             pe (float): Expected agreement proportion by chance (weighted)
+             contingency (dict): Contingency table {(code1, code2): total_duration}
+         """
+
+         # 1. Collect all time boundaries from both observers
+         time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+         # 2. Build elementary intervals (non-overlapping time bins)
+         elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+         # 3. Attribute all active codes for each interval
+         def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+             active_codes = [seg[2] for seg in obs if seg[0] <= t < seg[1]]
+             if not active_codes:
+                 return None
+             return "+".join(sorted(active_codes))
+
+         # 4. Build weighted contingency table (durations instead of counts)
+         contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+         total_time = 0.0
+
+         for start, end in elementary_intervals:
+             c1 = get_code(start, obs1)
+             c2 = get_code(start, obs2)
+             duration = end - start
+             total_time += duration
+             contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+         # 5. Observed agreement (po)
+         po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+         # Marginal distributions for each observer
+         codes1: Dict[Optional[str], float] = {}
+         codes2: Dict[Optional[str], float] = {}
+         for (c1, c2), duration in contingency.items():
+             codes1[c1] = codes1.get(c1, 0.0) + duration
+             codes2[c2] = codes2.get(c2, 0.0) + duration
+
+         # 6. Expected agreement (pe), using marginal proportions
+         all_codes = set(codes1) | set(codes2)
+         pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+         # 7. Kappa calculation
+         kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+         return kappa, po, pe, contingency
+
+     # ask user for the number of decimal places for rounding (can be negative)
+     round_decimals, ok = QInputDialog.getInt(
+         None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
+     )
+
+     # round times
+     df["Start (s)"] = df["Start (s)"].round(round_decimals)
+     df["Stop (s)"] = df["Stop (s)"].round(round_decimals)
+
+     # Get unique values as a numpy array
+     unique_obs = df["Observation id"].unique()
+
+     # Convert to a list
+     unique_obs_list = unique_obs.tolist()
+
+     # Convert to tuples grouped by observation
+     grouped = {
+         obs: [
+             (row[0], row[1], row[2] + "|" + row[3])  # concatenate subject and behavior with |
+             for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"]].itertuples(index=False, name=None)
+         ]
+         for obs, group in df.groupby("Observation id")
+     }
+
+     ck_results: dict = {}
+     str_results: str = ""
+     for idx1, obs_id1 in enumerate(unique_obs_list):
+         obs1 = grouped[obs_id1]
+
+         ck_results[(obs_id1, obs_id1)] = "1.000"
+
+         for obs_id2 in unique_obs_list[idx1 + 1 :]:
+             obs2 = grouped[obs_id2]
+
+             # Cohen's Kappa
+             kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+             print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+             str_results += (
+                 f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+             )
+
+             ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+             ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+     # DataFrame conversion
+     df_results = pd.Series(ck_results).unstack()
+
+     return df_results, str_results
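
For a hand-checkable illustration of the algorithm described in the plugin's __description__ (this sketch is not part of the diff), consider two hypothetical observers coding the same 10-second period; the arithmetic below mirrors what cohen_kappa_weighted_by_time() computes:

obs1 = [(0.0, 4.0, "rest"), (4.0, 10.0, "feed")]   # observer 1: (start, stop, code)
obs2 = [(0.0, 6.0, "rest"), (6.0, 10.0, "feed")]   # observer 2

# Unified boundaries 0, 4, 6, 10 give elementary intervals (0,4), (4,6), (6,10).
# (0,4) rest/rest and (6,10) feed/feed agree (8 s); (4,6) feed/rest disagrees (2 s).
po = (4 + 4) / 10                                  # observed agreement, weighted by duration = 0.8
pe = (4 / 10) * (6 / 10) + (6 / 10) * (4 / 10)     # chance agreement from time-weighted marginals = 0.48
kappa = (po - pe) / (1 - pe)
print(round(kappa, 3))                             # 0.615

Passing the same two lists to the nested function should return this value together with the duration-weighted contingency table.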

boris/analysis_plugins/irr_weighted_cohen_kappa_with_modifiers.py
@@ -0,0 +1,162 @@
+ """
+ BORIS plugin
+
+ Inter Rater Reliability (IRR) Weighted Cohen's Kappa with modifiers
+ """
+
+ import pandas as pd
+ from typing import List, Tuple, Dict, Optional
+
+ from PySide6.QtWidgets import QInputDialog
+
+ __version__ = "0.0.3"
+ __version_date__ = "2025-09-02"
+ __plugin_name__ = "Inter Rater Reliability - Weighted Cohen's Kappa with modifiers"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+ __description__ = """
+ This plugin calculates Cohen's Kappa to measure inter-rater reliability between two observers who code categorical behaviors over time intervals.
+ Unlike the unweighted version, this approach takes into account the duration of each coded interval, giving more weight to longer intervals in the agreement calculation.
+ This plugin takes into account the modifiers.
+
+ How it works:
+
+ Time segmentation
+ The program collects all the time boundaries from both observers and merges them into a unified set of time points.
+ These define a set of non-overlapping elementary intervals covering the entire observed period.
+
+ Assigning codes
+ For each elementary interval, the program identifies the behavior category assigned by each observer.
+
+ Weighted contingency table
+ Instead of treating each interval equally, the program assigns a weight equal to the duration of the interval.
+ These durations are accumulated in a contingency table that records how much time was spent in each combination of categories across the two observers.
+
+ Agreement calculation
+
+ Observed agreement (po): The proportion of total time where both observers assigned the same category.
+
+ Expected agreement (pe): The proportion of agreement expected by chance, based on the time-weighted marginal distributions of each observer's coding.
+
+ Cohen's Kappa (κ): Computed from the weighted observed and expected agreements.
+ """
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the Inter Rater Reliability - Weighted Cohen's Kappa with modifiers
+     """
+
+     def cohen_kappa_weighted_by_time(
+         obs1: List[Tuple[float, float, str]], obs2: List[Tuple[float, float, str]]
+     ) -> Tuple[float, float, float, Dict[Tuple[Optional[str], Optional[str]], float]]:
+         """
+         Compute Cohen's Kappa weighted by time duration with modifiers.
+
+         Args:
+             obs1: List of (start_time, end_time, code) for observer 1
+             obs2: List of (start_time, end_time, code) for observer 2
+
+         Returns:
+             kappa (float): Cohen's Kappa weighted by duration
+             po (float): Observed agreement proportion (weighted)
+             pe (float): Expected agreement proportion by chance (weighted)
+             contingency (dict): Contingency table {(code1, code2): total_duration}
+         """
+
+         # 1. Collect all time boundaries from both observers
+         time_points = sorted(set([t for seg in obs1 for t in seg[:2]] + [t for seg in obs2 for t in seg[:2]]))
+
+         # 2. Build elementary intervals (non-overlapping time bins)
+         elementary_intervals = [(time_points[i], time_points[i + 1]) for i in range(len(time_points) - 1)]
+
+         # 3. Attribute all active codes for each interval
+         def get_code(t: float, obs: List[Tuple[float, float, str]]) -> Optional[str]:
+             active_codes = [seg[2] for seg in obs if seg[0] <= t < seg[1]]
+             if not active_codes:
+                 return None
+             return "+".join(sorted(active_codes))
+
+         # 4. Build weighted contingency table (durations instead of counts)
+         contingency: Dict[Tuple[Optional[str], Optional[str]], float] = {}
+         total_time = 0.0
+
+         for start, end in elementary_intervals:
+             c1 = get_code(start, obs1)
+             c2 = get_code(start, obs2)
+             duration = end - start
+             total_time += duration
+             contingency[(c1, c2)] = contingency.get((c1, c2), 0.0) + duration
+
+         # 5. Observed agreement (po)
+         po = sum(duration for (c1, c2), duration in contingency.items() if c1 == c2) / total_time
+
+         # Marginal distributions for each observer
+         codes1: Dict[Optional[str], float] = {}
+         codes2: Dict[Optional[str], float] = {}
+         for (c1, c2), duration in contingency.items():
+             codes1[c1] = codes1.get(c1, 0.0) + duration
+             codes2[c2] = codes2.get(c2, 0.0) + duration
+
+         # 6. Expected agreement (pe), using marginal proportions
+         all_codes = set(codes1) | set(codes2)
+         pe = sum((codes1.get(c, 0.0) / total_time) * (codes2.get(c, 0.0) / total_time) for c in all_codes)
+
+         # 7. Kappa calculation
+         kappa = (po - pe) / (1 - pe) if (1 - pe) != 0 else 0.0
+
+         return kappa, po, pe, contingency
+
+     # ask user for the number of decimal places for rounding (can be negative)
+     round_decimals, ok = QInputDialog.getInt(
+         None, "Rounding", "Enter the number of decimal places for rounding (can be negative)", value=3, minValue=-5, maxValue=3, step=1
+     )
+
+     # round times
+     df["Start (s)"] = df["Start (s)"].round(round_decimals)
+     df["Stop (s)"] = df["Stop (s)"].round(round_decimals)
+
+     # Get unique values as a numpy array
+     unique_obs = df["Observation id"].unique()
+
+     # Convert to a list
+     unique_obs_list = unique_obs.tolist()
+
+     # Convert to tuples grouped by observation
+     grouped: dict = {}
+     modifiers: list = []
+     for col in df.columns:
+         if isinstance(col, tuple):
+             modifiers.append(col)
+
+     for obs, group in df.groupby("Observation id"):
+         o = []
+         for row in group[["Start (s)", "Stop (s)", "Subject", "Behavior"] + modifiers].itertuples(index=False, name=None):
+             modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
+             o.append((row[0], row[1], row[2] + "|" + row[3] + "|" + ",".join(modif_list)))
+         grouped[obs] = o
+
+     ck_results: dict = {}
+     str_results: str = ""
+     for idx1, obs_id1 in enumerate(unique_obs_list):
+         obs1 = grouped[obs_id1]
+
+         ck_results[(obs_id1, obs_id1)] = "1.000"
+
+         for obs_id2 in unique_obs_list[idx1 + 1 :]:
+             obs2 = grouped[obs_id2]
+
+             # Cohen's Kappa
+             kappa, po, pe, table = cohen_kappa_weighted_by_time(obs1, obs2)
+
+             print(f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}")
+             str_results += (
+                 f"{obs_id1} - {obs_id2}: Cohen's Kappa: {kappa:.3f} Expected agreement: {pe:.3f} Observed agreement: {po:.3f}\n"
+             )
+
+             ck_results[(obs_id1, obs_id2)] = f"{kappa:.3f}"
+             ck_results[(obs_id2, obs_id1)] = f"{kappa:.3f}"
+
+     # DataFrame conversion
+     df_results = pd.Series(ck_results).unstack()
+
+     return df_results, str_results
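
The only change from the previous plugin is the label handed to the kappa routine: modifier values are appended to the subject|behavior string. Below is a minimal sketch of that label construction, assuming (as the isinstance(col, tuple) test above implies) that each modifier column is exported under a tuple name whose first element is the behavior it belongs to; the column names and values are made up for illustration:

# Hypothetical modifier columns, named (behavior, modifier set) as assumed above
modifiers = [("feed", "food type"), ("rest", "posture")]

# One coded row: Start, Stop, Subject, Behavior, then one value per modifier column
row = (12.0, 18.5, "subject1", "feed", "seeds", "")

# Same selection as the loop above: keep only modifier values whose column
# belongs to this row's behavior, then build the compared label
modif_list = [row[i] for idx, i in enumerate(range(4, 4 + len(modifiers))) if modifiers[idx][0] == row[3]]
label = row[2] + "|" + row[3] + "|" + ",".join(modif_list)
print(label)  # subject1|feed|seeds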

boris/analysis_plugins/list_of_dataframe_columns.py
@@ -0,0 +1,22 @@
+ """
+ BORIS plugin
+
+ list of dataframe columns
+ """
+
+ import pandas as pd
+
+ __version__ = "0.0.1"
+ __version_date__ = "2025-06-13"
+ __plugin_name__ = "List of dataframe columns"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+
+
+ def run(df: pd.DataFrame) -> pd.DataFrame:
+     """
+     List the columns present in the dataframe
+     """
+
+     df_results = pd.DataFrame(df.columns, columns=["column name"])
+
+     return df_results

boris/analysis_plugins/number_of_occurences.py
@@ -0,0 +1,22 @@
+ """
+ BORIS plugin
+
+ number of occurrences of behaviors
+ """
+
+ import pandas as pd
+
+ __version__ = "0.3.0"
+ __version_date__ = "2025-03-17"
+ __plugin_name__ = "Number of occurences of behaviors"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the number of occurrences of behaviors by subject.
+     """
+
+     df_results: pd.DataFrame = df.groupby(["Subject", "Behavior"])["Behavior"].count().reset_index(name="number of occurences")
+
+     return df_results

boris/analysis_plugins/number_of_occurences_by_independent_variable.py
@@ -0,0 +1,54 @@
+ """
+ BORIS plugin
+
+ number of occurrences of behaviors by independent variable
+ """
+
+ import pandas as pd
+
+ __version__ = "0.4.0"
+ __version_date__ = "2025-07-17"
+ __plugin_name__ = "Number of occurences of behaviors by subject by independent_variable"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the number of occurrences of behaviors by subject and by independent variable.
+
+     This plugin returns a Pandas dataframe
+     """
+
+     df_results_list: list = []
+
+     flag_variable_found = False
+
+     for column in df.columns:
+         if isinstance(column, tuple) or (isinstance(column, str) and not column.startswith("independent variable '")):
+             continue
+
+         flag_variable_found = True
+         grouped_df: pd.DataFrame = (
+             df.groupby(
+                 [
+                     column,
+                     "Subject",
+                     "Behavior",
+                 ]
+             )["Behavior"]
+             .count()
+             .reset_index(name="number of occurences")
+         )
+
+         grouped_df.rename(columns={column: "Value"}, inplace=True)
+
+         grouped_df.insert(0, "independent variable name", column)
+
+         df_results_list.append(grouped_df)
+
+     df_results = pd.concat(df_results_list, ignore_index=True) if df_results_list else pd.DataFrame([])
+
+     if not flag_variable_found:
+         return "No independent variable found"
+     else:
+         return df_results
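
A rough sketch of the column convention this plugin relies on (string columns whose names start with "independent variable '"), run on a made-up dataframe; the variable name and all values below are illustrative, not from the package:

import pandas as pd

df = pd.DataFrame(
    {
        "Subject": ["s1", "s1", "s2"],
        "Behavior": ["feed", "feed", "rest"],
        "independent variable 'weather'": ["sunny", "sunny", "rain"],
    }
)

col = "independent variable 'weather'"
# One grouping step, as in the loop above
out = df.groupby([col, "Subject", "Behavior"])["Behavior"].count().reset_index(name="number of occurences")
out.rename(columns={col: "Value"}, inplace=True)
out.insert(0, "independent variable name", col)
print(out)  # two rows: (rain, s2, rest, 1) and (sunny, s1, feed, 2)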

boris/analysis_plugins/time_budget.py
@@ -0,0 +1,61 @@
+ """
+ BORIS plugin
+
+ Time budget
+ """
+
+ import pandas as pd
+ import numpy as np
+
+ __version__ = "0.3.0"
+ __version_date__ = "2025-03-17"
+ __plugin_name__ = "Time budget"
+ __author__ = "Olivier Friard - University of Torino - Italy"
+
+
+ def run(df: pd.DataFrame):
+     """
+     Calculate the following values:
+
+     - Total number of occurrences of behavior
+     - Total duration of behavior (in seconds) (pandas.DataFrame.sum() ignores NaN values when computing the sum. Use min_count=1)
+     - Duration mean of behavior (in seconds)
+     - Standard deviation of behavior duration (in seconds)
+     - Inter-event intervals mean (in seconds)
+     - Inter-event intervals standard deviation (in seconds)
+     - % of total subject observation duration
+     """
+
+     group_by = ["Subject", "Behavior"]
+
+     dfs = [
+         df.groupby(group_by)["Behavior"].count().reset_index(name="number of occurences"),
+         df.groupby(group_by)["Duration (s)"].sum(min_count=1).reset_index(name="total duration"),
+         df.groupby(group_by)["Duration (s)"].mean().astype(float).round(3).reset_index(name="duration mean"),
+         df.groupby(group_by)["Duration (s)"].std().astype(float).round(3).reset_index(name="duration std dev"),
+     ]
+
+     # inter events
+     df2 = df.sort_values(by=["Observation id", "Subject", "Behavior", "Start (s)"])
+     df2["diff"] = df2.groupby(["Observation id", "Subject", "Behavior"])["Start (s)"].shift(periods=-1) - df2["Stop (s)"]
+
+     dfs.append(df2.groupby(group_by)["diff"].mean().astype(float).round(3).reset_index(name="inter-event intervals mean"))
+
+     dfs.append(df2.groupby(group_by)["diff"].std().astype(float).round(3).reset_index(name="inter-event intervals std dev"))
+
+     # % of total subject observation time
+
+     interval = (df.groupby(["Subject"])["Stop (s)"].max() - df.groupby(["Subject"])["Start (s)"].min()).replace(0, np.nan)
+
+     dfs.append(
+         (100 * df.groupby(group_by)["Duration (s)"].sum(min_count=1) / interval)
+         .astype(float)
+         .round(3)
+         .reset_index(name="% of total subject observation duration")
+     )
+
+     merged_df = dfs[0]
+     for df in dfs[1:]:
+         merged_df = pd.merge(merged_df, df, on=group_by)
+
+     return merged_df
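
The inter-event interval step above can be checked on a minimal, made-up set of three events of one behavior: within each Observation id / Subject / Behavior group the gap is the next event's start minus the current event's stop, so the last event of each group gets NaN, which mean() and std() then skip.

import pandas as pd

df2 = pd.DataFrame(
    {
        "Observation id": ["obs1", "obs1", "obs1"],
        "Subject": ["s1", "s1", "s1"],
        "Behavior": ["feed", "feed", "feed"],
        "Start (s)": [0.0, 10.0, 25.0],
        "Stop (s)": [4.0, 12.0, 30.0],
    }
)

# Next start minus current stop within the group: gaps of 6 s and 13 s, NaN for the last event
df2["diff"] = df2.groupby(["Observation id", "Subject", "Behavior"])["Start (s)"].shift(periods=-1) - df2["Stop (s)"]
print(df2["diff"].tolist())  # [6.0, 13.0, nan]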