accelerometry-annotator 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,79 @@
1
+ """
2
+ Application-wide configuration constants.
3
+
4
+ Centralizes paths, color palettes, user lists, and annotation schema
5
+ so that changes propagate consistently across all modules.
6
+ """
7
+
8
+ import os
9
+
10
+ # ---------------------------------------------------------------------------
11
+ # UChicago brand color palette
12
+ # ---------------------------------------------------------------------------
13
+ UCHICAGO_MAROON = "#800000"
14
+ UCHICAGO_GRAY = "#58595b"
15
+ UCHICAGO_TEAL = "#7EBEC5"
16
+
17
+ # Signal line colors for the x, y, z accelerometry axes
18
+ LST_COLORS = [UCHICAGO_MAROON, UCHICAGO_TEAL, UCHICAGO_GRAY]
19
+
20
+ # Fill colors for annotation overlay quads (one per activity type)
21
+ ARTIFACT_COLORS = {
22
+ "chair_stand": "cyan",
23
+ "3m_walk": "magenta",
24
+ "6min_walk": "green",
25
+ "tug": "yellow",
26
+ }
27
+
28
+ # ---------------------------------------------------------------------------
29
+ # Filesystem paths
30
+ # ---------------------------------------------------------------------------
31
+ DATA_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
32
+ READINGS_FOLDER = os.path.join(DATA_FOLDER, "readings")
33
+ OUTPUT_FOLDER = os.path.join(DATA_FOLDER, "output")
34
+ # Glob pattern for per-user annotation Excel files (the * is replaced by username)
35
+ ANNOTATIONS_GLOB = os.path.join(OUTPUT_FOLDER, "annotations_*.xlsx")
36
+ # Path to the JSON credentials file used by the admin panel to add/remove users.
37
+ # Overridden by demo/config_overrides.py for demo deployments.
38
+ CREDENTIALS_FILE = os.path.join(
39
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
40
+ "credentials.json",
41
+ )
42
+
43
+ # ---------------------------------------------------------------------------
44
+ # User lists
45
+ # These are module-level mutable lists so the admin panel can add/remove
46
+ # users at runtime without a server restart. Because they are shared across
47
+ # sessions, admin changes take effect for all sessions immediately.
48
+ # ---------------------------------------------------------------------------
49
+ ADMIN_USERS = ["megan", "kristen", "manu"]
50
+
51
+ ANNOTATOR_USERS = sorted([
52
+ "ideyah", "evelyn", "junny", "amritap1", "ldepablo1", "ar277",
53
+ "megan", "kristen", "fran", "alan", "anita", "liberto",
54
+ ])
55
+
56
+ KNOWN_USERS = sorted(set(ADMIN_USERS + ANNOTATOR_USERS))
57
+
58
+ # ---------------------------------------------------------------------------
59
+ # Defaults and formats
60
+ # ---------------------------------------------------------------------------
61
+ DEFAULT_WINDOW_SIZE = 3600 # seconds of signal data shown at once
62
+
63
+ # Timestamp format used for anchor time display and HDF5 queries.
64
+ # Must match the format produced by pandas dt.strftime.
65
+ TIME_FMT = "%b %d %Y %I:%M %p"
66
+
67
+ # ---------------------------------------------------------------------------
68
+ # Annotation DataFrame schema
69
+ # ---------------------------------------------------------------------------
70
+ ANNOTATION_COLUMNS = [
71
+ "fname", "artifact", "segment", "scoring", "review",
72
+ "start_epoch", "end_epoch", "start_time", "end_time",
73
+ "annotated_at", "user", "notes",
74
+ ]
75
+
76
+ DISPLAYED_ANNOTATION_COLUMNS = [
77
+ "artifact", "segment", "scoring", "review",
78
+ "start_time", "end_time", "annotated_at", "user", "notes",
79
+ ]
@@ -0,0 +1,243 @@
1
+ """
2
+ Data loading and persistence for accelerometry signals and annotations.
3
+
4
+ Handles HDF5 signal file discovery, time-windowed data loading, annotation
5
+ file I/O (Excel-based), and DataFrame normalization.
6
+ """
7
+
8
+ import glob
9
+ import os
10
+ from itertools import cycle
11
+
12
+ import numpy as np
13
+ import pandas as pd
14
+
15
+ from . import config as _config
16
+ from .config import (
17
+ ANNOTATION_COLUMNS,
18
+ ANNOTATOR_USERS,
19
+ TIME_FMT,
20
+ )
21
+
22
+
23
def get_filenames():
    """Discover HDF5 files and assign each to an annotator deterministically.

    Returns
    -------
    list of str
        Sorted list of ``"username--filename"`` strings. The assignment
        uses a fixed random seed so every server restart produces the
        same mapping, distributing files evenly across annotators.
    """
    # Fixed seed ensures the same shuffled user order across restarts
    np.random.seed(2020)
    users_to_assign = list(ANNOTATOR_USERS)
    np.random.shuffle(users_to_assign)
    users_cycle = cycle(users_to_assign)
    # os.listdir() order is filesystem-dependent, so the file names must be
    # sorted BEFORE pairing them with users; otherwise the "deterministic"
    # assignment would silently differ between hosts or restarts.
    h5_files = sorted(
        f for f in os.listdir(_config.READINGS_FOLDER)
        if os.path.splitext(f)[1].lower() == ".h5"
    )
    lst_files = sorted(
        next(users_cycle) + "--" + os.path.splitext(f)[0]
        for f in h5_files
    )
    return lst_files
44
+
45
+
46
def get_filedata(fname, anchor_timestamp, windowsize):
    """Load a time window of accelerometry data from an HDF5 file.

    Parameters
    ----------
    fname : str
        Path to the HDF5 file (without ``.h5`` extension).
    anchor_timestamp : str or None
        Center of the time window in ``TIME_FMT``. If None, the window
        starts at the beginning of the file.
    windowsize : float
        Total window duration in seconds.

    Returns
    -------
    tuple of (str, str or None, str or None, DataFrame)
        ``(anchor_timestamp, file_start, file_end, pdf)`` where
        ``file_start`` and ``file_end`` are only set on the first load
        (when anchor_timestamp was None).
    """
    from datetime import datetime, timedelta

    file_path = fname + ".h5"

    # File bounds are only reported on the very first load; afterwards
    # the caller already knows them.
    file_start = None
    file_end = None
    if anchor_timestamp is None:
        # Probe just the first and last rows to determine the file bounds
        head = pd.read_hdf(file_path, "readings", start=0, stop=1)
        tail = pd.read_hdf(file_path, "readings", start=-1)
        file_start = head["timestamp"].dt.strftime(TIME_FMT).values[0]
        file_end = tail["timestamp"].dt.strftime(TIME_FMT).values[0]
        anchor_timestamp = file_start

    center = datetime.strptime(anchor_timestamp, TIME_FMT)
    half = timedelta(seconds=int(windowsize / 2))
    start_str = (center - half).strftime(TIME_FMT)
    end_str = (center + half).strftime(TIME_FMT)

    # HDF5 where-clause pushes filtering to the storage layer for speed
    pdf = pd.read_hdf(
        file_path,
        "readings",
        where=f"(timestamp >= Timestamp('{start_str}')) & (timestamp <= Timestamp('{end_str}'))",
    )

    return anchor_timestamp, file_start, file_end, pdf
98
+
99
+
100
def clamp_anchor(anchor_timestamp, file_start, file_end, windowsize, fmt=None):
    """Clamp anchor_timestamp so the window stays within file bounds.

    Parameters
    ----------
    anchor_timestamp : str
        Current anchor formatted with *fmt*.
    file_start, file_end : str
        File bounds formatted with *fmt*.
    windowsize : float
        Window duration in seconds.
    fmt : str, optional
        ``strptime``/``strftime`` format for all timestamp strings.
        Defaults to the application-wide ``TIME_FMT`` (backward
        compatible with the previous four-argument signature).

    Returns
    -------
    str
        Clamped anchor formatted with *fmt*.
    """
    from datetime import datetime, timedelta

    if fmt is None:
        fmt = TIME_FMT

    anchor_dt = datetime.strptime(anchor_timestamp, fmt)
    start_dt = datetime.strptime(file_start, fmt)
    end_dt = datetime.strptime(file_end, fmt)
    half_window = timedelta(seconds=int(windowsize / 2))

    # Prevent the window from extending past either end of the file.
    # The start-side clamp runs second so it wins when the window is
    # wider than the file itself.
    if anchor_dt >= end_dt:
        anchor_dt = end_dt - half_window
    if anchor_dt <= start_dt:
        anchor_dt = start_dt + half_window

    return anchor_dt.strftime(fmt)
130
+
131
+
132
def get_annotations_from_files(pattern=None):
    """Load all per-user annotation Excel files and concatenate them.

    Parameters
    ----------
    pattern : str, optional
        Glob pattern for annotation files. Defaults to ``ANNOTATIONS_GLOB``.

    Returns
    -------
    DataFrame
        Combined annotations (unsorted, not yet cleaned). Empty frame
        with the standard schema when no files match.
    """
    if pattern is None:
        pattern = _config.ANNOTATIONS_GLOB
    frames = []
    for path in glob.glob(pattern):
        # Skip anything that is not a regular file (e.g. stray directories)
        if os.path.isfile(path):
            frames.append(pd.read_excel(path, engine="openpyxl"))
    if not frames:
        return pd.DataFrame(columns=ANNOTATION_COLUMNS)
    return pd.concat(frames)
151
+
152
+
153
def cleanup_annotations(pdf):
    """Sort and normalize an annotation DataFrame.

    Ensures consistent types for datetime, numeric, and string columns
    so that downstream code (Bokeh serialization, DataFrame filtering)
    doesn't encounter NaN or mixed-type surprises.

    Parameters
    ----------
    pdf : DataFrame
        Raw or partially-processed annotations.

    Returns
    -------
    DataFrame
        Cleaned copy.
    """
    sort_cols = ["user", "fname", "artifact", "segment", "scoring", "review", "annotated_at"]
    pdf = pdf.sort_values(by=sort_cols, ascending=False)

    # An empty frame needs no normalization.
    if pdf.shape[0] == 0:
        return pdf

    if "notes" not in pdf.columns:
        pdf = pdf.assign(notes="")
    pdf = pdf.assign(
        start_time=pd.to_datetime(pdf["start_time"], errors="coerce"),
        end_time=pd.to_datetime(pdf["end_time"], errors="coerce"),
        notes=pdf["notes"].fillna(""),
    )
    # Zero-fill numeric columns: Bokeh's PayloadEncoder has allow_nan=False,
    # so any surviving NaN would break JSON serialization.
    for numeric_col in ("segment", "scoring", "review", "start_epoch", "end_epoch"):
        if numeric_col in pdf.columns:
            pdf[numeric_col] = pdf[numeric_col].fillna(0)
    return pdf.assign(notes=pdf["notes"].astype(str))
189
+
190
+
191
def save_annotations(pdf_annotations, uname, fname):
    """Persist the current user's annotations for one file to disk.

    Merges the in-memory annotations with any existing data from other
    files in the user's Excel file, then writes the result.

    Parameters
    ----------
    pdf_annotations : DataFrame
        Full in-memory annotation set (all users, all files).
    uname : str
        Current user whose annotations should be saved.
    fname : str
        Current file path (basename is extracted internally).

    Returns
    -------
    DataFrame
        Freshly-reloaded annotations from *all* users' files on disk.
    """
    # Each user writes to their own Excel file: the glob wildcard doubles
    # as a filename template with the username substituted for "*".
    annotations_file = _config.ANNOTATIONS_GLOB.replace("*", uname)
    pdf_old = pd.DataFrame(columns=ANNOTATION_COLUMNS)
    if os.path.exists(annotations_file):
        pdf_old = pd.read_excel(annotations_file, engine="openpyxl")
        # Coerce annotated_at back to datetime: Excel round-trips may
        # return strings, and cleanup_annotations sorts on this column.
        pdf_old = pdf_old.assign(
            annotated_at=pd.to_datetime(pdf_old["annotated_at"], errors="coerce")
        )

    # Slice of the in-memory set belonging to this user and this file.
    basename = os.path.basename(fname)
    pdf_current = pdf_annotations.loc[
        (pdf_annotations["user"] == uname)
        & (pdf_annotations["fname"] == basename)
    ]

    if pdf_old.shape[0] > 0:
        # Replace only the current user+file slice, keep everything else
        pdf_all = pd.concat(
            [
                pdf_old.loc[
                    ~((pdf_old["user"] == uname) & (pdf_old["fname"] == basename))
                ],
                pdf_current,
            ],
            ignore_index=True,
        ).reset_index(drop=True)
    else:
        pdf_all = pdf_current

    # Normalize types/NaNs before writing so the on-disk file is clean.
    pdf_all = cleanup_annotations(pdf_all)
    pdf_all.to_excel(annotations_file, index=False)

    # Reload from disk so all sessions see a consistent snapshot
    return get_annotations_from_files()
@@ -0,0 +1,32 @@
1
// Serialize a Bokeh ColumnDataSource to CSV text.
// Fields are escaped per RFC 4180: any value containing a comma, double
// quote, or newline is wrapped in double quotes with embedded quotes
// doubled. The previous version emitted raw values, so a note containing
// a comma silently shifted every following column in the exported file,
// and a null/undefined cell threw on .toString().
function table_to_csv(source) {
    function escape_field(value) {
        // null/undefined become empty cells instead of throwing
        const text = (value === null || value === undefined) ? '' : value.toString()
        if (/[",\n\r]/.test(text)) {
            return '"' + text.replace(/"/g, '""') + '"'
        }
        return text
    }

    const columns = Object.keys(source.data)
    const nrows = source.get_length()
    // Header row first, then one line per data row
    const lines = [columns.map(escape_field).join(',')]

    for (let i = 0; i < nrows; i++) {
        const row = []
        for (let j = 0; j < columns.length; j++) {
            row.push(escape_field(source.data[columns[j]][i]))
        }
        lines.push(row.join(','))
    }
    return lines.join('\n').concat('\n')
}
16
+
17
+
18
// CustomJS callback body: `source` is supplied by Bokeh as a callback arg.
// Builds a CSV blob from the annotation table and triggers a browser
// download without any server round-trip.
const filename = 'annotations.csv'
const filetext = table_to_csv(source)
const blob = new Blob([filetext], { type: 'text/csv;charset=utf-8;' })

//addresses IE
if (navigator.msSaveBlob) {
    // msSaveBlob only exists on Internet Explorer / legacy Edge
    navigator.msSaveBlob(blob, filename)
} else {
    // Standard path: synthesize a click on a hidden, never-attached
    // anchor element pointing at an object URL for the blob.
    const link = document.createElement('a')
    link.href = URL.createObjectURL(blob)
    link.download = filename
    link.target = '_blank'
    link.style.visibility = 'hidden'
    link.dispatchEvent(new MouseEvent('click'))
}
@@ -0,0 +1,239 @@
1
+ """
2
+ Plotting module — native Bokeh figures with LTTB downsampling.
3
+
4
+ Creates a main signal plot (with annotation overlays and box-select)
5
+ and a range selector (minimap) for navigating large time series.
6
+ LTTB downsampling keeps the browser responsive by limiting the number
7
+ of points sent over the websocket while preserving visual fidelity.
8
+ """
9
+
10
+ import numpy as np
11
+ from bokeh.models import (
12
+ BoxSelectTool, ColumnDataSource, DatetimeTickFormatter,
13
+ Range1d, RangeTool,
14
+ )
15
+ from bokeh.plotting import figure
16
+
17
+ from .config import ARTIFACT_COLORS, LST_COLORS, UCHICAGO_MAROON
18
+
19
# Maximum points to send to the browser per signal axis.
# 10000 provides high visual fidelity while remaining responsive
# with the canvas backend (no WebGL). Used as the LTTB target size
# for the main figure in make_plot (the minimap uses a smaller cap).
MAX_POINTS = 10000
23
+
24
+
25
+ def _downsample(timestamps, values, n_out):
26
+ """Downsample a time series using LTTB (Largest Triangle Three Buckets).
27
+
28
+ LTTB selects representative points that preserve the visual shape
29
+ of the signal. Falls back to uniform strided sampling if the
30
+ ``lttbc`` C extension is not installed.
31
+
32
+ Parameters
33
+ ----------
34
+ timestamps : ndarray
35
+ Datetime64 array of timestamps.
36
+ values : ndarray
37
+ Signal values corresponding to *timestamps*.
38
+ n_out : int
39
+ Target number of output points.
40
+
41
+ Returns
42
+ -------
43
+ tuple of (ndarray, ndarray)
44
+ Downsampled ``(timestamps, values)``.
45
+ """
46
+ if len(timestamps) <= n_out:
47
+ return timestamps, values
48
+ try:
49
+ import lttbc
50
+ # lttbc operates on float64 arrays
51
+ ts_float = timestamps.astype(np.float64)
52
+ vals_float = values.astype(np.float64)
53
+ ds_ts, ds_vals = lttbc.downsample(ts_float, vals_float, n_out)
54
+ return ds_ts.astype(timestamps.dtype), ds_vals
55
+ except Exception:
56
+ # Graceful fallback: take every Nth sample
57
+ step = max(1, len(timestamps) // n_out)
58
+ return timestamps[::step], values[::step]
59
+
60
+
61
def make_plot(pdf, annotation_cds):
    """Create the main signal plot and range selector.

    Parameters
    ----------
    pdf : DataFrame or None
        Signal data with columns ``timestamp``, ``x``, ``y``, ``z``.
        If None or empty, returns empty placeholder plots.
    annotation_cds : dict[str, ColumnDataSource]
        Persistent Bokeh ColumnDataSources keyed by annotation type
        (``"chair_stand"``, ``"segment"``, etc.). Their ``.data`` is
        updated externally; the plot just references them so overlays
        refresh without rebuilding the figure.

    Returns
    -------
    tuple of (Panel.pane.Bokeh, Panel.pane.Bokeh, Figure, ColumnDataSource)
        ``(main_pane, range_pane, main_fig, signal_cds)`` where
        ``signal_cds`` is the downsampled signal data source (needed
        for wiring box-select callbacks).
    """
    # Imported at call time rather than module top level; NOTE(review):
    # presumably to keep this module importable without a Panel runtime —
    # confirm before hoisting.
    import panel as pn

    if pdf is None or len(pdf) == 0:
        # Placeholder panes keep the page layout stable before data loads
        empty_fig = figure(height=300, sizing_mode="stretch_width")
        empty_cds = ColumnDataSource(data=dict(timestamp=[], x=[], y=[], z=[]))
        return (
            pn.pane.Bokeh(empty_fig, sizing_mode="stretch_width"),
            pn.pane.Bokeh(empty_fig, sizing_mode="stretch_width"),
            empty_fig,
            empty_cds,
        )

    ts_raw = pdf["timestamp"].values

    # --- Downsample each axis independently via LTTB ---
    # Each axis may pick slightly different representative timestamps,
    # but we reuse the first axis's timestamps for all three. This is
    # a minor approximation that keeps the code simple without visible
    # impact on the plot.
    ds_data = {"timestamp": None}
    for col in ["x", "y", "z"]:
        ds_ts, ds_vals = _downsample(ts_raw, pdf[col].values, MAX_POINTS)
        if ds_data["timestamp"] is None:
            ds_data["timestamp"] = ds_ts
        ds_data[col] = ds_vals

    colsource = ColumnDataSource(data=ds_data)

    full_start = ts_raw[0]
    # Show ~10% of the file initially so the user sees detail
    initial_end_idx = min(len(ts_raw) - 1, int(len(ts_raw) * 0.1))
    initial_end = ts_raw[initial_end_idx]

    # Explicit y_range computed from signal data. Using Range1d (not
    # DataRange1d) is critical because DataRange1d would auto-expand to
    # include annotation quad bounds, squashing the signal to a thin line.
    y_min = min(ds_data["x"].min(), ds_data["y"].min(), ds_data["z"].min())
    y_max = max(ds_data["x"].max(), ds_data["y"].max(), ds_data["z"].max())
    y_pad = (y_max - y_min) * 0.05
    y_range = Range1d(start=y_min - y_pad, end=y_max + y_pad)

    # --- Main signal plot ---
    main_fig = figure(
        height=300,
        x_axis_type="datetime",
        x_axis_location="above",
        background_fill_color="#e8e8e8",
        x_range=Range1d(start=full_start, end=initial_end),
        y_range=y_range,
        sizing_mode="stretch_width",
        toolbar_location=None,
    )
    main_fig.yaxis.visible = False

    # One line per accelerometry axis, colored per LST_COLORS
    for color, col in zip(LST_COLORS, ["x", "y", "z"]):
        main_fig.line(
            "timestamp", col, color=color, source=colsource,
            alpha=0.95, line_width=1.5,
            # Dim unselected data so the box-selected region stands out
            nonselection_alpha=0.2, selection_alpha=1,
        )
        # Invisible scatter points on top of lines so that BoxSelectTool
        # can select data indices. Line glyphs alone don't support
        # index-based hit testing.
        main_fig.scatter(
            "timestamp", col, color=None, source=colsource,
            size=0, alpha=0, nonselection_alpha=0, selection_alpha=0,
        )

    main_fig.xaxis.formatter = DatetimeTickFormatter(
        days="%Y/%m/%d",
        months="%Y/%m/%d %H:%M",
        hours="%Y/%m/%d %H:%M",
        minutes="%H:%M",
        seconds="%H:%M:%S",
        milliseconds="%Ss:%3Nms",
    )

    # Width-only box select for time-range annotation
    box_select = BoxSelectTool(dimensions="width")
    main_fig.add_tools(box_select)
    main_fig.toolbar.active_drag = box_select

    # --- Annotation overlay quads ---
    # Quads span the full y_range so they are visible behind the signal.
    # Using level="overlay" prevents them from affecting auto-range.
    q_top = y_max + y_pad
    q_bot = y_min - y_pad

    # Activity type overlays (semi-transparent colored fills)
    for key, color in ARTIFACT_COLORS.items():
        main_fig.quad(
            left="start_time", right="end_time", top=q_top, bottom=q_bot,
            fill_color=color, fill_alpha=0.2, line_alpha=0,
            source=annotation_cds[key], level="overlay",
        )

    # Flag overlays (hatch patterns with no fill, matching the original app)
    flag_hatches = {
        "segment": "cross",
        "scoring": "dot",
        "review": "spiral",
    }
    for key, hatch in flag_hatches.items():
        main_fig.quad(
            left="start_time", right="end_time", top=q_top, bottom=q_bot,
            fill_color=None, fill_alpha=0, line_alpha=0,
            hatch_pattern=hatch, hatch_color="black",
            hatch_weight=0.5, hatch_alpha=0.1,
            source=annotation_cds[key], level="overlay",
        )

    # --- Range selector (minimap) ---
    # Uses fewer points than the main plot since it's smaller
    range_data = {"timestamp": None}
    for col in ["x", "y", "z"]:
        r_ts, r_vals = _downsample(ts_raw, pdf[col].values, 2000)
        if range_data["timestamp"] is None:
            range_data["timestamp"] = r_ts
        range_data[col] = r_vals
    range_source = ColumnDataSource(data=range_data)

    range_fig = figure(
        height=130,
        y_range=main_fig.y_range,
        x_axis_type="datetime",
        y_axis_type=None,
        tools="",
        toolbar_location=None,
        background_fill_color="#e8e8e8",
        sizing_mode="stretch_width",
    )

    for color, col in zip(LST_COLORS, ["x", "y", "z"]):
        range_fig.line(
            "timestamp", col, color=color, source=range_source,
            alpha=0.8, line_width=1.2,
        )

    range_fig.xaxis.formatter = DatetimeTickFormatter(
        days="%m/%d %H:%M",
        months="%m/%d %H:%M",
        hours="%m/%d %H:%M",
        minutes="%m/%d %H:%M",
        seconds="%m/%d %H:%M:%S",
    )

    # RangeTool links the minimap's draggable overlay to main_fig.x_range
    range_tool = RangeTool(x_range=main_fig.x_range)
    range_tool.overlay.fill_color = UCHICAGO_MAROON
    range_tool.overlay.fill_alpha = 0.15
    range_fig.add_tools(range_tool)
    range_fig.toolbar.active_multi = "auto"

    main_pane = pn.pane.Bokeh(main_fig, sizing_mode="stretch_width")
    range_pane = pn.pane.Bokeh(range_fig, sizing_mode="stretch_width")

    return main_pane, range_pane, main_fig, colsource