pycoustic 0.1.8__tar.gz → 0.1.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,13 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: pycoustic
3
- Version: 0.1.8
3
+ Version: 0.1.9
4
4
  Summary:
5
5
  Author: thumpercastle
6
6
  Author-email: tony.ryb@gmail.com
7
- Requires-Python: >=3.11.9,<4.0
7
+ Requires-Python: >=3.10,<=3.13
8
8
  Classifier: Programming Language :: Python :: 3
9
+ Classifier: Programming Language :: Python :: 3.10
10
+ Classifier: Programming Language :: Python :: 3.11
9
11
  Classifier: Programming Language :: Python :: 3.12
10
12
  Classifier: Programming Language :: Python :: 3.13
11
13
  Requires-Dist: numpy (==2.2.6)
@@ -14,7 +14,13 @@ class Log:
14
14
  :param path: the file path for the .csv noise data
15
15
  """
16
16
  self._filepath = path
17
- self._master = pd.read_csv(path, index_col="Time", parse_dates=["Time"], dayfirst=True)
17
+ self._master = pd.read_csv(
18
+ path,
19
+ index_col="Time",
20
+ parse_dates=["Time"],
21
+ date_format="%Y/%m/%d %H:%M", # Explicit format to avoid the dayfirst warning
22
+ # dayfirst=False, # Optional: include for clarity; default is False
23
+ )
18
24
  self._master.index = pd.to_datetime(self._master.index)
19
25
  self._master = self._master.sort_index(axis=1)
20
26
  self._start = self._master.index.min()
@@ -177,8 +183,8 @@ class Log:
177
183
  if isinstance(df, pd.Series):
178
184
  df = pd.DataFrame(data=df)
179
185
  return df.set_index(idx, inplace=False)
180
-
181
- def _get_period(self, data=None, period="days", night_idx=True):
186
+ #test
187
+ def get_period(self, data=None, period="days", night_idx=True):
182
188
  """
183
189
  Private method to get data for daytime, evening or night-time periods.
184
190
  :param data: Input data, usually master
@@ -198,7 +204,7 @@ class Log:
198
204
  data = self._return_as_night_idx(data=data)
199
205
  return data.between_time(self._night_start, self._day_start, inclusive="left")
200
206
 
201
- def _leq_by_date(self, data, cols=None):
207
+ def leq_by_date(self, data, cols=None):
202
208
  """
203
209
  Private method to undertake Leq calculations organised by date. For contiguous night-time periods crossing
204
210
  over midnight (e.g. from 23:00 to 07:00), the input data needs to have a night-time index.
@@ -0,0 +1,635 @@
1
+ import os
2
+ import ast
3
+ import tempfile
4
+ from typing import List, Dict
5
+
6
+ import pandas as pd
7
+ import plotly.graph_objects as go
8
+ import datetime as _dt
9
+ import streamlit as st
10
+ import re
11
+
12
+ # Import from the package so relative imports inside submodules resolve
13
+ try:
14
+ from pycoustic.survey import *
15
+ except ImportError:
16
+ # Fallback for local runs
17
+ from survey import *
18
+
19
+ try:
20
+ from pycoustic.log import *
21
+ except ImportError:
22
+ from log import *
23
+
24
+ # Streamlit app config
25
+ st.set_page_config(page_title="Pycoustic Acoustic Survey Explorer", layout="wide")
26
+
27
+ # Graph colour palette config
28
+ COLOURS = {
29
+ "Leq A": "#FBAE18", # light grey
30
+ "L90 A": "#4d4d4d", # dark grey
31
+ "Lmax A": "#fc2c2c", # red
32
+ }
33
+ # Graph template config
34
+ TEMPLATE = "plotly"
35
+
36
+ if "apply_agg" not in st.session_state:
37
+ st.session_state["apply_agg"] = False
38
+ if "period_last" not in st.session_state:
39
+ st.session_state["period_last"] = ""
40
+
41
+ # Python
42
+ # Helper to resolve a usable Survey/Log object from session state
43
def _resolve_survey_like():
    """Return the Survey/Log object stored in Streamlit session state, or None.

    Prefers ``st.session_state["survey"]``; otherwise scans a few legacy keys
    and, on a hit, normalises the object back under ``"survey"`` so later
    lookups succeed. An object counts as survey-like when it exposes
    ``set_periods``.
    """
    # streamlit is imported unconditionally at module level, so the previous
    # defensive in-function re-import could never fail once this module loaded.
    survey = st.session_state.get("survey")
    if hasattr(survey, "set_periods"):
        return survey

    # Try a few common alternative keys that may contain the actual object
    for key in ("survey_obj", "log_obj", "log"):
        obj = st.session_state.get(key)
        if hasattr(obj, "set_periods"):
            # Normalize so later code that looks up "survey" also works
            st.session_state["survey"] = obj
            return obj

    return None
62
+
63
+
64
def _coerce_hm_tuple(t):
    """Best-effort conversion of *t* into an ``(int hour, int minute)`` tuple.

    ``None`` passes through unchanged; any value that cannot be unpacked into
    exactly two int-convertible items is returned as-is.
    """
    if t is None:
        return None
    try:
        hour, minute = t
        hour, minute = int(hour), int(minute)
    except Exception:
        # Fall back to the original value if it can't be coerced
        return t
    return (hour, minute)
73
+
74
+
75
def render_sidebar_set_periods():
    """
    Sidebar UI to choose Day/Evening/Night boundaries.
    Applies changes to the Survey stored in st.session_state['survey'] so that
    render_resi_summary uses the updated periods.
    """
    st.sidebar.subheader("Assessment Periods")

    # Conventional defaults: Day 07:00-19:00, Evening 19:00-23:00, Night 23:00-07:00
    widget_specs = (
        ("Day starts", _dt.time(7, 0), "period_day_start"),
        ("Evening starts", _dt.time(19, 0), "period_eve_start"),
        ("Night starts", _dt.time(23, 0), "period_night_start"),
    )
    picked = [
        st.sidebar.time_input(label, value=default, key=key)
        for label, default, key in widget_specs
    ]

    # Immediately convert each chosen time into an (hour, minute) int tuple
    day_tuple, eve_tuple, night_tuple = (
        (int(t.hour), int(t.minute)) for t in picked
    )

    # Apply to Survey
    _set_periods_on_survey(day_tuple, eve_tuple, night_tuple)
100
+
101
+
102
def _set_periods_on_survey(day_tuple, eve_tuple, night_tuple):
    """
    Accepts (hour, minute) tuples and updates the Survey periods.
    No-op when no survey-like object is available in session state.
    """
    target = _resolve_survey_like()
    if target is None:
        return

    # Coerce each boundary and key it by the period name the library expects
    times = {
        name: _coerce_hm_tuple(value)
        for name, value in zip(
            ("day", "evening", "night"),
            (day_tuple, eve_tuple, night_tuple),
        )
    }
    if hasattr(target, "set_periods"):
        target.set_periods(times=times)
117
+
118
+
119
def render_resi_summary(survey):
    """
    Render the Residential Summary (survey.resi_summary) in the Streamlit GUI.
    Includes options for lmax_n, lmax_t and optional advanced inputs for leq_cols/max_cols.
    """
    st.header("Broadband Summary")

    if survey is None:
        st.info("No survey loaded.")
        return

    with st.expander("Options", expanded=False):
        lmax_n = st.number_input("Nth-highest Lmax (lmax_n)", min_value=1, max_value=1000, value=10, step=1)
        lmax_t_choice = st.selectbox(
            "Lmax time basis (lmax_t)",
            options=["2min", "1min", "5min", "15min", "60min", "custom"],
            index=0,
            help="Select the time aggregation used to compute Lmax rankings."
        )
        # "custom" swaps the preset for a free-text entry
        lmax_t = (
            st.text_input("Custom time basis (e.g., '30s', '10min')", value="2min")
            if lmax_t_choice == "custom"
            else lmax_t_choice
        )

        advanced = st.checkbox("Advanced column selection (leq_cols, max_cols)")
        leq_cols = None
        max_cols = None

        if advanced:
            st.caption("Provide lists of tuples. Example: [(\"Leq\",\"A\"), (\"L90\",\"125\")]")
            leq_text = st.text_input("leq_cols", value="")
            max_text = st.text_input("max_cols", value="")
            parse_errors = []

            def _parse_tuple_list(s):
                # Parse text like "[('Leq','A'), ...]" into a list of str 2-tuples;
                # collect any problem into parse_errors and return None instead.
                if not s.strip():
                    return None
                try:
                    val = ast.literal_eval(s)
                    if not isinstance(val, (list, tuple)):
                        raise ValueError("Expected a list/tuple of tuples")
                    for item in val:
                        if not isinstance(item, (list, tuple)) or len(item) != 2:
                            raise ValueError("Each entry must be a 2-tuple like (name, subname)")
                    return [(str(a), str(b)) for a, b in val]
                except Exception as e:
                    parse_errors.append(str(e))
                    return None

            leq_cols = _parse_tuple_list(leq_text)
            max_cols = _parse_tuple_list(max_text)

            if parse_errors:
                st.warning("There were issues parsing advanced inputs: " + "; ".join(parse_errors))

    run = st.button("Compute residential summary", use_container_width=True)
    if not run:
        return

    try:
        with st.spinner("Computing summary..."):
            df = survey.resi_summary(
                leq_cols=leq_cols,
                max_cols=max_cols,
                lmax_n=int(lmax_n),
                lmax_t=str(lmax_t),
            )

        if df is None:
            st.info("No data returned.")
            return

        shape = getattr(df, 'shape', ['?', '?'])
        st.success(f"Summary computed. Rows: {shape[0]}, Columns: {shape[1]}")
        st.dataframe(df, use_container_width=True, height=480)

        st.download_button(
            label="Download CSV",
            data=df.to_csv(index=True).encode("utf-8"),
            file_name="resi_summary.csv",
            mime="text/csv",
            use_container_width=True,
        )
    except Exception as e:
        st.error(f"Failed to compute residential summary: {e}")
208
+
209
with st.sidebar:
    # File upload lives in its own expander
    with st.expander("File Upload", expanded=True):
        files = st.file_uploader(
            "Select one or more CSV files",
            type="csv",
            accept_multiple_files=True,
        )
        if not files:
            st.stop()  # nothing to render until at least one CSV is chosen

    # Integration-period controls in a second expander
    with st.expander("Graph Integration Period", expanded=True):
        int_period = st.number_input(
            "Insert new integration period (must be larger than data)",
            step=1,
            value=15,
        )
        period_select = st.selectbox(
            "Please select time period",
            ("second(s)", "minute(s)", "hour(s)"),
            index=1,
        )

        # Compose the pandas-style offset string, e.g. "15min"
        period = "{}{}".format(
            int_period,
            {"second(s)": "s", "minute(s)": "min", "hour(s)": "h"}.get(period_select, ""),
        )

        # A freshly chosen period invalidates any previously applied aggregation
        if st.session_state["period_last"] != period:
            st.session_state["apply_agg"] = False
            st.session_state["period_last"] = period

        # One button re-aggregates every position at once
        if st.button("Apply Integration Period"):
            st.session_state["apply_agg"] = True
245
+
246
+
247
# Main window: load the uploaded CSVs and build the Survey
with st.spinner("Processing Data...", show_time=True):
    # Each upload is written to a temp file so pycoustic can read it by path
    logs: Dict[str, Log] = {}
    for upload_file in files:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmp:
            tmp.write(upload_file.getbuffer())
            path = tmp.name
        try:
            logs[upload_file.name] = Log(path)
        except Exception as err:
            st.error(f"Failed to load `{upload_file.name}` into Pycoustic: {err}")
        finally:
            os.unlink(path)  # the temp file is no longer needed once parsed

    # Build Survey and pull summary + spectra
    summary_df = leq_spec_df = lmax_spec_df = None
    summary_error = ""
    if not logs:
        summary_error = "No valid logs loaded."
    else:
        try:
            survey = Survey()
            st.session_state["survey"] = survey
            if callable(getattr(survey, "add_log", None)):
                for name, lg in logs.items():
                    survey.add_log(lg, name=name)
            elif hasattr(survey, "_logs"):
                # Older pycoustic builds exposed the log store directly
                survey._logs = logs

            summary_df = survey.resi_summary()
            leq_spec_df = getattr(survey, "typical_leq_spectra", lambda: None)()
            lmax_spec_df = getattr(survey, "lmax_spectra", lambda: None)()
            render_sidebar_set_periods()

        except Exception as err:
            summary_error = str(err)

# Helper list of "position" names (i.e. filenames)
pos_list = list(logs.keys())
290
+
291
+ # Helper: turn a “spectra” DataFrame into a long‐format table for plotting
292
+ def spectra_to_rows(df: pd.DataFrame, pos_names: List[str]) -> pd.DataFrame | None:
293
+ if df is None:
294
+ return None
295
+ if not isinstance(df.columns, pd.MultiIndex):
296
+ tidy = df.reset_index().rename(columns={df.index.name or "index": "Period"})
297
+ if "Position" not in tidy.columns:
298
+ tidy.insert(0, "Position", pos_names[0] if pos_names else "Pos1")
299
+ return tidy
300
+
301
+ # If there is a MultiIndex
302
+ bands = [band for _, band in df.columns][: len({band for _, band in df.columns})]
303
+ set_len = len(bands)
304
+ blocks = []
305
+ for i, pos in enumerate(pos_names):
306
+ start, end = i * set_len, (i + 1) * set_len
307
+ if end > df.shape[1]:
308
+ break
309
+ sub = df.iloc[:, start:end].copy()
310
+ sub.columns = [str(b) for b in bands]
311
+ sub = sub.reset_index().rename(columns={df.index.names[-1] or "index": "Period"})
312
+ if "Position" not in sub.columns:
313
+ sub.insert(0, "Position", pos)
314
+ blocks.append(sub)
315
+ return pd.concat(blocks, ignore_index=True)
316
+
317
# Create the Summary tab plus one tab per measurement position
ui_tabs = st.tabs(["Summary"] + pos_list)

# Summary tab
with ui_tabs[0]:
    st.subheader("Broadband Summary")

    survey = st.session_state.get("survey")
    if survey is None:
        st.warning("Survey not found in session_state['survey']. Ensure you set it after loading.")
    render_resi_summary(survey)

    if summary_df is None:
        st.warning(f"Summary unavailable: {summary_error}")
    else:
        st.dataframe(summary_df)

    # Plot the two spectra tables, when the Survey produced them
    for title, df_data in (
        ("Typical Leq Spectra", leq_spec_df),
        ("Lmax Spectra", lmax_spec_df),
    ):
        tidy = spectra_to_rows(df_data, pos_list)
        if tidy is None:
            continue

        freq_cols = [c for c in tidy.columns if c not in ("Position", "Period", "A")]
        if not freq_cols:
            st.warning(f"No frequency columns found for `{title}`.")
            continue

        fig = go.Figure()
        for pos in pos_list:
            subset = tidy[tidy["Position"] == pos]
            for _, row in subset.iterrows():
                # Cast to string so .lower() is safe on non-string labels
                period_label_str = str(row["Period"])
                # Daytime traces get markers; everything else is a plain line
                if period_label_str.lower().startswith("day"):
                    mode = "lines+markers"
                else:
                    mode = "lines"
                # Prefix the position name only when several positions share the chart
                if len(pos_list) > 1:
                    label = f"{pos} {period_label_str}"
                else:
                    label = period_label_str
                fig.add_trace(
                    go.Scatter(
                        x=freq_cols,
                        y=row[freq_cols],
                        mode=mode,
                        name=label,
                    )
                )
        fig.update_layout(
            template=TEMPLATE,
            title=f"{title} - Day & Night",
            xaxis_title="Octave band (Hz)",
            yaxis_title="dB",
        )
        st.plotly_chart(fig, use_container_width=True)
380
+
381
# Position-specific tabs: one per uploaded file
for tab, uf in zip(ui_tabs[1:], files):
    with tab:
        log = logs.get(uf.name)
        if log is None:
            st.error(f"Log for `{uf.name}` not found.")
            continue

        # Decide whether to show aggregated or raw data for this position
        if st.session_state["apply_agg"]:
            # Resample to the integration period chosen in the sidebar
            try:
                df_used = log.as_interval(t=period)
                df_used = df_used.reset_index().rename(
                    columns={df_used.index.name or "index": "Timestamp"}
                )
                subheader = "Integrated Survey Data"
            except Exception as e:
                st.error(f"Failed to apply integration period for `{uf.name}`: {e}")
                continue
        else:
            # Fall back to the raw data held on the Log itself
            try:
                raw_master = log._master  # original DataFrame, indexed by Timestamp
                df_used = raw_master.reset_index().rename(columns={"Time": "Timestamp"})
                subheader = "Raw Survey Data"
            except Exception as e:
                st.error(f"Failed to load raw data for `{uf.name}`: {e}")
                continue

        # Copy with flattened column headers, used ONLY for plotting
        df_plot = df_used.copy()
        if isinstance(df_plot.columns, pd.MultiIndex):
            df_plot.columns = [
                f"{lvl0} {'' if lvl1 is None else lvl1}".strip()
                for lvl0, lvl1 in df_plot.columns
            ]

        # Time-history graph of the three A-weighted metrics
        required_cols = {"Leq A", "L90 A", "Lmax A"}
        if required_cols.issubset(set(df_plot.columns)):
            fig = go.Figure()
            # Leq and L90 as thin lines, Lmax as discrete markers
            for metric, trace_mode in (
                ("Leq A", "lines"),
                ("L90 A", "lines"),
                ("Lmax A", "markers"),
            ):
                if trace_mode == "lines":
                    style_kwargs = {"line": dict(color=COLOURS[metric], width=1)}
                else:
                    style_kwargs = {"marker": dict(color=COLOURS[metric], size=3)}
                fig.add_trace(
                    go.Scatter(
                        x=df_plot["Timestamp"],
                        y=df_plot[metric],
                        name=metric,
                        mode=trace_mode,
                        **style_kwargs,
                    )
                )
            fig.update_layout(
                template=TEMPLATE,
                margin=dict(l=0, r=0, t=0, b=0),
                xaxis=dict(
                    title="Time & Date (hh:mm & dd/mm/yyyy)",
                    type="date",
                    tickformat="%H:%M<br>%d/%m/%Y",
                    tickangle=0,
                ),
                yaxis_title="Measured Sound Pressure Level dB(A)",
                legend=dict(orientation="h", yanchor="top", y=-0.25, xanchor="left", x=0),
                height=600,
            )
            st.plotly_chart(fig, use_container_width=True)
        else:
            st.warning(f"Required columns {required_cols} missing in {subheader}.")

        # Finally, display the table with its MultiIndex intact
        st.subheader(subheader)
        st.dataframe(df_used, hide_index=True)
472
+
473
+
474
+
475
# --- Summary tab: show Lmax spectra table ---
# The first entry in `ui_tabs` is assumed to be "Summary"
try:
    summary_tab = ui_tabs[0]
except Exception:
    summary_tab = None

if summary_tab is not None:
    with summary_tab:
        st.markdown("### Lmax Spectra")
        # Seed the Lmax parameters once; the widgets below keep them live
        if "lmax_kwargs" not in st.session_state:
            st.session_state.lmax_kwargs = {"n": 10, "t": "2min", "period": "nights"}

        st.markdown("# Parameters")

        col1, col2, col3 = st.columns(3)
        with col1:
            n_val = st.number_input(
                "Nth-highest (n)",
                min_value=1,
                step=1,
                value=int(st.session_state.lmax_kwargs.get("n", 10)),
                key="lmax_n_input",
            )
        with col2:
            t_val = st.text_input(
                "Aggregation period (t)",
                value=st.session_state.lmax_kwargs.get("t", "2min"),
                key="lmax_t_input",
                help='Examples: "1min", "2min", "5min"',
            )
        with col3:
            period_options = ["days", "evenings", "nights"]
            default_period = st.session_state.lmax_kwargs.get("period", "nights")
            # Fall back to "nights" when the stored value is stale or unknown
            if default_period in period_options:
                default_idx = period_options.index(default_period)
            else:
                default_idx = period_options.index("nights")
            period_val = st.selectbox(
                "Time window",
                options=period_options,
                index=default_idx,
                key="lmax_period_select",
            )

        # Persist the freshly chosen parameters
        st.session_state.lmax_kwargs = {
            "n": int(n_val),
            "t": t_val.strip(),
            "period": period_val,
        }

        # Recompute immediately so parameter changes show without an extra rerun
        df_lmax = None
        try:
            df_lmax = survey.lmax_spectra(**st.session_state.lmax_kwargs)
            st.session_state.df_lmax = df_lmax
            st.success("Lmax spectra updated.")
        except Exception as e:
            st.session_state.df_lmax = None
            st.error(f"Failed to compute Lmax spectra: {e}")

        if st.session_state.df_lmax is not None:
            st.dataframe(st.session_state.df_lmax)

        # Leq spectra table
        st.subheader("Leq spectra")
        try:
            # Always compute the full Leq spectra across all logs/positions
            leq_spec_df = survey.leq_spectra()
            st.session_state["leq_spec_df"] = leq_spec_df

            # Flatten MultiIndex columns (if present) so the table renders clearly
            if hasattr(leq_spec_df, "columns") and isinstance(leq_spec_df.columns, pd.MultiIndex):
                leq_spec_df = leq_spec_df.copy()
                leq_spec_df.columns = [
                    " / ".join(map(str, c)) for c in leq_spec_df.columns.to_flat_index()
                ]

            if leq_spec_df is not None and hasattr(leq_spec_df, "empty") and not leq_spec_df.empty:
                st.dataframe(leq_spec_df, use_container_width=True)
            else:
                st.info("No Leq spectra available to display.")
        except Exception as e:
            st.warning(f"Unable to display Leq spectra: {e}")

        # --- Modal table (similar to "Leq spectra") ---
        st.subheader("Modal")

        # --- Modal duration overrides (in minutes, optional) ---
        with st.expander("Modal durations (optional overrides)", expanded=False):
            st.caption('Enter values like "60min" or "15min". Leave blank to use library defaults.')
            c1, c2, c3 = st.columns(3)
            with c1:
                modal_day_t = st.text_input("day_t", value="", placeholder="60min", key="modal_day_t")
            with c2:
                modal_evening_t = st.text_input("evening_t", value="", placeholder="120min", key="modal_evening_t")
            with c3:
                modal_night_t = st.text_input("night_t", value="", placeholder="180min", key="modal_night_t")
574
+
575
+
576
def _is_valid_minutes(s: str) -> bool:
    """Return True when *s* looks like "<positive-int>min" (whitespace/case tolerant)."""
    if not s:
        return False
    match = re.fullmatch(r"(\d+)\s*min", s.strip().lower())
    if match is None:
        return False
    try:
        minutes = int(match.group(1))
    except ValueError:
        return False
    return minutes > 0
587
+
588
+
589
def _normalize_minutes(s: str) -> str:
    """Normalise a validated duration string to "<int>min" (no spaces, lowercase).

    Callers are expected to validate with _is_valid_minutes first: a
    non-matching input makes re.fullmatch return None and raises here.
    """
    cleaned = s.strip().lower()
    digits = re.fullmatch(r"(\d+)\s*min", cleaned).group(1)
    return "{}min".format(int(digits))
594
+
595
+
596
# Build kwargs only from valid, non-empty overrides
modal_kwargs: dict = {}
for arg_name, raw_value, example in (
    ("day_t", modal_day_t, "60min"),
    ("evening_t", modal_evening_t, "120min"),
    ("night_t", modal_night_t, "180min"),
):
    if not raw_value.strip():
        continue  # blank means "use the library default"
    if _is_valid_minutes(raw_value):
        modal_kwargs[arg_name] = _normalize_minutes(raw_value)
    else:
        st.warning(f'{arg_name} must be in the format "<number>min", e.g., "{example}".')

if "modal_df" not in st.session_state:
    st.session_state.modal_df = None

if survey is not None:
    try:
        st.session_state.modal_df = survey.modal(**modal_kwargs)
    except Exception as e:
        st.session_state.modal_df = None
        st.warning(f"Could not compute modal results: {e}")

if st.session_state.modal_df is not None:
    st.dataframe(st.session_state.modal_df, use_container_width=True)
else:
    st.info("No modal results to display.")
629
+ # Python
630
+ # ... inside the method that prepares dataframes for rendering per tab/section ...
631
+ # Make sure `pd` is already imported as pandas and `self.leq_spec_df` has been set via self.survey.leq_spectra()
632
+
633
+
634
+
635
+ # ... keep the rest of your conditions (e.g., for other tables) unchanged ...