pycoustic 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,277 @@
+ import os
+ import tempfile
+ from typing import List, Dict
+
+ import pandas as pd
+ import plotly.graph_objects as go
+ import streamlit as st
+
+ from log import *
+ from survey import *
+
+ # Streamlit app config
+ st.set_page_config(page_title="Pycoustic Acoustic Survey Explorer", layout="wide")
+
+ # Graph colour palette config
+ COLOURS = {
+     "Leq A": "#9e9e9e",  # light grey
+     "L90 A": "#4d4d4d",  # dark grey
+     "Lmax A": "#fc2c2c",  # red
+ }
+ # Graph template config
+ TEMPLATE = "plotly"
+
+ if "apply_agg" not in st.session_state:
+     st.session_state["apply_agg"] = False
+ if "period_last" not in st.session_state:
+     st.session_state["period_last"] = ""
+
+ with st.sidebar:
+     # File upload in an expander container
+     with st.expander("File Upload", expanded=True):
+         files = st.file_uploader(
+             "Select one or more CSV files",
+             type="csv",
+             accept_multiple_files=True,
+         )
+         if not files:
+             st.stop()
+     # Integration period entry in an expander container
+     with st.expander("Integration Period", expanded=True):
+         int_period = st.number_input(
+             "Insert new integration period (must be larger than data)",
+             step=1,
+             value=15,
+         )
+         period_select = st.selectbox(
+             "Please select time period",
+             ("second(s)", "minute(s)", "hour(s)"),
+             index=1,
+         )
+
+     # Build the period string (a pandas offset alias such as "15min")
+     suffix_map = {"second(s)": "s", "minute(s)": "min", "hour(s)": "h"}
+     period = f"{int_period}{suffix_map.get(period_select, '')}"
+
+     # If the period changed since last time, reset the "apply_agg" flag
+     if st.session_state["period_last"] != period:
+         st.session_state["apply_agg"] = False
+         st.session_state["period_last"] = period
+
+     # Button to trigger aggregation for ALL positions
+     apply_agg_btn = st.button("Apply Integration Period")
+     if apply_agg_btn:
+         st.session_state["apply_agg"] = True
+
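The `period` string assembled above ("15" + "min" → "15min") is a pandas offset alias, which is presumably what `log.as_interval(t=period)` consumes further down. A minimal sketch of how such an alias drives resampling, using a synthetic one-second series and an energetic (not arithmetic) Leq average:

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2024-01-01 00:00", periods=3600, freq="s")
leq = pd.Series(55.0, index=idx, name="Leq A")  # synthetic one-second Leq values

def leq_energetic(x):
    # Leq values combine energetically, not as an arithmetic mean
    return 10 * np.log10(np.mean(10 ** (x / 10)))

print(leq.resample("15min").apply(leq_energetic))
```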
+ # Main window / data load
+ with st.spinner("Processing Data...", show_time=True):
+     # Load each uploaded CSV into a pycoustic Log
+     logs: Dict[str, Log] = {}
+     for upload_file in files:
+         # delete=False so the file can be reopened by Log (required on Windows);
+         # it is removed explicitly in the finally block below.
+         with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmp:
+             tmp.write(upload_file.getbuffer())
+             path = tmp.name
+         try:
+             logs[upload_file.name] = Log(path)
+         except Exception as err:
+             st.error(f"Failed to load `{upload_file.name}` into Pycoustic: {err}")
+         finally:
+             os.unlink(path)
+
+     # Build Survey and pull summary + spectra
+     summary_df = leq_spec_df = lmax_spec_df = None
+     summary_error = ""
+     if logs:
+         try:
+             survey = Survey()
+             if callable(getattr(survey, "add_log", None)):
+                 for name, lg in logs.items():
+                     survey.add_log(lg, name=name)
+             elif hasattr(survey, "_logs"):
+                 survey._logs = logs
+
+             summary_df = survey.resi_summary()
+             leq_spec_df = getattr(survey, "typical_leq_spectra", lambda: None)()
+             lmax_spec_df = getattr(survey, "lmax_spectra", lambda: None)()
+         except Exception as err:
+             summary_error = str(err)
+     else:
+         summary_error = "No valid logs loaded."
+
+ # Helper list of "position" names (i.e. filenames)
+ pos_list = list(logs.keys())
+
+ # Helper: turn a "spectra" DataFrame into a long-format table for plotting
+ def spectra_to_rows(df: pd.DataFrame, pos_names: List[str]) -> pd.DataFrame | None:
+     if df is None:
+         return None
+     if not isinstance(df.columns, pd.MultiIndex):
+         tidy = df.reset_index().rename(columns={df.index.name or "index": "Period"})
+         if "Position" not in tidy.columns:
+             tidy.insert(0, "Position", pos_names[0] if pos_names else "Pos1")
+         return tidy
+
+     # MultiIndex columns: assume (position, band) pairs where every position
+     # carries the same band set, then slice one block of columns per position.
+     bands = [band for _, band in df.columns][: len({band for _, band in df.columns})]
+     set_len = len(bands)
+     blocks = []
+     for i, pos in enumerate(pos_names):
+         start, end = i * set_len, (i + 1) * set_len
+         if end > df.shape[1]:
+             break
+         sub = df.iloc[:, start:end].copy()
+         sub.columns = [str(b) for b in bands]
+         sub = sub.reset_index().rename(columns={df.index.names[-1] or "index": "Period"})
+         if "Position" not in sub.columns:
+             sub.insert(0, "Position", pos)
+         blocks.append(sub)
+     return pd.concat(blocks, ignore_index=True)
+
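A toy illustration of the shape `spectra_to_rows` expects in the MultiIndex case, assuming (position, band) column pairs with the same band set per position:

```python
import pandas as pd

cols = pd.MultiIndex.from_product([["Pos1", "Pos2"], [63, 125, 250]])
spec = pd.DataFrame(
    [[50, 48, 45, 52, 49, 46]],
    index=pd.Index(["Daytime"], name="Period"),
    columns=cols,
)
print(spectra_to_rows(spec, ["Pos1", "Pos2"]))
# -> one row per (Position, Period), with band columns "63", "125", "250"
```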
+ # Create tabs
+ ui_tabs = st.tabs(["Summary"] + pos_list)
+
+ # Summary tab
+ with ui_tabs[0]:
+     st.subheader("Broadband Summary")
+     if summary_df is not None:
+         st.dataframe(summary_df)
+     else:
+         st.warning(f"Summary unavailable: {summary_error}")
+
+     # Plot "Typical Leq Spectra" and "Lmax Spectra", if available
+     for title, df_data in (
+         ("Typical Leq Spectra", leq_spec_df),
+         ("Lmax Spectra", lmax_spec_df),
+     ):
+         tidy = spectra_to_rows(df_data, pos_list)
+         if tidy is None:
+             continue
+
+         freq_cols = [c for c in tidy.columns if c not in ("Position", "Period", "A")]
+         if freq_cols:
+             fig = go.Figure()
+             for pos in pos_list:
+                 subset = tidy[tidy["Position"] == pos]
+                 for _, row in subset.iterrows():
+                     period_label = row["Period"]
+                     # Cast to string so .lower() is safe
+                     period_label_str = str(period_label)
+                     mode = (
+                         "lines+markers"
+                         if period_label_str.lower().startswith("day")
+                         else "lines"
+                     )
+                     label = (
+                         f"{pos} {period_label_str}"
+                         if len(pos_list) > 1
+                         else period_label_str
+                     )
+                     fig.add_trace(
+                         go.Scatter(
+                             x=freq_cols,
+                             y=row[freq_cols],
+                             mode=mode,
+                             name=label,
+                         )
+                     )
+             fig.update_layout(
+                 template=TEMPLATE,
+                 title=f"{title} - Day & Night",
+                 xaxis_title="Octave band (Hz)",
+                 yaxis_title="dB",
+             )
+             st.plotly_chart(fig, use_container_width=True)
+         else:
+             st.warning(f"No frequency columns found for `{title}`.")
+
+ # Position-specific tabs: iterate over pos_list (not files) so tabs stay
+ # aligned with their logs even if one upload failed to load.
+ for tab, pos_name in zip(ui_tabs[1:], pos_list):
+     with tab:
+         log = logs.get(pos_name)
+         if log is None:
+             st.error(f"Log for `{pos_name}` not found.")
+             continue
+
+         # Decide whether to show raw or aggregated data
+         if st.session_state["apply_agg"]:
+             # 1) Re-aggregate / resample using the chosen period
+             try:
+                 df_used = log.as_interval(t=period)
+                 df_used = df_used.reset_index().rename(
+                     columns={df_used.index.name or "index": "Timestamp"}
+                 )
+                 subheader = "Integrated Survey Data"
+             except Exception as e:
+                 st.error(f"Failed to apply integration period for `{pos_name}`: {e}")
+                 continue
+         else:
+             # 2) Show the raw data (from log._master) if available
+             try:
+                 raw_master = log._master  # original DataFrame, indexed by Timestamp
+                 df_used = raw_master.reset_index().rename(columns={"Time": "Timestamp"})
+                 subheader = "Raw Survey Data"
+             except Exception as e:
+                 st.error(f"Failed to load raw data for `{pos_name}`: {e}")
+                 continue
+
+         # Prepare a flattened-column copy JUST FOR PLOTTING
+         df_plot = df_used.copy()
+         if isinstance(df_plot.columns, pd.MultiIndex):
+             flattened_cols = []
+             for lvl0, lvl1 in df_plot.columns:
+                 lvl0_str = str(lvl0)
+                 lvl1_str = str(lvl1) if lvl1 is not None else ""
+                 flattened_cols.append(f"{lvl0_str} {lvl1_str}".strip())
+             df_plot.columns = flattened_cols
+
+         # Time-history graph (Leq A, L90 A, Lmax A) using df_plot
+         required_cols = {"Leq A", "L90 A", "Lmax A"}
+         if required_cols.issubset(set(df_plot.columns)):
+             fig = go.Figure()
+             fig.add_trace(
+                 go.Scatter(
+                     x=df_plot["Timestamp"],
+                     y=df_plot["Leq A"],
+                     name="Leq A",
+                     mode="lines",
+                     line=dict(color=COLOURS["Leq A"], width=1),
+                 )
+             )
+             fig.add_trace(
+                 go.Scatter(
+                     x=df_plot["Timestamp"],
+                     y=df_plot["L90 A"],
+                     name="L90 A",
+                     mode="lines",
+                     line=dict(color=COLOURS["L90 A"], width=1),
+                 )
+             )
+             fig.add_trace(
+                 go.Scatter(
+                     x=df_plot["Timestamp"],
+                     y=df_plot["Lmax A"],
+                     name="Lmax A",
+                     mode="markers",
+                     marker=dict(color=COLOURS["Lmax A"], size=3),
+                 )
+             )
+             fig.update_layout(
+                 template=TEMPLATE,
+                 margin=dict(l=0, r=0, t=0, b=0),
+                 xaxis=dict(
+                     title="Time & Date (hh:mm & dd/mm/yyyy)",
+                     type="date",
+                     tickformat="%H:%M<br>%d/%m/%Y",
+                     tickangle=0,
+                 ),
+                 yaxis_title="Measured Sound Pressure Level dB(A)",
+                 legend=dict(orientation="h", yanchor="top", y=-0.25, xanchor="left", x=0),
+                 height=600,
+             )
+             st.plotly_chart(fig, use_container_width=True)
+         else:
+             st.warning(f"One or more of the required columns {required_cols} are missing from {subheader}.")
+
+         # Finally, display the table with its MultiIndex intact
+         st.subheader(subheader)
+         st.dataframe(df_used, hide_index=True)
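Stripped of the Streamlit wiring, the pycoustic calls this explorer chains together reduce to the sketch below (filenames are placeholders; `add_log` is probed defensively above because its signature may differ between versions):

```python
from pycoustic import Log, Survey

survey = Survey()
survey.add_log(Log("pos1.csv"), name="Pos1")
survey.add_log(Log("pos2.csv"), name="Pos2")

print(survey.resi_summary())         # broadband summary shown on the Summary tab
print(survey.typical_leq_spectra())  # typical Leq spectra, if the method exists
print(survey.lmax_spectra())         # Lmax spectra, if the method exists
```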
@@ -0,0 +1,421 @@
+ # streamlit_pycoustic_app.py
+ import io
+ import json
+ import os
+ import tempfile
+ from datetime import time
+ from typing import Optional
+
+ import pandas as pd
+ import streamlit as st
+
+ # Import pycoustic classes
+ from pycoustic import Log, Survey
+
+ class _SafeNoop:
+     """
+     Minimal no-op proxy that safely absorbs attribute access and calls.
+     Prevents AttributeError like "'str' object has no attribute ...".
+     """
+     def __init__(self, name: str = "object"):
+         self._name = name
+
+     def __getattr__(self, item):
+         return _SafeNoop(f"{self._name}.{item}")
+
+     def __call__(self, *args, **kwargs):
+         return None
+
+     def __repr__(self) -> str:
+         return f"<_SafeNoop {self._name}>"
+
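The proxy absorbs arbitrary chained access, so code that expects a survey object degrades to harmless no-ops; for instance:

```python
noop = _SafeNoop("survey")
noop.set_periods(times={"day": (7, 0)})  # silently returns None
print(noop.resi_summary)                 # <_SafeNoop survey.resi_summary>
```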
+ def _sanitize_session_state() -> None:
+     """
+     Replace any string left in common survey/log slots with a safe no-op proxy.
+     This avoids downstream AttributeError when code expects objects.
+     """
+     try:
+         import streamlit as st  # type: ignore
+     except Exception:
+         return
+
+     for key in ("survey", "log_obj", "log"):
+         if key in st.session_state:
+             val = st.session_state.get(key)
+             if isinstance(val, str):
+                 # Preserve the original label in case it is useful for the UI
+                 st.session_state[f"{key}_name"] = val
+                 # Install a no-op proxy in place of the string
+                 st.session_state[key] = _SafeNoop(key)
+
+ # Run sanitization as early as possible
+ _sanitize_session_state()
+
+ def _resolve_survey_like() -> Optional[object]:
+     """
+     Return the first available survey-like object from session state,
+     or None if nothing usable is present.
+     """
+     try:
+         import streamlit as st  # type: ignore
+     except Exception:
+         return None
+
+     for key in ("survey", "log_obj", "log"):
+         if key in st.session_state:
+             return st.session_state.get(key)
+     return None
+
+ def _coerce_hm_tuple(val) -> tuple[int, int]:
+     """
+     Coerce an input into an (hour, minute) tuple.
+     Accepts tuples, lists, or 'HH:MM' / 'H:M' strings.
+     """
+     if isinstance(val, (tuple, list)) and len(val) == 2:
+         return int(val[0]), int(val[1])
+     if isinstance(val, str):
+         parts = val.strip().split(":")
+         if len(parts) == 2:
+             return int(parts[0]), int(parts[1])
+     # Fall back to 00:00 if the input is invalid
+     return 0, 0
+
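Expected coercions, for illustration:

```python
assert _coerce_hm_tuple((7, 0)) == (7, 0)
assert _coerce_hm_tuple("23:30") == (23, 30)
assert _coerce_hm_tuple("not a time") == (0, 0)  # invalid input falls back to midnight
```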
+ def _set_periods_on_survey(day_tuple, eve_tuple, night_tuple) -> None:
+     """
+     Accepts (hour, minute) tuples and updates the Survey periods, if available.
+     Safely no-ops if a proper survey object isn't present.
+     """
+     survey = _resolve_survey_like()
+     if survey is None:
+         return
+
+     times = {
+         "day": _coerce_hm_tuple(day_tuple),
+         "evening": _coerce_hm_tuple(eve_tuple),
+         "night": _coerce_hm_tuple(night_tuple),
+     }
+
+     setter = getattr(survey, "set_periods", None)
+     if callable(setter):
+         try:
+             setter(times=times)
+         except Exception:
+             # Swallow errors to keep the UI responsive even if the backend rejects the values
+             pass
+
+ def _looks_like_path(s: str) -> bool:
+     s = s.strip()
+     return (
+         s.lower().endswith(".csv")
+         or os.sep in s
+         or "/" in s
+         or "\\" in s
+     )
+
+ def _usable_acoustic_obj(obj) -> bool:
+     # Consider it usable if it exposes either API used elsewhere.
+     return hasattr(obj, "set_periods") or hasattr(obj, "_leq_by_date")
+
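Illustrative behaviour of the path heuristic (the strings are arbitrary examples):

```python
assert _looks_like_path("data/pos1.csv")   # separator and .csv suffix
assert _looks_like_path("POS1.CSV")        # suffix check is case-insensitive
assert not _looks_like_path("Position 1")  # a plain label, not a path
```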
+ def _coerce_or_clear_state_key(st, key: str) -> None:
+     """
+     If st.session_state[key] is a string:
+       - If it looks like a CSV path, try to build a Log object from it.
+       - Otherwise, move it to key_name and clear the object slot to avoid attribute errors.
+     """
+     if key not in st.session_state:
+         return
+
+     val = st.session_state.get(key)
+
+     # Already a usable object
+     if _usable_acoustic_obj(val):
+         return
+
+     # Try to coerce from a CSV-like path string
+     if isinstance(val, str):
+         if _looks_like_path(val):
+             try:
+                 import pycoustic as pc  # Lazy import
+                 st.session_state[key] = pc.Log(path=val.strip())
+                 return
+             except Exception:
+                 # Fall through to clearing if coercion fails
+                 pass
+
+         # Preserve the label for the UI, clear the object slot to avoid attribute errors
+         st.session_state[f"{key}_name"] = val
+         st.session_state[key] = None
+
+ def _normalize_session_state() -> None:
+     try:
+         import streamlit as st  # type: ignore
+     except Exception:
+         return
+
+     # Coerce or clear common object keys
+     for k in ("survey", "log_obj", "log"):
+         _coerce_or_clear_state_key(st, k)
+
+     # Promote the first usable object into the canonical "survey" slot
+     if not _usable_acoustic_obj(st.session_state.get("survey")):
+         for k in ("log_obj", "log"):
+             candidate = st.session_state.get(k)
+             if _usable_acoustic_obj(candidate):
+                 st.session_state["survey"] = candidate
+                 break
+
+ # Run normalization early so downstream code doesn't encounter attribute errors
+ _normalize_session_state()
+
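The net effect of the normalization pass, sketched for the stray-string case (the path is illustrative):

```python
st.session_state["survey"] = "data/pos1.csv"  # stray string left by an earlier run
_normalize_session_state()
# "survey" now holds a Log built from that path, or None with the original
# label preserved under "survey_name" if the coercion failed.
```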
+ # --------------- Helpers ---------------
+
+ def save_upload_to_tmp(uploaded_file) -> str:
+     """Persist an uploaded CSV to a temporary file and return its path."""
+     # Create a persistent temporary file (deleted later on reset)
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmp:
+         tmp.write(uploaded_file.getbuffer())
+         return tmp.name
+
+ def build_survey(log_map: dict, times_kwarg: dict | None = None) -> Survey:
+     """Create a Survey, attach logs, and optionally call set_periods(times=...)."""
+     survey = Survey()
+
+     # Attach logs to the Survey. If a public adder method exists, prefer it;
+     # fall back to direct assignment of the internal storage attribute.
+     if hasattr(survey, "add_log"):
+         for key, lg in log_map.items():
+             try:
+                 survey.add_log(key, lg)  # type: ignore[attr-defined]
+             except Exception:
+                 # Fallback if the signature differs
+                 setattr(survey, "_logs", log_map)
+                 break
+     else:
+         setattr(survey, "_logs", log_map)
+
+     # Apply periods if provided
+     if times_kwarg is not None:
+         try:
+             survey.set_periods(times=times_kwarg)
+         except Exception as e:
+             st.warning(f"set_periods failed with provided times: {e}")
+
+     return survey
+
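A hedged usage sketch of `build_survey` (filenames are placeholders; the dictionary keys act as position names):

```python
logs = {
    "Pos1": Log(path="pos1.csv"),
    "Pos2": Log(path="pos2.csv"),
}
survey = build_survey(
    logs,
    times_kwarg={"day": (7, 0), "evening": (19, 0), "night": (23, 0)},
)
df = survey.resi_summary(lmax_n=5, lmax_t=30)
```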
+ def flatten_columns(df: pd.DataFrame) -> pd.DataFrame:
+     """Flatten MultiIndex columns for nicer display in Streamlit."""
+     if isinstance(df.columns, pd.MultiIndex):
+         flat = df.copy()
+         flat.columns = [" / ".join(map(str, c)) for c in df.columns.to_flat_index()]
+         return flat
+     return df
+
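For example:

```python
cols = pd.MultiIndex.from_tuples([("Pos1", "Leq"), ("Pos1", "L90")])
df = pd.DataFrame([[55.0, 48.0]], columns=cols)
print(flatten_columns(df).columns.tolist())  # ['Pos1 / Leq', 'Pos1 / L90']
```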
+ def parse_extra_kwargs(raw: str) -> dict:
+     """Parse a JSON dict from a text area. Returns {} on error."""
+     if not raw or not raw.strip():
+         return {}
+     try:
+         parsed = json.loads(raw)
+         if not isinstance(parsed, dict):
+             st.warning("Extra kwargs JSON should be an object/dict; ignoring.")
+             return {}
+         return parsed
+     except Exception as e:
+         st.warning(f"Unable to parse extra kwargs JSON. Ignoring. Error: {e}")
+         return {}
+
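Expected behaviour (the `include_LAE` key mirrors the hint text further down and is only an example, not a documented resi_summary kwarg):

```python
assert parse_extra_kwargs("") == {}
assert parse_extra_kwargs('{"include_LAE": true}') == {"include_LAE": True}
assert parse_extra_kwargs("[1, 2]") == {}  # non-dict JSON is rejected with a warning
```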
+ # --------------- Streamlit App ---------------
+
+ st.set_page_config(page_title="pycoustic GUI", layout="wide")
+ st.title("pycoustic Streamlit GUI")
+
+ # Initialize session state
+ ss = st.session_state
+ ss.setdefault("tmp_paths", [])  # List[str] for cleanup
+ ss.setdefault("logs", {})  # Dict[str, Log]
+ ss.setdefault("survey", None)  # Survey or None
+ ss.setdefault("resi_df", None)  # Cached summary
+ ss.setdefault("periods_times", {  # Default start times for set_periods()
+     "day": (7, 0),
+     "evening": (19, 0),  # was (23, 0), which made the evening period collide with night
+     "night": (23, 0),
+ })
+ ss.setdefault("lmax_n", 5)
+ ss.setdefault("lmax_t", 30)
+ ss.setdefault("extra_kwargs_raw", "{}")
+
+ with st.expander("1) Load CSV data", expanded=True):
+     st.write("Upload one or more CSV files to create Log objects for a single Survey.")
+
+     uploaded = st.file_uploader(
+         "Select CSV files",
+         type=["csv"],
+         accept_multiple_files=True,
+         help="Each CSV should match the expected pycoustic format."
+     )
+
+     if uploaded:
+         st.caption("Assign a position name for each file (defaults to the base filename).")
+
+         # Build a list of (file, name) pairs for user naming
+         pos_names = []
+         for idx, f in enumerate(uploaded):
+             default_name = f.name.rsplit(".", 1)[0]
+             name = st.text_input(
+                 f"Position name for file {idx + 1}: {f.name}",
+                 value=default_name,
+                 key=f"pos_name_{f.name}_{idx}"
+             )
+             pos_names.append((f, name.strip() or default_name))
+
+         col_l, col_r = st.columns([1, 1])
+         replace = col_l.checkbox("Replace existing survey/logs", value=True)
+         load_btn = col_r.button("Load CSVs")
+
+         if load_btn:
+             if replace:
+                 # Reset previous state
+                 for p in ss["tmp_paths"]:
+                     try:
+                         # Clean up temp files; not critical if this fails
+                         os.unlink(p)
+                     except Exception:
+                         pass
+                 ss["tmp_paths"] = []
+                 ss["logs"] = {}
+                 ss["survey"] = None
+                 ss["resi_df"] = None
+
+             added = 0
+             for f, pos_name in pos_names:
+                 try:
+                     tmp_path = save_upload_to_tmp(f)
+                     ss["tmp_paths"].append(tmp_path)
+                     log_obj = Log(path=tmp_path)
+                     ss["logs"][pos_name] = log_obj
+                     added += 1
+                 except Exception as e:
+                     st.error(f"Failed to load {f.name}: {e}")
+
+             if added > 0:
+                 st.success(f"Loaded {added} file(s) into logs.")
+             else:
+                 st.warning("No files loaded. Please check the CSV format and try again.")
+
+     if ss["logs"]:
+         st.info(f"Current logs in session: {', '.join(ss['logs'].keys())}")
+
+ with st.expander("2) Configure periods (survey.set_periods)", expanded=True):
+     st.write("Set daily period start times. These will be passed as times=... to set_periods().")
+
+     # Show time pickers; convert their values to (hour, minute) tuples
+     day_t = st.time_input("Day start", value=time(ss["periods_times"]["day"][0], ss["periods_times"]["day"][1]))
+     eve_t = st.time_input("Evening start", value=time(ss["periods_times"]["evening"][0], ss["periods_times"]["evening"][1]))
+     night_t = st.time_input("Night start", value=time(ss["periods_times"]["night"][0], ss["periods_times"]["night"][1]))
+
+     # Update in session
+     new_times = {
+         "day": (day_t.hour, day_t.minute),
+         "evening": (eve_t.hour, eve_t.minute),
+         "night": (night_t.hour, night_t.minute),
+     }
+
+     apply_periods = st.button("Apply periods to Survey")
+
+     if apply_periods:
+         if not ss["logs"]:
+             st.warning("Load logs first.")
+         else:
+             ss["periods_times"] = new_times
+             # Build or update the Survey
+             ss["survey"] = build_survey(ss["logs"], times_kwarg=ss["periods_times"])
+             # Invalidate the old summary
+             ss["resi_df"] = None
+             st.success("Periods applied to Survey.")
+
+ with st.expander("3) Compute results (survey.resi_summary)", expanded=True):
+     st.write("Set kwargs for resi_summary(). Adjust lmax_n and lmax_t, and optionally pass extra kwargs as JSON.")
+
+     col1, col2 = st.columns([1, 1])
+     ss["lmax_n"] = col1.number_input("lmax_n", min_value=1, value=int(ss["lmax_n"]), step=1)
+     ss["lmax_t"] = col2.number_input("lmax_t", min_value=1, value=int(ss["lmax_t"]), step=1)
+
+     ss["extra_kwargs_raw"] = st.text_area(
+         "Extra kwargs (JSON object)",
+         value=ss["extra_kwargs_raw"],
+         height=120,
+         help="Example: {\"include_LAE\": true} (only pass valid kwargs for resi_summary)"
+     )
+
+     compute = st.button("Update resi_summary")
+
+     if compute:
+         if ss["survey"] is None:
+             if not ss["logs"]:
+                 st.warning("Load logs first.")
+             else:
+                 # Create the Survey if it is missing
+                 ss["survey"] = build_survey(ss["logs"], times_kwarg=ss["periods_times"])
+
+         if ss["survey"] is not None:
+             kwargs = parse_extra_kwargs(ss["extra_kwargs_raw"])
+             kwargs["lmax_n"] = int(ss["lmax_n"])
+             kwargs["lmax_t"] = int(ss["lmax_t"])
+
+             try:
+                 df = ss["survey"].resi_summary(**kwargs)
+                 if df is None or (hasattr(df, "empty") and df.empty):
+                     st.info("resi_summary returned no data.")
+                     ss["resi_df"] = None
+                 else:
+                     ss["resi_df"] = df
+                     st.success("resi_summary updated.")
+             except Exception as e:
+                 st.error(f"resi_summary failed: {e}")
+
+ # --------------- Results ---------------
+ st.subheader("resi_summary results")
+ if ss["resi_df"] is not None:
+     show_df = flatten_columns(ss["resi_df"])
+     st.dataframe(show_df, use_container_width=True)
+
+     # Download
+     try:
+         csv_buf = io.StringIO()
+         show_df.to_csv(csv_buf)
+         st.download_button(
+             "Download CSV",
+             data=csv_buf.getvalue(),
+             file_name="resi_summary.csv",
+             mime="text/csv"
+         )
+     except Exception as e:
+         st.warning(f"Unable to prepare CSV download: {e}")
+ else:
+     st.info("No results yet. Load CSVs, apply periods, and compute resi_summary.")
+
+ # --------------- Utilities ---------------
+ with st.sidebar:
+     st.header("Utilities")
+     if st.button("Reset session"):
+         # Clean up temp files
+         for p in ss["tmp_paths"]:
+             try:
+                 os.unlink(p)
+             except Exception:
+                 pass
+         for key in list(st.session_state.keys()):
+             del st.session_state[key]
+         st.rerun()  # st.experimental_rerun() was removed in recent Streamlit releases
+
+ st.caption("Tip: After uploading and loading files, set periods, then compute resi_summary.")