pycoustic-0.1.8-py3-none-any.whl → pycoustic-0.1.10-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
@@ -0,0 +1,142 @@
+ import os
+ import tempfile
+ from typing import List, Dict
+
+ import pandas as pd
+ import plotly.graph_objects as go
+ import streamlit as st
+
+ # Import pycoustic classes
+ from log import *
+ from survey import *
+ from weather import *
+
+ st.set_page_config(page_title="pycoustic GUI", layout="wide")
+ st.title("pycoustic Streamlit GUI")
+
+ # Initialize session state
+ ss = st.session_state
+ ss.setdefault("tmp_paths", [])  # List[str] for cleanup
+ ss.setdefault("logs", {})  # Dict[str, Log]
+ ss.setdefault("survey", None)  # Survey or None
+ ss.setdefault("resi_df", None)  # Cached summary
+ ss.setdefault("periods_times", {  # Default times for set_periods()
+     "day": (7, 0),
+     "evening": (19, 0),  # 19:00 evening start; (23, 0) would make the evening window empty
+     "night": (23, 0),
+ })
+ ss.setdefault("lmax_n", 5)
+ ss.setdefault("lmax_t", 30)
+ ss.setdefault("extra_kwargs_raw", "{}")
+
+
+ def save_upload_to_tmp(uploaded_file) -> str:
+     """Persist an uploaded CSV to a temporary file and return its path."""
+     # Create a persistent temporary file (delete later on reset)
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmp:
+         tmp.write(uploaded_file.getbuffer())
+         return tmp.name
+
+
+ def build_survey(log_map: dict, times_kwarg: dict | None = None) -> Survey:
+     """Create a Survey, attach logs, and optionally call set_periods(times=...)."""
+     survey = Survey()
+
+     # Attach logs to the Survey (simple, direct assignment to internal storage)
+     # If a public adder method exists, prefer that; fallback to internal attribute.
+     if hasattr(survey, "add_log"):
+         for key, lg in log_map.items():
+             try:
+                 survey.add_log(key, lg)  # type: ignore[attr-defined]
+             except Exception:
+                 # Fallback if signature differs
+                 setattr(survey, "_logs", log_map)
+                 break
+     else:
+         setattr(survey, "_logs", log_map)
+
+     # Apply periods if provided
+     if times_kwarg is not None:
+         try:
+             survey.set_periods(times=times_kwarg)
+         except Exception as e:
+             st.warning(f"set_periods failed with provided times: {e}")
+
+     return survey
+
+
+ # File Upload in expander container
+ with st.expander("1) Load CSV data", expanded=True):
+     st.write("Upload one or more CSV files to create Log objects for a single Survey.")
+
+     uploaded = st.file_uploader(
+         "Select CSV files",
+         type=["csv"],
+         accept_multiple_files=True,
+         help="Each CSV should match the expected pycoustic format."
+     )
+
+     if uploaded:
+         st.caption("Assign a position name for each file (defaults to base filename).")
+
+         # Build a list of (file, default_name) for user naming
+         pos_names = []
+         for idx, f in enumerate(uploaded):
+             default_name = f.name.rsplit(".", 1)[0]
+             name = st.text_input(
+                 f"Position name for file {idx + 1}: {f.name}",
+                 value=default_name,
+                 key=f"pos_name_{f.name}_{idx}"
+             )
+             pos_names.append((f, name.strip() or default_name))
+
+         col_l, col_r = st.columns([1, 1])
+         replace = col_l.checkbox("Replace existing survey/logs", value=True)
+         load_btn = col_r.button("Load CSVs")
+
+         if load_btn:
+             if replace:
+                 # Reset previous state
+                 for p in ss["tmp_paths"]:
+                     try:
+                         # Cleanup files on supported OS; not critical if it fails
+                         os.unlink(p)
+                     except Exception:
+                         pass
+                 ss["tmp_paths"] = []
+                 ss["logs"] = {}
+                 ss["survey"] = None
+                 ss["resi_df"] = None
+
+             added = 0
+             for f, pos_name in pos_names:
+                 try:
+                     tmp_path = save_upload_to_tmp(f)
+                     ss["tmp_paths"].append(tmp_path)
+                     log_obj = Log(path=tmp_path)
+                     ss["logs"][pos_name] = log_obj
+                     added += 1
+                 except Exception as e:
+                     st.error(f"Failed to load {f.name}: {e}")
+
+             if added > 0:
+                 st.success(f"Loaded {added} file(s) into logs.")
+             else:
+                 st.warning("No files loaded. Please check the CSV format and try again.")
+
+     if ss["logs"]:
+         st.info(f"Current logs in session: {', '.join(ss['logs'].keys())}")
+
+         ss["survey"] = Survey()
+         for k in ss["logs"].keys():
+             ss["survey"].add_log(ss["logs"][k], name=k)  # attach each loaded Log under its position name (add_log(log, name=...) assumed)
+             st.text(k)
+
+         st.text(type(ss["survey"]))
+         st.table(ss["survey"].resi_summary())
+
+ if ss["survey"] is not None:
+     with st.expander("Broadband Summary", expanded=True):
+         df = ss["survey"]._logs
+         st.text(df)
+ #test
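
For reference, the flow this page wires together can be reproduced without Streamlit. A minimal sketch (the CSV paths are placeholders, and add_log's signature is probed in the code above rather than documented, so both call orders are guarded):

    from pycoustic import Log, Survey

    logs = {"Pos1": Log(path="pos1.csv"), "Pos2": Log(path="pos2.csv")}  # hypothetical files

    survey = Survey()
    for name, lg in logs.items():
        try:
            survey.add_log(lg, name=name)  # order assumed at the bottom of this page
        except TypeError:
            survey.add_log(name, lg)       # order used by build_survey() above

    survey.set_periods(times={"day": (7, 0), "evening": (19, 0), "night": (23, 0)})
    print(survey.resi_summary())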
@@ -0,0 +1,234 @@
+ # streamlit_pycoustic_app.py
+ import ast
+ import datetime as dt
+ import tempfile
+ from pathlib import Path
+ from typing import Any, Dict, Iterable
+
+ import streamlit as st
+
+ from pycoustic import Log, Survey
+
+
+ def _parse_kwargs(text: str) -> Dict[str, Any]:
+     """
+     Safely parse a Python dict literal from the text area.
+     Returns {} if empty or invalid.
+     """
+     if not text or not text.strip():
+         return {}
+     try:
+         parsed = ast.literal_eval(text)
+         return parsed if isinstance(parsed, dict) else {}
+     except Exception:
+         return {}
+
+
+ def _display_result(obj: Any):
+     """
+     Display helper to handle common return types.
+     """
+     # Plotly Figure-like
+     if hasattr(obj, "to_plotly_json"):
+         st.plotly_chart(obj, use_container_width=True)
+         return
+
+     # Pandas DataFrame-like
+     if hasattr(obj, "to_dict") and hasattr(obj, "columns"):
+         st.dataframe(obj, use_container_width=True)
+         return
+
+     # Dict/list -> JSON
+     if isinstance(obj, (dict, list)):
+         st.json(obj)
+         return
+
+     # Fallback
+     st.write(obj)
+
+
+ def _ensure_state():
+     if "survey" not in st.session_state:
+         st.session_state["survey"] = None
+     if "periods" not in st.session_state:
+         st.session_state["periods"] = {"day": (7, 0), "evening": (19, 0), "night": (23, 0)}
+
+
+ def _write_uploaded_to_temp(uploaded) -> str:
+     """
+     Persist an UploadedFile to a temporary file and return the path.
+     Using a real file path keeps Log(...) happy across environments.
+     """
+     suffix = Path(uploaded.name).suffix or ".csv"
+     with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
+         tmp.write(uploaded.getbuffer())
+         return tmp.name
+
+
+ def _build_survey_from_files(files) -> Survey:
+     """
+     Create a Survey and attach Log objects for each uploaded file.
+     """
+     survey = Survey()
+     for f in files:
+         # Persist to disk to ensure compatibility with pandas and any path usage in Log
+         tmp_path = _write_uploaded_to_temp(f)
+         log_obj = Log(path=tmp_path)
+
+         key = Path(f.name).stem
+         # Attach Log to survey
+         if hasattr(survey, "add_log"):
+             try:
+                 survey.add_log(key, log_obj)
+             except TypeError:
+                 survey.add_log(log_obj, key)
+         else:
+             # Fallback to internal storage if no public API is available
+             survey._logs[key] = log_obj  # noqa: SLF001
+     return survey
+
+
+ def _apply_periods_to_all_logs(survey: Survey, times: Dict[str, tuple[int, int]]):
+     """
+     Apply set_periods to each Log attached to the Survey.
+     This avoids calling set_periods on Survey if it doesn't exist.
+     """
+     logs: Iterable[Log] = getattr(survey, "_logs", {}).values()
+     for log in logs:
+         if hasattr(log, "set_periods"):
+             log.set_periods(times=times)
+
+
+ def _render_period_controls(survey: Survey):
+     st.subheader("Assessment Periods")
+
+     # Current periods from session (defaults set in _ensure_state)
+     periods = st.session_state["periods"]
+     day_h, day_m = periods["day"]
+     eve_h, eve_m = periods["evening"]
+     night_h, night_m = periods["night"]
+
+     c1, c2, c3 = st.columns(3)
+     with c1:
+         day_time = st.time_input("Day starts", value=dt.time(day_h, day_m), key="period_day_start")
+     with c2:
+         eve_time = st.time_input("Evening starts", value=dt.time(eve_h, eve_m), key="period_eve_start")
+     with c3:
+         night_time = st.time_input("Night starts", value=dt.time(night_h, night_m), key="period_night_start")
+
+     new_periods = {
+         "day": (int(day_time.hour), int(day_time.minute)),
+         "evening": (int(eve_time.hour), int(eve_time.minute)),
+         "night": (int(night_time.hour), int(night_time.minute)),
+     }
+
+     if st.button("Apply periods to all logs", key="apply_periods"):
+         try:
+             _apply_periods_to_all_logs(survey, new_periods)
+             st.session_state["periods"] = new_periods
+             st.success("Periods applied to all logs.")
+         except Exception as e:
+             st.warning(f"Could not set periods: {e}")
+
+
+ def _render_method_runner(survey: Survey, method_name: str, help_text: str = ""):
+     """
+     Generic UI for running a Survey method with kwargs provided via text area.
+     """
+     with st.expander(method_name, expanded=True):
+         if help_text:
+             st.caption(help_text)
+
+         kwargs_text = st.text_area(
+             "kwargs (Python dict literal)",
+             value="{}",
+             key=f"kwargs_{method_name}",
+             placeholder='Example: {"position": "UA1", "date": "2023-06-01"}',
+             height=100,
+         )
+
+         kwargs = _parse_kwargs(kwargs_text)
+         if st.button(f"Run {method_name}", key=f"run_{method_name}"):
+             try:
+                 fn = getattr(survey, method_name)
+                 result = fn(**kwargs)
+                 _display_result(result)
+             except AttributeError:
+                 st.error(f"Survey has no method named '{method_name}'.")
+             except Exception as e:
+                 st.error(f"Error running {method_name}: {e}")
+
+
+ def main():
+     st.set_page_config(page_title="pycoustic GUI", layout="wide")
+     st.title("pycoustic – Streamlit GUI")
+
+     _ensure_state()
+
+     st.sidebar.header("Load CSV Logs")
+     files = st.sidebar.file_uploader(
+         "Upload one or more CSV files",
+         type=["csv"],
+         accept_multiple_files=True,
+         help="Each file becomes a Log; all Logs go into one Survey."
+     )
+
+     build = st.sidebar.button("Create / Update Survey", type="primary")
+
+     if build and files:
+         try:
+             survey = _build_survey_from_files(files)
+             # Apply default periods to all logs
+             _apply_periods_to_all_logs(survey, st.session_state["periods"])
+             st.session_state["survey"] = survey
+             st.success("Survey created/updated.")
+         except Exception as e:
+             st.error(f"Unable to create Survey: {e}")
+
+     survey: Survey = st.session_state.get("survey")
+
+     if survey is None:
+         st.info("Upload CSV files in the sidebar and click 'Create / Update Survey' to begin.")
+         return
+
+     # Period controls
+     _render_period_controls(survey)
+
+     st.markdown("---")
+     st.header("Survey Outputs")
+
+     _render_method_runner(
+         survey,
+         "resi_summary",
+         help_text="Summary results for residential assessment. Provide any optional kwargs here."
+     )
+     _render_method_runner(
+         survey,
+         "modal",
+         help_text="Run modal analysis over the survey. Provide any optional kwargs here."
+     )
+     _render_method_runner(
+         survey,
+         "leq_spectra",
+         help_text="Compute or plot Leq spectra. Provide any optional kwargs here."
+     )
+     _render_method_runner(
+         survey,
+         "lmax_spectra",
+         help_text="Compute or plot Lmax spectra. Provide any optional kwargs here."
+     )
+
+     st.markdown("---")
+     with st.expander("Loaded Logs", expanded=False):
+         try:
+             names = list(getattr(survey, "_logs", {}).keys())
+             if names:
+                 st.write(", ".join(names))
+             else:
+                 st.write("No logs found in survey.")
+         except Exception:
+             st.write("Unable to list logs.")
+
+
+ if __name__ == "__main__":
+     main()
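
A note on usage: assuming Streamlit is installed, this page can be launched with "streamlit run streamlit_pycoustic_app.py" (the filename suggested by its header comment). The kwargs text areas feed _parse_kwargs, whose fallback behaviour is easy to sanity-check (illustrative values only):

    assert _parse_kwargs("") == {}
    assert _parse_kwargs("{'cols': [('Leq', 'A')]}") == {"cols": [("Leq", "A")]}
    assert _parse_kwargs("[1, 2]") == {}       # non-dict literals are rejected
    assert _parse_kwargs("not python") == {}   # invalid input falls back to {}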
pycoustic/survey.py CHANGED
@@ -1,6 +1,6 @@
  import pandas as pd
  import numpy as np
- from .weather import WeatherHistory
+ # from .weather import WeatherHistory


  DECIMALS=1
@@ -39,6 +39,17 @@ class Survey:
          df.columns = new_cols
          return df

+     # def _leq_by_date(self, data, cols=None):
+     #     """
+     #     Delegate Leq-by-date computation to one of the underlying Log instances.
+     #     Assumes all logs share the same period configuration.
+     #     """
+     #     if not getattr(self, "_logs", None):
+     #         raise AttributeError("Survey has no logs available to compute _leq_by_date")
+     #     any_log = next(iter(self._logs.values()))
+     #     if not hasattr(any_log, "_leq_by_date"):
+     #         raise AttributeError("Underlying Log does not implement _leq_by_date")
+     #     return any_log._leq_by_date(data, cols=cols)
  # ###########################---PUBLIC---######################################

      def set_periods(self, times=None):
@@ -105,51 +116,54 @@
              leq_cols = [("Leq", "A")]
          if max_cols is None:
              max_cols = [("Lmax", "A")]
-         for key in self._logs.keys():
-             log = self._logs[key]
+
+         for key, lg in self._logs.items():  # changed: iterate items() to get lg directly
              combined_list = []
+             headers_for_log = []  # new: collect headers per log
+
              # Day
-             days = log._leq_by_date(log._get_period(data=log.get_antilogs(), period="days"), cols=leq_cols)
+             days = lg.leq_by_date(lg.get_period(data=lg.get_antilogs(), period="days"), cols=leq_cols)
              days.sort_index(inplace=True)
              combined_list.append(days)
-             period_headers = ["Daytime" for i in range(len(leq_cols))]
+             headers_for_log.extend(["Daytime"] * len(leq_cols))  # changed: don't reset global headers
+
              # Evening
-             if log.is_evening():
-                 evenings = log._leq_by_date(log._get_period(data=log.get_antilogs(), period="evenings"), cols=leq_cols)
+             if lg.is_evening():
+                 evenings = lg.leq_by_date(lg.get_period(data=lg.get_antilogs(), period="evenings"), cols=leq_cols)
                  evenings.sort_index(inplace=True)
                  combined_list.append(evenings)
-                 for i in range(len(leq_cols)):
-                     period_headers.append("Evening")
+                 headers_for_log.extend(["Evening"] * len(leq_cols))
+
              # Night Leq
-             nights = log._leq_by_date(log._get_period(data=log.get_antilogs(), period="nights"), cols=leq_cols)
+             nights = lg.leq_by_date(lg.get_period(data=lg.get_antilogs(), period="nights"), cols=leq_cols)
              nights.sort_index(inplace=True)
              combined_list.append(nights)
-             for i in range(len(leq_cols)):
-                 period_headers.append("Night-time")
+             headers_for_log.extend(["Night-time"] * len(leq_cols))
+
              # Night max
-             maxes = log.as_interval(t=lmax_t)
-             maxes = log._get_period(data=maxes, period="nights", night_idx=True)
-             maxes = log.get_nth_high_low(n=lmax_n, data=maxes)[max_cols]
+             maxes = lg.as_interval(t=lmax_t)
+             maxes = lg.get_period(data=maxes, period="nights", night_idx=True)
+             maxes = lg.get_nth_high_low(n=lmax_n, data=maxes)[max_cols]
              maxes.sort_index(inplace=True)
-             # +++
-             # SS Feb2025 - Code changed to prevent exception
-             #maxes.index = maxes.index.date
              try:
                  maxes.index = pd.to_datetime(maxes.index)
                  maxes.index = maxes.index.date
              except Exception as e:
                  print(f"Error converting index to date: {e}")
-             # SSS ---
              maxes.index.name = None
              combined_list.append(maxes)
-             for i in range(len(max_cols)):
-                 period_headers.append("Night-time")
+             headers_for_log.extend(["Night-time"] * len(max_cols))
+
              summary = pd.concat(objs=combined_list, axis=1)
              summary = self._insert_multiindex(df=summary, super=key)
              combi = pd.concat(objs=[combi, summary], axis=0)
+
+             # append this log's headers to the global list
+             period_headers.extend(headers_for_log)
+
          combi = self._insert_header(df=combi, new_head_list=period_headers, header_idx=0)
          return combi
-
+     #test
      def modal(self, cols=None, by_date=False, day_t="60min", evening_t="60min", night_t="15min"):
          """
          Get a dataframe summarising Modal L90 values for each time period, as suggested by BS 4142:2014.
@@ -179,17 +193,17 @@
              pos_summary = []
              # Daytime
              period_headers = ["Daytime"]
-             days = log.get_modal(data=log._get_period(data=log.as_interval(t=day_t), period="days"), by_date=by_date, cols=cols)
+             days = log.get_modal(data=log.get_period(data=log.as_interval(t=day_t), period="days"), by_date=by_date, cols=cols)
              days.sort_index(inplace=True)
              pos_summary.append(days)
              # Evening
              if log.is_evening():
                  period_headers.append("Evening")
-                 evenings = log.get_modal(data=log._get_period(data=log.as_interval(t=evening_t), period="evenings"), by_date=by_date, cols=cols)
+                 evenings = log.get_modal(data=log.get_period(data=log.as_interval(t=evening_t), period="evenings"), by_date=by_date, cols=cols)
                  evenings.sort_index(inplace=True)
                  pos_summary.append(evenings)
              # Night time
-             nights = log.get_modal(data=log._get_period(data=log.as_interval(t=night_t), period="nights"), by_date=by_date, cols=cols)
+             nights = log.get_modal(data=log.get_period(data=log.as_interval(t=night_t), period="nights"), by_date=by_date, cols=cols)
              nights.sort_index(inplace=True)
              pos_summary.append(nights)
              period_headers.append("Night-time")
@@ -220,17 +234,17 @@
              pos_summary = []
              # Daytime
              period_headers = ["Daytime"]
-             days = log.counts(data=log._get_period(data=log.as_interval(t=day_t), period="days"), cols=cols)
+             days = log.counts(data=log.get_period(data=log.as_interval(t=day_t), period="days"), cols=cols)
              days.sort_index(inplace=True)
              pos_summary.append(days)
              # Evening
              if log.is_evening():
                  period_headers.append("Evening")
-                 evenings = log.counts(data=log._get_period(data=log.as_interval(t=evening_t), period="evenings"), cols=cols)
+                 evenings = log.counts(data=log.get_period(data=log.as_interval(t=evening_t), period="evenings"), cols=cols)
                  evenings.sort_index(inplace=True)
                  pos_summary.append(evenings)
              # Night time
-             nights = log.counts(data=log._get_period(data=log.as_interval(t=night_t), period="nights"), cols=cols)
+             nights = log.counts(data=log.get_period(data=log.as_interval(t=night_t), period="nights"), cols=cols)
              nights.sort_index(inplace=True)
              pos_summary.append(nights)
              period_headers.append("Night-time")
@@ -262,7 +276,7 @@
          for key in self._logs.keys():
              log = self._logs[key]
              combined_list = []
-             maxes = log.get_nth_high_low(n=n, data=log._get_period(data=log.as_interval(t=t), period=period))[["Lmax", "Time"]]
+             maxes = log.get_nth_high_low(n=n, data=log.get_period(data=log.as_interval(t=t), period=period))[["Lmax", "Time"]]
              maxes.sort_index(inplace=True)
              combined_list.append(maxes)
              summary = pd.concat(objs=combined_list, axis=1)
@@ -285,15 +299,15 @@
          for key in self._logs.keys():
              log = self._logs[key]
              # Day
-             days = log._get_period(data=log.get_antilogs(), period="days")
+             days = log.get_period(data=log.get_antilogs(), period="days")
              days = days[leq_cols].apply(lambda x: np.round(10*np.log10(np.mean(x)), DECIMALS))
              # Night-time
-             nights = log._get_period(data=log.get_antilogs(), period="nights")
+             nights = log.get_period(data=log.get_antilogs(), period="nights")
              nights = nights[leq_cols].apply(lambda x: np.round(10*np.log10(np.mean(x)), DECIMALS))
              df = pd.DataFrame
              # Evening
              if log.is_evening():
-                 evenings = log._get_period(data=log.get_antilogs(), period="evenings")
+                 evenings = log.get_period(data=log.get_antilogs(), period="evenings")
                  evenings = evenings[leq_cols].apply(lambda x: np.round(10 * np.log10(np.mean(x)), DECIMALS))
                  df = pd.concat([days, evenings, nights], axis=1, keys=["Daytime", "Evening", "Night-time"])
              else:
@@ -330,6 +344,101 @@
                           index=["Min", "Max", "Mean"]).drop(columns=["dt"]).round(decimals=1)


+
+ appid = ""
+ # with open("tests/openweather_app_id.txt") as f:
+ #     appid = f.readlines()[0]
+
+ w_dict = {
+     "start": "2022-09-16 12:00:00",
+     "end": "2022-09-17 18:00:00",
+     "interval": 6,
+     "api_key": appid,
+     "country": "GB",
+     "postcode": "WC1",
+     "tz": "GB"
+ }
+
+
+ def test_weather_obj(weather_test_dict):
+     hist = WeatherHistory(start=w_dict["start"], end=w_dict["end"], interval=w_dict["interval"],
+                           api_key=w_dict["api_key"], country=w_dict["country"], postcode=w_dict["postcode"],
+                           tz=w_dict["tz"])
+     hist.compute_weather_history()
+     return hist
+
+ #TODO: Make this take the start and end times of a Survey object.
+ #TODO: Implement post codes instead of coordinates
+ #TODO: Implement the WeatherHistory as methods within Survey.
+ class WeatherHistory:
+     def __init__(self):
+         return
+
+     def reinit(self, start=None, end=None, interval=6, api_key="", country="GB", postcode="WC1", tz="",
+                units="metric"):
+         if api_key==None:
+             raise ValueError("API key is missing")
+         if type(start) == str:
+             self._start = dt.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
+         else:
+             self._start = start
+         if type(end) == str:
+             self._end = dt.datetime.strptime(end, "%Y-%m-%d %H:%M:%S")
+         else:
+             self._end = end
+         self._interval = interval
+         self._api_key = str(api_key)
+         self._lat, self._lon = self.get_latlon(api_key=api_key, country=country, postcode=postcode)
+         self._hist = None
+         self._units = units
+
+     def get_latlon(self, api_key="", country="GB", postcode=""):
+         query = str("http://api.openweathermap.org/geo/1.0/zip?zip=" + postcode + "," + country + "&appid=" + api_key)
+         resp = requests.get(query)
+         return resp.json()["lat"], resp.json()["lon"]
+
+     def _construct_api_call(self, timestamp):
+         base = "https://api.openweathermap.org/data/3.0/onecall/timemachine?"
+         query = str(base + "lat=" + str(self._lat) + "&" + "lon=" + str(self._lon) + "&" + "units=" + self._units + \
+                     "&" + "dt=" + str(timestamp) + "&" + "appid=" + self._api_key)
+         return query
+
+     def _construct_timestamps(self):
+         next_time = (self._start + dt.timedelta(hours=self._interval))
+         timestamps = [int(self._start.timestamp())]
+         while next_time < self._end:
+             timestamps.append(int(next_time.timestamp()))
+             next_time += dt.timedelta(hours=self._interval)
+         return timestamps
+
+     def _make_and_parse_api_call(self, query):
+         response = requests.get(query)
+         # This drops some unwanted cols like lat, lon, timezone and tz offset.
+         resp_dict = response.json()["data"][0]
+         del resp_dict["weather"]  # delete weather key as not useful.
+         # TODO: parse 'weather' nested dict.
+         return resp_dict
+
+     def compute_weather_history(self, drop_cols):
+         # construct timestamps
+         timestamps = self._construct_timestamps()
+         # make calls to API
+         responses = []
+         for ts in timestamps:
+             query = self._construct_api_call(timestamp=ts)
+             response_dict = self._make_and_parse_api_call(query=query)
+             responses.append(pd.Series(response_dict))
+         df = pd.concat(responses, axis=1).transpose()
+         for col in ["dt", "sunrise", "sunset"]:
+             df[col] = df[col].apply(lambda x: dt.datetime.fromtimestamp(int(x)))  # convert timestamp into datetime
+         df.drop(columns=drop_cols, inplace=True)
+         return df
+
+     def get_weather_history(self):
+         return self._hist
+
+
+
  # TODO: Fix this bug in weatherhist
  # survey.weather(api_key=r"eef3f749e018627b70c2ead1475a1a32", postcode="HA8")
  # dt temp pressure humidity clouds wind_speed wind_deg \
@@ -415,4 +524,10 @@
  # File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\numpy\_core\_methods.py", line 48, in _amin
  # return umr_minimum(a, axis, None, out, keepdims, initial, where)
  # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- # TypeError: '<=' not supported between instances of 'dict' and 'dict'
+ # TypeError: '<=' not supported between instances of 'dict' and 'dict'
+
+
+ #TODO: Fix this error:
+ #
+ # C:\Users\tonyr\PycharmProjects\pycoustic\pycoustic\survey.py:316: FutureWarning:
+ # The behavior of pd.concat with len(keys) != len(objs) is deprecated. In a future version this will raise instead of truncating to the smaller of the two sequences
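
The deprecated pattern behind that warning is passing more keys than objects to pd.concat, which pandas currently truncates; presumably the no-evening branch shown above still passes all three period keys for only two frames. A minimal sketch of the failure and a fix (illustrative data):

    import pandas as pd

    days = pd.Series([55.0], name="Leq")
    nights = pd.Series([48.0], name="Leq")

    # Deprecated: 3 keys for 2 objects; pandas truncates to the first two keys
    # today and will raise in a future version.
    df = pd.concat([days, nights], axis=1, keys=["Daytime", "Evening", "Night-time"])

    # Fix: derive the keys from the objects actually being concatenated.
    objs = {"Daytime": days, "Night-time": nights}
    df = pd.concat(objs, axis=1)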
{pycoustic-0.1.8.dist-info → pycoustic-0.1.10.dist-info}/METADATA CHANGED
@@ -1,11 +1,13 @@
  Metadata-Version: 2.3
  Name: pycoustic
- Version: 0.1.8
+ Version: 0.1.10
  Summary:
  Author: thumpercastle
  Author-email: tony.ryb@gmail.com
- Requires-Python: >=3.11.9,<4.0
+ Requires-Python: >=3.10,<=3.13
  Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Requires-Dist: numpy (==2.2.6)
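
The loosened Requires-Python bound corresponds to a Poetry constraint along these lines (a sketch; the actual pyproject.toml is not part of this diff):

    [tool.poetry.dependencies]
    python = ">=3.10,<=3.13"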