pycoustic 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- pycoustic/log.py +15 -4
- pycoustic/pycoustic_gui_app-ai.py +635 -0
- pycoustic/pycoustic_gui_app.py +277 -0
- pycoustic/pycoustic_streamlit_gpt5.py +421 -0
- pycoustic/streamlit-ai.py +492 -0
- pycoustic/streamlit-new.py +142 -0
- pycoustic/streamlit_pycoustic_gpt5_dead.py +234 -0
- pycoustic/survey.py +125 -22
- pycoustic/weather.py +1 -0
- {pycoustic-0.1.7.dist-info → pycoustic-0.1.9.dist-info}/METADATA +4 -2
- pycoustic-0.1.9.dist-info/RECORD +14 -0
- pycoustic-0.1.7.dist-info/RECORD +0 -8
- {pycoustic-0.1.7.dist-info → pycoustic-0.1.9.dist-info}/WHEEL +0 -0
pycoustic/streamlit_pycoustic_gpt5_dead.py
ADDED
@@ -0,0 +1,234 @@
+# streamlit_pycoustic_app.py
+import ast
+import datetime as dt
+import tempfile
+from pathlib import Path
+from typing import Any, Dict, Iterable
+
+import streamlit as st
+
+from pycoustic import Log, Survey
+
+
+def _parse_kwargs(text: str) -> Dict[str, Any]:
+    """
+    Safely parse a Python dict literal from text area.
+    Returns {} if empty or invalid.
+    """
+    if not text or not text.strip():
+        return {}
+    try:
+        parsed = ast.literal_eval(text)
+        return parsed if isinstance(parsed, dict) else {}
+    except Exception:
+        return {}
+
+
+def _display_result(obj: Any):
+    """
+    Display helper to handle common return types.
+    """
+    # Plotly Figure-like
+    if hasattr(obj, "to_plotly_json"):
+        st.plotly_chart(obj, use_container_width=True)
+        return
+
+    # Pandas DataFrame-like
+    if hasattr(obj, "to_dict") and hasattr(obj, "columns"):
+        st.dataframe(obj, use_container_width=True)
+        return
+
+    # Dict/list -> JSON
+    if isinstance(obj, (dict, list)):
+        st.json(obj)
+        return
+
+    # Fallback
+    st.write(obj)
+
+
+def _ensure_state():
+    if "survey" not in st.session_state:
+        st.session_state["survey"] = None
+    if "periods" not in st.session_state:
+        st.session_state["periods"] = {"day": (7, 0), "evening": (19, 0), "night": (23, 0)}
+
+
+def _write_uploaded_to_temp(uploaded) -> str:
+    """
+    Persist an UploadedFile to a temporary file and return the path.
+    Using a real file path keeps Log(...) happy across environments.
+    """
+    suffix = Path(uploaded.name).suffix or ".csv"
+    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
+        tmp.write(uploaded.getbuffer())
+        return tmp.name
+
+
+def _build_survey_from_files(files) -> Survey:
+    """
+    Create a Survey and attach Log objects for each uploaded file.
+    """
+    survey = Survey()
+    for f in files:
+        # Persist to disk to ensure compatibility with pandas and any path usage in Log
+        tmp_path = _write_uploaded_to_temp(f)
+        log_obj = Log(path=tmp_path)
+
+        key = Path(f.name).stem
+        # Attach Log to survey
+        if hasattr(survey, "add_log"):
+            try:
+                survey.add_log(key, log_obj)
+            except TypeError:
+                survey.add_log(log_obj, key)
+        else:
+            # Fallback to internal storage if no public API is available
+            survey._logs[key] = log_obj  # noqa: SLF001
+    return survey
+
+
+def _apply_periods_to_all_logs(survey: Survey, times: Dict[str, tuple[int, int]]):
+    """
+    Apply set_periods to each Log attached to the Survey.
+    This avoids calling set_periods on Survey if it doesn't exist.
+    """
+    logs: Iterable[Log] = getattr(survey, "_logs", {}).values()
+    for log in logs:
+        if hasattr(log, "set_periods"):
+            log.set_periods(times=times)
+
+
+def _render_period_controls(survey: Survey):
+    st.subheader("Assessment Periods")
+
+    # Current periods from session (defaults set in _ensure_state)
+    periods = st.session_state["periods"]
+    day_h, day_m = periods["day"]
+    eve_h, eve_m = periods["evening"]
+    night_h, night_m = periods["night"]
+
+    c1, c2, c3 = st.columns(3)
+    with c1:
+        day_time = st.time_input("Day starts", value=dt.time(day_h, day_m), key="period_day_start")
+    with c2:
+        eve_time = st.time_input("Evening starts", value=dt.time(eve_h, eve_m), key="period_eve_start")
+    with c3:
+        night_time = st.time_input("Night starts", value=dt.time(night_h, night_m), key="period_night_start")
+
+    new_periods = {
+        "day": (int(day_time.hour), int(day_time.minute)),
+        "evening": (int(eve_time.hour), int(eve_time.minute)),
+        "night": (int(night_time.hour), int(night_time.minute)),
+    }
+
+    if st.button("Apply periods to all logs", key="apply_periods"):
+        try:
+            _apply_periods_to_all_logs(survey, new_periods)
+            st.session_state["periods"] = new_periods
+            st.success("Periods applied to all logs.")
+        except Exception as e:
+            st.warning(f"Could not set periods: {e}")
+
+
+def _render_method_runner(survey: Survey, method_name: str, help_text: str = ""):
+    """
+    Generic UI for running a Survey method with kwargs provided via text area.
+    """
+    with st.expander(method_name, expanded=True):
+        if help_text:
+            st.caption(help_text)
+
+        kwargs_text = st.text_area(
+            "kwargs (Python dict literal)",
+            value="{}",
+            key=f"kwargs_{method_name}",
+            placeholder='Example: {"position": "UA1", "date": "2023-06-01"}',
+            height=100,
+        )
+
+        kwargs = _parse_kwargs(kwargs_text)
+        if st.button(f"Run {method_name}", key=f"run_{method_name}"):
+            try:
+                fn = getattr(survey, method_name)
+                result = fn(**kwargs)
+                _display_result(result)
+            except AttributeError:
+                st.error(f"Survey has no method named '{method_name}'.")
+            except Exception as e:
+                st.error(f"Error running {method_name}: {e}")
+
+
+def main():
+    st.set_page_config(page_title="pycoustic GUI", layout="wide")
+    st.title("pycoustic – Streamlit GUI")
+
+    _ensure_state()
+
+    st.sidebar.header("Load CSV Logs")
+    files = st.sidebar.file_uploader(
+        "Upload one or more CSV files",
+        type=["csv"],
+        accept_multiple_files=True,
+        help="Each file becomes a Log; all Logs go into one Survey."
+    )
+
+    build = st.sidebar.button("Create / Update Survey", type="primary")
+
+    if build and files:
+        try:
+            survey = _build_survey_from_files(files)
+            # Apply default periods to all logs
+            _apply_periods_to_all_logs(survey, st.session_state["periods"])
+            st.session_state["survey"] = survey
+            st.success("Survey created/updated.")
+        except Exception as e:
+            st.error(f"Unable to create Survey: {e}")
+
+    survey: Survey = st.session_state.get("survey")
+
+    if survey is None:
+        st.info("Upload CSV files in the sidebar and click 'Create / Update Survey' to begin.")
+        return
+
+    # Period controls
+    _render_period_controls(survey)
+
+    st.markdown("---")
+    st.header("Survey Outputs")
+
+    _render_method_runner(
+        survey,
+        "resi_summary",
+        help_text="Summary results for residential assessment. Provide any optional kwargs here."
+    )
+    _render_method_runner(
+        survey,
+        "modal",
+        help_text="Run modal analysis over the survey. Provide any optional kwargs here."
+    )
+    _render_method_runner(
+        survey,
+        "leq_spectra",
+        help_text="Compute or plot Leq spectra. Provide any optional kwargs here."
+    )
+    _render_method_runner(
+        survey,
+        "lmax_spectra",
+        help_text="Compute or plot Lmax spectra. Provide any optional kwargs here."
+    )
+
+    st.markdown("---")
+    with st.expander("Loaded Logs", expanded=False):
+        try:
+            names = list(getattr(survey, "_logs", {}).keys())
+            if names:
+                st.write(", ".join(names))
+            else:
+                st.write("No logs found in survey.")
+        except Exception:
+            st.write("Unable to list logs.")
+
+
+if __name__ == "__main__":
+    main()
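
The script above drives the same objects a user would script by hand. Below is a minimal headless sketch of that workflow, using only the call signatures assumed by this uploaded GUI (Log(path=...), Survey.add_log(), Log.set_periods(times=...), Survey.resi_summary()); these are inferred from the file itself rather than from documented pycoustic API, and the CSV paths are placeholders.

    from pathlib import Path

    from pycoustic import Log, Survey

    # Placeholder measurement logs - replace with real CSV exports.
    paths = [Path("UA1.csv"), Path("UA2.csv")]

    survey = Survey()
    for p in paths:
        survey.add_log(p.stem, Log(path=str(p)))  # same attach pattern as _build_survey_from_files

    # Same default periods as the GUI: day 07:00, evening 19:00, night 23:00.
    for log in getattr(survey, "_logs", {}).values():  # mirrors the GUI's fallback access
        log.set_periods(times={"day": (7, 0), "evening": (19, 0), "night": (23, 0)})

    print(survey.resi_summary())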
pycoustic/survey.py
CHANGED
@@ -10,7 +10,7 @@ pd.set_option('display.max_rows', None)
 
 #survey.leq_spectra() bug
 #TODO: C:\Users\tonyr\PycharmProjects\pycoustic\.venv1\Lib\site-packages\pycoustic\survey.py:287: FutureWarning: The behavior of pd.concat with len(keys) != len(objs) is deprecated. In a future version this will raise instead of truncating to the smaller of the two sequences combi = pd.concat(all_pos, axis=1, keys=["UA1", "UA2"])
-
+#TODO: Survey should make a deep copy of Log objects. Otherwise setting time periods messes it up for other instances.
 
 class Survey:
     """
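
The new TODO describes a sharing bug: a Log attached to more than one Survey sees period changes made through any of them. A minimal sketch of the deep-copy-on-attach fix the comment suggests, assuming Survey keeps its logs in a _logs dict as elsewhere in this diff (this add_log signature is illustrative, not the released API):

    import copy

    def add_log(self, key, log):
        # Keep a private deep copy so a later set_periods() on this Survey
        # cannot mutate the caller's Log or a Log shared with another Survey.
        self._logs[key] = copy.deepcopy(log)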
@@ -39,6 +39,17 @@ class Survey:
         df.columns = new_cols
         return df
 
+    # def _leq_by_date(self, data, cols=None):
+    #     """
+    #     Delegate Leq-by-date computation to one of the underlying Log instances.
+    #     Assumes all logs share the same period configuration.
+    #     """
+    #     if not getattr(self, "_logs", None):
+    #         raise AttributeError("Survey has no logs available to compute _leq_by_date")
+    #     any_log = next(iter(self._logs.values()))
+    #     if not hasattr(any_log, "_leq_by_date"):
+    #         raise AttributeError("Underlying Log does not implement _leq_by_date")
+    #     return any_log._leq_by_date(data, cols=cols)
     # ###########################---PUBLIC---######################################
 
     def set_periods(self, times=None):
@@ -105,51 +116,54 @@
         leq_cols = [("Leq", "A")]
         if max_cols is None:
             max_cols = [("Lmax", "A")]
-
-
+
+        for key, lg in self._logs.items():  # changed: iterate items() to get lg directly
             combined_list = []
+            headers_for_log = []  # new: collect headers per log
+
             # Day
-            days =
+            days = lg.leq_by_date(lg.get_period(data=lg.get_antilogs(), period="days"), cols=leq_cols)
             days.sort_index(inplace=True)
             combined_list.append(days)
-
+            headers_for_log.extend(["Daytime"] * len(leq_cols))  # changed: don't reset global headers
+
             # Evening
-            if
-                evenings =
+            if lg.is_evening():
+                evenings = lg.leq_by_date(lg.get_period(data=lg.get_antilogs(), period="evenings"), cols=leq_cols)
                 evenings.sort_index(inplace=True)
                 combined_list.append(evenings)
-
-
+                headers_for_log.extend(["Evening"] * len(leq_cols))
+
             # Night Leq
-            nights =
+            nights = lg.leq_by_date(lg.get_period(data=lg.get_antilogs(), period="nights"), cols=leq_cols)
             nights.sort_index(inplace=True)
             combined_list.append(nights)
-
-
+            headers_for_log.extend(["Night-time"] * len(leq_cols))
+
             # Night max
-            maxes =
-            maxes =
-            maxes =
+            maxes = lg.as_interval(t=lmax_t)
+            maxes = lg.get_period(data=maxes, period="nights", night_idx=True)
+            maxes = lg.get_nth_high_low(n=lmax_n, data=maxes)[max_cols]
             maxes.sort_index(inplace=True)
-            # +++
-            # SS Feb2025 - Code changed to prevent exception
-            #maxes.index = maxes.index.date
             try:
                 maxes.index = pd.to_datetime(maxes.index)
                 maxes.index = maxes.index.date
             except Exception as e:
                 print(f"Error converting index to date: {e}")
-            # SSS ---
             maxes.index.name = None
             combined_list.append(maxes)
-
-
+            headers_for_log.extend(["Night-time"] * len(max_cols))
+
             summary = pd.concat(objs=combined_list, axis=1)
             summary = self._insert_multiindex(df=summary, super=key)
             combi = pd.concat(objs=[combi, summary], axis=0)
+
+            # append this log's headers to the global list
+            period_headers.extend(headers_for_log)
+
         combi = self._insert_header(df=combi, new_head_list=period_headers, header_idx=0)
         return combi
-
+    #test
     def modal(self, cols=None, by_date=False, day_t="60min", evening_t="60min", night_t="15min"):
         """
        Get a dataframe summarising Modal L90 values for each time period, as suggested by BS 4142:2014.
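
The removed "SS Feb2025" comments referred to the try/except retained above, which normalises the night-time Lmax index to plain dates before the per-period frames are concatenated. A standalone pandas sketch of that conversion (the timestamps and column name are illustrative, not pycoustic output):

    import pandas as pd

    maxes = pd.DataFrame(
        {"Lmax A": [68.1, 70.3]},
        index=["2023-06-01 23:15:00", "2023-06-02 02:45:00"],  # string timestamps, as read from CSV
    )
    try:
        maxes.index = pd.to_datetime(maxes.index)
        maxes.index = maxes.index.date  # keep the calendar date only, so rows align with the Leq frames
    except Exception as e:
        print(f"Error converting index to date: {e}")
    maxes.index.name = None
    print(maxes)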
@@ -200,6 +214,7 @@ class Survey:
         return combi
 
     def counts(self, cols=None, day_t="60min", evening_t="60min", night_t="15min"):
+        #TODO Need to order rows and rename from 'date'
         """
         Returns counts for each time period. For example, this can return the number of L90 occurrences at each decibel
         level for daytime and night-time periods.
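
A generic pandas sketch of the row ordering and index renaming the new TODO asks for (labels and values are illustrative, not real counts() output):

    import pandas as pd

    counts = pd.DataFrame(
        {"45 dB": [3, 1], "50 dB": [7, 4]},
        index=pd.Index(["2023-06-02", "2023-06-01"], name="date"),
    )

    counts = counts.sort_index()          # order the rows chronologically
    counts = counts.rename_axis("Date")   # rename the index away from the raw 'date' label
    print(counts)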
@@ -327,3 +342,91 @@ class Survey:
             raise ValueError("No weather history available. Use Survey.weather() first.")
         return pd.DataFrame([self._weatherhist.min(), self._weatherhist.max(), self._weatherhist.mean()],
                             index=["Min", "Max", "Mean"]).drop(columns=["dt"]).round(decimals=1)
+
+
+# TODO: Fix this bug in weatherhist
+# survey.weather(api_key=r"eef3f749e018627b70c2ead1475a1a32", postcode="HA8")
+#                      dt   temp pressure humidity clouds wind_speed wind_deg \
+# 0   2025-09-03 08:59:00  17.52      998       97     75       6.69      210
+# 1   2025-09-03 14:59:00  19.85      997       84     40       9.26      220
+# 2   2025-09-03 20:59:00  16.27   1003.0     90.0   20.0       4.63    240.0
+# 3   2025-09-04 02:59:00  14.59   1005.0     91.0   99.0       3.09    230.0
+# 4   2025-09-04 08:59:00  15.08     1004       93     40       4.12      200
+# 5   2025-09-04 14:59:00  18.73     1007       63     40       8.75      260
+# 6   2025-09-04 20:59:00  15.64   1013.0     76.0    0.0        3.6    270.0
+# 7   2025-09-05 02:59:00  11.42   1016.0     94.0    0.0       3.09    260.0
+# 8   2025-09-05 08:59:00  14.12   1020.0     89.0   20.0       3.09    270.0
+# 9   2025-09-05 14:59:00  22.16   1021.0     50.0    0.0       4.12    280.0
+# 10  2025-09-05 20:59:00  17.38   1023.0     75.0   75.0       3.09    220.0
+# 11  2025-09-06 02:59:00  14.37   1022.0     83.0   99.0       1.78    187.0
+# 12  2025-09-06 08:59:00  16.44   1020.0     73.0  100.0       3.48    138.0
+# 13  2025-09-06 14:59:00  23.21   1037.0     50.0    0.0       7.72    160.0
+# 14  2025-09-06 20:59:00   18.5   1035.0     75.0   93.0        3.6    120.0
+# 15  2025-09-07 02:59:00  16.06   1031.0     77.0   84.0       3.09    120.0
+# 16  2025-09-07 08:59:00  18.78   1029.0     77.0    0.0       4.63    110.0
+# 17  2025-09-07 14:59:00  23.82   1027.0     67.0   75.0       8.75    200.0
+# 18  2025-09-07 20:59:00  19.38   1031.0     76.0   72.0       4.63    200.0
+# 19  2025-09-08 02:59:00  14.49   1034.0     91.0    4.0       1.54    190.0
+# 20  2025-09-08 08:59:00  14.84   1037.0     85.0   20.0       4.12    240.0
+#             rain  wind_gust   uvi
+# 0   {'1h': 0.25}        NaN   NaN
+# 1   {'1h': 1.27}      14.92   NaN
+# 2            NaN        NaN   NaN
+# 3            NaN        NaN   NaN
+# 4   {'1h': 1.27}        NaN   NaN
+# 5   {'3h': 0.13}        NaN   NaN
+# 6            NaN        NaN   NaN
+# 7            NaN        NaN   NaN
+# 8            NaN        NaN   NaN
+# 9            NaN        NaN   NaN
+# 10           NaN        NaN   NaN
+# 11           NaN       3.31   0.0
+# 12           NaN        7.4  0.86
+# 13           NaN        NaN  2.96
+# 14           NaN        NaN   0.0
+# 15           NaN        NaN   0.0
+# 16           NaN        NaN   1.1
+# 17           NaN        NaN  2.24
+# 18           NaN        NaN   0.0
+# 19           NaN        NaN   0.0
+# 20           NaN        NaN  1.12
+# survey.weather_summary()
+# Traceback (most recent call last):
+#   File "<input>", line 1, in <module>
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pycoustic\survey.py", line 328, in weather_summary
+#     return pd.DataFrame([self._weatherhist.min(), self._weatherhist.max(), self._weatherhist.mean()],
+#            ^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\frame.py", line 11643, in min
+#     result = super().min(axis, skipna, numeric_only, **kwargs)
+#              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\generic.py", line 12388, in min
+#     return self._stat_function(
+#            ^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\generic.py", line 12377, in _stat_function
+#     return self._reduce(
+#            ^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\frame.py", line 11562, in _reduce
+#     res = df._mgr.reduce(blk_func)
+#           ^^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\internals\managers.py", line 1500, in reduce
+#     nbs = blk.reduce(func)
+#           ^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\internals\blocks.py", line 404, in reduce
+#     result = func(self.values)
+#              ^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\frame.py", line 11481, in blk_func
+#     return op(values, axis=axis, skipna=skipna, **kwds)
+#            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\nanops.py", line 147, in f
+#     result = alt(values, axis=axis, skipna=skipna, **kwds)
+#              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\nanops.py", line 404, in new_func
+#     result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
+#              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\pandas\core\nanops.py", line 1098, in reduction
+#     result = getattr(values, meth)(axis)
+#              ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#   File "C:\Users\tonyr\PycharmProjects\pycoustic\.venv2\Lib\site-packages\numpy\_core\_methods.py", line 48, in _amin
+#     return umr_minimum(a, axis, None, out, keepdims, initial, where)
+#            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# TypeError: '<=' not supported between instances of 'dict' and 'dict'
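
The TypeError at the end of the pasted traceback comes from the 'rain' column, which holds dicts such as {'1h': 0.25} that DataFrame.min()/max() cannot compare. A minimal reproduction and one possible fix (flattening the dict column before aggregating); this is a sketch, not the pycoustic implementation:

    import pandas as pd

    # Two rows shaped like the weather history dump above.
    hist = pd.DataFrame({
        "temp": [17.52, 19.85],
        "rain": [{"1h": 0.25}, {"1h": 1.27}],
    })

    # hist.min() raises: TypeError: '<=' not supported between instances of 'dict' and 'dict'

    # Possible fix: reduce each rain dict to a single figure (or drop non-numeric columns first).
    hist["rain"] = hist["rain"].apply(lambda r: sum(r.values()) if isinstance(r, dict) else r)

    summary = pd.DataFrame(
        [hist.min(), hist.max(), hist.mean()],
        index=["Min", "Max", "Mean"],
    ).round(decimals=1)
    print(summary)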
pycoustic/weather.py
CHANGED
{pycoustic-0.1.7.dist-info → pycoustic-0.1.9.dist-info}/METADATA
CHANGED
@@ -1,11 +1,13 @@
 Metadata-Version: 2.3
 Name: pycoustic
-Version: 0.1.7
+Version: 0.1.9
 Summary:
 Author: thumpercastle
 Author-email: tony.ryb@gmail.com
-Requires-Python: >=3.
+Requires-Python: >=3.10,<=3.13
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: numpy (==2.2.6)
pycoustic-0.1.9.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+pycoustic/__init__.py,sha256=jq9Tzc5nEgXh8eNf0AkAypmw3Dda9A-iSy-tyFaTksA,89
+pycoustic/log.py,sha256=e8rAy9hIYP2H-3vTDVe0-6swe_n_gXjuFCu6Q-xNiYQ,17827
+pycoustic/pycoustic_gui_app-ai.py,sha256=nEX7Q5oWzTLmtC_xqbh74vXpQak8gwuqf2ScPq1Ir7o,24432
+pycoustic/pycoustic_gui_app.py,sha256=Hs61Y8fAp7uoRONa4RLSVl0UvGXZZ96n5eJGilErlAU,11143
+pycoustic/pycoustic_streamlit_gpt5.py,sha256=gpkPPBGwADt9HFI4S7YD1U-TjpLTMVwcBUJd7wTefek,14259
+pycoustic/streamlit-ai.py,sha256=OZdrQbGwQyVvA_4Q8bTOCZUZGdSlZG9NL9z3f16W-A8,16414
+pycoustic/streamlit-new.py,sha256=AR5dwQinMXugvGcyNvI_W59bfFRGj6E90Fqah9toKto,4885
+pycoustic/streamlit_pycoustic_gpt5_dead.py,sha256=sFUxLkvNUZoh2cVzruqsJJiLIlJxOQQpYYK6oHZfPlM,7309
+pycoustic/survey.py,sha256=6gC2sd0vOusx8bEyCwqmfSR5k04VeV93Ong0OdEVVks,24071
+pycoustic/tkgui.py,sha256=YAy5f_qkXZ3yU8BvB-nIVQX1fYwPs_IkwmDEXHPMAa4,13997
+pycoustic/weather.py,sha256=q9FbDKjY0WaNvaYMHeDk7Bhbq0_Q7ehsTM_vUaCjeAk,3753
+pycoustic-0.1.9.dist-info/METADATA,sha256=2NDXL0ovNkEJKxx-P2ErBkdTHNA1AWL77RFAaKQdI6o,8515
+pycoustic-0.1.9.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+pycoustic-0.1.9.dist-info/RECORD,,
pycoustic-0.1.7.dist-info/RECORD
DELETED
@@ -1,8 +0,0 @@
-pycoustic/__init__.py,sha256=jq9Tzc5nEgXh8eNf0AkAypmw3Dda9A-iSy-tyFaTksA,89
-pycoustic/log.py,sha256=HNdS2hKKbUdqY7iAMj9QJqoI9r4ZtJ7GCXnIx8XpTH4,17145
-pycoustic/survey.py,sha256=KTNCt4kV63Dq06RCbh9G9Nl8Frk8NsTt6AhxiMIEixg,17746
-pycoustic/tkgui.py,sha256=YAy5f_qkXZ3yU8BvB-nIVQX1fYwPs_IkwmDEXHPMAa4,13997
-pycoustic/weather.py,sha256=3FIzpp3jniA1SRObMCnKsobVFZxJX5gpugsAWA3bH8o,3751
-pycoustic-0.1.7.dist-info/METADATA,sha256=SEdgTvtyNxGVcgYFKrjwIh0TZw5gUzHoc_GRGaxkykg,8413
-pycoustic-0.1.7.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-pycoustic-0.1.7.dist-info/RECORD,,
{pycoustic-0.1.7.dist-info → pycoustic-0.1.9.dist-info}/WHEEL
File without changes