swmm-pandas 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swmm/pandas/__init__.py +7 -0
- swmm/pandas/constants.py +37 -0
- swmm/pandas/input/README.md +61 -0
- swmm/pandas/input/__init__.py +2 -0
- swmm/pandas/input/_section_classes.py +2309 -0
- swmm/pandas/input/input.py +888 -0
- swmm/pandas/input/model.py +403 -0
- swmm/pandas/output/__init__.py +2 -0
- swmm/pandas/output/output.py +2580 -0
- swmm/pandas/output/structure.py +317 -0
- swmm/pandas/output/tools.py +32 -0
- swmm/pandas/py.typed +0 -0
- swmm/pandas/report/__init__.py +1 -0
- swmm/pandas/report/report.py +773 -0
- swmm_pandas-0.6.0.dist-info/METADATA +71 -0
- swmm_pandas-0.6.0.dist-info/RECORD +19 -0
- swmm_pandas-0.6.0.dist-info/WHEEL +4 -0
- swmm_pandas-0.6.0.dist-info/entry_points.txt +4 -0
- swmm_pandas-0.6.0.dist-info/licenses/LICENSE.md +157 -0
swmm/pandas/output/structure.py
ADDED
@@ -0,0 +1,317 @@
from __future__ import annotations

from collections.abc import Sequence

import numpy as np
from pandas import NA, DataFrame

from swmm.pandas.output.tools import arrayish

# Factors that convert a flow in each SWMM flow unit, multiplied by a
# duration in seconds, into a volume in the listed unit (MG or CM).
volumeConstants = {
    "CFS": dict(multiplier=1 * (7.481 / 1e6), volumeUnits="MG"),
    "GPM": dict(multiplier=(1 / 60) * (1 / 1e6), volumeUnits="MG"),
    "MGD": dict(multiplier=(1 / 86400) * 1, volumeUnits="MG"),
    "CMS": dict(multiplier=1 * 1, volumeUnits="CM"),
    "LPS": dict(multiplier=1 * (1 / 1000), volumeUnits="CM"),
    "MLD": dict(multiplier=(1 / 86400) * 1000, volumeUnits="CM"),
}

hour_unit = np.timedelta64(1, "h")
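As a quick sanity check on these factors (illustrative only, not part of the module), one hour of flow at 1 CFS should come out to 3600 s * 7.481 gal/cf / 1e6 ≈ 0.0269 MG:

seconds = 3600
volume_mg = 1.0 * seconds * volumeConstants["CFS"]["multiplier"]
print(round(volume_mg, 4))  # 0.0269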
# This class could be DRYer as written, but it should also integrate with the
# inp file module and rpt file module when they are implemented. Plan to
# refactor and make it more DRY when those are added.


class Structure:
    """
    A class that represents a single system structure that may be made up of
    multiple model elements. The outputs from each element are combined into a single
    time series for analysis as if they were one structure.

    The Structure class can be used to summarize flow and flooding at one or more model
    elements from a particular simulation. Parse link flow time series into discrete events
    with the `flowEvents` method, or summarize flooding events with the `floodEvents` method.

    Parameters
    ----------
    outfile: swmm.pandas.Output
        The swmm-pandas outfile object containing the model elements.
    link: Union[str, Sequence[str]]
        The list of links that belong to the structure.
    node: Union[str, Sequence[str]]
        The list of nodes that belong to the structure.
    """

    def __init__(
        self,
        outfile,
        link: str | Sequence[str],
        node: str | Sequence[str],
    ):
        self.out = outfile
        """The Output object from which this structure is derived"""
        self.link = link
        """A list of the link(s) that belong to this structure"""
        self.node = node
        """A list of the node(s) that belong to this structure"""
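A minimal usage sketch. The output file name and element IDs here are hypothetical, and Output is assumed to be the swmm-pandas binary-output reader:

from swmm.pandas import Output
from swmm.pandas.output.structure import Structure

out = Output("model.out")                             # hypothetical .out file
struct = Structure(out, link=["C1", "C2"], node="J1")
print(struct.flowFrame.head())                        # one flow column per link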
    @property
    def floodFrame(self) -> DataFrame:
        """
        Returns a pandas DataFrame with the flood rates
        of each node in the structure

        Returns
        -------
        pd.DataFrame
            Time series of flooding for each node
        """
        if not hasattr(self, "_floodFrame"):
            self._floodFrame = self.out.node_series(
                self.node, "flooding_losses", columns="elem"
            )

        return self._floodFrame

    @property
    def flowFrame(self) -> DataFrame:
        """
        Returns a pandas DataFrame with the flow rates
        of each link in the structure

        Returns
        -------
        pd.DataFrame
            Time series of flow for each link
        """
        if not hasattr(self, "_flowFrame"):
            self._flowFrame = self.out.link_series(
                self.link, "flow_rate", columns="elem"
            )

        return self._flowFrame

    def _aggSeries(
        self,
        df: DataFrame,
        useNegative: bool | Sequence[bool] = False,
        reverse: bool | int | Sequence[bool | int] = False,
        aggFunc: str = "sum",
    ):
        """
        Aggregate a multi-element time series into a single-element time series.
        This function is used to calculate combined flow rates and flooding rates
        for the structure.

        Parameters
        ----------
        df: pd.DataFrame
            A DataFrame with a time series column for each element in the structure
            (e.g. the output of self.flowFrame)
        useNegative: Union[bool, Sequence[bool]], optional
            If true, negative values will not be removed from the time series.
            Can either provide a boolean to apply to all columns or a list of
            booleans to apply to each column, by default False
        reverse: Union[bool, Sequence[bool]], optional
            If true, the time series will be multiplied by -1.
            Can either provide a boolean to apply to all columns or a list of
            booleans to apply to each column, by default False
        aggFunc: str, optional
            The aggregation function to apply to all columns in the DataFrame. Should be
            compatible with `pd.DataFrame.agg`_, by default "sum"

            .. _pd.DataFrame.agg: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.agg.html


        Returns
        -------
        pd.Series
            Single time series aggregated from input DataFrame.

        Raises
        ------
        ValueError
            If useNegative or reverse arguments are not a compatible type.

        """

        # reverse values if requested
        if isinstance(reverse, arrayish):
            reverse = [-1 if col else 1 for col in reverse]
        elif isinstance(reverse, bool):
            reverse = -1 if reverse else 1
        else:
            raise ValueError(
                f"reverse must be either bool or sequence of bool, given {type(reverse)}"
            )
        df = df * reverse

        # screen out negative values if requested
        if isinstance(useNegative, arrayish):
            for i, col in enumerate(df.columns):
                if not useNegative[i]:
                    df.loc[df[col] < 0, col] = 0
        elif isinstance(useNegative, bool):
            if not useNegative:
                for i, col in enumerate(df.columns):
                    df.loc[df[col] < 0, col] = 0
        else:
            raise ValueError(
                f"useNegative must be either bool or sequence of bool, given {type(useNegative)}"
            )

        # return aggregated df according to given aggFunc
        return df.agg(func=aggFunc, axis=1)
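The reverse/useNegative handling above can be mirrored with a small standalone DataFrame (plain pandas; clip stands in for the column loop):

import pandas as pd

df = pd.DataFrame({"C1": [1.0, -2.0], "C2": [3.0, 4.0]})
flipped = df * [-1, 1]              # reverse=[True, False] flips C1's sign
screened = flipped.clip(lower=0)    # useNegative=False zeroes negatives
print(screened.agg("sum", axis=1))  # row sums: 3.0 and 6.0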
    def flowEvents(
        self,
        inter_event_period: float = 6,
        thresholdFlow: float = 0.01,
        useNegativeFlow: bool | Sequence[bool] = False,
        reverseFlow: bool | Sequence[bool] = False,
    ):
        """
        Bin flow data into discrete events based on an inter-event period and threshold flow rate.
        Maximum flow rates, total flow volumes, and the duration of each event are returned in a DataFrame.

        Parameters
        ----------
        inter_event_period: float, optional
            The period in hours of flow less than or equal to thresholdFlow that demarcates
            flow events, defaults to 6
        thresholdFlow: float, optional
            The flow rate in model flow units at or below which flow is considered dry or
            baseline and not significant, defaults to 0.01
        useNegativeFlow: Union[bool, Sequence[bool]], optional
            If true, the method will consider negative flows when calculating flow volumes, defaults to False
        reverseFlow: Union[bool, Sequence[bool]], optional
            If true, the method will calculate the flow in the reverse direction by multiplying
            the time series by negative one, defaults to False

        Returns
        -------
        pd.DataFrame
            DataFrame with statistics on each flow event
        """
        # pull aggregated series
        series = self._aggSeries(self.flowFrame, useNegativeFlow, reverseFlow)

        # put series in DataFrame, and add event_num column
        q = DataFrame(
            series[series > thresholdFlow], columns=["flow_rate"]
        ).reset_index()
        q["event_num"] = NA
        # initialize first event
        q.loc[0, "event_num"] = 1

        # calculate period between flows greater than threshold
        hours = q.datetime.diff(1) / hour_unit

        # slice out times demarcating a new event and
        # assign event numbers to those starting points
        slicer = hours > inter_event_period
        q.loc[slicer, "event_num"] = range(2, sum(slicer) + 2)
        q["event_num"] = q.event_num.ffill()

        # group by event_num
        gpd = q.groupby("event_num")

        # find indices of max flow timesteps in each event
        maxSer = gpd.flow_rate.idxmax()

        # find event start date
        start_date = gpd.datetime.min().rename("start_datetime")
        # calculate volume for each event (flow rate summed over report
        # steps in seconds, converted to MG or CM)
        vol = (
            gpd.flow_rate.sum()
            * self.out.report
            * volumeConstants[self.out.units[1]]["multiplier"]
        )

        # add unit name to column
        vol.name = f"totalVolume_{volumeConstants[self.out.units[1]]['volumeUnits']}"

        # calculate the duration of each event in hours
        durations = (gpd.datetime.count() * self.out.report) / 60 / 60
        durations.name = "hours_duration"

        # join event volumes and durations with event maxima
        return (
            q.loc[maxSer]
            .join(start_date, on="event_num")
            .join(vol, on="event_num")
            .join(durations, on="event_num")
            .rename({"flow_rate": "maxFlow", "datetime": "time_of_maxFlow"}, axis=1)
            .set_index("event_num")
        )
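Continuing the earlier sketch (same hypothetical objects), an event table could be pulled like so:

events = struct.flowEvents(inter_event_period=12, thresholdFlow=0.05)
# one row per event, indexed by event_num: maxFlow, time_of_maxFlow,
# start_datetime, totalVolume_<MG|CM>, and hours_duration
print(events.head())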
    def floodEvents(
        self,
        inter_event_period: float = 6,
        thresholdFlood: float = 0.01,
    ):
        """
        Bin flooding data into discrete events based on an inter-event period and threshold flooding rate.
        Maximum flooding rates and the duration of each flooding event are returned in a DataFrame.

        TODO: add in ponded depth when inp file is integrated. Ponded volume from out file is tough to interpret alone.

        Parameters
        ----------
        inter_event_period: float, optional
            The period in hours of flooding less than or equal to thresholdFlood that demarcates
            flooding events, defaults to 6
        thresholdFlood: float, optional
            The flooding rate in model flow units above which flooding is considered in
            calculations, defaults to 0.01

        Returns
        -------
        pd.DataFrame
            DataFrame with statistics on each flooding event
        """
        series = self._aggSeries(self.floodFrame)

        # put series in DataFrame, and add event_num column
        q = DataFrame(
            series[series > thresholdFlood],
            columns=["flooding_losses"],
        ).reset_index()
        q["event_num"] = NA
        # initialize first event
        q.loc[0, "event_num"] = 1

        # calculate period between flooding rates greater than threshold
        hours = q.datetime.diff(1) / hour_unit

        # slice out times demarcating a new event and
        # assign event numbers to those starting points
        slicer = hours > inter_event_period
        q.loc[slicer, "event_num"] = range(2, sum(slicer) + 2)
        q["event_num"] = q.event_num.ffill()

        # group by event_num
        gpd = q.groupby("event_num")

        # find indices of max flooding timesteps in each event
        maxSer = gpd.flooding_losses.idxmax()

        # calculate the duration of each event in hours
        durations = (gpd.datetime.count() * self.out.report) / 60 / 60
        durations.name = "hours_duration"

        # return event maxima joined with durations
        return (
            q.loc[maxSer]
            .join(durations, on="event_num")
            .rename(
                {"flooding_losses": "maxFloodRate", "datetime": "time_of_maxFlood"},
                axis=1,
            )
            .set_index("event_num")
        )
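And analogously for flooding at the structure's nodes:

floods = struct.floodEvents(inter_event_period=6, thresholdFlood=0.1)
# one row per event: maxFloodRate, time_of_maxFlood, hours_duration
print(floods.head())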
swmm/pandas/output/tools.py
ADDED
@@ -0,0 +1,32 @@
from __future__ import annotations

import numpy as np
from aenum import EnumMeta

# type tuples for isinstance checks against sequence-like arguments
arrayishNone = (list, tuple, set, np.ndarray, type(None))
arrayish = (list, tuple, set, np.ndarray)


def elements(path: str) -> dict[str, list[str]]:
    """Parse an INP-style text file into a dict of section name -> raw lines."""
    with open(path) as fil:
        elements: dict[str, list[str]] = {}
        for lin in fil:
            line = lin.replace("\n", "")
            # a bracketed header such as [JUNCTIONS] starts a new section
            if "[" in line:
                section = line.replace("[", "").replace("]", "").lower().strip()
                elements[section] = []
                continue
            # non-empty lines belong to the current section
            if len(line) > 0:
                elements[section].append(line)
        return elements
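A small round trip through elements() (the INP content here is made up):

import tempfile

inp_text = "[TITLE]\nExample model\n\n[JUNCTIONS]\nJ1  100  10\n"
with tempfile.NamedTemporaryFile("w", suffix=".inp", delete=False) as f:
    f.write(inp_text)
print(elements(f.name))
# {'title': ['Example model'], 'junctions': ['J1  100  10']}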
def _enum_get(enum: EnumMeta, name: str) -> int | None:
    """Case-insensitive enum member lookup; returns None when absent."""
    try:
        return enum.__getitem__(name.upper())
    except KeyError:
        return None


def _enum_keys(enum: EnumMeta) -> list[str]:
    """Lowercased member names of an enum."""
    return list(map(lambda x: x.lower(), enum.__members__.keys()))
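For illustration, a stdlib IntEnum stands in for the aenum types these helpers normally receive:

from enum import IntEnum

class Attr(IntEnum):
    FLOW_RATE = 0
    DEPTH = 1

print(repr(_enum_get(Attr, "depth")))  # <Attr.DEPTH: 1>
print(_enum_get(Attr, "bogus"))        # None
print(_enum_keys(Attr))                # ['flow_rate', 'depth']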
swmm/pandas/py.typed
ADDED
File without changes
swmm/pandas/report/__init__.py
ADDED
@@ -0,0 +1 @@
from swmm.pandas.report.report import Report